"""Phi-3 model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class Phi3Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Phi3Model`]. It is used to instantiate a Phi-3
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the
    [microsoft/Phi-3-mini-4k-instruct](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 32064):
            Vocabulary size of the Phi-3 model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`Phi3Model`].
        hidden_size (`int`, *optional*, defaults to 3072):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 8192):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention (GQA). If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by mean-pooling all the original heads within that group (see the conversion sketch after this argument
            list). For more details, check out [this paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not
            specified, defaults to `num_attention_heads`.
        resid_pdrop (`float`, *optional*, defaults to 0.0):
            Dropout probability for the MLP outputs.
        embd_pdrop (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the embeddings.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio after computing the attention scores.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 4096):
            The maximum sequence length that this model might ever be used with.
        original_max_position_embeddings (`int`, *optional*, defaults to 4096):
            The maximum sequence length that this model was trained with. This is used to determine the size of the
            original RoPE embeddings when using long scaling.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon value used for the RMSNorm.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether or not to tie the input and output word embeddings.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`dict`, *optional*):
            The scaling strategy for the RoPE embeddings. If `None`, no scaling is applied. If a dictionary, it must
            contain the following keys: `type`, `short_factor` and `long_factor`. The `type` must be `longrope` and
            the `short_factor` and `long_factor` must be lists of numbers with the same length as the hidden size
            divided by the number of attention heads divided by 2.
        bos_token_id (`int`, *optional*, defaults to 1):
            The id of the "beginning-of-sequence" token.
        eos_token_id (`int`, *optional*, defaults to 32000):
            The id of the "end-of-sequence" token.
        pad_token_id (`int`, *optional*, defaults to 32000):
            The id of the padding token.
        sliding_window (`int`, *optional*):
            Sliding window attention window size. If `None`, no sliding window is applied.
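
    The mean-pooling conversion mentioned for `num_key_value_heads` is not implemented in this file; the snippet
    below is only an illustrative sketch (hypothetical helper, PyTorch `k_proj`/`v_proj` weight layout
    `[num_attention_heads * head_dim, hidden_size]` assumed) of pooling a multi-head projection into grouped
    key/value heads:

    ```python
    import torch


    def meanpool_kv_heads(weight: torch.Tensor, num_attention_heads: int, num_key_value_heads: int) -> torch.Tensor:
        # Illustrative only: average each group of key/value heads into a single head.
        # `weight` is assumed to be a k_proj/v_proj matrix of shape [num_attention_heads * head_dim, hidden_size].
        head_dim = weight.shape[0] // num_attention_heads
        group_size = num_attention_heads // num_key_value_heads
        grouped = weight.view(num_key_value_heads, group_size, head_dim, -1)
        return grouped.mean(dim=1).reshape(num_key_value_heads * head_dim, -1)
    ```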

    Example:

    ```python
    >>> from transformers import Phi3Model, Phi3Config

    >>> # Initializing a Phi-3 style configuration
    >>> configuration = Phi3Config.from_pretrained("microsoft/Phi-3-mini-4k-instruct")

    >>> # Initializing a model from the configuration
    >>> model = Phi3Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
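
    >>> # Illustrative only (not a released checkpoint config): a LongRoPE-style long-context setup.
    >>> # `short_factor`/`long_factor` must each have hidden_size // num_attention_heads // 2 = 3072 // 32 // 2 = 48
    >>> # entries; real checkpoints ship tuned values instead of the placeholder 1.0s used here.
    >>> long_configuration = Phi3Config(
    ...     max_position_embeddings=131072,
    ...     original_max_position_embeddings=4096,
    ...     rope_scaling={"type": "longrope", "short_factor": [1.0] * 48, "long_factor": [1.0] * 48},
    ... )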
    ```"""

    model_type = "phi3"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32064,
        hidden_size=3072,
        intermediate_size=8192,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attention_dropout=0.0,
        hidden_act="silu",
        max_position_embeddings=4096,
        original_max_position_embeddings=4096,
        initializer_range=0.02,
        rms_norm_eps=1e-5,
        use_cache=True,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        rope_scaling=None,
        bos_token_id=1,
        eos_token_id=32000,
        pad_token_id=32000,
        sliding_window=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # Fall back to standard multi-head attention when no key/value head count is given.
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attention_dropout = attention_dropout
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.original_max_position_embeddings = original_max_position_embeddings
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self._rope_scaling_adjustment()
        self._rope_scaling_validation()
        self.sliding_window = sliding_window

        super().__init__(
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            pad_token_id=pad_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_adjustment(self):
        """
        Adjust the `type` of the `rope_scaling` configuration for backward compatibility.
        """
        if self.rope_scaling is None:
            return

        rope_scaling_type = self.rope_scaling.get("type", None)

        # Checkpoints released before the `longrope` naming used "su" or "yarn".
        if rope_scaling_type is not None and rope_scaling_type in ["su", "yarn"]:
            self.rope_scaling["type"] = "longrope"

    def _rope_scaling_validation(self):
        """
        Validate the `rope_scaling` configuration.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 3:
            raise ValueError(
                "`rope_scaling` must be a dictionary with three fields, `type`, `short_factor` and `long_factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_short_factor = self.rope_scaling.get("short_factor", None)
        rope_scaling_long_factor = self.rope_scaling.get("long_factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["longrope"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['longrope'], got {rope_scaling_type}")
        if not (
            isinstance(rope_scaling_short_factor, list)
            and all(isinstance(x, (int, float)) for x in rope_scaling_short_factor)
        ):
            raise ValueError(
                f"`rope_scaling`'s short_factor field must be a list of numbers, got {rope_scaling_short_factor}"
            )
        # Each factor list rescales half of the per-head rotary dimensions.
        if not len(rope_scaling_short_factor) == self.hidden_size // self.num_attention_heads // 2:
            raise ValueError(
                f"`rope_scaling`'s short_factor field must have length "
                f"{self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_short_factor)}"
            )
        if not (
            isinstance(rope_scaling_long_factor, list)
            and all(isinstance(x, (int, float)) for x in rope_scaling_long_factor)
        ):
            raise ValueError(
                f"`rope_scaling`'s long_factor field must be a list of numbers, got {rope_scaling_long_factor}"
            )
        if not len(rope_scaling_long_factor) == self.hidden_size // self.num_attention_heads // 2:
            raise ValueError(
                f"`rope_scaling`'s long_factor field must have length "
                f"{self.hidden_size // self.num_attention_heads // 2}, got {len(rope_scaling_long_factor)}"
            )