
"""Mllama model configuration"""

from typing import Dict, List, Optional

from ...configuration_utils import PretrainedConfig
from ...modeling_rope_utils import rope_config_validation
from ...utils import logging


logger = logging.get_logger(__name__)


class MllamaVisionConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`MllamaVisionModel`]. It is used to instantiate an
    Mllama vision model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the Mllama-11B.

    e.g. [meta-llama/Llama-3.2-11B-Vision](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision)

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 1280):
            Dimensionality of the encoder layers and the pooler layer.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer encoder.
        num_global_layers (`int`, *optional*, defaults to 8):
            Number of global layers in the Transformer encoder.
            Vision model has a second transformer encoder, called global.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            Number of channels in the input image.
        intermediate_size (`int`, *optional*, defaults to 5120):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        vision_output_dim (`int`, *optional*, defaults to 7680):
            Dimensionality of the vision model output, obtained by concatenating the output of the global
            transformer encoder with the intermediate-layer features selected by `intermediate_layers_indices`.
        image_size (`int`, *optional*, defaults to 448):
            The size (resolution) of each image *tile*.
        patch_size (`int`, *optional*, defaults to 14):
            The size (resolution) of each patch.
        norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the layer normalization layers.
        max_num_tiles (`int`, *optional*, defaults to 4):
            Maximum number of tiles for image splitting.
        intermediate_layers_indices (`List[int]`, *optional*, defaults to [3, 7, 15, 23, 30]):
            Indices of intermediate layers of transformer encoder from which to extract and output features.
            These output features are concatenated with final hidden state of transformer encoder.
        supported_aspect_ratios (`List[List[int]]`, *optional*):
            List of supported aspect ratios for image splitting. If not specified, the default supported aspect ratios
            are [[1, 1], [1, 2], [1, 3], [1, 4], [2, 1], [2, 2], [3, 1], [4, 1]] for `max_num_tiles=4`.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.

    Example:

    ```python
    >>> from transformers import MllamaVisionConfig, MllamaVisionModel

    >>> # Initializing a Mllama vision config
    >>> config = MllamaVisionConfig()

    >>> # Initializing a vision model from the mllama-11b style configuration
    >>> model = MllamaVisionModel(config)

    >>> # Accessing the model configuration
    >>> configuration = model.config
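
    >>> # A configuration with custom tiling can also be built from the documented arguments
    >>> # (the values below are illustrative, not tuned defaults)
    >>> custom_config = MllamaVisionConfig(max_num_tiles=2, supported_aspect_ratios=[[1, 1], [1, 2], [2, 1]])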
    ```"""

    model_type = "mllama_vision_model"
    base_config_key = "vision_config"

    def __init__(
        self,
        hidden_size: int = 1280,
        hidden_act: str = "gelu",
        num_hidden_layers: int = 32,
        num_global_layers: int = 8,
        num_attention_heads: int = 16,
        num_channels: int = 3,
        intermediate_size: int = 5120,
        vision_output_dim: int = 7680,
        image_size: int = 448,
        patch_size: int = 14,
        norm_eps: float = 1e-05,
        max_num_tiles: int = 4,
        intermediate_layers_indices: Optional[List[int]] = None,
        supported_aspect_ratios: Optional[List[List[int]]] = None,
        initializer_range: float = 0.02,
        **kwargs,
    ):
        if supported_aspect_ratios is None:
            if max_num_tiles != 4:
                raise ValueError("max_num_tiles must be 4 for default supported aspect ratios")
            supported_aspect_ratios = [[1, 1], [1, 2], [1, 3], [1, 4], [2, 1], [2, 2], [3, 1], [4, 1]]

        if intermediate_layers_indices is None:
            intermediate_layers_indices = [3, 7, 15, 23, 30]

        self.hidden_size = hidden_size
        self.hidden_act = hidden_act
        self.num_hidden_layers = num_hidden_layers
        self.num_channels = num_channels
        self.intermediate_size = intermediate_size
        self.image_size = image_size
        self.vision_output_dim = vision_output_dim
        self.patch_size = patch_size
        self.intermediate_layers_indices = intermediate_layers_indices
        self.num_global_layers = num_global_layers
        self.max_num_tiles = max_num_tiles
        self.norm_eps = norm_eps
        self.attention_heads = num_attention_heads
        self.supported_aspect_ratios = supported_aspect_ratios
        self.initializer_range = initializer_range
        super().__init__(**kwargs)

    @property
    def max_aspect_ratio_id(self) -> int:
        return len(self.supported_aspect_ratios)


class MllamaTextConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`MllamaTextModel`]. It is used to instantiate an
    Mllama text model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the Mllama-11B.

    e.g. [meta-llama/Llama-3.2-11B-Vision](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision)

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 128256):
            Vocabulary size of the Mllama text model. Defines the maximum number of different tokens that can be represented
            by the `input_ids` passed when calling [`MllamaTextModel`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimensionality of the embeddings and hidden states.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the encoder and pooler.
        num_hidden_layers (`int`, *optional*, defaults to 40):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*, defaults to 8):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used.
        intermediate_size (`int`, *optional*, defaults to 14336):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        rope_theta (`float`, *optional*, defaults to `500000.0`):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply a new rope
            type and expect the model to work on longer `max_position_embeddings`, we recommend updating this value
            accordingly. See the example at the end of this docstring for the expected format.
            Expected contents:
                `rope_type` (`str`):
                    The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
                    'llama3'], with 'default' being the original RoPE implementation.
                `factor` (`float`, *optional*):
                    Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
                    most scaling types, a `factor` of x will enable the model to handle sequences of length x *
                    original maximum pre-trained length.
                `original_max_position_embeddings` (`int`, *optional*):
                    Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
                    pretraining.
                `attention_factor` (`float`, *optional*):
                    Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
                    computation. If unspecified, it defaults to value recommended by the implementation, using the
                    `factor` field to infer the suggested value.
                `beta_fast` (`float`, *optional*):
                    Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
                    ramp function. If unspecified, it defaults to 32.
                `beta_slow` (`float`, *optional*):
                    Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
                    ramp function. If unspecified, it defaults to 1.
                `short_factor` (`List[float]`, *optional*):
                    Only used with 'longrope'. The scaling factor to be applied to short contexts (<
                    `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                    size divided by the number of attention heads divided by 2
                `long_factor` (`List[float]`, *optional*):
                    Only used with 'longrope'. The scaling factor to be applied to long contexts (>
                    `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
                    size divided by the number of attention heads divided by 2
                `low_freq_factor` (`float`, *optional*):
                    Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE.
                `high_freq_factor` (`float`, *optional*):
                    Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.
        max_position_embeddings (`int`, *optional*, defaults to 131072):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie the input and output word embeddings.
        cross_attention_layers (`List[int]`, *optional*):
            Indices of the cross attention layers. If not specified, will default to [3, 8, 13, 18, 23, 28, 33, 38].
        dropout (`float`, *optional*, defaults to 0):
            The dropout probability for self- and cross-attention layers.
        bos_token_id (`int`, *optional*, defaults to 128000):
            The id of the beginning of sentence token.
        eos_token_id (`int`, *optional*, defaults to 128001):
            The id of the end of sentence token.
        pad_token_id (`int`, *optional*, defaults to 128004):
            The id of the padding token.

    Example:

    ```python
    >>> from transformers import MllamaTextModel, MllamaTextConfig

    >>> # Initializing a Mllama text config
    >>> config = MllamaTextConfig()

    >>> # Initializing a model from the Mllama text configuration
    >>> model = MllamaTextModel(config)

    >>> # Accessing the model configuration
    >>> configuration = model.config
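
    >>> # `rope_scaling` and `cross_attention_layers` are plain Python values; the dict below only
    >>> # illustrates the documented "llama3" format and is not a recommended setting
    >>> scaled_config = MllamaTextConfig(
    ...     rope_scaling={
    ...         "rope_type": "llama3",
    ...         "factor": 8.0,
    ...         "original_max_position_embeddings": 8192,
    ...         "low_freq_factor": 1.0,
    ...         "high_freq_factor": 4.0,
    ...     },
    ...     cross_attention_layers=[3, 8, 13, 18],
    ... )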
    ```"""

    model_type = "mllama_text_model"
    base_config_key = "text_config"

    def __init__(
        self,
        vocab_size: int = 128256,
        hidden_size: int = 4096,
        hidden_act: str = "silu",
        num_hidden_layers: int = 40,
        num_attention_heads: int = 32,
        num_key_value_heads: int = 8,
        intermediate_size: int = 14336,
        rope_theta: float = 500000.0,
        rope_scaling: Optional[Dict] = None,
        rms_norm_eps: float = 1e-05,
        max_position_embeddings: int = 131072,
        initializer_range: float = 0.02,
        use_cache: bool = True,
        tie_word_embeddings: bool = False,
        cross_attention_layers: Optional[List[int]] = None,
        dropout: float = 0,
        bos_token_id: int = 128000,
        eos_token_id: int = 128001,
        pad_token_id: Optional[int] = 128004,
        **kwargs,
    ):
        if cross_attention_layers is None:
            cross_attention_layers = [3, 8, 13, 18, 23, 28, 33, 38]

        self.vocab_size = vocab_size
        self.num_hidden_layers = num_hidden_layers
        self.cross_attention_layers = cross_attention_layers
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rms_norm_eps = rms_norm_eps
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.hidden_act = hidden_act
        self.rope_scaling = rope_scaling
        self.max_position_embeddings = max_position_embeddings
        rope_config_validation(self)

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )


class MllamaConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`MllamaForConditionalGeneration`]. It is used to instantiate an
    Mllama model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the Mllama-11B.

    e.g. [meta-llama/Llama-3.2-11B-Vision](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision)

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `MllamaVisionConfig`):
            The config object or dictionary of the vision backbone.
        text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `MllamaTextConfig`):
            The config object or dictionary of the text backbone.
        image_token_index (`int`, *optional*, defaults to 128256):
            The image token index to encode the image prompt.

    Example:

    ```python
    >>> from transformers import MllamaForConditionalGeneration, MllamaConfig, MllamaVisionConfig, MllamaTextConfig

    >>> # Initializing a Mllama vision config
    >>> vision_config = MllamaVisionConfig()

    >>> # Initializing a Mllama text config
    >>> text_config = MllamaTextConfig()

    >>> # Initializing a mllama-11b style configuration
    >>> configuration = MllamaConfig(vision_config, text_config)

    >>> # Initializing a model from the mllama-11b style configuration
    >>> model = MllamaForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
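
    >>> # Sub-configs may also be passed as plain dicts; omitted fields fall back to the defaults
    >>> # documented above (the layer counts below are illustrative only)
    >>> configuration_from_dicts = MllamaConfig(
    ...     vision_config={"num_hidden_layers": 16},
    ...     text_config={"num_hidden_layers": 20, "cross_attention_layers": [3, 8, 13, 18]},
    ... )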
    ```"""

    model_type = "mllama"
    sub_configs = {"text_config": MllamaTextConfig, "vision_config": MllamaVisionConfig}

    def __init__(
        self,
        vision_config=None,
        text_config=None,
        image_token_index=128256,
        **kwargs,
    ):
        if vision_config is None:
            self.vision_config = MllamaVisionConfig()
            logger.info("vision_config is None, using default mllama vision config")
        elif isinstance(vision_config, dict):
            self.vision_config = MllamaVisionConfig(**vision_config)
        elif isinstance(vision_config, MllamaVisionConfig):
            self.vision_config = vision_config

        self.image_token_index = image_token_index

        if text_config is None:
            self.text_config = MllamaTextConfig()
            logger.info("text_config is None, using default mllama text config")
        elif isinstance(text_config, dict):
            self.text_config = MllamaTextConfig(**text_config)
        elif isinstance(text_config, MllamaTextConfig):
            self.text_config = text_config

        super().__init__(**kwargs)