
"""MEGA configuration"""

from collections import OrderedDict
from typing import Mapping

from ....configuration_utils import PretrainedConfig
from ....onnx import OnnxConfig
from ....utils import logging


logger = logging.get_logger(__name__)


class MegaConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`MegaModel`]. It is used to instantiate a Mega
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the Mega
    [mnaylor/mega-base-wikitext](https://huggingface.co/mnaylor/mega-base-wikitext) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 30522):
            Vocabulary size of the Mega model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`MegaModel`].
        hidden_size (`int`, *optional*, defaults to 128):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 4):
            Number of hidden layers in the Mega encoder.
        intermediate_size (`int`, *optional*, defaults to 256):
            Dimensionality of the hidden size (self-attention value projection) within the Mega encoder
        ema_projection_size (`int`, *optional*, defaults to 16):
            Dimensionality of the MegaMultiDimensionDampedEma
        bidirectional (`bool`, *optional*, defaults to `True`):
            Whether the MegaMultiDimensionDampedEma used in Mega's self-attention should work bidirectionally (`True`)
            or unidirectionally (`False`). Bidirectional EMA is incompatible with causal decoding, so this should be
            False if you intend to use the model as a decoder.
        shared_representation_size (`int`, *optional*, defaults to 64):
            Dimensionality of the linear projection for shared representation of self-attention queries and keys
        use_chunking (`bool`, *optional*, defaults to `False`):
            Whether to chunk inputs for linear self-attention complexity (described as Mega-chunk in the paper)
        chunk_size (`int`, *optional*, defaults to -1):
            If `use_chunking` is set to `True`, determines the size of the chunks to apply to the input sequence. If
            chunking is used, input sequences must be padded to a multiple of `chunk_size`
        truncation (`int`, *optional*):
            If specified, the sequence length for which to truncate MegaMultiDimensionDampedEma
        normalize_before_mega (`bool`, *optional*, defaults to `True`):
            Whether to normalize before (`True`) or after (`False`) passing through Mega encoder blocks
        normalization_type (`str`, *optional*, defaults to `"scalenorm"`):
            Type of normalization to use in Mega encoder blocks. Choose one of `"scalenorm"`, `"layernorm"`,
            `"rmsnorm"`, `"batchnorm"`, or `"syncbatchnorm"` (GPU required for syncbatchnorm)
        norm_affine (`bool`, *optional*, defaults to `True`):
            If `True`, applies a parameterized affine transformation to inputs during normalization
        activation (`str`, *optional*, defaults to `"silu"`):
            Activation function to apply within Mega encoder blocks. Choose one of `"silu"`, `"relu"`, `"linear"`,
            `"gelu"`, or `"gelu_accurate"`
        attention_activation (`str`, *optional*, defaults to `"softmax"`):
            Activation function to apply for single-headed self-attention (a la Transformer). Choose one of
            `"softmax"`, `"laplace"`, or `"relu2"`
        dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for EMA self-attention
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        use_feature_dropout (`bool`, *optional*, defaults to `False`):
            Whether to use feature-based (`True`) or standard dropout (`False`)
        use_normalized_ffn (`bool`, *optional*, defaults to `True`):
            Whether to use the normalized feed-forward sub-layer in Mega blocks (`True`) or pass Mega encoder output
            as-is (`False`)
        nffn_hidden_size (`int`, *optional*, defaults to 256):
            If using the normalized feed-forward network (NFFN) layer within Mega (`use_normalized_ffn = True`), this
            is the hidden size of the NFFN
        normalize_before_ffn (`bool`, *optional*, defaults to `True`):
            Whether to normalize before (`True`) or after (`False`) the feed-forward portion of NFFN
        nffn_activation_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the NFFN component.
        max_positions (`int`, *optional*, defaults to 2048):
            The maximum sequence length to use for positional representations. For `"simple"` relative positional bias,
            this is a hard limit on input length; `"rotary"` relative positional bias will extrapolate to longer
            sequences
        add_token_type_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to account for token types in embeddings. Left as optional to maintain compatibility with original
            implementation while adding support for token types.
        type_vocab_size (`int`, *optional*, defaults to 2):
            The vocabulary size of the `token_type_ids` passed when calling [`MegaModel`]. Only used if
            `add_token_type_embeddings = True`
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        ema_delta_alpha_range (`float`, *optional*, defaults to 0.2):
            The standard deviation for initializing the delta (damping factor) and alpha (decay factor) parameters in
            MegaMultiDimensionDampedEma.
        ema_beta_range (`float`, *optional*, defaults to 0.02):
            The standard deviation for initializing the beta parameter (expansion matrix) in
            MegaMultiDimensionDampedEma.
        ema_gamma_omega_range (`float`, *optional*, defaults to 1.0):
            The standard deviation for initializing the gamma (projection matrix) and omega (residual weight)
            parameters in MultiDimensionEMA.
        relative_positional_bias (`str`, *optional*, defaults to `"rotary"`):
            Type of relative positional encoding. Choose one of `"rotary"` or `"simple"`. If `"simple"` is selected,
            `max_positions` is used as a limit on input size, while `"rotary"` extrapolates beyond `max_positions`.
        is_decoder (`bool`, *optional*, defaults to `False`):
            Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        classifier_dropout (`float`, *optional*):
            The dropout ratio for the classification head.
        add_lm_hidden_dense_layer (`bool`, *optional*, defaults to `True`):
            Whether to include a hidden layer for projection between encoder outputs and LM heads (`True`) or pass
            hidden states directly to LM head (`False`). Remains optional for compatibility with original
            implementation

    Examples:

    ```python
    >>> from transformers import MegaConfig, MegaModel

    >>> # Initializing a Mega configuration
    >>> configuration = MegaConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = MegaModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
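    >>> # Illustrative sketch (names below are ours, not from the checkpoint): a causal,
    >>> # decoder-style configuration. Bidirectional EMA is incompatible with causal decoding,
    >>> # so it is disabled here.
    >>> decoder_configuration = MegaConfig(is_decoder=True, bidirectional=False)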
    ```"""

    model_type = "mega"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=128,
        num_hidden_layers=4,
        intermediate_size=256,
        ema_projection_size=16,
        bidirectional=True,
        shared_representation_size=64,
        use_chunking=False,
        chunk_size=-1,
        truncation=None,
        normalize_before_mega=True,
        normalization_type="scalenorm",
        norm_affine=True,
        activation="silu",
        attention_activation="softmax",
        dropout_prob=0.1,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        use_feature_dropout=False,
        use_normalized_ffn=True,
        nffn_hidden_size=256,
        normalize_before_ffn=True,
        nffn_activation_dropout_prob=0.1,
        max_positions=2048,
        add_token_type_embeddings=False,
        type_vocab_size=2,
        initializer_range=0.02,
        ema_delta_alpha_range=0.2,
        ema_beta_range=0.02,
        ema_gamma_omega_range=1.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        relative_positional_bias="rotary",
        classifier_dropout=None,
        use_cache=True,
        add_lm_hidden_dense_layer=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.activation = activation
        self.attention_activation = attention_activation
        self.intermediate_size = intermediate_size
        self.ema_projection_size = ema_projection_size
        self.bidirectional = bidirectional
        self.shared_representation_size = shared_representation_size
        self.use_chunking = use_chunking
        self.chunk_size = chunk_size
        self.truncation = truncation
        self.normalize_before_mega = normalize_before_mega
        self.normalization_type = normalization_type
        self.norm_affine = norm_affine
        self.dropout_prob = dropout_prob
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.use_feature_dropout = use_feature_dropout
        self.use_normalized_ffn = use_normalized_ffn
        self.nffn_hidden_size = nffn_hidden_size
        self.normalize_before_ffn = normalize_before_ffn
        self.nffn_activation_dropout_prob = nffn_activation_dropout_prob
        self.max_positions = max_positions
        self.add_token_type_embeddings = add_token_type_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.ema_delta_alpha_range = ema_delta_alpha_range
        self.ema_beta_range = ema_beta_range
        self.ema_gamma_omega_range = ema_gamma_omega_range
        self.relative_positional_bias = relative_positional_bias
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.add_lm_hidden_dense_layer = add_lm_hidden_dense_layer
        # Mega uses single-headed attention; this attribute exists only because other parts
        # of the library expect `num_attention_heads` on every configuration.
        self.num_attention_heads = 1


class MegaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
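

# A minimal usage sketch (illustrative; this guard is an addition, not library API). It prints
# the dynamic ONNX axes that `MegaOnnxConfig` declares. Run it as a module so the relative
# imports resolve, e.g. `python -m transformers.models.deprecated.mega.configuration_mega`.
if __name__ == "__main__":
    onnx_config = MegaOnnxConfig(MegaConfig())
    # For the default task this yields:
    # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
    #              ('attention_mask', {0: 'batch', 1: 'sequence'})])
    print(onnx_config.inputs)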