"""DeBERTa model configuration"""

from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType


logger = logging.get_logger(__name__)


class DebertaConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`DebertaModel`] or a [`TFDebertaModel`]. It is
    used to instantiate a DeBERTa model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the DeBERTa
    [microsoft/deberta-base](https://huggingface.co/microsoft/deberta-base) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Arguments:
        vocab_size (`int`, *optional*, defaults to 50265):
            Vocabulary size of the DeBERTa model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`DebertaModel`] or [`TFDebertaModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"`, `"tanh"`, `"gelu_fast"`, `"mish"`, `"linear"`, `"sigmoid"` and `"gelu_new"`
            are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (`int`, *optional*, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (`int`, *optional*, defaults to 0):
            The vocabulary size of the `token_type_ids` passed when calling [`DebertaModel`] or [`TFDebertaModel`].
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-07):
            The epsilon used by the layer normalization layers.
        relative_attention (`bool`, *optional*, defaults to `False`):
            Whether to use relative position encoding.
        max_relative_positions (`int`, *optional*, defaults to -1):
            The range of relative positions `[-max_position_embeddings, max_position_embeddings]`. Use the same value
            as `max_position_embeddings`.
        pad_token_id (`int`, *optional*, defaults to 0):
            The value used to pad `input_ids`.
        position_biased_input (`bool`, *optional*, defaults to `True`):
            Whether to add absolute position embeddings to the content embeddings.
        pos_att_type (`List[str]`, *optional*):
            The type of relative position attention. It can be a combination of `["p2c", "c2p"]`, e.g. `["p2c"]` or
            `["p2c", "c2p"]`.
        legacy (`bool`, *optional*, defaults to `True`):
            Whether or not the model should use the legacy `LegacyDebertaOnlyMLMHead`, which does not work properly
            for mask infilling tasks.

    Example:

    ```python
    >>> from transformers import DebertaConfig, DebertaModel

    >>> # Initializing a DeBERTa microsoft/deberta-base style configuration
    >>> configuration = DebertaConfig()

    >>> # Initializing a model (with random weights) from the microsoft/deberta-base style configuration
    >>> model = DebertaModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
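
    >>> # A usage sketch of the backwards-compatibility handling in `__init__`
    >>> # below: a "|"-separated string for `pos_att_type` is normalized to a list
    >>> config = DebertaConfig(relative_attention=True, pos_att_type="p2c|c2p")
    >>> config.pos_att_type
    ['p2c', 'c2p']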
    ```"""

    model_type = "deberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        legacy=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility: accept a "|"-separated string, e.g. "p2c|c2p"
        if isinstance(pos_att_type, str):
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
        self.legacy = legacy


class DebertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        # Models configured without a token type vocabulary take no token_type_ids.
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs