
"""OpenAI GPT-2 configuration"""

from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging


logger = logging.get_logger(__name__)


class GPT2Config(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`GPT2Model`] or a [`TFGPT2Model`]. It is used to
    instantiate a GPT-2 model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the GPT-2
    [openai-community/gpt2](https://huggingface.co/openai-community/gpt2) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 50257):
            Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`GPT2Model`] or [`TFGPT2Model`].
        n_positions (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        n_embd (`int`, *optional*, defaults to 768):
            Dimensionality of the embeddings and hidden states.
        n_layer (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        n_head (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        n_inner (`int`, *optional*):
            Dimensionality of the inner feed-forward layers. `None` will set it to 4 times `n_embd`.
        activation_function (`str`, *optional*, defaults to `"gelu_new"`):
            Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
        resid_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        embd_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the embeddings.
        attn_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
            The epsilon to use in the layer normalization layers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        summary_type (`str`, *optional*, defaults to `"cls_index"`):
            Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and
            [`TFGPT2DoubleHeadsModel`].

            Has to be one of the following options:

                - `"last"`: Take the last token hidden state (like XLNet).
                - `"first"`: Take the first token hidden state (like BERT).
                - `"mean"`: Take the mean of all tokens hidden states.
                - `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
                - `"attn"`: Not implemented now, use multi-head attention.
        summary_use_proj (`bool`, *optional*, defaults to `True`):
            Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and
            [`TFGPT2DoubleHeadsModel`].

            Whether or not to add a projection after the vector extraction.
        summary_activation (`str`, *optional*):
            Argument used when doing sequence summary. Used for the multiple choice head in
            [`GPT2DoubleHeadsModel`].

            Pass `"tanh"` for a tanh activation to the output, any other value will result in no activation.
        summary_proj_to_labels (`bool`, *optional*, defaults to `True`):
            Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and
            [`TFGPT2DoubleHeadsModel`].

            Whether the projection outputs should have `config.num_labels` or `config.hidden_size` classes.
        summary_first_dropout (`float`, *optional*, defaults to 0.1):
            Argument used when doing sequence summary, used in the models [`GPT2DoubleHeadsModel`] and
            [`TFGPT2DoubleHeadsModel`].

            The dropout ratio to be used after the projection and activation.
        scale_attn_weights (`bool`, *optional*, defaults to `True`):
            Scale attention weights by dividing by `sqrt(hidden_size)`.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        bos_token_id (`int`, *optional*, defaults to 50256):
            Id of the beginning of sentence token in the vocabulary.
        eos_token_id (`int`, *optional*, defaults to 50256):
            Id of the end of sentence token in the vocabulary.
        scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to `False`):
            Whether to additionally scale attention weights by `1 / (layer_idx + 1)`.
        reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`):
            Whether to scale keys (K) prior to computing attention (dot-product) and upcast attention
            dot-product/softmax to float() when training with mixed precision.

    Example:

    ```python
    >>> from transformers import GPT2Config, GPT2Model

    >>> # Initializing a GPT2 configuration
    >>> configuration = GPT2Config()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = GPT2Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
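
    >>> # The documented arguments can be overridden to define a smaller, custom
    >>> # architecture (these sizes are illustrative, not the GPT-2 defaults)
    >>> small_configuration = GPT2Config(n_layer=6, n_head=8, n_embd=512)
    >>> small_model = GPT2Model(small_configuration)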
    ```gpt2past_key_valuesn_embdn_positionsn_headn_layer)hidden_sizemax_position_embeddingsnum_attention_headsnum_hidden_layersc                 l   || _         || _        || _        || _        || _        || _        || _        || _        |	| _        |
| _	        || _
        || _        || _        || _        || _        || _        || _        || _        || _        || _        || _        || _        || _        t/        | `  d||d| y )N)bos_token_ideos_token_id )
vocab_sizer   r   r   r   n_inneractivation_functionresid_pdrop
embd_pdrop
attn_pdroplayer_norm_epsiloninitializer_rangesummary_typesummary_use_projsummary_activationsummary_first_dropoutsummary_proj_to_labelsscale_attn_weights	use_cachescale_attn_by_inverse_layer_idxreorder_and_upcast_attnr   r   super__init__)selfr    r   r   r   r   r!   r"   r#   r$   r%   r&   r'   r(   r)   r*   r,   r+   r-   r.   r   r   r/   r0   kwargs	__class__s                            ^/var/www/html/venv/lib/python3.12/site-packages/transformers/models/gpt2/configuration_gpt2.pyr2   zGPT2Config.__init__   s    6 %&#6 &$$"4!2( 0"4%:"&<#"4"/N,'>$((XlXQWX    )iQ  i   i      r8   Ngelu_new皙?r:   r:   gh㈵>g{Gz?	cls_indexTNTr:   TTP  r<   FF)	__name__
__module____qualname____doc__
model_typekeys_to_ignore_at_inferenceattribute_mapr2   __classcell__r5   s   @r6   r   r      s    _B J#4"5#0'&	M & #!(- %14Y 4Yr7   r   c                        e Zd Z	 	 	 ddededee   def fdZe	de
ee
eef   f   fd       Ze	defd       Ze	defd	       Z	 	 	 	 dd
ededededee   de
eef   f fdZe	defd       Z xZS )GPT2OnnxConfigconfigtaskpatching_specsuse_pastc                 ~    t         |   ||||       t        | j                  dd       sd| j                  _        y y )N)rI   rJ   rK   pad_token_idr   )r1   r2   getattr_configrM   )r3   rH   rI   rJ   rK   r5   s        r6   r2   zGPT2OnnxConfig.__init__   s=     	d>T\]t||^T:()DLL% ;r7   returnc                     t        ddddi      }| j                  r| j                  |d       ddd|d<   |S ddd|d<   |S )	N	input_idsbatchsequence)r      inputs)	directionzpast_sequence + sequenceattention_mask)r   rK   fill_with_past_key_values_)r3   common_inputss     r6   rV   zGPT2OnnxConfig.inputs   sa    #[g*2M$NO==++MX+N29>X.YM*+  3:j.IM*+r7   c                 .    | j                   j                  S N)rO   r   r3   s    r6   
num_layerszGPT2OnnxConfig.num_layers   s    ||###r7   c                 .    | j                   j                  S r\   )rO   r   r]   s    r6   r   z"GPT2OnnxConfig.num_attention_heads   s    ||"""r7   	tokenizer
batch_size
seq_lengthis_pair	frameworkc                 h   t         t        |   |||||      }t        d|d   i      }| j                  rt               st        d      dd l}|d   j                  \  }	}
|
dz   }|	| j                  || j                  j                  | j                  z  f}t        | j                        D cg c]$  }|j                  |      |j                  |      f& c}|d<   |d   |d<   | j                  r<|d   j                  }j!                  |d   |j#                  	|      gd	
      |d<   |S c c}w )N)ra   rb   rc   rd   rR   zACannot generate dummy past_keys inputs without PyTorch installed.r      r   rX   )dtyperU   )dim)r1   r   generate_dummy_inputsr   rK   r   
ValueErrortorchshaper   rO   r   ranger^   zerosrg   catones)r3   r`   ra   rb   rc   rd   rZ   ordered_inputsrk   rS   seqlenpast_key_values_length
past_shape_
mask_dtyper5   s                  r6   ri   z$GPT2OnnxConfig.generate_dummy_inputs   s^    0$M*W`i N 

 %k=3M%NO ==%' !dee -k : @ @v)/!&,,*LL,,0H0HH	
 QVVZVeVePf5KLU[[,ekk*.EF501 ,99I+J'(=='(89??J/4yy 015::eE[cm:3nouv 09 0N+, 5s   .)D/c                      y)N   r   r]   s    r6   default_onnx_opsetz!GPT2OnnxConfig.default_onnx_opset  s    r7   )defaultNF)r{   FN)r=   r>   r?   r   strr   r   boolr2   propertyr   intrV   r^   r   r	   r   r
   r   ri   ry   rD   rE   s   @r6   rG   rG      s    -1
* 
* 
* \*	
*
 
* WS#X%6 67   $C $ $ #S # # *.*&* * 	*
 * J'* 
c	*X C  r7   rG   N)r@   collectionsr   typingr   r   r   r    r	   r
   r   configuration_utilsr   onnxr   r   utilsr   
get_loggerr=   loggerr   rG   r   r7   r6   <module>r      sW     ! # / / C C 3 4  
		H	%_Y! _YDN' Nr7   