"""Blenderbot model configuration"""

from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


logger = logging.get_logger(__name__)


class BlenderbotConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`BlenderbotModel`]. It is used to instantiate a
    Blenderbot model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of the Blenderbot
    [facebook/blenderbot-3B](https://huggingface.co/facebook/blenderbot-3B) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 50265):
            Vocabulary size of the Blenderbot model. Defines the number of different tokens that can be represented by
            the `input_ids` passed when calling [`BlenderbotModel`] or [`TFBlenderbotModel`].
        d_model (`int`, *optional*, defaults to 1024):
            Dimensionality of the layers and the pooler layer.
        encoder_layers (`int`, *optional*, defaults to 12):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 12):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 4096):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the encoder.
        activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        max_position_embeddings (`int`, *optional*, defaults to 128):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for
            more details.
        decoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556) for
            more details.
        scale_embedding (`bool`, *optional*, defaults to `False`):
            Scale embeddings by a factor of sqrt(d_model) if set to `True`.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/value attention states (not used by all models).
        forced_eos_token_id (`int`, *optional*, defaults to 2):
            The id of the token to force as the last generated token when `max_length` is reached. Usually set to
            `eos_token_id`.

    Example:

    ```python
    >>> from transformers import BlenderbotConfig, BlenderbotModel

    >>> # Initializing a Blenderbot facebook/blenderbot-3B style configuration
    >>> configuration = BlenderbotConfig()

    >>> # Initializing a model (with random weights) from the facebook/blenderbot-3B style configuration
    >>> model = BlenderbotModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
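
    A configuration built from scratch can also override any of the arguments documented above. The values in the
    following snippet are illustrative only and do not correspond to a released checkpoint:

    ```python
    >>> # Hypothetical, deliberately small configuration for quick experiments
    >>> small_configuration = BlenderbotConfig(encoder_layers=2, decoder_layers=2, d_model=512)
    >>> small_model = BlenderbotModel(small_configuration)
    ```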
    """

    model_type = "blenderbot"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=8008,
        max_position_embeddings=128,
        encoder_layers=2,
        encoder_ffn_dim=10240,
        encoder_attention_heads=32,
        decoder_layers=24,
        decoder_ffn_dim=10240,
        decoder_attention_heads=32,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=2560,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        encoder_no_repeat_ngram_size=3,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            encoder_no_repeat_ngram_size=encoder_no_repeat_ngram_size,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class BlenderbotOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The dynamic axes exposed to the ONNX exporter depend on the task and on whether past key values are used.
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                _, num_decoder_layers = self.num_layers
                for i in range(num_decoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                _, num_decoder_layers = self.num_layers
                for i in range(num_decoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs (a single decoding step when past key values are used).
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            _, num_decoder_layers = self.num_layers

            for _ in range(num_decoder_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            past_key_values_length = seqlen
            _, num_decoder_layers = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_decoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # If a dynamic axis (-1) is requested, forward with a fixed dimension to avoid ONNX optimizations on it.
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to the computed batch and sequence sizes.
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )

    def fill_with_past_key_values_(self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str):
        if direction not in ["inputs", "outputs"]:
            raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given')

        name = "past_key_values" if direction == "inputs" else "present"
        _, num_decoder_layers = self.num_layers

        encoder_sequence = "past_encoder_sequence"
        decoder_sequence = "past_decoder_sequence" if direction == "inputs" else "past_decoder_sequence + sequence"

        for i in range(num_decoder_layers):
            inputs_or_outputs[f"{name}.{i}.decoder.key"] = {0: "batch", 2: decoder_sequence}
            inputs_or_outputs[f"{name}.{i}.decoder.value"] = {0: "batch", 2: decoder_sequence}
            inputs_or_outputs[f"{name}.{i}.encoder.key"] = {0: "batch", 2: encoder_sequence}
            inputs_or_outputs[f"{name}.{i}.encoder.value"] = {0: "batch", 2: encoder_sequence}
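

# Illustrative usage sketch added by the editor; it is not part of the upstream module, and the guard below keeps it
# inert on import. It only exercises the two classes defined above: it builds a default configuration and shows the
# symbolic input/output axes the ONNX exporter would use for the "seq2seq-lm" task. Run it as a module, e.g.
# `python -m transformers.models.blenderbot.configuration_blenderbot`, so the relative imports at the top resolve.
if __name__ == "__main__":
    _config = BlenderbotConfig()
    _onnx_config = BlenderbotOnnxConfig(_config, task="seq2seq-lm")
    # e.g. {"input_ids": {0: "batch", 1: "encoder_sequence"}, "attention_mask": {...}, ...}
    print(_onnx_config.inputs)
    print(_onnx_config.outputs)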