# coding=utf-8
# Copyright 2022 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF 2.0 OPT model."""

from __future__ import annotations

from typing import Optional, Tuple, Union

import numpy as np
import tensorflow as tf

from ...activations_tf import get_tf_activation
from ...modeling_tf_outputs import TFBaseModelOutputWithPast, TFCausalLMOutputWithPast
from ...modeling_tf_utils import (
    TFCausalLanguageModelingLoss,
    TFModelInputType,
    TFPreTrainedModel,
    TFSharedEmbeddings,
    keras,
    keras_serializable,
    unpack_inputs,
)
from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
from ...utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from .configuration_opt import OPTConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "facebook/opt-350m"
_CONFIG_FOR_DOC = "OPTConfig"

# Base model docstring
_EXPECTED_OUTPUT_SHAPE = [1, 8, 1024]

# Causal LM output
_CAUSAL_LM_EXPECTED_OUTPUT = (
    "Hey, are you conscious? Can you talk to me?\nI'm not conscious. I'm just a little bit of a weirdo."
)

LARGE_NEGATIVE = -1e8


def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):
    """
    Make causal mask used for bi-directional self-attention.
    """
    bsz = input_ids_shape[0]
    tgt_len = input_ids_shape[1]
    # Build a (tgt_len, tgt_len) matrix whose strict upper triangle holds LARGE_NEGATIVE
    # and whose diagonal and lower triangle are zero.
    mask = tf.fill((tgt_len, tgt_len), tf.cast(LARGE_NEGATIVE, tf.float32))
    mask = tf.linalg.band_part(mask, 0, -1) - tf.linalg.band_part(mask, 0, 0)

    if past_key_values_length > 0:
        # Cached positions are always attendable, so prepend zero columns for them.
        mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1)

    return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1))


def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):
    """
    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
    """
    src_len = shape_list(mask)[1]
    tgt_len = tgt_len if tgt_len is not None else src_len
    one_cst = tf.constant(1.0)
    mask = tf.cast(mask, dtype=one_cst.dtype)
    expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))

    return (one_cst - expanded_mask) * LARGE_NEGATIVE


class TFOPTLearnedPositionalEmbedding(keras.layers.Embedding):
    """
    This module learns positional embeddings up to a fixed maximum size.
    """

    def __init__(self, num_embeddings: int, embedding_dim: int, **kwargs):
        # OPT is set up so that if padding_idx is specified then offset the embedding ids by 2
        # and adjust num_embeddings appropriately. Other models don't have this hack.
        self.offset = 2
        super().__init__(num_embeddings + self.offset, embedding_dim, **kwargs)

    def call(self, attention_mask, past_key_values_length: int = 0):
        """`input_ids_shape` is expected to be [bsz x seqlen]."""
        attention_mask = tf.cast(attention_mask, tf.int64)

        # create positions depending on attention_mask
        positions = tf.math.cumsum(attention_mask, axis=1) * attention_mask - 1

        # cut positions if `past_key_values_length` is > 0
        positions = positions[:, past_key_values_length:]

        return super().call(positions + self.offset)
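

# A small sketch of the cumulative-sum trick above (illustrative only): with left
# padding, every padded step collapses to position -1, so after `self.offset` (2) is
# added, all pad steps share a single embedding slot while real tokens count up.
#
#     >>> attention_mask = tf.constant([[0, 1, 1, 1]])
#     >>> tf.math.cumsum(attention_mask, axis=1) * attention_mask - 1
#     <tf.Tensor: ... numpy=array([[-1,  0,  1,  2]])>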
dZddZ xZS )TFOPTAttentionz6Multi-headed attention from "Attention Is All You Needc                z   t        |   d
i | || _        || _        t        j
                  j                  |      | _        ||z  | _        | j                  |z  | j                  k7  rt        d| j                   d| d      | j                  dz  | _
        || _        t        j
                  j                  ||d      | _        t        j
                  j                  ||d      | _        t        j
                  j                  ||d      | _        t        j
                  j                  ||d	      | _        y )Nz;embed_dim must be divisible by num_heads (got `embed_dim`: z and `num_heads`: z).g      k_proj)use_biasnameq_projv_projout_proj )r@   rA   	embed_dim	num_headsr   layersDropoutdropouthead_dim
ValueErrorscaling
is_decoderDenserY   r\   r]   r^   )rB   r`   ra   rd   rh   biasrE   rF   s          r0   rA   zTFOPTAttention.__init__z   s    	"6"""||++G4!Y.MMI%$..8MdnnM]$YKr3  }}d*$ll((T(Qll((T(Qll((T(Q**9t**Ur2   c           	         t        j                  t        j                  |||| j                  | j                  f      d      S )Nr   r>   r   r   )r!   	transposereshapera   re   )rB   tensorseq_lenr-   s       r0   _shapezTFOPTAttention._shape   s0    ||BJJvWdnndmm/\]_kllr2   c           
     	   |du}t        |      \  }}	}
| j                  |      | j                  z  }|r||d   }|d   }n
|rE| j                  | j	                  |      d|      }| j                  | j                  |      d|      }n|}| j                  | j	                  |      d|      }| j                  | j                  |      d|      }t        j                  |d   |gd      }t        j                  |d   |gd      }nD| j                  | j	                  |      d|      }| j                  | j                  |      d|      }| j                  r||f}|| j                  z  d| j                  f}t        j                  | j                  ||	|      |      }t        j                  ||      }t        j                  ||      }t        |      d   }t        j                  ||d      }t        j                  j                  t        |      || j                  z  |	|gd	|| j                  z  |	|f d
t        |              |t        j                  j                  t        |      |d|	|gd|d|	|f d
t        |              t        j                  ||j                         }t        j                  ||| j                  |	|f      |z   }t        j                  ||| j                  z  |	|f      }t#        |d      }|t        j                  j                  t        |      | j                  gd| j                   d
t        |              t        j                  |d      t        j                  ||| j                  |	|f      z  }t        j                  ||| j                  z  |	|f      }| j%                  ||      }t        j                  ||      }t        j                  j                  t        |      || j                  z  |	| j                  gd|| j                  |	| j                  f d
t        |              t        j&                  t        j                  ||| j                  |	| j                  f      d      }t        j                  |||	|
f      }| j)                  |      }t        j                  ||| j                  |	|f      }|||fS )z#Input shape: Batch x Time x ChannelNr   r   r   r>   r   T)transpose_bz$Attention weights should be of size z	, but is messagez!Attention mask should be of size r4   z/Head mask for a single layer should be of size )r   r   r   r   trainingz `attn_output` should be of size rl   )r   r\   rg   rq   rY   r]   r!   r(   rh   ra   re   rn   matmul	debuggingassert_equalr#   r5   r   rd   rm   r^   )rB   hidden_stateskey_value_statespast_key_valuerL   layer_head_maskrw   is_cross_attentionr-   r.   r`   query_states
key_statesvalue_states
proj_shaper7   attn_weights
attn_probsattn_outputs                      r0   rK   zTFOPTAttention.call   s    .T9",]";Wi {{=1DLL@."<'*J)!,LT[[1A%BBLJ;;t{{3C'Db#NL'T[[%?SIJ;;t{{='A2sKLN1$5z#BKJ99nQ&7%FQOL T[[%?SIJ;;t{{='A2sKL?? ),7NDNN*B>
zz$++lGS"I:VZZ
J7
zz,
;Z(+yyztL
!!|$4>>!7G46dnn8LgW^7_6` a|,-/	 	" 	
 %LL%%>*a'*7a'8R7S T">235	 &   WW^<;M;MNN::lS$..'SZ4[\_mmL::lS4>>5I7T[4\]L%l<&LL%%?+ Et~~EW X"?346	 &  ::o}E

sDNNGWEI L ::lS4>>5I7T[4\]L\\,\B
ii
L9
!!{#4>>!7DMM:2CRVR_R_3`2a b{+,.	 	" 	
 llJJ{S$..'4==$QRT`
 jjsGY.GHmmK0"$**\CQXZa;b"cL.88r2   c                   | j                   ry d| _         t        | dd       Zt        j                  | j                  j
                        5  | j                  j                  d d | j                  g       d d d        t        | dd       Zt        j                  | j                  j
                        5  | j                  j                  d d | j                  g       d d d        t        | dd       Zt        j                  | j                  j
                        5  | j                  j                  d d | j                  g       d d d        t        | dd       [t        j                  | j                  j
                        5  | j                  j                  d d | j                  g       d d d        y y # 1 sw Y   AxY w# 1 sw Y   xY w# 1 sw Y   xY w# 1 sw Y   y xY w)NTrY   r\   r]   r^   )builtgetattrr!   
name_scoperY   r[   buildr`   r\   r]   r^   rB   input_shapes     r0   r   zTFOPTAttention.build  s   ::
44(4t{{//0 @!!4t~~">?@44(4t{{//0 @!!4t~~">?@44(4t{{//0 @!!4t~~">?@4T*6t}}112 B##T4$@AB B 7@ @@ @@ @B Bs0   )F32)G )G )G3F= G	GG!)g        FT)
r`   rN   ra   rN   rd   floatrh   boolrj   r   )ro   	tf.Tensorrp   rN   r-   rN   )NNNNF)r{   r   r|   tf.Tensor | Noner}   zTuple[Tuple[tf.Tensor]] | NonerL   r   r~   r   rw   Optional[bool]returnz"Tuple[tf.Tensor, tf.Tensor | None]N)	rP   rQ   rR   rS   rA   rq   rK   r   rT   rU   s   @r0   rW   rW   w   s    @  VV V 	V
 V V8m .29=+/,0#(t9 t9 +t9 7	t9
 )t9 *t9 !t9 
,t9lBr2   rW   c                  \     e Zd Zd fdZ	 	 	 	 	 	 d	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 ddZddZ xZS )TFOPTDecoderLayerc                   t        |   di | |j                  | _        |j                  | _        t        | j                  |j                  |j                  dd      | _        t        j                  j                  |j                        | _        t        |j                        | _        t        j                  j!                  dd      | _        t        j                  j%                  |j&                  d      | _        t        j                  j%                  | j                  d	      | _        t        j                  j!                  dd
      | _        || _        y )N	self_attnT)r`   ra   rd   r[   rh   h㈵>self_attn_layer_normepsilonr[   fc1r[   fc2final_layer_normr_   )r@   rA   do_layer_norm_beforehidden_sizer`   rW   num_attention_headsattention_dropoutr   r   rb   rc   rd   r   activation_functionactivation_fnLayerNormalizationr   ri   ffn_dimr   r   r   configrB   r   rE   rF   s      r0   rA   zTFOPTDecoderLayer.__init__"  s   "6"$*$?$?!++'nn00,,
 ||++FNN;.v/I/IJ$)LL$C$CDWm$C$n!<<%%fnn5%A<<%%dnn5%A % ? ?Se ? fr2   c                   |}| j                   r| j                  |      }||dd nd}	| j                  ||	||      \  }}
}| j                  ||      }||z   }| j                   s| j                  |      }|}| j                   r| j	                  |      }| j                  |      }| j                  |      }| j                  |      }| j                  ||      }||z   }| j                   s| j	                  |      }||
|fS )a(  
        Args:
            hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`tf.Tensor`, *optional*): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`tf.Tensor`, *optional*): mask for attention heads in a given layer of size
                `(decoder_attention_heads,)`
            past_key_value (`Tuple(tf.Tensor)`, *optional*): cached past key and value projection states
            training (`bool`, *optional*, defaults to `False`):
                Whether or not to use the model in training mode (some modules like dropout modules have different
                behaviors between training and evaluation).
        Nr>   )r{   r}   rL   r~   rv   )r   r   r   rd   r   r   r   r   )rB   r{   rL   r~   r}   rw   output_attentions	use_cacheresidualself_attn_past_key_valueself_attn_weightspresent_key_values               r0   rK   zTFOPTDecoderLayer.call6  s3   , ! $$ 55mDM :H9S>"1#5Y]  ?Cnn'3)+	 ?M ?
;(*; ]XF =0 (( 55mDM !$$ 11-@M/**=9/]XF =0 (( 11-@M02CDDr2   c                   | j                   ry d| _         t        | dd       Mt        j                  | j                  j
                        5  | j                  j                  d        d d d        t        | dd       Zt        j                  | j                  j
                        5  | j                  j                  d d | j                  g       d d d        t        | dd       Zt        j                  | j                  j
                        5  | j                  j                  d d | j                  g       d d d        t        | dd       dt        j                  | j                  j
                        5  | j                  j                  d d | j                  j                  g       d d d        t        | dd       [t        j                  | j                  j
                        5  | j                  j                  d d | j                  g       d d d        y y # 1 sw Y   xY w# 1 sw Y   XxY w# 1 sw Y   xY w# 1 sw Y   xY w# 1 sw Y   y xY w)NTr   r   r   r   r   )r   r   r!   r   r   r[   r   r   r`   r   r   r   r   r   r   s     r0   r   zTFOPTDecoderLayer.buildw  s   ::
4d+7t~~223 +$$T*+4/6Bt88==> N))//tT^^0LMN4%1txx}}- =dDNN;<=4%1txx}}- BdDKK,?,?@AB4+T2>t4499: J%%++T4,HIJ J ?+ +N N= =B BJ Js<   H%)H$)H133H=$)I	H!$H.1H:=I	Ir   r   )NNNFFF)r{   r   rL   np.ndarray | tf.Tensor | Noner~   r   r}   4Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]]rw   r   r   r   r   r   r   z4Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]r   )rP   rQ   rR   rA   rK   r   rT   rU   s   @r0   r   r   !  s    . 9=,0OS#(,1$)?E ?E 6?E *	?E
 M?E !?E *?E "?E 
>?EBJr2   r   at	  
    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
    behavior.

    <Tip>

    TensorFlow models and layers in `transformers` accept two formats as input:

    - having all inputs as keyword arguments (like PyTorch models), or
    - having all inputs as a list, tuple or dict in the first positional argument.

    The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
    and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
    pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
    format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
    the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
    positional argument:

    - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
    - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
    `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
    - a dictionary with one or several input Tensors associated to the input names given in the docstring:
    `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`

    Note that when creating models and layers with
    [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
    about any of this, as you can just pass inputs like you would to any other Python function!

    </Tip>

    Args:
        config ([`OPTConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
zQThe bare OPT Model outputting raw hidden-states without any specific head on top.c                      e Zd ZdZeZdZy)TFOPTPreTrainedModelzz


@add_start_docstrings(
    "The bare OPT Model outputting raw hidden-states without any specific head on top.",
    OPT_START_DOCSTRING,
)
class TFOPTPreTrainedModel(TFPreTrainedModel):
    """
    TFOPT Pretrained Model that inherits from transformers.TFPreTrainedModel

    Args:
        config: OPTConfig
    """

    config_class = OPTConfig
    base_model_prefix = "model"


OPT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`tf.Tensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`tf.Tensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*, defaults to `True`):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
            `past_key_values`). Set to `False` during training, `True` during generation.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
            config will be used instead.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
            used instead.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
            eager mode, in graph mode the value will always be set to True.
        training (`bool`, *optional*, defaults to `False`):
            Whether or not to use the model in training mode (some modules like dropout modules have different
            behaviors between training and evaluation).
"""


@keras_serializable
class TFOPTDecoder(keras.layers.Layer):
    config_class = OPTConfig

    def __init__(self, config: OPTConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.padding_idx = config.pad_token_id
        self.layerdrop = config.layerdrop
        num_embeddings = config.max_position_embeddings
        self.embed_tokens = TFSharedEmbeddings(
            config.vocab_size, config.word_embed_proj_dim, config.pad_token_id, name="embed_tokens"
        )
        self.embed_positions = TFOPTLearnedPositionalEmbedding(
            num_embeddings,
            config.hidden_size,
            name="embed_positions",
        )

        # Note that the only purpose of `config._remove_final_layer_norm` is to keep backward compatibility
        # with checkpoints that have been fine-tuned before transformers v4.20.1
        # see https://github.com/facebookresearch/metaseq/pull/164
        if config.do_layer_norm_before and not config._remove_final_layer_norm:
            self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
        else:
            self.final_layer_norm = None

        if config.word_embed_proj_dim != config.hidden_size:
            self.project_out = keras.layers.Dense(config.word_embed_proj_dim, name="project_out", use_bias=False)
            self.project_in = keras.layers.Dense(config.hidden_size, name="project_in", use_bias=False)
        else:
            self.project_in = None
            self.project_out = None

        self.layers = [TFOPTDecoderLayer(config, name=f"layers.{i}") for i in range(config.num_hidden_layers)]
        self.dropout = keras.layers.Dropout(config.dropout)

    def get_embed_tokens(self):
        return self.embed_tokens

    def set_embed_tokens(self, embed_tokens):
        self.embed_tokens = embed_tokens

    def set_input_embeddings(self, new_embeddings):
        self.embed_tokens.vocab_size = new_embeddings.shape[0]
        self.embed_tokens.weight = new_embeddings

    def get_input_embeddings(self):
        return self.embed_tokens

    def _prepare_decoder_attention_mask(self, attention_mask, input_shape, past_key_values_length):
        # create causal mask: [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        _, seq_length = input_shape
        tf.debugging.assert_equal(
            seq_length + past_key_values_length,
            shape_list(attention_mask)[1],
            message="Attention mask shape should be (batch_size, seq_length + past_key_values_length)"
            f" but is {shape_list(attention_mask)[1]} with input_ids shape {input_shape} and past length"
            f" {past_key_values_length}.",
        )

        expanded_attn_mask = _expand_mask(attention_mask, tgt_len=input_shape[-1])
        if seq_length > 1:
            combined_attention_mask = (
                _make_causal_mask(input_shape, past_key_values_length=past_key_values_length) + expanded_attn_mask
            )
        else:
            combined_attention_mask = expanded_attn_mask

        return combined_attention_mask

    @unpack_inputs
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
    ) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]:
        r"""
        Args:
            input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)

            head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
                Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
                decoding.

                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
                that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
                all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
            inputs_embeds (`tf.Tensor` of
                shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing
                `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more
                control over how to convert `input_ids` indices into associated vectors than the model's internal
                embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
            training (`bool`, *optional*, defaults to `False`):
                Whether or not to use the model in training mode (some modules like dropout modules have different
                behaviors between training and evaluation).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = shape_list(input_ids)
        elif inputs_embeds is not None:
            input_shape = shape_list(inputs_embeds)[:-1]
        else:
            raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")

        past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0

        if inputs_embeds is None:
            check_embeddings_within_bounds(input_ids, self.embed_tokens.vocab_size)
            inputs_embeds = self.embed_tokens(input_ids)

        if attention_mask is None:
            attention_mask = tf.ones((input_shape[0], input_shape[1] + past_key_values_length), dtype=tf.bool)
        else:
            tf.debugging.assert_equal(
                shape_list(attention_mask)[1],
                past_key_values_length + input_shape[1],
                message=(
                    f"The provided attention mask has length {tf.shape(attention_mask)[1]}, but its length should be "
                    f"{past_key_values_length + input_shape[1]} (sum of the lengths of current and past inputs)"
                ),
            )

        pos_embeds = self.embed_positions(attention_mask, past_key_values_length)

        attention_mask = self._prepare_decoder_attention_mask(attention_mask, input_shape, past_key_values_length)

        if self.project_in is not None:
            inputs_embeds = self.project_in(inputs_embeds)

        hidden_states = inputs_embeds + pos_embeds

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        present_key_values = () if use_cache else None

        # check if head_mask has a correct number of layers specified if desired
        for attn_mask_name, attn_mask in [("head_mask", head_mask)]:
            if attn_mask is not None:
                tf.debugging.assert_equal(
                    shape_list(attn_mask)[0],
                    len(self.layers),
                    message=(
                        f"The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for"
                        f" {shape_list(attn_mask)[0]}."
                    ),
                )

        for idx, decoder_layer in enumerate(self.layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            past_key_value = past_key_values[idx] if past_key_values is not None else None

            hidden_states, layer_self_attn, present_key_value = decoder_layer(
                hidden_states,
                attention_mask=attention_mask,
                layer_head_mask=head_mask[idx] if head_mask is not None else None,
                past_key_value=past_key_value,
            )

            if use_cache:
                present_key_values += (present_key_value,)

            if output_attentions:
                all_self_attns += (layer_self_attn,)

        if self.final_layer_norm is not None:
            hidden_states = self.final_layer_norm(hidden_states)

        if self.project_out is not None:
            hidden_states = self.project_out(hidden_states)

        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        if not return_dict:
            return tuple(
                v for v in [hidden_states, present_key_values, all_hidden_states, all_self_attns] if v is not None
            )

        return TFBaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=present_key_values,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "embed_tokens", None) is not None:
            with tf.name_scope(self.embed_tokens.name):
                self.embed_tokens.build(None)
        if getattr(self, "embed_positions", None) is not None:
            with tf.name_scope(self.embed_positions.name):
                self.embed_positions.build(None)
        if getattr(self, "final_layer_norm", None) is not None:
            with tf.name_scope(self.final_layer_norm.name):
                self.final_layer_norm.build([None, None, self.config.hidden_size])
        if getattr(self, "project_out", None) is not None:
            with tf.name_scope(self.project_out.name):
                self.project_out.build([None, None, self.config.hidden_size])
        if getattr(self, "project_in", None) is not None:
            with tf.name_scope(self.project_in.name):
                self.project_in.build([None, None, self.config.word_embed_proj_dim])
        if getattr(self, "layers", None) is not None:
            for layer in self.layers:
                with tf.name_scope(layer.name):
                    layer.build(None)


@keras_serializable
class TFOPTMainLayer(keras.layers.Layer):
    config_class = OPTConfig

    def __init__(self, config: OPTConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.decoder = TFOPTDecoder(config, name="decoder")

    def get_input_embeddings(self):
        return self.decoder.embed_tokens

    def set_input_embeddings(self, new_embeddings):
        self.decoder.set_input_embeddings(new_embeddings)

    @unpack_inputs
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
        **kwargs,
    ) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.decoder(
            input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        if not return_dict:
            return outputs

        return TFBaseModelOutputWithPast(
            last_hidden_state=outputs.last_hidden_state,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "decoder", None) is not None:
            with tf.name_scope(self.decoder.name):
                self.decoder.build(None)


@add_start_docstrings(
    "The bare TF OPT Model outputting raw hidden-states without any specific head on top.",
    OPT_START_DOCSTRING,
)
@keras_serializable
class TFOPTModel(TFOPTPreTrainedModel):
    config_class = OPTConfig

    def __init__(self, config: OPTConfig, **kwargs):
        super().__init__(config, **kwargs)
        self.config = config
        self.model = TFOPTMainLayer(config, name="model")

    def get_input_embeddings(self):
        return self.model.decoder.embed_tokens

    def set_input_embeddings(self, new_embeddings):
        self.model.set_input_embeddings(new_embeddings)

    @unpack_inputs
    @add_start_docstrings_to_model_forward(OPT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
        **kwargs,
    ) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            head_mask=head_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        if not return_dict:
            return outputs

        return TFBaseModelOutputWithPast(
            last_hidden_state=outputs.last_hidden_state,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def serving_output(self, output):
        pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
        hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
        attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None

        return TFBaseModelOutputWithPast(
            last_hidden_state=output.last_hidden_state,
            past_key_values=pkv,
            hidden_states=hs,
            attentions=attns,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "model", None) is not None:
            with tf.name_scope(self.model.name):
                self.model.build(None)
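

# A minimal usage sketch for the base model (illustrative only; assumes the
# `facebook/opt-350m` weights can be downloaded):
#
#     >>> from transformers import AutoTokenizer, TFOPTModel
#     >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
#     >>> model = TFOPTModel.from_pretrained("facebook/opt-350m")
#     >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
#     >>> outputs = model(**inputs)
#     >>> outputs.last_hidden_state.shape  # (batch_size, sequence_length, hidden_size)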


@add_start_docstrings(
    """
    The OPT Model transformer with a language modeling head on top.
    """,
    OPT_START_DOCSTRING,
)
@keras_serializable
class TFOPTForCausalLM(TFOPTPreTrainedModel, TFCausalLanguageModelingLoss):
    config_class = OPTConfig

    def __init__(self, config: OPTConfig, **kwargs):
        super().__init__(config, **kwargs)
        self.config = config
        self.model = TFOPTMainLayer(config, name="model")

    def get_output_embeddings(self):
        return self.model.get_input_embeddings()

    def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_cache=None, **kwargs):
        attention_mask = kwargs.get("attention_mask", None)

        # only keep the last token of input_ids when a cache is passed
        if past_key_values:
            inputs = tf.expand_dims(inputs[:, -1], -1)

        return {
            "input_ids": inputs,
            "attention_mask": attention_mask,
            "past_key_values": past_key_values,
            "use_cache": use_cache,
        }

    @unpack_inputs
    @replace_return_docstrings(output_type=TFCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFCausalLMOutputWithPast,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_CAUSAL_LM_EXPECTED_OUTPUT,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        labels: np.ndarray | tf.Tensor | None = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
        **kwargs,
    ) -> Union[TFCausalLMOutputWithPast, Tuple[tf.Tensor]]:
        r"""
        Args:
            input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            head_mask (`tf.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            past_key_values (`tuple(tuple(tf.Tensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
                Tuple of `tuple(tf.Tensor)` of length `config.n_layers`, with each tuple having 2 tensors of
                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
                shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional
                tensors are only required when the model is used as a decoder in a Sequence to Sequence model.

                Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
                cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

                If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that
                don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
                `input_ids` of shape `(batch_size, sequence_length)`.
            inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # `position_ids` is accepted for API compatibility but is not consumed here:
        # OPT derives positions from `attention_mask` inside the decoder.
        outputs = self.model(
            input_ids=input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        logits = self.model.decoder.embed_tokens(outputs[0], mode="linear")
        loss = None
        if labels is not None:
            # shift labels to the left and cut last logit token
            shifted_logits = logits[:, :-1]
            labels = labels[:, 1:]
            loss = self.hf_compute_loss(labels, shifted_logits)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TFCausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def serving_output(self, output):
        pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
        hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
        attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None

        return TFCausalLMOutputWithPast(
            past_key_values=pkv,
            hidden_states=hs,
            attentions=attns,
            loss=output.loss,
            logits=output.logits,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "model", None) is not None:
            with tf.name_scope(self.model.name):
                self.model.build(None)
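

# A minimal generation sketch for the causal LM head (illustrative only; assumes the
# `facebook/opt-350m` weights can be downloaded):
#
#     >>> from transformers import AutoTokenizer, TFOPTForCausalLM
#     >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
#     >>> model = TFOPTForCausalLM.from_pretrained("facebook/opt-350m")
#     >>> inputs = tokenizer("Hey, are you conscious? Can you talk to me?", return_tensors="tf")
#     >>> generated = model.generate(**inputs, max_new_tokens=30)
#     >>> tokenizer.batch_decode(generated, skip_special_tokens=True)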