
import math
from typing import Optional, Tuple

import torch
from torch import nn

from ...cache_utils import Cache
from ...pytorch_utils import ALL_LAYERNORM_LAYERS
from ...utils import is_flash_attn_2_available, is_flash_attn_greater_or_equal_2_10, logging
from ..llama.modeling_llama import LlamaRMSNorm
from ..olmo.configuration_olmo import OlmoConfig
from ..olmo.modeling_olmo import (
    OlmoAttention,
    OlmoDecoderLayer,
    OlmoFlashAttention2,
    OlmoForCausalLM,
    OlmoModel,
    OlmoPreTrainedModel,
    OlmoSdpaAttention,
    apply_rotary_pos_emb,
    repeat_kv,
)


if is_flash_attn_2_available():
    from ...modeling_flash_attention_utils import _flash_attention_forward

logger = logging.get_logger(__name__)


class Olmo2Config(OlmoConfig):
    r"""
    This is the configuration class to store the configuration of a [`Olmo2Model`]. It is used to instantiate an OLMo2
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the [allenai/Olmo2-7B-1124-hf](https://huggingface.co/allenai/Olmo2-7B-1124-hf).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.


    Args:
        vocab_size (`int`, *optional*, defaults to 50304):
            Vocabulary size of the Olmo2 model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`Olmo2Model`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 11008):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, it will default to
            `num_attention_heads`.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 1):
            Padding token id.
        bos_token_id (`int`, *optional*):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 50279):
            End of stream token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie the input and output word embeddings.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
            `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
            `max_position_embeddings` to the expected new maximum. See the following thread for more information on how
            these scaling strategies behave:
            https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
            experimental feature, subject to breaking API changes in future versions.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the rms normalization layers.

    ```python
    >>> from transformers import Olmo2Model, Olmo2Config

    >>> # Initializing a Olmo2 7B style configuration
    >>> configuration = Olmo2Config()

    >>> # Initializing a model from the Olmo2 7B style configuration
    >>> model = Olmo2Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
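
    For illustration, the grouped-query attention and RoPE-scaling options described above can be enabled through the
    corresponding arguments (the values below are examples, not defaults):

    ```python
    >>> # Hypothetical variant: 32 query heads sharing 8 key/value heads, with linear RoPE scaling
    >>> custom_configuration = Olmo2Config(num_key_value_heads=8, rope_scaling={"type": "linear", "factor": 2.0})
    ```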
    """

    model_type = "olmo2"

    def __init__(
        self,
        vocab_size=50304,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=None,
        eos_token_id=50279,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        rope_scaling=None,
        attention_bias=False,
        attention_dropout=0.0,
        rms_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            intermediate_size=intermediate_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            num_key_value_heads=num_key_value_heads,
            hidden_act=hidden_act,
            max_position_embeddings=max_position_embeddings,
            initializer_range=initializer_range,
            use_cache=use_cache,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            rope_theta=rope_theta,
            rope_scaling=rope_scaling,
            attention_bias=attention_bias,
            attention_dropout=attention_dropout,
            **kwargs,
        )

        self.rms_norm_eps = rms_norm_eps
        # OLMo2 normalizes queries/keys instead of clipping them, so the OLMo-specific `clip_qkv` is dropped.
        del self.clip_qkv


class Olmo2RMSNorm(LlamaRMSNorm):
    pass


ALL_LAYERNORM_LAYERS.append(Olmo2RMSNorm)


class Olmo2Attention(OlmoAttention):
    # OLMo2 attention differs from OLMo attention in that RMSNorm is applied to the projected
    # queries and keys, and no QKV clipping is performed.
    def __init__(self, config: Olmo2Config, layer_idx: Optional[int] = None):
        super().__init__(config, layer_idx=layer_idx)
        self.q_norm = Olmo2RMSNorm(self.num_heads * self.head_dim, config.rms_norm_eps)
        self.k_norm = Olmo2RMSNorm(self.num_key_value_heads * self.head_dim, config.rms_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        bsz, q_len, _ = hidden_states.size()

        # Project and normalize queries/keys; values are only projected.
        query_states = self.q_norm(self.q_proj(hidden_states))
        key_states = self.k_norm(self.k_proj(hidden_states))
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        cos, sin = self.rotary_emb(value_states, position_ids)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position is needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)

        if attention_mask is not None:  # no matter the length, we just slice it
            causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
            attn_weights = attn_weights + causal_mask

        # upcast attention to fp32
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
        attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
        attn_output = torch.matmul(attn_weights, value_states)

        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value


class Olmo2FlashAttention2(OlmoFlashAttention2, Olmo2Attention):
    """
    OLMo2 flash attention module. This module inherits from `Olmo2Attention`, as the weights of the module stay
    untouched. The only required change is in the forward pass, where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
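
    For illustration, assuming the optional `flash-attn` package is installed, this implementation is typically
    selected at load time via `attn_implementation`, for example:

    ```python
    import torch

    from transformers import Olmo2ForCausalLM

    model = Olmo2ForCausalLM.from_pretrained(
        "allenai/Olmo2-7B-1124-hf",
        torch_dtype=torch.bfloat16,
        attn_implementation="flash_attention_2",
    )
    ```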
    """

    def __init__(self, *args, **kwargs):
        Olmo2Attention.__init__(self, *args, **kwargs)

        # flash_attn<2.1 generates a top-left aligned causal mask, while bottom-right alignment is needed here
        # (the default for flash_attn>=2.1). This attribute tracks which behaviour the installed version uses.
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        output_attentions = False

        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_norm(self.q_proj(hidden_states))
        key_states = self.k_norm(self.k_proj(hidden_states))
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        cos, sin = self.rotary_emb(value_states, position_ids)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position is needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        # Flash attention expects the layout [batch_size, sequence_length, num_heads, head_dim]
        query_states = query_states.transpose(1, 2)
        key_states = key_states.transpose(1, 2)
        value_states = value_states.transpose(1, 2)

        dropout_rate = self.attention_dropout if self.training else 0.0

        # If embeddings or layer norms were upcast to float32, the hidden states may have been silently cast to
        # float32 as well; cast them back so flash attention receives a supported dtype.
        input_dtype = query_states.dtype
        if input_dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.q_proj.weight.dtype

            logger.warning_once(
                f"The input hidden states seems to be silently casted in float32, this might be related to the fact"
                f" you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
                f" {target_dtype}."
            )

            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)

        attn_output = _flash_attention_forward(
            query_states,
            key_states,
            value_states,
            attention_mask,
            q_len,
            position_ids=position_ids,
            dropout=dropout_rate,
            use_top_left_mask=self._flash_attn_uses_top_left_mask,
            is_causal=self.is_causal,
        )

        attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
        attn_output = self.o_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value


class Olmo2SdpaAttention(OlmoSdpaAttention, Olmo2Attention):
    # Adapted from Olmo2Attention.forward
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
        cache_position: Optional[torch.LongTensor] = None,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        if output_attentions:
            logger.warning_once(
                "Olmo2Model is using Olmo2SdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does "
                "not support `output_attentions=True`. Falling back to the manual attention implementation, but "
                "specifying the manual implementation will be required from Transformers version v5.0.0 onwards. "
                'This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
            )
            return super().forward(
                hidden_states=hidden_states,
                attention_mask=attention_mask,
                position_ids=position_ids,
                past_key_value=past_key_value,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
            )

        bsz, q_len, _ = hidden_states.size()

        query_states = self.q_norm(self.q_proj(hidden_states))
        key_states = self.k_norm(self.k_proj(hidden_states))
        value_states = self.v_proj(hidden_states)

        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)

        cos, sin = self.rotary_emb(value_states, position_ids)
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position is needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        causal_mask = attention_mask
        if causal_mask is not None:
            causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]

        # SDPA with a custom attention mask can misbehave on non-contiguous CUDA inputs, so make them contiguous.
        if query_states.device.type == "cuda" and causal_mask is not None:
            query_states = query_states.contiguous()
            key_states = key_states.contiguous()
            value_states = value_states.contiguous()

        # An explicit boolean (rather than an inline conditional in the SDPA call) keeps torch.compile happy.
        is_causal = True if causal_mask is None and q_len > 1 else False

        attn_output = torch.nn.functional.scaled_dot_product_attention(
            query_states,
            key_states,
            value_states,
            attn_mask=causal_mask,
            dropout_p=self.attention_dropout if self.training else 0.0,
            is_causal=is_causal,
        )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.view(bsz, q_len, self.hidden_size)
        attn_output = self.o_proj(attn_output)

        return attn_output, None, past_key_value


class Olmo2DecoderLayer(OlmoDecoderLayer):
    # OLMo2 applies RMSNorm after the attention and MLP blocks instead of before them.
    def __init__(self, config: Olmo2Config, layer_idx: int):
        super().__init__(config, layer_idx=layer_idx)
        self.post_attention_layernorm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.post_feedforward_layernorm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        del self.input_layernorm

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[Cache] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        residual = hidden_states

        # Self Attention
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.mlp(hidden_states)
        hidden_states = self.post_feedforward_layernorm(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        if use_cache:
            outputs += (present_key_value,)

        return outputs


class Olmo2PreTrainedModel(OlmoPreTrainedModel):
    pass


class Olmo2Model(OlmoModel):
    # Identical to OlmoModel, except the decoder layers and the final norm are the OLMo2 variants.
    def __init__(self, config: Olmo2Config):
        super().__init__(config)
        self.layers = nn.ModuleList(
            [Olmo2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)


class Olmo2ForCausalLM(OlmoForCausalLM):
    def __init__(self, config: Olmo2Config):
        super().__init__(config)
        self.model = Olmo2Model(config)


__all__ = [
    "Olmo2Config",
    "Olmo2ForCausalLM",
    "Olmo2Model",
    "Olmo2PreTrainedModel",
]