
"""PyTorch VideoMAE (masked autoencoder) model."""

import collections.abc
import math
from copy import deepcopy
from dataclasses import dataclass
from typing import Optional, Set, Tuple, Union

import numpy as np
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutput, ImageClassifierOutput
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
    ModelOutput,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from ...utils.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .configuration_videomae import VideoMAEConfig


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "VideoMAEConfig"
_CHECKPOINT_FOR_DOC = "MCG-NJU/videomae-base"


@dataclass
class VideoMAEDecoderOutput(ModelOutput):
    """
    Class for VideoMAEDecoder's outputs, with potential hidden states and attentions.

    Args:
        logits (`torch.FloatTensor` of shape `(batch_size, patch_size ** 2 * num_channels)`):
            Pixel reconstruction logits.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
            plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
            the self-attention heads.
    Nlogitshidden_states
attentions)__name__
__module____qualname____doc__r   torchFloatTensor__annotations__r    r   r   r!        a/var/www/html/venv/lib/python3.12/site-packages/transformers/models/videomae/modeling_videomae.pyr   r   2   sM      !%FE$8<M8E%"3"345<59Ju00129r*   r   c                       e Zd ZU dZdZeej                     ed<   dZ	ej                  ed<   dZ
eeej                        ed<   dZeeej                        ed<   y)VideoMAEForPreTrainingOutputa  
    Class for VideoMAEForPreTraining's outputs, with potential hidden states and attentions.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`):
            Pixel reconstruction loss.
        logits (`torch.FloatTensor` of shape `(batch_size, patch_size ** 2 * num_channels)`):
            Pixel reconstruction logits.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
            plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
            the self-attention heads.
    Nlossr   r    r!   )r"   r#   r$   r%   r.   r   r&   r'   r(   r   r    r   r!   r)   r*   r+   r-   r-   I   sb    $ )-D(5$$
%, $FE$8<M8E%"3"345<59Ju00129r*   r-   c                 h   fd}t        j                  t        |       D cg c]
  } ||       c}      }t        j                  |dddddf         |dddddf<   t        j                  |dddddf         |dddddf<   t        j                  |      j                  d      S c c}w )z Sinusoid position encoding tablec           
          t              D cg c]$  }| t        j                  dd|dz  z  z        z  & c}S c c}w )Ni'     )rangenppower)positionhid_jd_hids     r+   get_position_angle_vecz;get_sinusoid_encoding_table.<locals>.get_position_angle_veci   s;    RWX]R^_288E1
+;e+CDD___s   );Nr   r1   r   )r3   arrayr2   sincosr&   r'   	unsqueeze)
n_positionr7   r8   pos_isinusoid_tables    `   r+   get_sinusoid_encoding_tabler@   e   s    ` XX%PZJ[\5e<\]N ff^Aqt!tG%<=N1add7 ff^Aqt!tG%<=N1add7^,66q99	 ]s   B/c                   (     e Zd ZdZ fdZd Z xZS )VideoMAEEmbeddingsz7
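

# A rough usage sketch (illustrative only; 1568 and 768 are the values implied by the default
# VideoMAE base configuration, i.e. (16 frames // tubelet_size 2) * (224 // patch_size 16) ** 2
# positions and a hidden size of 768):
#
#     >>> table = get_sinusoid_encoding_table(n_position=1568, d_hid=768)
#     >>> table.shape
#     torch.Size([1, 1568, 768])
#
# The table is deterministic rather than learned, so `VideoMAEEmbeddings` below simply adds it to
# the patch embeddings instead of registering it as a trainable parameter.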


class VideoMAEEmbeddings(nn.Module):
    """
    Construct the patch and position embeddings.

    """

    def __init__(self, config):
        super().__init__()

        self.patch_embeddings = VideoMAEPatchEmbeddings(config)
        self.num_patches = self.patch_embeddings.num_patches
        # fixed sin-cos embedding
        self.position_embeddings = get_sinusoid_encoding_table(self.num_patches, config.hidden_size)
        self.config = config

    def forward(self, pixel_values, bool_masked_pos):
        # create patch embeddings
        embeddings = self.patch_embeddings(pixel_values)

        # add position embeddings
        embeddings = embeddings + self.position_embeddings.type_as(embeddings).to(embeddings.device).clone().detach()

        # only keep the visible patches
        if bool_masked_pos is not None:
            batch_size, _, num_channels = embeddings.shape
            embeddings = embeddings[~bool_masked_pos]
            embeddings = embeddings.reshape(batch_size, -1, num_channels)

        return embeddings


class VideoMAEPatchEmbeddings(nn.Module):
    """
    Video to Patch Embedding. This module turns a batch of videos of shape (batch_size, num_frames, num_channels,
    height, width) into a tensor of shape (batch_size, seq_len, hidden_size) to be consumed by a Transformer encoder.

    The seq_len (the number of patches) equals (number of frames // tubelet_size) * (height // patch_size) * (width //
    patch_size).

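    Example (a rough shape sketch; the numbers below assume the default `VideoMAEConfig`, i.e. 224x224
    frames, patch size 16, 16 frames per clip and tubelet size 2; treat them as an illustration, not a
    guaranteed API):

    ```python
    >>> import torch
    >>> from transformers import VideoMAEConfig
    >>> from transformers.models.videomae.modeling_videomae import VideoMAEPatchEmbeddings

    >>> patch_embeddings = VideoMAEPatchEmbeddings(VideoMAEConfig())
    >>> pixel_values = torch.randn(1, 16, 3, 224, 224)
    >>> list(patch_embeddings(pixel_values).shape)  # (16 // 2) * (224 // 16) ** 2 = 1568 patches
    [1, 1568, 768]
    ```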
    """

    def __init__(self, config):
        super().__init__()

        image_size = config.image_size
        patch_size = config.patch_size
        num_channels = config.num_channels
        hidden_size = config.hidden_size
        num_frames = config.num_frames
        tubelet_size = config.tubelet_size

        image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        self.image_size = image_size
        self.patch_size = patch_size
        self.tubelet_size = int(tubelet_size)
        num_patches = (
            (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) * (num_frames // self.tubelet_size)
        )
        self.num_channels = num_channels
        self.num_patches = num_patches
        self.projection = nn.Conv3d(
            in_channels=num_channels,
            out_channels=hidden_size,
            kernel_size=(self.tubelet_size, patch_size[0], patch_size[1]),
            stride=(self.tubelet_size, patch_size[0], patch_size[1]),
        )

    def forward(self, pixel_values):
        batch_size, num_frames, num_channels, height, width = pixel_values.shape
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        if height != self.image_size[0] or width != self.image_size[1]:
            raise ValueError(
                f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})."
            )
        # permute to (batch_size, num_channels, num_frames, height, width)
        pixel_values = pixel_values.permute(0, 2, 1, 3, 4)
        embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2)
        return embeddings


class VideoMAESelfAttention(nn.Module):
    def __init__(self, config: VideoMAEConfig) -> None:
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size {config.hidden_size,} is not a multiple of the number of attention "
                f"heads {config.num_attention_heads}."
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
        self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=False)
        self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=False)

        if config.qkv_bias:
            self.q_bias = nn.Parameter(torch.zeros(self.all_head_size))
            self.v_bias = nn.Parameter(torch.zeros(self.all_head_size))
        else:
            self.q_bias = None
            self.v_bias = None

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        k_bias = torch.zeros_like(self.v_bias, requires_grad=False) if self.q_bias is not None else None
        keys = nn.functional.linear(input=hidden_states, weight=self.key.weight, bias=k_bias)
        values = nn.functional.linear(input=hidden_states, weight=self.value.weight, bias=self.v_bias)
        queries = nn.functional.linear(input=hidden_states, weight=self.query.weight, bias=self.q_bias)

        key_layer = self.transpose_for_scores(keys)
        value_layer = self.transpose_for_scores(values)
        query_layer = self.transpose_for_scores(queries)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        return outputs


class VideoMAESdpaSelfAttention(VideoMAESelfAttention):
    def __init__(self, config: VideoMAEConfig) -> None:
        super().__init__(config)
        self.attention_probs_dropout_prob = config.attention_probs_dropout_prob

    def forward(
        self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        k_bias = torch.zeros_like(self.v_bias, requires_grad=False) if self.q_bias is not None else None
        keys = nn.functional.linear(input=hidden_states, weight=self.key.weight, bias=k_bias)
        values = nn.functional.linear(input=hidden_states, weight=self.value.weight, bias=self.v_bias)
        queries = nn.functional.linear(input=hidden_states, weight=self.query.weight, bias=self.q_bias)

        key_layer = self.transpose_for_scores(keys)
        value_layer = self.transpose_for_scores(values)
        query_layer = self.transpose_for_scores(queries)

        context_layer = torch.nn.functional.scaled_dot_product_attention(
            query_layer,
            key_layer,
            value_layer,
            head_mask,
            self.attention_probs_dropout_prob if self.training else 0.0,
            is_causal=False,
            scale=None,
        )

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        return context_layer, None


class VideoMAESelfOutput(nn.Module):
    """
    The residual connection is defined in VideoMAELayer instead of here (as is the case with other models), due to the
    layernorm applied before each block.
    """

    def __init__(self, config: VideoMAEConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        return hidden_states


class VideoMAEAttention(nn.Module):
    def __init__(self, config: VideoMAEConfig) -> None:
        super().__init__()
        self.attention = VideoMAESelfAttention(config)
        self.output = VideoMAESelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads: Set[int]) -> None:
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.attention.query = prune_linear_layer(self.attention.query, index)
        self.attention.key = prune_linear_layer(self.attention.key, index)
        self.attention.value = prune_linear_layer(self.attention.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
        self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        self_outputs = self.attention(hidden_states, head_mask, output_attentions)

        attention_output = self.output(self_outputs[0], hidden_states)

        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


class VideoMAESdpaAttention(VideoMAEAttention):
    def __init__(self, config: VideoMAEConfig) -> None:
        super().__init__(config)
        self.attention = VideoMAESdpaSelfAttention(config)


class VideoMAEIntermediate(nn.Module):
    def __init__(self, config: VideoMAEConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)

        return hidden_states


class VideoMAEOutput(nn.Module):
    def __init__(self, config: VideoMAEConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        hidden_states = hidden_states + input_tensor

        return hidden_states


VIDEOMAE_ATTENTION_CLASSES = {
    "eager": VideoMAEAttention,
    "sdpa": VideoMAESdpaAttention,
}


class VideoMAELayer(nn.Module):
    """This corresponds to the Block class in the timm implementation."""

    def __init__(self, config: VideoMAEConfig) -> None:
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = VIDEOMAE_ATTENTION_CLASSES[config._attn_implementation](config)
        self.intermediate = VideoMAEIntermediate(config)
        self.output = VideoMAEOutput(config)
        self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        self_attention_outputs = self.attention(
            self.layernorm_before(hidden_states),  # in VideoMAE, layernorm is applied before self-attention
            head_mask,
            output_attentions=output_attentions,
        )
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        # first residual connection
        hidden_states = attention_output + hidden_states

        # in VideoMAE, layernorm is also applied after self-attention
        layer_output = self.layernorm_after(hidden_states)
        layer_output = self.intermediate(layer_output)

        # second residual connection is done here
        layer_output = self.output(layer_output, hidden_states)

        outputs = (layer_output,) + outputs

        return outputs


class VideoMAEEncoder(nn.Module):
    def __init__(self, config: VideoMAEConfig) -> None:
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([VideoMAELayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ) -> Union[tuple, BaseModelOutput]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    layer_head_mask,
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions)

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )


class VideoMAEPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = VideoMAEConfig
    base_model_prefix = "videomae"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    _supports_sdpa = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv3d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


VIDEOMAE_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`VideoMAEConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

VIDEOMAE_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_frames, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`VideoMAEImageProcessor.__call__`] for details.

        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare VideoMAE Model transformer outputting raw hidden-states without any specific head on top.",
    VIDEOMAE_START_DOCSTRING,
)
class VideoMAEModel(VideoMAEPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = VideoMAEEmbeddings(config)
        self.encoder = VideoMAEEncoder(config)

        if config.use_mean_pooling:
            self.layernorm = None
        else:
            self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(VIDEOMAE_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Each video in the
            batch must have the same number of masked patches. If `None`, then all patches are considered. Sequence
            length is `(num_frames // tubelet_size) * (image_size // patch_size) ** 2`.

        Returns:

        Examples:

        ```python
        >>> import av
        >>> import numpy as np

        >>> from transformers import AutoImageProcessor, VideoMAEModel
        >>> from huggingface_hub import hf_hub_download

        >>> np.random.seed(0)


        >>> def read_video_pyav(container, indices):
        ...     '''
        ...     Decode the video with PyAV decoder.
        ...     Args:
        ...         container (`av.container.input.InputContainer`): PyAV container.
        ...         indices (`List[int]`): List of frame indices to decode.
        ...     Returns:
        ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
        ...     '''
        ...     frames = []
        ...     container.seek(0)
        ...     start_index = indices[0]
        ...     end_index = indices[-1]
        ...     for i, frame in enumerate(container.decode(video=0)):
        ...         if i > end_index:
        ...             break
        ...         if i >= start_index and i in indices:
        ...             frames.append(frame)
        ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])


        >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
        ...     '''
        ...     Sample a given number of frame indices from the video.
        ...     Args:
        ...         clip_len (`int`): Total number of frames to sample.
        ...         frame_sample_rate (`int`): Sample every n-th frame.
        ...         seg_len (`int`): Maximum allowed index of sample's last frame.
        ...     Returns:
        ...         indices (`List[int]`): List of sampled frame indices
        ...     '''
        ...     converted_len = int(clip_len * frame_sample_rate)
        ...     end_idx = np.random.randint(converted_len, seg_len)
        ...     start_idx = end_idx - converted_len
        ...     indices = np.linspace(start_idx, end_idx, num=clip_len)
        ...     indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
        ...     return indices


        >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
        >>> file_path = hf_hub_download(
        ...     repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
        ... )
        >>> container = av.open(file_path)

        >>> # sample 16 frames
        >>> indices = sample_frame_indices(clip_len=16, frame_sample_rate=1, seg_len=container.streams.video[0].frames)
        >>> video = read_video_pyav(container, indices)

        >>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base")
        >>> model = VideoMAEModel.from_pretrained("MCG-NJU/videomae-base")

        >>> # prepare video for the model
        >>> inputs = image_processor(list(video), return_tensors="pt")

        >>> # forward pass
        >>> outputs = model(**inputs)
        >>> last_hidden_states = outputs.last_hidden_state
        >>> list(last_hidden_states.shape)
        [1, 1568, 768]
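        >>> # 1568 = (16 frames // tubelet_size 2) * (224 // patch_size 16) ** 2, since no bool_masked_pos was passed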
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # Prepare head mask if needed (1.0 in head_mask indicates we keep the head)
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(pixel_values, bool_masked_pos)

        encoder_outputs = self.encoder(
            embedding_output,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        if self.layernorm is not None:
            sequence_output = self.layernorm(sequence_output)

        if not return_dict:
            return (sequence_output,) + encoder_outputs[1:]

        return BaseModelOutput(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


class VideoMAEDecoder(nn.Module):
    def __init__(self, config, num_patches):
        super().__init__()

        decoder_num_labels = config.num_channels * config.tubelet_size * config.patch_size**2

        decoder_config = deepcopy(config)
        decoder_config.hidden_size = config.decoder_hidden_size
        decoder_config.num_hidden_layers = config.decoder_num_hidden_layers
        decoder_config.num_attention_heads = config.decoder_num_attention_heads
        decoder_config.intermediate_size = config.decoder_intermediate_size
        self.decoder_layers = nn.ModuleList(
            [VideoMAELayer(decoder_config) for _ in range(config.decoder_num_hidden_layers)]
        )

        self.norm = nn.LayerNorm(config.decoder_hidden_size)
        self.head = (
            nn.Linear(config.decoder_hidden_size, decoder_num_labels) if decoder_num_labels > 0 else nn.Identity()
        )

        self.gradient_checkpointing = False
        self.config = config

    def forward(
        self,
        hidden_states,
        return_token_num,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        # apply Transformer layers (blocks)
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        for i, layer_module in enumerate(self.decoder_layers):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    None,
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(hidden_states, head_mask=None, output_attentions=output_attentions)

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if return_token_num > 0:
            hidden_states = hidden_states[:, -return_token_num:]

        # predictor projection
        hidden_states = self.norm(hidden_states)
        logits = self.head(hidden_states)

        if not return_dict:
            return tuple(v for v in [logits, all_hidden_states, all_self_attentions] if v is not None)
        return VideoMAEDecoderOutput(logits=logits, hidden_states=all_hidden_states, attentions=all_self_attentions)


@add_start_docstrings(
    "The VideoMAE Model transformer with the decoder on top for self-supervised pre-training.",
    VIDEOMAE_START_DOCSTRING,
)
class VideoMAEForPreTraining(VideoMAEPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.videomae = VideoMAEModel(config)

        self.encoder_to_decoder = nn.Linear(config.hidden_size, config.decoder_hidden_size, bias=False)
        self.mask_token = nn.Parameter(torch.zeros(1, 1, config.decoder_hidden_size))
        self.position_embeddings = get_sinusoid_encoding_table(
            self.videomae.embeddings.num_patches, config.decoder_hidden_size
        )

        self.decoder = VideoMAEDecoder(config, num_patches=self.videomae.embeddings.num_patches)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(VIDEOMAE_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=VideoMAEForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        bool_masked_pos: torch.BoolTensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, VideoMAEForPreTrainingOutput]:
        r"""
        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Each video in the
            batch must have the same number of masked patches. Sequence length is `(num_frames // tubelet_size) *
            (image_size // patch_size) ** 2`.

        Returns:

        Examples:
        ```python
        >>> from transformers import AutoImageProcessor, VideoMAEForPreTraining
        >>> import numpy as np
        >>> import torch

        >>> num_frames = 16
        >>> video = list(np.random.randint(0, 256, (num_frames, 3, 224, 224)))

        >>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base")
        >>> model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base")

        >>> pixel_values = image_processor(video, return_tensors="pt").pixel_values

        >>> num_patches_per_frame = (model.config.image_size // model.config.patch_size) ** 2
        >>> seq_length = (num_frames // model.config.tubelet_size) * num_patches_per_frame
        >>> bool_masked_pos = torch.randint(0, 2, (1, seq_length)).bool()

        >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
        >>> loss = outputs.loss
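        >>> logits = outputs.logits  # reconstruction predictions, one row per *masked* patch (illustrative check)
        >>> logits.shape[-1] == model.config.tubelet_size * model.config.patch_size**2 * model.config.num_channels
        True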
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.videomae(
            pixel_values,
            bool_masked_pos=bool_masked_pos,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        sequence_output = self.encoder_to_decoder(
            sequence_output
        )  # [batch_size, num_visible_patches, decoder_hidden_size]
        batch_size, seq_len, num_channels = sequence_output.shape

        # we don't unshuffle the correct visible token order, but shuffle the position embeddings accordingly.
        if bool_masked_pos is None:
            raise ValueError("One must provided a boolean mask ")
        expanded_position_embeddings = self.position_embeddings.expand(batch_size, -1, -1).type_as(pixel_values)
        expanded_position_embeddings = expanded_position_embeddings.to(pixel_values.device).clone().detach()
        pos_emb_visible = expanded_position_embeddings[~bool_masked_pos].reshape(batch_size, -1, num_channels)
        pos_emb_mask = expanded_position_embeddings[bool_masked_pos].reshape(batch_size, -1, num_channels)

        # [batch_size, num_patches, decoder_hidden_size]
        x_full = torch.cat([sequence_output + pos_emb_visible, self.mask_token + pos_emb_mask], dim=1)

        # [batch_size, num_masked_patches, decoder_num_labels]
        decoder_outputs = self.decoder(x_full, pos_emb_mask.shape[1])
        logits = decoder_outputs.logits

        loss = None
        with torch.no_grad():
            # calculate the labels to be predicted
            if self.config.num_channels != 3:
                # Can't unnormalize with default means/stds
                frames = pixel_values
            else:
                # first, unnormalize the frames
                device = pixel_values.device
                dtype = pixel_values.dtype
                mean = torch.as_tensor(IMAGENET_DEFAULT_MEAN).to(device=device, dtype=dtype)[None, None, :, None, None]
                std = torch.as_tensor(IMAGENET_DEFAULT_STD).to(device=device, dtype=dtype)[None, None, :, None, None]
                frames = pixel_values * std + mean  # in [0, 1]

            batch_size, time, num_channels, height, width = frames.shape
            tubelet_size, patch_size = self.config.tubelet_size, self.config.patch_size
            if self.config.norm_pix_loss:
                # step 1: split up dimensions (time by tubelet_size, height by patch_size, width by patch_size)
                frames = frames.view(
                    batch_size,
                    time // tubelet_size,
                    tubelet_size,
                    num_channels,
                    height // patch_size,
                    patch_size,
                    width // patch_size,
                    patch_size,
                )
                # step 2: move dimensions to concatenate:
                frames = frames.permute(0, 1, 4, 6, 2, 5, 7, 3).contiguous()
                # step 3: concatenate:
                frames = frames.view(
                    batch_size,
                    time // tubelet_size * height // patch_size * width // patch_size,
                    tubelet_size * patch_size * patch_size,
                    num_channels,
                )
                # step 4: normalize. The authors find that the mean is about 0.48 and standard deviation is about 0.08.
                frames_norm = (frames - frames.mean(dim=-2, keepdim=True)) / (
                    frames.var(dim=-2, unbiased=True, keepdim=True).sqrt() + 1e-6
                )
                # step 5: reshape to (batch_size, T//ts * H//ps * W//ps, ts * ps * ps * C)
                videos_patch = frames_norm.view(
                    batch_size,
                    time // tubelet_size * height // patch_size * width // patch_size,
                    tubelet_size * patch_size * patch_size * num_channels,
                )
            else:
                if self.config.num_channels != 3:
                    raise ValueError(
                        "Can't unnormalize non-RGB images. Consider setting config.norm_pix_loss to False."
                    )
                # step 1: split up dimensions (time by tubelet_size, height by patch_size, width by patch_size)
                frames = frames.view(
                    batch_size,
                    time // tubelet_size,
                    tubelet_size,
                    num_channels,
                    height // patch_size,
                    patch_size,
                    width // patch_size,
                    patch_size,
                )
                # step 2: move dimensions to concatenate: (batch_size, T//ts, H//ps, W//ps, ts, ps, ps, C)
                frames = frames.permute(0, 1, 4, 6, 2, 5, 7, 3).contiguous()
                # step 3: concatenate
                videos_patch = frames.view(
                    batch_size,
                    time // tubelet_size * height // patch_size * width // patch_size,
                    tubelet_size * patch_size * patch_size * num_channels,
                )

            batch_size, _, num_channels = videos_patch.shape
            labels = videos_patch[bool_masked_pos].reshape(batch_size, -1, num_channels)

        loss_fct = MSELoss()
        loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return VideoMAEForPreTrainingOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings(
    """VideoMAE Model transformer with a video classification head on top (a linear layer on top of the average pooled hidden
    states of all tokens) e.g. for ImageNet.""",
    VIDEOMAE_START_DOCSTRING,
)
class VideoMAEForVideoClassification(VideoMAEPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.num_labels = config.num_labels
        self.videomae = VideoMAEModel(config)

        # Classifier head
        self.fc_norm = nn.LayerNorm(config.hidden_size) if config.use_mean_pooling else None
        self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(VIDEOMAE_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=ImageClassifierOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ImageClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Returns:

        Examples:

        ```python
        >>> import av
        >>> import torch
        >>> import numpy as np

        >>> from transformers import AutoImageProcessor, VideoMAEForVideoClassification
        >>> from huggingface_hub import hf_hub_download

        >>> np.random.seed(0)


        >>> def read_video_pyav(container, indices):
        ...     '''
        ...     Decode the video with PyAV decoder.
        ...     Args:
        ...         container (`av.container.input.InputContainer`): PyAV container.
        ...         indices (`List[int]`): List of frame indices to decode.
        ...     Returns:
        ...         result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
        ...     '''
        ...     frames = []
        ...     container.seek(0)
        ...     start_index = indices[0]
        ...     end_index = indices[-1]
        ...     for i, frame in enumerate(container.decode(video=0)):
        ...         if i > end_index:
        ...             break
        ...         if i >= start_index and i in indices:
        ...             frames.append(frame)
        ...     return np.stack([x.to_ndarray(format="rgb24") for x in frames])


        >>> def sample_frame_indices(clip_len, frame_sample_rate, seg_len):
        ...     '''
        ...     Sample a given number of frame indices from the video.
        ...     Args:
        ...         clip_len (`int`): Total number of frames to sample.
        ...         frame_sample_rate (`int`): Sample every n-th frame.
        ...         seg_len (`int`): Maximum allowed index of sample's last frame.
        ...     Returns:
        ...         indices (`List[int]`): List of sampled frame indices
        ...     '''
        ...     converted_len = int(clip_len * frame_sample_rate)
        ...     end_idx = np.random.randint(converted_len, seg_len)
        ...     start_idx = end_idx - converted_len
        ...     indices = np.linspace(start_idx, end_idx, num=clip_len)
        ...     indices = np.clip(indices, start_idx, end_idx - 1).astype(np.int64)
        ...     return indices


        >>> # video clip consists of 300 frames (10 seconds at 30 FPS)
        >>> file_path = hf_hub_download(
        ...     repo_id="nielsr/video-demo", filename="eating_spaghetti.mp4", repo_type="dataset"
        ... )
        >>> container = av.open(file_path)

        >>> # sample 16 frames
        >>> indices = sample_frame_indices(clip_len=16, frame_sample_rate=1, seg_len=container.streams.video[0].frames)
        >>> video = read_video_pyav(container, indices)

        >>> image_processor = AutoImageProcessor.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")
        >>> model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")

        >>> inputs = image_processor(list(video), return_tensors="pt")

        >>> with torch.no_grad():
        ...     outputs = model(**inputs)
        ...     logits = outputs.logits

        >>> # model predicts one of the 400 Kinetics-400 classes
        >>> predicted_label = logits.argmax(-1).item()
        >>> print(model.config.id2label[predicted_label])
        eating spaghetti
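        >>> # the classification head returns one logit per Kinetics-400 label (illustrative)
        >>> list(logits.shape)
        [1, 400]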
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.videomae(
            pixel_values,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        if self.fc_norm is not None:
            sequence_output = self.fc_norm(sequence_output.mean(1))
        else:
            sequence_output = sequence_output[:, 0]

        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )