
"""PyTorch ViT model."""

import collections.abc
import math
from typing import Dict, List, Optional, Set, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPooling,
    ImageClassifierOutput,
    MaskedImageModelingOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
    torch_int,
)
from .configuration_vit import ViTConfig


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ViTConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/vit-base-patch16-224-in21k"
_EXPECTED_OUTPUT_SHAPE = [1, 197, 768]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/vit-base-patch16-224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "Egyptian cat"


class ViTEmbeddings(nn.Module):
    """
    Construct the CLS token, position and patch embeddings. Optionally, also the mask token.
    """

    def __init__(self, config: ViTConfig, use_mask_token: bool = False) -> None:
        super().__init__()

        self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size))
        self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size)) if use_mask_token else None
        self.patch_embeddings = ViTPatchEmbeddings(config)
        num_patches = self.patch_embeddings.num_patches
        self.position_embeddings = nn.Parameter(torch.randn(1, num_patches + 1, config.hidden_size))
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.patch_size = config.patch_size
        self.config = config

    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        """
        This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher
        resolution images. This method is also adapted to support torch.jit tracing.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        """
        num_patches = embeddings.shape[1] - 1
        num_positions = self.position_embeddings.shape[1] - 1

        # always interpolate when tracing so the exported model works for dynamic input shapes
        if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
            return self.position_embeddings

        class_pos_embed = self.position_embeddings[:, :1]
        patch_pos_embed = self.position_embeddings[:, 1:]

        dim = embeddings.shape[-1]

        new_height = height // self.patch_size
        new_width = width // self.patch_size

        sqrt_num_positions = torch_int(num_positions**0.5)
        patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)

        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed,
            size=(new_height, new_width),
            mode="bicubic",
            align_corners=False,
        )

        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)

        return torch.cat((class_pos_embed, patch_pos_embed), dim=1)

    def forward(
        self,
        pixel_values: torch.Tensor,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        interpolate_pos_encoding: bool = False,
    ) -> torch.Tensor:
        batch_size, num_channels, height, width = pixel_values.shape
        embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)

        if bool_masked_pos is not None:
            seq_length = embeddings.shape[1]
            mask_tokens = self.mask_token.expand(batch_size, seq_length, -1)
            # replace the masked visual tokens by mask_tokens
            mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
            embeddings = embeddings * (1.0 - mask) + mask_tokens * mask

        # add the [CLS] token to the embedded patch tokens
        cls_tokens = self.cls_token.expand(batch_size, -1, -1)
        embeddings = torch.cat((cls_tokens, embeddings), dim=1)

        # add positional encoding to each token
        if interpolate_pos_encoding:
            embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
        else:
            embeddings = embeddings + self.position_embeddings

        embeddings = self.dropout(embeddings)

        return embeddings
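
# A minimal usage sketch (not part of the upstream file): `interpolate_pos_encoding=True`
# lets a checkpoint pre-trained at 224x224 run on larger inputs by bicubically resizing the
# position-embedding grid, as implemented above. The checkpoint is the documented default;
# the 480x480 input size is an arbitrary illustration.
#
#     import torch
#     from transformers import ViTModel
#
#     model = ViTModel.from_pretrained("google/vit-base-patch16-224-in21k")
#     pixel_values = torch.randn(1, 3, 480, 480)  # larger than the 224x224 pre-training size
#     outputs = model(pixel_values, interpolate_pos_encoding=True)
#     # 480 // 16 = 30 patches per side -> 30 * 30 + 1 [CLS] token = 901 positions
#     assert outputs.last_hidden_state.shape == (1, 901, 768)
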
class ViTPatchEmbeddings(nn.Module):
    """
    This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
    `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
    Transformer.
    """

    def __init__(self, config):
        super().__init__()
        image_size, patch_size = config.image_size, config.patch_size
        num_channels, hidden_size = config.num_channels, config.hidden_size

        image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_patches = num_patches

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)

    def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
        batch_size, num_channels, height, width = pixel_values.shape
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
                f" Expected {self.num_channels} but got {num_channels}."
            )
        if not interpolate_pos_encoding:
            if height != self.image_size[0] or width != self.image_size[1]:
                raise ValueError(
                    f"Input image size ({height}*{width}) doesn't match model"
                    f" ({self.image_size[0]}*{self.image_size[1]})."
                )
        embeddings = self.projection(pixel_values).flatten(2).transpose(1, 2)
        return embeddings
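
# Shape walkthrough (a sketch, not part of the upstream file, assuming the stock ViT-Base
# defaults of `ViTConfig`: image_size=224, patch_size=16, num_channels=3, hidden_size=768):
#
#     import torch
#     from transformers import ViTConfig
#
#     patch_embeddings = ViTPatchEmbeddings(ViTConfig())
#     tokens = patch_embeddings(torch.randn(1, 3, 224, 224))
#     # Conv2d projection: (1, 3, 224, 224) -> (1, 768, 14, 14); flatten(2) + transpose(1, 2)
#     # -> (1, 196, 768), i.e. (224 // 16) ** 2 = 196 patch tokens. ViTEmbeddings then
#     # prepends the [CLS] token, giving the 197 of `_EXPECTED_OUTPUT_SHAPE` above.
#     assert tokens.shape == (1, 196, 768)
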
            e Zd Zdeddf fdZdej                  dej                  fdZ	 d
deej                     de	de
eej                  ej                  f   eej                     f   fd	Z xZS )ViTSelfAttentionr!   r#   Nc                    t         |           |j                  |j                  z  dk7  r3t	        |d      s't        d|j                  f d|j                   d      |j                  | _        t        |j                  |j                  z        | _        | j                  | j                  z  | _        t        j                  |j                  | j                  |j                        | _        t        j                  |j                  | j                  |j                        | _        t        j                  |j                  | j                  |j                        | _        t        j                  |j                         | _        y )Nr   embedding_sizezThe hidden size z4 is not a multiple of the number of attention heads r}   )bias)r%   r&   r*   num_attention_headshasattrr   rm   attention_head_sizeall_head_sizer	   Linearqkv_biasquerykeyvaluer2   attention_probs_dropout_probr4   r6   r!   r7   s     r8   r&   zViTSelfAttention.__init__   s1    : ::a?PVXhHi"6#5#5#6"7 8334A7 
 $*#=#= #&v'9'9F<V<V'V#W !558P8PPYYv1143E3EFOO\
99V//1C1C&//ZYYv1143E3EFOO\
zz&"E"EFr9   xc                     |j                         d d | j                  | j                  fz   }|j                  |      }|j	                  dddd      S )Nr>   r   r@   r   r   )rB   r   r   rN   rK   )r6   r   new_x_shapes      r8   transpose_for_scoresz%ViTSelfAttention.transpose_for_scores   sL    ffhsmt'?'?AYAY&ZZFF;yyAq!$$r9   	head_maskoutput_attentionsc                    | j                  |      }| j                  | j                  |            }| j                  | j                  |            }| j                  |      }t	        j
                  ||j                  dd            }|t        j                  | j                        z  }t        j                  j                  |d      }	| j                  |	      }	||	|z  }	t	        j
                  |	|      }
|
j                  dddd      j                         }
|
j!                         d d | j"                  fz   }|
j%                  |      }
|r|
|	f}|S |
f}|S )Nr>   rE   r   r@   r   r   )r   r   r   r   r(   matmulr   mathsqrtr   r	   rL   softmaxr4   rK   
contiguousrB   r   rN   )r6   hidden_statesr   r   mixed_query_layer	key_layervalue_layerquery_layerattention_scoresattention_probscontext_layernew_context_layer_shapeoutputss                r8   rd   zViTSelfAttention.forward   sT    !JJ}5--dhh}.EF	//

=0IJ//0AB !<<Y5H5HR5PQ+dii8P8P.QQ --//0@b/I ,,7  -	9O_kB%--aAq9DDF"/"4"4"6s";t?Q?Q>S"S%**+BC6G=/2 O\M]r9   rf   )rg   rh   ri   r   r&   r(   rl   r   r   rk   r   r   rd   ro   rp   s   @r8   r   r      s    Gy GT G$%ell %u|| % bg!(0(>!Z^!	uU\\5<</0%2EE	F!r9   r   c                        e Zd Zdeddf fdZ	 	 d	dej                  deej                     de	de
eej                  ej                  f   eej                     f   f fdZ xZS )
ViTSdpaSelfAttentionr!   r#   Nc                 F    t         |   |       |j                  | _        y N)r%   r&   r   r   s     r8   r&   zViTSdpaSelfAttention.__init__   s     ,2,O,O)r9   r   r   r   c           	      ^   |s|'t         j                  d       t        
|   |||      S | j	                  |      }| j                  | j                  |            }| j                  | j                  |            }| j                  |      }t        j                  j                  j                  ||||| j                  r| j                  nddd       }|j                  dddd	      j                         }|j!                         d d
 | j"                  fz   }	|j%                  |	      }|d fS )Na  `ViTSdpaAttention` is used but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True` or `head_mask`. Falling back to the manual attention implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.)r   r   r           F)	is_causalscaler   r@   r   r   r   )loggerwarning_oncer%   rd   r   r   r   r   r(   r	   rL   scaled_dot_product_attentiontrainingr   rK   r   rB   r   rN   )r6   r   r   r   r   r   r   r   r   r   r7   s             r8   rd   zViTSdpaSelfAttention.forward   s:    	 5w 7?+#"3 #   !JJ}5--dhh}.EF	//

=0IJ//0AB++HH15D--C I 
 &--aAq9DDF"/"4"4"6s";t?Q?Q>S"S%**+BCd""r9   rf   )rg   rh   ri   r   r&   r(   FloatTensorr   rl   rk   r   r   rd   ro   rp   s   @r8   r   r      s    Py PT P -1"'	'#(('# ELL)'#  	'#
 
uU\\5<</0%2EE	F'# '#r9   r   c                   |     e Zd ZdZdeddf fdZdej                  dej                  dej                  fdZ xZ	S )	ViTSelfOutputz
    The residual connection is defined in ViTLayer instead of here (as is the case with other models), due to the
    layernorm applied before each block.
    """

    def __init__(self, config: ViTConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        return hidden_states


class ViTAttention(nn.Module):
    def __init__(self, config: ViTConfig) -> None:
        super().__init__()
        self.attention = ViTSelfAttention(config)
        self.output = ViTSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads: Set[int]) -> None:
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.attention.query = prune_linear_layer(self.attention.query, index)
        self.attention.key = prune_linear_layer(self.attention.key, index)
        self.attention.value = prune_linear_layer(self.attention.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
        self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        self_outputs = self.attention(hidden_states, head_mask, output_attentions)

        attention_output = self.output(self_outputs[0], hidden_states)

        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


class ViTSdpaAttention(ViTAttention):
    def __init__(self, config: ViTConfig) -> None:
        super().__init__(config)
        self.attention = ViTSdpaSelfAttention(config)


class ViTIntermediate(nn.Module):
    def __init__(self, config: ViTConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)

        return hidden_states


class ViTOutput(nn.Module):
    def __init__(self, config: ViTConfig) -> None:
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        hidden_states = hidden_states + input_tensor

        return hidden_states


VIT_ATTENTION_CLASSES = {
    "eager": ViTAttention,
    "sdpa": ViTSdpaAttention,
}


class ViTLayer(nn.Module):
    """This corresponds to the Block class in the timm implementation."""

    def __init__(self, config: ViTConfig) -> None:
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = VIT_ATTENTION_CLASSES[config._attn_implementation](config)
        self.intermediate = ViTIntermediate(config)
        self.output = ViTOutput(config)
        self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
        self_attention_outputs = self.attention(
            self.layernorm_before(hidden_states),  # in ViT, layernorm is applied before self-attention
            head_mask,
            output_attentions=output_attentions,
        )
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        # first residual connection
        hidden_states = attention_output + hidden_states

        # in ViT, layernorm is also applied after self-attention
        layer_output = self.layernorm_after(hidden_states)
        layer_output = self.intermediate(layer_output)

        # second residual connection is done here
        layer_output = self.output(layer_output, hidden_states)

        outputs = (layer_output,) + outputs

        return outputs


class ViTEncoder(nn.Module):
    def __init__(self, config: ViTConfig) -> None:
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([ViTLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ) -> Union[tuple, BaseModelOutput]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    layer_head_mask,
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions)

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )


class ViTPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ViTConfig
    base_model_prefix = "vit"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    _no_split_modules = ["ViTEmbeddings", "ViTLayer"]
    _supports_sdpa = True

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            # Upcast to fp32 for `trunc_normal_` and cast back afterwards, since
            # `trunc_normal_cpu` is not implemented for half precision.
            module.weight.data = nn.init.trunc_normal_(
                module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range
            ).to(module.weight.dtype)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, ViTEmbeddings):
            module.position_embeddings.data = nn.init.trunc_normal_(
                module.position_embeddings.data.to(torch.float32),
                mean=0.0,
                std=self.config.initializer_range,
            ).to(module.position_embeddings.dtype)

            module.cls_token.data = nn.init.trunc_normal_(
                module.cls_token.data.to(torch.float32),
                mean=0.0,
                std=self.config.initializer_range,
            ).to(module.cls_token.dtype)


VIT_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ViTConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

VIT_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`]
            for details.

        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        interpolate_pos_encoding (`bool`, *optional*):
            Whether to interpolate the pre-trained position encodings.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare ViT Model transformer outputting raw hidden-states without any specific head on top.",
    VIT_START_DOCSTRING,
)
class ViTModel(ViTPreTrainedModel):
    def __init__(self, config: ViTConfig, add_pooling_layer: bool = True, use_mask_token: bool = False):
        super().__init__(config)
        self.config = config

        self.embeddings = ViTEmbeddings(config, use_mask_token=use_mask_token)
        self.encoder = ViTEncoder(config)

        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.pooler = ViTPooler(config) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> ViTPatchEmbeddings:
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(VIT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPooling,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        # Prepare head mask if needed: 1.0 in head_mask indicates we keep the head.
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        # Cast the input to the dtype of the patch projection if necessary.
        expected_dtype = self.embeddings.patch_embeddings.projection.weight.dtype
        if pixel_values.dtype != expected_dtype:
            pixel_values = pixel_values.to(expected_dtype)

        embedding_output = self.embeddings(
            pixel_values, bool_masked_pos=bool_masked_pos, interpolate_pos_encoding=interpolate_pos_encoding
        )

        encoder_outputs = self.encoder(
            embedding_output,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        sequence_output = self.layernorm(sequence_output)
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            head_outputs = (sequence_output, pooled_output) if pooled_output is not None else (sequence_output,)
            return head_outputs + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


class ViTPooler(nn.Module):
    def __init__(self, config: ViTConfig):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states):
        # We "pool" the model by simply taking the hidden state corresponding to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


@add_start_docstrings(
    """ViT Model with a decoder on top for masked image modeling, as proposed in [SimMIM](https://arxiv.org/abs/2111.09886).

    <Tip>

    Note that we provide a script to pre-train this model on custom data in our [examples
    directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining).

    </Tip>
    """,
    VIT_START_DOCSTRING,
)
class ViTForMaskedImageModeling(ViTPreTrainedModel):
    def __init__(self, config: ViTConfig) -> None:
        super().__init__(config)

        self.vit = ViTModel(config, add_pooling_layer=False, use_mask_token=True)

        self.decoder = nn.Sequential(
            nn.Conv2d(
                in_channels=config.hidden_size,
                out_channels=config.encoder_stride**2 * config.num_channels,
                kernel_size=1,
            ),
            nn.PixelShuffle(config.encoder_stride),
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(VIT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=MaskedImageModelingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        bool_masked_pos: Optional[torch.BoolTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, MaskedImageModelingOutput]:
        r"""
        bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
            Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).

        Returns:

        Examples:
        ```python
        >>> from transformers import AutoImageProcessor, ViTForMaskedImageModeling
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
        >>> model = ViTForMaskedImageModeling.from_pretrained("google/vit-base-patch16-224-in21k")

        >>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
        >>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
        >>> # create random boolean mask of shape (batch_size, num_patches)
        >>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool()

        >>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
        >>> loss, reconstructed_pixel_values = outputs.loss, outputs.reconstruction
        >>> list(reconstructed_pixel_values.shape)
        [1, 3, 224, 224]
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if bool_masked_pos is not None and (self.config.patch_size != self.config.encoder_stride):
            raise ValueError(
                "When `bool_masked_pos` is provided, `patch_size` must be equal to `encoder_stride` to ensure that "
                "the reconstructed image has the same dimensions as the input. "
                f"Got `patch_size` = {self.config.patch_size} and `encoder_stride` = {self.config.encoder_stride}."
            )

        outputs = self.vit(
            pixel_values,
            bool_masked_pos=bool_masked_pos,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            interpolate_pos_encoding=interpolate_pos_encoding,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        # Reshape to (batch_size, num_channels, height, width), dropping the [CLS] token.
        sequence_output = sequence_output[:, 1:]
        batch_size, sequence_length, num_channels = sequence_output.shape
        height = width = math.floor(sequence_length**0.5)
        sequence_output = sequence_output.permute(0, 2, 1).reshape(batch_size, num_channels, height, width)

        # Reconstruct pixel values
        reconstructed_pixel_values = self.decoder(sequence_output)

        masked_im_loss = None
        if bool_masked_pos is not None:
            size = self.config.image_size // self.config.patch_size
            bool_masked_pos = bool_masked_pos.reshape(-1, size, size)
            mask = (
                bool_masked_pos.repeat_interleave(self.config.patch_size, 1)
                .repeat_interleave(self.config.patch_size, 2)
                .unsqueeze(1)
                .contiguous()
            )
            reconstruction_loss = nn.functional.l1_loss(pixel_values, reconstructed_pixel_values, reduction="none")
            masked_im_loss = (reconstruction_loss * mask).sum() / (mask.sum() + 1e-5) / self.config.num_channels

        if not return_dict:
            output = (reconstructed_pixel_values,) + outputs[1:]
            return ((masked_im_loss,) + output) if masked_im_loss is not None else output

        return MaskedImageModelingOutput(
            loss=masked_im_loss,
            reconstruction=reconstructed_pixel_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings(
    """
    ViT Model transformer with an image classification head on top (a linear layer on top of the final hidden state of
    the [CLS] token) e.g. for ImageNet.

    <Tip>

        Note that it's possible to fine-tune ViT on higher resolution images than the ones it has been trained on, by
        setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained
        position embeddings to the higher resolution.

    </Tip>
    """,
    VIT_START_DOCSTRING,
)
class ViTForImageClassification(ViTPreTrainedModel):
    def __init__(self, config: ViTConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels
        self.vit = ViTModel(config, add_pooling_layer=False)

        # Classifier head
        self.classifier = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(VIT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, ImageClassifierOutput]:
        r"""
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        N)r   r   r   rV   r   r   r   
regressionsingle_label_classificationmulti_label_classificationr>   )r[  logitsr   r  )r!   r<  r  rj  r  deviceproblem_typerh  r  r(   longrm   r   squeezer   rN   r
   r   r   r  )r6   rW   r   rk  r   r   rV   r   r   rA  rp  r[  loss_fctr   s                 r8   rd   z!ViTForImageClassification.forward?  s   . &1%<k$++B]B]((/!5%=#  
 "!*Aq!9:YYv}}-F{{''/??a'/;DKK,__q(fllejj.HFLL\a\e\eLe/LDKK,/KDKK,{{''<7"9??a'#FNN$4fnn6FGD#FF3D))-JJ+-B @&++b/R))-II,./Y,F)-)9TGf$EvE$!//))	
 	
r9   rD  )rg   rh   ri   r   r&   r   rE  r   _IMAGE_CLASS_CHECKPOINTr   rG  _IMAGE_CLASS_EXPECTED_OUTPUTr   r(   rl   rk   r   r  rd   ro   rp   s   @r8   rf  rf  #  s     
y 
T 
 ++?@*)$4	 04,0)-,0/337&*A
u||,A
 ELL)A
 &	A

 $D>A
 'tnA
 #+4.A
 d^A
 
u++	,A
 AA
r9   rf  )Crj   collections.abcrw   r   typingr   r   r   r   r   r   r(   torch.utils.checkpointr	   torch.nnr
   r   r   activationsr   modeling_outputsr   r   r   r   modeling_utilsr   pytorch_utilsr   r   utilsr   r   r   r   r   r   configuration_vitr   
get_loggerrg   r   rG  rF  rH  rv  rw  Moduler    r.   r   r   r   r   r   r   r   r   r   r   r  VIT_START_DOCSTRINGrE  r'  r,  rP  rf  r   r9   r8   <module>r     s      : :    A A !  . Q  ) 
		H	%  : &  8 - UBII Up$ $N9ryy 9x,#+ ,#^BII $$299 $N6| 6bii  		    'ryy 'T0
 0
f%) %)P	  2 c\
! \
	\
~		   o
 2 o
o
d  U
 2 U
U
r9   