"""PyTorch ALIGN model."""

import math
from dataclasses import dataclass
from typing import Any, Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn

from ...activations import ACT2FN
from ...modeling_outputs import (
    BaseModelOutputWithNoAttention,
    BaseModelOutputWithPastAndCrossAttentions,
    BaseModelOutputWithPoolingAndCrossAttentions,
    BaseModelOutputWithPoolingAndNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
    ModelOutput,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from .configuration_align import AlignConfig, AlignTextConfig, AlignVisionConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "kakaobrain/align-base"
_CONFIG_FOR_DOC = "AlignConfig"

ALIGN_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`AlignConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

ALIGN_TEXT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
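
    Example of the expected text tensors (an editor's illustration, not part of the upstream
    docstring):

    ```python
    >>> from transformers import AutoTokenizer

    >>> tokenizer = AutoTokenizer.from_pretrained("kakaobrain/align-base")
    >>> enc = tokenizer(["a photo of a cat"], padding="max_length", max_length=8, return_tensors="pt")
    >>> enc.input_ids.shape
    torch.Size([1, 8])
    >>> enc.attention_mask.shape
    torch.Size([1, 8])
    ```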
"""

ALIGN_VISION_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`EfficientNetImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
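
    Example of the expected pixel tensor (an editor's illustration, not part of the upstream
    docstring):

    ```python
    >>> from PIL import Image
    >>> from transformers import AutoProcessor

    >>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base")
    >>> image = Image.new("RGB", (600, 400))
    >>> pixel_values = processor(images=image, return_tensors="pt").pixel_values
    >>> pixel_values.ndim  # (batch_size, num_channels, height, width)
    4
    ```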
"""

ALIGN_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`EfficientNetImageProcessor.__call__`] for details.
        return_loss (`bool`, *optional*):
            Whether or not to return the contrastive loss.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@dataclass
class AlignVisionModelOutput(ModelOutput):
    """
    Base class for vision model's outputs that also contains image embeddings obtained by pooling the last hidden states.

    Args:
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
            The image embeddings obtained by applying the projection layer to the pooler_output.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
    Nimage_embedslast_hidden_statehidden_states)__name__
__module____qualname____doc__r   r   torchFloatTensor__annotations__r   r    r        [/var/www/html/venv/lib/python3.12/site-packages/transformers/models/align/modeling_align.pyr   r      sI     15L(5,,-4+/u((/8<M8E%"3"345<r)   r   c                       e Zd ZU dZdZeej                     ed<   dZ	ej                  ed<   dZ
eeej                        ed<   dZeeej                        ed<   y)AlignTextModelOutputa  
    Base class for text model's outputs that also contains a pooling of the last hidden states.

    Args:
        text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
            The text embeddings obtained by applying the projection layer to the pooler_output.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    Ntext_embedsr   r    
attentions)r!   r"   r#   r$   r-   r   r%   r&   r'   r   r    r   r.   r(   r)   r*   r,   r,      sc    * 04K%++,3+/u((/8<M8E%"3"345<59Ju00129r)   r,   c                       e Zd ZU dZdZeej                     ed<   dZ	ej                  ed<   dZ
ej                  ed<   dZej                  ed<   dZej                  ed<   dZeed<   dZeed	<   d
ee   fdZy)AlignOutputa  
    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
            Contrastive loss for image-text similarity.
        logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
            The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
            similarity scores.
        logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
            The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
            similarity scores.
        text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
            The text embeddings obtained by applying the projection layer to the pooled output of [`AlignTextModel`].
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
            The output of [`AlignVisionModel`].
        text_model_output (`BaseModelOutputWithPoolingAndCrossAttentions`):
            The output of the [`AlignTextModel`].
        vision_model_output (`BaseModelOutputWithPoolingAndNoAttention`):
            The output of the [`AlignVisionModel`].
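
    Example (an editor's illustration, not part of the upstream docstring; assumes `model` and
    `inputs` were created as in the [`AlignModel`] usage example):

    ```python
    >>> outputs = model(**inputs, return_loss=True)  # doctest: +SKIP
    >>> # the two logit matrices are transposes of each other
    >>> torch.equal(outputs.logits_per_image, outputs.logits_per_text.t())  # doctest: +SKIP
    True
    ```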
    Nlosslogits_per_imagelogits_per_textr-   r   text_model_outputvision_model_outputreturnc                 H     t         fd j                         D              S )Nc              3   d   K   | ]'  }|d vr|   nt        |      j                          ) yw))r4   r5   N)getattrto_tuple).0kselfs     r*   	<genexpr>z'AlignOutput.to_tuple.<locals>.<genexpr>	  s=      
  LLDGRYZ^`aRbRkRkRmm
s   -0)tuplekeysr=   s   `r*   r:   zAlignOutput.to_tuple  s#     
YY[
 
 	
r)   )r!   r"   r#   r$   r1   r   r%   r&   r'   r2   r3   r-   r   r4   r   r5   r   r   r   r:   r(   r)   r*   r0   r0      s    ( )-D(5$$
%,*.e''.)-OU&&-%)K"")&*L%##*FJCJDHAH
%* 
r)   r0   logitsr6   c                     t         j                  j                  | t        j                  t        |       | j                        d      S )Ndeviceg?)label_smoothing)r   
functionalcross_entropyr%   arangelenrE   )rB   s    r*   contrastive_lossrK     s5    ==&&vu||CKPVP]P]/^ps&ttr)   
similarityc                 Z    t        |       }t        | j                               }||z   dz  S )Ng       @)rK   t)rL   caption_loss
image_losss      r*   
align_lossrQ     s,    #J/L!*,,.1J:%,,r)   confignum_channelsc                     | j                   }|| j                  z  }t        |t        ||dz  z         |z  |z        }|d|z  k  r||z  }t        |      S )z<
    Round number of filters based on depth multiplier.
    """
    divisor = config.depth_divisor
    num_channels *= config.width_coefficient
    new_dim = max(divisor, int(num_channels + divisor / 2) // divisor * divisor)

    # Make sure that round down does not go down by more than 10%.
    if new_dim < 0.9 * num_channels:
        new_dim += divisor

    return int(new_dim)


def correct_pad(kernel_size: Union[int, Tuple], adjust: bool = True):
    r"""
    Utility function to get the tuple padding value for the depthwise convolution.

    Args:
        kernel_size (`int` or `tuple`):
            Kernel size of the convolution layers.
        adjust (`bool`, *optional*, defaults to `True`):
            Adjusts padding value to apply to right and bottom sides of the input.
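
    Example (an editor's illustration, not part of the upstream docstring):

    ```python
    >>> correct_pad(3)  # padding as (left, right, top, bottom) for a 3x3 kernel
    (0, 1, 0, 1)
    >>> correct_pad((5, 5), adjust=False)
    (2, 2, 2, 2)
    ```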
    """
    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)

    correct = (kernel_size[0] // 2, kernel_size[1] // 2)
    if adjust:
        return (correct[1] - 1, correct[1], correct[0] - 1, correct[0])
    else:
        return (correct[1], correct[1], correct[0], correct[0])


class AlignVisionEmbeddings(nn.Module):
    r"""
    A module that corresponds to the stem module of the original work.
    """

    def __init__(self, config: AlignVisionConfig):
        super().__init__()

        self.out_dim = round_filters(config, 32)
        self.padding = nn.ZeroPad2d(padding=(0, 1, 0, 1))
        self.convolution = nn.Conv2d(
            config.num_channels, self.out_dim, kernel_size=3, stride=2, padding="valid", bias=False
        )
        self.batchnorm = nn.BatchNorm2d(self.out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum)
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        features = self.padding(pixel_values)
        features = self.convolution(features)
        features = self.batchnorm(features)
        features = self.activation(features)

        return features


class AlignVisionDepthwiseConv2d(nn.Conv2d):
    def __init__(
        self,
        in_channels,
        depth_multiplier=1,
        kernel_size=3,
        stride=1,
        padding=0,
        dilation=1,
        bias=True,
        padding_mode="zeros",
    ):
        # A depthwise convolution is a grouped convolution with one group per input channel.
        out_channels = in_channels * depth_multiplier
        super().__init__(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=in_channels,
            bias=bias,
            padding_mode=padding_mode,
        )


class AlignVisionExpansionLayer(nn.Module):
    r"""
    This corresponds to the expansion phase of each block in the original implementation.
    """

    def __init__(self, config: AlignVisionConfig, in_dim: int, out_dim: int, stride: int):
        super().__init__()
        self.expand_conv = nn.Conv2d(
            in_channels=in_dim,
            out_channels=out_dim,
            kernel_size=1,
            padding="same",
            bias=False,
        )
        self.expand_bn = nn.BatchNorm2d(num_features=out_dim, eps=config.batch_norm_eps)
        self.expand_act = ACT2FN[config.hidden_act]

    def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
        # Expand phase
        hidden_states = self.expand_conv(hidden_states)
        hidden_states = self.expand_bn(hidden_states)
        hidden_states = self.expand_act(hidden_states)

        return hidden_states


class AlignVisionDepthwiseLayer(nn.Module):
    r"""
    This corresponds to the depthwise convolution phase of each block in the original implementation.
    """

    def __init__(
        self,
        config: AlignVisionConfig,
        in_dim: int,
        stride: int,
        kernel_size: int,
        adjust_padding: bool,
    ):
        super().__init__()
        self.stride = stride
        conv_pad = "valid" if self.stride == 2 else "same"
        padding = correct_pad(kernel_size, adjust=adjust_padding)

        self.depthwise_conv_pad = nn.ZeroPad2d(padding=padding)
        self.depthwise_conv = AlignVisionDepthwiseConv2d(
            in_dim, kernel_size=kernel_size, stride=stride, padding=conv_pad, bias=False
        )
        self.depthwise_norm = nn.BatchNorm2d(
            num_features=in_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum
        )
        self.depthwise_act = ACT2FN[config.hidden_act]

    def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
        # Depthwise convolution
        if self.stride == 2:
            hidden_states = self.depthwise_conv_pad(hidden_states)

        hidden_states = self.depthwise_conv(hidden_states)
        hidden_states = self.depthwise_norm(hidden_states)
        hidden_states = self.depthwise_act(hidden_states)

        return hidden_states


class AlignVisionSqueezeExciteLayer(nn.Module):
    r"""
    This corresponds to the Squeeze and Excitement phase of each block in the original implementation.
    """

    def __init__(self, config: AlignVisionConfig, in_dim: int, expand_dim: int, expand: bool = False):
        super().__init__()
        self.dim = expand_dim if expand else in_dim
        self.dim_se = max(1, int(in_dim * config.squeeze_expansion_ratio))

        self.squeeze = nn.AdaptiveAvgPool2d(output_size=1)
        self.reduce = nn.Conv2d(
            in_channels=self.dim,
            out_channels=self.dim_se,
            kernel_size=1,
            padding="same",
        )
        self.expand = nn.Conv2d(
            in_channels=self.dim_se,
            out_channels=self.dim,
            kernel_size=1,
            padding="same",
        )
        self.act_reduce = ACT2FN[config.hidden_act]
        self.act_expand = nn.Sigmoid()

    def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
        inputs = hidden_states
        hidden_states = self.squeeze(hidden_states)
        hidden_states = self.reduce(hidden_states)
        hidden_states = self.act_reduce(hidden_states)

        hidden_states = self.expand(hidden_states)
        hidden_states = self.act_expand(hidden_states)
        hidden_states = torch.mul(inputs, hidden_states)

        return hidden_states


class AlignVisionFinalBlockLayer(nn.Module):
    r"""
    This corresponds to the final phase of each block in the original implementation.
    """

    def __init__(
        self, config: AlignVisionConfig, in_dim: int, out_dim: int, stride: int, drop_rate: float, id_skip: bool
    ):
        super().__init__()
        self.apply_dropout = stride == 1 and not id_skip
        self.project_conv = nn.Conv2d(
            in_channels=in_dim,
            out_channels=out_dim,
            kernel_size=1,
            padding="same",
            bias=False,
        )
        self.project_bn = nn.BatchNorm2d(
            num_features=out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum
        )
        self.dropout = nn.Dropout(p=drop_rate)

    def forward(self, embeddings: torch.FloatTensor, hidden_states: torch.FloatTensor) -> torch.Tensor:
        hidden_states = self.project_conv(hidden_states)
        hidden_states = self.project_bn(hidden_states)

        if self.apply_dropout:
            hidden_states = self.dropout(hidden_states)
            hidden_states = hidden_states + embeddings

        return hidden_states


class AlignVisionBlock(nn.Module):
    r"""
    This corresponds to the block module of the original EfficientNet vision encoder implementation.

    Args:
        config ([`AlignVisionConfig`]):
            Model configuration class.
        in_dim (`int`):
            Number of input channels.
        out_dim (`int`):
            Number of output channels.
        stride (`int`):
            Stride size to be used in convolution layers.
        expand_ratio (`int`):
            Expand ratio to set the output dimensions for the expansion and squeeze-excite layers.
        kernel_size (`int`):
            Kernel size for the depthwise convolution layer.
        drop_rate (`float`):
            Dropout rate to be used in the final phase of each block.
        id_skip (`bool`):
            Whether to apply dropout and sum the final hidden states with the input embeddings during the final phase
            of each block. Set to `True` for the first block of each stage.
        adjust_padding (`bool`):
            Whether to apply padding to only right and bottom side of the input kernel before the depthwise convolution
            operation, set to `True` for inputs with odd input sizes.
    """

    def __init__(
        self,
        config: AlignVisionConfig,
        in_dim: int,
        out_dim: int,
        stride: int,
        expand_ratio: int,
        kernel_size: int,
        drop_rate: float,
        id_skip: bool,
        adjust_padding: bool,
    ):
        super().__init__()
        self.expand_ratio = expand_ratio
        self.expand = True if self.expand_ratio != 1 else False
        expand_in_dim = in_dim * expand_ratio

        if self.expand:
            self.expansion = AlignVisionExpansionLayer(
                config=config, in_dim=in_dim, out_dim=expand_in_dim, stride=stride
            )

        self.depthwise_conv = AlignVisionDepthwiseLayer(
            config=config,
            in_dim=expand_in_dim if self.expand else in_dim,
            stride=stride,
            kernel_size=kernel_size,
            adjust_padding=adjust_padding,
        )
        self.squeeze_excite = AlignVisionSqueezeExciteLayer(
            config=config, in_dim=in_dim, expand_dim=expand_in_dim, expand=self.expand
        )
        self.projection = AlignVisionFinalBlockLayer(
            config=config,
            in_dim=expand_in_dim if self.expand else in_dim,
            out_dim=out_dim,
            stride=stride,
            drop_rate=drop_rate,
            id_skip=id_skip,
        )

    def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
        embeddings = hidden_states
        # Expansion and depthwise convolution phase
        if self.expand_ratio != 1:
            hidden_states = self.expansion(hidden_states)
        hidden_states = self.depthwise_conv(hidden_states)

        # Squeeze and excite phase
        hidden_states = self.squeeze_excite(hidden_states)
        hidden_states = self.projection(embeddings, hidden_states)

        return hidden_states


class AlignVisionEncoder(nn.Module):
    r"""
    Forward propagates the embeddings through each vision encoder (EfficientNet) block.

    Args:
        config ([`AlignVisionConfig`]):
            Model configuration class.
    """

    def __init__(self, config: AlignVisionConfig):
        super().__init__()
        self.depth_coefficient = config.depth_coefficient

        def round_repeats(repeats):
            # Round number of block repeats based on depth multiplier.
            return int(math.ceil(self.depth_coefficient * repeats))

        num_base_blocks = len(config.in_channels)
        num_blocks = sum(round_repeats(n) for n in config.num_block_repeats)

        curr_block_num = 0
        blocks = []
        for i in range(num_base_blocks):
            in_dim = round_filters(config, config.in_channels[i])
            out_dim = round_filters(config, config.out_channels[i])
            stride = config.strides[i]
            kernel_size = config.kernel_sizes[i]
            expand_ratio = config.expand_ratios[i]

            for j in range(round_repeats(config.num_block_repeats[i])):
                id_skip = True if j == 0 else False
                stride = 1 if j > 0 else stride
                in_dim = out_dim if j > 0 else in_dim
                adjust_padding = False if curr_block_num in config.depthwise_padding else True
                drop_rate = config.drop_connect_rate * curr_block_num / num_blocks

                block = AlignVisionBlock(
                    config=config,
                    in_dim=in_dim,
                    out_dim=out_dim,
                    stride=stride,
                    kernel_size=kernel_size,
                    expand_ratio=expand_ratio,
                    drop_rate=drop_rate,
                    id_skip=id_skip,
                    adjust_padding=adjust_padding,
                )
                blocks.append(block)
                curr_block_num += 1

        self.blocks = nn.ModuleList(blocks)

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> BaseModelOutputWithNoAttention:
        all_hidden_states = (hidden_states,) if output_hidden_states else None

        for block in self.blocks:
            hidden_states = block(hidden_states)
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
        )


# Copied from transformers.models.bert.modeling_bert.BertEmbeddings with Bert->AlignText
class AlignTextEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )
        self.register_buffer(
            "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
        )

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        past_key_values_length: int = 0,
    ) -> torch.Tensor:
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]

        # When token_type_ids was not passed, use the registered all-zeros buffer so the model can
        # be traced without requiring the caller to build it by hand.
        if token_type_ids is None:
            if hasattr(self, "token_type_ids"):
                buffered_token_type_ids = self.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = inputs_embeds + token_type_embeddings
        if self.position_embedding_type == "absolute":
            position_embeddings = self.position_embeddings(position_ids)
            embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings


# Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->AlignText
class AlignTextSelfAttention(nn.Module):
    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
        self.position_embedding_type = position_embedding_type or getattr(
            config, "position_embedding_type", "absolute"
        )
        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            self.max_position_embeddings = config.max_position_embeddings
            self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)

        self.is_decoder = config.is_decoder

    def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        mixed_query_layer = self.query(hidden_states)

        # If this is instantiated as a cross-attention module, the keys and values come from an
        # encoder; the attention mask needs to be such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None

        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))

        query_layer = self.transpose_for_scores(mixed_query_layer)

        use_cache = past_key_value is not None
        if self.is_decoder:
            # if used as a decoder, cache the key/value states so later steps can reuse them
            past_key_value = (key_layer, value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
            query_length, key_length = query_layer.shape[2], key_layer.shape[2]
            if use_cache:
                position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
                    -1, 1
                )
            else:
                position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
            position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
            distance = position_ids_l - position_ids_r

            positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
            positional_embedding = positional_embedding.to(dtype=query_layer.dtype)  # fp16 compatibility

            if self.position_embedding_type == "relative_key":
                relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores
            elif self.position_embedding_type == "relative_key_query":
                relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
                relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
                attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the AlignTextModel forward() function)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        if self.is_decoder:
            outputs = outputs + (past_key_value,)
        return outputs


# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->AlignText
class AlignTextSelfOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


ALIGN_TEXT_SELF_ATTENTION_CLASSES = {
    "eager": AlignTextSelfAttention,
}


# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->AlignText
class AlignTextAttention(nn.Module):
    def __init__(self, config, position_embedding_type=None):
        super().__init__()
        self.self = ALIGN_TEXT_SELF_ATTENTION_CLASSES[config._attn_implementation](
            config, position_embedding_type=position_embedding_type
        )
        self.output = AlignTextSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            encoder_attention_mask,
            past_key_value,
            output_attentions,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->AlignText
class AlignTextIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->AlignText
class AlignTextOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


# Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->AlignText
class AlignTextLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = AlignTextAttention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            if not self.is_decoder:
                raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
            self.crossattention = AlignTextAttention(config, position_embedding_type="absolute")
        self.intermediate = AlignTextIntermediate(config)
        self.output = AlignTextOutput(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
            past_key_value=self_attn_past_key_value,
        )
        attention_output = self_attention_outputs[0]

        # if decoder, the last output is a tuple of self-attn cache
        if self.is_decoder:
            outputs = self_attention_outputs[1:-1]
            present_key_value = self_attention_outputs[-1]
        else:
            outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        cross_attn_present_key_value = None
        if self.is_decoder and encoder_hidden_states is not None:
            if not hasattr(self, "crossattention"):
                raise ValueError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
                    " by setting `config.add_cross_attention=True`"
                )

            # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            cross_attention_outputs = self.crossattention(
                attention_output,
                attention_mask,
                head_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                cross_attn_past_key_value,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:-1]  # add cross attentions if we output attention weights

            # add cross-attn cache to positions 3,4 of present_key_value tuple
            cross_attn_present_key_value = cross_attention_outputs[-1]
            present_key_value = present_key_value + cross_attn_present_key_value

        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs

        # if decoder, return the attn key/values as the last output
        if self.is_decoder:
            outputs = outputs + (present_key_value,)

        return outputs

    def feed_forward_chunk(self, attention_output):
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(attention_output, intermediate_output)
        return layer_output


# Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->AlignText
class AlignTextEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([AlignTextLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        next_decoder_cache = () if use_cache else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None
            past_key_value = past_key_values[i] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    past_key_value,
                    output_attentions,
                )

            hidden_states = layer_outputs[0]
            if use_cache:
                next_decoder_cache += (layer_outputs[-1],)
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [
                    hidden_states,
                    next_decoder_cache,
                    all_hidden_states,
                    all_self_attentions,
                    all_cross_attentions,
                ]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=next_decoder_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )


# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->AlignText
class AlignTextPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # We "pool" the model by simply taking the hidden state corresponding to the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output


class AlignPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = AlignConfig
    base_model_prefix = "align"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, AlignModel):
            nn.init.xavier_uniform_(module.text_projection.weight)
            module.text_projection.bias.data.zero_()
            module.text_projection._is_hf_initialized = True
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        if isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


@add_start_docstrings(
    """The text model from ALIGN without any head or projection on top.""",
    ALIGN_START_DOCSTRING,
)
class AlignTextModel(AlignPreTrainedModel):
    config_class = AlignTextConfig
    _no_split_modules = ["AlignTextEmbeddings"]

    def __init__(self, config: AlignTextConfig, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        self.embeddings = AlignTextEmbeddings(config)
        self.encoder = AlignTextEncoder(config)

        self.pooler = AlignTextPooler(config) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    @add_start_docstrings_to_model_forward(ALIGN_TEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=AlignTextConfig)
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPoolingAndCrossAttentions]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, AlignTextModel

        >>> model = AlignTextModel.from_pretrained("kakaobrain/align-base")
        >>> tokenizer = AutoTokenizer.from_pretrained("kakaobrain/align-base")

        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled ([CLS] token) states
        ```NzDYou cannot specify both input_ids and inputs_embeds at the same timer   z5You have to specify either input_ids or inputs_embedsrD   r   r  )r  r   r   r  )r5  r6  r:  r   r   r   r   )r   pooler_outputr    r.   r  )rR   r:  r   use_return_dictr$  %warn_if_padding_and_no_attention_maskr  rE   r%   onesr  r   r   r   r   r  get_extended_attention_maskget_head_maskr  r  r  r   r    r.   r  )r=   r  r5  r   r   r6  r  r:  r   r   r  
batch_sizer  rE   r  r  extended_attention_maskembedding_outputencoder_outputssequence_outputr  s                        r*   r   zAlignTextModel.forward  s<   > 2C1N-TXT_T_TqTq$8$D $++JjJj 	 &1%<k$++B]B] ]%>cdd"66y.Q#..*K&',,.s3KTUU!,
J%.%:!!@T@T!"ZZ*j)A6RN!t(89*.//*H*HKZK*X'3J3Q3QR\^h3i0!A!&[

SY!Z 150P0PQ_al0m &&y$++2O2OP	??%)'	 + 
 ,,2/!5# ' 
 *!,8<8OO4UY#]3oab6III;-')77&11,==
 	
r)   T	NNNNNNNNN)r!   r"   r#   r   r  _no_split_modulesr   rp   r  r  r   ALIGN_TEXT_INPUTS_DOCSTRINGr   r   r   r%   r   r   r   r   r   r   s   @r*   r  r    s5   
 #L./
 
4 
/0 ++FG+Wfuv -11515/3,004,0/3&*`
ELL)`
 !.`
 !.	`

 u||,`
 ELL)`
  -`
 $D>`
 'tn`
 d^`
 
uBB	C`
 w H`
r)   r  zBThe vision model from ALIGN without any head or projection on top.c                        e Zd ZeZdZdZdef fdZdej                  fdZ
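# Illustrative sketch (not in the original source): driving AlignTextModel on its
# own with a randomly initialized config, so it runs offline. The default
# AlignTextConfig is BERT-base sized (hidden_size=768); the token ids are arbitrary.
#
# >>> import torch
# >>> from transformers import AlignTextConfig, AlignTextModel
#
# >>> config = AlignTextConfig()
# >>> model = AlignTextModel(config, add_pooling_layer=True)
# >>> input_ids = torch.randint(1, config.vocab_size, (2, 12))
# >>> outputs = model(input_ids=input_ids, attention_mask=torch.ones(2, 12, dtype=torch.long))
# >>> outputs.last_hidden_state.shape, outputs.pooler_output.shape
# (torch.Size([2, 12, 768]), torch.Size([2, 768]))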
@add_start_docstrings(
    "The vision model from ALIGN without any head or projection on top.",
    ALIGN_START_DOCSTRING,
)
class AlignVisionModel(AlignPreTrainedModel):
    config_class = AlignVisionConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def __init__(self, config: AlignVisionConfig):
        super().__init__(config)
        self.config = config
        self.embeddings = AlignVisionEmbeddings(config)
        self.encoder = AlignVisionEncoder(config)

        # Final pooling layer. The kernel covers the whole feature map, so with
        # `ceil_mode=True` each channel is reduced to a single value.
        if config.pooling_type == "mean":
            self.pooler = nn.AvgPool2d(config.hidden_dim, ceil_mode=True)
        elif config.pooling_type == "max":
            self.pooler = nn.MaxPool2d(config.hidden_dim, ceil_mode=True)
        else:
            raise ValueError(f"config.pooling must be one of ['mean', 'max'] got {config.pooling_type}")

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.embeddings.convolution

    @add_start_docstrings_to_model_forward(ALIGN_VISION_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=AlignVisionConfig)
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPoolingAndNoAttention]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, AlignVisionModel

        >>> model = AlignVisionModel.from_pretrained("kakaobrain/align-base")
        >>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled features
        ```"""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        embedding_output = self.embeddings(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        # Apply pooling, then reshape (batch_size, hidden_dim, 1, 1) -> (batch_size, hidden_dim)
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        pooled_output = pooled_output.reshape(pooled_output.shape[:2])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
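# Illustrative sketch (not in the original source): running AlignVisionModel on a
# dummy batch with random weights. 289x289 matches the crop size of the
# "kakaobrain/align-base" processor, but any resolution the EfficientNet backbone
# accepts works; the channel count of the pooled output depends on the config.
#
# >>> import torch
# >>> from transformers import AlignVisionConfig, AlignVisionModel
#
# >>> model = AlignVisionModel(AlignVisionConfig())
# >>> outputs = model(pixel_values=torch.randn(2, 3, 289, 289))
# >>> outputs.last_hidden_state.ndim, outputs.pooler_output.ndim  # (B, C, H, W) and (B, C)
# (4, 2)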
@add_start_docstrings(ALIGN_START_DOCSTRING)
class AlignModel(AlignPreTrainedModel):
    config_class = AlignConfig

    def __init__(self, config: AlignConfig):
        super().__init__(config)

        if not isinstance(config.text_config, AlignTextConfig):
            raise TypeError(
                "config.text_config is expected to be of type AlignTextConfig but is of type"
                f" {type(config.text_config)}."
            )

        if not isinstance(config.vision_config, AlignVisionConfig):
            raise TypeError(
                "config.vision_config is expected to be of type AlignVisionConfig but is of type"
                f" {type(config.vision_config)}."
            )

        text_config = config.text_config
        vision_config = config.vision_config

        self.projection_dim = config.projection_dim
        self.text_embed_dim = text_config.hidden_size

        self.text_model = AlignTextModel(text_config)
        self.vision_model = AlignVisionModel(vision_config)

        self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim)
        self.temperature = nn.Parameter(torch.tensor(self.config.temperature_init_value))

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(ALIGN_TEXT_INPUTS_DOCSTRING)
    def get_text_features(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        r"""
        Returns:
            text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
            applying the projection layer to the pooled output of [`AlignTextModel`].

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, AlignModel

        >>> model = AlignModel.from_pretrained("kakaobrain/align-base")
        >>> tokenizer = AutoTokenizer.from_pretrained("kakaobrain/align-base")

        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
        >>> text_features = model.get_text_features(**inputs)
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = text_outputs[0][:, 0, :]
        text_features = self.text_projection(last_hidden_state)

        return text_features

    @add_start_docstrings_to_model_forward(ALIGN_VISION_INPUTS_DOCSTRING)
    def get_image_features(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        r"""
        Returns:
            image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained
            from the pooled output of [`AlignVisionModel`].

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, AlignModel

        >>> model = AlignModel.from_pretrained("kakaobrain/align-base")
        >>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> image_features = model.get_image_features(**inputs)
        ```"""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        image_features = vision_outputs[1]  # pooled_output

        return image_features

    @add_start_docstrings_to_model_forward(ALIGN_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=AlignOutput, config_class=AlignConfig)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        return_loss: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, AlignOutput]:
        r"""
        Returns:

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, AlignModel

        >>> model = AlignModel.from_pretrained("kakaobrain/align-base")
        >>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(
        ...     images=image, text=["a photo of a cat", "a photo of a dog"], return_tensors="pt", padding=True
        ... )

        >>> outputs = model(**inputs)
        >>> logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
        >>> probs = logits_per_image.softmax(dim=1)  # we can take the softmax to get the label probabilities
        ```"""
        # Use the ALIGN model's config for these fields (if specified) instead of those of the vision & text components.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        image_embeds = vision_outputs[1]
        text_embeds = text_outputs[0][:, 0, :]
        text_embeds = self.text_projection(text_embeds)

        # normalized features
        image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
        text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)

        # cosine similarity as logits, scaled by the learned temperature
        logits_per_text = torch.matmul(text_embeds, image_embeds.t()) / self.temperature
        logits_per_image = logits_per_text.t()

        loss = None
        if return_loss:
            loss = align_loss(logits_per_text)

        if not return_dict:
            output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
            return ((loss,) + output) if loss is not None else output

        return AlignOutput(
            loss=loss,
            logits_per_image=logits_per_image,
            logits_per_text=logits_per_text,
            text_embeds=text_embeds,
            image_embeds=image_embeds,
            text_model_output=text_outputs,
            vision_model_output=vision_outputs,
        )
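# Illustrative sketch (not in the original source): image-text retrieval with the
# two feature extractors above. `get_text_features`/`get_image_features` return
# unnormalized vectors, so we L2-normalize before taking cosine similarities. The
# random pixel values stand in for processor output of the same shape.
#
# >>> import torch
# >>> from transformers import AutoProcessor, AlignModel
#
# >>> model = AlignModel.from_pretrained("kakaobrain/align-base")
# >>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base")
#
# >>> text_inputs = processor(text=["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
# >>> text_emb = model.get_text_features(**text_inputs)
# >>> img_emb = model.get_image_features(pixel_values=torch.randn(1, 3, 289, 289))
#
# >>> text_emb = text_emb / text_emb.norm(dim=-1, keepdim=True)
# >>> img_emb = img_emb / img_emb.norm(dim=-1, keepdim=True)
# >>> sims = img_emb @ text_emb.t()  # (num_images, num_texts) cosine similarities
# >>> best_caption = sims.argmax(dim=-1)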
__all__ = ["AlignPreTrainedModel", "AlignTextModel", "AlignVisionModel", "AlignModel"]
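# Illustrative sketch (not in the original source): the training-style path. With
# `return_loss=True`, forward applies `align_loss`, a symmetric cross-entropy over
# `logits_per_text` and its transpose that assumes the i-th caption describes the
# i-th image. Random weights and inputs keep the example offline; shapes are
# chosen to satisfy the default config.
#
# >>> import torch
# >>> from transformers import AlignConfig, AlignModel
#
# >>> model = AlignModel(AlignConfig())
# >>> outputs = model(
# ...     input_ids=torch.randint(1, 1000, (2, 8)),
# ...     attention_mask=torch.ones(2, 8, dtype=torch.long),
# ...     pixel_values=torch.randn(2, 3, 289, 289),
# ...     return_loss=True,
# ... )
# >>> outputs.loss.backward()  # gradients flow to both towers and the temperature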