
    sg-                         d Z ddlZddlmZ ddlmZ ddlmZ ddlm	Z	 ddl
mZ dd	lmZmZ  G d
 dee	      Z G d de      Zy)zBEiT model configuration    NOrderedDict)Mapping)version   )PretrainedConfig)
OnnxConfig)BackboneConfigMixin*get_aligned_output_features_output_indicesc                   j     e Zd ZdZdZddddddddd	d
ddddddddddg dddddddddddf fd	Z xZS )
BeitConfiga  
    This is the configuration class to store the configuration of a [`BeitModel`]. It is used to instantiate a BEiT
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the BEiT
    [microsoft/beit-base-patch16-224-pt22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k) architecture.

    Args:
        vocab_size (`int`, *optional*, defaults to 8192):
            Vocabulary size of the BEiT model. Defines the number of different image tokens that can be used during
            pre-training.
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        image_size (`int`, *optional*, defaults to 224):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        use_mask_token (`bool`, *optional*, defaults to `False`):
            Whether to use a mask token for masked image modeling.
        use_absolute_position_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to use BERT-style absolute position embeddings.
        use_relative_position_bias (`bool`, *optional*, defaults to `False`):
            Whether to use T5-style relative position embeddings in the self-attention layers.
        use_shared_relative_position_bias (`bool`, *optional*, defaults to `False`):
            Whether to use the same relative position embeddings across all self-attention layers of the Transformer.
        layer_scale_init_value (`float`, *optional*, defaults to 0.1):
            Initial value for layer scale in the self-attention layers: 0.1 for base, 1e-5 for large. Set to 0 to disable layer scale.
        drop_path_rate (`float`, *optional*, defaults to 0.1):
            Stochastic depth rate per sample (when applied in the main path of residual layers).
        use_mean_pooling (`bool`, *optional*, defaults to `True`):
            Whether to mean pool the final hidden states of the patches instead of using the final hidden state of the
            CLS token, before applying the classification head.
        pool_scales (`Tuple[int]`, *optional*, defaults to `[1, 2, 3, 6]`):
            Pooling scales used in the Pooling Pyramid Module applied on the last feature map.
        use_auxiliary_head (`bool`, *optional*, defaults to `True`):
            Whether to use an auxiliary head during training.
        auxiliary_loss_weight (`float`, *optional*, defaults to 0.4):
            Weight of the cross-entropy loss of the auxiliary head.
        auxiliary_channels (`int`, *optional*, defaults to 256):
            Number of channels to use in the auxiliary head.
        auxiliary_num_convs (`int`, *optional*, defaults to 1):
            Number of convolutional layers to use in the auxiliary head.
        auxiliary_concat_input (`bool`, *optional*, defaults to `False`):
            Whether to concatenate the output of the auxiliary head with the input before the classification layer.
        semantic_loss_ignore_index (`int`, *optional*, defaults to 255):
            The index that is ignored by the loss function of the semantic segmentation model.
        out_features (`List[str]`, *optional*):
            If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
            (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
            corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.
        out_indices (`List[int]`, *optional*):
            If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
            many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
            If unset and `out_features` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.
        add_fpn (`bool`, *optional*, defaults to `False`):
            Whether to add an FPN as part of the backbone. Only relevant for [`BeitBackbone`].
        reshape_hidden_states (`bool`, *optional*, defaults to `True`):
            Whether to reshape the feature maps to 4D tensors of shape `(batch_size, hidden_size, height, width)` in
            case the model is used as backbone. If `False`, the feature maps will be 3D tensors of shape `(batch_size,
            seq_len, hidden_size)`. Only relevant for [`BeitBackbone`].

    Example:

    ```python
    >>> from transformers import BeitConfig, BeitModel

    >>> # Initializing a BEiT beit-base-patch16-224-pt22k style configuration
    >>> configuration = BeitConfig()

    >>> # Initializing a model (with random weights) from the beit-base-patch16-224-pt22k style configuration
    >>> model = BeitModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
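
    >>> # A sketch of the backbone arguments (values are illustrative): with the
    >>> # default 12 hidden layers, the stage names are "stem", "stage1", ...,
    >>> # "stage12", so the last stage can be selected by index
    >>> backbone_config = BeitConfig(out_indices=[12])
    >>> backbone_config.out_features
    ['stage12']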
    ```"""

    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        out_features=None,
        out_indices=None,
        add_fpn=False,
        reshape_hidden_states=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index

        # handle backwards compatibility for the deprecated `segmentation_indices` argument
        if "segmentation_indices" in kwargs:
            warnings.warn(
                "The `segmentation_indices` argument is deprecated and will be removed in a future version, use"
                " `out_indices` instead.",
                FutureWarning,
            )
            out_indices = kwargs.pop("segmentation_indices")

        # backbone attributes
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, self.num_hidden_layers + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
        self.add_fpn = add_fpn
        self.reshape_hidden_states = reshape_hidden_states


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
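

# A minimal usage sketch: wiring the two classes above together to inspect the ONNX
# export metadata. This assumes `OnnxConfig` accepts a `PretrainedConfig` as its first
# positional argument, as in recent transformers releases.
#
#     >>> onnx_config = BeitOnnxConfig(BeitConfig())
#     >>> onnx_config.inputs
#     OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])
#     >>> onnx_config.atol_for_validation
#     0.0001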