
"""BiT model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)


class BitConfig(BackboneConfigMixin, PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`BitModel`]. It is used to instantiate a BiT
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the BiT
    [google/bit-50](https://huggingface.co/google/bit-50) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        embedding_size (`int`, *optional*, defaults to 64):
            Dimensionality (hidden size) for the embedding layer.
        hidden_sizes (`List[int]`, *optional*, defaults to `[256, 512, 1024, 2048]`):
            Dimensionality (hidden size) at each stage.
        depths (`List[int]`, *optional*, defaults to `[3, 4, 6, 3]`):
            Depth (number of layers) for each stage.
        layer_type (`str`, *optional*, defaults to `"preactivation"`):
            The layer to use; it can be either `"preactivation"` or `"bottleneck"`.
        hidden_act (`str`, *optional*, defaults to `"relu"`):
            The non-linear activation function in each block. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"`
            are supported.
        global_padding (`str`, *optional*):
            Padding strategy to use for the convolutional layers. Can be either `"valid"`, `"same"`, or `None`.
        num_groups (`int`, *optional*, defaults to 32):
            Number of groups used for the `BitGroupNormActivation` layers.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            The drop path rate for the stochastic depth.
        embedding_dynamic_padding (`bool`, *optional*, defaults to `False`):
            Whether or not to make use of dynamic padding for the embedding layer.
        output_stride (`int`, *optional*, defaults to 32):
            The output stride of the model.
        width_factor (`int`, *optional*, defaults to 1):
            The width factor for the model.
        out_features (`List[str]`, *optional*):
            If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
            (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
            corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.
        out_indices (`List[int]`, *optional*):
            If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
            many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
            If unset and `out_features` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.

    Example:
    ```python
    >>> from transformers import BitConfig, BitModel

    >>> # Initializing a BiT bit-50 style configuration
    >>> configuration = BitConfig()

    >>> # Initializing a model (with random weights) from the bit-50 style configuration
    >>> model = BitModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
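
    When used as a backbone config, `out_features` / `out_indices` select the stages that are returned, and
    `global_padding` is normalized to upper case. A small illustrative snippet (values assume the default
    4-stage layout):

    ```python
    >>> # Request features from the stem and the last stage
    >>> backbone_config = BitConfig(out_features=["stem", "stage4"])
    >>> backbone_config.out_features
    ['stem', 'stage4']

    >>> # The padding strategy is case-insensitive and stored upper-cased
    >>> BitConfig(global_padding="same").global_padding
    'SAME'
    ```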
    bitpreactivation
bottleneckSAMEVALIDr   @   )   i   i   i   )r         r   reluN    g        F   c                 p   t        |   d
i | || j                  vr*t        d| ddj	                  | j                               |<|j                         | j                  v r|j                         }nt        d| d      || _        || _        || _	        || _
        || _        || _        || _        || _        |	| _        |
| _        || _        || _        dgt'        dt)        |      dz         D cg c]  }d| 	 c}z   | _        t-        ||| j*                  	      \  | _        | _        y c c}w )Nzlayer_type=z is not one of ,zPadding strategy z not supportedstemr   stage)out_featuresout_indicesstage_names )super__init__layer_types
ValueErrorjoinuppersupported_paddingnum_channelsembedding_sizehidden_sizesdepths
layer_type
hidden_actglobal_padding
num_groupsdrop_path_rateembedding_dynamic_paddingoutput_stridewidth_factorrangelenr   r   _out_features_out_indices)selfr$   r%   r&   r'   r(   r)   r*   r+   r,   r-   r.   r/   r   r   kwargsidx	__class__s                    \/var/www/html/venv/lib/python3.12/site-packages/transformers/models/bit/configuration_bit.pyr   zBitConfig.__init__[   s@   $ 	"6"T---{:,ochhtO_O_F`Eabcc%##%)?)??!/!5!5!7 #4^4DN!STT(,($$,$,)B&*("8aVWX@Y&Zse}&ZZ0Z%;DL\L\1
-D- '[s   7D3)	__name__
__module____qualname____doc__
model_typer   r#   r   __classcell__)r7   s   @r8   r   r      s[    ;z J"L1K) +""'*
 *
    r   N)r<   configuration_utilsr   utilsr   utils.backbone_utilsr   r   
get_loggerr9   loggerr   r   r?   r8   <module>rE      s9     3  c 
		H	%l
#%5 l
r?   