"""DETA model configuration"""

from ....configuration_utils import PretrainedConfig
from ....utils import logging
from ...auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)


class DetaConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`DetaModel`]. It is used to instantiate a DETA
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the DETA
    [SenseTime/deformable-detr](https://huggingface.co/SenseTime/deformable-detr) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        backbone_config (`PretrainedConfig` or `dict`, *optional*, defaults to `ResNetConfig()`):
            The configuration of the backbone model.
        backbone (`str`, *optional*):
            Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
            will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
            is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
        use_pretrained_backbone (`bool`, *optional*, defaults to `False`):
            Whether to use pretrained weights for the backbone.
        use_timm_backbone (`bool`, *optional*, defaults to `False`):
            Whether to load `backbone` from the timm library. If `False`, the backbone is loaded from the transformers
            library.
        backbone_kwargs (`dict`, *optional*):
            Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
            e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
        num_queries (`int`, *optional*, defaults to 900):
            Number of object queries, i.e. detection slots. This is the maximal number of objects [`DetaModel`] can
            detect in a single image. In case `two_stage` is set to `True`, we use `two_stage_num_proposals` instead.
        d_model (`int`, *optional*, defaults to 256):
            Dimension of the layers.
        encoder_layers (`int`, *optional*, defaults to 6):
            Number of encoder layers.
        decoder_layers (`int`, *optional*, defaults to 6):
            Number of decoder layers.
        encoder_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        decoder_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer decoder.
        decoder_ffn_dim (`int`, *optional*, defaults to 2048):
            Dimension of the "intermediate" (often named feed-forward) layer in decoder.
        encoder_ffn_dim (`int`, *optional*, defaults to 2048):
            Dimension of the "intermediate" (often named feed-forward) layer in encoder.
        activation_function (`str` or `function`, *optional*, defaults to `"relu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"silu"` and `"gelu_new"` are supported.
        dropout (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        activation_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for activations inside the fully connected layer.
        init_std (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        init_xavier_std (`float`, *optional*, defaults to 1):
            The scaling factor used for the Xavier initialization gain in the HM Attention map module.
        encoder_layerdrop (`float`, *optional*, defaults to 0.0):
            The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
            for more details.
        auxiliary_loss (`bool`, *optional*, defaults to `False`):
            Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
        position_embedding_type (`str`, *optional*, defaults to `"sine"`):
            Type of position embeddings to be used on top of the image features. One of `"sine"` or `"learned"`.
        class_cost (`float`, *optional*, defaults to 1):
            Relative weight of the classification error in the Hungarian matching cost.
        bbox_cost (`float`, *optional*, defaults to 5):
            Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.
        giou_cost (`float`, *optional*, defaults to 2):
            Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
        mask_loss_coefficient (`float`, *optional*, defaults to 1):
            Relative weight of the Focal loss in the panoptic segmentation loss.
        dice_loss_coefficient (`float`, *optional*, defaults to 1):
            Relative weight of the DICE/F-1 loss in the panoptic segmentation loss.
        bbox_loss_coefficient (`float`, *optional*, defaults to 5):
            Relative weight of the L1 bounding box loss in the object detection loss.
        giou_loss_coefficient (`float`, *optional*, defaults to 2):
            Relative weight of the generalized IoU loss in the object detection loss.
        eos_coefficient (`float`, *optional*, defaults to 0.1):
            Relative classification weight of the 'no-object' class in the object detection loss.
        num_feature_levels (`int`, *optional*, defaults to 5):
            The number of input feature levels.
        encoder_n_points (`int`, *optional*, defaults to 4):
            The number of sampled keys in each feature level for each attention head in the encoder.
        decoder_n_points (`int`, *optional*, defaults to 4):
            The number of sampled keys in each feature level for each attention head in the decoder.
        two_stage (`bool`, *optional*, defaults to `True`):
            Whether to apply a two-stage deformable DETR, where the region proposals are generated by a variant of
            DETA and then fed into the decoder for iterative bounding box refinement.
        two_stage_num_proposals (`int`, *optional*, defaults to 300):
            The number of region proposals to be generated, in case `two_stage` is set to `True`.
        with_box_refine (`bool`, *optional*, defaults to `True`):
            Whether to apply iterative bounding box refinement, where each decoder layer refines the bounding boxes
            based on the predictions from the previous layer.
        focal_alpha (`float`, *optional*, defaults to 0.25):
            Alpha parameter in the focal loss.
        assign_first_stage (`bool`, *optional*, defaults to `True`):
            Whether to assign each prediction to the ground truth object with which it overlaps most, provided the overlap is larger than a threshold of 0.7.
        assign_second_stage (`bool`, *optional*, defaults to `True`):
            Whether the assignment procedure in the second stage should closely follow the first-stage assignment procedure.
        disable_custom_kernels (`bool`, *optional*, defaults to `True`):
            Disable the use of custom CUDA and CPU kernels. This option is necessary for the ONNX export, as custom
            kernels are not supported by PyTorch ONNX export.

    Examples:

    ```python
    >>> from transformers import DetaConfig, DetaModel

    >>> # Initializing a DETA SenseTime/deformable-detr style configuration
    >>> configuration = DetaConfig()
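
    >>> # Defaults can also be overridden at construction time (the values below are illustrative only).
    >>> # Note that the config rejects `two_stage=True` combined with `with_box_refine=False`.
    >>> custom_configuration = DetaConfig(num_queries=300, two_stage=True, with_box_refine=True)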

    >>> # Initializing a model (with random weights) from the SenseTime/deformable-detr style configuration
    >>> model = DetaModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
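
    >>> # `hidden_size` and `num_attention_heads` are aliases resolved through `attribute_map`,
    >>> # so `hidden_size` reads back the value of `d_model` (a quick sanity check)
    >>> configuration.hidden_size == configuration.d_model
    True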
    ```"""

    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        backbone=None,
        use_pretrained_backbone=False,
        use_timm_backbone=False,
        backbone_kwargs=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        assign_second_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        disable_custom_kernels=True,
        **kwargs,
    ):
        if use_pretrained_backbone:
            raise ValueError("Pretrained backbones are not supported yet.")

        if backbone_config is not None and backbone is not None:
            raise ValueError("You can't specify both `backbone` and `backbone_config`.")

        if backbone_config is None and backbone is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        if backbone_kwargs is not None and backbone_kwargs and backbone_config is not None:
            raise ValueError("You can't specify both `backbone_kwargs` and `backbone_config`.")

        self.backbone_config = backbone_config
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = use_timm_backbone
        self.backbone_kwargs = backbone_kwargs
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable-attention attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        self.assign_second_stage = assign_second_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher costs
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model