"""Pop2Piano model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class Pop2PianoConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Pop2PianoForConditionalGeneration`]. It is used
    to instantiate a Pop2PianoForConditionalGeneration model according to the specified arguments, defining the model
    architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the
    Pop2Piano [sweetcocoa/pop2piano](https://huggingface.co/sweetcocoa/pop2piano) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Arguments:
        vocab_size (`int`, *optional*, defaults to 2400):
            Vocabulary size of the `Pop2PianoForConditionalGeneration` model. Defines the number of different tokens
            that can be represented by the `input_ids` passed when calling [`Pop2PianoForConditionalGeneration`].
        composer_vocab_size (`int`, *optional*, defaults to 21):
            Denotes the number of composers.
        d_model (`int`, *optional*, defaults to 512):
            Size of the encoder layers and the pooler layer.
        d_kv (`int`, *optional*, defaults to 64):
            Size of the key, query, value projections per attention head. The `inner_dim` of the projection layer will
            be defined as `num_heads * d_kv`.
        d_ff (`int`, *optional*, defaults to 2048):
            Size of the intermediate feed forward layer in each `Pop2PianoBlock`.
        num_layers (`int`, *optional*, defaults to 6):
            Number of hidden layers in the Transformer encoder.
        num_decoder_layers (`int`, *optional*):
            Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set.
        num_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        relative_attention_num_buckets (`int`, *optional*, defaults to 32):
            The number of buckets to use for each attention layer.
        relative_attention_max_distance (`int`, *optional*, defaults to 128):
            The maximum distance used for the bucket separation of longer sequences.
        dropout_rate (`float`, *optional*, defaults to 0.1):
            The ratio for all dropout layers.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-6):
            The epsilon used by the layer normalization layers.
        initializer_factor (`float`, *optional*, defaults to 1.0):
            A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization
            testing).
        feed_forward_proj (`str`, *optional*, defaults to `"gated-gelu"`):
            Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        dense_act_fn (`str`, *optional*, defaults to `"relu"`):
            Type of activation function to be used in `Pop2PianoDenseActDense` and in `Pop2PianoDenseGatedActDense`.
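
    Example:

    ```python
    >>> # A minimal usage sketch (not part of the recovered docstring); assumes the
    >>> # Pop2Piano classes are exported by the installed `transformers` package.
    >>> from transformers import Pop2PianoConfig, Pop2PianoForConditionalGeneration

    >>> # Initializing a Pop2Piano configuration with the library defaults
    >>> configuration = Pop2PianoConfig()

    >>> # Initializing a model (with random weights) from that configuration
    >>> model = Pop2PianoForConditionalGeneration(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```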
    """

    model_type = "pop2piano"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=2400,
        composer_vocab_size=21,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        dense_act_fn="relu",
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.composer_vocab_size = composer_vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        # Fall back to the encoder depth when no decoder depth is given (T5-style symmetry).
        self.num_decoder_layers = num_decoder_layers if num_decoder_layers is not None else self.num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.dense_act_fn = dense_act_fn
        # A "gated-*" projection (e.g. "gated-gelu") selects the gated feed-forward variant.
        self.is_gated_act = self.feed_forward_proj.split("-")[0] == "gated"
        # Aliases matching the common attribute names expected on a `PretrainedConfig`.
        self.hidden_size = self.d_model
        self.num_attention_heads = num_heads
        self.num_hidden_layers = num_layers

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )