
"""Mimi model configuration"""

import math

import numpy as np

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class MimiConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of an [`MimiModel`]. It is used to instantiate a
    Mimi model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the
    [kyutai/mimi](https://huggingface.co/kyutai/mimi) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        sampling_rate (`int`, *optional*, defaults to 24000):
            The sampling rate at which the audio waveform should be digitized, expressed in hertz (Hz).
        frame_rate (`float`, *optional*, defaults to 12.5):
            Frame rate of the model, i.e. the number of latent frames produced per second of audio.
        audio_channels (`int`, *optional*, defaults to 1):
            Number of channels in the audio data. Either 1 for mono or 2 for stereo.
        hidden_size (`int`, *optional*, defaults to 512):
            Intermediate representation dimension.
        num_filters (`int`, *optional*, defaults to 64):
            Number of convolution kernels of first `MimiConv1d` down sampling layer.
        num_residual_layers (`int`,  *optional*, defaults to 1):
            Number of residual layers.
        upsampling_ratios (`Sequence[int]`, *optional*):
            Kernel size and stride ratios. The encoder uses downsampling ratios instead of upsampling ratios, hence
            it will use the ratios in the reverse order to the ones specified here, which must match the decoder
            order. If not specified, defaults to `[8, 6, 5, 4]`.
        kernel_size (`int`, *optional*, defaults to 7):
            Kernel size for the initial convolution.
        last_kernel_size (`int`, *optional*, defaults to 3):
            Kernel size for the last convolution layer.
        residual_kernel_size (`int`, *optional*, defaults to 3):
            Kernel size for the residual layers.
        dilation_growth_rate (`int`, *optional*, defaults to 2):
            How much to increase the dilation with each layer.
        use_causal_conv (`bool`, *optional*, defaults to `True`):
            Whether to use fully causal convolution.
        pad_mode (`str`, *optional*, defaults to `"constant"`):
            Padding mode for the convolutions.
        compress (`int`, *optional*, defaults to 2):
            Reduced dimensionality in residual branches.
        trim_right_ratio (`float`, *optional*, defaults to 1.0):
            Ratio for trimming at the right of the transposed convolution under the `use_causal_conv = True` setup. If
            equal to 1.0, it means that all the trimming is done at the right.
        codebook_size (`int`, *optional*, defaults to 2048):
            Number of discrete codes in each codebook.
        codebook_dim (`int`, *optional*, defaults to 256):
            Dimension of the unquantized codebook vectors. If not defined, uses `hidden_size`.
        num_quantizers (`int`, *optional*, defaults to 32):
            Number of quantizer channels, or codebooks, in the quantizer.
        use_conv_shortcut (`bool`, *optional*, defaults to `False`):
            Whether to use a convolutional layer as the 'skip' connection in the `MimiResnetBlock` block. If False,
            an identity function will be used, giving a generic residual connection.
        vector_quantization_hidden_dimension (`int`, *optional*, defaults to 256):
            Intermediate representation dimension in the residual vector quantization space.
        num_semantic_quantizers (`int`, *optional*, defaults to 1):
            Number of semantic quantizer channels, or codebooks, in the semantic quantizer. Must be lower than `num_quantizers`.
        upsample_groups (`int`, *optional*, defaults to 512):
            If `frame_rate!=encodec_frame_rate`, indicates the number of groups used in the upsampling operation to go from one rate to another.
        num_hidden_layers (`int`, *optional*, defaults to 8):
            Number of hidden layers in the Transformer models.
        intermediate_size (`int`, *optional*, defaults to 2048):
            Dimension of the MLP representations.
        num_attention_heads (`int`, *optional*, defaults to 8):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_key_value_heads (`int`, *optional*, defaults to 8):
            This is the number of key_value heads that should be used to implement Grouped Query Attention (GQA). If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group's key and value head should be
            constructed by mean-pooling all the original heads within that group (see the sketch after this argument
            list). For more details, check out [this paper](https://arxiv.org/pdf/2305.13245.pdf). If not specified,
            defaults to `8`.
        head_dim (`int`, *optional*, defaults to `hidden_size // num_attention_heads`):
            The attention head dimension.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 8000):
            The maximum sequence length that this model might ever be used with. Mimi's sliding window attention
            allows sequences of up to 8000 tokens.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon used by the LayerNorm normalization layers.
        use_cache (`bool`, *optional*, defaults to `False`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        sliding_window (`int`, *optional*, defaults to 250):
            Sliding window attention window size. If not specified, will default to `250`.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        layer_scale_initial_scale (`float`, *optional*, defaults to 0.01):
            Initial scale of the residual rescaling operation done in the Transformer models.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
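
    As a minimal illustration of the mean-pooling conversion mentioned for `num_key_value_heads` above (a hedged
    sketch with made-up shapes, not part of any Mimi conversion script):

    ```python
    import torch

    num_attention_heads, num_key_value_heads, head_dim, hidden_size = 8, 4, 64, 512
    # Multi-head key projection weight of shape (num_attention_heads * head_dim, hidden_size)
    k_proj = torch.randn(num_attention_heads * head_dim, hidden_size)
    # Split the 8 key heads into 4 GQA groups of 2 heads each and mean-pool within each group
    heads_per_group = num_attention_heads // num_key_value_heads
    k_gqa = k_proj.view(num_key_value_heads, heads_per_group, head_dim, hidden_size).mean(dim=1)
    # Resulting GQA key projection has shape (num_key_value_heads * head_dim, hidden_size) = (256, 512)
    k_gqa = k_gqa.reshape(num_key_value_heads * head_dim, hidden_size)
    ```
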
    Example:

    ```python
    >>> from transformers import MimiModel, MimiConfig

    >>> # Initializing a "kyutai/mimi" style configuration
    >>> configuration = MimiConfig()

    >>> # Initializing a model (with random weights) from the "kyutai/mimi" style configuration
    >>> model = MimiModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
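
    >>> # Illustrative addition (a sketch, not from the original example): the frame rate of the
    >>> # convolutional stack follows from sampling_rate / prod(upsampling_ratios) = 24000 / 960
    >>> configuration.encodec_frame_rate
    25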
    ```"""

    model_type = "mimi"

    def __init__(
        self,
        sampling_rate=24_000,
        frame_rate=12.5,
        audio_channels=1,
        hidden_size=512,
        num_filters=64,
        num_residual_layers=1,
        upsampling_ratios=None,
        kernel_size=7,
        last_kernel_size=3,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="constant",
        compress=2,
        trim_right_ratio=1.0,
        codebook_size=2048,
        codebook_dim=256,
        num_quantizers=32,
        use_conv_shortcut=False,
        vector_quantization_hidden_dimension=256,
        num_semantic_quantizers=1,
        upsample_groups=512,
        num_hidden_layers=8,
        intermediate_size=2048,
        num_attention_heads=8,
        num_key_value_heads=8,
        head_dim=None,
        hidden_act="gelu",
        max_position_embeddings=8000,
        initializer_range=0.02,
        norm_eps=1e-5,
        use_cache=False,
        rope_theta=10000.0,
        sliding_window=250,
        attention_dropout=0.0,
        layer_scale_initial_scale=0.01,
        attention_bias=False,
        **kwargs,
    ):
        self.sampling_rate = sampling_rate
        self.frame_rate = frame_rate
        self.audio_channels = audio_channels
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios if upsampling_ratios else [8, 6, 5, 4]
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.num_quantizers = num_quantizers
        self.use_conv_shortcut = use_conv_shortcut
        self.vector_quantization_hidden_dimension = vector_quantization_hidden_dimension
        self.upsample_groups = upsample_groups
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.norm_eps = norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.sliding_window = sliding_window
        self.attention_dropout = attention_dropout
        self.head_dim = head_dim or hidden_size // num_attention_heads
        self.layer_scale_initial_scale = layer_scale_initial_scale
        self.attention_bias = attention_bias

        if num_semantic_quantizers >= self.num_quantizers:
            raise ValueError(
                f"The number of semantic quantizers should be lower than the total number of quantizers "
                f"{self.num_quantizers}, but is currently {num_semantic_quantizers}."
            )
        self.num_semantic_quantizers = num_semantic_quantizers
        super().__init__(**kwargs)

    @property
    def encodec_frame_rate(self) -> int:
        # Frame rate of the convolutional encoder/decoder stack: the hop length is the
        # product of the upsampling ratios.
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_codebooks(self) -> int:
        # Alias to num_quantizers, kept for consistency with other codec configurations.
        return self.num_quantizers