import warnings

import torch.nn.functional as F
from torch import Tensor

from .batchnorm import _LazyNormBase, _NormBase


__all__ = [
    "InstanceNorm1d",
    "InstanceNorm2d",
    "InstanceNorm3d",
    "LazyInstanceNorm1d",
    "LazyInstanceNorm2d",
    "LazyInstanceNorm3d",
]


class _InstanceNorm(_NormBase):
    def __init__(
        self,
        num_features: int,
        eps: float = 1e-5,
        momentum: float = 0.1,
        affine: bool = False,
        track_running_stats: bool = False,
        device=None,
        dtype=None,
    ) -> None:
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__(
            num_features, eps, momentum, affine, track_running_stats, **factory_kwargs
        )

    def _check_input_dim(self, input):
        raise NotImplementedError

    def _get_no_batch_dim(self):
        raise NotImplementedError

    def _handle_no_batch_input(self, input):
        # Unbatched input: add a batch dimension of 1, normalize, then drop it.
        return self._apply_instance_norm(input.unsqueeze(0)).squeeze(0)

    def _apply_instance_norm(self, input):
        # Instance statistics are used whenever the module is training or
        # running stats are not tracked; otherwise the running estimates apply.
        return F.instance_norm(
            input,
            self.running_mean,
            self.running_var,
            self.weight,
            self.bias,
            self.training or not self.track_running_stats,
            self.momentum if self.momentum is not None else 0.0,
            self.eps,
        )

    def _load_from_state_dict(
        self,
        state_dict,
        prefix,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        version = local_metadata.get("version", None)
        # Checkpoints saved before 0.4.0 may carry running stats even though
        # track_running_stats=False became the default; reject them explicitly.
        if version is None and not self.track_running_stats:
            running_stats_keys = []
            for name in ("running_mean", "running_var"):
                key = prefix + name
                if key in state_dict:
                    running_stats_keys.append(key)
            if len(running_stats_keys) > 0:
                error_msgs.append(
                    "Unexpected running stats buffer(s) {names} for {klass} "
                    "with track_running_stats=False. If state_dict is a "
                    "checkpoint saved before 0.4.0, this may be expected "
                    "because {klass} does not track running stats by default "
                    "since 0.4.0. Please remove these keys from state_dict. If "
                    "the running stats are actually needed, instead set "
                    "track_running_stats=True in {klass} to enable them. See "
                    "the documentation of {klass} for details.".format(
                        names=" and ".join(f'"{k}"' for k in running_stats_keys),
                        klass=self.__class__.__name__,
                    )
                )
                for key in running_stats_keys:
                    state_dict.pop(key)

        super()._load_from_state_dict(
            state_dict,
            prefix,
            local_metadata,
            strict,
            missing_keys,
            unexpected_keys,
            error_msgs,
        )

    def forward(self, input: Tensor) -> Tensor:
        self._check_input_dim(input)

        feature_dim = input.dim() - self._get_no_batch_dim()
        if input.size(feature_dim) != self.num_features:
            if self.affine:
                raise ValueError(
                    f"expected input's size at dim={feature_dim} to match num_features"
                    f" ({self.num_features}), but got: {input.size(feature_dim)}."
                )
            else:
                warnings.warn(
                    f"input's size at dim={feature_dim} does not match "
                    "num_features. You can silence this warning by not passing "
                    "in num_features, which is not used because affine=False"
                )

        if input.dim() == self._get_no_batch_dim():
            return self._handle_no_batch_input(input)

        return self._apply_instance_norm(input)


class InstanceNorm1d(_InstanceNorm):
    r"""Applies Instance Normalization.

    This operation applies Instance Normalization
    over a 2D (unbatched) or 3D (batched) input as described in the paper
    `Instance Normalization: The Missing Ingredient for Fast Stylization
    <https://arxiv.org/abs/1607.08022>`__.

    .. math::

        y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta

    The mean and standard-deviation are calculated per-dimension separately
    for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
    of size `C` (where `C` is the number of features or channels of the input) if :attr:`affine` is ``True``.
    The standard-deviation is calculated via the biased estimator, equivalent to
    `torch.var(input, unbiased=False)`.

    By default, this layer uses instance statistics computed from input data in
    both training and evaluation modes.

    If :attr:`track_running_stats` is set to ``True``, during training this
    layer keeps running estimates of its computed mean and variance, which are
    then used for normalization during evaluation. The running estimates are
    kept with a default :attr:`momentum` of 0.1.

    .. note::
        This :attr:`momentum` argument is different from the one used in optimizer
        classes and the conventional notion of momentum. Mathematically, the
        update rule for running statistics here is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
        where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
        new observed value.

    .. note::
        :class:`InstanceNorm1d` and :class:`LayerNorm` are very similar, but
        have some subtle differences. :class:`InstanceNorm1d` is applied
        on each channel of channeled data like multidimensional time series, but
        :class:`LayerNorm` is usually applied on an entire sample and often in NLP
        tasks. Additionally, :class:`LayerNorm` applies an elementwise affine
        transform, while :class:`InstanceNorm1d` usually doesn't apply an affine
        transform.

    Args:
        num_features: number of features or channels :math:`C` of the input
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        momentum: the value used for the running_mean and running_var computation. Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters, initialized the same way as done for batch normalization.
            Default: ``False``.
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
            this module does not track such statistics and always uses batch
            statistics in both training and eval modes. Default: ``False``

    Shape:
        - Input: :math:`(N, C, L)` or :math:`(C, L)`
        - Output: :math:`(N, C, L)` or :math:`(C, L)` (same shape as input)

    Examples::

        >>> # Without Learnable Parameters
        >>> m = nn.InstanceNorm1d(100)
        >>> # With Learnable Parameters
        >>> m = nn.InstanceNorm1d(100, affine=True)
        >>> input = torch.randn(20, 100, 40)
        >>> output = m(input)
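        >>> # a minimal sketch of running-stats tracking: with
        >>> # track_running_stats=True the buffers follow the update rule from
        >>> # the note above, (1 - momentum) * x_hat + momentum * x_t
        >>> m = nn.InstanceNorm1d(100, track_running_stats=True)
        >>> output = m(input)
        >>> m.running_mean.shape
        torch.Size([100])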
    """

    def _get_no_batch_dim(self):
        return 2

    def _check_input_dim(self, input):
        if input.dim() not in (2, 3):
            raise ValueError(f"expected 2D or 3D input (got {input.dim()}D input)")


class LazyInstanceNorm1d(_LazyNormBase, _InstanceNorm):
    r"""A :class:`torch.nn.InstanceNorm1d` module with lazy initialization of the ``num_features`` argument.

    The ``num_features`` argument of the :class:`InstanceNorm1d` is inferred from the ``input.size(1)``.
    The attributes that will be lazily initialized are `weight`, `bias`, `running_mean` and `running_var`.

    Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
    on lazy modules and their limitations.

    Args:
        num_features: :math:`C` from an expected input of size
            :math:`(N, C, L)` or :math:`(C, L)`
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        momentum: the value used for the running_mean and running_var computation. Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters, initialized the same way as done for batch normalization.
            Default: ``False``.
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
            this module does not track such statistics and always uses batch
            statistics in both training and eval modes. Default: ``False``

    Shape:
        - Input: :math:`(N, C, L)` or :math:`(C, L)`
        - Output: :math:`(N, C, L)` or :math:`(C, L)` (same shape as input)
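
    Examples::

        >>> # a minimal sketch: ``num_features`` is inferred from the first input
        >>> m = nn.LazyInstanceNorm1d(affine=True)
        >>> input = torch.randn(20, 100, 40)
        >>> output = m(input)
        >>> m.weight.shape
        torch.Size([100])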
    """

    cls_to_become = InstanceNorm1d  # type: ignore[assignment]

    def _get_no_batch_dim(self):
        return 2

    def _check_input_dim(self, input):
        if input.dim() not in (2, 3):
            raise ValueError(f"expected 2D or 3D input (got {input.dim()}D input)")


class InstanceNorm2d(_InstanceNorm):
    r"""Applies Instance Normalization.

    This operation applies Instance Normalization
    over a 4D input (a mini-batch of 2D inputs
    with additional channel dimension) as described in the paper
    `Instance Normalization: The Missing Ingredient for Fast Stylization
    <https://arxiv.org/abs/1607.08022>`__.

    .. math::

        y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta

    The mean and standard-deviation are calculated per-dimension separately
    for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
    of size `C` (where `C` is the input size) if :attr:`affine` is ``True``.
    The standard-deviation is calculated via the biased estimator, equivalent to
    `torch.var(input, unbiased=False)`.

    By default, this layer uses instance statistics computed from input data in
    both training and evaluation modes.

    If :attr:`track_running_stats` is set to ``True``, during training this
    layer keeps running estimates of its computed mean and variance, which are
    then used for normalization during evaluation. The running estimates are
    kept with a default :attr:`momentum` of 0.1.

    .. note::
        This :attr:`momentum` argument is different from the one used in optimizer
        classes and the conventional notion of momentum. Mathematically, the
        update rule for running statistics here is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
        where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
        new observed value.

    .. note::
        :class:`InstanceNorm2d` and :class:`LayerNorm` are very similar, but
        have some subtle differences. :class:`InstanceNorm2d` is applied
        on each channel of channeled data like RGB images, but
        :class:`LayerNorm` is usually applied on an entire sample and often in NLP
        tasks. Additionally, :class:`LayerNorm` applies an elementwise affine
        transform, while :class:`InstanceNorm2d` usually doesn't apply an affine
        transform.

    Args:
        num_features: :math:`C` from an expected input of size
            :math:`(N, C, H, W)` or :math:`(C, H, W)`
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        momentum: the value used for the running_mean and running_var computation. Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters, initialized the same way as done for batch normalization.
            Default: ``False``.
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
            this module does not track such statistics and always uses batch
            statistics in both training and eval modes. Default: ``False``

    Shape:
        - Input: :math:`(N, C, H, W)` or :math:`(C, H, W)`
        - Output: :math:`(N, C, H, W)` or :math:`(C, H, W)` (same shape as input)

    Examples::

        >>> # Without Learnable Parameters
        >>> m = nn.InstanceNorm2d(100)
        >>> # With Learnable Parameters
        >>> m = nn.InstanceNorm2d(100, affine=True)
        >>> input = torch.randn(20, 100, 35, 45)
        >>> output = m(input)
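        >>> # a minimal sketch: an unbatched (C, H, W) input is also accepted,
        >>> # per the Shape section; a batch dim is added and removed internally
        >>> unbatched = torch.randn(100, 35, 45)
        >>> m(unbatched).shape
        torch.Size([100, 35, 45])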
    """

    def _get_no_batch_dim(self):
        return 3

    def _check_input_dim(self, input):
        if input.dim() not in (3, 4):
            raise ValueError(f"expected 3D or 4D input (got {input.dim()}D input)")


class LazyInstanceNorm2d(_LazyNormBase, _InstanceNorm):
    r"""A :class:`torch.nn.InstanceNorm2d` module with lazy initialization of the ``num_features`` argument.

    The ``num_features`` argument of the :class:`InstanceNorm2d` is inferred from the ``input.size(1)``.
    The attributes that will be lazily initialized are `weight`, `bias`,
    `running_mean` and `running_var`.

    Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
    on lazy modules and their limitations.

    Args:
        num_features: :math:`C` from an expected input of size
            :math:`(N, C, H, W)` or :math:`(C, H, W)`
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        momentum: the value used for the running_mean and running_var computation. Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters, initialized the same way as done for batch normalization.
            Default: ``False``.
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
            this module does not track such statistics and always uses batch
            statistics in both training and eval modes. Default: ``False``

    Shape:
        - Input: :math:`(N, C, H, W)` or :math:`(C, H, W)`
        - Output: :math:`(N, C, H, W)` or :math:`(C, H, W)` (same shape as input)
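
    Examples::

        >>> # a minimal sketch: parameters stay uninitialized until the first call
        >>> m = nn.LazyInstanceNorm2d()
        >>> m.has_uninitialized_params()
        True
        >>> output = m(torch.randn(20, 100, 35, 45))
        >>> m.weight.shape
        torch.Size([100])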
    """

    cls_to_become = InstanceNorm2d  # type: ignore[assignment]

    def _get_no_batch_dim(self):
        return 3

    def _check_input_dim(self, input):
        if input.dim() not in (3, 4):
            raise ValueError(f"expected 3D or 4D input (got {input.dim()}D input)")


class InstanceNorm3d(_InstanceNorm):
    r"""Applies Instance Normalization.

    This operation applies Instance Normalization
    over a 5D input (a mini-batch of 3D inputs with additional channel dimension) as described in the paper
    `Instance Normalization: The Missing Ingredient for Fast Stylization
    <https://arxiv.org/abs/1607.08022>`__.

    .. math::

        y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta

    The mean and standard-deviation are calculated per-dimension separately
    for each object in a mini-batch. :math:`\gamma` and :math:`\beta` are learnable parameter vectors
    of size C (where C is the input size) if :attr:`affine` is ``True``.
    The standard-deviation is calculated via the biased estimator, equivalent to
    `torch.var(input, unbiased=False)`.

    By default, this layer uses instance statistics computed from input data in
    both training and evaluation modes.

    If :attr:`track_running_stats` is set to ``True``, during training this
    layer keeps running estimates of its computed mean and variance, which are
    then used for normalization during evaluation. The running estimates are
    kept with a default :attr:`momentum` of 0.1.

    .. note::
        This :attr:`momentum` argument is different from the one used in optimizer
        classes and the conventional notion of momentum. Mathematically, the
        update rule for running statistics here is
        :math:`\hat{x}_\text{new} = (1 - \text{momentum}) \times \hat{x} + \text{momentum} \times x_t`,
        where :math:`\hat{x}` is the estimated statistic and :math:`x_t` is the
        new observed value.

    .. note::
        :class:`InstanceNorm3d` and :class:`LayerNorm` are very similar, but
        have some subtle differences. :class:`InstanceNorm3d` is applied
        on each channel of channeled data like 3D models with RGB color, but
        :class:`LayerNorm` is usually applied on an entire sample and often in NLP
        tasks. Additionally, :class:`LayerNorm` applies an elementwise affine
        transform, while :class:`InstanceNorm3d` usually doesn't apply an affine
        transform.

    Args:
        num_features: :math:`C` from an expected input of size
            :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        momentum: the value used for the running_mean and running_var computation. Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters, initialized the same way as done for batch normalization.
            Default: ``False``.
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
            this module does not track such statistics and always uses batch
            statistics in both training and eval modes. Default: ``False``

    Shape:
        - Input: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`
        - Output: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` (same shape as input)

    Examples::

        >>> # Without Learnable Parameters
        >>> m = nn.InstanceNorm3d(100)
        >>> # With Learnable Parameters
        >>> m = nn.InstanceNorm3d(100, affine=True)
        >>> input = torch.randn(20, 100, 35, 45, 10)
        >>> output = m(input)
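        >>> # a minimal sketch: with track_running_stats=True, the running
        >>> # estimates gathered during training are used once in eval mode
        >>> m = nn.InstanceNorm3d(100, track_running_stats=True)
        >>> output = m(input)           # training mode updates running stats
        >>> output = m.eval()(input)    # eval mode normalizes with them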
    """

    def _get_no_batch_dim(self):
        return 4

    def _check_input_dim(self, input):
        if input.dim() not in (4, 5):
            raise ValueError(f"expected 4D or 5D input (got {input.dim()}D input)")


class LazyInstanceNorm3d(_LazyNormBase, _InstanceNorm):
    r"""A :class:`torch.nn.InstanceNorm3d` module with lazy initialization of the ``num_features`` argument.

    The ``num_features`` argument of the :class:`InstanceNorm3d` is inferred from the ``input.size(1)``.
    The attributes that will be lazily initialized are `weight`, `bias`,
    `running_mean` and `running_var`.

    Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
    on lazy modules and their limitations.

    Args:
        num_features: :math:`C` from an expected input of size
            :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`
        eps: a value added to the denominator for numerical stability. Default: 1e-5
        momentum: the value used for the running_mean and running_var computation. Default: 0.1
        affine: a boolean value that when set to ``True``, this module has
            learnable affine parameters, initialized the same way as done for batch normalization.
            Default: ``False``.
        track_running_stats: a boolean value that when set to ``True``, this
            module tracks the running mean and variance, and when set to ``False``,
            this module does not track such statistics and always uses batch
            statistics in both training and eval modes. Default: ``False``

    Shape:
        - Input: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)`
        - Output: :math:`(N, C, D, H, W)` or :math:`(C, D, H, W)` (same shape as input)
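
    Examples::

        >>> # a minimal sketch: the lazy module swaps its own class for
        >>> # :class:`torch.nn.InstanceNorm3d` once the first forward pass has
        >>> # inferred ``num_features`` from ``input.size(1)``
        >>> m = nn.LazyInstanceNorm3d()
        >>> output = m(torch.randn(20, 100, 35, 45, 10))
        >>> type(m).__name__
        'InstanceNorm3d'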
    """

    cls_to_become = InstanceNorm3d  # type: ignore[assignment]

    def _get_no_batch_dim(self):
        return 4

    def _check_input_dim(self, input):
        if input.dim() not in (4, 5):
            raise ValueError(f"expected 4D or 5D input (got {input.dim()}D input)")