
from typing import Tuple, TypeVar, Union

import torch
import torch.nn as nn
from torch.ao.nn.intrinsic import _FusedModule
from torch.nn.common_types import _size_1_t, _size_2_t, _size_3_t
from torch.nn.modules.utils import _pair, _single, _triple

__all__ = ["Conv1d", "Conv2d", "Conv3d"]

MOD = TypeVar("MOD", bound=nn.modules.conv._ConvNd)


class _ConvNd(nn.modules.conv._ConvNd):
    _FLOAT_MODULE = MOD

    def __init__(self, in_channels: int, out_channels: int, kernel_size: Tuple[int, ...],
                 stride: Tuple[int, ...], padding: Tuple[int, ...], dilation: Tuple[int, ...],
                 transposed: bool, output_padding: Tuple[int, ...], groups: int, bias: bool,
                 padding_mode: str, qconfig=None, device=None, dtype=None) -> None:
        factory_kwargs = {"device": device, "dtype": dtype}
        nn.modules.conv._ConvNd.__init__(
            self, in_channels, out_channels, kernel_size, stride, padding, dilation,
            transposed, output_padding, groups, bias, padding_mode, **factory_kwargs,
        )
        assert qconfig, "qconfig must be provided for QAT module"
        self.qconfig = qconfig
        self.weight_fake_quant = qconfig.weight(factory_kwargs=factory_kwargs)

    def forward(self, input):
        return self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias)

    @staticmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):
        r"""Create a qat module from a float module

        Args:
           `mod`: a float module, either produced by torch.ao.quantization utilities
           or directly from user
        """
        assert type(mod) == cls._FLOAT_MODULE, (
            "qat." + cls.__name__ + ".from_float only works for " + cls._FLOAT_MODULE.__name__
        )
        assert hasattr(mod, "qconfig"), "Input float module must have qconfig defined"
        assert mod.qconfig, "Input float module must have a valid qconfig"
        if issubclass(type(mod), _FusedModule):
            mod = mod[0]
        qconfig = mod.qconfig
        qat_conv = cls(
            mod.in_channels, mod.out_channels, mod.kernel_size, mod.stride,
            mod.padding, mod.dilation, mod.groups, mod.bias is not None,
            mod.padding_mode, qconfig=qconfig,
        )
        qat_conv.weight = mod.weight
        qat_conv.bias = mod.bias
        return qat_conv

    def to_float(self):
        """This works for both single qat conv, and the qat conv - relu modules
        to convert the qat module to a floating point module
        """
        cls = type(self)
        conv = cls._FLOAT_CONV_MODULE(
            self.in_channels, self.out_channels, self.kernel_size, self.stride,
            self.padding, self.dilation, self.groups, self.bias is not None,
            self.padding_mode,
        )
        conv.weight = torch.nn.Parameter(self.weight.detach())
        if self.bias is not None:
            conv.bias = torch.nn.Parameter(self.bias.detach())
        # conv relu: rebuild the fused float module around the converted conv
        if issubclass(cls, _FusedModule):
            modules = [conv]
            assert hasattr(cls, "_FLOAT_RELU_MODULE")
            relu = cls._FLOAT_RELU_MODULE()
            modules.append(relu)
            fused = cls._FLOAT_MODULE(*modules)
            fused.train(self.training)
            return fused
        else:
            return conv


class Conv1d(_ConvNd, nn.Conv1d):
    r"""
    A Conv1d module attached with FakeQuantize modules for weight,
    used for quantization aware training.

    We adopt the same interface as :class:`~torch.nn.Conv1d`

    Similar to :class:`~torch.nn.Conv2d`, with FakeQuantize modules initialized to
    default.

    Attributes:
        weight_fake_quant: fake quant module for weight
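
    Example (an illustrative sketch, not part of the documented API): a QAT conv
    expects a qconfig whose ``weight`` entry constructs the fake-quant module; this
    assumes the default QAT qconfig from ``torch.ao.quantization``::

        >>> import torch
        >>> from torch.ao.quantization import default_qat_qconfig
        >>> m = Conv1d(16, 33, 3, stride=2, qconfig=default_qat_qconfig)
        >>> # the forward pass runs the weight through m.weight_fake_quant
        >>> output = m(torch.randn(20, 16, 50))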
    """

    _FLOAT_MODULE = nn.Conv1d
    _FLOAT_CONV_MODULE = nn.Conv1d

    def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_1_t,
                 stride: _size_1_t = 1, padding: Union[str, _size_1_t] = 0,
                 dilation: _size_1_t = 1, groups: int = 1, bias: bool = True,
                 padding_mode: str = "zeros", qconfig=None, device=None, dtype=None) -> None:
        kernel_size_ = _single(kernel_size)
        stride_ = _single(stride)
        padding_ = padding if isinstance(padding, str) else _single(padding)
        dilation_ = _single(dilation)
        super().__init__(
            in_channels, out_channels, kernel_size_, stride=stride_, padding=padding_,
            dilation=dilation_, transposed=False, output_padding=_single(0),
            groups=groups, bias=bias, padding_mode=padding_mode,
            qconfig=qconfig, device=device, dtype=dtype,
        )

    @classmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):
        return super().from_float(
            cls, mod, use_precomputed_fake_quant=use_precomputed_fake_quant
        )


class Conv2d(_ConvNd, nn.Conv2d):
    r"""
    A Conv2d module attached with FakeQuantize modules for weight,
    used for quantization aware training.

    We adopt the same interface as `torch.nn.Conv2d`, please see
    https://pytorch.org/docs/stable/nn.html?highlight=conv2d#torch.nn.Conv2d
    for documentation.

    Similar to `torch.nn.Conv2d`, with FakeQuantize modules initialized to
    default.

    Attributes:
        weight_fake_quant: fake quant module for weight
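
    An illustrative sketch of the float -> QAT -> float round trip (assumes the
    default QAT qconfig from ``torch.ao.quantization``; ``from_float`` requires the
    float module to carry a ``qconfig`` attribute)::

        >>> import torch.nn as nn
        >>> from torch.ao.quantization import default_qat_qconfig
        >>> float_conv = nn.Conv2d(16, 33, 3, stride=2)
        >>> float_conv.qconfig = default_qat_qconfig
        >>> qat_conv = Conv2d.from_float(float_conv)  # reuses float_conv's weight
        >>> back = qat_conv.to_float()  # a plain nn.Conv2d again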
    """

    _FLOAT_MODULE = nn.Conv2d
    _FLOAT_CONV_MODULE = nn.Conv2d

    def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_2_t,
                 stride: _size_2_t = 1, padding: Union[str, _size_2_t] = 0,
                 dilation: _size_2_t = 1, groups: int = 1, bias: bool = True,
                 padding_mode: str = "zeros", qconfig=None, device=None, dtype=None) -> None:
        kernel_size_ = _pair(kernel_size)
        stride_ = _pair(stride)
        padding_ = padding if isinstance(padding, str) else _pair(padding)
        dilation_ = _pair(dilation)
        super().__init__(
            in_channels, out_channels, kernel_size_, stride=stride_, padding=padding_,
            dilation=dilation_, transposed=False, output_padding=_pair(0),
            groups=groups, bias=bias, padding_mode=padding_mode,
            qconfig=qconfig, device=device, dtype=dtype,
        )

    def forward(self, input):
        return self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias)

    @classmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):
        return super().from_float(
            cls, mod, use_precomputed_fake_quant=use_precomputed_fake_quant
        )


class Conv3d(_ConvNd, nn.Conv3d):
    r"""
    A Conv3d module attached with FakeQuantize modules for weight,
    used for quantization aware training.

    We adopt the same interface as `torch.nn.Conv3d`, please see
    https://pytorch.org/docs/stable/nn.html?highlight=conv3d#torch.nn.Conv3d
    for documentation.

    Similar to `torch.nn.Conv3d`, with FakeQuantize modules initialized to
    default.

    Attributes:
        weight_fake_quant: fake quant module for weight
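
    Example (illustrative, assuming the default QAT qconfig from
    ``torch.ao.quantization``)::

        >>> import torch
        >>> from torch.ao.quantization import default_qat_qconfig
        >>> m = Conv3d(16, 33, 3, stride=2, qconfig=default_qat_qconfig)
        >>> output = m(torch.randn(20, 16, 10, 50, 100))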
    """

    _FLOAT_MODULE = nn.Conv3d
    _FLOAT_CONV_MODULE = nn.Conv3d

    def __init__(self, in_channels: int, out_channels: int, kernel_size: _size_3_t,
                 stride: _size_3_t = 1, padding: Union[str, _size_3_t] = 0,
                 dilation: _size_3_t = 1, groups: int = 1, bias: bool = True,
                 padding_mode: str = "zeros", qconfig=None, device=None, dtype=None) -> None:
        kernel_size_ = _triple(kernel_size)
        stride_ = _triple(stride)
        padding_ = padding if isinstance(padding, str) else _triple(padding)
        dilation_ = _triple(dilation)
        super().__init__(
            in_channels, out_channels, kernel_size_, stride=stride_, padding=padding_,
            dilation=dilation_, transposed=False, output_padding=_triple(0),
            groups=groups, bias=bias, padding_mode=padding_mode,
            qconfig=qconfig, device=device, dtype=dtype,
        )

    def forward(self, input):
        return self._conv_forward(input, self.weight_fake_quant(self.weight), self.bias)

    @classmethod
    def from_float(cls, mod, use_precomputed_fake_quant=False):
        return super().from_float(
            cls, mod, use_precomputed_fake_quant=use_precomputed_fake_quant
        )