
from __future__ import annotations

from typing import Dict, List, TYPE_CHECKING

from .quantizer import QuantizationAnnotation, Quantizer


if TYPE_CHECKING:
    import torch
    from torch.fx import Node

__all__ = [
    "ComposableQuantizer",
]


class ComposableQuantizer(Quantizer):
    """
    ComposableQuantizer allows users to combine more than one quantizer into a single quantizer.
    This allows users to quantize a model with multiple quantizers. E.g., embedding quantization
    may be supported by one quantizer while linear layers and other ops might be supported by another
    quantizer.

    ComposableQuantizer is initialized with a list of `Quantizer` instances.
    The order of the composition matters since that is the order in which the quantizers will be
    applied.
    Example:
    ```
    embedding_quantizer = EmbeddingQuantizer()
    linear_quantizer = MyLinearQuantizer()
    xnnpack_quantizer = XNNPackQuantizer() # to handle ops not quantized by previous two quantizers
    composed_quantizer = ComposableQuantizer([embedding_quantizer, linear_quantizer, xnnpack_quantizer])
    prepared_m = prepare_pt2e(model, composed_quantizer)
    ```
    c                >    t         |           || _        i | _        y N)super__init__
quantizers_graph_annotations)selfr   	__class__s     g/var/www/html/venv/lib/python3.12/site-packages/torch/ao/quantization/quantizer/composable_quantizer.pyr   zComposableQuantizer.__init__%   s    $FH    c                   |j                   j                  D ]  }d|j                  v r|| j                  v rVt	        | j                  |         t	        |j                  d         k7  r%t        d|j                  j                   d|       |j                  d   | j                  |<   || j                  v st        d|j                  j                   d|        y )Nquantization_annotationz
Quantizer z! has changed annotations on node z! has removed annotations on node )graphnodesmetar   idRuntimeErrorr   __name__)r   gm	quantizerns       r    _record_and_validate_annotationsz4ComposableQuantizer._record_and_validate_annotations*   s      	A(AFF2 ///t..q12!&&!:;<= '$Y%8%8%A%A$BBcdecfg  238Q1RD++A.///&$Y%8%8%A%A$BBcdecfg 	r   c                n    | j                   D ]%  }|j                  |       | j                  ||       ' |S )z!just handling global spec for now)r   annotater"   r   modelr    s      r   r$   zComposableQuantizer.annotate@   s<     	DIu%11%C	D r   c                J    | j                   D ]  }|j                  |      } |S r   )r   transform_for_annotationr%   s      r   r(   z,ComposableQuantizer.transform_for_annotationG   s,      	>I66u=E	>r   c                     y r    )r   r&   s     r   validatezComposableQuantizer.validateN   s    r   )r   zList[Quantizer])r   torch.fx.GraphModuler    r	   returnNone)r&   r,   r-   r,   )r&   r,   r-   r.   )
r   
__module____qualname____doc__r   r"   r$   r(   r+   __classcell__)r   s   @r   r   r      sJ    &I
&3<	,)	r   )
__future__r   typingr   r   r   r    r   r	   torchtorch.fxr
   __all__r   r*   r   r   <module>r8      s4    " , , 8  
>) >r   