
"""Image processor class for Perceiver."""

from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    get_image_size,
    infer_channel_dimension_format,
    is_scaled_image,
    make_list_of_images,
    to_numpy_array,
    valid_images,
    validate_preprocess_arguments,
)
from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


class PerceiverImageProcessor(BaseImageProcessor):
    r"""
    Constructs a Perceiver image processor.

    Args:
        do_center_crop (`bool`, *optional*, defaults to `True`):
            Whether or not to center crop the image. If the input size is smaller than `crop_size` along any edge, the
            image will be padded with zeros and then center cropped. Can be overridden by the `do_center_crop`
            parameter in the `preprocess` method.
        crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 256, "width": 256}`):
            Desired output size when applying center-cropping. Can be overridden by the `crop_size` parameter in the
            `preprocess` method.
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image to `(size["height"], size["width"])`. Can be overridden by the `do_resize`
            parameter in the `preprocess` method.
        size (`Dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
            Size of the image after resizing. Can be overridden by the `size` parameter in the `preprocess` method.
        resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
            Defines the resampling filter to use if resizing the image. Can be overridden by the `resample` parameter
            in the `preprocess` method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
            parameter in the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Defines the scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter
            in the `preprocess` method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
            method.
        image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
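
    Example (a minimal usage sketch with a synthetic image; the expected output shape follows from the default
    `crop_size` and `size` above and the channels-first output format):

    ```python
    >>> import numpy as np
    >>> from transformers import PerceiverImageProcessor

    >>> image_processor = PerceiverImageProcessor()
    >>> image = np.full((480, 640, 3), 128, dtype=np.uint8)  # synthetic gray image in (height, width, channels)
    >>> inputs = image_processor(images=image, return_tensors="np")
    >>> inputs["pixel_values"].shape
    (1, 3, 224, 224)
    ```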
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def center_crop(
        self,
        image: np.ndarray,
        crop_size: Dict[str, int],
        size: Optional[Dict[str, int]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Center crop an image to `(size["height"] / crop_size["height"] * min_dim, size["width"] / crop_size["width"] *
        min_dim)`, where `min_dim = min(height, width)` is the shorter side of the input image.

        If the input size is smaller than `crop_size` along any edge, the image will be padded with zeros and then
        center cropped.

        Args:
            image (`np.ndarray`):
                Image to center crop.
            crop_size (`Dict[str, int]`):
                Desired output size after applying the center crop.
            size (`Dict[str, int]`, *optional*):
                Size of the image after resizing. If not provided, the self.size attribute will be used.
            data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
            input_data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.
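
        For example, with the default `size={"height": 224, "width": 224}` and `crop_size={"height": 256, "width":
        256}`, a 480x640 input has `min_dim = 480`, so the crop is `224 / 256 * 480 = 420` pixels per side; resizing
        that crop to 224x224 afterwards reproduces the usual "resize to 256, center crop 224" result at the original
        resolution.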
        """
        size = self.size if size is None else size
        size = get_size_dict(size)
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        height, width = get_image_size(image, channel_dim=input_data_format)
        min_dim = min(height, width)
        cropped_height = (size["height"] / crop_size["height"]) * min_dim
        cropped_width = (size["width"] / crop_size["width"]) * min_dim
        return center_crop(
            image,
            size=(cropped_height, cropped_width),
            data_format=data_format,
            input_data_format=input_data_format,
            **kwargs,
        )

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize an image to `(size["height"], size["width"])`.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`Dict[str, int]`):
                Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                `PILImageResampling` filter to use when resizing the image, e.g. `PILImageResampling.BICUBIC`.
            data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the output image. If unset, the channel dimension format of the input
                image is used. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.

        Returns:
            `np.ndarray`: The resized image.
        """
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(
            image,
            size=output_size,
            resample=resample,
            data_format=data_format,
            input_data_format=input_data_format,
            **kwargs,
        )

    @filter_out_non_signature_kwargs()
    def preprocess(
        self,
        images: ImageInput,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> PIL.Image.Image:
        """
        Preprocess an image or batch of images.

        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
                Whether to center crop the image to `crop_size`.
            crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
                Desired output size after applying the center crop.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Size of the image after resizing.
            resample (`int`, *optional*, defaults to `self.resample`):
                Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
                has an effect if `do_resize` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image.
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Image mean.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                    - Unset: Return a list of `np.ndarray`.
                    - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                    - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                    - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                    - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                    - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                    - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
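
        Example (a minimal sketch overriding the resize target at call time; the center crop is rescaled accordingly,
        as described in `center_crop`, and `return_tensors="np"` keeps the example framework-free):

        ```python
        >>> import numpy as np
        >>> from transformers import PerceiverImageProcessor

        >>> image_processor = PerceiverImageProcessor()
        >>> image = np.full((480, 640, 3), 128, dtype=np.uint8)
        >>> batch = image_processor.preprocess(image, size={"height": 128, "width": 128}, return_tensors="np")
        >>> batch["pixel_values"].shape
        (1, 3, 128, 128)
        ```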
        """
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or "
                "jax.ndarray."
            )

        validate_preprocess_arguments(
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_center_crop=do_center_crop,
            crop_size=crop_size,
            do_resize=do_resize,
            size=size,
            resample=resample,
        )

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_scaled_image(images[0]) and do_rescale:
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )

        if input_data_format is None:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(images[0])

        if do_center_crop:
            images = [
                self.center_crop(image, crop_size, size=size, input_data_format=input_data_format) for image in images
            ]

        if do_resize:
            images = [
                self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
                for image in images
            ]

        if do_rescale:
            images = [
                self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
                for image in images
            ]

        if do_normalize:
            images = [
                self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
                for image in images
            ]

        images = [
            to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
        ]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)