"""Image processor class for SegGPT."""

from typing import Dict, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    infer_channel_dimension_format,
    is_scaled_image,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging, requires_backends


if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


def build_palette(num_labels: int) -> List[Tuple[int, int]]:
    base = int(num_labels ** (1 / 3)) + 1
    margin = 256 // base

    # class index 0 is reserved for the background and mapped to black
    color_list = [(0, 0, 0)]
    for location in range(num_labels):
        num_seq_r = location // base**2
        num_seq_g = (location % base**2) // base
        num_seq_b = location % base

        R = 255 - num_seq_r * margin
        G = 255 - num_seq_g * margin
        B = 255 - num_seq_b * margin

        color_list.append((R, G, B))

    return color_list


def mask_to_rgb(
    mask: np.ndarray,
    palette: Optional[List[Tuple[int, int]]] = None,
    data_format: Optional[ChannelDimension] = None,
) -> np.ndarray:
    data_format = data_format if data_format is not None else ChannelDimension.FIRST

    if palette is not None:
        height, width = mask.shape

        rgb_mask = np.zeros((3, height, width), dtype=np.uint8)

        classes_in_mask = np.unique(mask)

        for class_idx in classes_in_mask:
            rgb_value = palette[class_idx]
            class_mask = (mask == class_idx).astype(np.uint8)
            class_mask = np.expand_dims(class_mask, axis=-1)
            class_rgb_mask = class_mask * np.array(rgb_value)
            class_rgb_mask = np.moveaxis(class_rgb_mask, -1, 0)

            rgb_mask += class_rgb_mask.astype(np.uint8)

        rgb_mask = np.clip(rgb_mask, 0, 255).astype(np.uint8)

    else:
        # Without a palette the single channel is simply duplicated across three channels.
        rgb_mask = np.repeat(mask[None, ...], 3, axis=0)

    return to_channel_dimension_format(rgb_mask, data_format)


class SegGptImageProcessor(BaseImageProcessor):
    r"""
    Constructs a SegGpt image processor.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions to the specified `(size["height"],
            size["width"])`. Can be overridden by the `do_resize` parameter in the `preprocess` method.
        size (`dict`, *optional*, defaults to `{"height": 448, "width": 448}`):
            Size of the output image after resizing. Can be overridden by the `size` parameter in the `preprocess`
            method.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
            Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
            `preprocess` method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale`
            parameter in the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the
            `preprocess` method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
            method.
        image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
        do_convert_rgb (`bool`, *optional*, defaults to `True`):
            Whether to convert the prompt mask to RGB format. Can be overridden by the `do_convert_rgb` parameter in the
            `preprocess` method.
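
    Example (an illustrative sketch; it assumes `torch`, `Pillow` and `requests` are installed and that the COCO
    sample image URL is reachable):

    ```python
    >>> import requests
    >>> from PIL import Image

    >>> from transformers import SegGptImageProcessor

    >>> image_processor = SegGptImageProcessor(size={"height": 448, "width": 448})

    >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    >>> image = Image.open(requests.get(url, stream=True).raw)

    >>> inputs = image_processor(images=image, return_tensors="pt")
    >>> list(inputs.keys())
    ['pixel_values']
    >>> inputs["pixel_values"].shape
    torch.Size([1, 3, 448, 448])
    ```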
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 448, "width": 448}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.do_normalize = do_normalize
        self.size = size
        self.resample = resample
        self.rescale_factor = rescale_factor
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
        self.do_convert_rgb = do_convert_rgb

    def get_palette(self, num_labels: int) -> List[Tuple[int, int]]:
        """Build a palette to map the prompt mask from a single channel to a 3 channel RGB.

        Args:
            num_labels (`int`):
                Number of classes in the segmentation task (excluding the background).

        Returns:
            `List[Tuple[int, int]]`: Palette to map the prompt mask from a single channel to a 3 channel RGB.
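
        Example (a minimal illustrative sketch; the exact colors come from the palette construction above):

        ```python
        >>> from transformers import SegGptImageProcessor

        >>> processor = SegGptImageProcessor()
        >>> palette = processor.get_palette(num_labels=3)
        >>> len(palette)  # one entry for the background plus one per class
        4
        >>> palette[0]  # class index 0 is treated as the background and mapped to black
        (0, 0, 0)
        ```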
        """
        return build_palette(num_labels)

    def mask_to_rgb(
        self,
        image: np.ndarray,
        palette: Optional[List[Tuple[int, int]]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        """Converts a segmentation map to RGB format.

        Args:
            image (`np.ndarray`):
                Segmentation map with dimensions (height, width) where pixel values represent the class index.
            palette (`List[Tuple[int, int]]`, *optional*, defaults to `None`):
                Palette to use to convert the mask to RGB format. If unset, the mask is duplicated across the channel
                dimension.
            data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the output image. If unset, the channel dimension format of the input
                image is used. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.

        Returns:
            `np.ndarray`: The mask in RGB format.
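
        Example (a minimal illustrative sketch using a toy 2x2 segmentation map):

        ```python
        >>> import numpy as np

        >>> from transformers import SegGptImageProcessor

        >>> processor = SegGptImageProcessor()
        >>> mask = np.array([[0, 1], [1, 2]])
        >>> palette = processor.get_palette(num_labels=2)
        >>> rgb_mask = processor.mask_to_rgb(mask, palette=palette)
        >>> rgb_mask.shape  # channels-first by default
        (3, 2, 2)
        ```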
        """
        return mask_to_rgb(image, palette=palette, data_format=data_format)

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize an image to `(size["height"], size["width"])`.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`Dict[str, int]`):
                Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
            resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.
            data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the output image. If unset, the channel dimension format of the input
                image is used. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.

        Returns:
            `np.ndarray`: The resized image.
        """
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(
            image,
            size=output_size,
            resample=resample,
            data_format=data_format,
            input_data_format=input_data_format,
            **kwargs,
        )

    def _preprocess_step(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        do_convert_rgb: Optional[bool] = None,
        num_labels: Optional[int] = None,
        **kwargs,
    ):
        """
        Preprocess an image or batch of images.

        Args:
            images (`ImageInput`):
                Image to _preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after
                resizing.
            resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):
                `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BICUBIC`. Only has
                an effect if `do_resize` is set to `True`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image values between [0 - 1].
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Image mean to use if `do_normalize` is set to `True`.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation to use if `do_normalize` is set to `True`.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
                Whether to convert the prompt mask to RGB format. If `num_labels` is specified, a palette will be built
                to map the prompt mask from a single channel to a 3 channel RGB. If unset, the prompt mask is duplicated
                across the channel dimension. Must be set to `False` if the prompt mask is already in RGB format.
            num_labels: (`int`, *optional*):
                Number of classes in the segmentation task (excluding the background). If specified, a palette will be
                built, assuming that class_idx 0 is the background, to map the prompt mask from a single class_idx
                channel to a 3 channel RGB. Not specifying this will result in the prompt mask either being passed
                through as is if it is already in RGB format or being duplicated across the channel dimension.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size_dict = get_size_dict(size)

        # Prompt masks come in as (height, width) maps, everything else as 3-channel images.
        images = make_list_of_images(images, expected_ndims=2 if do_convert_rgb else 3)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_scaled_image(images[0]) and do_rescale:
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )

        if do_convert_rgb:
            palette = self.get_palette(num_labels) if num_labels is not None else None
            # Since this is the input for the next transformations its format should be the same as the original image
            images = [
                self.mask_to_rgb(image=image, palette=palette, data_format=ChannelDimension.FIRST) for image in images
            ]
            input_data_format = ChannelDimension.FIRST
        elif input_data_format is None:
            # We assume that all images have the same channel dimension format.
            input_data_format = infer_channel_dimension_format(images[0])

        if do_resize:
            images = [
                self.resize(image=image, size=size_dict, resample=resample, input_data_format=input_data_format)
                for image in images
            ]

        if do_rescale:
            images = [
                self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
                for image in images
            ]

        if do_normalize:
            images = [
                self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
                for image in images
            ]

        images = [
            to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
        ]

        return images

    def preprocess(
        self,
        images: Optional[ImageInput] = None,
        prompt_images: Optional[ImageInput] = None,
        prompt_masks: Optional[ImageInput] = None,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: Optional[bool] = None,
        num_labels: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """
        Preprocess an image or batch of images.

        Args:
            images (`ImageInput`):
                Image to _preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            prompt_images (`ImageInput`):
                Prompt image to _preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            prompt_masks (`ImageInput`):
                Prompt mask from prompt image to _preprocess that specify prompt_masks value in the preprocessed output.
                Can either be in the format of segmentation maps (no channels) or RGB images. If in the format of
                RGB images, `do_convert_rgb` should be set to `False`. If in the format of segmentation maps,
                specifying `num_labels` is recommended to build a palette to map the prompt mask from a single channel to
                a 3 channel RGB. If `num_labels` is not specified, the prompt mask will be duplicated across the channel
                dimension.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Dictionary in the format `{"height": h, "width": w}` specifying the size of the output image after
                resizing.
            resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):
                `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BICUBIC`. Only has
                an effect if `do_resize` is set to `True`. Doesn't apply to prompt mask as it is resized using nearest.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image values between [0 - 1].
            rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to rescale the image by if `do_rescale` is set to `True`.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Image mean to use if `do_normalize` is set to `True`.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation to use if `do_normalize` is set to `True`.
            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
                Whether to convert the prompt mask to RGB format. If `num_labels` is specified, a palette will be built
                to map the prompt mask from a single channel to a 3 channel RGB. If unset, the prompt mask is duplicated
                across the channel dimension. Must be set to `False` if the prompt mask is already in RGB format.
            num_labels: (`int`, *optional*):
                Number of classes in the segmentation task (excluding the background). If specified, a palette will be
                built, assuming that class_idx 0 is the background, to map the prompt mask from a plain segmentation map
                with no channels to a 3 channel RGB. Not specifying this will result in the prompt mask either being passed
                through as is if it is already in RGB format (if `do_convert_rgb` is false) or being duplicated
                across the channel dimension.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        if all(v is None for v in [images, prompt_images, prompt_masks]):
            raise ValueError("At least one of images, prompt_images, prompt_masks must be specified.")

        data = {}

        if images is not None:
            images = self._preprocess_step(
                images,
                do_resize=do_resize,
                size=size,
                resample=resample,
                do_rescale=do_rescale,
                rescale_factor=rescale_factor,
                do_normalize=do_normalize,
                image_mean=image_mean,
                image_std=image_std,
                do_convert_rgb=False,
                data_format=data_format,
                input_data_format=input_data_format,
                **kwargs,
            )
            data["pixel_values"] = images

        if prompt_images is not None:
            prompt_images = self._preprocess_step(
                prompt_images,
                do_resize=do_resize,
                size=size,
                resample=resample,
                do_rescale=do_rescale,
                rescale_factor=rescale_factor,
                do_normalize=do_normalize,
                image_mean=image_mean,
                image_std=image_std,
                do_convert_rgb=False,
                data_format=data_format,
                input_data_format=input_data_format,
                **kwargs,
            )
            data["prompt_pixel_values"] = prompt_images

        if prompt_masks is not None:
            # Prompt masks are resized with nearest-neighbour interpolation so class ids are not mixed.
            prompt_masks = self._preprocess_step(
                prompt_masks,
                do_resize=do_resize,
                size=size,
                resample=PILImageResampling.NEAREST,
                do_rescale=do_rescale,
                rescale_factor=rescale_factor,
                do_normalize=do_normalize,
                image_mean=image_mean,
                image_std=image_std,
                do_convert_rgb=do_convert_rgb,
                num_labels=num_labels,
                data_format=data_format,
                input_data_format=input_data_format,
                **kwargs,
            )
            data["prompt_masks"] = prompt_masks

        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(
        self, outputs, target_sizes: Optional[List[Tuple[int, int]]] = None, num_labels: Optional[int] = None
    ):
        """
        Converts the output of [`SegGptImageSegmentationOutput`] into segmentation maps. Only supports
        PyTorch.

        Args:
            outputs ([`SegGptImageSegmentationOutput`]):
                Raw outputs of the model.
            target_sizes (`List[Tuple[int, int]]`, *optional*):
                List of length (batch_size), where each list item (`Tuple[int, int]`) corresponds to the requested
                final size (height, width) of each prediction. If left to None, predictions will not be resized.
            num_labels (`int`, *optional*):
                Number of classes in the segmentation task (excluding the background). If specified, a palette will be
                built, assuming that class_idx 0 is the background, to map prediction masks from RGB values to class
                indices. This value should be the same used when preprocessing inputs.
        Returns:
            semantic_segmentation: `List[torch.Tensor]` of length `batch_size`, where each item is a semantic
            segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
            specified). Each entry of each `torch.Tensor` corresponds to a semantic class id.
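
        Example (an illustrative sketch; it assumes the `BAAI/seggpt-vit-large` checkpoint is available and that
        `image`, `prompt_image` and `prompt_mask` are an input image, a prompt image and its segmentation map in any
        format accepted by `preprocess`):

        ```python
        >>> import torch

        >>> from transformers import SegGptForImageSegmentation, SegGptImageProcessor

        >>> checkpoint = "BAAI/seggpt-vit-large"
        >>> image_processor = SegGptImageProcessor.from_pretrained(checkpoint)
        >>> model = SegGptForImageSegmentation.from_pretrained(checkpoint)

        >>> inputs = image_processor(
        ...     images=image, prompt_images=prompt_image, prompt_masks=prompt_mask, num_labels=10, return_tensors="pt"
        ... )
        >>> with torch.no_grad():
        ...     outputs = model(**inputs)

        >>> segmentation = image_processor.post_process_semantic_segmentation(
        ...     outputs, target_sizes=[image.size[::-1]], num_labels=10
        ... )[0]
        ```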
        """
        requires_backends(self, ["torch"])
        # batch_size x num_channels x 2*height x width
        masks = outputs.pred_masks

        # Predicted masks and prompts are concatenated in the height dimension
        # batch_size x num_channels x height x width
        masks = masks[:, :, masks.shape[2] // 2 :, :]

        # To unnormalize we need to permute to channel last
        # batch_size x height x width x num_channels
        std = torch.tensor(self.image_std).to(masks.device)
        mean = torch.tensor(self.image_mean).to(masks.device)
        masks = masks.permute(0, 2, 3, 1) * std + mean

        # batch_size x num_channels x height x width
        masks = masks.permute(0, 3, 1, 2)

        # Clip to match with palette if specified
        masks = torch.clip(masks * 255, 0, 255)

        semantic_segmentation = []
        palette_tensor = None
        palette = self.get_palette(num_labels) if num_labels is not None else None
        if palette is not None:
            palette_tensor = torch.tensor(palette).float().to(masks.device)
            _, num_channels, _, _ = masks.shape
            palette_tensor = palette_tensor.view(1, 1, num_labels + 1, num_channels)

        for idx, mask in enumerate(masks):
            if target_sizes is not None:
                mask = torch.nn.functional.interpolate(
                    mask.unsqueeze(0),
                    size=target_sizes[idx],
                    mode="nearest",
                )[0]

            if num_labels is not None:
                # Assign each pixel to the palette color with the smallest squared distance.
                channels, height, width = mask.shape
                dist = mask.permute(1, 2, 0).view(height, width, 1, channels)
                dist = dist - palette_tensor
                dist = torch.pow(dist, 2)
                dist = torch.sum(dist, dim=-1)
                pred = dist.argmin(dim=-1)

            else:
                # Without a palette the predicted RGB values are averaged into grayscale class indices.
                pred = mask.mean(dim=0).int().cpu()

            semantic_segmentation.append(pred)

        return semantic_segmentation