import subprocess
from typing import Union

import numpy as np
import requests

from ..utils import add_end_docstrings, is_torch_available, is_torchaudio_available, logging
from .base import Pipeline, build_pipeline_init_args


if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES

logger = logging.get_logger(__name__)


def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.ndarray:
    """
    Helper function to read an audio file through ffmpeg.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        ffmpeg_process = subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    except FileNotFoundError:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename")
    output_stream = ffmpeg_process.communicate(bpayload)
    out_bytes = output_stream[0]

    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio

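# A minimal usage sketch for `ffmpeg_read` ("sample.flac" is a hypothetical local file;
# requires the `ffmpeg` binary on PATH):
#
#     with open("sample.flac", "rb") as f:
#         waveform = ffmpeg_read(f.read(), sampling_rate=16_000)  # mono float32 at 16 kHz
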
XbXgXgh #..x8Ma IMM)RZZ0E{{1~.//L  cabbcs   4B% %B:T)has_feature_extractorc                   n     e Zd ZdZ fdZdeej                  ee	f   f fdZ
d	dZd Zd Zd
dZ xZS )AudioClassificationPipelinea  
    Audio classification pipeline using any `AutoModelForAudioClassification`. This pipeline predicts the class of a
    raw waveform or an audio file. In case of an audio file, ffmpeg should be installed to support multiple audio
    formats.

    Example:

    ```python
    >>> from transformers import pipeline

    >>> classifier = pipeline(model="superb/wav2vec2-base-superb-ks")
    >>> classifier("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac")
    [{'score': 0.997, 'label': '_unknown_'}, {'score': 0.002, 'label': 'left'}, {'score': 0.0, 'label': 'yes'}, {'score': 0.0, 'label': 'down'}, {'score': 0.0, 'label': 'stop'}]
    ```
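
    For raw waveforms, the `dict` input form lets the pipeline resample for you. A minimal
    sketch (the waveform below is a dummy array, the 16 kHz rate is an assumption about the
    checkpoint, and real scores depend on the model):

    ```python
    >>> import numpy as np

    >>> waveform = np.zeros(16_000, dtype=np.float32)  # one second of silence
    >>> classifier({"sampling_rate": 16_000, "raw": waveform})  # doctest: +SKIP
    ```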

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)


    This pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"audio-classification"`.

    See the list of available models on
    [huggingface.co/models](https://huggingface.co/models?filter=audio-classification).
    """

    def __init__(self, *args, **kwargs):
        # Default, may be overridden by the model config.
        kwargs["top_k"] = 5
        super().__init__(*args, **kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES)

    def __call__(self, inputs: Union[np.ndarray, bytes, str], **kwargs):
        """
        Classify the sequence(s) given as inputs. See the [`AutomaticSpeechRecognitionPipeline`] documentation for more
        information.

        Args:
            inputs (`np.ndarray` or `bytes` or `str` or `dict`):
                The inputs are either:
                    - `str` that is the filename of the audio file, the file will be read at the correct sampling rate
                      to get the waveform using *ffmpeg*. This requires *ffmpeg* to be installed on the system.
                    - `bytes`, in which case it is taken to be the content of an audio file and is interpreted by
                      *ffmpeg* in the same way.
                    - (`np.ndarray` of shape (n, ) of type `np.float32` or `np.float64`)
                        Raw audio at the correct sampling rate (no further check will be done)
                    - `dict` form can be used to pass raw audio sampled at arbitrary `sampling_rate` and let this
                      pipeline do the resampling. The dict must be in the format `{"sampling_rate": int,
                      "raw": np.array}`, or `{"sampling_rate": int, "array": np.array}`, where the key `"raw"` or
                      `"array"` is used to denote the raw audio waveform.
            top_k (`int`, *optional*, defaults to None):
                The number of top labels that will be returned by the pipeline. If the provided number is `None` or
                higher than the number of labels available in the model configuration, it will default to the number of
                labels.
            function_to_apply (`str`, *optional*, defaults to "softmax"):
                The function to apply to the model output. By default, the pipeline will apply the softmax function to
                the output of the model. Valid options: ["softmax", "sigmoid", "none"]. Note that passing Python's
                built-in `None` will default to "softmax", so you need to pass the string "none" to disable any
                post-processing.

        Return:
            A list of `dict` with the following keys:

            - **label** (`str`) -- The label predicted.
            - **score** (`float`) -- The corresponding probability.
        """
        return super().__call__(inputs, **kwargs)

    def _sanitize_parameters(self, top_k=None, function_to_apply=None, **kwargs):
        postprocess_params = {}
        if top_k is not None:
            # Never return more labels than the model defines.
            if top_k > self.model.config.num_labels:
                top_k = self.model.config.num_labels
            postprocess_params["top_k"] = top_k
        if function_to_apply is not None:
            if function_to_apply not in ["softmax", "sigmoid", "none"]:
                raise ValueError(
                    f"Invalid value for `function_to_apply`: {function_to_apply}. "
                    "Valid options are ['softmax', 'sigmoid', 'none']"
                )
            postprocess_params["function_to_apply"] = function_to_apply
        else:
            postprocess_params["function_to_apply"] = "softmax"
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        if isinstance(inputs, str):
            if inputs.startswith("http://") or inputs.startswith("https://"):
                # URL: fetch the audio file contents.
                inputs = requests.get(inputs).content
            else:
                # Otherwise treat the string as a local file path.
                with open(inputs, "rb") as f:
                    inputs = f.read()

        if isinstance(inputs, bytes):
            inputs = ffmpeg_read(inputs, self.feature_extractor.sampling_rate)

        if isinstance(inputs, dict):
            inputs = inputs.copy()  # So we don't mutate the original dict outside the pipeline
            # Accepting `"array"` as well as `"raw"` because `"array"` is the key defined in
            # `datasets`, for better integration.
            if not ("sampling_rate" in inputs and ("raw" in inputs or "array" in inputs)):
                raise ValueError(
                    "When passing a dictionary to AudioClassificationPipeline, the dict needs to contain a "
                    '"raw" key containing the numpy array representing the audio and a "sampling_rate" key, '
                    "containing the sampling_rate associated with that array"
                )

            _inputs = inputs.pop("raw", None)
            if _inputs is None:
                # Remove path which will not be used from `datasets`.
                inputs.pop("path", None)
                _inputs = inputs.pop("array", None)
            in_sampling_rate = inputs.pop("sampling_rate")
            inputs = _inputs
            if in_sampling_rate != self.feature_extractor.sampling_rate:
                import torch

                if is_torchaudio_available():
                    from torchaudio import functional as F
                else:
                    raise ImportError(
                        "torchaudio is required to resample audio samples in AudioClassificationPipeline. "
                        "The torchaudio package can be installed through: `pip install torchaudio`."
                    )

                inputs = F.resample(
                    torch.from_numpy(inputs), in_sampling_rate, self.feature_extractor.sampling_rate
                ).numpy()

        if not isinstance(inputs, np.ndarray):
            raise TypeError("We expect a numpy ndarray as input")
        if len(inputs.shape) != 1:
            raise ValueError("We expect a single channel audio input for AudioClassificationPipeline")

        processed = self.feature_extractor(
            inputs, sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        return processed

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, function_to_apply="softmax"):
        if function_to_apply == "softmax":
            probs = model_outputs.logits[0].softmax(-1)
        elif function_to_apply == "sigmoid":
            probs = model_outputs.logits[0].sigmoid()
        else:
            probs = model_outputs.logits[0]
        scores, ids = probs.topk(top_k)

        scores = scores.tolist()
        ids = ids.tolist()

        labels = [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]

        return labels
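

# A usage sketch for the postprocess kwargs ("sample.flac" is a hypothetical local file;
# the checkpoint is the one from the class docstring):
#
#     from transformers import pipeline
#
#     classifier = pipeline(task="audio-classification", model="superb/wav2vec2-base-superb-ks")
#     # `top_k` and `function_to_apply` are routed to `postprocess` via `_sanitize_parameters`.
#     classifier("sample.flac", top_k=2, function_to_apply="sigmoid")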