
from queue import Queue
from typing import TYPE_CHECKING, Optional


if TYPE_CHECKING:
    from ..models.auto import AutoTokenizer


class BaseStreamer:
    """
    Base class from which `.generate()` streamers should inherit.
    """

    def put(self, value):
        """Function that is called by `.generate()` to push new tokens"""
        raise NotImplementedError()

    def end(self):
        """Function that is called by `.generate()` to signal the end of generation"""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    """
    Simple text streamer that prints the token(s) to stdout as soon as entire words are formed.

    <Tip warning={true}>

    The API for the streamer classes is still under development and may change in the future.

    </Tip>

    Parameters:
        tokenizer (`AutoTokenizer`):
            The tokenizer used to decode the tokens.
        skip_prompt (`bool`, *optional*, defaults to `False`):
            Whether to skip the prompt to `.generate()` or not. Useful e.g. for chatbots.
        decode_kwargs (`dict`, *optional*):
            Additional keyword arguments to pass to the tokenizer's `decode` method.

    Examples:

        ```python
        >>> from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

        >>> tok = AutoTokenizer.from_pretrained("openai-community/gpt2")
        >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
        >>> inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
        >>> streamer = TextStreamer(tok)

        >>> # Despite returning the usual output, the streamer will also print the generated text to stdout.
        >>> _ = model.generate(**inputs, streamer=streamer, max_new_tokens=20)
        An increasing sequence: one, two, three, four, five, six, seven, eight, nine, ten, eleven,
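        >>> # A hedged variant (not part of the run above): `skip_prompt=True` stops the prompt from being
        >>> # echoed, and extra keyword arguments such as `skip_special_tokens` are forwarded to `tokenizer.decode`.
        >>> streamer = TextStreamer(tok, skip_prompt=True, skip_special_tokens=True)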
        ```
    """

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """
        Receives tokens, decodes them, and prints them to stdout as soon as they form entire words.
        """
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decode the entire thing
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, flush the cache
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, print the characters
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, print up to the last space character (simple heuristic to avoid printing incomplete
        # words, which may change with the subsequent token)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        """Flushes any remaining cache and prints a newline to stdout."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # This defines a "chinese character" as anything in the CJK Unicode block
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True

        return False


class TextIteratorStreamer(TextStreamer):
    """
    Streamer that stores print-ready text in a queue, to be used by a downstream application as an iterator. This is
    useful for applications that benefit from accessing the generated text in a non-blocking way (e.g. in an interactive
    Gradio demo).

    <Tip warning={true}>

    The API for the streamer classes is still under development and may change in the future.

    </Tip>

    Parameters:
        tokenizer (`AutoTokenizer`):
            The tokenizer used to decode the tokens.
        skip_prompt (`bool`, *optional*, defaults to `False`):
            Whether to skip the prompt to `.generate()` or not. Useful e.g. for chatbots.
        timeout (`float`, *optional*):
            The timeout for the text queue. If `None`, the queue will block indefinitely. Useful to handle exceptions
            in `.generate()`, when it is called in a separate thread.
        decode_kwargs (`dict`, *optional*):
            Additional keyword arguments to pass to the tokenizer's `decode` method.

    Examples:

        ```python
        >>> from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
        >>> from threading import Thread

        >>> tok = AutoTokenizer.from_pretrained("openai-community/gpt2")
        >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
        >>> inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
        >>> streamer = TextIteratorStreamer(tok)

        >>> # Run the generation in a separate thread, so that we can fetch the generated text in a non-blocking way.
        >>> generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=20)
        >>> thread = Thread(target=model.generate, kwargs=generation_kwargs)
        >>> thread.start()
        >>> generated_text = ""
        >>> for new_text in streamer:
        ...     generated_text += new_text
        >>> generated_text
        'An increasing sequence: one, two, three, four, five, six, seven, eight, nine, ten, eleven,'
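        >>> # A sketch of the optional `timeout`: with it set, the consuming loop raises `queue.Empty` if no new
        >>> # text arrives in time (e.g. `.generate()` failed in its thread) instead of blocking forever.
        >>> streamer = TextIteratorStreamer(tok, timeout=10.0)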
        ```
    """

    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value