
import os
from shutil import copyfile
from typing import Optional, Tuple

from tokenizers import processors

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
from ...utils.versions import require_version


require_version("tokenizers>=0.13.3")

# The slow (sentencepiece-based) tokenizer is only importable when sentencepiece is installed.
if is_sentencepiece_available():
    from .tokenization_llama import LlamaTokenizer
else:
    LlamaTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "tokenizer.model", "tokenizer_file": "tokenizer.json"}

# Llama 2 chat-format markers: instruction delimiters and system-prompt delimiters.
B_INST, E_INST = "[INST]", "[/INST]"
B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"

DEFAULT_SYSTEM_PROMPT = """You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.

If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information."""


class LlamaTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding.

    This uses notably ByteFallback and no normalization.

    ```python
    >>> from transformers import LlamaTokenizerFast

    >>> tokenizer = LlamaTokenizerFast.from_pretrained("hf-internal-testing/llama-tokenizer")
    >>> tokenizer.encode("Hello this is a test")
    [1, 15043, 445, 338, 263, 1243]
    ```

    If you want to change the `bos_token` or the `eos_token`, make sure to specify them when initializing the model, or
    call `tokenizer.update_post_processor()` to make sure that the post-processing is correctly done (otherwise the
    values of the first token and final token of an encoded sequence will not be correct). For more details, check out
    the [post-processors](https://huggingface.co/docs/tokenizers/api/post-processors) documentation.
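
    For example, because the `add_eos_token` setter re-runs `update_post_processor()` for you, enabling it makes the
    `eos_token_id` get appended to every encoding. A small sketch (the ids assume the same
    `hf-internal-testing/llama-tokenizer` checkpoint as above, whose `eos_token_id` is 2):

    ```python
    >>> tokenizer = LlamaTokenizerFast.from_pretrained("hf-internal-testing/llama-tokenizer")
    >>> tokenizer.add_eos_token = True  # the setter rebuilds the post-processor
    >>> tokenizer.encode("Hello this is a test")
    [1, 15043, 445, 338, 263, 1243, 2]
    ```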


    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`, *optional*):
            [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .model extension) that
            contains the vocabulary necessary to instantiate a tokenizer.
        tokenizer_file (`str`, *optional*):
            [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
            contains everything needed to load the tokenizer.
        clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
            Whether or not to clean up spaces after decoding; cleanup consists of removing potential artifacts like
            extra spaces.
        unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
        eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"</s>"`):
            The end of sequence token.
        add_bos_token (`bool`, *optional*, defaults to `True`):
            Whether or not to add a `bos_token` at the start of sequences.
        add_eos_token (`bool`, *optional*, defaults to `False`):
            Whether or not to add an `eos_token` at the end of sequences.
        use_default_system_prompt (`bool`, *optional*, defaults to `False`):
            Whether or not the default system prompt for Llama should be used.
        legacy (`bool`, *optional*):
            Whether or not the `legacy` behavior of the tokenizer should be used. Legacy is before the merge of #24622
            and #25224, which include fixes to properly handle tokens that appear after special tokens.
            Make sure to also set `from_slow` to `True`.
            A simple example:

            - `legacy=True`:
            ```python
            >>> from transformers import LlamaTokenizerFast

            >>> tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b", legacy=True, from_slow=True)
            >>> tokenizer.encode("Hello <s>.") # 869 is '▁.'
            [1, 15043, 29871, 1, 869]
            ```
            - `legacy=False`:
            ```python
            >>> from transformers import LlamaTokenizerFast

            >>> tokenizer = LlamaTokenizerFast.from_pretrained("huggyllama/llama-7b", legacy=False, from_slow=True)
            >>> tokenizer.encode("Hello <s>.")  # 29889 is '.'
            [1, 15043, 29871, 1, 29889]
            ```
            Check out the [pull request](https://github.com/huggingface/transformers/pull/24565) for more details.
        add_prefix_space (`bool`, *optional*):
            Whether or not the tokenizer should automatically add a prefix space.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    slow_tokenizer_class = LlamaTokenizer
    padding_side = "left"
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        clean_up_tokenization_spaces=False,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        add_bos_token=True,
        add_eos_token=False,
        use_default_system_prompt=False,
        legacy=None,
        add_prefix_space=None,
        **kwargs,
    ):
        if legacy is None:
            logger.warning_once(
                f"You are using the default legacy behaviour of the {self.__class__}. This is"
                " expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes"
                " for you. If you want to use the new behaviour, set `legacy=False`. This should only be set if you"
                " understand what it means, and thoroughly read the reason why this was added as explained in"
                " https://github.com/huggingface/transformers/pull/24565 - if you loaded a llama tokenizer from a"
                " GGUF file you can ignore this message."
            )
            legacy = True
        self.legacy = legacy

        if add_prefix_space is not None:
            kwargs["from_slow"] = True

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_bos_token=add_bos_token,
            add_eos_token=add_eos_token,
            use_default_system_prompt=use_default_system_prompt,
            add_prefix_space=add_prefix_space,
            legacy=legacy,
            **kwargs,
        )
        self._add_bos_token = add_bos_token
        self._add_eos_token = add_eos_token
        self.update_post_processor()
        self.use_default_system_prompt = use_default_system_prompt
        self.vocab_file = vocab_file

    @property
    def can_save_slow_tokenizer(self) -> bool:
        return os.path.isfile(self.vocab_file) if self.vocab_file else False

    def update_post_processor(self):
        """
        Updates the underlying post processor with the current `bos_token` and `eos_token`.
        """
        bos = self.bos_token
        bos_token_id = self.bos_token_id
        if bos is None and self.add_bos_token:
            raise ValueError("add_bos_token = True but bos_token = None")

        eos = self.eos_token
        eos_token_id = self.eos_token_id
        if eos is None and self.add_eos_token:
            raise ValueError("add_eos_token = True but eos_token = None")

        # Template strings for tokenizers' TemplateProcessing: "$A"/"$B" stand for the first/second
        # sequence, ":0"/":1" are the type ids assigned to the surrounding special tokens.
        single = f"{(bos + ':0 ') if self.add_bos_token else ''}$A:0{(' ' + eos + ':0') if self.add_eos_token else ''}"
        pair = f"{single}{(' ' + bos + ':1') if self.add_bos_token else ''} $B:1{(' ' + eos + ':1') if self.add_eos_token else ''}"

        special_tokens = []
        if self.add_bos_token:
            special_tokens.append((bos, bos_token_id))
        if self.add_eos_token:
            special_tokens.append((eos, eos_token_id))
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=single, pair=pair, special_tokens=special_tokens
        )

    @property
    def add_eos_token(self):
        return self._add_eos_token

    @property
    def add_bos_token(self):
        return self._add_bos_token

    @add_eos_token.setter
    def add_eos_token(self, value):
        self._add_eos_token = value
        self.update_post_processor()

    @add_bos_token.setter
    def add_bos_token(self, value):
        self._add_bos_token = value
        self.update_post_processor()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        # Prepend/append the special token ids only when the corresponding flags are enabled.
        bos_token_id = [self.bos_token_id] if self.add_bos_token else []
        eos_token_id = [self.eos_token_id] if self.add_eos_token else []

        output = bos_token_id + token_ids_0 + eos_token_id

        if token_ids_1 is not None:
            output = output + bos_token_id + token_ids_1 + eos_token_id

        return output
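
# A minimal sketch (illustrative only, not part of the class above) of how the Llama 2 chat markers defined at the
# top of this module are conventionally combined into a single-turn prompt; the surrounding format is a Llama 2 chat
# convention, not something this module enforces:
#
#     prompt = f"{B_INST} {B_SYS}{DEFAULT_SYSTEM_PROMPT}{E_SYS}How do I sort a list in Python? {E_INST}"
#     input_ids = LlamaTokenizerFast.from_pretrained("hf-internal-testing/llama-tokenizer").encode(prompt)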