# coding=utf-8
"""
Fast tokenization class for LayoutLMv3. It overwrites 2 methods of the slow tokenizer class, namely _batch_encode_plus
and _encode_plus, in which the Rust tokenizer is used.
    N)DictListOptionalTupleUnion)pre_tokenizers
processors   )BatchEncodingEncodedInputPaddingStrategyPreTokenizedInput
TensorType	TextInputTextInputPairTruncationStrategy)PreTrainedTokenizerFast)add_end_docstringslogging   )"LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING2LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRINGLayoutLMv3Tokenizerz
vocab.jsonz
merges.txtztokenizer.json)
vocab_filemerges_filetokenizer_filec            *           e Zd ZdZeZddgZeZdddddddddd	d
ddg dg dg dddf fd	Z	 e
ee      	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 d9deeeee   ee   f   deeeee   f      deeee      eeee         f   deeee   eee      f      dedeeeef   deeeef   dee   dedee   dee   deeeef      dee   dee   ded ed!ed"ed#ed$ef(d%       Z e
ee      	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 d9d&eee   ee   ee   f   d'edeeeee            deeee   eee      f      dedeeeef   deeeef   dee   dedee   dee   deeeef      dee   dee   ded ed!ed"ed#ed$ef(d(       Zd:ded)ee   ded$ee   fd*Z e
ee      	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 d9deeef   dee   deeee         deee      dedeeeef   deeeef   dee   dedee   dee   deeeef      dee   dee   ded ed!ed"ed#ed$ef(d+       Zddddej<                  ej>                  ddddddddddddfd&eee   ee   ee   f   d'edeeeee            deeee         ded,ed-edee   dedee   dee   dee   dee   dee   ded ed!ed"ed#ed$ef(d.Z ddddej<                  ej>                  ddddddddddddfdeeef   dee   deeee         deee      ded,ed-edee   dedee   dee   dee   dee   dee   ded ed!ed"ed#ed$ef(d/Z!dej<                  dddfd0ee"ee#f   ef   dee   d,edee   dee   dee   d$e$fd1Z%d;d2ed3ee   d$e&e   fd4Z'd;d5Z(	 d;d6ee   d7eee      d$ee   fd8Z) xZ*S )<LayoutLMv3TokenizerFasta<  
    Construct a "fast" LayoutLMv3 tokenizer (backed by HuggingFace's *tokenizers* library). Based on BPE.

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
        errors (`str`, *optional*, defaults to `"replace"`):
            Paradigm to follow when decoding bytes to UTF-8. See
            [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
        bos_token (`str`, *optional*, defaults to `"<s>"`):
            The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier
            token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the beginning of
            sequence. The token used is the `cls_token`.

            </Tip>

        eos_token (`str`, *optional*, defaults to `"</s>"`):
            The end of sequence token.

            <Tip>

            When building a sequence using special tokens, this is not the token that is used for the end of sequence.
            The token used is the `sep_token`.

            </Tip>

        sep_token (`str`, *optional*, defaults to `"</s>"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        cls_token (`str`, *optional*, defaults to `"<s>"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        unk_token (`str`, *optional*, defaults to `"<unk>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        pad_token (`str`, *optional*, defaults to `"<pad>"`):
            The token used for padding, for example when batching sequences of different lengths.
        mask_token (`str`, *optional*, defaults to `"<mask>"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        add_prefix_space (`bool`, *optional*, defaults to `True`):
            Whether or not to add an initial space to the input. This allows the leading word to be treated just like
            any other word. (The RoBERTa tokenizer detects the beginning of a word by the preceding space.)
        trim_offsets (`bool`, *optional*, defaults to `True`):
            Whether the post processing step should trim offsets to avoid including whitespaces.
        cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
            The bounding box to use for the special [CLS] token.
        sep_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
            The bounding box to use for the special [SEP] token.
        pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
            The bounding box to use for the special [PAD] token.
        pad_token_label (`int`, *optional*, defaults to -100):
            The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
            CrossEntropyLoss.
        only_label_first_subword (`bool`, *optional*, defaults to `True`):
            Whether or not to only label the first subword, in case word labels are provided.
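
    Example (a minimal usage sketch; the checkpoint name and box coordinates below are illustrative):

    ```python
    >>> from transformers import LayoutLMv3TokenizerFast

    >>> tokenizer = LayoutLMv3TokenizerFast.from_pretrained("microsoft/layoutlmv3-base")
    >>> words = ["hello", "world"]
    >>> boxes = [[637, 773, 693, 782], [698, 773, 733, 782]]  # one 0-1000 normalized box per word
    >>> encoding = tokenizer(words, boxes=boxes, return_tensors="pt")
    >>> sorted(encoding.keys())
    ['attention_mask', 'bbox', 'input_ids']
    ```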
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = LayoutLMv3Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=True,
        trim_offsets=True,
        cls_token_box=[0, 0, 0, 0],
        sep_token_box=[0, 0, 0, 0],
        pad_token_box=[0, 0, 0, 0],
        pad_token_label=-100,
        only_label_first_subword=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            cls_token_box=cls_token_box,
            sep_token_box=sep_token_box,
            pad_token_box=pad_token_box,
            pad_token_label=pad_token_label,
            only_label_first_subword=only_label_first_subword,
            **kwargs,
        )

        # Align the backend pre-tokenizer with the requested `add_prefix_space` setting.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # The post processor also stores `add_prefix_space` and `trim_offsets`; rebuild it if either differs.
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The 'sep' and 'cls' lists are not serializable as tuples; convert them back.
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

        # Additional properties used to handle bounding boxes and word labels.
        self.cls_token_box = cls_token_box
        self.sep_token_box = sep_token_box
        self.pad_token_box = pad_token_box
        self.pad_token_label = pad_token_label
        self.only_label_first_subword = only_label_first_subword

    @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def __call__(
        self,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Optional[Union[List[List[int]], List[List[List[int]]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
        sequences with word-level normalized bounding boxes and optional labels.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
                (words of a single example or questions of a batch of examples) or a list of list of strings (batch of
                words).
            text_pair (`List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
                (pretokenized string).
            boxes (`List[List[int]]`, `List[List[List[int]]]`):
                Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
            word_labels (`List[int]`, `List[List[int]]`, *optional*):
                Word-level integer labels (for token classification tasks such as FUNSD, CORD).
        """

        # A quick format check: a valid text input is a string, a (possibly empty) list of strings,
        # or a (possibly empty) list of lists of strings.
        def _is_valid_text_input(t):
            if isinstance(t, str):
                return True
            elif isinstance(t, (list, tuple)):
                if len(t) == 0:
                    return True
                elif isinstance(t[0], str):
                    return True
                elif isinstance(t[0], (list, tuple)):
                    return len(t[0]) == 0 or isinstance(t[0][0], str)
                else:
                    return False
            else:
                return False

        if text_pair is not None:
            # in case text + text_pair are provided, text = questions, text_pair = words
            if not _is_valid_text_input(text):
                raise ValueError(
                    "text input must be of type `str` (single example) or `List[str]` (batch of examples)."
                )
            if not isinstance(text_pair, (list, tuple)):
                raise ValueError(
                    "Words must be of type `List[str]` (single pretokenized example), "
                    "or `List[List[str]]` (batch of pretokenized examples)."
                )
        else:
            # in case only text is provided => it must be words
            if not isinstance(text, (list, tuple)):
                raise ValueError(
                    "Words must be of type `List[str]` (single pretokenized example), "
                    "or `List[List[str]]` (batch of pretokenized examples)."
                )

        if text_pair is not None:
            is_batched = isinstance(text, (list, tuple))
        else:
            is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))

        words = text if text_pair is None else text_pair
        if boxes is None:
            raise ValueError("You must provide corresponding bounding boxes")
        if is_batched:
            if len(words) != len(boxes):
                raise ValueError("You must provide words and boxes for an equal amount of examples")
            for words_example, boxes_example in zip(words, boxes):
                if len(words_example) != len(boxes_example):
                    raise ValueError("You must provide as many words as there are bounding boxes")
        else:
            if len(words) != len(boxes):
                raise ValueError("You must provide as many words as there are bounding boxes")

        if is_batched:
            if text_pair is not None and len(text) != len(text_pair):
                raise ValueError(
                    f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
                    f" {len(text_pair)}."
                )
            batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
            is_pair = bool(text_pair is not None)
            return self.batch_encode_plus(
                batch_text_or_text_pairs=batch_text_or_text_pairs,
                is_pair=is_pair,
                boxes=boxes,
                word_labels=word_labels,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                padding_side=padding_side,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                **kwargs,
            )
        else:
            return self.encode_plus(
                text=text,
                text_pair=text_pair,
                boxes=boxes,
                word_labels=word_labels,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                padding_side=padding_side,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                **kwargs,
            )

    @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def batch_encode_plus(
        self,
        batch_text_or_text_pairs: Union[List[TextInput], List[TextInputPair], List[PreTokenizedInput]],
        is_pair: bool = None,
        boxes: Optional[List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
        padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            verbose=verbose,
            **kwargs,
        )

        return self._batch_encode_plus(
            batch_text_or_text_pairs=batch_text_or_text_pairs,
            is_pair=is_pair,
            boxes=boxes,
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
            return_tensors=return_tensors,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            **kwargs,
        )

    def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]:
        batched_input = [(text, pair)] if pair else [text]
        encodings = self._tokenizer.encode_batch(
            batched_input, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs
        )

        return encodings[0].tokens

    @add_end_docstrings(LAYOUTLMV3_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV3_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def encode_plus(
        self,
        text: Union[TextInput, PreTokenizedInput],
        text_pair: Optional[PreTokenizedInput] = None,
        boxes: Optional[List[List[int]]] = None,
        word_labels: Optional[List[int]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Tokenize and prepare for the model a sequence or a pair of sequences.

        .. warning:: This method is deprecated, `__call__` should be used instead.
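
        For example, prefer `encoding = tokenizer(words, boxes=boxes)` over
        `encoding = tokenizer.encode_plus(words, boxes=boxes)`.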

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
            text_pair (`List[str]` or `List[int]`, *optional*):
                Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
                list of list of strings (words of a batch of examples).
        """

        # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
        padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            verbose=verbose,
            **kwargs,
        )

        return self._encode_plus(
            text=text,
            boxes=boxes,
            text_pair=text_pair,
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
            return_tensors=return_tensors,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            **kwargs,
        )

    def _batch_encode_plus(
        self,
        batch_text_or_text_pairs: Union[List[TextInput], List[TextInputPair], List[PreTokenizedInput]],
        is_pair: bool = None,
        boxes: Optional[List[List[List[int]]]] = None,
        word_labels: Optional[List[List[int]]] = None,
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[str] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
    ) -> BatchEncoding:
        if not isinstance(batch_text_or_text_pairs, list):
            raise TypeError(f"batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})")

        # Set the truncation and padding strategy and restore the initial configuration
        self.set_truncation_and_padding(
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
        )

        if is_pair:
            batch_text_or_text_pairs = [(text.split(), text_pair) for text, text_pair in batch_text_or_text_pairs]

        encodings = self._tokenizer.encode_batch(
            batch_text_or_text_pairs,
            add_special_tokens=add_special_tokens,
            is_pretokenized=True,  # we set this to True as LayoutLMv3 always expects pretokenized inputs
        )

        # Convert each encoding to a dict. The result has nested dimensions corresponding to
        # (batch, overflows, sequence length).
        tokens_and_encodings = [
            self._convert_encoding(
                encoding=encoding,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=True
                if word_labels is not None
                else return_offsets_mapping,  # we use the offsets to create the labels
                return_length=return_length,
                verbose=verbose,
            )
            for encoding in encodings
        ]

        # Convert the output from list[dict] to dict[list] and remove the additional overflows dimension:
        # from (variable) shape (batch, overflows, sequence length) to ~ (batch * overflows, sequence length),
        # where the number of overflows varies with the example in the batch.
        sanitized_tokens = {}
        for key in tokens_and_encodings[0][0].keys():
            stack = [e for item, _ in tokens_and_encodings for e in item[key]]
            sanitized_tokens[key] = stack
        sanitized_encodings = [e for _, item in tokens_and_encodings for e in item]

        # If returning overflowing tokens, we also return a mapping from the batch idx to the original sample
        if return_overflowing_tokens:
            overflow_to_sample_mapping = []
            for i, (toks, _) in enumerate(tokens_and_encodings):
                overflow_to_sample_mapping += [i] * len(toks["input_ids"])
            sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping

        for input_ids in sanitized_tokens["input_ids"]:
            self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose)

        # Create the token-level bounding boxes: special tokens get their dedicated boxes,
        # every other token inherits the box of the word it belongs to.
        token_boxes = []
        for batch_index in range(len(sanitized_tokens["input_ids"])):
            if return_overflowing_tokens:
                original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
            else:
                original_index = batch_index
            token_boxes_example = []
            for id, sequence_id, word_id in zip(
                sanitized_tokens["input_ids"][batch_index],
                sanitized_encodings[batch_index].sequence_ids,
                sanitized_encodings[batch_index].word_ids,
            ):
                if word_id is not None:
                    if is_pair and sequence_id == 0:
                        token_boxes_example.append(self.pad_token_box)
                    else:
                        token_boxes_example.append(boxes[original_index][word_id])
                else:
                    if id == self.cls_token_id:
                        token_boxes_example.append(self.cls_token_box)
                    elif id == self.sep_token_id:
                        token_boxes_example.append(self.sep_token_box)
                    elif id == self.pad_token_id:
                        token_boxes_example.append(self.pad_token_box)
                    else:
                        raise ValueError("Id not recognized")
            token_boxes.append(token_boxes_example)

        sanitized_tokens["bbox"] = token_boxes

        # Optionally, create the labels
        if word_labels is not None:
            labels = []
            for batch_index in range(len(sanitized_tokens["input_ids"])):
                if return_overflowing_tokens:
                    original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
                else:
                    original_index = batch_index
                labels_example = []
                previous_token_empty = False
                for id, offset, word_id in zip(
                    sanitized_tokens["input_ids"][batch_index],
                    sanitized_tokens["offset_mapping"][batch_index],
                    sanitized_encodings[batch_index].word_ids,
                ):
                    if word_id is not None:
                        if self.only_label_first_subword:
                            if offset[0] == 0 and not previous_token_empty:
                                # Use the real label id for the first token of the word, and padding ids
                                # for the remaining tokens
                                labels_example.append(word_labels[original_index][word_id])
                            else:
                                labels_example.append(self.pad_token_label)
                            if offset == (0, 0):
                                previous_token_empty = True
                            else:
                                previous_token_empty = False
                        else:
                            labels_example.append(word_labels[original_index][word_id])
                    else:
                        labels_example.append(self.pad_token_label)
                labels.append(labels_example)

            sanitized_tokens["labels"] = labels

            # Finally, remove the offsets if the user didn't want them
            if not return_offsets_mapping:
                del sanitized_tokens["offset_mapping"]

        return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors)

    def _encode_plus(
        self,
        text: Union[TextInput, PreTokenizedInput],
        text_pair: Optional[PreTokenizedInput] = None,
        boxes: Optional[List[List[int]]] = None,
        word_labels: Optional[List[int]] = None,
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        # Make it a batched input. There are 2 options:
        # 1) only text, in which case text must be a list of str
        # 2) text + text_pair, in which case text = str and text_pair a list of str
        batched_input = [(text, text_pair)] if text_pair else [text]
        batched_boxes = [boxes]
        batched_word_labels = [word_labels] if word_labels is not None else None
        batched_output = self._batch_encode_plus(
            batched_input,
            is_pair=bool(text_pair is not None),
            boxes=batched_boxes,
            word_labels=batched_word_labels,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
            return_tensors=return_tensors,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            **kwargs,
        )

        # If return_tensors is None, we need to unpack the batch dimension.
        # Overflowing tokens are returned as a batch of output, so we keep them in that case.
        if return_tensors is None and not return_overflowing_tokens:
            batched_output = BatchEncoding(
                {
                    key: value[0] if len(value) > 0 and isinstance(value[0], list) else value
                    for key, value in batched_output.items()
                },
                batched_output.encodings,
            )

        self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose)

        return batched_output

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """
        Pad encoded inputs (on left/right and up to predefined length or max length in the batch)

        Args:
            encoded_inputs:
                Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
            max_length: maximum length of the returned list and optionally padding length (see below).
                Will truncate by taking into account the special tokens.
            padding_strategy: PaddingStrategy to use for padding.

                - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
                - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
                - PaddingStrategy.DO_NOT_PAD: Do not pad
                The tokenizer padding sides are defined in self.padding_side:

                    - 'left': pads on the left of the sequences
                    - 'right': pads on the right of the sequences
            pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
                This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
                `>= 7.5` (Volta).
            padding_side:
                The side on which the model should have padding applied. Should be selected between ['right', 'left'].
                Default value is picked from the class attribute of the same name.
            return_attention_mask:
                (optional) Set to False to avoid returning attention mask (default: set to model specifics)
        """
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        required_input = encoded_inputs[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length

        # Initialize the attention mask if not present
        if return_attention_mask and "attention_mask" not in encoded_inputs:
            encoded_inputs["attention_mask"] = [1] * len(required_input)

        if needs_to_be_padded:
            difference = max_length - len(required_input)
            padding_side = padding_side if padding_side is not None else self.padding_side
            if padding_side == "right":
                if return_attention_mask:
                    encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
                if "token_type_ids" in encoded_inputs:
                    encoded_inputs["token_type_ids"] = (
                        encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
                    )
                if "bbox" in encoded_inputs:
                    encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
                if "labels" in encoded_inputs:
                    encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
                if "special_tokens_mask" in encoded_inputs:
                    encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
                encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
            elif padding_side == "left":
                if return_attention_mask:
                    encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
                if "token_type_ids" in encoded_inputs:
                    encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
                        "token_type_ids"
                    ]
                if "bbox" in encoded_inputs:
                    encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
                if "labels" in encoded_inputs:
                    encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
                if "special_tokens_mask" in encoded_inputs:
                    encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
                encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
            else:
                raise ValueError("Invalid padding strategy:" + str(padding_side))

        return encoded_inputs

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. RoBERTa does
        not make use of token type ids, therefore a list of zeros is returned.

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
        Returns:
            `List[int]`: List of zeros.
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]