
"""
Fast tokenization class for LayoutLMv2. It overwrites 2 methods of the slow tokenizer class, namely _batch_encode_plus
and _encode_plus, in which the Rust tokenizer is used.
    N)DictListOptionalTupleUnion)normalizers   )BatchEncodingEncodedInputPaddingStrategyPreTokenizedInput
TensorType	TextInputTextInputPairTruncationStrategy)PreTrainedTokenizerFast)add_end_docstringslogging   )"LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING2LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRINGLayoutLMv2Tokenizerz	vocab.txtztokenizer.json)
vocab_filetokenizer_filec            *           e Zd ZdZeZeZddddddddg d	g d
g d	ddddf fd	Z e	e
e      	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 d7deeeee   ee   f   deeeee   f      deeee      eeee         f   deeee   eee      f      dedeeeef   deeeef   dee   dedee   dee   deeeef      dee   dee   dededed ed!ed"ef(d#       Z e	e
e      	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 d7d$eee   ee   ee   f   d%edeeeee            deeee   eee      f      dedeeeef   deeeef   dee   dedee   dee   deeeef      dee   dee   dededed ed!ed"ef(d&       Zd8ded'ee   ded"ee   fd(Z e	e
e      	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 d7deeef   dee   deeee         deee      dedeeeef   deeeef   dee   dedee   dee   deeeef      dee   dee   dededed ed!ed"ef(d)       Zddddej:                  ej<                  ddddddddddddfd$eee   ee   ee   f   d%edeeeee            deeee         ded*ed+edee   dedee   dee   dee   dee   dee   dededed ed!ed"ef(d,Zddddej:                  ej<                  ddddddddddddfdeeef   dee   deeee         deee      ded*ed+edee   dedee   dee   dee   dee   dee   dededed ed!ed"ef(d-Z dej:                  dddfd.ee!ee"f   ef   dee   d*edee   dee   dee   d"e#fd/Z$d9d0Z%	 d9d1ee   d2eee      d"ee   fd3Z&d9d4ed5ee   d"e'e   fd6Z( xZ)S ):LayoutLMv2TokenizerFasta&  
    Construct a "fast" LayoutLMv2 tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.

    This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
    refer to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            File containing the vocabulary.
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        unk_token (`str`, *optional*, defaults to `"[UNK]"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"[PAD]"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
            The bounding box to use for the special [CLS] token.
        sep_token_box (`List[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`):
            The bounding box to use for the special [SEP] token.
        pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
            The bounding box to use for the special [PAD] token.
        pad_token_label (`int`, *optional*, defaults to -100):
            The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
            CrossEntropyLoss.
        only_label_first_subword (`bool`, *optional*, defaults to `True`):
            Whether or not to only label the first subword, in case word labels are provided.
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
            issue](https://github.com/huggingface/transformers/issues/328)).
        strip_accents (`bool`, *optional*):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for `lowercase` (as in the original LayoutLMv2).
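
    Examples (a minimal usage sketch; the checkpoint name, words, and pixel coordinates below are illustrative
    values, not taken from this file):

    ```python
    >>> from transformers import LayoutLMv2TokenizerFast

    >>> tokenizer = LayoutLMv2TokenizerFast.from_pretrained("microsoft/layoutlmv2-base-uncased")
    >>> words = ["hello", "world"]
    >>> boxes = [[637, 773, 693, 782], [698, 773, 733, 782]]  # one normalized 0-1000 box per word
    >>> encoding = tokenizer(words, boxes=boxes, return_tensors="pt")
    >>> sorted(encoding.keys())
    ['attention_mask', 'bbox', 'input_ids', 'token_type_ids']
    ```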
    """

    vocab_files_names = VOCAB_FILES_NAMES
    slow_tokenizer_class = LayoutLMv2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        cls_token_box=[0, 0, 0, 0],
        sep_token_box=[1000, 1000, 1000, 1000],
        pad_token_box=[0, 0, 0, 0],
        pad_token_label=-100,
        only_label_first_subword=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            cls_token_box=cls_token_box,
            sep_token_box=sep_token_box,
            pad_token_box=pad_token_box,
            pad_token_label=pad_token_label,
            only_label_first_subword=only_label_first_subword,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Keep the backend normalizer in sync with the requested casing/accent options.
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case

        # Additional properties for word-level boxes and labels.
        self.cls_token_box = cls_token_box
        self.sep_token_box = sep_token_box
        self.pad_token_box = pad_token_box
        self.pad_token_label = pad_token_label
        self.only_label_first_subword = only_label_first_subword

    @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def __call__(
        self,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Optional[Union[List[List[int]], List[List[List[int]]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
        sequences with word-level normalized bounding boxes and optional labels.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
                (words of a single example or questions of a batch of examples) or a list of list of strings (batch of
                words).
            text_pair (`List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
                (pretokenized string).
            boxes (`List[List[int]]`, `List[List[List[int]]]`):
                Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
            word_labels (`List[int]`, `List[List[int]]`, *optional*):
                Word-level integer labels (for token classification tasks such as FUNSD, CORD).
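
        Example (an illustrative sketch of batched token-classification input, assuming `tokenizer` was created
        as in the class-level example; the words, boxes, and labels are made-up values):

        ```python
        >>> words = [["hello", "world"], ["my", "name", "is", "niels"]]
        >>> boxes = [[[1, 2, 3, 4], [5, 6, 7, 8]], [[1, 2, 3, 4], [5, 6, 7, 8], [1, 2, 3, 4], [5, 6, 7, 8]]]
        >>> word_labels = [[0, 1], [2, 3, 4, 5]]
        >>> encoding = tokenizer(words, boxes=boxes, word_labels=word_labels, padding=True, return_tensors="pt")
        ```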
        """

        def _is_valid_text_input(t):
            if isinstance(t, str):
                # Strings are fine
                return True
            elif isinstance(t, (list, tuple)):
                # Lists are fine as long as they are...
                if len(t) == 0:
                    # ... empty
                    return True
                elif isinstance(t[0], str):
                    # ... a list of strings
                    return True
                elif isinstance(t[0], (list, tuple)):
                    # ... a list with an empty list or a list of strings
                    return len(t[0]) == 0 or isinstance(t[0][0], str)
                else:
                    return False
            else:
                return False

        if text_pair is not None:
            # in case text + text_pair are provided, text = questions, text_pair = words
            if not _is_valid_text_input(text):
                raise ValueError("text input must be of type `str` (single example) or `List[str]` (batch of examples).")
            if not isinstance(text_pair, (list, tuple)):
                raise ValueError(
                    "Words must be of type `List[str]` (single pretokenized example), "
                    "or `List[List[str]]` (batch of pretokenized examples)."
                )
        else:
            # in case only text is provided => must be words
            if not isinstance(text, (list, tuple)):
                raise ValueError(
                    "Words must be of type `List[str]` (single pretokenized example), "
                    "or `List[List[str]]` (batch of pretokenized examples)."
                )

        if text_pair is not None:
            is_batched = isinstance(text, (list, tuple))
        else:
            is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))

        words = text if text_pair is None else text_pair
        if boxes is None:
            raise ValueError("You must provide corresponding bounding boxes")
        if is_batched:
            if len(words) != len(boxes):
                raise ValueError("You must provide words and boxes for an equal amount of examples")
            for words_example, boxes_example in zip(words, boxes):
                if len(words_example) != len(boxes_example):
                    raise ValueError("You must provide as many words as there are bounding boxes")
        else:
            if len(words) != len(boxes):
                raise ValueError("You must provide as many words as there are bounding boxes")

        if is_batched:
            if text_pair is not None and len(text) != len(text_pair):
                raise ValueError(
                    f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
                    f" {len(text_pair)}."
                )
            batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
            is_pair = bool(text_pair is not None)
            return self.batch_encode_plus(
                batch_text_or_text_pairs=batch_text_or_text_pairs,
                is_pair=is_pair,
                boxes=boxes,
                word_labels=word_labels,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                padding_side=padding_side,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                **kwargs,
            )
        else:
            return self.encode_plus(
                text=text,
                text_pair=text_pair,
                boxes=boxes,
                word_labels=word_labels,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                padding_side=padding_side,
                return_tensors=return_tensors,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                **kwargs,
            )

    @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def batch_encode_plus(
        self,
        batch_text_or_text_pairs: Union[List[TextInput], List[TextInputPair], List[PreTokenizedInput]],
        is_pair: bool = None,
        boxes: Optional[List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
        padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            verbose=verbose,
            **kwargs,
        )

        return self._batch_encode_plus(
            batch_text_or_text_pairs=batch_text_or_text_pairs,
            is_pair=is_pair,
            boxes=boxes,
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
            return_tensors=return_tensors,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            **kwargs,
        )

    def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]:
        batched_input = [(text, pair)] if pair else [text]
        encodings = self._tokenizer.encode_batch(
            batched_input, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs
        )

        return encodings[0].tokens

    @add_end_docstrings(LAYOUTLMV2_ENCODE_KWARGS_DOCSTRING, LAYOUTLMV2_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def encode_plus(
        self,
        text: Union[TextInput, PreTokenizedInput],
        text_pair: Optional[PreTokenizedInput] = None,
        boxes: Optional[List[List[int]]] = None,
        word_labels: Optional[List[int]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Tokenize and prepare for the model a sequence or a pair of sequences. .. warning:: This method is deprecated,
        `__call__` should be used instead.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
            text_pair (`List[str]` or `List[int]`, *optional*):
                Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
                list of list of strings (words of a batch of examples).
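
        Example (an illustrative single-example sketch, assuming `tokenizer` was created as in the class-level
        example; prefer calling the tokenizer directly, as noted above):

        ```python
        >>> words = ["hello", "world"]
        >>> boxes = [[637, 773, 693, 782], [698, 773, 733, 782]]
        >>> encoding = tokenizer.encode_plus(words, boxes=boxes)
        ```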
        """
        # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
        padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            verbose=verbose,
            **kwargs,
        )

        return self._encode_plus(
            text=text,
            text_pair=text_pair,
            boxes=boxes,
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
            return_tensors=return_tensors,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            **kwargs,
        )

    def _batch_encode_plus(
        self,
        batch_text_or_text_pairs: Union[List[TextInput], List[TextInputPair], List[PreTokenizedInput]],
        is_pair: bool = None,
        boxes: Optional[List[List[List[int]]]] = None,
        word_labels: Optional[List[List[int]]] = None,
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[str] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        if not isinstance(batch_text_or_text_pairs, list):
            raise TypeError(f"batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})")

        # Set the truncation and padding strategy and restore the initial configuration
        self.set_truncation_and_padding(
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
        )

        if is_pair:
            batch_text_or_text_pairs = [(text.split(), text_pair) for text, text_pair in batch_text_or_text_pairs]

        encodings = self._tokenizer.encode_batch(
            batch_text_or_text_pairs,
            add_special_tokens=add_special_tokens,
            is_pretokenized=True,  # we set this to True as LayoutLMv2 always expects pretokenized inputs
        )

        # Convert encoding to dict
        # `Tokens` has type: Tuple[
        #                       List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]],
        #                       List[EncodingFast]
        #                    ]
        # with nested dimensions corresponding to batch, overflows, sequence length
        tokens_and_encodings = [
            self._convert_encoding(
                encoding=encoding,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                # we need offsets to create word-level labels below
                return_offsets_mapping=True if word_labels is not None else return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
            )
            for encoding in encodings
        ]

        # Convert the output to have dict[list] from list[dict] and remove the additional overflows dimension
        sanitized_tokens = {}
        for key in tokens_and_encodings[0][0].keys():
            stack = [e for item, _ in tokens_and_encodings for e in item[key]]
            sanitized_tokens[key] = stack
        sanitized_encodings = [e for _, item in tokens_and_encodings for e in item]

        # If returning overflowing tokens, we need to return a mapping
        # from the batch idx to the original sample
        if return_overflowing_tokens:
            overflow_to_sample_mapping = []
            for i, (toks, _) in enumerate(tokens_and_encodings):
                overflow_to_sample_mapping += [i] * len(toks["input_ids"])
            sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping

        for input_ids in sanitized_tokens["input_ids"]:
            self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose)

        # create the token-level bounding boxes
        token_boxes = []
        for batch_index in range(len(sanitized_tokens["input_ids"])):
            if return_overflowing_tokens:
                original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
            else:
                original_index = batch_index
            token_boxes_example = []
            for id, sequence_id, word_id in zip(
                sanitized_tokens["input_ids"][batch_index],
                sanitized_encodings[batch_index].sequence_ids,
                sanitized_encodings[batch_index].word_ids,
            ):
                if word_id is not None:
                    if is_pair and sequence_id == 0:
                        token_boxes_example.append(self.pad_token_box)
                    else:
                        token_boxes_example.append(boxes[original_index][word_id])
                else:
                    if id == self.cls_token_id:
                        token_boxes_example.append(self.cls_token_box)
                    elif id == self.sep_token_id:
                        token_boxes_example.append(self.sep_token_box)
                    elif id == self.pad_token_id:
                        token_boxes_example.append(self.pad_token_box)
                    else:
                        raise ValueError("Id not recognized")
            token_boxes.append(token_boxes_example)

        sanitized_tokens["bbox"] = token_boxes

        # optionally, create the labels
        if word_labels is not None:
            labels = []
            for batch_index in range(len(sanitized_tokens["input_ids"])):
                if return_overflowing_tokens:
                    original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
                else:
                    original_index = batch_index
                labels_example = []
                for id, offset, word_id in zip(
                    sanitized_tokens["input_ids"][batch_index],
                    sanitized_tokens["offset_mapping"][batch_index],
                    sanitized_encodings[batch_index].word_ids,
                ):
                    if word_id is not None:
                        if self.only_label_first_subword:
                            if offset[0] == 0:
                                # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                                labels_example.append(word_labels[original_index][word_id])
                            else:
                                labels_example.append(self.pad_token_label)
                        else:
                            labels_example.append(word_labels[original_index][word_id])
                    else:
                        labels_example.append(self.pad_token_label)
                labels.append(labels_example)

            sanitized_tokens["labels"] = labels
            # finally, remove offsets if the user didn't want them
            if not return_offsets_mapping:
                del sanitized_tokens["offset_mapping"]

        return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors)

    def _encode_plus(
        self,
        text: Union[TextInput, PreTokenizedInput],
        text_pair: Optional[PreTokenizedInput] = None,
        boxes: Optional[List[List[int]]] = None,
        word_labels: Optional[List[int]] = None,
        add_special_tokens: bool = True,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        # make it a batched input
        # 2 options:
        # 1) only text, in case text must be a list of str
        # 2) text + text_pair, in which case text = str and text_pair a list of str
        batched_input = [(text, text_pair)] if text_pair else [text]
        batched_boxes = [boxes]
        batched_word_labels = [word_labels] if word_labels is not None else None
        batched_output = self._batch_encode_plus(
            batched_input,
            is_pair=bool(text_pair is not None),
            boxes=batched_boxes,
            word_labels=batched_word_labels,
            add_special_tokens=add_special_tokens,
            padding_strategy=padding_strategy,
            truncation_strategy=truncation_strategy,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            padding_side=padding_side,
            return_tensors=return_tensors,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            **kwargs,
        )

        # If return_tensors is None, we need to remove the batch dimension we added above.
        # Overflowing tokens are returned as a batch of output, so we keep them in that case.
        if return_tensors is None and not return_overflowing_tokens:
            batched_output = BatchEncoding(
                {
                    key: value[0] if len(value) > 0 and isinstance(value[0], list) else value
                    for key, value in batched_output.items()
                },
                batched_output.encodings,
            )

        self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose)

        return batched_output

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """
        Pad encoded inputs (on left/right and up to predefined length or max length in the batch)

        Args:
            encoded_inputs:
                Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
            max_length: maximum length of the returned list and optionally padding length (see below).
                Will truncate by taking into account the special tokens.
            padding_strategy: PaddingStrategy to use for padding.

                - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
                - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
                - PaddingStrategy.DO_NOT_PAD: Do not pad
                The tokenizer padding sides are defined in self.padding_side:

                    - 'left': pads on the left of the sequences
                    - 'right': pads on the right of the sequences
            pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
                This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
                `>= 7.5` (Volta).
            padding_side:
                The side on which the model should have padding applied. Should be selected between ['right', 'left'].
                Default value is picked from the class attribute of the same name.
            return_attention_mask:
                (optional) Set to False to avoid returning attention mask (default: set to model specifics)
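
        Example (illustrative arithmetic only: with `max_length=9` and `pad_to_multiple_of=8`, the effective
        padding target becomes `((9 // 8) + 1) * 8 == 16`).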
        """
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        required_input = encoded_inputs[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length

        # Initialize attention mask if not present.
        if return_attention_mask and "attention_mask" not in encoded_inputs:
            encoded_inputs["attention_mask"] = [1] * len(required_input)

        if needs_to_be_padded:
            difference = max_length - len(required_input)
            padding_side = padding_side if padding_side is not None else self.padding_side
            if padding_side == "right":
                if return_attention_mask:
                    encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
                if "token_type_ids" in encoded_inputs:
                    encoded_inputs["token_type_ids"] = (
                        encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
                    )
                if "bbox" in encoded_inputs:
                    encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
                if "labels" in encoded_inputs:
                    encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
                if "special_tokens_mask" in encoded_inputs:
                    encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
                encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
            elif padding_side == "left":
                if return_attention_mask:
                    encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
                if "token_type_ids" in encoded_inputs:
                    encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
                        "token_type_ids"
                    ]
                if "bbox" in encoded_inputs:
                    encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
                if "labels" in encoded_inputs:
                    encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
                if "special_tokens_mask" in encoded_inputs:
                    encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
                encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
            else:
                raise ValueError("Invalid padding strategy:" + str(padding_side))

        return encoded_inputs

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. A BERT sequence has the following format:

        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`

        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
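
        Example (illustrative; assumes the standard BERT vocabulary where `[CLS]` is 101 and `[SEP]` is 102):

        ```python
        >>> tokenizer.build_inputs_with_special_tokens([7592, 2088])
        [101, 7592, 2088, 102]
        ```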
        """
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
        pair mask has the following format:

        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
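
        Example (illustrative, using dummy single-id sequences: `[CLS] + token_ids_0 + [SEP]` gets type 0,
        `token_ids_1 + [SEP]` gets type 1):

        ```python
        >>> tokenizer.create_token_type_ids_from_sequences([7592], [2088])
        [0, 0, 0, 1, 1]
        ```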
        """
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)