
import types
import warnings
from typing import List, Optional, Tuple, Union

import numpy as np

from ..models.bert.tokenization_bert import BasicTokenizer
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import ArgumentHandler, ChunkPipeline, Dataset, build_pipeline_init_args


if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES


class TokenClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for token classification.
    """

    def __call__(self, inputs: Union[str, List[str]], **kwargs):
        if inputs is not None and isinstance(inputs, (list, tuple)) and len(inputs) > 0:
            inputs = list(inputs)
            batch_size = len(inputs)
        elif isinstance(inputs, str):
            inputs = [inputs]
            batch_size = 1
        elif Dataset is not None and isinstance(inputs, Dataset) or isinstance(inputs, types.GeneratorType):
            return inputs, None
        else:
            raise ValueError("At least one input is required.")

        offset_mapping = kwargs.get("offset_mapping")
        if offset_mapping:
            if isinstance(offset_mapping, list) and isinstance(offset_mapping[0], tuple):
                offset_mapping = [offset_mapping]
            if len(offset_mapping) != batch_size:
                raise ValueError("offset_mapping should have the same batch size as the input")
        return inputs, offset_mapping


class AggregationStrategy(ExplicitEnum):
    """All the valid aggregation strategies for TokenClassificationPipeline"""

    NONE = "none"
    SIMPLE = "simple"
    FIRST = "first"
    AVERAGE = "average"
    MAX = "max"


@add_end_docstrings(
    build_pipeline_init_args(has_tokenizer=True),
    r"""
        ignore_labels (`List[str]`, defaults to `["O"]`):
            A list of labels to ignore.
        grouped_entities (`bool`, *optional*, defaults to `False`):
            DEPRECATED, use `aggregation_strategy` instead. Whether or not to group the tokens corresponding to the
            same entity together in the predictions.
        stride (`int`, *optional*):
            If stride is provided, the pipeline is applied to the whole text. The text is split into chunks of size
            model_max_length. Works only with fast tokenizers and `aggregation_strategy` different from `NONE`. The
            value of this argument defines the number of overlapping tokens between chunks. In other words, the model
            will shift forward by `tokenizer.model_max_length - stride` tokens each step.
        aggregation_strategy (`str`, *optional*, defaults to `"none"`):
            The strategy to fuse (or not) tokens based on the model prediction.

                - "none" : Will simply not do any aggregation and simply return raw results from the model
                - "simple" : Will attempt to group entities following the default schema. (A, B-TAG), (B, I-TAG), (C,
                  I-TAG), (D, B-TAG2) (E, B-TAG2) will end up being [{"word": ABC, "entity": "TAG"}, {"word": "D",
                  "entity": "TAG2"}, {"word": "E", "entity": "TAG2"}] Notice that two consecutive B tags will end up as
                  different entities. On word based languages, we might end up splitting words undesirably : Imagine
                  Microsoft being tagged as [{"word": "Micro", "entity": "ENTERPRISE"}, {"word": "soft", "entity":
                  "NAME"}]. Look for FIRST, MAX, AVERAGE for ways to mitigate that and disambiguate words (on languages
                  that support that meaning, which is basically tokens separated by a space). These mitigations will
                  only work on real words, "New york" might still be tagged with two different entities.
                - "first" : (works only on word based models) Will use the `SIMPLE` strategy except that words, cannot
                  end up with different tags. Words will simply use the tag of the first token of the word when there
                  is ambiguity.
                - "average" : (works only on word based models) Will use the `SIMPLE` strategy except that words,
                  cannot end up with different tags. scores will be averaged first across tokens, and then the maximum
                  label is applied.
                - "max" : (works only on word based models) Will use the `SIMPLE` strategy except that words, cannot
                  end up with different tags. Word entity will simply be the token with the maximum score.c                       e Zd ZdZdZ e       f fd	Z	 	 	 	 	 	 d dee   dee   dee	   dee
eeef         d	ee   f
d
Zdeee
e   f   f fdZd!dZd Ze	j&                  dfdZd Zdedej.                  dej.                  dee
eeef         dej.                  de	de
e   fdZde
e   de	de
e   fdZde
e   de	defdZde
e   de	de
e   fdZde
e   defdZdedeeef   fdZde
e   de
e   fdZ xZ S )"TokenClassificationPipelineuv	  
    Named Entity Recognition pipeline using any `ModelForTokenClassification`. See the [named entity recognition
    examples](../task_summary#named-entity-recognition) for more information.

    Example:

    ```python
    >>> from transformers import pipeline

    >>> token_classifier = pipeline(model="Jean-Baptiste/camembert-ner", aggregation_strategy="simple")
    >>> sentence = "Je m'appelle jean-baptiste et je vis à montréal"
    >>> tokens = token_classifier(sentence)
    >>> tokens
    [{'entity_group': 'PER', 'score': 0.9931, 'word': 'jean-baptiste', 'start': 12, 'end': 26}, {'entity_group': 'LOC', 'score': 0.998, 'word': 'montréal', 'start': 38, 'end': 47}]

    >>> token = tokens[0]
    >>> # Start and end provide an easy way to highlight words in the original text.
    >>> sentence[token["start"] : token["end"]]
    ' jean-baptiste'

    >>> # Some models use the same idea to do part of speech.
    >>> syntaxer = pipeline(model="vblagoje/bert-english-uncased-finetuned-pos", aggregation_strategy="simple")
    >>> syntaxer("My name is Sarah and I live in London")
    [{'entity_group': 'PRON', 'score': 0.999, 'word': 'my', 'start': 0, 'end': 2}, {'entity_group': 'NOUN', 'score': 0.997, 'word': 'name', 'start': 3, 'end': 7}, {'entity_group': 'AUX', 'score': 0.994, 'word': 'is', 'start': 8, 'end': 10}, {'entity_group': 'PROPN', 'score': 0.999, 'word': 'sarah', 'start': 11, 'end': 16}, {'entity_group': 'CCONJ', 'score': 0.999, 'word': 'and', 'start': 17, 'end': 20}, {'entity_group': 'PRON', 'score': 0.999, 'word': 'i', 'start': 21, 'end': 22}, {'entity_group': 'VERB', 'score': 0.998, 'word': 'live', 'start': 23, 'end': 27}, {'entity_group': 'ADP', 'score': 0.999, 'word': 'in', 'start': 28, 'end': 30}, {'entity_group': 'PROPN', 'score': 0.999, 'word': 'london', 'start': 31, 'end': 37}]
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)

    This token recognition pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"ner"` (for predicting the classes of tokens in a sequence: person, organisation, location or miscellaneous).

    The models that this pipeline can use are models that have been fine-tuned on a token classification task. See the
    up-to-date list of available models on
    [huggingface.co/models](https://huggingface.co/models?filter=token-classification).
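
    If the input is longer than the model's maximum length, a fast tokenizer combined with the `stride`
    argument lets the pipeline process the whole text in overlapping chunks (a sketch, not a doctest: the
    checkpoint id is only illustrative, `some_long_document` is a hypothetical variable, and results depend
    on the model):

    ```python
    from transformers import pipeline

    long_classifier = pipeline(
        model="dslim/bert-base-NER", aggregation_strategy="first", stride=128
    )
    entities = long_classifier(some_long_document)  # chunks overlap by 128 tokens
    ```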
    """

    default_input_names = "sequences"

    def __init__(self, args_parser=TokenClassificationArgumentHandler(), *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
            if self.framework == "tf"
            else MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
        )

        self._basic_tokenizer = BasicTokenizer(do_lower_case=False)
        self._args_parser = args_parser

    def _sanitize_parameters(
        self,
        ignore_labels=None,
        grouped_entities: Optional[bool] = None,
        ignore_subwords: Optional[bool] = None,
        aggregation_strategy: Optional[AggregationStrategy] = None,
        offset_mapping: Optional[List[Tuple[int, int]]] = None,
        stride: Optional[int] = None,
    ):
        preprocess_params = {}
        if offset_mapping is not None:
            preprocess_params["offset_mapping"] = offset_mapping

        postprocess_params = {}
        if grouped_entities is not None or ignore_subwords is not None:
            if grouped_entities and ignore_subwords:
                aggregation_strategy = AggregationStrategy.FIRST
            elif grouped_entities and not ignore_subwords:
                aggregation_strategy = AggregationStrategy.SIMPLE
            else:
                aggregation_strategy = AggregationStrategy.NONE

            if grouped_entities is not None:
                warnings.warn(
                    "`grouped_entities` is deprecated and will be removed in version v5.0.0, defaulted to"
                    f' `aggregation_strategy="{aggregation_strategy}"` instead.'
                )
            if ignore_subwords is not None:
                warnings.warn(
                    "`ignore_subwords` is deprecated and will be removed in version v5.0.0, defaulted to"
                    f' `aggregation_strategy="{aggregation_strategy}"` instead.'
                )

        if aggregation_strategy is not None:
            if isinstance(aggregation_strategy, str):
                aggregation_strategy = AggregationStrategy[aggregation_strategy.upper()]
            if (
                aggregation_strategy
                in {AggregationStrategy.FIRST, AggregationStrategy.MAX, AggregationStrategy.AVERAGE}
                and not self.tokenizer.is_fast
            ):
                raise ValueError(
                    "Slow tokenizers cannot handle subwords. Please set the `aggregation_strategy` option"
                    ' to `"simple"` or use a fast tokenizer.'
                )
            postprocess_params["aggregation_strategy"] = aggregation_strategy
        if ignore_labels is not None:
            postprocess_params["ignore_labels"] = ignore_labels
        if stride is not None:
            if stride >= self.tokenizer.model_max_length:
                raise ValueError(
                    "`stride` must be less than `tokenizer.model_max_length` (or even lower if the tokenizer adds special tokens)"
                )
            if aggregation_strategy == AggregationStrategy.NONE:
                raise ValueError(
                    "`stride` was provided to process all the text but `aggregation_strategy="
                    f'"{aggregation_strategy}"`, please select another one instead.'
                )
            else:
                if self.tokenizer.is_fast:
                    tokenizer_params = {
                        "return_overflowing_tokens": True,
                        "padding": True,
                        "stride": stride,
                    }
                    preprocess_params["tokenizer_params"] = tokenizer_params
                else:
                    raise ValueError(
                        "`stride` was provided to process all the text but you're using a slow tokenizer."
                        " Please use a fast tokenizer."
                    )
        return preprocess_params, {}, postprocess_params

    def __call__(self, inputs: Union[str, List[str]], **kwargs):
        """
        Classify each token of the text(s) given as inputs.

        Args:
            inputs (`str` or `List[str]`):
                One or several texts (or one list of texts) for token classification.

        Return:
            A list or a list of lists of `dict`: Each result comes as a list of dictionaries (one for each token in the
            corresponding input, or each entity if this pipeline was instantiated with an aggregation_strategy) with
            the following keys:

            - **word** (`str`) -- The token/word classified. This is obtained by decoding the selected tokens. If you
              want to have the exact string in the original sentence, use `start` and `end`.
            - **score** (`float`) -- The corresponding probability for `entity`.
            - **entity** (`str`) -- The entity predicted for that token/word (it is named *entity_group* when
              *aggregation_strategy* is not `"none"`).
            - **index** (`int`, only present when `aggregation_strategy="none"`) -- The index of the corresponding
              token in the sentence.
            - **start** (`int`, *optional*) -- The index of the start of the corresponding entity in the sentence. Only
              exists if the offsets are available within the tokenizer
            - **end** (`int`, *optional*) -- The index of the end of the corresponding entity in the sentence. Only
              exists if the offsets are available within the tokenizer
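
        Example (a sketch: the default model behind the `"ner"` task may change over time, and the score
        values, elided here, depend on it):

        ```python
        >>> from transformers import pipeline

        >>> token_classifier = pipeline(task="ner")
        >>> token_classifier("My name is Wolfgang and I live in Berlin")
        [{'entity': 'I-PER', 'score': ..., 'index': 4, 'word': 'Wolfgang', 'start': 11, 'end': 19}, {'entity': 'I-LOC', 'score': ..., 'index': 9, 'word': 'Berlin', 'start': 34, 'end': 40}]
        ```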
        """

        _inputs, offset_mapping = self._args_parser(inputs, **kwargs)
        if offset_mapping:
            kwargs["offset_mapping"] = offset_mapping

        return super().__call__(inputs, **kwargs)

    def preprocess(self, sentence, offset_mapping=None, **preprocess_params):
        tokenizer_params = preprocess_params.pop("tokenizer_params", {})
        truncation = True if self.tokenizer.model_max_length and self.tokenizer.model_max_length > 0 else False
        inputs = self.tokenizer(
            sentence,
            return_tensors=self.framework,
            truncation=truncation,
            return_special_tokens_mask=True,
            return_offsets_mapping=self.tokenizer.is_fast,
            **tokenizer_params,
        )
        inputs.pop("overflow_to_sample_mapping", None)
        num_chunks = len(inputs["input_ids"])

        for i in range(num_chunks):
            if self.framework == "tf":
                model_inputs = {k: tf.expand_dims(v[i], 0) for k, v in inputs.items()}
            else:
                model_inputs = {k: v[i].unsqueeze(0) for k, v in inputs.items()}
            if offset_mapping is not None:
                model_inputs["offset_mapping"] = offset_mapping
            model_inputs["sentence"] = sentence if i == 0 else None
            model_inputs["is_last"] = i == num_chunks - 1

            yield model_inputs

    def _forward(self, model_inputs):
        # Forward
        special_tokens_mask = model_inputs.pop("special_tokens_mask")
        offset_mapping = model_inputs.pop("offset_mapping", None)
        sentence = model_inputs.pop("sentence")
        is_last = model_inputs.pop("is_last")
        if self.framework == "tf":
            logits = self.model(**model_inputs)[0]
        else:
            output = self.model(**model_inputs)
            logits = output["logits"] if isinstance(output, dict) else output[0]

        return {
            "logits": logits,
            "special_tokens_mask": special_tokens_mask,
            "offset_mapping": offset_mapping,
            "sentence": sentence,
            "is_last": is_last,
            **model_inputs,
        }

    def postprocess(self, all_outputs, aggregation_strategy=AggregationStrategy.NONE, ignore_labels=None):
        if ignore_labels is None:
            ignore_labels = ["O"]
        all_entities = []
        for model_outputs in all_outputs:
            if self.framework == "pt" and model_outputs["logits"][0].dtype in (torch.bfloat16, torch.float16):
                logits = model_outputs["logits"][0].to(torch.float32).numpy()
            else:
                logits = model_outputs["logits"][0].numpy()
            sentence = all_outputs[0]["sentence"]
            input_ids = model_outputs["input_ids"][0]
            offset_mapping = (
                model_outputs["offset_mapping"][0] if model_outputs["offset_mapping"] is not None else None
            )
            special_tokens_mask = model_outputs["special_tokens_mask"][0].numpy()

            # Numerically stable softmax over the label dimension
            maxes = np.max(logits, axis=-1, keepdims=True)
            shifted_exp = np.exp(logits - maxes)
            scores = shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)

            if self.framework == "tf":
                input_ids = input_ids.numpy()
                offset_mapping = offset_mapping.numpy() if offset_mapping is not None else None

            pre_entities = self.gather_pre_entities(
                sentence, input_ids, scores, offset_mapping, special_tokens_mask, aggregation_strategy
            )
            grouped_entities = self.aggregate(pre_entities, aggregation_strategy)
            # Filter anything that is in ignore_labels
            entities = [
                entity
                for entity in grouped_entities
                if entity.get("entity", None) not in ignore_labels
                and entity.get("entity_group", None) not in ignore_labels
            ]
            all_entities.extend(entities)
        num_chunks = len(all_outputs)
        if num_chunks > 1:
            all_entities = self.aggregate_overlapping_entities(all_entities)
        return all_entities

    def aggregate_overlapping_entities(self, entities):
        if len(entities) == 0:
            return entities
        entities = sorted(entities, key=lambda x: x["start"])
        aggregated_entities = []
        previous_entity = entities[0]
        for entity in entities:
            if previous_entity["start"] <= entity["start"] < previous_entity["end"]:
                # Overlap: keep the longer span, breaking ties by score
                current_length = entity["end"] - entity["start"]
                previous_length = previous_entity["end"] - previous_entity["start"]
                if current_length > previous_length:
                    previous_entity = entity
                elif current_length == previous_length and entity["score"] > previous_entity["score"]:
                    previous_entity = entity
            else:
                aggregated_entities.append(previous_entity)
                previous_entity = entity
        aggregated_entities.append(previous_entity)
        return aggregated_entities

    def gather_pre_entities(
        self,
        sentence: str,
        input_ids: np.ndarray,
        scores: np.ndarray,
        offset_mapping: Optional[List[Tuple[int, int]]],
        special_tokens_mask: np.ndarray,
        aggregation_strategy: AggregationStrategy,
    ) -> List[dict]:
        """Fuse various numpy arrays into dicts with all the information needed for aggregation"""
        pre_entities = []
        for idx, token_scores in enumerate(scores):
            # Filter special_tokens
            if special_tokens_mask[idx]:
                continue

            word = self.tokenizer.convert_ids_to_tokens(int(input_ids[idx]))
            if offset_mapping is not None:
                start_ind, end_ind = offset_mapping[idx]
                if not isinstance(start_ind, int):
                    if self.framework == "pt":
                        start_ind = start_ind.item()
                        end_ind = end_ind.item()
                word_ref = sentence[start_ind:end_ind]
                if getattr(self.tokenizer, "_tokenizer", None) and getattr(
                    self.tokenizer._tokenizer.model, "continuing_subword_prefix", None
                ):
                    # This is a BPE, word-aware tokenizer, there is a correct way to fuse tokens
                    is_subword = len(word) != len(word_ref)
                else:
                    # This is a fallback heuristic. It will most likely fail on any kind of text + punctuation
                    # mixture that will be considered a "word". Non word-aware models cannot do better than
                    # this, unfortunately.
                    if aggregation_strategy in {
                        AggregationStrategy.FIRST,
                        AggregationStrategy.AVERAGE,
                        AggregationStrategy.MAX,
                    }:
                        warnings.warn(
                            "Tokenizer does not support real words, using fallback heuristic",
                            UserWarning,
                        )
                    is_subword = start_ind > 0 and " " not in sentence[start_ind - 1 : start_ind + 1]

                if int(input_ids[idx]) == self.tokenizer.unk_token_id:
                    word = word_ref
                    is_subword = False
            else:
                start_ind = None
                end_ind = None
                is_subword = False

            pre_entity = {
                "word": word,
                "scores": token_scores,
                "start": start_ind,
                "end": end_ind,
                "index": idx,
                "is_subword": is_subword,
            }
            pre_entities.append(pre_entity)
        return pre_entities

    def aggregate(self, pre_entities: List[dict], aggregation_strategy: AggregationStrategy) -> List[dict]:
        if aggregation_strategy in {AggregationStrategy.NONE, AggregationStrategy.SIMPLE}:
            entities = []
            for pre_entity in pre_entities:
                entity_idx = pre_entity["scores"].argmax()
                score = pre_entity["scores"][entity_idx]
                entity = {
                    "entity": self.model.config.id2label[entity_idx],
                    "score": score,
                    "index": pre_entity["index"],
                    "word": pre_entity["word"],
                    "start": pre_entity["start"],
                    "end": pre_entity["end"],
                }
                entities.append(entity)
        else:
            entities = self.aggregate_words(pre_entities, aggregation_strategy)

        if aggregation_strategy == AggregationStrategy.NONE:
            return entities
        return self.group_entities(entities)

    def aggregate_word(self, entities: List[dict], aggregation_strategy: AggregationStrategy) -> dict:
        word = self.tokenizer.convert_tokens_to_string([entity["word"] for entity in entities])
        if aggregation_strategy == AggregationStrategy.FIRST:
            scores = entities[0]["scores"]
            idx = scores.argmax()
            score = scores[idx]
            entity = self.model.config.id2label[idx]
        elif aggregation_strategy == AggregationStrategy.MAX:
            max_entity = max(entities, key=lambda entity: entity["scores"].max())
            scores = max_entity["scores"]
            idx = scores.argmax()
            score = scores[idx]
            entity = self.model.config.id2label[idx]
        elif aggregation_strategy == AggregationStrategy.AVERAGE:
            scores = np.stack([entity["scores"] for entity in entities])
            average_scores = np.nanmean(scores, axis=0)
            entity_idx = average_scores.argmax()
            entity = self.model.config.id2label[entity_idx]
            score = average_scores[entity_idx]
        else:
            raise ValueError("Invalid aggregation_strategy")
        new_entity = {
            "entity": entity,
            "score": score,
            "word": word,
            "start": entities[0]["start"],
            "end": entities[-1]["end"],
        }
        return new_entity

    def aggregate_words(self, entities: List[dict], aggregation_strategy: AggregationStrategy) -> List[dict]:
        """
        Override tokens from a given word that disagree to force agreement on word boundaries.

        Example: micro|soft| com|pany| B-ENT I-NAME I-ENT I-ENT will be rewritten with first strategy as microsoft|
        company| B-ENT I-ENT
        """
        if aggregation_strategy in {
            AggregationStrategy.NONE,
            AggregationStrategy.SIMPLE,
        }:
            raise ValueError("NONE and SIMPLE strategies are invalid for word aggregation")

        word_entities = []
        word_group = None
        for entity in entities:
            if word_group is None:
                word_group = [entity]
            elif entity["is_subword"]:
                word_group.append(entity)
            else:
                word_entities.append(self.aggregate_word(word_group, aggregation_strategy))
                word_group = [entity]
        # Last item
        if word_group is not None:
            word_entities.append(self.aggregate_word(word_group, aggregation_strategy))
        return word_entities

    def group_sub_entities(self, entities: List[dict]) -> dict:
        """
        Group together the adjacent tokens with the same entity predicted.

        Args:
            entities (`List[dict]`): The entities predicted by the pipeline.
        """
        # Get the first entity in the entity group
        entity = entities[0]["entity"].split("-", 1)[-1]
        scores = np.nanmean([entity["score"] for entity in entities])
        tokens = [entity["word"] for entity in entities]

        entity_group = {
            "entity_group": entity,
            "score": np.mean(scores),
            "word": self.tokenizer.convert_tokens_to_string(tokens),
            "start": entities[0]["start"],
            "end": entities[-1]["end"],
        }
        return entity_group

    def get_tag(self, entity_name: str) -> Tuple[str, str]:
        if entity_name.startswith("B-"):
            bi = "B"
            tag = entity_name[2:]
        elif entity_name.startswith("I-"):
            bi = "I"
            tag = entity_name[2:]
        else:
            # It's not in B-, I- format
            # Default to I- for continuation.
            bi = "I"
            tag = entity_name
        return bi, tag

    def group_entities(self, entities: List[dict]) -> List[dict]:
        """
        Find and group together the adjacent tokens with the same entity predicted.

        Args:
            entities (`List[dict]`): The entities predicted by the pipeline.
        """

        entity_groups = []
        entity_group_disagg = []

        for entity in entities:
            if not entity_group_disagg:
                entity_group_disagg.append(entity)
                continue

            # If the current entity is similar and adjacent to the previous entity,
            # append it to the disaggregated entity group.
            # The split is meant to account for the "B" and "I" prefixes.
            # Two entities should not be merged if both are B-type.
            bi, tag = self.get_tag(entity["entity"])
            last_bi, last_tag = self.get_tag(entity_group_disagg[-1]["entity"])

            if tag == last_tag and bi != "B":
                # Modify subword type to be previous_type
                entity_group_disagg.append(entity)
            else:
                # If the current entity is different from the previous entity,
                # aggregate the disaggregated entity group
                entity_groups.append(self.group_sub_entities(entity_group_disagg))
                entity_group_disagg = [entity]
        if entity_group_disagg:
            # it's the last entity, add it to the entity groups
            entity_groups.append(self.group_sub_entities(entity_group_disagg))

        return entity_groups


NerPipeline = TokenClassificationPipeline
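

# A minimal usage sketch of the backwards-compatible alias above, kept as comments so that
# importing this module stays side-effect free (the checkpoint id is only illustrative):
#
#     from transformers import pipeline
#
#     token_classifier = pipeline("ner", model="dslim/bert-base-NER", aggregation_strategy="simple")
#     token_classifier("Hugging Face is based in New York City")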