"""
Base classes common to both the slow and the fast tokenization classes: PreTrainedTokenizerBase (hosts all the
user-facing encoding methods), SpecialTokensMixin (hosts the special tokens logic) and BatchEncoding (wraps the
dictionary of outputs with special methods for the fast tokenizers).
    N)UserDict)MappingSized)contextmanager)	dataclass)
isfunction)
TYPE_CHECKINGAnyCallableDictList
NamedTupleOptionalSequenceTupleUnion)version   )__version__)custom_object_save)ExplicitEnumPaddingStrategyPushToHubMixin
TensorTypeadd_end_docstringsadd_model_info_to_auto_map"add_model_info_to_custom_pipelinescached_file	copy_funcdownload_urlextract_commit_hashget_json_schemais_flax_availableis_jax_tensoris_mlx_availableis_numpy_arrayis_offline_modeis_protobuf_availableis_remote_urlis_tf_availableis_tf_tensoris_tokenizers_availableis_torch_availableis_torch_deviceis_torch_tensorloggingrequires_backends	to_py_obj)_compile_jinja_template_render_with_assistant_indices)PROTOBUF_IMPORT_ERROR c                 C   s$   t  rddlm} |S tt| )Nr   )DecodeError)r'   google.protobuf.messager6   ImportErrorr4   format)error_messager6    r;   x/var/www/html/construction_image-detection-poc/venv/lib/python3.10/site-packages/transformers/tokenization_utils_base.pyimport_protobuf_decode_errorQ   s   r=   )
if is_tokenizers_available():
    from tokenizers import AddedToken
    from tokenizers import Encoding as EncodingFast
else:

    @dataclass(frozen=False, eq=True)
    class AddedToken:
        """
        AddedToken represents a token to be added to a Tokenizer. An AddedToken can have special options defining the
        way it should behave.

        The `normalized` will default to `not special` if it is not specified, similarly to the definition in
        `tokenizers`.
        """

        def __init__(
            self, content: str, single_word=False, lstrip=False, rstrip=False, special=False, normalized=None
        ):
            self.content = content
            self.single_word = single_word
            self.lstrip = lstrip
            self.rstrip = rstrip
            self.special = special
            self.normalized = normalized if normalized is not None else not special

        def __getstate__(self):
            return self.__dict__

        def __str__(self):
            return self.content

    @dataclass
    class EncodingFast:
        """This is dummy class because without the `tokenizers` library we don't have these objects anyway"""


logger = logging.get_logger(__name__)

VERY_LARGE_INTEGER = int(1e30)  # This is used to set the max input length for a model with infinite size input
LARGE_INTEGER = int(1e20)  # This is used when we need something big but slightly smaller than VERY_LARGE_INTEGER

# Define type aliases and NamedTuples
TextInput = str
PreTokenizedInput = List[str]
EncodedInput = List[int]
TextInputPair = Tuple[str, str]
PreTokenizedInputPair = Tuple[List[str], List[str]]
EncodedInputPair = Tuple[List[int], List[int]]

# Slow tokenizers used to be saved in three separated files
SPECIAL_TOKENS_MAP_FILE = "special_tokens_map.json"
ADDED_TOKENS_FILE = "added_tokens.json"
TOKENIZER_CONFIG_FILE = "tokenizer_config.json"
CHAT_TEMPLATE_FILE = "chat_template.jinja"

# Fast tokenizers (provided by HuggingFace tokenizer's library) can be saved in a single file
FULL_TOKENIZER_FILE = "tokenizer.json"
_re_tokenizer_file = re.compile(r"tokenizer\.(.*)\.json")


class TruncationStrategy(ExplicitEnum):
    """
    Possible values for the `truncation` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for tab-completion in
    an IDE.
    """

    ONLY_FIRST = "only_first"
    ONLY_SECOND = "only_second"
    LONGEST_FIRST = "longest_first"
    DO_NOT_TRUNCATE = "do_not_truncate"


class CharSpan(NamedTuple):
    """
    Character span in the original string.

    Args:
        start (`int`): Index of the first character in the original string.
        end (`int`): Index of the character following the last character in the original string.
    startendNrR   rS   rT   rU   int__annotations__r;   r;   r;   r<   rd         
 rd   c                   @   rc   )	TokenSpanz
    Token span in an encoded string (list of tokens).

    Args:
        start (`int`): Index of the first token in the span.
        end (`int`): Index of the token following the last token in the span.
    re   rf   Nrg   r;   r;   r;   r<   rk      rj   rk   c                       s  e Zd ZdZ					dHdeeeef  deee	e
e	 f  dedeef dedee f
 fd	d
Zedee fddZedefddZdeeef deee	f fddZdefddZdd Zdd Zdd Zdd Zdd Zedeee	  fdd ZdId"edee fd#d$ZdId"edeee  fd%d&ZdId"edeee  fd'd(ZdId"edeee  fd)d*ZdJd+ed,ee defd-d.ZdJd+ed,ee defd/d0Z 	!dKd1ed2ee d3edee! fd4d5Z"dJd+ed,ee dee# fd6d7Z$	!dKd8ed9ee d3edefd:d;Z%	!dKd1ed2ee d3ede#fd<d=Z&dKd8ed9ee d3edefd>d?Z'	dLdeeeef  defd@dAZ(ddBdCeedDf dEedd fdFdGZ)  Z*S )MBatchEncodinga  
    Holds the output of the [`~tokenization_utils_base.PreTrainedTokenizerBase.__call__`],
    [`~tokenization_utils_base.PreTrainedTokenizerBase.encode_plus`] and
    [`~tokenization_utils_base.PreTrainedTokenizerBase.batch_encode_plus`] methods (tokens, attention_masks, etc).

    This class is derived from a python dictionary and can be used as a dictionary. In addition, this class exposes
    utility methods to map from word/character space to token space.

    Args:
        data (`dict`, *optional*):
            Dictionary of lists/arrays/tensors returned by the `__call__`/`encode_plus`/`batch_encode_plus` methods
            ('input_ids', 'attention_mask', etc.).
        encoding (`tokenizers.Encoding` or `Sequence[tokenizers.Encoding]`, *optional*):
            If the tokenizer is a fast tokenizer which outputs additional information like mapping from word/character
            space to token space, the `tokenizers.Encoding` instance or list of instances (for batches) holds this
            information.
        tensor_type (`Union[None, str, TensorType]`, *optional*):
            You can give a tensor_type here to convert the lists of integers in PyTorch/TensorFlow/Numpy Tensors at
            initialization.
        prepend_batch_axis (`bool`, *optional*, defaults to `False`):
            Whether or not to add a batch axis when converting to tensors (see `tensor_type` above). Note that this
            parameter has an effect if the parameter `tensor_type` is set, *otherwise has no effect*.
        n_sequences (`Optional[int]`, *optional*):
            You can give the number of sequences used to generate each sample here (`1` for a single sentence, `2`
            for a pair of sentences), to be stored in the [`BatchEncoding`] at
            initialization.
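
    Example (illustrative sketch; a `BatchEncoding` behaves like a regular Python dictionary, and the checkpoint
    name below is only an assumption for the demo):

    ```python
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    batch = tokenizer("Hello world")
    print(batch["input_ids"])  # dict-style access to the encoded ids
    print(batch.input_ids)  # attribute-style access works as well
    ```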
    NFdataencodingtensor_typeprepend_batch_axisn_sequencesc                    s\   t  | t|tr|g}|| _|d u r"|d ur"t|r"|d j}|| _| j||d d S )Nr   ro   rp   )	superrJ   
isinstancerW   
_encodingslenrq   _n_sequencesconvert_to_tensors)rI   rm   rn   ro   rp   rq   	__class__r;   r<   rJ      s   

zBatchEncoding.__init__returnc                 C   rK   )z
        `Optional[int]`: The number of sequences used to generate each sample from the batch encoded in this
        [`BatchEncoding`]. Currently can be one of `None` (unknown), `1` (a single sentence) or `2` (a pair of
        sentences)
        )rw   rN   r;   r;   r<   rq      s   zBatchEncoding.n_sequencesc                 C   s
   | j duS )z
        `bool`: Indicate whether this [`BatchEncoding`] was generated from the result of a [`PreTrainedTokenizerFast`]
        or not.
        Nru   rN   r;   r;   r<   is_fast   s   
zBatchEncoding.is_fastitemc                    sT   t  tr
j  S jdurj  S t  tr& fddj D S td)a  
        If the key is a string, returns the value of the dict associated to `key` ('input_ids', 'attention_mask',
        etc.).

        If the key is an integer, get the `tokenizers.Encoding` for batch item with index `key`.

        If the key is a slice, returns the value of the dict associated to `key` ('input_ids', 'attention_mask', etc.)
        with the constraint of slice.
        """
        if isinstance(item, str):
            return self.data[item]
        elif self._encodings is not None:
            return self._encodings[item]
        elif isinstance(item, slice):
            return {key: self.data[key][item] for key in self.data.keys()}
        else:
            raise KeyError(
                "Invalid key. Only three types of key are available: "
                "(1) string, (2) integers for backend Encoding, and (3) slices for data subsetting."
            )

    def __getattr__(self, item: str):
        try:
            return self.data[item]
        except KeyError:
            raise AttributeError

    def __getstate__(self):
        return {"data": self.data, "encodings": self._encodings}

    def __setstate__(self, state):
        if "data" in state:
            self.data = state["data"]

        if "encodings" in state:
            self._encodings = state["encodings"]

    def keys(self):
        return self.data.keys()

    def values(self):
        return self.data.values()

    def items(self):
        return self.data.items()

    # After this point:
    # Extended properties and methods only available for fast (rust-based) tokenizers
    # provided by HuggingFace tokenizers library.

    @property
    def encodings(self) -> Optional[List[EncodingFast]]:
        """
        `Optional[List[tokenizers.Encoding]]`: The list of all encodings from the tokenization process. Returns `None` if
        the input was tokenized through Python (i.e., not a fast) tokenizer.
        """
        return self._encodings

    def tokens(self, batch_index: int = 0) -> List[str]:
        """
        Return the list of tokens (sub-parts of the input strings after word/subword splitting and before conversion to
        integer indices) at a given batch index (only works for the output of a fast tokenizer).

        Args:
            batch_index (`int`, *optional*, defaults to 0): The index to access in the batch.

        Returns:
            `List[str]`: The list of tokens at that index.
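
        Example (illustrative sketch; assumes a fast tokenizer and that `tokenizer` is already in scope):

        ```python
        batch = tokenizer("Hello world")
        batch.tokens()  # e.g. ['[CLS]', 'hello', 'world', '[SEP]']
        ```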
        """
        if not self._encodings:
            raise ValueError(
                "tokens() is not available when using non-fast tokenizers (e.g. instance of a `XxxTokenizerFast`"
                " class)."
            )
        return self._encodings[batch_index].tokens

    def sequence_ids(self, batch_index: int = 0) -> List[Optional[int]]:
        """
        Return a list mapping the tokens to the id of their original sentences:

            - `None` for special tokens added around or between sequences,
            - `0` for tokens corresponding to words in the first sequence,
            - `1` for tokens corresponding to words in the second sequence when a pair of sequences was jointly
              encoded.

        Args:
            batch_index (`int`, *optional*, defaults to 0): The index to access in the batch.

        Returns:
            `List[Optional[int]]`: A list indicating the sequence id corresponding to each token. Special tokens added
            by the tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding
            sequence.
        zmsequence_ids() is not available when using non-fast tokenizers (e.g. instance of a `XxxTokenizerFast` class).)ru   r   sequence_idsr   r;   r;   r<   r   Q  s
   zBatchEncoding.sequence_idsc                 C   s$   | j stdtdt | |S )>  
        Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer.

        Args:
            batch_index (`int`, *optional*, defaults to 0): The index to access in the batch.

        Returns:
            `List[Optional[int]]`: A list indicating the word corresponding to each token. Special tokens added by the
            tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding word
            (several tokens will be mapped to the same word index if they are parts of that word).
        """
        if not self._encodings:
            raise ValueError(
                "words() is not available when using non-fast tokenizers (e.g. instance of a `XxxTokenizerFast`"
                " class)."
            )
        warnings.warn(
            "`BatchEncoding.words()` property is deprecated and should be replaced with the identical, "
            "but more self-explanatory `BatchEncoding.word_ids()` property.",
            FutureWarning,
        )
        return self.word_ids(batch_index)

    def word_ids(self, batch_index: int = 0) -> List[Optional[int]]:
        """
        Return a list mapping the tokens to their actual word in the initial sentence for a fast tokenizer.

        Args:
            batch_index (`int`, *optional*, defaults to 0): The index to access in the batch.

        Returns:
            `List[Optional[int]]`: A list indicating the word corresponding to each token. Special tokens added by the
            tokenizer are mapped to `None` and other tokens are mapped to the index of their corresponding word
            (several tokens will be mapped to the same word index if they are parts of that word).
        """
        if not self._encodings:
            raise ValueError(
                "word_ids() is not available when using non-fast tokenizers (e.g. instance of a `XxxTokenizerFast`"
                " class)."
            )
        return self._encodings[batch_index].word_ids

    def token_to_sequence(self, batch_or_token_index: int, token_index: Optional[int] = None) -> int:
        """
        Get the index of the sequence represented by the given token. In the general use case, this method returns `0`
        for a single sequence or the first sequence of a pair, and `1` for the second sequence of a pair

        Can be called as:

        - `self.token_to_sequence(token_index)` if batch size is 1
        - `self.token_to_sequence(batch_index, token_index)` if batch size is greater than 1

        This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e.,
        words are defined by the user). In this case it allows to easily associate encoded tokens with provided
        tokenized words.

        Args:
            batch_or_token_index (`int`):
                Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
                the token in the sequence.
            token_index (`int`, *optional*):
                If a batch index is provided in *batch_or_token_index*, this can be the index of the token in the
                sequence.

        Returns:
            `int`: Index of the sequence in the pair of input sequences.
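
        Example (illustrative sketch; assumes a fast tokenizer encoding a pair of sequences):

        ```python
        batch = tokenizer("Sentence one.", "Sentence two.")
        batch.token_to_sequence(1)  # 0 for a token of the first sequence, 1 for the second
        ```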
        """
        if not self._encodings:
            raise ValueError("token_to_sequence() is not available when using Python based tokenizers")
        if token_index is not None:
            batch_index = batch_or_token_index
        else:
            batch_index = 0
            token_index = batch_or_token_index
        if batch_index < 0:
            batch_index = self._batch_size + batch_index
        if token_index < 0:
            token_index = self._seq_len + token_index
        return self._encodings[batch_index].token_to_sequence(token_index)

    def token_to_word(self, batch_or_token_index: int, token_index: Optional[int] = None) -> int:
        """
        Get the index of the word corresponding (i.e. comprising) to an encoded token in a sequence of the batch.

        Can be called as:

        - `self.token_to_word(token_index)` if batch size is 1
        - `self.token_to_word(batch_index, token_index)` if batch size is greater than 1

        This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e.,
        words are defined by the user). In this case it allows to easily associate encoded tokens with provided
        tokenized words.

        Args:
            batch_or_token_index (`int`):
                Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
                the token in the sequence.
            token_index (`int`, *optional*):
                If a batch index is provided in *batch_or_token_index*, this can be the index of the token in the
                sequence.

        Returns:
            `int`: Index of the word in the input sequence.
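
        Example (illustrative sketch; assumes a fast tokenizer):

        ```python
        batch = tokenizer("Hello world")
        batch.token_to_word(1)  # e.g. 0, the token at index 1 belongs to the first word
        ```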
        """
        if not self._encodings:
            raise ValueError("token_to_word() is not available when using Python based tokenizers")
        if token_index is not None:
            batch_index = batch_or_token_index
        else:
            batch_index = 0
            token_index = batch_or_token_index
        if batch_index < 0:
            batch_index = self._batch_size + batch_index
        if token_index < 0:
            token_index = self._seq_len + token_index
        return self._encodings[batch_index].token_to_word(token_index)

    def word_to_tokens(
        self, batch_or_word_index: int, word_index: Optional[int] = None, sequence_index: int = 0
    ) -> Optional[TokenSpan]:
        """
        Get the encoded token span corresponding to a word in a sequence of the batch.

        Token spans are returned as a [`~tokenization_utils_base.TokenSpan`] with:

        - **start** -- Index of the first token.
        - **end** -- Index of the token following the last token.

        Can be called as:

        - `self.word_to_tokens(word_index, sequence_index: int = 0)` if batch size is 1
        - `self.word_to_tokens(batch_index, word_index, sequence_index: int = 0)` if batch size is greater or equal to
          1

        This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words
        are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized
        words.

        Args:
            batch_or_word_index (`int`):
                Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
                the word in the sequence.
            word_index (`int`, *optional*):
                If a batch index is provided in *batch_or_token_index*, this can be the index of the word in the
                sequence.
            sequence_index (`int`, *optional*, defaults to 0):
                If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0
                or 1) the provided word index belongs to.

        Returns:
            ([`~tokenization_utils_base.TokenSpan`], *optional*): Span of tokens in the encoded sequence. Returns
            `None` if no tokens correspond to the word. This can happen especially when the token is a special token
            that has been used to format the tokenization. For example when we add a class token at the very beginning
            of the tokenization.
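
        Example (illustrative sketch; assumes a fast tokenizer that adds a special token at the start):

        ```python
        batch = tokenizer("Hello world")
        batch.word_to_tokens(0)  # e.g. TokenSpan(start=1, end=2) for the first word
        ```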
        """
        if not self._encodings:
            raise ValueError("word_to_tokens() is not available when using Python based tokenizers")
        if word_index is not None:
            batch_index = batch_or_word_index
        else:
            batch_index = 0
            word_index = batch_or_word_index
        if batch_index < 0:
            batch_index = self._batch_size + batch_index
        if word_index < 0:
            word_index = self._seq_len + word_index
        span = self._encodings[batch_index].word_to_tokens(word_index, sequence_index)
        return TokenSpan(*span) if span is not None else None

    def token_to_chars(self, batch_or_token_index: int, token_index: Optional[int] = None) -> CharSpan:
        """
        Get the character span corresponding to an encoded token in a sequence of the batch.

        Character spans are returned as a [`~tokenization_utils_base.CharSpan`] with:

        - **start** -- Index of the first character in the original string associated to the token.
        - **end** -- Index of the character following the last character in the original string associated to the
          token.

        Can be called as:

        - `self.token_to_chars(token_index)` if batch size is 1
        - `self.token_to_chars(batch_index, token_index)` if batch size is greater or equal to 1

        Args:
            batch_or_token_index (`int`):
                Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
                the token in the sequence.
            token_index (`int`, *optional*):
                If a batch index is provided in *batch_or_token_index*, this can be the index of the token or tokens in
                the sequence.

        Returns:
            [`~tokenization_utils_base.CharSpan`]: Span of characters in the original string, or None, if the token
            (e.g. <s>, </s>) doesn't correspond to any chars in the original string.
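
        Example (illustrative sketch; assumes a fast tokenizer):

        ```python
        batch = tokenizer("Hello world")
        batch.token_to_chars(1)  # e.g. CharSpan(start=0, end=5), the characters of "Hello"
        ```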
        """
        if not self._encodings:
            raise ValueError("token_to_chars() is not available when using Python based tokenizers")
        if token_index is not None:
            batch_index = batch_or_token_index
        else:
            batch_index = 0
            token_index = batch_or_token_index
        span_indices = self._encodings[batch_index].token_to_chars(token_index)

        return CharSpan(*span_indices) if span_indices is not None else None

    def char_to_token(
        self, batch_or_char_index: int, char_index: Optional[int] = None, sequence_index: int = 0
    ) -> int:
        """
        Get the index of the token in the encoded output comprising a character in the original string for a sequence
        of the batch.

        Can be called as:

        - `self.char_to_token(char_index)` if batch size is 1
        - `self.char_to_token(batch_index, char_index)` if batch size is greater or equal to 1

        This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words
        are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized
        words.

        Args:
            batch_or_char_index (`int`):
                Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
                the word in the sequence
            char_index (`int`, *optional*):
                If a batch index is provided in *batch_or_token_index*, this can be the index of the word in the
                sequence.
            sequence_index (`int`, *optional*, defaults to 0):
                If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0
                or 1) the provided character index belongs to.


        Returns:
            `int`: Index of the token, or None if the char index refers to a whitespace only token and whitespace is
                   trimmed with `trim_offsets=True`.
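
        Example (illustrative sketch; character 6 is the "w" of "world"):

        ```python
        batch = tokenizer("Hello world")
        batch.char_to_token(6)  # e.g. 2, the index of the token covering that character
        ```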
        """
        if not self._encodings:
            raise ValueError("char_to_token() is not available when using Python based tokenizers")
        if char_index is not None:
            batch_index = batch_or_char_index
        else:
            batch_index = 0
            char_index = batch_or_char_index
        return self._encodings[batch_index].char_to_token(char_index, sequence_index)

    def word_to_chars(
        self, batch_or_word_index: int, word_index: Optional[int] = None, sequence_index: int = 0
    ) -> CharSpan:
        """
        Get the character span in the original string corresponding to given word in a sequence of the batch.

        Character spans are returned as a CharSpan NamedTuple with:

        - start: index of the first character in the original string
        - end: index of the character following the last character in the original string

        Can be called as:

        - `self.word_to_chars(word_index)` if batch size is 1
        - `self.word_to_chars(batch_index, word_index)` if batch size is greater or equal to 1

        Args:
            batch_or_word_index (`int`):
                Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
                the word in the sequence
            word_index (`int`, *optional*):
                If a batch index is provided in *batch_or_token_index*, this can be the index of the word in the
                sequence.
            sequence_index (`int`, *optional*, defaults to 0):
                If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0
                or 1) the provided word index belongs to.

        Returns:
            `CharSpan` or `List[CharSpan]`: Span(s) of the associated character or characters in the string. CharSpan
            are NamedTuple with:

                - start: index of the first character associated to the token in the original string
                - end: index of the character following the last character associated to the token in the original
                  string
        """
        if not self._encodings:
            raise ValueError("word_to_chars() is not available when using Python based tokenizers")
        if word_index is not None:
            batch_index = batch_or_word_index
        else:
            batch_index = 0
            word_index = batch_or_word_index
        return CharSpan(*(self._encodings[batch_index].word_to_chars(word_index, sequence_index)))

    def char_to_word(
        self, batch_or_char_index: int, char_index: Optional[int] = None, sequence_index: int = 0
    ) -> int:
        """
        Get the word in the original string corresponding to a character in the original string of a sequence of the
        batch.

        Can be called as:

        - `self.char_to_word(char_index)` if batch size is 1
        - `self.char_to_word(batch_index, char_index)` if batch size is greater than 1

        This method is particularly suited when the input sequences are provided as pre-tokenized sequences (i.e. words
        are defined by the user). In this case it allows to easily associate encoded tokens with provided tokenized
        words.

        Args:
            batch_or_char_index (`int`):
                Index of the sequence in the batch. If the batch only comprises one sequence, this can be the index of
                the character in the original string.
            char_index (`int`, *optional*):
                If a batch index is provided in *batch_or_token_index*, this can be the index of the character in the
                original string.
            sequence_index (`int`, *optional*, defaults to 0):
                If pair of sequences are encoded in the batch this can be used to specify which sequence in the pair (0
                or 1) the provided character index belongs to.


        Returns:
            `int` or `List[int]`: Index or indices of the associated encoded token(s).
        """
        if not self._encodings:
            raise ValueError("char_to_word() is not available when using Python based tokenizers")
        if char_index is not None:
            batch_index = batch_or_char_index
        else:
            batch_index = 0
            char_index = batch_or_char_index
        return self._encodings[batch_index].char_to_word(char_index, sequence_index)

    def convert_to_tensors(
        self, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False
    ):
        """
        Convert the inner content to tensors.

        Args:
            tensor_type (`str` or [`~utils.TensorType`], *optional*):
                The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If
                `None`, no modification is done.
            prepend_batch_axis (`bool`, *optional*, defaults to `False`):
                Whether or not to add the batch dimension during the conversion.
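
        Example (illustrative sketch; the `"pt"` tensor type requires PyTorch to be installed):

        ```python
        batch = tokenizer(["Hello world", "Hi"], padding=True)
        batch = batch.convert_to_tensors("pt")  # converts in place and returns self
        ```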
        """
        if tensor_type is None:
            return self

        # Convert to TensorType
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)

        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed."
                )
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = is_tf_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
            import torch

            def as_tensor(value, dtype=None):
                if isinstance(value, list) and isinstance(value[0], np.ndarray):
                    return torch.from_numpy(np.array(value))
                return torch.tensor(value)

            is_tensor = is_torch_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = is_jax_tensor
        elif tensor_type == TensorType.MLX:
            if not is_mlx_available():
                raise ImportError("Unable to convert output to MLX tensors format, MLX is not installed.")
            import mlx.core as mx

            as_tensor = mx.array

            def is_tensor(obj):
                return isinstance(obj, mx.array)
        else:

            def as_tensor(value, dtype=None):
                if isinstance(value, (list, tuple)) and isinstance(value[0], (list, tuple, np.ndarray)):
                    value_lens = [len(val) for val in value]
                    if len(set(value_lens)) > 1 and dtype is None:
                        # we have a ragged list so handle explicitly
                        value = as_tensor([np.asarray(val) for val in value], dtype=object)
                return np.asarray(value, dtype=dtype)

            is_tensor = is_numpy_array

        # Do the tensor conversion in batch
        for key, value in self.items():
            try:
                if prepend_batch_axis:
                    value = [value]

                if not is_tensor(value):
                    tensor = as_tensor(value)
                    self[key] = tensor
            except Exception as e:
                if key == "overflowing_tokens":
                    raise ValueError(
                        "Unable to create tensor returning overflowing tokens of different lengths. "
                        "Please see if a fast version of this tokenizer is available to have this feature available."
                    ) from e
                raise ValueError(
                    "Unable to create tensor, you should probably activate truncation and/or padding with"
                    " 'padding=True' 'truncation=True' to have batched tensors with the same length. Perhaps your"
                    f" features (`{key}` in this case) have excessive nesting (inputs type `list` where type `int` is"
                    " expected)."
                ) from e

        return self

    def to(self, device: Union[str, "torch.device"], *, non_blocking: bool = False) -> "BatchEncoding":
        """
        Send all values to device by calling `v.to(device, non_blocking=non_blocking)` (PyTorch only).

        Args:
            device (`str` or `torch.device`): The device to put the tensors on.
            non_blocking (`bool`): Whether to perform the copy asynchronously.

        Returns:
            [`BatchEncoding`]: The same instance after modification.
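
        Example (illustrative sketch; requires PyTorch and an available CUDA device):

        ```python
        batch = tokenizer("Hello world", return_tensors="pt")
        batch = batch.to("cuda")  # moves every tensor value onto the GPU
        ```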
        r   r   Nc                    s0   i | ]\}}|t |jr|j d n|qS ))r   r   )rt   Tensortor   kvr   r   r   r;   r<   r   4  s    z$BatchEncoding.to.<locals>.<dictcomp>z+Attempting to cast a BatchEncoding to type z. This is not supported.)
r0   r   rt   rV   r-   rh   rm   r   loggerwarning)rI   r   r   r;   r   r<   r   "  s   zBatchEncoding.to)NNNFN)r   rC   r   NF)+rR   rS   rT   rU   r   r   rV   r	   r   rW   r   r   boolrh   rJ   propertyrq   r}   r   r   rO   r   r   r   r   r   r   r   r   r   r   r   r   rk   r   rd   r   r   r   r   rx   r   __classcell__r;   r;   ry   r<   rl      s    "''
 5(
+
 -(
,grl   c                       s  e Zd ZdZg dZd%ddZdefddZ		d&d
ee	e
e	ef f defddZ	d%de
e	eee
e	ef  f dedefddZd%de
ee	 ee f dedefddZedefddZ fddZ fddZedee	e
e	ee	 f f fddZedee	e
e	eee
e	ef  f f fddZedee
e	ef  fddZedee	 fdd Zedee fd!d"Zdee	 fd#d$Z  ZS )'SpecialTokensMixina  
    A mixin derived by [`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`] to handle specific behaviors related to
    special tokens. In particular, this class hold the attributes which can be used to directly access these special
    tokens in a model-independent manner and allow to set and update the special tokens.

    Args:
        bos_token (`str` or `tokenizers.AddedToken`, *optional*):
            A special token representing the beginning of a sentence.
        eos_token (`str` or `tokenizers.AddedToken`, *optional*):
            A special token representing the end of a sentence.
        unk_token (`str` or `tokenizers.AddedToken`, *optional*):
            A special token representing an out-of-vocabulary token.
        sep_token (`str` or `tokenizers.AddedToken`, *optional*):
            A special token separating two different sentences in the same input (used by BERT for instance).
        pad_token (`str` or `tokenizers.AddedToken`, *optional*):
            A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by
            attention mechanisms or loss computation.
        cls_token (`str` or `tokenizers.AddedToken`, *optional*):
            A special token representing the class of the input (used by BERT for instance).
        mask_token (`str` or `tokenizers.AddedToken`, *optional*):
            A special token representing a masked token (used by masked-language modeling pretraining objectives, like
            BERT).
        additional_special_tokens (tuple or list of `str` or `tokenizers.AddedToken`, *optional*):
            A tuple or a list of additional tokens, which will be marked as `special`, meaning that they will be
            skipped when decoding if `skip_special_tokens` is set to `True`.
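
    Example (illustrative sketch; GPT-2 has no padding token by default, so one is registered here by reusing the
    EOS token — the checkpoint name is only an assumption for the demo):

    ```python
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    tokenizer.pad_token = tokenizer.eos_token
    print(tokenizer.pad_token_id)  # now resolves to the EOS token id
    ```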
    """

    SPECIAL_TOKENS_ATTRIBUTES = [
        "bos_token",
        "eos_token",
        "unk_token",
        "sep_token",
        "pad_token",
        "cls_token",
        "mask_token",
        "additional_special_tokens",
    ]

    def __init__(self, verbose=False, **kwargs):
        self._pad_token_type_id = 0
        self.verbose = verbose
        self._special_tokens_map = dict.fromkeys(self.SPECIAL_TOKENS_ATTRIBUTES)
        self._special_tokens_map["additional_special_tokens"] = []

        for key, value in kwargs.items():
            if value is None:
                continue
            if key in self.SPECIAL_TOKENS_ATTRIBUTES:
                if key == "additional_special_tokens":
                    assert isinstance(value, (list, tuple)), f"Value {value} is not a list or tuple"
                    assert all(
                        isinstance(t, (str, AddedToken)) for t in value
                    ), "One of the tokens is not a string or an AddedToken"
                    setattr(self, key, value)
                elif isinstance(value, (str, AddedToken)):
                    setattr(self, key, value)
                else:
                    raise TypeError(f"Special token {key} has to be either str or AddedToken but got: {type(value)}")

    def sanitize_special_tokens(self) -> int:
        """
        The `sanitize_special_tokens` method is now deprecated, kept for backward compatibility, and will be removed in
        transformers v5.
        """
        logger.warning_once("The `sanitize_special_tokens` will be removed in transformers v5.")
        return self.add_tokens(self.all_special_tokens_extended, special_tokens=True)

    def add_special_tokens(
        self, special_tokens_dict: Dict[str, Union[str, AddedToken]], replace_additional_special_tokens=True
    ) -> int:
        """
        Add a dictionary of special tokens (eos, pad, cls, etc.) to the encoder and link them to class attributes. If
        special tokens are NOT in the vocabulary, they are added to it (indexed starting from the last index of the
        current vocabulary).

        When adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix of the
        model so that its embedding matrix matches the tokenizer.

        In order to do that, please use the [`~PreTrainedModel.resize_token_embeddings`] method.

        Using `add_special_tokens` will ensure your special tokens can be used in several ways:

        - Special tokens can be skipped when decoding using `skip_special_tokens = True`.
        - Special tokens are carefully handled by the tokenizer (they are never split), similar to `AddedTokens`.
        - You can easily refer to special tokens using tokenizer class attributes like `tokenizer.cls_token`. This
          makes it easy to develop model-agnostic training and fine-tuning scripts.

        When possible, special tokens are already registered for provided pretrained models (for instance
        [`BertTokenizer`] `cls_token` is already registered to be `'[CLS]'` and XLM's one is also registered to be
        `'</s>'`).

        Args:
            special_tokens_dict (dictionary *str* to *str* or `tokenizers.AddedToken`):
                Keys should be in the list of predefined special attributes: [`bos_token`, `eos_token`, `unk_token`,
                `sep_token`, `pad_token`, `cls_token`, `mask_token`, `additional_special_tokens`].

                Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer
                assign the index of the `unk_token` to them).
            replace_additional_special_tokens (`bool`, *optional*, defaults to `True`):
                If `True`, the existing list of additional special tokens will be replaced by the list provided in
                `special_tokens_dict`. Otherwise, `self._special_tokens_map["additional_special_tokens"]` is just extended. In the former
                case, the tokens will NOT be removed from the tokenizer's full vocabulary - they are only being flagged
                as non-special tokens. Remember, this only affects which tokens are skipped during decoding, not the
                `added_tokens_encoder` and `added_tokens_decoder`. This means that the previous
                `additional_special_tokens` are still added tokens, and will not be split by the model.

        Returns:
            `int`: Number of tokens added to the vocabulary.

        Examples:

        ```python
        # Let's see how to add a new classification token to GPT-2
        tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2")
        model = GPT2Model.from_pretrained("openai-community/gpt2")

        special_tokens_dict = {"cls_token": "<CLS>"}

        num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
        print("We have added", num_added_toks, "tokens")
        # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer.
        model.resize_token_embeddings(len(tokenizer))

        assert tokenizer.cls_token == "<CLS>"
        ```"""
        if not special_tokens_dict:
            return 0

        added_tokens = []
        for key, value in special_tokens_dict.items():
            assert key in self.SPECIAL_TOKENS_ATTRIBUTES, f"Key {key} is not a special token"

            if self.verbose:
                logger.info(f"Assigning {value} to the {key} key of the tokenizer")

            if key == "additional_special_tokens":
                assert isinstance(value, (list, tuple)) and all(
                    isinstance(t, (str, AddedToken)) for t in value
                ), f"Tokens {value} for key {key} should all be str or AddedToken instances"

                to_add = []
                for token in value:
                    if isinstance(token, str):
                        # for legacy purpose we default to stripping. `False` depends on this
                        token = AddedToken(token, rstrip=False, lstrip=False, normalized=False, special=True)
                    if not replace_additional_special_tokens and str(token) in self.additional_special_tokens:
                        continue
                    to_add.append(token)
                if replace_additional_special_tokens and len(to_add) > 0:
                    setattr(self, key, list(to_add))
                else:
                    self._special_tokens_map["additional_special_tokens"].extend(to_add)
                added_tokens += to_add
            else:
                if not isinstance(value, (str, AddedToken)):
                    raise ValueError(f"Token {value} for key {key} should be a str or an AddedToken instance")
                if isinstance(value, str):
                    # for legacy purpose we default to stripping. `False` depends on this
                    value = AddedToken(value, rstrip=False, lstrip=False, normalized=False, special=True)
                if isinstance(value, AddedToken):
                    setattr(self, key, value)
                if value not in added_tokens:
                    added_tokens.append(value)

        # if we are adding tokens that were not part of the vocab, we ought to add them
        added_tokens = self.add_tokens(added_tokens, special_tokens=True)
        return added_tokens

    def add_tokens(
        self, new_tokens: Union[str, AddedToken, List[Union[str, AddedToken]]], special_tokens: bool = False
    ) -> int:
        """
        Add a list of new tokens to the tokenizer class. If the new tokens are not in the vocabulary, they are added to
        it with indices starting from length of the current vocabulary and will be isolated before the tokenization
        algorithm is applied. Added tokens and tokens from the vocabulary of the tokenization algorithm are therefore
        not treated in the same way.

        Note, when adding new tokens to the vocabulary, you should make sure to also resize the token embedding matrix
        of the model so that its embedding matrix matches the tokenizer.

        In order to do that, please use the [`~PreTrainedModel.resize_token_embeddings`] method.

        Args:
            new_tokens (`str`, `tokenizers.AddedToken` or a list of *str* or `tokenizers.AddedToken`):
                Tokens are only added if they are not already in the vocabulary. `tokenizers.AddedToken` wraps a string
                token to let you personalize its behavior: whether this token should only match against a single word,
                whether this token should strip all potential whitespaces on the left side, whether this token should
                strip all potential whitespaces on the right side, etc.
            special_tokens (`bool`, *optional*, defaults to `False`):
                Can be used to specify if the token is a special token. This mostly change the normalization behavior
                (special tokens like CLS or [MASK] are usually not lower-cased for instance).

                See details for `tokenizers.AddedToken` in HuggingFace tokenizers library.

        Returns:
            `int`: Number of tokens added to the vocabulary.

        Examples:

        ```python
        # Let's see how to increase the vocabulary of Bert model and tokenizer
        tokenizer = BertTokenizerFast.from_pretrained("google-bert/bert-base-uncased")
        model = BertModel.from_pretrained("google-bert/bert-base-uncased")

        num_added_toks = tokenizer.add_tokens(["new_tok1", "my_new-tok2"])
        print("We have added", num_added_toks, "tokens")
        # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer.
        model.resize_token_embeddings(len(tokenizer))
        ```"""
        if not new_tokens:
            return 0

        if not isinstance(new_tokens, (list, tuple)):
            new_tokens = [new_tokens]

        return self._add_tokens(new_tokens, special_tokens=special_tokens)

    def _add_tokens(self, new_tokens: Union[List[str], List[AddedToken]], special_tokens: bool = False) -> int:
        raise NotImplementedError

    @property
    def pad_token_type_id(self) -> int:
        """
        `int`: Id of the padding token type in the vocabulary.
        """
        return self._pad_token_type_id

    def __setattr__(self, key, value):
        key_without_id = key
        key_is_special_id = key.endswith("_id") or key.endswith("_ids")
        if key_is_special_id:
            key_without_id = key[:-3] if not key.endswith("_ids") else key[:-4]

        if self.__dict__.get("_special_tokens_map", None) is not None and any(
            name in self.__dict__["_special_tokens_map"] for name in [key, key_without_id]
        ):
            if key_is_special_id:
                if value is not None:
                    value = (
                        self.convert_ids_to_tokens(value)
                        if key != "additional_special_tokens_ids"
                        else [self.convert_ids_to_tokens(val) for val in value]
                    )
                key = key_without_id

            if key != "additional_special_tokens" and not isinstance(value, (str, AddedToken)) and value is not None:
                raise ValueError(f"Cannot set a non-string value as the {key}")
            self._special_tokens_map[key] = value
        else:
            super().__setattr__(key, value)

    def __getattr__(self, key):
        key_without_id = key
        key_is_special_id = key.endswith("_id") or key.endswith("_ids")
        if key_is_special_id:
            key_without_id = key[:-3] if not key.endswith("_ids") else key[:-4]

        if self.__dict__.get("_special_tokens_map", None) is not None and any(
            name in self.__dict__["_special_tokens_map"] for name in [key, key_without_id]
        ):
            _special_tokens_map = self.__dict__["_special_tokens_map"]
            if not key_is_special_id:
                if _special_tokens_map[key] is None:
                    if self.verbose:
                        logger.error(f"Using {key}, but it is not set yet.")
                    return None
                value = _special_tokens_map[key]
                return str(value) if key != "additional_special_tokens" else [str(tok) for tok in value]
            else:
                attr_as_tokens = getattr(self, key_without_id)
                return self.convert_tokens_to_ids(attr_as_tokens) if attr_as_tokens is not None else None

        if key not in self.__dict__:
            raise AttributeError(f"{self.__class__.__name__} has no attribute {key}")
        else:
            return super().__getattr__(key)

    @property
    def special_tokens_map(self) -> Dict[str, Union[str, List[str]]]:
        """
        `Dict[str, Union[str, List[str]]]`: A dictionary mapping special token class attributes (`cls_token`,
        `unk_token`, etc.) to their values (`'<unk>'`, `'<cls>'`, etc.).

        Convert potential tokens of `tokenizers.AddedToken` type to string.
        """
        set_attr = {}
        for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
            attr_value = getattr(self, attr)
            if attr_value:
                set_attr[attr] = attr_value
        return set_attr

    @property
    def special_tokens_map_extended(self) -> Dict[str, Union[str, AddedToken, List[Union[str, AddedToken]]]]:
        """
        `Dict[str, Union[str, tokenizers.AddedToken, List[Union[str, tokenizers.AddedToken]]]]`: A dictionary mapping
        special token class attributes (`cls_token`, `unk_token`, etc.) to their values (`'<unk>'`, `'<cls>'`, etc.).

        Don't convert tokens of `tokenizers.AddedToken` type to string so they can be used to control more finely how
        special tokens are tokenized.
        """
        set_attr = {}
        for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
            attr_value = self._special_tokens_map[attr]
            if attr_value:
                set_attr[attr] = attr_value
        return set_attr

    @property
    def all_special_tokens_extended(self) -> List[Union[str, AddedToken]]:
        """
        `List[Union[str, tokenizers.AddedToken]]`: All the special tokens (`'<unk>'`, `'<cls>'`, etc.), the order has
        nothing to do with the index of each tokens. If you want to know the correct indices, check
        `self.added_tokens_encoder`. We can't create an order anymore as the keys are `AddedTokens` and not `Strings`.

        Don't convert tokens of `tokenizers.AddedToken` type to string so they can be used to control more finely how
        special tokens are tokenized.
        """
        all_tokens = []
        seen = set()
        for value in self.special_tokens_map_extended.values():
            if isinstance(value, (list, tuple)):
                tokens_to_add = [token for token in value if str(token) not in seen]
            else:
                tokens_to_add = [value] if str(value) not in seen else []
            seen.update(map(str, tokens_to_add))
            all_tokens.extend(tokens_to_add)
        return all_tokens

    @property
    def all_special_tokens(self) -> List[str]:
        """
        `List[str]`: A list of the unique special tokens (`'<unk>'`, `'<cls>'`, ..., etc.).

        Convert tokens of `tokenizers.AddedToken` type to string.
        """
        all_toks = [str(s) for s in self.all_special_tokens_extended]
        return all_toks

    @property
    def all_special_ids(self) -> List[int]:
        """
        `List[int]`: List the ids of the special tokens (`'<unk>'`, `'<cls>'`, etc.) mapped to class attributes.
        """
        all_toks = self.all_special_tokens
        all_ids = self.convert_tokens_to_ids(all_toks)
        return all_ids

    def _set_model_specific_special_tokens(self, special_tokens):
        """
        Adds new special tokens to the "SPECIAL_TOKENS_ATTRIBUTES" list which will be part
        of "self.special_tokens" and saved as a special token in the tokenizer's config.
        This allows us to dynamically add new model-type specific tokens after initializing the tokenizer.
        For example: if the model tokenizer is multimodal, we can support special image or audio tokens.
        """
        self.SPECIAL_TOKENS_ATTRIBUTES = self.SPECIAL_TOKENS_ATTRIBUTES + list(special_tokens.keys())
        for key, value in special_tokens.items():
            if isinstance(value, (str, AddedToken)):
                self._special_tokens_map[key] = value
            else:
                raise TypeError(f"Special token {key} has to be either str or AddedToken but got: {type(value)}")


ENCODE_KWARGS_DOCSTRING = r"""
            add_special_tokens (`bool`, *optional*, defaults to `True`):
                Whether or not to add special tokens when encoding the sequences. This will use the underlying
                `PretrainedTokenizerBase.build_inputs_with_special_tokens` function, which defines which tokens are
                automatically added to the input ids. This is useful if you want to add `bos` or `eos` tokens
                automatically.
            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
                Activates and controls padding. Accepts the following values:

                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
                  sequence if provided).
                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
                  lengths).
            truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
                Activates and controls truncation. Accepts the following values:

                - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
                  to the maximum acceptable input length for the model if that argument is not provided. This will
                  truncate token by token, removing a token from the longest sequence in the pair if a pair of
                  sequences (or a batch of pairs) is provided.
                - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
                  maximum acceptable input length for the model if that argument is not provided. This will only
                  truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
                - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
                  maximum acceptable input length for the model if that argument is not provided. This will only
                  truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
                - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
                  greater than the model maximum admissible input size).
            max_length (`int`, *optional*):
                Controls the maximum length to use by one of the truncation/padding parameters.

                If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
                is required by one of the truncation/padding parameters. If the model has no specific maximum input
                length (like XLNet) truncation/padding to a maximum length will be deactivated.
            stride (`int`, *optional*, defaults to 0):
                If set to a number along with `max_length`, the overflowing tokens returned when
                `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
                returned to provide some overlap between truncated and overflowing sequences. The value of this
                argument defines the number of overlapping tokens.
            is_split_into_words (`bool`, *optional*, defaults to `False`):
                Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the
                tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace)
                which it will tokenize. This is useful for NER or token classification.
            pad_to_multiple_of (`int`, *optional*):
                If set will pad the sequence to a multiple of the provided value. Requires `padding` to be activated.
                This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
                `>= 7.5` (Volta).
            padding_side (`str`, *optional*):
                The side on which the model should have padding applied. Should be selected between ['right', 'left'].
                Default value is picked from the class attribute of the same name.
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors instead of list of python integers. Acceptable values are:

                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return Numpy `np.ndarray` objects.
a  
            return_token_type_ids (`bool`, *optional*):
                Whether to return token type IDs. If left to the default, will return the token type IDs according to
                the specific tokenizer's default, defined by the `return_outputs` attribute.

                [What are token type IDs?](../glossary#token-type-ids)
            return_attention_mask (`bool`, *optional*):
                Whether to return the attention mask. If left to the default, will return the attention mask according
                to the specific tokenizer's default, defined by the `return_outputs` attribute.

                [What are attention masks?](../glossary#attention-mask)
            return_overflowing_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch
                of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead
                of returning overflowing tokens.
            return_special_tokens_mask (`bool`, *optional*, defaults to `False`):
                Whether or not to return special tokens mask information.
            return_offsets_mapping (`bool`, *optional*, defaults to `False`):
                Whether or not to return `(char_start, char_end)` for each token.

                This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`], if using
                Python's tokenizer, this method will raise `NotImplementedError`.
            return_length (`bool`, *optional*, defaults to `False`):
                Whether or not to return the lengths of the encoded inputs.
            verbose (`bool`, *optional*, defaults to `True`):
                Whether or not to print more information and warnings.
            **kwargs: passed to the `self.tokenize()` method

        Return:
            [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model.

              [What are input IDs?](../glossary#input-ids)

            - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or
              if *"token_type_ids"* is in `self.model_input_names`).

              [What are token type IDs?](../glossary#token-type-ids)

            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`).

              [What are attention masks?](../glossary#attention-mask)

            - **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and
              `return_overflowing_tokens=True`).
            - **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and
              `return_overflowing_tokens=True`).
            - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying
              regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`).
            - **length** -- The length of the inputs (when `return_length=True`)
"""

INIT_TOKENIZER_DOCSTRING = r"""
    Class attributes (overridden by derived classes)

        - **vocab_files_names** (`Dict[str, str]`) -- A dictionary with, as keys, the `__init__` keyword name of each
          vocabulary file required by the model, and as associated values, the filename for saving the associated file
          (string).
        - **pretrained_vocab_files_map** (`Dict[str, Dict[str, str]]`) -- A dictionary of dictionaries, with the
          high-level keys being the `__init__` keyword name of each vocabulary file required by the model, the
          low-level being the `short-cut-names` of the pretrained models with, as associated values, the `url` to the
          associated pretrained vocabulary file.
        - **model_input_names** (`List[str]`) -- A list of inputs expected in the forward pass of the model.
        - **padding_side** (`str`) -- The default value for the side on which the model should have padding applied.
          Should be `'right'` or `'left'`.
        - **truncation_side** (`str`) -- The default value for the side on which the model should have truncation
          applied. Should be `'right'` or `'left'`.

    Args:
        model_max_length (`int`, *optional*):
            The maximum length (in number of tokens) for the inputs to the transformer model. When the tokenizer is
            loaded with [`~tokenization_utils_base.PreTrainedTokenizerBase.from_pretrained`], this will be set to the
            value stored for the associated model in `max_model_input_sizes` (see above). If no value is provided, will
            default to VERY_LARGE_INTEGER (`int(1e30)`).
        padding_side (`str`, *optional*):
            The side on which the model should have padding applied. Should be selected between ['right', 'left'].
            Default value is picked from the class attribute of the same name.
        truncation_side (`str`, *optional*):
            The side on which the model should have truncation applied. Should be selected between ['right', 'left'].
            Default value is picked from the class attribute of the same name.
        chat_template (`str`, *optional*):
            A Jinja template string that will be used to format lists of chat messages. See
            https://huggingface.co/docs/transformers/chat_templating for a full description.
        model_input_names (`List[string]`, *optional*):
            The list of inputs accepted by the forward pass of the model (like `"token_type_ids"` or
            `"attention_mask"`). Default value is picked from the class attribute of the same name.
        bos_token (`str` or `tokenizers.AddedToken`, *optional*):
            A special token representing the beginning of a sentence. Will be associated to `self.bos_token` and
            `self.bos_token_id`.
        eos_token (`str` or `tokenizers.AddedToken`, *optional*):
            A special token representing the end of a sentence. Will be associated to `self.eos_token` and
            `self.eos_token_id`.
        unk_token (`str` or `tokenizers.AddedToken`, *optional*):
            A special token representing an out-of-vocabulary token. Will be associated to `self.unk_token` and
            `self.unk_token_id`.
        sep_token (`str` or `tokenizers.AddedToken`, *optional*):
            A special token separating two different sentences in the same input (used by BERT for instance). Will be
            associated to `self.sep_token` and `self.sep_token_id`.
        pad_token (`str` or `tokenizers.AddedToken`, *optional*):
            A special token used to make arrays of tokens the same size for batching purpose. Will then be ignored by
            attention mechanisms or loss computation. Will be associated to `self.pad_token` and `self.pad_token_id`.
        cls_token (`str` or `tokenizers.AddedToken`, *optional*):
            A special token representing the class of the input (used by BERT for instance). Will be associated to
            `self.cls_token` and `self.cls_token_id`.
        mask_token (`str` or `tokenizers.AddedToken`, *optional*):
            A special token representing a masked token (used by masked-language modeling pretraining objectives, like
            BERT). Will be associated to `self.mask_token` and `self.mask_token_id`.
        additional_special_tokens (tuple or list of `str` or `tokenizers.AddedToken`, *optional*):
            A tuple or a list of additional special tokens. Add them here to ensure they are skipped when decoding with
            `skip_special_tokens` is set to True. If they are not part of the vocabulary, they will be added at the end
            of the vocabulary.
        clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
            Whether or not the model should clean up the spaces that were added when splitting the input text during
            the tokenization process.
        split_special_tokens (`bool`, *optional*, defaults to `False`):
            Whether or not the special tokens should be split during the tokenization process. Passing this argument
            will affect the internal state of the tokenizer. The default behavior is to not split special tokens. This
            means that if `<s>` is the `bos_token`, then `tokenizer.tokenize("<s>") = ['<s>']`. Otherwise, if
            `split_special_tokens=True`, then `tokenizer.tokenize("<s>")` will give `['<', 's', '>']`.
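
    Example (a minimal sketch; `BertTokenizer` stands in for any concrete subclass, and the added token is
    illustrative):

    ```python
    from transformers import BertTokenizer

    # Special tokens passed at load time are tracked by the tokenizer and skipped on decode
    tokenizer = BertTokenizer.from_pretrained(
        "google-bert/bert-base-uncased", additional_special_tokens=["<special>"]
    )
    ```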
c                ,       s  e Zd ZU dZi Zeeef ed< i Zeeeeef f ed< dZ	e
e ed< g dZee ed< dZeed	< dZeed
< dZ fddZedefddZedefddZejdefddZejdefddZdefddZedeeef fddZdefddZdefddZdeeef fddZ						 							dd!eeeeef  eeeeef   f d"e
eeeef   d#e
eeeef   d$e
e d%ed&ed'ed(eeeef d)ed*e
e d+e
eee f  d,ed-ed.e
eee!f  deeee ee eee  e"f fd/d0Z#dd$e
e d"e
ee  defd1d2Z$e%ddddd3dd4d5eee&j'f d6e
eee&j'f  d7ed8ed9e
eeef  d:efd;d<Z(e%ddddddd=d>d?Z)e*d@dA Z+e%ddBeee!f fdCdDZ,			ddEeee&j'f dFe
e dGe
e dHede-e f
dIdJZ.		ddEeee&j'f dKe-e dFe
e dGe
e de-e f
dLdMZ/ddEedGe
e de-e fdNdOZ0ddPedQe
e dRedee fdSdTZ1e2e3dUdV		 				W		ddPee4e5e6f dXe
ee4e5e6f  dRed(eeeef d)eeee7df d*e
e dYed	e
e d+e
eee f  dee fdZd[Z8ddQedefd\d]Z9	 dd^d_Z:e2e3e;					 				W											 ddPee4e5ee4 ee5 df dXe
ee4e5ee4 ee5 f  d`ee4e5ee4 ee5 df dae
ee4e5ee4 ee5 f  dRed(eeeef d)eeee7df d*e
e dYedbedce
e d	e
e d+e
eee f  dde
e dee
e dfedgedhediedjede"f*dkdlZ<		 				W											 	ddPee4e5ee4 ee5 f dXe
ee4e5ee4 ee5 f  dRed(eeeef d)eeee7df d*e
e dYedbedce
e d	e
e d+e
eee f  dde
e dee
e dfedgedhediedjedmede"f(dndoZ=e2e3e;		 				W											 ddPee4e5e6f dXe
ee4e5e6f  dRed(eeeef d)eeee7df d*e
e dYedbedce
e d	e
e d+e
eee f  dde
e dee
e dfedgedhediedjede"f&dpdqZ>dd ej?e7j@ddWddddddddddd dfdPee4e5e6f dXe
ee4e5e6f  dRedredse7d*e
e dYedbedce
e d	e
e d+e
eee f  dde
e dee
e dfedgedhediedjedmede"f(dtduZAe2e3e;	 				W											 	ddveee4 eeB ee5 eeC ee6 eeD f dRed(eeeef d)eeee7df d*e
e dYedbedce
e d	e
e d+e
eee f  dde
e dee
e dfedgedhediedjedmede"f&dwdxZEd ej?e7j@ddWddddddddddd dfdveee4 eeB ee5 eeC ee6 eeD f dRedredse7d*e
e dYedbedce
e d	e
e d+e
eee f  dde
e dee
e dfedgedhediedjedmede"f&dydzZF	 						 dd{ee"ee" eee6f eeee6 f eeee6f  f d(eeeef d*e
e dce
e d	e
e dee
e d+e
eee f  djede"fd|d}ZG	dd~ee de
ee  dee fddZH	dd~ee de
ee  dee fddZIe2e3e;		 				W										 	ddee de
ee  dRed(eeeef d)eeee7df d*e
e dYedce
e d	e
e d+e
eee f  dde
e dee
e dfedgedhediedjedede"f&ddZJ		W		Wddee de
ee  dedseee7f dYede-ee ee ee f fddZKdej?dddfd{eeee6f e"f d*e
e dredce
e d	e
e dee
e deLfddZMdee defddZN		ddeee eee  dddf dede
e dee fddZO		ddeeee dddf dede
e defddZP		ddeeee f dede
e defddZQ	dd~ee de
ee  dedee fddZRe*dedefddZSdee d*e
e djefddZTdd ZUdd ZVeWdd ZXe%dddZY						 ddee de
ee  d*e
e de
e d(ed+e
e d)ede"fddZZ  Z[S )PreTrainedTokenizerBasez
    Base class for [`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`].

    Handles shared (mostly boilerplate) methods for those two classes.
    vocab_files_namespretrained_vocab_files_mapN_auto_class)	input_idstoken_type_idsattention_maskmodel_input_namesrightpadding_sidetruncation_sidec                    s  d| _ |D ]}t| |r!tt| |r!t| d| d| jj qt|| _	|
dd| _|
dd | _|
d|
dd }|d urF|nt| _|
d	| j| _| jd
vr^td| j |
d| j| _| jd
vrstd| j |
d| j| _|
dd| _|
dd| _i | _d| _|
dd | _t| jttfrdd | jD | _t jdi | |
di | _| j| jd d S )Nr;   z conflicts with the method z in name_or_pathr5   processor_classmodel_max_lengthmax_lenrS  )rR  leftzKPadding side should be selected between 'right' and 'left', current value: rT  zNTruncation side should be selected between 'right' and 'left', current value: rQ  clean_up_tokenization_spacesFsplit_special_tokenschat_templatec                 S   s   i | ]	}|d  |d qS r'  templater;   )r   r^  r;   r;   r<   r         z4PreTrainedTokenizerBase.__init__.<locals>.<dictcomp>extra_special_tokensr  )init_inputshasattrcallabler3  r   rz   rR   copydeepcopyinit_kwargspoprU  _processor_classVERY_LARGE_INTEGERrW  rS  r   rT  rQ  rZ  r[  deprecation_warnings_in_target_context_managerr\  rt   r   r   rs   rJ   r`  rH  )rI   r  r   rW  ry   r;   r<   rJ   z  s>   



z PreTrainedTokenizerBase.__init__r{   c                 C      | j | jdd S )zW
        `int`: The maximum length of a sentence that can be fed to the model.
        FpairrW  num_special_tokens_to_addrN   r;   r;   r<   max_len_single_sentence     z/PreTrainedTokenizerBase.max_len_single_sentencec                 C   rl  )zi
        `int`: The maximum combined length of a pair of sentences that can be fed to the model.
        Trm  ro  rN   r;   r;   r<   max_len_sentences_pair  rr  z.PreTrainedTokenizerBase.max_len_sentences_pairc                 C   sJ   || j | jdd kr!| jr!| jddstd d| jd< d S td)NFrm  rq  zXSetting 'max_len_single_sentence' is now deprecated. This value is automatically set up.TrW  rp  r   rj  r+  r   r   r   rI   r   r;   r;   r<   rq    s   c                 C   sJ   || j | jdd kr!| jr!| jddstd d| jd< d S td)NTrm  rs  FzWSetting 'max_len_sentences_pair' is now deprecated. This value is automatically set up.rt  ru  r;   r;   r<   rs    s   rV  c                 C   s
   || _ dS )z%Sets processor class as an attribute.N)rh  )rI   rV  r;   r;   r<   _set_processor_class  s   
z,PreTrainedTokenizerBase._set_processor_classc                 C      t  rC   r  rN   r;   r;   r<   added_tokens_decoder  s   z,PreTrainedTokenizerBase.added_tokens_decoderc                 C   sp   d dd | j D }| jj d| j d| j d| j d| j d| j	 d	| j
 d
| j d| j d| d S )Nz
	c                 S   s$   g | ]\}}| d |   dqS )z: ,)__repr__r   r;   r;   r<   r     s   $ z4PreTrainedTokenizerBase.__repr__.<locals>.<listcomp>z(name_or_path='z', vocab_size=z, model_max_length=z
, is_fast=z, padding_side='z', truncation_side='z', special_tokens=z, clean_up_tokenization_spaces=z, added_tokens_decoder={
	z
}
))joinrx  r   rz   rR   rU  
vocab_sizerW  r}   rS  rT  r:  rZ  )rI   added_tokens_decoder_repr;   r;   r<   rz    s*   z PreTrainedTokenizerBase.__repr__c                 C   rw  rC   r  rN   r;   r;   r<   __len__  rP   zPreTrainedTokenizerBase.__len__c                 C   rw  )a  
        Returns the vocabulary as a dictionary of token to index.

        `tokenizer.get_vocab()[token]` is equivalent to `tokenizer.convert_tokens_to_ids(token)` when `token` is in the
        vocab.

        Returns:
            `Dict[str, int]`: The vocabulary.
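
        Example (a minimal sketch; assumes the token `"hello"` is actually present in the loaded vocabulary):

        ```python
        vocab = tokenizer.get_vocab()
        # Indexing the mapping matches the dedicated conversion method
        assert vocab["hello"] == tokenizer.convert_tokens_to_ids("hello")
        ```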
        r  rN   r;   r;   r<   	get_vocab  s   
z!PreTrainedTokenizerBase.get_vocabFTconversationtools	documentsr\  add_generation_promptcontinue_final_messagetokenizepadding
truncation
max_lengthreturn_tensorsreturn_dictreturn_assistant_tokens_masktokenizer_kwargsc           )   	   K   s  |r|st d|r|st d|du ri }| ||}|r)td|s)td t|}t|tt	frIt|d tt	fsDt
|d drI|}d}n|g}d	}|r\|rVt d
|r\t d|durg }|D ]}t|trq|| qdt|r}|t| qdt dnd}|dur|D ]}t|tstdqg }g }i | j|}|D ]}t
|dr|j}|rtd|||||d|\}}|| n|jd||||d|}|r0|d d }t|tt	frt|D ]}d|v r|d } nqt d| |vrt d|| }|||t|   |kr$|d|t|   }n|d|t|   }|| q|s=|d }|r| |f||	|
d	|d|}|r|rg } |s\|ra|d }!n|d g}!tt|!D ]H}"dgt|!|"  }#||" D ]2\}$}%||"|$}&||"|%d }'|&du r nt|&|'r|'d nt|!|" D ]}(d|#|(< qq{| |# ql|s|s| d } | |d< |r|j|d |S |d S |S )a[  
        Converts a list of dictionaries with `"role"` and `"content"` keys to a list of token
        ids. This method is intended for use with chat models, and will read the tokenizer's chat_template attribute to
        determine the format and control tokens to use when converting.

        Args:
            conversation (Union[List[Dict[str, str]], List[List[Dict[str, str]]]]): A list of dicts
                with "role" and "content" keys, representing the chat history so far.
            tools (`List[Dict]`, *optional*):
                A list of tools (callable functions) that will be accessible to the model. If the template does not
                support function calling, this argument will have no effect. Each tool should be passed as a JSON Schema,
                giving the name, description and argument types for the tool. See our
                [chat templating guide](https://huggingface.co/docs/transformers/main/en/chat_templating#automated-function-conversion-for-tool-use)
                for more information.
            documents (`List[Dict[str, str]]`, *optional*):
                A list of dicts representing documents that will be accessible to the model if it is performing RAG
                (retrieval-augmented generation). If the template does not support RAG, this argument will have no
                effect. We recommend that each document should be a dict containing "title" and "text" keys. Please
                see the RAG section of the [chat templating guide](https://huggingface.co/docs/transformers/main/en/chat_templating#arguments-for-RAG)
                for examples of passing documents with chat templates.
            chat_template (`str`, *optional*):
                A Jinja template to use for this conversion. It is usually not necessary to pass anything to this
                argument, as the model's template will be used by default.
            add_generation_prompt (bool, *optional*):
                If this is set, a prompt with the token(s) that indicate
                the start of an assistant message will be appended to the formatted output. This is useful when you want to generate a response from the model.
                Note that this argument will be passed to the chat template, and so it must be supported in the
                template for this argument to have any effect.
            continue_final_message (bool, *optional*):
                If this is set, the chat will be formatted so that the final
                message in the chat is open-ended, without any EOS tokens. The model will continue this message
                rather than starting a new one. This allows you to "prefill" part of
                the model's response for it. Cannot be used at the same time as `add_generation_prompt`.
            tokenize (`bool`, defaults to `True`):
                Whether to tokenize the output. If `False`, the output will be a string.
            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
                 Select a strategy to pad the returned sequences (according to the model's padding side and padding
                 index) among:

                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
                  sequence is provided).
                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
                  lengths).
            truncation (`bool`, defaults to `False`):
                Whether to truncate sequences at the maximum length. Has no effect if tokenize is `False`.
            max_length (`int`, *optional*):
                Maximum length (in tokens) to use for padding or truncation. Has no effect if tokenize is `False`. If
                not specified, the tokenizer's `max_length` attribute will be used as a default.
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Has no effect if tokenize is `False`. Acceptable
                values are:
                - `'tf'`: Return TensorFlow `tf.Tensor` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
                - `'jax'`: Return JAX `jnp.ndarray` objects.
            return_dict (`bool`, defaults to `False`):
                Whether to return a dictionary with named outputs. Has no effect if tokenize is `False`.
            tokenizer_kwargs (`Dict[str, Any]`, *optional*): Additional kwargs to pass to the tokenizer.
            return_assistant_tokens_mask (`bool`, defaults to `False`):
                Whether to return a mask of the assistant generated tokens. For tokens generated by the assistant,
                the mask will contain 1. For user and system tokens, the mask will contain 0.
                This functionality is only available for chat templates that support it via the `{% generation %}` keyword.
            **kwargs: Additional kwargs to pass to the template renderer. Will be accessible by the chat template.

        Returns:
            `Union[List[int], Dict]`: A list of token ids representing the tokenized chat so far, including control tokens. This
            output is ready to pass to the model, either directly or via methods like `generate()`. If `return_dict` is
            set, will return a dict of tokenizer outputs instead.
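
        Example (a minimal sketch; `"HuggingFaceH4/zephyr-7b-beta"` is only an illustrative checkpoint whose
        tokenizer ships a chat template):

        ```python
        from transformers import AutoTokenizer

        tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")

        messages = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": "What is the capital of France?"},
        ]

        # Render the chat as a single prompt string, appending the assistant header
        prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

        # Or tokenize directly into a dict that can be fed to `model.generate()`
        inputs = tokenizer.apply_chat_template(
            messages, add_generation_prompt=True, return_dict=True, return_tensors="pt"
        )
        ```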
        zr`return_dict=True` is incompatible with `tokenize=False`, because there is no dict of tokenizer outputs to return.zL`return_assistant_tokens_mask=True` is incompatible with `return_dict=False`Nz\{\%-?\s*generation\s*-?\%\}zareturn_assistant_tokens_mask==True but chat template does not contain `{% generation %}` keyword.r   messagesTFa  continue_final_message and add_generation_prompt are not compatible. Use continue_final_message when you want the model to continue the final message, and add_generation_prompt when you want to add a header that will prompt it to start a new assistant message instead.zKcontinue_final_message is not compatible with return_assistant_tokens_mask.zTools should either be a JSON schema, or a callable function with type hints and a docstring suitable for auto-conversion to a schema.zADocuments should be a list of dicts with 'title' and 'text' keys!)compiled_templater  r  r  r  )r  r  r  r  rB   textz]continue_final_message is set but we could not find any text to continuein the final message!a  continue_final_message is set but the final message does not appear in the chat after applying the chat template! This can happen if the chat template deletes portions of the final message. Please verify the chat template and final message in your chat to ensure they are compatible.)r  r  r  r  r  rN  r   assistant_masksro   r;   )r   get_chat_templateresearchr   r
  r2   rt   r   r   rb  r   r  r   r!   r  r:  r  r3   renderreversedstriprindexrv   rE   ranger   rx   ))rI   r  r  r  r\  r  r  r  r  r  r  r  r  r  r  r  r  conversations
is_batchedtool_schemastooldocumentrenderedall_generation_indicestemplate_kwargschatrendered_chatgeneration_indicesfinal_messagecontent_blockfinal_msg_locoutr  rN  icurrent_maskassistant_start_charassistant_end_charstart_token	end_tokentoken_idr;   r;   r<   apply_chat_template  s  Z



	


$z+PreTrainedTokenizerBase.apply_chat_templatec                 C   s   t | jtrA| j}|dur||v r|| }|S |du r?|dur)d|v r)|d }|S d|v r3|d }|S tdt|  d|S |du rS| jdurO| j}|S td|S )a  
        Retrieve the chat template string used for tokenizing chat messages. This template is used
        internally by the `apply_chat_template` method and can also be used externally to retrieve the model's chat
        template for better generation tracking.

        Args:
            chat_template (`str`, *optional*):
                A Jinja template or the name of a template to use for this conversion.
                It is usually not necessary to pass anything to this argument,
                as the model's template will be used by default.
            tools (`List[Dict]`, *optional*):
                A list of tools (callable functions) that will be accessible to the model. If the template does not
                support function calling, this argument will have no effect. Each tool should be passed as a JSON Schema,
                giving the name, description and argument types for the tool. See our
                [chat templating guide](https://huggingface.co/docs/transformers/main/en/chat_templating#automated-function-conversion-for-tool-use)
                for more information.

        Returns:
            `str`: The chat template string.
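
        Example (illustrative; the `"tool_use"` name assumes the tokenizer actually stores a template under that
        key):

        ```python
        # Retrieve the template that `apply_chat_template` would use by default
        template = tokenizer.get_chat_template()

        # If the tokenizer stores several named templates, request a specific one
        tool_template = tokenizer.get_chat_template(chat_template="tool_use")
        ```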
        Ntool_usedefaultzThis model has multiple chat templates with no default specified! Please either pass a chat template or the name of the template you wish to use to the `chat_template` argument. Available template names are .a.  Cannot use chat template functions because tokenizer.chat_template is not set and no template argument was passed! For information about writing templates and setting the tokenizer.chat_template attribute, please see the documentation at https://huggingface.co/docs/transformers/main/en/chat_templating)rt   r\  r   r   sortedr   )rI   r\  r  template_dictr;   r;   r<   r    s4   

	z)PreTrainedTokenizerBase.get_chat_templatemain)	cache_dirforce_downloadlocal_files_onlyr  revisiontrust_remote_codepretrained_model_name_or_pathr  r  r  r  r  c                O   s|  |	 dd}
|	 dd}|	 dd}|	 dd}|	 dd}|	 dd}|	 d	d}|	d
d}|durDtdt |durBtd|}d|d| jv d}|durU||d< t ra|sat	d d}t
|}i }i }tj|}d}tj|s{t|rt| jdkr|std| j dtd| j dt t| j d }|||< |}nd|r||d< n]tttttd}i | j|}d|v rt}t|t|||
||||||ddd|d}t||}|durt|dd}t|}d|v rt|d }W d   n	1 sw   Y  ||d< i }| D ]M\}}|du r!d||< q||krBtj|r3|||< qt|r@t ||d||< qt||||||
|||||ddd|d ||< t|| |}qt!d!d" |" D r~|s~t#d#| d$| d%| j d&| D ]&\}}||vrq|rt	d'|  qt	d'| d(||   q| j$|||g|R ||||||d)|	S )*a  
        Instantiate a [`~tokenization_utils_base.PreTrainedTokenizerBase`] (or a derived class) from a predefined
        tokenizer.

        Args:
            pretrained_model_name_or_path (`str` or `os.PathLike`):
                Can be either:

                - A string, the *model id* of a predefined tokenizer hosted inside a model repo on huggingface.co.
                - A path to a *directory* containing vocabulary files required by the tokenizer, for instance saved
                  using the [`~tokenization_utils_base.PreTrainedTokenizerBase.save_pretrained`] method, e.g.,
                  `./my_model_directory/`.
                - (**Deprecated**, not applicable to all derived classes) A path or url to a single saved vocabulary
                  file (if and only if the tokenizer only requires a single vocabulary file like Bert or XLNet), e.g.,
                  `./my_model_directory/vocab.txt`.
            cache_dir (`str` or `os.PathLike`, *optional*):
                Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the
                standard cache should not be used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the vocabulary files and override the cached versions if they
                exist.
            resume_download:
                Deprecated and ignored. All downloads are now resumed by default when possible.
                Will be removed in v5 of Transformers.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
                when running `huggingface-cli login` (stored in `~/.huggingface`).
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether or not to only rely on local files and not to attempt to download any files.
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
                identifier allowed by git.
            subfolder (`str`, *optional*):
                In case the relevant files are located inside a subfolder of the model repo on huggingface.co (e.g. for
                facebook/rag-token-base), specify it here.
            inputs (additional positional arguments, *optional*):
                Will be passed along to the Tokenizer `__init__` method.
            trust_remote_code (`bool`, *optional*, defaults to `False`):
                Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
                should only be set to `True` for repositories you trust and in which you have read the code, as it will
                execute code present on the Hub on your local machine.
            kwargs (additional keyword arguments, *optional*):
                Will be passed to the Tokenizer `__init__` method. Can be used to set special tokens like `bos_token`,
                `eos_token`, `unk_token`, `sep_token`, `pad_token`, `cls_token`, `mask_token`,
                `additional_special_tokens`. See parameters in the `__init__` for more details.

        <Tip>

        Passing `token=True` is required when you want to use a private model.

        </Tip>

        Examples:

        ```python
        # We can't instantiate directly the base class *PreTrainedTokenizerBase* so let's show our examples on a derived class: BertTokenizer
        # Download vocabulary from huggingface.co and cache.
        tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")

        # Download vocabulary from huggingface.co (user-uploaded) and cache.
        tokenizer = BertTokenizer.from_pretrained("dbmdz/bert-base-german-cased")

        # If vocabulary files are in a directory (e.g. tokenizer was saved using *save_pretrained('./test/saved_model/')*)
        tokenizer = BertTokenizer.from_pretrained("./test/saved_model/")

        # If the tokenizer uses a single vocabulary file, you can point directly to this file
        tokenizer = BertTokenizer.from_pretrained("./test/saved_model/my_vocab.txt")

        # You can link tokens to special vocabulary when instantiating
        tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased", unk_token="<unk>")
        # You should be sure '<unk>' is in the vocabulary when doing that.
        # Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead.
        assert tokenizer.unk_token == "<unk>"
        ```resume_downloadNproxiesuse_auth_token	subfolder_from_pipeline
_from_autoF_commit_hash	gguf_filerThe `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.V`token` and `use_auth_token` are both specified. Please set only the argument `token`.	tokenizerFast)	file_typefrom_auto_classr}   using_pipelinez+Offline mode: forcing local_files_only=TrueTr   zCalling z.from_pretrained() with the path to a single file or url is not supported for this tokenizer. Use a model identifier or the path to a directory instead.z.from_pretrained() with the path to a single file or url is deprecated and won't be possible anymore in v5. Use a model identifier or the path to a directory instead.r   
vocab_file)added_tokens_filespecial_tokens_map_filetokenizer_config_filetokenizer_filechat_template_filer  )r  r  r  r  r  r  r  r  
user_agent _raise_exceptions_for_gated_repo%_raise_exceptions_for_missing_entries'_raise_exceptions_for_connection_errorsr  utf-8rn   fast_tokenizer_files)r  )r  r  r  r  r  r  r  r  r  r  r  r  r  c                 s   s    | ]}|d u V  qd S rC   r;   )r   full_file_namer;   r;   r<   r         z:PreTrainedTokenizerBase.from_pretrained.<locals>.<genexpr>zCan't load tokenizer for 'z'. If you were trying to load it from 'https://huggingface.co/models', make sure you don't have a local directory with the same name. Otherwise, make sure 'zI' is the correct path to a directory containing all relevant files for a z tokenizer.zloading file z from cache at r  r  r  r  	_is_localr  )%rg  r+  r   r   r   r   rR   r&   r   r  rV   ospathisdirisfiler(   rv   rK  r   r   ADDED_TOKENS_FILESPECIAL_TOKENS_MAP_FILETOKENIZER_CONFIG_FILEFULL_TOKENIZER_FILECHAT_TEMPLATE_FILEr   r    openjsonloadget_fast_tokenizer_filer   r   r  r   EnvironmentError_from_pretrained)clsr  r  r  r  r  r  r  ra  r  r  r  r  r  from_pipeliner  commit_hashr  r  vocab_filesinit_configurationis_localsingle_file_idfile_idadditional_files_namesfast_tokenizer_fileresolved_config_filereadertokenizer_configresolved_vocab_files	file_pathr;   r;   r<   from_pretrained'  s  [











z'PreTrainedTokenizerBase.from_pretrainedr  c          5   
   O   sJ  | dd}| dd }| dd d u}|s|s=| jd ur=|s=| jjt||t|g|
R ||||dt|}nd }|dd }|d urt|dd}t|}W d    n1 s_w   Y  | d	}|d	d  |sw|dd  |d
d}|
s|}
nd }|}|dd }|d urt|}|	 |d< W d    n1 sw   Y  |sd|v rt
|d ttfrd|d i|d< t|d ||d< d|v rt|d ||d< |d u r@ddlm} z|j|||||	|d}|j}W n tttfy   d }Y nw |d u r@ddlm} t|dr|j}nd }| D ]}|t|v r*|} nq|d ur@| |d\}}|d u r@|}|d ur`| jdd|ddkr`td| d| j d | | |dd }|dd }|! D ]\}} ||vr| ||< qu|dd }!|d ur||d< ||d< i }"i }#d |v r|d  ! D ]-\}$}t
|t"rt#di |}t
|t#r||"t$|$< ||#t|< qtd!|j% d"n:|d urZt|ddp}%t|%}&|&! D ]^\}'}(|'|v r||' rqt
|(t"rd#|(d$< t#di |(}(n7|'d%krDt
|(trD|d%g p g })|(D ]}t
|t"r6d#|d$< t#di |}||)vr@|)&| q#|)}(|(||'< qW d    n	1 sUw   Y  |d urg }*| j'| @ D ]%}'||' d ur|'d%kr|*d&d' ||' D 7 }*qh|*&t||'  qht|dd}+t|+},W d    n	1 sw   Y  |,! D ]\}-}.|-|*v }/t#|-dd|/ |/d(|"|.< |"|. |#t|< q|!d urt|!dd}0t|0}0|0d)}1W d    n	1 sw   Y  |1D ]}2|2d*}$t#di |2|"|$< |"|$ |#t|"|$ < q|"|d < | j(|dd+}| j'| @ D ]"}'|#i krC||' d urC|'d%krC|# t||' ||' ||'< q"z	| |
i |}3W n; t) y_   t*d, Y dS  t+y~ }4 zd-t|4v rst*d. W Y d }4~4dS d }4~4w ty   td/w |"i krt,t|" d0 d1|3j-krt*d2 |3S )3N	from_slowFr  r  )r  r  r  r  r  r  r  tokenizer_classra  r;   r  r\  auto_mapAutoTokenizercustom_pipelinesr   )
AutoConfig)r  r  r  r  r  )TOKENIZER_MAPPING_NAMES
model_typeNNr  r5   zThe tokenizer class you load from this checkpoint is not the same type as the class this function is called from. It may result in unexpected tokenization. 
The tokenizer class you load from this checkpoint is 'z/'. 
The class this function is called from is 'z'.r  r  __slow_tokenizerrU  rx  zFound a zV in the saved `added_tokens_decoder`, should be a dictionary or an AddedToken instanceTrG   r   c                 S   r   r;   r0  r<  r;   r;   r<   r     r   z<PreTrainedTokenizerBase._from_pretrained.<locals>.<listcomp>r  r  id)savezUnable to load tokenizer model from SPM, loading from TikToken will be attempted instead.(Google protobuf error: Tried to load SPM model with non-SPM vocab file).zsentencepiece_processor.cczUnable to load tokenizer model from SPM, loading from TikToken will be attempted instead.(SentencePiece RuntimeError: Tried to load SPM model with non-SPM vocab file).zoUnable to load vocabulary from file. Please check that the provided vocabulary is accessible and not corrupted.r  r   zuSpecial tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.).r+  slow_tokenizer_classr  rd  re  rg  r  r  r  readrt   r   r   r   r   models.auto.configuration_autor  r  r  OSErrorr   r   models.auto.tokenization_autor  rb  r  r   rV   rR   replacer   r   r?  r   r   r>   rh   rz   r  r  convert_added_tokensr=   r  RuntimeErrormaxr|  )5r  r  r  r  r  r  r  r  r  r  ra  r  r  r  has_tokenizer_fileslow_tokenizerr  tokenizer_config_handlerf  config_tokenizer_classsaved_init_inputsr  chat_template_handler  configr  r  patternconfig_tokenizer_class_fastr  r  	args_namer  r  rx  added_tokens_mapidxspecial_tokens_map_handler:  r   r   r   r	  added_tokens_handleadded_tok_encoder	str_tokenindexrG   tokenizer_file_handler  serialized_tokensr  r   r;   r;   r<   r    s  	






















(z(PreTrainedTokenizerBase._from_pretrainedc                 C   s   |S rC   r;   )r  max_model_lengthinit_max_model_lengthr;   r;   r<   !_eventually_correct_t5_max_length	  s   z9PreTrainedTokenizerBase._eventually_correct_t5_max_lengthr   c                    s   t |trd|v r|d dkr|d tdi |S t |tr5r5| } r.d|d< |S |d |S t |ttfrG fdd|D S t |trY fdd| D S |S )	N__typer>   rG   c                    s   g | ]
}j | d qS r  add_type_fieldr  )r   or+  r  r  r;   r<   r   /	  r   z@PreTrainedTokenizerBase.convert_added_tokens.<locals>.<listcomp>c                    s"   i | ]\}}|j | d qS r)  r,  r   r.  r;   r<   r   1	  s   " z@PreTrainedTokenizerBase.convert_added_tokens.<locals>.<dictcomp>r;   )rt   r   rg  r>   rO   r   r   r   )r  r   r  r+  r;   r.  r<   r  !	  s   


z,PreTrainedTokenizerBase.convert_added_tokenssave_directorylegacy_formatfilename_prefixpush_to_hubc                 K   s  | dd}|dur tdt |dddurtd||d< tj|r1t	
d| d dS tj|dd	 |r[| d
d}| d|tjjd }| j|fi |}| |}	tj||re|d ndt }
tj||rt|d ndt }tj||r|d ndt }t| j}t| j }|ddg |D ]}t| |rt| |||< q|| j d|vr| j|d< || j d}| jdurt| jt rdd | j! D |d< n>|ddrt"|ddd}|#| j W d   n1 sw   Y  d}t	$d|  d|v r| d n| j|d< t%| j&dkr+t| j&|d< | j' D ]	}| |d q0| j(|ddd}i }| j)! D ]\}}|* ||< qI||d< | j+j,}|-drpt| d drp|dd! }||d"< t| d#ddur| j.|d$< t| d%ddur| j/|d&< | j0durt1| ||d' d(|v r| d( | d)d | d*d d+|v r| d+ t"|ddd}t2j3|d,ddd-d. }|#| W d   n	1 sw   Y  t	$d/|  | j(| j4ddd0}t"|
ddd}t2j3|d,ddd-d. }|#| W d   n	1 sw   Y  t	$d1|
  ||
f}|r2||f7 }| j5||||d2}|rK| j6|||	||dd3 |S )4a  
        Save the full tokenizer state.


        This method makes sure the full tokenizer can then be re-loaded using the
        [`~tokenization_utils_base.PreTrainedTokenizer.from_pretrained`] class method.

        Warning: this won't save modifications you may have applied to the tokenizer after the instantiation (for
        instance, modifying `tokenizer.do_lower_case` after creation).

        Args:
            save_directory (`str` or `os.PathLike`): The path to a directory where the tokenizer will be saved.
            legacy_format (`bool`, *optional*):
                Only applicable for a fast tokenizer. If unset (default), will save the tokenizer in the unified JSON
                format as well as in legacy format if it exists, i.e. with tokenizer specific vocabulary and a separate
                added_tokens file.

                If `False`, will only save the tokenizer in the unified JSON format. This format is incompatible with
                "slow" tokenizers (not powered by the *tokenizers* library), so the tokenizer will not be able to be
                loaded in the corresponding "slow" tokenizer.

                If `True`, will save the tokenizer in legacy format. If the "slow" tokenizer doesn't exist, a
                `ValueError` is raised.
            filename_prefix (`str`, *optional*):
                A prefix to add to the names of the files saved by the tokenizer.
            push_to_hub (`bool`, *optional*, defaults to `False`):
                Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
                repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
                namespace).
            kwargs (`Dict[str, Any]`, *optional*):
                Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.

        Returns:
            A tuple of `str`: The files saved.
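
        Example (a minimal sketch; `./my_model_directory/` is an illustrative path):

        ```python
        from transformers import AutoTokenizer

        tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
        saved_files = tokenizer.save_pretrained("./my_model_directory/")

        # The directory now contains everything needed to reload the tokenizer
        reloaded = AutoTokenizer.from_pretrained("./my_model_directory/")
        ```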
        r  Nr  r  r  zProvided path (z#) should be a directory, not a fileT)exist_okcommit_messagerepo_idr  -r5   rW  rZ  r`  Fc                 S   s   g | ]	\}}||d qS )r]  r;   r   r;   r;   r<   r   	  r_  z;PreTrainedTokenizerBase.save_pretrained.<locals>.<listcomp>r\  save_raw_chat_templatewr  r  zchat template saved in r   ra  )r+  r  rx  r  can_save_slow_tokenizerr#  r  	_auto_mapr  rh  rV  )r  rU  r  r  
device_map   indent	sort_keysensure_ascii
ztokenizer config file saved in r*  zSpecial tokens file saved in )r/  
file_namesr0  r1  )r4  r  )7rg  r   r   r   r+  r   r  r  r  r   r2  makedirssplitsep_create_repo_get_files_timestampsr{  r  r  r  rd  re  rf  r   r   r?  rb  r3  r:  r`  r\  rt   r   r   r  writer  rv   ra  rK  r  rx  rO   rz   rR   r*  r:  rh  rM  r   r  dumpsr;  _save_pretrained_upload_modified_files)rI   r/  r0  r1  r2  r  r  r4  r5  files_timestampsr  r  r  r  target_keysr   saved_raw_chat_templatefr  r  r   r   r  out_str
write_dictrB  
save_filesr;   r;   r<   save_pretrained4	  s   +












z'PreTrainedTokenizerBase.save_pretrainedrB  c           
         s   |du rt dt|}tj||r|d ndt } fdd j D }|rXt|ddd	 }t	j
|d
dddd }|| td|  W d   n1 sSw   Y   j||d}	||	 |f S )a2  
        Save a tokenizer using the slow-tokenizer/legacy format: vocabulary + added tokens.

        Fast tokenizers can also be saved in a unique JSON file containing {config + vocab + added-tokens} using the
        specific [`~tokenization_utils_fast.PreTrainedTokenizerFast._save_pretrained`]
        Fz^Only fast tokenizers (instances of PreTrainedTokenizerFast) can be saved in non legacy format.r6  r5   c                    s    i | ]\}}| j kr||qS r;   )r|  )r   r1  r"  rN   r;   r<   r   
       z<PreTrainedTokenizerBase._save_pretrained.<locals>.<dictcomp>r8  r  r  r<  Tr=  rA  zadded tokens file saved in N)r1  )r   rV   r  r  r{  r  added_tokens_encoderr   r  r  rI  rH  r   r  save_vocabulary)
rI   r/  rB  r0  r1  r  added_vocabrO  rP  r  r;   rN   r<   rJ  	  s"   
z(PreTrainedTokenizerBase._save_pretrainedc                 C   r  )aO  
        Save only the vocabulary of the tokenizer (vocabulary + added tokens).

        This method won't save the configuration and special token mappings of the tokenizer. Use
        [`~PreTrainedTokenizerFast._save_pretrained`] to save the whole state of the tokenizer.

        Args:
            save_directory (`str`):
                The directory in which to save the vocabulary.
            filename_prefix (`str`, *optional*):
                An optional prefix to add to the names of the saved files.

        Returns:
            `Tuple(str)`: Paths to the files saved.
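
        Example (a minimal sketch; only meaningful on a concrete subclass, since the base implementation is not
        provided):

        ```python
        # For a WordPiece tokenizer this writes e.g. `my-prefix-vocab.txt` into the directory
        vocab_files = tokenizer.save_vocabulary("./my_model_directory/", filename_prefix="my-prefix")
        ```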
        r  )rI   r/  r1  r;   r;   r<   rV  
  s   z'PreTrainedTokenizerBase.save_vocabularyr  rn  r  c                 K   r  )a  
        Converts a string into a sequence of tokens, replacing unknown tokens with the `unk_token`.

        Args:
            text (`str`):
                The sequence to be encoded.
            pair (`str`, *optional*):
                A second sequence to be encoded with the first.
            add_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not to add the special tokens associated with the corresponding model.
            kwargs (additional keyword arguments, *optional*):
                Will be passed to the underlying model specific encode method. See details in
                [`~PreTrainedTokenizerBase.__call__`]

        Returns:
            `List[str]`: The list of tokens.
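
        Example (a minimal sketch; the exact tokens depend on the loaded vocabulary):

        ```python
        from transformers import AutoTokenizer

        tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
        tokens = tokenizer.tokenize("Hello world!")
        # For this lowercasing WordPiece tokenizer: ['hello', 'world', '!']
        ```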
        r  )rI   r  rn  r  r  r;   r;   r<   r  $
  s   z PreTrainedTokenizerBase.tokenizezI
            **kwargs: Passed along to the `.tokenize()` method.
        z
        Returns:
            `List[int]`, `torch.Tensor`, `tf.Tensor` or `np.ndarray`: The tokenized ids of the text.
        r   	text_pairstridec
                 K   s,   | j |f||||||||	d|
}|d S )aC  
        Converts a string to a sequence of ids (integer), using the tokenizer and vocabulary.

        Same as doing `self.convert_tokens_to_ids(self.tokenize(text))`.

        Args:
            text (`str`, `List[str]` or `List[int]`):
                The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the
                `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
                method).
            text_pair (`str`, `List[str]` or `List[int]`, *optional*):
                Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using
                the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
                method).
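
        Example (a minimal sketch; the exact ids and special tokens depend on the loaded tokenizer):

        ```python
        ids = tokenizer.encode("Hello world!", add_special_tokens=True)
        # Equivalent to tokenizing, converting tokens to ids, and adding the model's
        # special tokens (e.g. [CLS] ... [SEP] for BERT-style models)
        ```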
        )rX  r  r  r  r  rY  rS  r  rN  )encode_plus)rI   r  rX  r  r  r  r  rY  rS  r  r  encoded_inputsr;   r;   r<   encode8
  s   &
zPreTrainedTokenizerBase.encodec                 C   r  rC   r  )rI   rn  r;   r;   r<   rp  m
  r  z1PreTrainedTokenizerBase.num_special_tokens_to_addc                 K   s  | dd}| dd}|dur-|du r-|du r-|r+| jdds&td d| jd< d	}|du rG|rG|r;td
t |du rCtj	}	nEtj
}	nA|dur|du rs|ro|durf|du sa|du sa|dkrftd |durotd tj	}	nt|ts}t|}	nt|tr|}	ntj}	|du r|dkr|rtdt t|}
n%|dur|dur|du rtj}
nt|tst|}
nt|tr|}
ntj}
|du r|	tj
kr| jtkr|r| jddstd d| jd< tj}	n| j}|
tjkr| jtkr|r| jddstd d| jd< tj}
n| j}|	tjkr)| jdu s%| jdk r)td|
tjkrQ|	tjkrQ|durQ|durQ|| dkrQtd| d| d|	|
||fS )z
        Find the correct padding/truncation strategy with backward compatibility for old arguments (truncation_strategy
        and pad_to_max_length) and behaviors.
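
        A sketch of the resolution performed here (illustrative, not exhaustive):

        ```python
        # padding=True or "longest"  -> PaddingStrategy.LONGEST
        # padding="max_length"       -> PaddingStrategy.MAX_LENGTH
        # padding=False              -> PaddingStrategy.DO_NOT_PAD
        # truncation=True            -> TruncationStrategy.LONGEST_FIRST
        # truncation=False or None   -> TruncationStrategy.DO_NOT_TRUNCATE
        ```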
        truncation_strategyr^   pad_to_max_lengthFNz#Truncation-not-explicitly-activatedat  Truncation was not explicitly activated but `max_length` is provided a specific value, please use `truncation=True` to explicitly truncate examples to max length. Defaulting to 'longest_first' truncation strategy. If you encode pairs of sequences (GLUE-style) with the tokenizer you can select this strategy more precisely by providing a specific strategy to `truncation`.Tr]   a  The `pad_to_max_length` argument is deprecated and will be removed in a future version, use `padding=True` or `padding='longest'` to pad to the longest sequence in the batch, or use `padding='max_length'` to pad to a max length. In this case, you can give a specific length with `max_length` (e.g. `max_length=45`) or leave max_length to None to pad to the maximal input size of the model (e.g. 512 for Bert).z`max_length` is ignored when `padding`=`True` and there is no truncation strategy. To pad to max length, use `padding='max_length'`.zLThough `pad_to_max_length` = `True`, it is ignored because `padding`=`True`.a  The `truncation_strategy` argument is deprecated and will be removed in a future version, use `truncation=True` to truncate examples to a max length. You can give a specific length with `max_length` (e.g. `max_length=45`) or leave max_length to None to truncate to the maximal input size of the model (e.g. 512 for Bert).  If you have pairs of inputs, you can give a specific truncation strategy selected among `truncation='only_first'` (will only truncate the first sentence in the pairs) `truncation='only_second'` (will only truncate the second sentence in the pairs) or `truncation='longest_first'` (will iteratively remove tokens from the longest sentence in the pairs).zAsking-to-pad-to-max_lengthzAsking to pad to max_length but no maximum length is provided and the model has no predefined maximum length. Default to no padding.z Asking-to-truncate-to-max_lengthzAsking to truncate to max_length but no maximum length is provided and the model has no predefined maximum length. Default to no truncation.r   zAsking to pad but the tokenizer does not have a padding token. Please select a token to use as `pad_token` `(tokenizer.pad_token = tokenizer.eos_token e.g.)` or add a new pad token via `tokenizer.add_special_tokens({'pad_token': '[PAD]'})`.zATruncation and padding are both activated but truncation length (z+) is not a multiple of pad_to_multiple_of (z).)rg  rj  r+  r   r   r   r   r   r   LONGEST
MAX_LENGTHrt   
DO_NOT_PADrZ   ra   rb   rW  LARGE_INTEGERr   pad_token_idr   )rI   r  r  r  pad_to_multiple_ofr   r  old_truncation_strategyold_pad_to_max_lengthpadding_strategyr]  r;   r;   r<   "_get_padding_truncation_strategiesp
  s   













$

z:PreTrainedTokenizerBase._get_padding_truncation_strategiestext_targettext_pair_targetis_split_into_wordsrd  return_token_type_idsreturn_attention_maskreturn_overflowing_tokensreturn_special_tokens_maskreturn_offsets_mappingreturn_lengthr   c                 K   s  i d|d|d|d|d|	d|
d|d|d	|d
|d|d|d|d|d|d| d| jd|}|| |du rK|du rKtd|dura| jsV|   | jd||d|}|durt|   | jd||d|}|   |du r~|S |du r|S |d |d< |S )a  
        Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
        sequences.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`, *optional*):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            text_pair (`str`, `List[str]`, `List[List[str]]`, *optional*):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            text_target (`str`, `List[str]`, `List[List[str]]`, *optional*):
                The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a
                list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized),
                you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            text_pair_target (`str`, `List[str]`, `List[List[str]]`, *optional*):
                The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a
                list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized),
                you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
        r  r  r  r  rY  rk  rd  rS  r  rl  rm  rn  ro  rp  rq  r[  r   Nz3You need to specify either `text` or `text_target`.)r  rX  rN  labelsr;   )rg  r[  r?  r   rk  _switch_to_input_mode	_call_one_switch_to_target_mode)rI   r  rX  ri  rj  r  r  r  r  rY  rk  rd  rS  r  rl  rm  rn  ro  rp  rq  r   r  
all_kwargsr   target_encodingsr;   r;   r<   __call__
  sh   2	

z PreTrainedTokenizerBase.__call__r[  c                 K   s  dd }||st d|d ur||st d|r-t|ttfo+|o+t|d ttf}nt|ttf}|rt|tr?td|d urZt|t|krZt dt| dt| d|d urett||n|}| jdi d	|d
|d|d|d|d|d|d|	d|
d|d|d|d|d|d|d|d|d||S | j	di d|d|d
|d|d|d|d|d|d|	d|
d|d|d|d|d|d|d|d|d||S )Nc                 S   sz   t | trdS t | ttfr;t| dkrdS t | d trdS t | d ttfr9t| d dkp8t | d d tS dS dS )NTr   F)rt   rV   r   r   rv   )r   r;   r;   r<   _is_valid_text_inputn  s   
"z?PreTrainedTokenizerBase._call_one.<locals>._is_valid_text_inputztext input must be of type `str` (single example), `List[str]` (batch or single pretokenized example) or `List[List[str]]` (batch of pretokenized examples).r   zdwhen tokenizing batches of text, `text_pair` must be a list or tuple with the same length as `text`.zbatch length of `text`: z- does not match batch length of `text_pair`: r  batch_text_or_text_pairsr  r  r  r  rY  rk  rd  rS  r  rl  rm  rn  ro  rp  rq  r   r[  r  rX  r;   )
r   rt   r   r   rV   r  rv   zipbatch_encode_plusrZ  )rI   r  rX  r  r  r  r  rY  rk  rd  rS  r  rl  rm  rn  ro  rp  rq  r   r[  r  ry  r  rz  r;   r;   r<   rt  V  s   &

	

	
z!PreTrainedTokenizerBase._call_onec                 K   s   | j d||||	|d|\}}}}| jdi d|d|d|d|d|d|d|d	|d
|	d|
d|d|d|d|d|d|d|d|d|d| j|S )ax  
        Tokenize and prepare for the model a sequence or a pair of sequences.

        <Tip warning={true}>

        This method is deprecated, `__call__` should be used instead.

        </Tip>

        Args:
            text (`str`, `List[str]` or (for non-fast tokenizers) `List[int]`):
                The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the
                `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
                method).
            text_pair (`str`, `List[str]` or `List[int]`, *optional*):
                Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using
                the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids`
                method).
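
        Example (a minimal sketch; kept short since `__call__` is the preferred entry point):

        ```python
        enc = tokenizer.encode_plus("Hello world!", return_special_tokens_mask=True)
        # A `BatchEncoding` with `input_ids` plus the requested extra outputs
        ```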
        r  r  r  rd  r   r  rX  r  rg  r]  r  rY  rk  rd  rS  r  rl  rm  rn  ro  rp  rq  r   r[  Nr;   )rh  _encode_plusrg  r[  )rI   r  rX  r  r  r  r  rY  rk  rd  rS  r  rl  rm  rn  ro  rp  rq  r   r  rg  r]  r;   r;   r<   rZ    sb   ,
		
z#PreTrainedTokenizerBase.encode_plusrg  r]  c                 K   r  rC   r  )rI   r  rX  r  rg  r]  r  rY  rk  rd  rS  r  rl  rm  rn  ro  rp  rq  r   r[  r  r;   r;   r<   r~    s   z$PreTrainedTokenizerBase._encode_plusrz  c                 K   s   | j d|||||d|\}}}}| jdi d|d|d|d|d|d|d|d	|d
|	d|
d|d|d|d|d|d|d|d||S )a  
        Tokenize and prepare for the model a list of sequences or a list of pairs of sequences.

        <Tip warning={true}>

        This method is deprecated, `__call__` should be used instead.

        </Tip>

        Args:
            batch_text_or_text_pairs (`List[str]`, `List[Tuple[str, str]]`, `List[List[str]]`, `List[Tuple[List[str], List[str]]]`, and for not-fast tokenizers, also `List[List[int]]`, `List[Tuple[List[int], List[int]]]`):
                Batch of sequences or pair of sequences to be encoded. This can be a list of
                string/string-sequences/int-sequences or a list of pair of string/string-sequences/int-sequence (see
                details in `encode_plus`).
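
        Example (a minimal sketch; kept short since calling the tokenizer on a list is preferred):

        ```python
        enc = tokenizer.batch_encode_plus(["Hello world!", "How are you?"], padding=True)
        # `enc["input_ids"]` holds one padded id list per input sequence
        ```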
        r}  rz  r  rg  r]  r  rY  rk  rd  rS  r  rl  rm  rn  ro  rp  rq  r   r[  Nr;   )rh  _batch_encode_plus)rI   rz  r  r  r  r  rY  rk  rd  rS  r  rl  rm  rn  ro  rp  rq  r   r[  r  rg  r]  r;   r;   r<   r|  1  s^   /
		
z)PreTrainedTokenizerBase.batch_encode_plusc                 K   r  rC   r  )rI   rz  r  rg  r]  r  rY  rk  rd  rS  r  rl  rm  rn  ro  rp  rq  r   r[  r  r;   r;   r<   r    s   z*PreTrainedTokenizerBase._batch_encode_plusr[  c	              	      s  | j jdr| jddstd| j j d d| jd< ttt	fr9td t
r9fdd	d  D | jd vrQtd
| jd  dt  | jd  }	|	du sgt|	trot|	dkro|rmg d< S |	d }
t|
tt	fr|	D ]}t|dkr|d }
 nq|t|
ttt	fst|
r|du rdn|}n)t|
r|du rdn|}nt|
tjr|du rdn|}ntd|
 dt|
 d D ]
\}}t||< q| j|||d\}}}}| jd  }	|	r	t|	d tt	fs	| j|||||dt|dS t|	 t fdd D sJ d|tjkr1tdd |	D }tj }i }t! D ]3fdd	 D }| j||||||d}| D ]\}}||vr`g ||< || "| qSq7t||dS )a  
        Pad a single encoded input or a batch of encoded inputs up to predefined length or to the max sequence length
        in the batch.

        Padding side (left/right) and padding token ids are defined at the tokenizer level (with `self.padding_side`,
        `self.pad_token_id` and `self.pad_token_type_id`).

        Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the
        text followed by a call to the `pad` method to get a padded encoding.

        <Tip>

        If the `encoded_inputs` passed are dictionaries of numpy arrays, PyTorch tensors or TensorFlow tensors, the
        result will use the same type unless you provide a different tensor type with `return_tensors`. In the case of
        PyTorch tensors, you will lose the specific device of your tensors however.

        </Tip>

        Args:
            encoded_inputs ([`BatchEncoding`], list of [`BatchEncoding`], `Dict[str, List[int]]`, `Dict[str, List[List[int]]` or `List[Dict[str, List[int]]]`):
                Tokenized inputs. Can represent one input ([`BatchEncoding`] or `Dict[str, List[int]]`) or a batch of
                tokenized inputs (list of [`BatchEncoding`], *Dict[str, List[List[int]]]* or *List[Dict[str,
                List[int]]]*) so you can use this method during preprocessing as well as in a PyTorch Dataloader
                collate function.

                Instead of `List[int]` you can have tensors (numpy arrays, PyTorch tensors or TensorFlow tensors), see
                the note above for the return type.
            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
                Select a strategy to pad the returned sequences (according to the model's padding side and padding
                index) among:

                - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a
                  single sequence is provided).
                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different
                  lengths).
            max_length (`int`, *optional*):
                Maximum length of the returned list and optionally padding length (see above).
            pad_to_multiple_of (`int`, *optional*):
                If set, will pad the sequence to a multiple of the provided value.

                This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
                `>= 7.5` (Volta).
            padding_side (`str`, *optional*):
                The side on which the model should have padding applied. Should be selected between ['right', 'left'].
                Default value is picked from the class attribute of the same name.
            return_attention_mask (`bool`, *optional*):
                Whether to return the attention mask. If left to the default, will return the attention mask according
                to the specific tokenizer's default, defined by the `return_outputs` attribute.

                [What are attention masks?](../glossary#attention-mask)
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors instead of list of python integers. Acceptable values are:

                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return Numpy `np.ndarray` objects.
            verbose (`bool`, *optional*, defaults to `True`):
                Whether or not to print more information and warnings.
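
        Example (a minimal sketch; the checkpoint name is illustrative):

        ```python
        >>> from transformers import AutoTokenizer

        >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
        >>> encodings = [tokenizer("Hello"), tokenizer("Hello world, how are you?")]
        >>> padded = tokenizer.pad(encodings, padding="longest")
        >>> len(padded["input_ids"][0]) == len(padded["input_ids"][1])
        True
        ```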
        """
        # With a fast tokenizer, `__call__` pads directly; warn (once) when `pad` is used instead.
        if self.__class__.__name__.endswith("Fast"):
            if not self.deprecation_warnings.get("Asking-to-pad-a-fast-tokenizer", False):
                logger.warning_advice(
                    f"You're using a {self.__class__.__name__} tokenizer. Please note that with a fast tokenizer,"
                    " using the `__call__` method is faster than using a method to encode the text followed by a call"
                    " to the `pad` method to get a padded encoding."
                )
                self.deprecation_warnings["Asking-to-pad-a-fast-tokenizer"] = True

        # If we have a list of dicts, let's convert it in a dict of lists.
        if isinstance(encoded_inputs, (list, tuple)) and isinstance(encoded_inputs[0], Mapping):
            encoded_inputs = {key: [example[key] for example in encoded_inputs] for key in encoded_inputs[0].keys()}

        # The model's main input name, usually `input_ids`, has to be passed for padding.
        if self.model_input_names[0] not in encoded_inputs:
            raise ValueError(
                "You should supply an encoding or a list of encodings to this method "
                f"that includes {self.model_input_names[0]}, but you provided {list(encoded_inputs.keys())}"
            )

        required_input = encoded_inputs[self.model_input_names[0]]
        if required_input is None or (isinstance(required_input, Sized) and len(required_input) == 0):
            if return_attention_mask:
                encoded_inputs["attention_mask"] = []
            return BatchEncoding(encoded_inputs, tensor_type=return_tensors)

        # Infer the tensor framework from the first non-empty element so the output matches the input type.
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            for item in required_input:
                if len(item) != 0:
                    first_element = item[0]
                    break
        if not isinstance(first_element, (int, list, tuple)):
            if is_tf_tensor(first_element):
                return_tensors = "tf" if return_tensors is None else return_tensors
            elif is_torch_tensor(first_element):
                return_tensors = "pt" if return_tensors is None else return_tensors
            elif isinstance(first_element, np.ndarray):
                return_tensors = "np" if return_tensors is None else return_tensors
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )
            for key, value in encoded_inputs.items():
                encoded_inputs[key] = to_py_obj(value)

        # Convert `padding` to a PaddingStrategy.
        padding_strategy, _, max_length, _ = self._get_padding_truncation_strategies(
            padding=padding, max_length=max_length, verbose=verbose
        )

        required_input = encoded_inputs[self.model_input_names[0]]
        if required_input and not isinstance(required_input[0], (list, tuple)):
            # A single (non-batched) input can be padded directly.
            encoded_inputs = self._pad(
                encoded_inputs,
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                padding_side=padding_side,
                return_attention_mask=return_attention_mask,
            )
            return BatchEncoding(encoded_inputs, tensor_type=return_tensors)

        batch_size = len(required_input)
        if any(len(v) != batch_size for v in encoded_inputs.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = max(len(inputs) for inputs in required_input)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in encoded_inputs.items()}
            outputs = self._pad(
                inputs,
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                padding_side=padding_side,
                return_attention_mask=return_attention_mask,
            )
            for key, value in outputs.items():
                batch_outputs.setdefault(key, []).append(value)

        return BatchEncoding(batch_outputs, tensor_type=return_tensors)
zPreTrainedTokenizerBase.padtoken_ids_0token_ids_1c                 C   s2   |du rt |dg S dgt | dgt |  S )a  
        Create the token type IDs corresponding to the sequences passed. [What are token type
        IDs?](../glossary#token-type-ids)

        Should be overridden in a subclass if the model has a special way of building those.

        Args:
            token_ids_0 (`List[int]`): The first tokenized sequence.
            token_ids_1 (`List[int]`, *optional*): The second tokenized sequence.

        Returns:
            `List[int]`: The token type ids.
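
        Example (behavior of this base implementation; model-specific subclasses typically override it to account
        for their special tokens):

        ```python
        >>> tokenizer.create_token_type_ids_from_sequences([5, 6], [7, 8])  # doctest: +SKIP
        [0, 0, 1, 1]
        ```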
        """
        if token_ids_1 is None:
            return len(token_ids_0) * [0]
        return [0] * len(token_ids_0) + [1] * len(token_ids_1)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens.

        This implementation does not add special tokens and this method should be overridden in a subclass.

        Args:
            token_ids_0 (`List[int]`): The first tokenized sequence.
            token_ids_1 (`List[int]`, *optional*): The second tokenized sequence.

        Returns:
            `List[int]`: The model input with special tokens.
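
        Example (behavior of this base implementation, which simply concatenates; subclasses add model-specific
        special tokens such as `[CLS]`/`[SEP]`):

        ```python
        >>> tokenizer.build_inputs_with_special_tokens([5, 6], [7, 8])  # doctest: +SKIP
        [5, 6, 7, 8]
        ```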
        """
        if token_ids_1 is None:
            return token_ids_0
        return token_ids_0 + token_ids_1

    @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
    def prepare_for_model(
        self,
        ids: List[int],
        pair_ids: Optional[List[int]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        prepend_batch_axis: bool = False,
        **kwargs,
    ) -> BatchEncoding:
        """
        Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
        adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
        manages a moving window (with user defined stride) for overflowing tokens. Please note: for *pair_ids*
        different from `None` and *truncation_strategy = longest_first* or `True`, it is not possible to return
        overflowing tokens. Such a combination of arguments will raise an error.

        Args:
            ids (`List[int]`):
                Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and
                `convert_tokens_to_ids` methods.
            pair_ids (`List[int]`, *optional*):
                Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
                and `convert_tokens_to_ids` methods.
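
        Example (a minimal sketch; the checkpoint name is illustrative):

        ```python
        >>> from transformers import AutoTokenizer

        >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
        >>> ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("Hello world"))
        >>> encoded = tokenizer.prepare_for_model(ids, add_special_tokens=True)
        >>> list(encoded.keys())
        ['input_ids', 'token_type_ids', 'attention_mask']
        ```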
        r}  Nr   zAsking to return token_type_ids while setting add_special_tokens to False results in an undefined behavior. Please set add_special_tokens to True or set return_token_type_ids to None.zNot possible to return overflowing tokens for pair of sequences with the `longest_first`. Please select another truncation strategy than `longest_first`, for instance `only_second` or `only_first`.rO  rP  rm  )r  num_tokens_to_remover]  rY  r   num_truncated_tokensrN  special_tokens_mask)r  r  rd  rS  rm  lengthrr   r;   )rh  r   rv   r   rZ   ra   rQ  rp  rb   truncate_sequencesr  r  get_special_tokens_mask&_eventual_warn_about_too_long_sequencer   ra  r  r   rl   )rI   r  r  r  r  r  r  rY  rd  rS  r  rl  rm  rn  ro  rp  rq  r   rp   r  rg  r]  rn  len_idslen_pair_idsr[  	total_lenr   sequencerO  r  r;   r;   r<   prepare_for_modely  s   (	


$	z)PreTrainedTokenizerBase.prepare_for_modelr]   r  c              	   C   s  |dkr	||g fS t |tst|}g }|tjks"|tjkr|du rt||krbtt||| }| jdkrD|d| }||d }n'| jdkrY|| d }|d|  }ntd| j dd| dt| d	}|tjkrz|d
| d }t	| n|tjkrt
dtjj d |durt|nd}	t|}
tt|	|
 |}|| }|
|	kr||d  }||d  }n|d }|| |d  }| jdkr|dkr|d|  n|}|dur|dkr|d|  n|}n~| jdkr||d }|dur||d nd}ndtd| j |tjkrk|durkt||krZtt||| }| jdkr?|| d }|d|  }n,| jdkrR|d| }||d }ntd| j t	d| dt| d| d |||fS )a
  
        Truncates a sequence pair in-place following the strategy.

        Args:
            ids (`List[int]`):
                Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and
                `convert_tokens_to_ids` methods.
            pair_ids (`List[int]`, *optional*):
                Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
                and `convert_tokens_to_ids` methods.
            num_tokens_to_remove (`int`, *optional*, defaults to 0):
                Number of tokens to remove using the truncation strategy.
            truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `'longest_first'`):
                The strategy to follow for truncation. Can be:

                - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
                  maximum acceptable input length for the model if that argument is not provided. This will truncate
                  token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
                  batch of pairs) is provided.
                - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
                  maximum acceptable input length for the model if that argument is not provided. This will only
                  truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
                - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
                  maximum acceptable input length for the model if that argument is not provided. This will only
                  truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
                - `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater
                  than the model maximum admissible input size).
            stride (`int`, *optional*, defaults to 0):
                If set to a positive number, the overflowing tokens returned will contain some tokens from the main
                sequence returned. The value of this argument defines the number of additional tokens.

        Returns:
            `Tuple[List[int], List[int], List[int]]`: The truncated `ids`, the truncated `pair_ids` and the list of
            overflowing tokens. Note: The *longest_first* strategy returns empty list of overflowing tokens if a pair
            of sequences (or a batch of pairs) is provided.
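
        Example (a sketch with toy ids; with the default `truncation_side="right"`, `'only_first'` removes tokens
        from the end of the first sequence):

        ```python
        >>> ids, pair_ids, overflowing = tokenizer.truncate_sequences(
        ...     list(range(10)), pair_ids=list(range(6)), num_tokens_to_remove=4, truncation_strategy="only_first"
        ... )
        >>> len(ids), len(pair_ids)
        (6, 6)
        ```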
        """
        if num_tokens_to_remove <= 0:
            return ids, pair_ids, []

        if not isinstance(truncation_strategy, TruncationStrategy):
            truncation_strategy = TruncationStrategy(truncation_strategy)

        overflowing_tokens = []
        if truncation_strategy == TruncationStrategy.ONLY_FIRST or (
            truncation_strategy == TruncationStrategy.LONGEST_FIRST and pair_ids is None
        ):
            if len(ids) > num_tokens_to_remove:
                window_len = min(len(ids), stride + num_tokens_to_remove)
                if self.truncation_side == "left":
                    overflowing_tokens = ids[:window_len]
                    ids = ids[num_tokens_to_remove:]
                elif self.truncation_side == "right":
                    overflowing_tokens = ids[-window_len:]
                    ids = ids[:-num_tokens_to_remove]
                else:
                    raise ValueError(f"invalid truncation strategy: {self.truncation_side}, use 'left' or 'right'.")
            else:
                error_msg = (
                    f"We need to remove {num_tokens_to_remove} to truncate the input but the first sequence has a "
                    f"length {len(ids)}. "
                )
                if truncation_strategy == TruncationStrategy.ONLY_FIRST:
                    error_msg += (
                        "Please select another truncation strategy than "
                        f"{truncation_strategy}, for instance 'longest_first' or 'only_second'."
                    )
                logger.error(error_msg)
        elif truncation_strategy == TruncationStrategy.LONGEST_FIRST:
            logger.warning(
                "Be aware, overflowing tokens are not returned for the setting you have chosen,"
                f" i.e. sequence pairs with the '{TruncationStrategy.LONGEST_FIRST.value}' "
                "truncation strategy. So the returned list will always be empty even if some "
                "tokens have been removed."
            )
            len_pair_ids = len(pair_ids) if pair_ids is not None else 0
            len_ids = len(ids)
            first_remove = min(abs(len_pair_ids - len_ids), num_tokens_to_remove)
            second_remove = num_tokens_to_remove - first_remove
            if len_ids > len_pair_ids:
                ids_to_move = first_remove + second_remove // 2
                pair_ids_to_move = second_remove - second_remove // 2
            else:
                ids_to_move = second_remove // 2
                pair_ids_to_move = first_remove + second_remove - (second_remove // 2)
            if self.truncation_side == "right":
                ids = ids[:-ids_to_move] if ids_to_move > 0 else ids
                pair_ids = pair_ids[:-pair_ids_to_move] if pair_ids is not None and pair_ids_to_move > 0 else pair_ids
            elif self.truncation_side == "left":
                ids = ids[ids_to_move:]
                pair_ids = pair_ids[pair_ids_to_move:] if pair_ids is not None else None
            else:
                raise ValueError(f"invalid truncation strategy: {self.truncation_side}")
        elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None:
            if len(pair_ids) > num_tokens_to_remove:
                window_len = min(len(pair_ids), stride + num_tokens_to_remove)
                if self.truncation_side == "right":
                    overflowing_tokens = pair_ids[-window_len:]
                    pair_ids = pair_ids[:-num_tokens_to_remove]
                elif self.truncation_side == "left":
                    overflowing_tokens = pair_ids[:window_len]
                    pair_ids = pair_ids[num_tokens_to_remove:]
                else:
                    raise ValueError(f"invalid truncation strategy: {self.truncation_side}")
            else:
                logger.error(
                    f"We need to remove {num_tokens_to_remove} to truncate the input but the second sequence has a "
                    f"length {len(pair_ids)}. Please select another truncation strategy than {truncation_strategy}, "
                    "for instance 'longest_first' or 'only_first'."
                )

        return (ids, pair_ids, overflowing_tokens)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        padding_side: Optional[str] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """
        Pad encoded inputs (on left/right and up to predefined length or max length in the batch)

        Args:
            encoded_inputs:
                Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
            max_length: maximum length of the returned list and optionally padding length (see below).
                Will truncate by taking into account the special tokens.
            padding_strategy: PaddingStrategy to use for padding.

                - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
                - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
                - PaddingStrategy.DO_NOT_PAD: Do not pad
                The tokenizer padding sides are defined in `padding_side` argument:

                    - 'left': pads on the left of the sequences
                    - 'right': pads on the right of the sequences
            pad_to_multiple_of: (optional) Integer; if set, will pad the sequence to a multiple of the provided value.
                This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
                `>= 7.5` (Volta).
            padding_side:
                The side on which the model should have padding applied. Should be selected between ['right', 'left'].
                Default value is picked from the class attribute of the same name.
            return_attention_mask:
                (optional) Set to False to avoid returning attention mask (default: set to model specifics)
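
        Example (illustrative only, assuming a BERT-like tokenizer whose `pad_token_id` is 0):

        ```python
        >>> tokenizer._pad(
        ...     {"input_ids": [101, 7592, 102]}, max_length=6, padding_strategy=PaddingStrategy.MAX_LENGTH
        ... )["input_ids"]  # doctest: +SKIP
        [101, 7592, 102, 0, 0, 0]
        ```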
        """
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        required_input = encoded_inputs[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length

        # Initialize attention mask if not present.
        if return_attention_mask and "attention_mask" not in encoded_inputs:
            encoded_inputs["attention_mask"] = [1] * len(required_input)

        if needs_to_be_padded:
            difference = max_length - len(required_input)
            padding_side = padding_side if padding_side is not None else self.padding_side

            if padding_side == "right":
                if return_attention_mask:
                    encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
                if "token_type_ids" in encoded_inputs:
                    encoded_inputs["token_type_ids"] = (
                        encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
                    )
                if "special_tokens_mask" in encoded_inputs:
                    encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
                encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
            elif padding_side == "left":
                if return_attention_mask:
                    encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
                if "token_type_ids" in encoded_inputs:
                    encoded_inputs["token_type_ids"] = (
                        [self.pad_token_type_id] * difference + encoded_inputs["token_type_ids"]
                    )
                if "special_tokens_mask" in encoded_inputs:
                    encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
                encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
            else:
                raise ValueError(f"Invalid padding strategy: {padding_side}")

        return encoded_inputs

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """
        Converts a sequence of tokens into a single string. The simplest way to do this is `" ".join(tokens)`, but we
        often want to remove sub-word tokenization artifacts at the same time.

        Args:
            tokens (`List[str]`): The tokens to join into a string.

        Returns:
            `str`: The joined tokens.
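
        Example (illustrative for a WordPiece tokenizer, where `##` marks sub-word continuation):

        ```python
        >>> tokenizer.convert_tokens_to_string(["hello", "##world"])  # doctest: +SKIP
        'helloworld'
        ```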
        """
        raise NotImplementedError

    def batch_decode(
        self,
        sequences: Union[List[int], List[List[int]], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: Optional[bool] = None,
        **kwargs,
    ) -> List[str]:
        """
        Convert a list of lists of token ids into a list of strings by calling decode.

        Args:
            sequences (`Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]`):
                List of tokenized input ids. Can be obtained using the `__call__` method.
            skip_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not to remove special tokens in the decoding.
            clean_up_tokenization_spaces (`bool`, *optional*):
                Whether or not to clean up the tokenization spaces. If `None`, will default to
                `self.clean_up_tokenization_spaces`.
            kwargs (additional keyword arguments, *optional*):
                Will be passed to the underlying model specific decode method.

        Returns:
            `List[str]`: The list of decoded sentences.
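
        Example (a minimal sketch, assuming an uncased checkpoint):

        ```python
        >>> sequences = [tokenizer("Hello world")["input_ids"], tokenizer("Goodbye")["input_ids"]]
        >>> tokenizer.batch_decode(sequences, skip_special_tokens=True)  # doctest: +SKIP
        ['hello world', 'goodbye']
        ```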
        """
        return [
            self.decode(
                seq,
                skip_special_tokens=skip_special_tokens,
                clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                **kwargs,
            )
            for seq in sequences
        ]

    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: Optional[bool] = None,
        **kwargs,
    ) -> str:
        """
        Converts a sequence of ids into a string, using the tokenizer and vocabulary, with options to remove special
        tokens and clean up tokenization spaces.

        Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.

        Args:
            token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):
                List of tokenized input ids. Can be obtained using the `__call__` method.
            skip_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not to remove special tokens in the decoding.
            clean_up_tokenization_spaces (`bool`, *optional*):
                Whether or not to clean up the tokenization spaces. If `None`, will default to
                `self.clean_up_tokenization_spaces`.
            kwargs (additional keyword arguments, *optional*):
                Will be passed to the underlying model specific decode method.

        Returns:
            `str`: The decoded sentence.
        )r  r  rZ  Nr;   )r1   _decoderI   r  r  rZ  r  r;   r;   r<   r     s   zPreTrainedTokenizerBase.decodec                 K   r  rC   r  r  r;   r;   r<   r  %  s   zPreTrainedTokenizerBase._decodealready_has_special_tokensc                    s0   |r|du s
        """
        # Convert inputs to python lists
        token_ids = to_py_obj(token_ids)

        return self._decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

    def _decode(
        self,
        token_ids: Union[int, List[int]],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: Optional[bool] = None,
        **kwargs,
    ) -> str:
        raise NotImplementedError

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """
        Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.

        Args:
            token_ids_0 (`List[int]`):
                List of ids of the first sequence.
            token_ids_1 (`List[int]`, *optional*):
                List of ids of the second sequence.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.

        Returns:
            A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
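
        Example (a sketch; the exact mask depends on the model's special tokens):

        ```python
        >>> encoded = tokenizer.encode("Hello", add_special_tokens=True)
        >>> tokenizer.get_special_tokens_mask(encoded, already_has_special_tokens=True)  # doctest: +SKIP
        [1, 0, 1]
        ```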
        """
        assert already_has_special_tokens and token_ids_1 is None, (
            "You cannot use ``already_has_special_tokens=False`` with this tokenizer. "
            "Please use a slow (full python) tokenizer to activate this argument. "
            "Or set `return_special_tokens_mask=True` when calling the encoding method "
            "to get the special tokens mask in any tokenizer."
        )

        all_special_ids = self.all_special_ids  # cache the property

        special_tokens_mask = [1 if token in all_special_ids else 0 for token in token_ids_0]

        return special_tokens_mask

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """
        Clean up a list of simple English tokenization artifacts like spaces before punctuations and abbreviated forms.

        Args:
            out_string (`str`): The text to clean up.

        Returns:
            `str`: The cleaned-up string.
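
        Example (this is a static method, so it can be called on the class itself):

        ```python
        >>> PreTrainedTokenizerBase.clean_up_tokenization("Do n't worry , it 's fine .")
        "Don't worry, it's fine."
        ```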
        """
        out_string = (
            out_string.replace(" .", ".")
            .replace(" ?", "?")
            .replace(" !", "!")
            .replace(" ,", ",")
            .replace(" ' ", "'")
            .replace(" n't", "n't")
            .replace(" 'm", "'m")
            .replace(" 's", "'s")
            .replace(" 've", "'ve")
            .replace(" 're", "'re")
        )
        return out_string

    def _eventual_warn_about_too_long_sequence(self, ids: List[int], max_length: Optional[int], verbose: bool):
        """
        Depending on the input and internal state we might trigger a warning about a sequence that is too long for its
        corresponding model

        Args:
            ids (`List[str]`): The ids produced by the tokenization
            max_length (`int`, *optional*): The max_length desired (does not trigger a warning if it is set)
            verbose (`bool`): Whether or not to print more information and warnings.

        """
        if max_length is None and len(ids) > self.model_max_length and verbose:
            if not self.deprecation_warnings.get("sequence-length-is-longer-than-the-specified-maximum", False):
                logger.warning(
                    "Token indices sequence length is longer than the specified maximum sequence length "
                    f"for this model ({len(ids)} > {self.model_max_length}). Running this sequence through "
                    "the model will result in indexing errors"
                )
            self.deprecation_warnings["sequence-length-is-longer-than-the-specified-maximum"] = True

    def _switch_to_input_mode(self):
        """
        Private method to put the tokenizer in input mode (when it has different modes for input/outputs)
        """

    def _switch_to_target_mode(self):
        """
        Private method to put the tokenizer in target mode (when it has different modes for input/outputs)
        """

    @contextmanager
    def as_target_tokenizer(self):
        """
        Temporarily sets the tokenizer for encoding the targets. Useful for tokenizers associated with
        sequence-to-sequence models that need a slightly different processing for the labels.
        """
        warnings.warn(
            "`as_target_tokenizer` is deprecated and will be removed in v5 of Transformers. You can tokenize your "
            "labels by using the argument `text_target` of the regular `__call__` method (either in the same call as "
            "your input texts if you use the same keyword arguments, or in a separate call."
        )
        self._switch_to_target_mode()
        self._in_target_context_manager = True
        yield
        self._in_target_context_manager = False
        self._switch_to_input_mode()

    @classmethod
    def register_for_auto_class(cls, auto_class="AutoTokenizer"):
        """
        Register this class with a given auto class. This should only be used for custom tokenizers as the ones in the
        library are already mapped with `AutoTokenizer`.

        <Tip warning={true}>

        This API is experimental and may have some slight breaking changes in the next releases.

        </Tip>

        Args:
            auto_class (`str` or `type`, *optional*, defaults to `"AutoTokenizer"`):
                The auto class to register this new tokenizer with.
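
        Example (a sketch; `MyTokenizer` is a hypothetical custom tokenizer class):

        ```python
        >>> MyTokenizer.register_for_auto_class("AutoTokenizer")  # doctest: +SKIP
        ```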
        """
        if not isinstance(auto_class, str):
            auto_class = auto_class.__name__

        import transformers.models.auto as auto_module

        if not hasattr(auto_module, auto_class):
            raise ValueError(f"{auto_class} is not a valid auto class.")

        cls._auto_class = auto_class

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: Optional[str] = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        """
        Prepare model inputs for translation. For best performance, translate one sentence at a time.

        Arguments:
            src_texts (`List[str]`):
                List of documents to summarize or source language texts.
            tgt_texts (`list`, *optional*):
                List of summaries or target language texts.
            max_length (`int`, *optional*):
                Controls the maximum length for encoder inputs (documents to summarize or source language texts). If
                left unset or set to `None`, this will use the predefined model maximum length if a maximum length is
                required by one of the truncation/padding parameters. If the model has no specific maximum input length
                (like XLNet), truncation/padding to a maximum length will be deactivated.
            max_target_length (`int`, *optional*):
                Controls the maximum length of decoder inputs (target language texts or summaries). If left unset or
                set to `None`, this will use the max_length value.
            padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
                Activates and controls padding. Accepts the following values:

                - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
                  sequence if provided).
                - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
                  acceptable input length for the model if that argument is not provided.
                - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
                  lengths).
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors instead of list of python integers. Acceptable values are:

                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return Numpy `np.ndarray` objects.
            truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `True`):
                Activates and controls truncation. Accepts the following values:

                - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
                  to the maximum acceptable input length for the model if that argument is not provided. This will
                  truncate token by token, removing a token from the longest sequence in the pair if a pair of
                  sequences (or a batch of pairs) is provided.
                - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
                  maximum acceptable input length for the model if that argument is not provided. This will only
                  truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
                - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
                  maximum acceptable input length for the model if that argument is not provided. This will only
                  truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
                - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
                  greater than the model maximum admissible input size).
            **kwargs:
                Additional keyword arguments passed along to `self.__call__`.

        Return:
            [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:

            - **input_ids** -- List of token ids to be fed to the encoder.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model.
            - **labels** -- List of token ids for tgt_texts.

            The full set of keys `[input_ids, attention_mask, labels]`, will only be returned if tgt_texts is passed.
            Otherwise, input_ids, attention_mask will be the only keys.
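
        Example (a minimal sketch; the checkpoint name is illustrative, and `__call__` with `text_target` is the
        preferred replacement):

        ```python
        >>> batch = tokenizer.prepare_seq2seq_batch(
        ...     src_texts=["Studies have shown that owning a dog is good for you"],
        ...     tgt_texts=["Studies show that owning dogs is good"],
        ... )  # doctest: +SKIP
        >>> list(batch.keys())
        ['input_ids', 'attention_mask', 'labels']
        ```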
        """
        formatted_warning = """
`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of HuggingFace Transformers. Use the regular
`__call__` method to prepare your inputs and targets.

Here is a short example:

model_inputs = tokenizer(src_texts, text_target=tgt_texts, ...)

If you either need to use different keyword arguments for the source and target texts, you should do two calls like
this:

model_inputs = tokenizer(src_texts, ...)
labels = tokenizer(text_target=tgt_texts, ...)
model_inputs["labels"] = labels["input_ids"]

See the documentation of your specific tokenizer for more details on the specific arguments to the tokenizer of choice.
For a more complete example, see the implementation of `prepare_seq2seq_batch`.
"""
        warnings.warn(formatted_warning, FutureWarning)
        # mBART-specific kwargs that should be ignored by other models.
        kwargs.pop("src_lang", None)
        kwargs.pop("tgt_lang", None)
        if max_length is None:
            max_length = self.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = max_length
        with self.as_target_tokenizer():
            labels = self(
                tgt_texts,
                add_special_tokens=True,
                return_tensors=return_tensors,
                padding=padding,
                max_length=max_target_length,
                truncation=truncation,
                **kwargs,
            )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs


def get_fast_tokenizer_file(tokenization_files: List[str]) -> str:
    """
    Get the tokenization file to use for this version of transformers.

    Args:
        tokenization_files (`List[str]`): The list of available configuration files.

    Returns:
        `str`: The tokenization file to use.
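
    Example (a sketch; `tokenizer.json` is the default, while a versioned file is picked when the running version
    of transformers is new enough):

    ```python
    >>> get_fast_tokenizer_file(["tokenizer.json", "tokenizer.4.0.0.json"])  # doctest: +SKIP
    'tokenizer.4.0.0.json'
    ```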
    """
    tokenizer_files_map = {}
    for file_name in tokenization_files:
        search = _re_tokenizer_file.search(file_name)
        if search is not None:
            v = search.groups()[0]
            tokenizer_files_map[v] = file_name
    available_versions = sorted(tokenizer_files_map.keys())

    # Defaults to FULL_TOKENIZER_FILE and then try to look at some newer versions.
    tokenizer_file = FULL_TOKENIZER_FILE
    transformers_version = version.parse(__version__)
    for v in available_versions:
        if version.parse(v) <= transformers_version:
            tokenizer_file = tokenizer_files_map[v]
        else:
            # No point going further since the versions are sorted.
            break

    return tokenizer_file


PreTrainedTokenizerBase.push_to_hub = copy_func(PreTrainedTokenizerBase.push_to_hub)
if PreTrainedTokenizerBase.push_to_hub.__doc__ is not None:
    PreTrainedTokenizerBase.push_to_hub.__doc__ = PreTrainedTokenizerBase.push_to_hub.__doc__.format(
        object="tokenizer", object_class="AutoTokenizer", object_files="tokenizer files"
    )