o
    hq                     @   s2  d dl Z d dlZd dlmZ d dlmZ d dlmZ d dlm	Z	m
Z
mZmZmZ d dlZd dlZd dlmZ ddlmZ dd	lmZ dd
lmZmZ eeZe ZdZG dd deZ G dd de Z!G dd de Z"G dd de Z#G dd de Z$G dd de Z%G dd de&Z'de'de(de'fddZ)dS )    N)ABC)OrderedDict)deepcopy)DictListOptionalTupleUnion)
functional   )isin_mps_friendly)PreTrainedTokenizerBase)add_start_docstringsloggingaL  
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
            or scores for each vocabulary token after SoftMax. If this stopping criteria depends on the `scores` input,
            make sure you pass `return_dict_in_generate=True, output_scores=True` to `generate`.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `torch.BoolTensor`. (`torch.BoolTensor` of shape `(batch_size, 1)`), where `True` indicates we stop generation
            for a particular row, `True` indicates we should continue.

c                   @   s4   e Zd ZdZeedejdejdej	fddZ
dS )StoppingCriteriazAbstract base class for all stopping criteria that can be applied during generation.

    If your stopping criteria depends on the `scores` input, make sure you pass `return_dict_in_generate=True,
    output_scores=True` to `generate`.
    	input_idsscoresreturnc                 K   s   t d)Nz'StoppingCriteria needs to be subclassed)NotImplementedError)selfr   r   kwargs r   }/var/www/html/construction_image-detection-poc/venv/lib/python3.10/site-packages/transformers/generation/stopping_criteria.py__call__5   s   zStoppingCriteria.__call__N)__name__
__module____qualname____doc__r   "STOPPING_CRITERIA_INPUTS_DOCSTRINGtorch
LongTensorFloatTensor
BoolTensorr   r   r   r   r   r   .   s    "r   c                   @   L   e Zd ZdZddedee fddZeede	j
de	jd	e	jfd
dZdS )MaxLengthCriteriaa  
    This class can be used to stop generation whenever the full generated number of tokens exceeds `max_length`. Keep
    in mind for decoder-only type of transformers, this will include the initial prompted tokens.

    Args:
        max_length (`int`):
            The maximum length that the output sequence can have in number of tokens.
        max_position_embeddings (`int`, *optional*):
            The maximum model length, as defined by the model's `config.max_position_embeddings` attribute.
    N
max_lengthmax_position_embeddingsc                 C   s   || _ || _d S N)r%   r&   )r   r%   r&   r   r   r   __init__F   s   
zMaxLengthCriteria.__init__r   r   r   c                 K   s^   |j d }|| jk}| jd ur |s || jkr td| j d tj|j d f||jtjdS )NzrThis is a friendly reminder - the current text generation call will exceed the model's predefined maximum length (zb). Depending on the model, you may observe exceptions, performance degradation, or nothing at all.r   devicedtype)	shaper%   r&   loggerwarning_oncer   fullr+   bool)r   r   r   r   cur_lenis_doner   r   r   r   J   s   

zMaxLengthCriteria.__call__r'   )r   r   r   r   intr   r(   r   r   r   r    r!   r"   r   r   r   r   r   r$   :   s
    "r$   c                   @   r#   )MaxTimeCriteriaa  
    This class can be used to stop generation whenever the full generation exceeds some amount of time. By default, the
    time will start being counted when you initialize this function. You can override this by passing an
    `initial_time`.

    Args:
        max_time (`float`):
            The maximum allowed time in seconds for the generation.
        initial_time (`float`, *optional*, defaults to `time.time()`):
            The start of the generation allowed time.
    Nmax_timeinitial_timestampc                 C   s&   || _ |d u rt | _d S || _d S r'   )r6   timer7   )r   r6   r7   r   r   r   r(   d   s    zMaxTimeCriteria.__init__r   r   r   c                 K   s2   t   | j | jk}tj|jd f||jtjdS )Nr   r*   )r8   r7   r6   r   r0   r-   r+   r1   r   r   r   r   r3   r   r   r   r   h   s   zMaxTimeCriteria.__call__r'   )r   r   r   r   floatr   r(   r   r   r   r    r!   r"   r   r   r   r   r   r5   W   s
    "r5   c                
   @   s   e Zd ZdZdedeeee f fddZdd Z	e
dd	d
Ze
deeeeeee f f eeeeee f f f fddZe
deeejf fddZeedejdejdejfddZdS )StopStringCriteriaa  
    This class can be used to stop generation whenever specific string sequences are generated. It preprocesses
    the strings together with the tokenizer vocab to find positions where tokens can validly complete the stop strings.

    Generation is stopped as soon as a token is generated that completes any of the stop strings.
    We want to catch any instance in which the stop string would be present in the decoded output, which means
    we must also catch cases with "overhangs" off one or both ends. To make this more concrete, for the stop string
    "stop", any of the following token sequences would trigger the match:

    - ["st", "op"]
    - ["stop"]
    - ["st", "opera"]
    - ["sto", "pper"]
    - ["las", "topper"]
    - ["s", "to", "pped"]

    Note that a match will only be triggered if the stop string is at the end of the generated sequence. In other
    words, these sequences will not trigger a match:

    - ["stop", "at"]
    - ["st", "op", "at"]
    - ["st", "opera", "tion"]

    The reason these are not a match is that the stop string does not overlap with the final token. If you can remove
    one or more tokens from the end of the sequence without destroying the stop string, then this criterion will not
    match that stop string. This is by design; because this check is run after each token is generated, we can't miss a
    valid stop string if one is generated, but we don't want to halt generation just because the stop string exists
    somewhere in the past input_ids.

    How is the match actually performed, though? We do it in quite a confusing way, because we want the entire match
    process to be compilable with Torch or XLA, which means we cannot use standard string methods. However, it is possible,
    with some work, to do string matching with pure tensor operations. We'll begin by describing the algorithm we use
    with standard string operations, and then at the end we'll explain how this is converted to pure tensor operations.

    The key to the algorithm is an observation: Because the stop string must overlap with the end of the token sequence, we can start at
    the end of the sequence and work backwards. Specifically, we check that there is an overlap between the start of
    the final token and the end of the stop_string, or to put it another way, stop_string[-i:] == token[:i] for
    some i > 0. If you look at the positive examples above, you'll see the last token in all of them fulfills this
    property:

    - ["st", "op"] (overlap is "op", overlap length == 2)
    - ["stop"]  (overlap is "stop", overlap length == 4)
    - ["st", "opera"]  (overlap is "op", overlap length == 2)
    - ["sto", "pper"]  (overlap is "p", overlap length == 1)
    - ["las", "topper"]  (overlap is "top", overlap length == 3)
    - ["s", "to", "pped"]  (overlap is "p", overlap length == 1)

    It's impossible to construct a matching sequence that does not have this property (feel free to verify this
    yourself). However, although this overlap between the start of the final token and the end of the stop string is
    necessary for a match, it is not sufficient. We also need to check that the rest of the token sequence is
    consistent with the stop string.

    How do we do that? Let's use ["s", "to", "pped"] as an example. We know that the final token, "pped", has an
    overlap of 1 with the stop string, "stop". We then go back to the previous token, "to". Since we have already
    matched 1 character from the stop string, the remainder to check is "sto". We check that the next token "to"
    matches the end of the remainder, which it does. We have now matched 3 characters from the stop string, and the
    remainder to match is "s". We go back to the previous token again, which is also "s". This is a match, and so
    we have matched the entire stop string.

    How does it work when the tokens run off the start of the stop string, though? Let's consider the example of
    ["las", "topper"]. The final token, "topper", has an overlap of 3 with the stop string, "stop". Therefore,
    the remaining stop string to match is "s". We go back to the previous token, "las". Because the remainder to
    match is just "s", with length 1, we consider only the final 1 character from the token, which is "s". This
    matches the stop string, and so the entire string is matched.

    How do we compute these matches with tensor operations, though? Simply: we efficiently precompute the necessary
    information for all tokens! For every token, we compute:
    - Its overlap with the end of the stop string, if any
    - The positions inside the stop string where the token matches, including matches that run off the start.
    - The total length of the token

    For example, for the token "pped", we would compute an end overlap of 1, no internal matching positions,
    and a length of 4. For the token "to", we would compute no end overlap, a single internal matching position
    of 1 (counting from the end), and a length of 2. For the token "s", we would compute no end overlap,
    a single internal matching position of 3 (again counting from the end) and a length of 1.

    As long as we have this information, we can execute the algorithm above without any string comparison
    operations. We simply perform the following steps:
    - Check if the final token has an end-overlap with the start string
    - Continue backwards, keeping track of how much of the stop string we've matched so far
    - At each point, check if the next token has the current position as one of its valid positions
    - Continue until either a match fails, or we completely match the whole stop string

    Again, consider ["s", "to", "pped"] as an example. "pped" has an end overlap of 1, so we can begin a match.
    We have matched 1 character so far, so we check that the next token "to", has 1 as a valid position (again,
    counting from the end). It does, so we add the length of "to" to our position tracker. We have now matched
    3 characters, so we check that the next token "s" has 3 as a valid position. It does, so we add its length
    to the position tracker. The position tracker is now 4, which is the length of the stop string. We have matched the
    entire stop string.

    In the second case, ["las", "topper"], "topper" has an end overlap of 3, so we can begin a match. We have
    matched 3 characters so far, so we check that the next token "las" has 3 as a valid position. It does, because we
    allow tokens to match positions that run off the start of the stop string. We add its length to the position
    tracker. The position tracker is now 6, which is greater than the length of the stop string! Don't panic, though -
    this also counts as a match of the stop string. We have matched the entire stop string.


    Args:
        tokenizer (`PreTrainedTokenizer`):
            The model's associated tokenizer (necessary to extract vocab and tokenize the termination sequences)
        stop_strings (`Union[str, List[str]]`):
            A list of strings that should end generation. If a string is passed, it will be treated like a
            list with a single element.

    Examples:

    ```python
    >>> from transformers import AutoModelForCausalLM, AutoTokenizer

    >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2")
    >>> model = AutoModelForCausalLM.from_pretrained("microsoft/phi-2")
    >>> inputs = tokenizer("The biggest states in the USA by land area:", return_tensors="pt")

    >>> gen_out = model.generate(**inputs)
    >>> print(tokenizer.batch_decode(gen_out, skip_special_tokens=True)[0])
    The biggest states in the USA by land area:
    - Alaska
    - Texas
    - California

    >>> # Passing one or more stop strings will halt generation after those strings are emitted
    >>> # Note that generating with stop strings requires you to pass the tokenizer too
    >>> gen_out = model.generate(**inputs, stop_strings=["Texas"], tokenizer=tokenizer)
    >>> print(tokenizer.batch_decode(gen_out, skip_special_tokens=True)[0])
    The biggest states in the USA by land area:
    - Alaska
    - Texas
    ```
    	tokenizerstop_stringsc                 C   s   t |tr|g}t|| _| }t| t| }}| |||\| _| _	| _
tdd | jD | _t| j| _tjdd |D tjd| _d S )Nc                 S      g | ]}t |qS r   len.0stop_stringr   r   r   
<listcomp>       z/StopStringCriteria.__init__.<locals>.<listcomp>c                 S   r>   r   r?   rA   r   r   r   rD      rE   r,   )
isinstancestrtupler=   	get_vocabkeysvalues!clean_and_embed_tokens_with_cacheembedding_vecmax_valid_positionsmax_valid_end_lensmaxmaximum_token_lenr@   num_stop_stringsr   tensorint32target_lens)r   r<   r=   vocab
token_listtoken_indicesr   r   r   r(      s   

 zStopStringCriteria.__init__c           	      C   s   ||| j ftv rt||| j f \}}}t||| j f n)| |\}}| ||| j \}}}|||ft||| j f< ttdkrFtjdd |||fS )N   F)last)r=   STOP_STRING_EMBEDDING_CACHEmove_to_endclean_tokenizer_vocab!_stop_string_create_embedding_vecr@   popitem)	r   rX   rY   r<   rN   rO   rP   clean_token_listclean_token_indicesr   r   r   rM      s    



z4StopStringCriteria.clean_and_embed_tokens_with_cacheabcdefc           
         s      }g }g } |ddd } fdd|D }| D ]#\}} ||g }	|	|	|t| d }	||	 || qt|t|fS )u?  
        This method turns a tokenizer vocab into a "clean" vocab where each token represents the actual string
        it will yield, without any special prefixes like "##" or "Ġ". This is trickier than it looks - the method
        tokenizer.convert_tokens_to_string() does not always return the correct string because of issues with prefix
        space addition/removal. To work around this, we add a static prefix to the start of the token, then remove
        it (and any prefix that may have been introduced with it) after calling convert_tokens_to_string().
        F)add_special_tokensr   c                    s   g | ]}  |qS r   )_convert_id_to_token)rB   tokr<   r   r   rD   !  s    z<StopStringCriteria.clean_tokenizer_vocab.<locals>.<listcomp>N)rJ   itemsconvert_tokens_to_stringindexr@   appendrI   )
r<   static_prefixrW   ra   rb   sentence_basetokens_basetoken	token_idxtoken_stringr   rg   r   r^     s   	
z(StopStringCriteria.clean_tokenizer_vocabr   c              	   C   s  i }i }|D ]y}|ddd }i ||< i ||< t | |D ]b\}}|ddd }	g }
g }tdt| t|D ]7}|dk rF|	| d }d}n|	}|||t|  }||rm|dkrh|tt|t| q6|
| q6|
rv|
|| |< |r~||| |< qq||fS )az  This function preprocesses stop strings and the tokenizer vocabulary to determine where tokens can
        validly appear in the stop strings. For each token, it computes a list of positions in the stop string where the
        token appears, as well as a list of the possible "end overlaps" for that token - that is, the number of characters
        from the end of the stop string that overlap with the start of the token, which can have more than one value.

        The reason for computing these may seem a bit cryptic - please see the docstring for StopStringCriteria for a full
        explanation of what these values are for!Nr)      r   )zipranger@   
startswithrk   min)rX   rY   r=   token_valid_positionstoken_end_overlapsrC   reversed_stop_stringro   tok_idxreversed_tokenmatching_positionspossible_end_lengthsirf   stopr   r   r   #_stop_string_get_matching_positions)  s8   

z6StopStringCriteria._stop_string_get_matching_positionsc              	   C   sh  t | ||\}}dd | D }|rt|nd}dd | D }|s)tdt|}t|||  d }	tjt|d |	ftjdd}
t	|D ]\\}}|| }|| }|
 D ]\}}||
||| || t| f< qZ|
 D ] \}}||
||t| ||  |t| ||  t| f< qst| |D ]\}}t||
|df< qqJtj|
tjd	}
|
||fS )
aG  This function precomputes everything needed for the run-time checks in StopStringCriteria, and packs
        them into an embedding tensor that can be accessed with pure tensor operations. For the specifics of the values
        that are precomputed and what they are used for, please refer to the StopStringCriteria docstring!c                 S   "   g | ]}|  D ]}t|qqS r   rL   r@   rB   	positionsvalr   r   r   rD   Z     " zHStopStringCriteria._stop_string_create_embedding_vec.<locals>.<listcomp>rr   c                 S   r   r   r   r   r   r   r   rD   _  r   zStop string preprocessing was unable to identify tokens matching one or more of the supplied stop string(s). This is most often caused by the stop strings containing unusual characters that are not in the tokenizer vocabulary.r   r)   )r,   
fill_valuerF   )r;   r   rL   rQ   
ValueErrorr@   npr0   rU   	enumeraterh   rs   r   rT   )rX   rY   r=   rw   rx   all_valid_positionsrO   valid_end_lensrP   vec_size
gather_vecr~   rC   r   end_lensrp   valid_positionspossible_end_lensro   r   r   r   r_   R  sJ    
z4StopStringCriteria._stop_string_create_embedding_vecr   r   c                 K   s  | j |j| _ | j|j| _|d d | j d f }t|d}tj|| j dd d}| j	}t
|| j }|d d dd d || j f d| jdf}|d d d d|| j df d| jdf}|d d dd d dd f }	|	dd|jd |jd f}	tj||	gdd}
|
jdd}|dk}tj|d d d dd d d f |d d d d d d d d d f kdd}tj||gdd}| jdtjd	}|dk}tj|| d
d| jd d d f k}tj|ddS )N)rr   r   rr   )rQ   r)   )dim)axis)r   r,   )rr   r)   )rN   tor+   rV   rR   r   flipclampsizerO   F	embeddingrS   	unflattenexpandr-   catcumsumanyrU   amax)r   r   r   r   flipped_idsrO   embeddedr   end_lengthslengthslengths_with_endsr   initial_matchlater_matchmatchmaskstring_matchesr   r   r   r     s0    
 
H$zStopStringCriteria.__call__N)rc   )r   r   r   r   r   r	   rH   r   r(   rM   staticmethodr^   r   r   r4   r   r   rT   r_   r   r   r    r!   Tensorr   r   r   r   r   r;   n   s     2(2"r;   c                   @   sR   e Zd ZdZdeeee ejf fddZ	e
edejdejdejfdd	Zd
S )EosTokenCriteriaa)  
    This class can be used to stop generation whenever the "end-of-sequence" token is generated.
    By default, it uses the `model.generation_config.eos_token_id`.

    Args:
        eos_token_id (`Union[int, List[int], torch.Tensor]`):
            The id(s) of the *end-of-sequence* token.
    eos_token_idc                 C   s0   t |tjst |tr|g}t|}|| _d S r'   )rG   r   r   r4   rT   r   )r   r   r   r   r   r(     s
   


zEosTokenCriteria.__init__r   r   r   c                 K   s,   | j |j| _ t|d d df | j }|S )Nr)   )r   r   r+   r   r9   r   r   r   r     s   zEosTokenCriteria.__call__N)r   r   r   r   r	   r4   r   r   r   r(   r   r   r    r!   r"   r   r   r   r   r   r     s
    	"r   c                   @   s4   e Zd ZdZdd ZdejdejdejfddZ	d	S )
ConfidenceCriteriaa  
    This class can be used to stop generation whenever assistant model's confidence in its prediction for the current token is lower than the threshold
        `model.generation_config.assistant_confidence_threshold` even if the number of speculative tokens (defined by `num_assistant_tokens`) is not yet reached.

    Args:
        assistant_confidence_threshold (`float`):
            The value of the threshold.
    c                 C   s
   || _ d S r'   )assistant_confidence_threshold)r   r   r   r   r   r(     s   
zConfidenceCriteria.__init__r   r   r   c                 K   s4   |d  d}|d|d f  }|| jk rdS dS )Nr)   r   )r   r)   TF)softmaxitemr   )r   r   r   r   probspr   r   r   r     s
   
zConfidenceCriteria.__call__N)
r   r   r   r   r(   r   r    r!   r"   r   r   r   r   r   r     s    	 r   c                   @   sF   e Zd ZeedejdejdejfddZ	e
dee fddZdS )	StoppingCriteriaListr   r   r   c                 K   sB   t j|jd fd|jt jd}| D ]}||||fi |B }q|S )Nr   Fr*   )r   r0   r-   r+   r1   )r   r   r   r   r3   criteriar   r   r   r     s   zStoppingCriteriaList.__call__c                 C   s"   | D ]}t |tr|j  S qd S r'   )rG   r$   r%   )r   stopping_criteriumr   r   r   r%     s
   

zStoppingCriteriaList.max_lengthN)r   r   r   r   r   r   r    r!   r"   r   propertyr   r4   r%   r   r   r   r   r     s
    r   stopping_criteriar%   r   c                 C   sJ   | j }t| }|d ur||krtdt |S |d u r#|t|d |S )NzOYou set different `max_length` for stopping criteria and `max_length` parameter)r%   )r%   r   warningswarnUserWarningrk   r$   )r   r%   stopping_max_lengthnew_stopping_criteriar   r   r   validate_stopping_criteria   s   r   )*r8   r   abcr   collectionsr   copyr   typingr   r   r   r   r	   numpyr   r   torch.nnr
   r   pytorch_utilsr   tokenization_utils_baser   utilsr   r   
get_loggerr   r.   r\   r   r   r$   r5   r;   r   r   listr   r4   r   r   r   r   r   <module>   s2    
  W