import enum
import itertools
import types
from typing import Dict

from ..utils import ModelOutput, add_end_docstrings, is_tf_available, is_torch_available
from .base import Pipeline, build_pipeline_init_args


if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
    from .pt_utils import KeyDataset

if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES


class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


class Chat:
    """This class is intended to just be used internally in this pipeline and not exposed to users. We convert chats
    to this format because the rest of the pipeline code tends to assume that lists of messages are
    actually a batch of samples rather than messages in the same conversation."""

    def __init__(self, messages: Dict):
        for message in messages:
            if not ("role" in message and "content" in message):
                raise ValueError("When passing chat dicts as input, each dict must have a 'role' and 'content' key.")
        self.messages = messages


@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True))
class TextGenerationPipeline(Pipeline):
    """
    Language generation pipeline using any `ModelWithLMHead`. This pipeline predicts the words that will follow a
    specified text prompt. When the underlying model is a conversational model, it can also accept one or more chats,
    in which case the pipeline will operate in chat mode and will continue the chat(s) by adding its response(s).
    Each chat takes the form of a list of dicts, where each dict contains "role" and "content" keys.

    Examples:

    ```python
    >>> from transformers import pipeline

    >>> generator = pipeline(model="openai-community/gpt2")
    >>> generator("I can't believe you did such a ", do_sample=False)
    [{'generated_text': "I can't believe you did such a icky thing to me. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I"}]

    >>> # These parameters return several suggestions and only the newly created text, which makes it easier to use for prompt suggestions.
    >>> outputs = generator("My tart needs some", num_return_sequences=4, return_full_text=False)
    ```

    ```python
    >>> from transformers import pipeline

    >>> generator = pipeline(model="HuggingFaceH4/zephyr-7b-beta")
    >>> # Zephyr-beta is a conversational model, so let's pass it a chat instead of a single string
    >>> generator([{"role": "user", "content": "What is the capital of France? Answer in one word."}], do_sample=False, max_new_tokens=2)
    [{'generated_text': [{'role': 'user', 'content': 'What is the capital of France? Answer in one word.'}, {'role': 'assistant', 'content': 'Paris'}]}]
    ```
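
    When the final message of an input chat has the `assistant` role, the pipeline treats it as a prefill and, by
    default, continues that message rather than starting a new one (see the `continue_final_message` argument of
    `__call__`). A minimal sketch with an illustrative prompt and output (the exact completion depends on the model):

    ```python
    >>> chat = [
    ...     {"role": "user", "content": "What is the capital of France? Answer in one word."},
    ...     {"role": "assistant", "content": "The capital is"},
    ... ]
    >>> generator(chat, max_new_tokens=2)  # doctest: +SKIP
    [{'generated_text': [{'role': 'user', 'content': 'What is the capital of France? Answer in one word.'}, {'role': 'assistant', 'content': 'The capital is Paris.'}]}]
    ```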

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial). You can pass text
    generation parameters to this pipeline to control stopping criteria, decoding strategy, and more. Learn more about
    text generation parameters in [Text generation strategies](../generation_strategies) and [Text
    generation](text_generation).
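
    As a sketch of how decoding strategies are selected (the keyword arguments below are standard `generate`
    parameters that the pipeline forwards to the model; outputs will vary by model and sampling seed):

    ```python
    >>> generator = pipeline(model="openai-community/gpt2")
    >>> # Greedy decoding is used by default; `do_sample` switches to sampling
    >>> generator("Once upon a time,", do_sample=True, temperature=0.7, top_k=50, max_new_tokens=20)  # doctest: +SKIP
    ```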

    This language generation pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"text-generation"`.

    The models that this pipeline can use are models that have been trained with an autoregressive language modeling
    objective. See the list of available [text completion models](https://huggingface.co/models?filter=text-generation)
    and the list of [conversational models](https://huggingface.co/models?other=conversational)
    on [huggingface.co/models](https://huggingface.co/models).
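
    To get token ids instead of decoded text, pass `return_tensors=True` (the ids shown here are illustrative and
    depend on the tokenizer):

    ```python
    >>> generator("I can't believe you did such a ", return_tensors=True)  # doctest: +SKIP
    [{'generated_token_ids': [40, 460, 470, ...]}]
    ```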
    """

    # Prefix text used to help XLNet and Transformer-XL models handle short prompts.
    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The prefix logic has to be computed here as a "default" because it
            # defines both preprocess params and generate kwargs at once.
            prefix = None
            if self.prefix is not None:
                prefix = self.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # Models that need a prefix to work well with short prompts
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}

    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        truncation=None,
        max_length=None,
        continue_final_message=None,
        **generate_kwargs,
    ):
        preprocess_params = {}

        add_special_tokens = False
        if "add_special_tokens" in generate_kwargs:
            add_special_tokens = preprocess_params["add_special_tokens"] = generate_kwargs.pop("add_special_tokens")

        if "padding" in generate_kwargs:
            preprocess_params["padding"] = generate_kwargs.pop("padding")

        if truncation is not None:
            preprocess_params["truncation"] = truncation

        if max_length is not None:
            preprocess_params["max_length"] = max_length
            generate_kwargs["max_length"] = max_length

        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=add_special_tokens, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter, expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        if continue_final_message is not None:
            preprocess_params["continue_final_message"] = continue_final_message

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if continue_final_message is not None:
            postprocess_params["continue_final_message"] = continue_final_message

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            generate_kwargs["eos_token_id"] = stop_sequence_ids

        if self.assistant_model is not None:
            forward_params["assistant_model"] = self.assistant_model
        if self.assistant_tokenizer is not None:
            forward_params["tokenizer"] = self.tokenizer
            forward_params["assistant_tokenizer"] = self.assistant_tokenizer

        return preprocess_params, forward_params, postprocess_params

    def _parse_and_tokenize(self, *args, **kwargs):
        """
        Parse arguments and tokenize
        """
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        """
        Complete the prompt(s) given as inputs.

        Args:
            text_inputs (`str`, `List[str]`, `List[Dict[str, str]]`, or `List[List[Dict[str, str]]]`):
                One or several prompts (or one list of prompts) to complete. If strings or a list of strings are
                passed, this pipeline will continue each prompt. Alternatively, a "chat", in the form of a list
                of dicts with "role" and "content" keys, can be passed, or a list of such chats. When chats are passed,
                the model's chat template will be used to format them before passing them to the model.
            return_tensors (`bool`, *optional*, defaults to `False`):
                Returns the tensors of predictions (as token indices) in the outputs. If set to
                `True`, the decoded text is not returned.
            return_text (`bool`, *optional*):
                Returns the decoded texts in the outputs.
            return_full_text (`bool`, *optional*, defaults to `True`):
                If set to `False` only added text is returned, otherwise the full text is returned. Cannot be
                specified at the same time as `return_text`.
            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
                Whether or not to clean up the potential extra spaces in the text output.
            continue_final_message (`bool`, *optional*): This indicates that you want the model to continue the
                last message in the input chat rather than starting a new one, allowing you to "prefill" its response.
                By default this is `True` when the final message in the input chat has the `assistant` role and
                `False` otherwise, but you can manually override that behaviour by setting this flag.
            prefix (`str`, *optional*):
                Prefix added to prompt.
            handle_long_generation (`str`, *optional*):
                By default, this pipeline does not handle long generation (sequences that, in one form or another,
                exceed the model's maximum length). There is no perfect way to address this (more info:
                https://github.com/huggingface/transformers/issues/14033#issuecomment-948385227). This provides common
                strategies to work around that problem depending on your use case.

                - `None`: default strategy, where nothing in particular happens
                - `"hole"`: truncates the input on the left, leaving a gap wide enough to let generation happen (this
                  might truncate a lot of the prompt, and is not suitable when the generation exceeds the model's
                  capacity); see the illustrative call after this list
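
                For instance, a minimal illustrative call (`very_long_prompt` stands for any prompt longer than the
                model's context window):

                ```python
                generator(very_long_prompt, handle_long_generation="hole", max_new_tokens=64)
                ```
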
            generate_kwargs (`dict`, *optional*):
                Additional keyword arguments to pass along to the generate method of the model (see the generate method
                corresponding to your framework [here](./text_generation)).

        Return:
            A list or a list of lists of `dict`: Returns one of the following dictionaries (cannot return a combination
            of both `generated_text` and `generated_token_ids`):

            - **generated_text** (`str`, present when `return_text=True`) -- The generated text.
            - **generated_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token
              ids of the generated text.
        """
        if isinstance(
            text_inputs,
            (list, tuple, types.GeneratorType, KeyDataset)
            if is_torch_available()
            else (list, tuple, types.GeneratorType),
        ):
            if isinstance(text_inputs, types.GeneratorType):
                first_item, text_inputs = itertools.tee(text_inputs)
                text_inputs, first_item = (x for x in text_inputs), next(first_item)
            else:
                first_item = text_inputs[0]
            if isinstance(first_item, (list, tuple, dict)):
                # We have one or more prompts in list-of-dicts format, so this is chat mode
                if isinstance(first_item, dict):
                    return super().__call__(Chat(text_inputs), **kwargs)
                else:
                    chats = (Chat(chat) for chat in text_inputs)
                    if isinstance(text_inputs, types.GeneratorType):
                        return super().__call__(chats, **kwargs)
                    else:
                        return super().__call__(list(chats), **kwargs)
        return super().__call__(text_inputs, **kwargs)

    def preprocess(
        self,
        prompt_text,
        prefix="",
        handle_long_generation=None,
        add_special_tokens=None,
        truncation=None,
        padding=None,
        max_length=None,
        continue_final_message=None,
        **generate_kwargs,
    ):
        # Only set non-None tokenizer kwargs, so as to rely on the tokenizer's defaults
        tokenizer_kwargs = {
            "add_special_tokens": add_special_tokens,
            "truncation": truncation,
            "padding": padding,
            "max_length": max_length,
        }
        tokenizer_kwargs = {key: value for key, value in tokenizer_kwargs.items() if value is not None}

        if isinstance(prompt_text, Chat):
            tokenizer_kwargs.pop("add_special_tokens", None)  # special tokens are handled by the chat template
            # If the user passes a chat that ends in an assistant message, we treat it as a prefill by default
            # because very few models support multiple separate, consecutive assistant messages
            if continue_final_message is None:
                continue_final_message = prompt_text.messages[-1]["role"] == "assistant"
            inputs = self.tokenizer.apply_chat_template(
                prompt_text.messages,
                add_generation_prompt=not continue_final_message,
                continue_final_message=continue_final_message,
                return_dict=True,
                return_tensors=self.framework,
                **tokenizer_kwargs,
            )
        else:
            inputs = self.tokenizer(prefix + prompt_text, return_tensors=self.framework, **tokenizer_kwargs)

        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.generation_config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation: the number of desired tokens exceeds the"
                        " model's max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.generation_config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # A user-supplied `generation_config` takes precedence over the pipeline's default one
        if "generation_config" not in generate_kwargs:
            generate_kwargs["generation_config"] = self.generation_config

        output = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)

        if isinstance(output, ModelOutput):
            generated_sequence = output.sequences
            other_outputs = {k: v for k, v in output.items() if k != "sequences"}
            out_b = generated_sequence.shape[0]

            # Reshape per-sequence extra outputs from (batch * num_return_sequences, ...) to
            # (batch, num_return_sequences, ...) so they can be split per sample downstream
            if self.framework == "pt":
                for key, value in other_outputs.items():
                    if isinstance(value, torch.Tensor) and value.shape[0] == out_b:
                        other_outputs[key] = value.reshape(in_b, out_b // in_b, *value.shape[1:])
                    if isinstance(value, tuple) and len(value[0]) == out_b:
                        value = torch.stack(value).swapaxes(0, 1)
                        other_outputs[key] = value
            elif self.framework == "tf":
                for key, value in other_outputs.items():
                    if isinstance(value, tf.Tensor) and value.shape[0] == out_b:
                        other_outputs[key] = tf.reshape(value, (in_b, out_b // in_b, *value.shape[1:]))
                    if isinstance(value, tuple) and len(value[0]) == out_b:
                        value = tf.stack(value).swapaxes(0, 1)
                        other_outputs[key] = value
        else:
            generated_sequence = output
            other_outputs = {}

        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))

        model_outputs = {
            "generated_sequence": generated_sequence,
            "input_ids": input_ids,
            "prompt_text": prompt_text,
        }
        model_outputs.update(other_outputs)
        return model_outputs

    def postprocess(
        self,
        model_outputs,
        return_type=ReturnType.FULL_TEXT,
        clean_up_tokenization_spaces=True,
        continue_final_message=None,
    ):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []

        other_outputs = model_outputs.get("additional_outputs", {})
        splitted_keys = {}
        if other_outputs:
            if self.framework == "pt":
                for k, v in other_outputs.items():
                    if isinstance(v, torch.Tensor) and v.shape[0] == len(generated_sequence):
                        splitted_keys[k] = v.numpy().tolist()
            elif self.framework == "tf":
                for k, v in other_outputs.items():
                    if isinstance(v, tf.Tensor) and v.shape[0] == len(generated_sequence):
                        splitted_keys[k] = v.numpy().tolist()

        for idx, sequence in enumerate(generated_sequence):
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                all_text = text[prompt_length:]
                if return_type == ReturnType.FULL_TEXT:
                    if isinstance(prompt_text, str):
                        all_text = prompt_text + all_text
                    elif isinstance(prompt_text, Chat):
                        if continue_final_message is None:
                            # If the user passes a chat ending in an assistant message, we treat it as a prefill by
                            # default because very few models support multiple separate, consecutive assistant messages
                            continue_final_message = prompt_text.messages[-1]["role"] == "assistant"
                        if continue_final_message:
                            # With assistant prefill, concatenate the new text onto the end of the final message
                            all_text = list(prompt_text.messages)[:-1] + [
                                {
                                    "role": prompt_text.messages[-1]["role"],
                                    "content": prompt_text.messages[-1]["content"] + all_text,
                                }
                            ]
                        else:
                            # When we're not starting from a prefill, the output is a new assistant message
                            all_text = list(prompt_text.messages) + [{"role": "assistant", "content": all_text}]
                record = {"generated_text": all_text}
            for key, values in splitted_keys.items():
                record[key] = values[idx]
            records.append(record)

        return records