from typing import Dict

import numpy as np

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import GenericTensor, Pipeline, PipelineException, build_pipeline_init_args


if is_tf_available():
    import tensorflow as tf

    from ..tf_utils import stable_softmax

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


@add_end_docstrings(
    build_pipeline_init_args(has_tokenizer=True),
    r"""
        top_k (`int`, *optional*, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
        tokenizer_kwargs (`dict`, *optional*):
            Additional dictionary of keyword arguments passed along to the tokenizer.""",
)
class FillMaskPipeline(Pipeline):
    """
    Masked language modeling prediction pipeline using any `ModelWithLMHead`. See the [masked language modeling
    examples](../task_summary#masked-language-modeling) for more information.

    Example:

    ```python
    >>> from transformers import pipeline

    >>> fill_masker = pipeline(model="google-bert/bert-base-uncased")
    >>> fill_masker("This is a simple [MASK].")
    [{'score': 0.042, 'token': 3291, 'token_str': 'problem', 'sequence': 'this is a simple problem.'}, {'score': 0.031, 'token': 3160, 'token_str': 'question', 'sequence': 'this is a simple question.'}, {'score': 0.03, 'token': 8522, 'token_str': 'equation', 'sequence': 'this is a simple equation.'}, {'score': 0.027, 'token': 2028, 'token_str': 'one', 'sequence': 'this is a simple one.'}, {'score': 0.024, 'token': 3627, 'token_str': 'rule', 'sequence': 'this is a simple rule.'}]
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)

    This mask filling pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"fill-mask"`.

    The models that this pipeline can use are models that have been trained with a masked language modeling objective,
    which includes the bi-directional models in the library. See the up-to-date list of available models on
    [huggingface.co/models](https://huggingface.co/models?filter=fill-mask).

    <Tip>

    This pipeline only works for inputs with exactly one token masked. Experimental: We added support for multiple
    masks. The returned values are raw model output, and correspond to disjoint probabilities where one might expect
    joint probabilities (See [discussion](https://github.com/huggingface/transformers/pull/10222)).
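
    For instance, with two masked tokens the pipeline returns one list of predictions per mask (a sketch; the
    actual predictions depend on the model):

    ```python
    >>> fill_masker("Paris is the [MASK] of [MASK].")  # doctest: +SKIP
    ```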

    </Tip>

    <Tip>

    This pipeline now supports tokenizer_kwargs. For example, try:

    ```python
    >>> from transformers import pipeline

    >>> fill_masker = pipeline(model="google-bert/bert-base-uncased")
    >>> tokenizer_kwargs = {"truncation": True}
    >>> fill_masker(
    ...     "This is a simple [MASK]. " + "...with a large amount of repeated text appended. " * 100,
    ...     tokenizer_kwargs=tokenizer_kwargs,
    ... )
    ```

    </Tip>
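
    <Tip>

    Predictions can be restricted to a set of candidate tokens with `targets`, and the number of predictions
    returned can be changed with `top_k` (a sketch; scores depend on the model):

    ```python
    >>> fill_masker("This is a simple [MASK].", targets=["problem", "question"], top_k=1)  # doctest: +SKIP
    ```

    </Tip>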
    """

    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(
        self, inputs, return_tensors=None, tokenizer_kwargs=None, **preprocess_parameters
    ) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        if tokenizer_kwargs is None:
            tokenizer_kwargs = {}

        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)

            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # The warning enables users to fix the input to get faster performance,
                # since this fallback tokenization pass is pretty slow.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None, tokenizer_kwargs=None):
        preprocess_params = {}
        if tokenizer_kwargs is not None:
            preprocess_params["tokenizer_kwargs"] = tokenizer_kwargs

        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids
        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return preprocess_params, {}, postprocess_params

    def __call__(self, inputs, **kwargs):
        """
        Fill the masked token in the text(s) given as inputs.

        Args:
            inputs (`str` or `List[str]`):
                One or several texts (or one list of prompts) with masked tokens.
            targets (`str` or `List[str]`, *optional*):
                When passed, the model will limit the scores to the passed targets instead of looking up in the whole
                vocab. If the provided targets are not in the model vocab, they will be tokenized and the first
                resulting token will be used (with a warning, and that might be slower).
            top_k (`int`, *optional*):
                When passed, overrides the number of predictions to return.

        Return:
            A list or a list of lists of `dict`: Each result comes as a list of dictionaries with the following keys:

            - **sequence** (`str`) -- The corresponding input with the mask token prediction.
            - **score** (`float`) -- The corresponding probability.
            - **token** (`int`) -- The predicted token id (to replace the masked one).
            - **token_str** (`str`) -- The predicted token (to replace the masked one).
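
        Example (a sketch; actual scores and predicted tokens depend on the model):

        ```python
        >>> from transformers import pipeline

        >>> fill_masker = pipeline(model="google-bert/bert-base-uncased")
        >>> fill_masker("The capital of France is [MASK].", top_k=2)  # doctest: +SKIP
        ```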
        """
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs