o
    h_                     @   s4  d dl Z d dlmZ d dlmZ d dlmZmZmZm	Z	m
Z
 d dlZd dlZd dlmZ d dlmZ ddlmZ dd	lmZmZmZ d
dlmZmZ e rYd dlZd
dlmZmZ eeZeG dd dZ G dd dZ!G dd deZ"eG dd deZ#G dd dej$Z%G dd deZ&G dd dZ'dS )    N)	dataclass)	lru_cache)AnyDictOptionalTupleUnion)nn)BCELoss   )PreTrainedModel)ModelOutputis_torch_availablelogging   )PretrainedConfigWatermarkingConfig)#SynthIDTextWatermarkLogitsProcessorWatermarkLogitsProcessorc                   @   s   e Zd ZU dZdZeej ed< dZ	eej ed< dZ
eej ed< dZeej ed< dZeej ed< dZeej ed< dZeej ed	< dS )
WatermarkDetectorOutputa  
    Outputs of a watermark detector.

    Args:
        num_tokens_scored (np.array of shape (batch_size)):
            Array containing the number of tokens scored for each element in the batch.
        num_green_tokens (np.array of shape (batch_size)):
            Array containing the number of green tokens for each element in the batch.
        green_fraction (np.array of shape (batch_size)):
            Array containing the fraction of green tokens for each element in the batch.
        z_score (np.array of shape (batch_size)):
            Array containing the z-score for each element in the batch. Z-score here shows
            how many standard deviations away is the green token count in the input text
            from the expected green token count for machine-generated text.
        p_value (np.array of shape (batch_size)):
            Array containing the p-value for each batch obtained from z-scores.
        prediction (np.array of shape (batch_size)), *optional*:
            Array containing boolean predictions whether a text is machine-generated for each element in the batch.
        confidence (np.array of shape (batch_size)), *optional*:
            Array containing confidence scores of a text being machine-generated for each element in the batch.
    Nnum_tokens_scorednum_green_tokensgreen_fractionz_scorep_value
prediction
confidence)__name__
__module____qualname____doc__r   r   nparray__annotations__r   r   r   r   r   r    r$   r$   x/var/www/html/construction_image-detection-poc/venv/lib/python3.10/site-packages/transformers/generation/watermarking.pyr   (   s   
 r   c                   @   s   e Zd ZdZ		d!dededeeef de	de
f
d	d
Zdejde
fddZdejfddZdejdejdejfddZd"ddZ		d#dejdede	deeejf fddZd S )$WatermarkDetectoraS
  
    Detector for detection of watermark generated text. The detector needs to be given the exact same settings that were
    given during text generation to replicate the watermark greenlist generation and so detect the watermark. This includes
    the correct device that was used during text generation, the correct watermarking arguments and the correct tokenizer vocab size.
    The code was based on the [original repo](https://github.com/jwkirchenbauer/lm-watermarking/tree/main).

    See [the paper](https://arxiv.org/abs/2306.04634) for more information.

    Args:
        model_config (`PretrainedConfig`):
            The model config that will be used to get model specific arguments used when generating.
        device (`str`):
            The device which was used during watermarked text generation.
        watermarking_config (Union[`WatermarkingConfig`, `Dict`]):
            The exact same watermarking config and arguments used when generating text.
        ignore_repeated_ngrams (`bool`, *optional*, defaults to `False`):
            Whether to count every unique ngram only once or not.
        max_cache_size (`int`, *optional*, defaults to 128):
            The max size to be used for LRU caching of seeding/sampling algorithms called for every token.

    Examples:

    ```python
    >>> from transformers import AutoTokenizer, AutoModelForCausalLM, WatermarkDetector, WatermarkingConfig

    >>> model_id = "openai-community/gpt2"
    >>> model = AutoModelForCausalLM.from_pretrained(model_id)
    >>> tok = AutoTokenizer.from_pretrained(model_id)
    >>> tok.pad_token_id = tok.eos_token_id
    >>> tok.padding_side = "left"

    >>> inputs = tok(["This is the beginning of a long story", "Alice and Bob are"], padding=True, return_tensors="pt")
    >>> input_len = inputs["input_ids"].shape[-1]

    >>> # first generate text with watermark and without
    >>> watermarking_config = WatermarkingConfig(bias=2.5, seeding_scheme="selfhash")
    >>> out_watermarked = model.generate(**inputs, watermarking_config=watermarking_config, do_sample=False, max_length=20)
    >>> out = model.generate(**inputs, do_sample=False, max_length=20)

    >>> # now we can instantiate the detector and check the generated text
    >>> detector = WatermarkDetector(model_config=model.config, device="cpu", watermarking_config=watermarking_config)
    >>> detection_out_watermarked = detector(out_watermarked, return_dict=True)
    >>> detection_out = detector(out, return_dict=True)
    >>> detection_out_watermarked.prediction
    array([ True,  True])

    >>> detection_out.prediction
    array([False,  False])
    ```
    F   model_configdevicewatermarking_configignore_repeated_ngramsmax_cache_sizec                 C   sd   t |tr	| }|js|jn|j| _|d | _|| _td|j	|d|| _
t|d| j| _d S )Ngreenlist_ratio)
vocab_sizer)   )maxsizer$   )
isinstancer   to_dictis_encoder_decoderbos_token_iddecoder_start_token_idr-   r+   r   r.   	processorr   _get_ngram_score_get_ngram_score_cached)selfr(   r)   r*   r+   r,   r$   r$   r%   __init__}   s   

zWatermarkDetector.__init__prefixtargetc                 C   s   | j |}||v S N)r5   _get_greenlist_ids)r8   r:   r;   greenlist_idsr$   r$   r%   r6      s   z"WatermarkDetector._get_ngram_score	input_idsc                 C   s:  |j \}}t| jjdk}| jjd | }t|dt|| d d }|d d |f }t	|}t	|}	t
|j d D ]V}
t||
 }i }| D ]}|rW|n|d d }|d }| ||||< qQ| jr~t| ||
< t| |	|
< qBt| ||
< tdd t| | D |	|
< qB||	fS )Nselfhashr   r   c                 s   s    | ]	\}}|| V  qd S r<   r$   ).0freqoutcomer$   r$   r%   	<genexpr>   s
    
z=WatermarkDetector._score_ngrams_in_passage.<locals>.<genexpr>)shapeintr5   seeding_schemecontext_widthtorcharange	unsqueezer!   zerosrangecollectionsCounterkeysr7   r+   lensumvalueszip)r8   r?   
batch_size
seq_lengthr@   nindicesngram_tensorsnum_tokens_scored_batchgreen_token_count_batch	batch_idxfrequencies_tablengram_to_watermark_lookupngram_exampler:   r;   r$   r$   r%   _score_ngrams_in_passage   s,   
(

z*WatermarkDetector._score_ngrams_in_passagegreen_token_counttotal_num_tokensreturnc                 C   s4   | j }|||  }t|| d|  }|| }|S )Nr   )r-   r!   sqrt)r8   rb   rc   expected_countnumerdenomzr$   r$   r%   _compute_z_score   s
   z"WatermarkDetector._compute_z_scorer   r   c              
   C   s>   || | }dddt |dt d|d  t j      S )Nr         ?r   )r!   signexppi)r8   xlocscaleri   r$   r$   r%   _compute_pval   s   2zWatermarkDetector._compute_pval      @z_thresholdreturn_dictc           
   	   C   s   |d | j kr|ddddf }|jd | jj dk r&td| jj d| |\}}| ||}||k}|rO| |}d| }	t|||| ||||	dS |S )ai  
                Args:
                input_ids (`torch.LongTensor`):
                    The watermark generated text. It is advised to remove the prompt, which can affect the detection.
                z_threshold (`Dict`, *optional*, defaults to `3.0`):
                    Changing this threshold will change the sensitivity of the detector. Higher z threshold gives less
                    sensitivity and vice versa for lower z threshold.
                return_dict (`bool`,  *optional*, defaults to `False`):
                    Whether to return `~generation.WatermarkDetectorOutput` or not. If not it will return boolean predictions,
        ma
                Return:
                    [`~generation.WatermarkDetectorOutput`] or `np.array`: A [`~generation.WatermarkDetectorOutput`]
                    if `return_dict=True` otherwise a `np.array`.

        )r   r   Nr   rA   zEMust have at least `1` token to score after the first min_prefix_len=z' tokens required by the seeding scheme.)r   r   r   r   r   r   r   )	r3   rF   r5   rI   
ValueErrorra   rj   rs   r   )
r8   r?   ru   rv   r   rb   r   r   r   r   r$   r$   r%   __call__   s0   
	zWatermarkDetector.__call__N)Fr'   )r   r   )rt   F)r   r   r   r    r   strr   r   r   boolrG   r9   rJ   
LongTensorr6   ra   r!   r"   rj   rs   floatr   rx   r$   r$   r$   r%   r&   I   s<    8


r&   c                       s8   e Zd ZdZd
dee def fddZdd	 Z  Z	S )BayesianDetectorConfigaZ  
    This is the configuration class to store the configuration of a [`BayesianDetectorModel`]. It is used to
    instantiate a Bayesian Detector model according to the specified arguments.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        watermarking_depth (`int`, *optional*):
            The number of tournament layers.
        base_rate (`float1`, *optional*, defaults to 0.5):
            Prior probability P(w) that a text is watermarked.
    Nrk   watermarking_depth	base_ratec                    s.   || _ || _d | _d | _t jdi | d S )Nr$   )r~   r   
model_namer*   superr9   )r8   r~   r   kwargs	__class__r$   r%   r9     s
   zBayesianDetectorConfig.__init__c                 C   s   || _ || _d S r<   )r   r*   )r8   r   r*   r$   r$   r%   set_detector_information  s   
z/BayesianDetectorConfig.set_detector_information)Nrk   )
r   r   r   r    r   rG   r|   r9   r   __classcell__r$   r$   r   r%   r}      s    	r}   c                   @   s6   e Zd ZU dZdZeej ed< dZ	eej ed< dS )$BayesianWatermarkDetectorModelOutputa\  
    Base class for outputs of models predicting if the text is watermarked.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss.
        posterior_probabilities (`torch.FloatTensor` of shape `(1,)`):
            Multiple choice classification loss.
    Nlossposterior_probabilities)
r   r   r   r    r   r   rJ   FloatTensorr#   r   r$   r$   r$   r%   r     s   
 
r   c                       s\   e Zd ZdZdef fddZdejdeejejf fddZ	dejdejfd	d
Z
  ZS )%BayesianDetectorWatermarkedLikelihoodz~Watermarked likelihood model for binary-valued g-values.

    This takes in g-values and returns p(g_values|watermarked).
    r~   c              	      sT   t    || _tjddtdd|  | _tjdtdd| j| | _dS )z!Initializes the model parameters.g      gMbP?r   N)	r   r9   r~   rJ   r	   	Parameterrandnbetadelta)r8   r~   r   r$   r%   r9   (  s   
 $z.BayesianDetectorWatermarkedLikelihood.__init__g_valuesrd   c                 C   st   t jt j|dd| jdd}t j|dd}| jddddf || jjd   | j	 }t 
|}d	| }||fS )
aS  Computes the unique token probability distribution given g-values.

        Args:
            g_values (`torch.Tensor` of shape `(batch_size, seq_len, watermarking_depth)`):
                PRF values.

        Returns:
            p_one_unique_token and p_two_unique_tokens, both of shape
            [batch_size, seq_len, watermarking_depth]. p_one_unique_token[i,t,l]
            gives the probability of there being one unique token in a tournament
            match on layer l, on timestep t, for batch item i.
            p_one_unique_token[i,t,l] + p_two_unique_token[i,t,l] = 1.
        rl   dim)axisrA   )diagonal.N).Nr   )rJ   repeat_interleaverL   r~   trilr   typedtypesqueezer   sigmoid)r8   r   rp   logitsp_two_unique_tokensp_one_unique_tokenr$   r$   r%   _compute_latents/  s   0
z6BayesianDetectorWatermarkedLikelihood._compute_latentsc                 C   s"   |  |\}}d|d | |  S )a8  Computes the likelihoods P(g_values|watermarked).

        Args:
            g_values (`torch.Tensor` of shape `(batch_size, seq_len, watermarking_depth)`):
                g-values (values 0 or 1)

        Returns:
            p(g_values|watermarked) of shape [batch_size, seq_len, watermarking_depth].
        rk   )r   )r8   r   r   r   r$   r$   r%   forwardO  s   
z-BayesianDetectorWatermarkedLikelihood.forward)r   r   r   r    rG   r9   rJ   Tensorr   r   r   r   r$   r$   r   r%   r   "  s
      r   c                
       s   e Zd ZdZeZdZ fddZdd Zde	j
de	j
d	e	j
d
ede	j
f
ddZ			dde	j
d	e	j
dee	j
 defddZ  ZS )BayesianDetectorModelag  
    Bayesian classifier for watermark detection.

    This detector uses Bayes' rule to compute a watermarking score, which is the sigmoid of the log of ratio of the
    posterior probabilities P(watermarked|g_values) and P(unwatermarked|g_values). Please see the section on
    BayesianScore in the paper for further details.
    Paper URL: https://www.nature.com/articles/s41586-024-08025-4

    Note that this detector only works with non-distortionary Tournament-based watermarking using the Bernoulli(0.5)
    g-value distribution.

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config ([`BayesianDetectorConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
    modelc                    sF   t  | |j| _|j| _t| jd| _tjt	| jg| _
d S )N)r~   )r   r9   r~   r   r   likelihood_model_watermarkedrJ   r	   r   tensorprior)r8   configr   r$   r%   r9   }  s   zBayesianDetectorModel.__init__c                 C   s&   t |tjr|jjjddd dS dS )zInitialize the weights.g        g{Gz?)meanstdN)r0   r	   r   weightdatanormal_)r8   moduler$   r$   r%   _init_weights  s   z#BayesianDetectorModel._init_weightslikelihoods_watermarkedlikelihoods_unwatermarkedmaskr   rd   c                 C   s   t j|dd}t j|ddd}t t j|dtdd}t t j|dtdd}|| }t d|| }t |t d	|  }	|	| }
t |
S )
a7  
        Compute posterior P(w|g) given likelihoods, mask and prior.

        Args:
            likelihoods_watermarked (`torch.Tensor` of shape `(batch, length, depth)`):
                Likelihoods P(g_values|watermarked) of g-values under watermarked model.
            likelihoods_unwatermarked (`torch.Tensor` of shape `(batch, length, depth)`):
                Likelihoods P(g_values|unwatermarked) of g-values under unwatermarked model.
            mask (`torch.Tensor` of shape `(batch, length)`):
                A binary array indicating which g-values should be used. g-values with mask value 0 are discarded.
            prior (`float`):
                the prior probability P(w) that the text is watermarked.

        Returns:
            Posterior probability P(watermarked|g_values), shape [batch].
        rA   r   h㈵>wJ?)minmaxgKH9infzi...->ir   )rJ   rL   clamplogr|   einsumr   )r8   r   r   r   r   log_likelihoods_watermarkedlog_likelihoods_unwatermarkedlog_oddsrelative_surprisal_likelihoodrelative_surprisal_priorrelative_surprisalr$   r$   r%   _compute_posterior  s   
z(BayesianDetectorModel._compute_posteriorNr   Fr   labelsc                 C   s   |  |}dt| }| j|||| jd}d}	|dur8t }
t| j jd }|| }|
t|dd|| }	|sE|	du rA|fS ||	fS t	|	|dS )a  
        Computes the watermarked posterior P(watermarked|g_values).

        Args:
            g_values (`torch.Tensor` of shape `(batch_size, seq_len, watermarking_depth, ...)`):
                g-values (with values 0 or 1)
            mask:
                A binary array shape [batch_size, seq_len] indicating which g-values should be used. g-values with mask
                value 0 are discarded.

        Returns:
            p(watermarked | g_values), of shape [batch_size].
        rk   )r   r   r   r   Nr   r   r   )r   r   )
r   rJ   	ones_liker   r   r
   rS   r   r   r   )r8   r   r   r   loss_batch_weightrv   r   r   outr   loss_fctloss_unwweightloss_weightr$   r$   r%   r     s"   
zBayesianDetectorModel.forward)Nr   F)r   r   r   r    r}   config_classbase_model_prefixr9   r   rJ   r   r|   r   r   r   r   r   r$   r$   r   r%   r   `  s:    

.r   c                   @   s6   e Zd ZdZdededefddZdej	fdd	Z
d
S )SynthIDTextWatermarkDetectora  
    SynthID text watermark detector class.

    This class has to be initialized with the trained bayesian detector module check script
    in examples/synthid_text/detector_training.py for example in training/saving/loading this
    detector module. The folder also showcases example use case of this detector.

    Parameters:
        detector_module ([`BayesianDetectorModel`]):
            Bayesian detector module object initialized with parameters.
            Check https://github.com/huggingface/transformers-research-projects/tree/main/synthid_text for usage.
        logits_processor (`SynthIDTextWatermarkLogitsProcessor`):
            The logits processor used for watermarking.
        tokenizer (`Any`):
            The tokenizer used for the model.

    Examples:
    ```python
    >>> from transformers import (
    ...     AutoTokenizer, BayesianDetectorModel, SynthIDTextWatermarkLogitsProcessor, SynthIDTextWatermarkDetector
    ... )

    >>> # Load the detector. See https://github.com/huggingface/transformers-research-projects/tree/main/synthid_text for training a detector.
    >>> detector_model = BayesianDetectorModel.from_pretrained("joaogante/dummy_synthid_detector")
    >>> logits_processor = SynthIDTextWatermarkLogitsProcessor(
    ...     **detector_model.config.watermarking_config, device="cpu"
    ... )
    >>> tokenizer = AutoTokenizer.from_pretrained(detector_model.config.model_name)
    >>> detector = SynthIDTextWatermarkDetector(detector_model, logits_processor, tokenizer)

    >>> # Test whether a certain string is watermarked
    >>> test_input = tokenizer(["This is a test input"], return_tensors="pt")
    >>> is_watermarked = detector(test_input.input_ids)
    ```
    detector_modulelogits_processor	tokenizerc                 C   s   || _ || _|| _d S r<   )r   r   r   )r8   r   r   r   r$   r$   r%   r9     s   
z%SynthIDTextWatermarkDetector.__init__tokenized_outputsc                 C   s\   | j j|| jjdd d | j jd d f }| j j|d}|| }| j j|d}| ||S )N)r?   eos_token_idr   )r?   )r   compute_eos_token_maskr   r   	ngram_lencompute_context_repetition_maskcompute_g_valuesr   )r8   r   eos_token_maskcontext_repetition_maskcombined_maskr   r$   r$   r%   rx     s   z%SynthIDTextWatermarkDetector.__call__N)r   r   r   r    r   r   r   r9   rJ   r   rx   r$   r$   r$   r%   r     s    $

r   )(rO   dataclassesr   	functoolsr   typingr   r   r   r   r   numpyr!   rJ   r	   torch.nnr
   modeling_utilsr   utilsr   r   r   configuration_utilsr   r   logits_processr   r   
get_loggerr   loggerr   r&   r}   r   Moduler   r   r   r$   r$   r$   r%   <module>   s4   
  -> 