"""PyTorch IdeficsVision model: a copy of CLIPVisionModel using a simpler config object"""

import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn

from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
from ...utils import ModelOutput, logging
from .configuration_idefics import IdeficsVisionConfig


logger = logging.get_logger(__name__)


@dataclass
class IdeficsVisionModelOutput(ModelOutput):
    """
    Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.

    Args:
        image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
            The image embeddings obtained by applying the projection layer to the pooler_output.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    Nimage_embedslast_hidden_state.hidden_states
attentions)__name__
__module____qualname____doc__r   r   torchFloatTensor__annotations__r   r   r   r    r   r   v/var/www/html/construction_image-detection-poc/venv/lib/python3.10/site-packages/transformers/models/idefics/vision.pyr   "   s   
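

# Illustrative note (not part of the original file): `ModelOutput` subclasses act
# as both dataclasses and tuples, so the fields above can be read either way, e.g.
#
#   out = IdeficsVisionModelOutput(image_embeds=torch.zeros(1, 512))
#   out.image_embeds is out[0]  # True; fields left as None are skipped in the tuple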


class IdeficsVisionEmbeddings(nn.Module):
    def __init__(self, config: IdeficsVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))

        self.patch_embedding = nn.Conv2d(
            in_channels=config.num_channels,
            out_channels=self.embed_dim,
            kernel_size=self.patch_size,
            stride=self.patch_size,
            bias=False,
        )

        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches + 1
        self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
        self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)

    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        """
        This method allows interpolating the pre-trained position encodings so that the model can be used on
        higher-resolution images.

        Source:
        https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174
        """
        num_patches = embeddings.shape[1] - 1
        pos_embed = self.position_embedding(self.position_ids)
        num_positions = pos_embed.shape[1] - 1
        if num_patches == num_positions and height == width:
            return pos_embed
        class_pos_embed = pos_embed[:, 0]
        patch_pos_embed = pos_embed[:, 1:]

        embed_dim = embeddings.shape[-1]
        num_h_patches = height // self.config.patch_size
        num_w_patches = width // self.config.patch_size
        # add a small number to avoid a floating point error in the interpolation (see the DINO source above)
        num_h_patches, num_w_patches = num_h_patches + 0.1, num_w_patches + 0.1
        sqrt_num_positions = math.sqrt(num_positions)
        patch_pos_embed = patch_pos_embed.reshape(1, int(sqrt_num_positions), int(sqrt_num_positions), embed_dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
        fp32_upcasting = patch_pos_embed.dtype == torch.bfloat16
        if fp32_upcasting:
            logger.warning_once(
                "Upcasting patch_pos_embed to fp32 for interpolation since `upsample_bicubic2d_out_frame` in "
                "nn.functional.interpolate is not implemented for 'torch.bfloat16' dtype. This will result in a "
                "slight overhead."
            )
            patch_pos_embed = patch_pos_embed.to(torch.float)
        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed,
            scale_factor=(num_h_patches / sqrt_num_positions, num_w_patches / sqrt_num_positions),
            mode="bicubic",
            align_corners=False,
        )
        if fp32_upcasting:
            patch_pos_embed = patch_pos_embed.to(torch.bfloat16)
        if int(num_h_patches) != patch_pos_embed.shape[-2] or int(num_w_patches) != patch_pos_embed.shape[-1]:
            raise ValueError(
                f"Number of patches for images ({int(num_h_patches), int(num_w_patches)}) don't match the "
                f"shape of position embedding ({patch_pos_embed.shape[-2], patch_pos_embed.shape[-1]})"
            )
        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, embed_dim)
        return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
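
    # Illustrative note (not from the original source): for a checkpoint trained at
    # 224x224 with 14x14 patches, `num_positions` above is 256 and the stored grid is
    # 16x16; a 448x448 input interpolates it to 32x32 (scale (32 + 0.1) / 16 per side).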

    def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
        batch_size, num_channels, height, width = pixel_values.shape
        if not interpolate_pos_encoding:
            if height != self.image_size or width != self.image_size:
                raise ValueError(
                    f"Input image size ({height}*{width}) doesn't match model ({self.image_size}*{self.image_size})."
                    " You should try to set `interpolate_pos_encoding=True`"
                )

        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))  # shape = [*, width, grid, grid]

        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)

        class_embeds = self.class_embedding.expand(batch_size, 1, -1)
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)

        # add positional encoding to each token
        if interpolate_pos_encoding:
            embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
        else:
            embeddings = embeddings + self.position_embedding(self.position_ids)

        return embeddings
ddejde	ej de	ej de	e
 deeje	ej f f
ddZ  ZS )IdeficsVisionAttentionz=Multi-headed attention from 'Attention Is All You Need' paperc                    s   t    || _|j| _|j| _| j| j | _| j| j | jkr-td| j d| j d| jd | _	|j
| _t| j| j| _t| j| j| _t| j| j| _t| j| j| _d S )Nz;embed_dim must be divisible by num_heads (got `embed_dim`: z and `num_heads`: z).g      )r(   r)   r   r*   r+   num_attention_heads	num_headshead_dimrY   scaleattention_dropoutdropoutr   Lineark_projv_projq_projout_projr;   r=   r   r   r)      s"   

zIdeficsVisionAttention.__init__tensorseq_lenbszc                 C   s    | ||| j| jdd S )Nr   r$   )rZ   ru   rv   ri   
contiguous)r<   r   r   r   r   r   r   _shape   s    zIdeficsVisionAttention._shapeNFr   attention_maskcausal_attention_maskoutput_attentionsrB   c                 C   s  |  \}}}| || j }| | |d|}	| | |d|}
|| j d| jf}| |||j| }|	j| }	|
j| }
|	 d}t	
||	dd}|  || j ||fkrmtd|| j ||f d|   |dur|  |d||fkrtd|d||f d|   ||| j||| }||| j ||}|dur|  |d||fkrtd|d||f d|   ||| j||| }||| j ||}tjj|dd}|r||| j||}||| j ||}nd}tjj|| j| jd	}t	
||
}|  || j || jfkr$td
|| j|| jf d|   ||| j|| j}|dd}||||}| |}||fS )z#Input shape: Batch x Time x Channelr&   r   r$   z$Attention weights should be of size z	, but is Nz!Attention mask should be of size rI   )ptrainingz `attn_output` should be of size )sizer}   rw   r   r{   r|   ru   rv   rZ   r   bmmri   rY   r   rW   softmaxry   r   rN   r~   )r<   r   r   r   r   r   tgt_lenr+   query_states
key_statesvalue_states
proj_shapesrc_lenattn_weightsattn_weights_reshaped
attn_probsattn_outputr   r   r   rn      sd   	



zIdeficsVisionAttention.forward)NNF)r   r   r   r   r)   r   rp   rO   r   r   rq   r   rn   rr   r   r   r=   r   rs      s$    rs   c                       s2   e Zd Z fddZdejdejfddZ  ZS )IdeficsVisionMLPc                    sD   t    || _t|j | _t|j|j	| _
t|j	|j| _d S N)r(   r)   r   r   
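

# --- Illustrative sketch (not part of the original file) ----------------------
# The `_shape` helper above splits hidden_size into (num_heads, head_dim), and the
# bmm runs over a flattened (bsz * num_heads) batch. For a hypothetical config
# with hidden_size=64 and num_attention_heads=4 on a length-5 sequence:
#
#   attn = IdeficsVisionAttention(config)
#   out, weights = attn(torch.randn(2, 5, 64), output_attentions=True)
#   # out.shape == (2, 5, 64); weights.shape == (2, 4, 5, 5), one map per head
# ------------------------------------------------------------------------------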


class IdeficsVisionMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


class IdeficsVisionEncoderLayer(nn.Module):
    def __init__(self, config: IdeficsVisionConfig):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = IdeficsVisionAttention(config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = IdeficsVisionMLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        causal_attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        # pre-LayerNorm residual blocks: x = x + Attn(LN1(x)); x = x + MLP(LN2(x))
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs


class IdeficsVisionEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`IdeficsVisionEncoderLayer`].

    Args:
        config: IdeficsVisionConfig
    """

    def __init__(self, config: IdeficsVisionConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([IdeficsVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        inputs_embeds,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated vectors
                than the model's internal embedding lookup matrix.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Causal mask for the text model. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        hidden_states = inputs_embeds
        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    encoder_layer.__call__,
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                    output_attentions,
                )
            else:
                layer_outputs = encoder_layer(
                    hidden_states,
                    attention_mask,
                    causal_attention_mask,
                    output_attentions=output_attentions,
                )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )


class IdeficsVisionTransformer(nn.Module):
    def __init__(self, config: IdeficsVisionConfig):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size

        self.embeddings = IdeficsVisionEmbeddings(config)
        # NOTE: "pre_layrnorm" (sic) matches the CLIP checkpoint weight names, so the
        # misspelling is kept for state_dict compatibility.
        self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        self.encoder = IdeficsVisionEncoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: Optional[bool] = False,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        r"""
        Returns:

        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
        hidden_states = self.pre_layrnorm(hidden_states)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        # pool by taking the CLS token's final hidden state
        pooled_output = last_hidden_state[:, 0, :]
        pooled_output = self.post_layernorm(pooled_output)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )