from typing import Optional, Tuple

import torch


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def sdpa_attention_forward(
    module: torch.nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    dropout: float = 0.0,
    scaling: Optional[float] = None,
    is_causal: Optional[bool] = None,
    **kwargs,
) -> Tuple[torch.Tensor, None]:
    # Expand the key/value heads to match the number of attention heads (grouped-query attention).
    if hasattr(module, "num_key_value_groups"):
        key = repeat_kv(key, module.num_key_value_groups)
        value = repeat_kv(value, module.num_key_value_groups)

    # Trim a 4D attention mask to the key sequence length.
    causal_mask = attention_mask
    if attention_mask is not None and causal_mask.ndim == 4:
        causal_mask = causal_mask[:, :, :, : key.shape[-2]]

    # SDPA expects contiguous inputs.
    query = query.contiguous()
    key = key.contiguous()
    value = value.contiguous()

    # Default to causal attention when processing more than one query token and no explicit mask is given.
    if is_causal is None:
        is_causal = query.shape[2] > 1 and causal_mask is None

    # During torch.jit tracing, shape comparisons yield tensors; SDPA requires a plain bool.
    if torch.jit.is_tracing() and isinstance(is_causal, torch.Tensor):
        is_causal = is_causal.item()

    attn_output = torch.nn.functional.scaled_dot_product_attention(
        query,
        key,
        value,
        attn_mask=causal_mask,
        dropout_p=dropout,
        scale=scaling,
        is_causal=is_causal,
    )
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, None
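# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the library module): a minimal check
# that repeat_kv matches torch.repeat_interleave along dim=1, plus a call to
# sdpa_attention_forward with a stand-in module. The `_DummyAttention` class
# and all tensor shapes below are assumptions made for this example only.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    torch.manual_seed(0)

    # repeat_kv should be equivalent to torch.repeat_interleave(x, dim=1, repeats=n_rep).
    kv = torch.randn(2, 4, 16, 8)  # (batch, num_key_value_heads, seqlen, head_dim)
    assert torch.equal(repeat_kv(kv, 3), torch.repeat_interleave(kv, dim=1, repeats=3))

    class _DummyAttention(torch.nn.Module):
        # Hypothetical stand-in: only the attribute read by sdpa_attention_forward is provided.
        num_key_value_groups = 3

    query = torch.randn(2, 12, 16, 8)  # (batch, num_attention_heads, seqlen, head_dim)
    attn_output, _ = sdpa_attention_forward(_DummyAttention(), query, kv, kv, attention_mask=None)
    print(attn_output.shape)  # (batch, seqlen, num_attention_heads, head_dim) after transpose(1, 2)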