from __future__ import annotations

import inspect
from functools import lru_cache, wraps
from typing import Callable

import torch
from safetensors.torch import storage_ptr, storage_size
from torch import nn

from .utils import is_torch_greater_or_equal, is_torch_xla_available, is_torchdynamo_compiling, logging


ALL_LAYERNORM_LAYERS = [nn.LayerNorm]

logger = logging.get_logger(__name__)

is_torch_greater_or_equal_than_2_6 = is_torch_greater_or_equal("2.6", accept_dev=True)
is_torch_greater_or_equal_than_2_4 = is_torch_greater_or_equal("2.4", accept_dev=True)
is_torch_greater_or_equal_than_2_3 = is_torch_greater_or_equal("2.3", accept_dev=True)
is_torch_greater_or_equal_than_2_2 = is_torch_greater_or_equal("2.2", accept_dev=True)
is_torch_greater_or_equal_than_2_1 = is_torch_greater_or_equal("2.1", accept_dev=True)
is_torch_greater_or_equal_than_2_0 = is_torch_greater_or_equal("2.0", accept_dev=True)
is_torch_greater_or_equal_than_1_13 = is_torch_greater_or_equal("1.13", accept_dev=True)
is_torch_greater_or_equal_than_1_12 = is_torch_greater_or_equal("1.12", accept_dev=True)

# Cache this result, as it is a C FFI call that can be relatively slow.
_torch_distributed_available = torch.distributed.is_available()

if is_torch_greater_or_equal("2.5") and _torch_distributed_available:
    from torch.distributed.tensor import Replicate
    from torch.distributed.tensor.parallel import (
        ColwiseParallel,
        RowwiseParallel,
    )


def softmax_backward_data(parent, grad_output, output, dim, self):
    """
    A function that calls the internal `_softmax_backward_data` PyTorch method and that adjusts the arguments according
    to the torch version detected.
    """
    from torch import _softmax_backward_data

    return _softmax_backward_data(grad_output, output, parent.dim, self.dtype)
j|d |d | jdud | jj}d|j_|j|  d|j_| jdurrd|j_|j|  d|j_|S )a  
    Prune a linear layer to keep only entries in index.

    Used to remove heads.

    Args:
        layer (`torch.nn.Linear`): The layer to prune.
        index (`torch.LongTensor`): The indices to keep in the layer.
        dim (`int`, *optional*, defaults to 0): The dimension on which to keep the indices.

    Returns:
        `torch.nn.Linear`: The pruned layer as a new layer with `requires_grad=True`.
    """
    index = index.to(layer.weight.device)
    W = layer.weight.index_select(dim, index).clone().detach()
    if layer.bias is not None:
        if dim == 1:
            b = layer.bias.clone().detach()
        else:
            b = layer.bias[index].clone().detach()
    new_size = list(layer.weight.size())
    new_size[dim] = len(index)
    new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)
    new_layer.weight.requires_grad = False
    new_layer.weight.copy_(W.contiguous())
    new_layer.weight.requires_grad = True
    if layer.bias is not None:
        new_layer.bias.requires_grad = False
        new_layer.bias.copy_(b.contiguous())
        new_layer.bias.requires_grad = True
    return new_layer


class Conv1D(nn.Module):
    """
    1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).

    Basically works like a linear layer but the weights are transposed.

    Args:
        nf (`int`): The number of output features.
        nx (`int`): The number of input features.
    """

    def __init__(self, nf, nx):
        super().__init__()
        self.nf = nf
        self.nx = nx
        self.weight = nn.Parameter(torch.empty(nx, nf))
        self.bias = nn.Parameter(torch.zeros(nf))
        nn.init.normal_(self.weight, std=0.02)

    def __repr__(self) -> str:
        return "Conv1D(nf={nf}, nx={nx})".format(**self.__dict__)

    def forward(self, x):
        size_out = x.size()[:-1] + (self.nf,)
        x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
        x = x.view(size_out)
        return x
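
# Sketch (hypothetical sizes): the weight is stored as (nx, nf), transposed relative
# to nn.Linear, so the forward pass is effectively x @ weight + bias.
#
#     conv = Conv1D(nf=12, nx=4)
#     out = conv(torch.randn(2, 3, 4))
#     assert out.shape == (2, 3, 12)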


def prune_conv1d_layer(layer: Conv1D, index: torch.LongTensor, dim: int = 1) -> Conv1D:
    """
    Prune a Conv1D layer to keep only entries in index. A Conv1D works as a Linear layer (see e.g. BERT) but the weights
    are transposed.

    Used to remove heads.

    Args:
        layer ([`~pytorch_utils.Conv1D`]): The layer to prune.
        index (`torch.LongTensor`): The indices to keep in the layer.
        dim (`int`, *optional*, defaults to 1): The dimension on which to keep the indices.

    Returns:
        [`~pytorch_utils.Conv1D`]: The pruned layer as a new layer with `requires_grad=True`.
    """
    index = index.to(layer.weight.device)
    W = layer.weight.index_select(dim, index).clone().detach()
    if dim == 0:
        b = layer.bias.clone().detach()
    else:
        b = layer.bias[index].clone().detach()
    new_size = list(layer.weight.size())
    new_size[dim] = len(index)
    new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device)
    new_layer.weight.requires_grad = False
    new_layer.weight.copy_(W.contiguous())
    new_layer.weight.requires_grad = True
    new_layer.bias.requires_grad = False
    new_layer.bias.copy_(b.contiguous())
    new_layer.bias.requires_grad = True
    return new_layer
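
# Sketch (hypothetical sizes): with the default dim=1, columns of the (nx, nf) weight
# are pruned, i.e. output features are removed.
#
#     conv = Conv1D(nf=6, nx=4)
#     kept = prune_conv1d_layer(conv, torch.tensor([0, 1, 4]))
#     assert kept.weight.shape == (4, 3)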


def prune_layer(
    layer: nn.Linear | Conv1D, index: torch.LongTensor, dim: int | None = None
) -> nn.Linear | Conv1D:
    """
    Prune a Conv1D or linear layer to keep only entries in index.

    Used to remove heads.

    Args:
        layer (`Union[torch.nn.Linear, Conv1D]`): The layer to prune.
        index (`torch.LongTensor`): The indices to keep in the layer.
        dim (`int`, *optional*): The dimension on which to keep the indices.

    Returns:
        `torch.nn.Linear` or [`~pytorch_utils.Conv1D`]: The pruned layer as a new layer with `requires_grad=True`.
    """
    if isinstance(layer, nn.Linear):
        return prune_linear_layer(layer, index, dim=0 if dim is None else dim)
    elif isinstance(layer, Conv1D):
        return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim)
    else:
        raise ValueError(f"Can't prune layer of class {layer.__class__}")


def apply_chunking_to_forward(
    forward_fn: Callable[..., torch.Tensor], chunk_size: int, chunk_dim: int, *input_tensors
) -> torch.Tensor:
    """
    This function chunks the `input_tensors` into smaller input tensor parts of size `chunk_size` over the dimension
    `chunk_dim`. It then applies a layer `forward_fn` to each chunk independently to save memory.

    If the `forward_fn` is independent across the `chunk_dim` this function will yield the same result as directly
    applying `forward_fn` to `input_tensors`.

    Args:
        forward_fn (`Callable[..., torch.Tensor]`):
            The forward function of the model.
        chunk_size (`int`):
            The chunk size of a chunked tensor: `num_chunks = len(input_tensors[0]) / chunk_size`.
        chunk_dim (`int`):
            The dimension over which the `input_tensors` should be chunked.
        input_tensors (`Tuple[torch.Tensor]`):
            The input tensors of `forward_fn` which will be chunked

    Returns:
        `torch.Tensor`: A tensor with the same shape as the one `forward_fn` would have given if applied directly.


    Examples:

    ```python
    # rename the usual forward() fn to forward_chunk()
    def forward_chunk(self, hidden_states):
        hidden_states = self.decoder(hidden_states)
        return hidden_states


    # implement a chunked forward function
    def forward(self, hidden_states):
        return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states)
    ```"""

    assert len(input_tensors) > 0, f"{input_tensors} has to be a tuple/list of tensors"

    num_args_in_forward_chunk_fn = len(inspect.signature(forward_fn).parameters)
    if num_args_in_forward_chunk_fn != len(input_tensors):
        raise ValueError(
            f"forward_chunk_fn expects {num_args_in_forward_chunk_fn} arguments, but only {len(input_tensors)} input "
            "tensors are given"
        )

    if chunk_size > 0:
        tensor_shape = input_tensors[0].shape[chunk_dim]
        for input_tensor in input_tensors:
            if input_tensor.shape[chunk_dim] != tensor_shape:
                raise ValueError(
                    f"All input tensors have to be of the same shape: {tensor_shape}, "
                    f"found shape {input_tensor.shape[chunk_dim]}"
                )

        if input_tensors[0].shape[chunk_dim] % chunk_size != 0:
            raise ValueError(
                f"The dimension to be chunked {input_tensors[0].shape[chunk_dim]} has to be a multiple of the chunk "
                f"size {chunk_size}"
            )

        num_chunks = input_tensors[0].shape[chunk_dim] // chunk_size

        # chunk each input tensor into a tuple of chunks
        input_tensors_chunks = tuple(input_tensor.chunk(num_chunks, dim=chunk_dim) for input_tensor in input_tensors)
        # apply forward fn to every tuple of chunks
        output_chunks = tuple(forward_fn(*input_tensors_chunk) for input_tensors_chunk in zip(*input_tensors_chunks))
        # concatenate the outputs along the same dimension
        return torch.cat(output_chunks, dim=chunk_dim)

    return forward_fn(*input_tensors)
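
# Behavior sketch with hypothetical shapes: chunking over the sequence dimension
# matches a direct call whenever forward_fn treats that dimension elementwise.
#
#     hidden = torch.randn(1, 8, 16)
#     out = apply_chunking_to_forward(lambda h: h * 2, 4, 1, hidden)
#     assert torch.equal(out, hidden * 2)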


def find_pruneable_heads_and_indices(
    heads: list[int], n_heads: int, head_size: int, already_pruned_heads: set[int]
) -> tuple[set[int], torch.LongTensor]:
    """
    Finds the heads and their indices taking `already_pruned_heads` into account.

    Args:
        heads (`List[int]`): List of the indices of heads to prune.
        n_heads (`int`): The number of heads in the model.
        head_size (`int`): The size of each head.
        already_pruned_heads (`Set[int]`): A set of already pruned heads.

    Returns:
        `Tuple[Set[int], torch.LongTensor]`: A tuple with the indices of heads to prune taking `already_pruned_heads`
        into account and the indices of rows/columns to keep in the layer weight.
    """
    mask = torch.ones(n_heads, head_size)
    heads = set(heads) - already_pruned_heads  # convert to set and remove already pruned heads
    for head in heads:
        # compute how many already pruned heads are before this one and shift the index accordingly
        head = head - sum(1 if h < head else 0 for h in already_pruned_heads)
        mask[head] = 0
    mask = mask.view(-1).contiguous().eq(1)
    index: torch.LongTensor = torch.arange(len(mask))[mask].long()
    return heads, index
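
# Sketch: starting from 4 heads of size 2 with head 1 already pruned (so 3 remain),
# pruning head 2 maps to row 1 of the current mask.
#
#     heads, index = find_pruneable_heads_and_indices([2], 3, 2, {1})
#     assert heads == {2} and index.tolist() == [0, 1, 4, 5]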


def meshgrid(
    *tensors: torch.Tensor | list[torch.Tensor],
    indexing: str | None = None,
) -> tuple[torch.Tensor, ...]:
    """
    Wrapper around torch.meshgrid to avoid warning messages about the introduced `indexing` argument.

    Reference: https://pytorch.org/docs/1.13/generated/torch.meshgrid.html
    """
    return torch.meshgrid(*tensors, indexing=indexing)


def id_tensor_storage(tensor: torch.Tensor) -> tuple[torch.device, int, int]:
    """
    Unique identifier to a tensor storage. Multiple different tensors can share the same underlying storage. For
    example, "meta" tensors all share the same storage, and thus their identifier will all be equal. This identifier is
    guaranteed to be unique and constant for this tensor's storage during its lifetime. Two tensor storages with
    non-overlapping lifetimes may have the same id.
    """
    if tensor.device.type == "xla" and is_torch_xla_available():
        # XLA tensors do not expose a storage pointer; since such a tensor must have
        # been created through torch_xla, this import is safe and provides a unique id.
        import torch_xla

        unique_id = torch_xla._XLAC._xla_get_tensor_id(tensor)
    else:
        unique_id = storage_ptr(tensor)

    return tensor.device, unique_id, storage_size(tensor)
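
# Sketch: two views of one storage share an id, a fresh copy does not.
#
#     base = torch.randn(4)
#     assert id_tensor_storage(base) == id_tensor_storage(base[:2])
#     assert id_tensor_storage(base) != id_tensor_storage(base.clone())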


def isin_mps_friendly(elements: torch.Tensor, test_elements: torch.Tensor | int) -> torch.Tensor:
    """
    Same as `torch.isin` without flags, but MPS-friendly. We can remove this function when we stop supporting
    torch <= 2.3. See https://github.com/pytorch/pytorch/issues/77764#issuecomment-2067838075

    Args:
        elements (`torch.Tensor`): Input elements
        test_elements (`torch.Tensor` or `int`): The elements to check against.

    Returns:
        `torch.Tensor`: A boolean tensor of the same shape as `elements` that is True for `elements` in `test_elements`
        and False otherwise
    """
    if elements.device.type == "mps" and not is_torch_greater_or_equal_than_2_4:
        test_elements = torch.tensor(test_elements)
        if test_elements.ndim == 0:
            test_elements = test_elements.unsqueeze(0)
        return elements.tile(test_elements.shape[0], 1).eq(test_elements.unsqueeze(1)).sum(dim=0).bool().squeeze()
    else:
        # Every other backend can rely on torch.isin directly.
        return torch.isin(elements, test_elements)
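
# Sketch: semantics match torch.isin on any backend.
#
#     tokens = torch.tensor([1, 5, 9])
#     assert isin_mps_friendly(tokens, torch.tensor([5, 9])).tolist() == [False, True, True]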


def compile_compatible_method_lru_cache(*lru_args, **lru_kwargs):
    """
    LRU cache decorator from standard functools library, but with a workaround to disable
    caching when torchdynamo is compiling. Expected to work with class methods.
    """

    def decorator(func):
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            if not is_torchdynamo_compiling():
                # Build a per-instance cached bound method on first use, so that each
                # instance gets its own LRU cache.
                if not hasattr(self, f"_cached_{func.__name__}"):
                    self.__setattr__(
                        f"_cached_{func.__name__}", lru_cache(*lru_args, **lru_kwargs)(func).__get__(self)
                    )
                return self.__getattribute__(f"_cached_{func.__name__}")(*args, **kwargs)
            else:
                # While compiling, skip caching and call the original method directly.
                return func(self, *args, **kwargs)

        return wrapper

    return decorator
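
# Usage sketch (hypothetical class): cache an expensive per-instance computation,
# except under torch.compile, where an lru_cache would break tracing.
#
#     class RotaryTable:
#         @compile_compatible_method_lru_cache(maxsize=8)
#         def table(self, seq_len: int) -> torch.Tensor:
#             return torch.arange(seq_len)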