o
    h*                     @   s   d dl Z d dlmZ d dlZd dlmZ d dlmZmZ d dl	m
Z
mZ ddlmZ ddlmZ g d	ZG d
d deZG dd deZG dd deZG dd deZG dd deeZdS )    N)Any)Tensor)
functionalinit)	ParameterUninitializedParameter   )LazyModuleMixin)Module)BilinearIdentity
LazyLinearLinearc                       s@   e Zd ZdZdededdf fddZdedefd	d
Z  ZS )r   a  A placeholder identity operator that is argument-insensitive.

    Args:
        args: any argument (unused)
        kwargs: any keyword argument (unused)

    Shape:
        - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
        - Output: :math:`(*)`, same shape as the input.

    Examples::

        >>> m = nn.Identity(54, unused_argument1=0.1, unused_argument2=False)
        >>> input = torch.randn(128, 20)
        >>> output = m(input)
        >>> print(output.size())
        torch.Size([128, 20])

    argskwargsreturnNc                    s   t    d S Nsuper__init__)selfr   r   	__class__ k/var/www/html/construction_image-detection-poc/venv/lib/python3.10/site-packages/torch/nn/modules/linear.pyr   +   s   zIdentity.__init__inputc                 C   s   |S r   r   r   r   r   r   r   forward.   s   zIdentity.forward)	__name__
__module____qualname____doc__r   r   r   r   __classcell__r   r   r   r   r      s    r   c                	       s   e Zd ZU dZddgZeed< eed< eed< 			ddedededdf fd	d
Z	dddZ
dedefddZdefddZ  ZS )r   aC  Applies an affine linear transformation to the incoming data: :math:`y = xA^T + b`.

    This module supports :ref:`TensorFloat32<tf32_on_ampere>`.

    On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.

    Args:
        in_features: size of each input sample
        out_features: size of each output sample
        bias: If set to ``False``, the layer will not learn an additive bias.
            Default: ``True``

    Shape:
        - Input: :math:`(*, H_\text{in})` where :math:`*` means any number of
          dimensions including none and :math:`H_\text{in} = \text{in\_features}`.
        - Output: :math:`(*, H_\text{out})` where all but the last dimension
          are the same shape as the input and :math:`H_\text{out} = \text{out\_features}`.

    Attributes:
        weight: the learnable weights of the module of shape
            :math:`(\text{out\_features}, \text{in\_features})`. The values are
            initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where
            :math:`k = \frac{1}{\text{in\_features}}`
        bias:   the learnable bias of the module of shape :math:`(\text{out\_features})`.
                If :attr:`bias` is ``True``, the values are initialized from
                :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
                :math:`k = \frac{1}{\text{in\_features}}`

    Examples::

        >>> m = nn.Linear(20, 30)
        >>> input = torch.randn(128, 20)
        >>> output = m(input)
        >>> print(output.size())
        torch.Size([128, 30])
    in_featuresout_featuresweightTNbiasr   c                    sr   ||d}t    || _|| _ttj||ffi || _|r-ttj|fi || _n| 	dd  | 
  d S Ndevicedtyper&   )r   r   r#   r$   r   torchemptyr%   r&   register_parameterreset_parameters)r   r#   r$   r&   r)   r*   factory_kwargsr   r   r   r   ]   s   

zLinear.__init__c                 C   sd   t j| jtdd | jd ur0t | j\}}|dkr#dt| nd}t | j| | d S d S )N   )ar   r   )r   kaiming_uniform_r%   mathsqrtr&   _calculate_fan_in_and_fan_outuniform_)r   fan_in_boundr   r   r   r.   r   s   
zLinear.reset_parametersr   c                 C   s   t || j| jS r   )Flinearr%   r&   r   r   r   r   r   |   s   zLinear.forwardc                 C   s    d| j  d| j d| jd u S )Nzin_features=, out_features=, bias=)r#   r$   r&   r   r   r   r   
extra_repr   s    zLinear.extra_reprTNNr   Nr   r   r    r!   __constants__int__annotations__r   boolr   r.   r   strr?   r"   r   r   r   r   r   2   s*   
 %

r   c                	       s6   e Zd Z			d	dedededdf fddZ  ZS )
NonDynamicallyQuantizableLinearTNr#   r$   r&   r   c                    s   t  j|||||d d S )N)r&   r)   r*   r   )r   r#   r$   r&   r)   r*   r   r   r   r      s   

z(NonDynamicallyQuantizableLinear.__init__r@   )r   r   r    rD   rF   r   r"   r   r   r   r   rH      s    rH   c                       s   e Zd ZU dZg dZeed< eed< eed< eed< 			ddededed	ed
df
 fddZ	dddZ
deded
efddZd
efddZ  ZS )r   a  Applies a bilinear transformation to the incoming data: :math:`y = x_1^T A x_2 + b`.

    Args:
        in1_features: size of each first input sample
        in2_features: size of each second input sample
        out_features: size of each output sample
        bias: If set to ``False``, the layer will not learn an additive bias.
            Default: ``True``

    Shape:
        - Input1: :math:`(*, H_\text{in1})` where :math:`H_\text{in1}=\text{in1\_features}` and
          :math:`*` means any number of additional dimensions including none. All but the last dimension
          of the inputs should be the same.
        - Input2: :math:`(*, H_\text{in2})` where :math:`H_\text{in2}=\text{in2\_features}`.
        - Output: :math:`(*, H_\text{out})` where :math:`H_\text{out}=\text{out\_features}`
          and all but the last dimension are the same shape as the input.

    Attributes:
        weight: the learnable weights of the module of shape
            :math:`(\text{out\_features}, \text{in1\_features}, \text{in2\_features})`.
            The values are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where
            :math:`k = \frac{1}{\text{in1\_features}}`
        bias:   the learnable bias of the module of shape :math:`(\text{out\_features})`.
                If :attr:`bias` is ``True``, the values are initialized from
                :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where
                :math:`k = \frac{1}{\text{in1\_features}}`

    Examples::

        >>> m = nn.Bilinear(20, 30, 40)
        >>> input1 = torch.randn(128, 20)
        >>> input2 = torch.randn(128, 30)
        >>> output = m(input1, input2)
        >>> print(output.size())
        torch.Size([128, 40])
    )in1_featuresin2_featuresr$   rI   rJ   r$   r%   TNr&   r   c                    sz   ||d}t    || _|| _|| _ttj|||ffi || _|r1ttj|fi || _	n| 
dd  |   d S r'   )r   r   rI   rJ   r$   r   r+   r,   r%   r&   r-   r.   )r   rI   rJ   r$   r&   r)   r*   r/   r   r   r   r      s   
	
zBilinear.__init__c                 C   sL   dt | jd }t| j| | | jd ur$t| j| | d S d S )Nr   )r3   r4   r%   sizer   r6   r&   )r   r9   r   r   r   r.      s
   
zBilinear.reset_parametersinput1input2c                 C   s   t ||| j| jS r   )r:   bilinearr%   r&   )r   rL   rM   r   r   r   r      s   zBilinear.forwardc              	   C   s(   d| j  d| j d| j d| jd u S )Nzin1_features=z, in2_features=r<   r=   )rI   rJ   r$   r&   r>   r   r   r   r?      s   zBilinear.extra_reprr@   rA   rB   r   r   r   r   r      s0   
 %
r   c                       s`   e Zd ZU dZeZeed< eed< 	ddede	ddf fdd	Z
d fd
dZdddZ  ZS )r   a  A :class:`torch.nn.Linear` module where `in_features` is inferred.

    In this module, the `weight` and `bias` are of :class:`torch.nn.UninitializedParameter`
    class. They will be initialized after the first call to ``forward`` is done and the
    module will become a regular :class:`torch.nn.Linear` module. The ``in_features`` argument
    of the :class:`Linear` is inferred from the ``input.shape[-1]``.

    Check the :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation
    on lazy modules and their limitations.

    Args:
        out_features: size of each output sample
        bias: If set to ``False``, the layer will not learn an additive bias.
            Default: ``True``

    Attributes:
        weight: the learnable weights of the module of shape
            :math:`(\text{out\_features}, \text{in\_features})`. The values are
            initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where
            :math:`k = \frac{1}{\text{in\_features}}`
        bias:   the learnable bias of the module of shape :math:`(\text{out\_features})`.
                If :attr:`bias` is ``True``, the values are initialized from
                :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
                :math:`k = \frac{1}{\text{in\_features}}`


    r%   r&   TNr$   r   c                    sL   ||d}t  ddd tdi || _|| _|r$tdi || _d S d S )Nr(   r   Fr   )r   r   r   r%   r$   r&   )r   r$   r&   r)   r*   r/   r   r   r   r     s   
zLazyLinear.__init__c                    s(   |   s| jdkrt   d S d S d S )Nr   )has_uninitialized_paramsr#   r   r.   r>   r   r   r   r.     s   zLazyLinear.reset_parametersc                 C   s|   |   r<t * |jd | _| j| j| jf | jd ur&| j| jf | 	  W d    d S 1 s5w   Y  d S d S )N)
rO   r+   no_gradshaper#   r%   materializer$   r&   r.   r   r   r   r   initialize_parameters  s   


"z LazyLinear.initialize_parametersr@   rA   )r   r   r    r!   r   cls_to_becomer   rE   rD   rF   r   r.   rT   r"   r   r   r   r   r      s   
 r   )r3   typingr   r+   r   torch.nnr   r:   r   torch.nn.parameterr   r   lazyr	   moduler
   __all__r   r   rH   r   r   r   r   r   r   <module>   s   VT