[Compiled CPython bytecode cache of ``torch/nn/modules/conv.py``; the recoverable ``co_filename`` is ``/var/www/html/construction_image-detection-poc/venv/lib/python3.10/site-packages/torch/nn/modules/conv.py``. The marshalled code objects are not human-readable source; what follows keeps only the information that can be recovered from the embedded string constants.]

The module imports from ``torch`` (``Tensor``), ``torch._torch_docs`` (``reproducibility_notes``), ``torch.nn`` (``functional``, ``init``), ``torch.nn.common_types`` (``_size_1_t``, ``_size_2_t``, ``_size_3_t``), ``torch.nn.parameter`` (``Parameter``, ``UninitializedParameter``), ``typing``/``typing_extensions``, and the sibling ``lazy``, ``module`` and ``utils`` submodules (``LazyModuleMixin``, ``Module``, ``_pair``, ``_reverse_repeat_tuple``, ``_single``, ``_triple``), and exports::

    __all__ = [
        "Conv1d", "Conv2d", "Conv3d",
        "ConvTranspose1d", "ConvTranspose2d", "ConvTranspose3d",
        "LazyConv1d", "LazyConv2d", "LazyConv3d",
        "LazyConvTranspose1d", "LazyConvTranspose2d", "LazyConvTranspose3d",
    ]
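
The payload itself can only be read back by a matching CPython interpreter. A minimal sketch of how such a cache file can be inspected, assuming a CPython 3.10 interpreter and the hypothetical file name ``conv.cpython-310.pyc`` (the 16-byte header layout applies to CPython 3.7+)::

    import dis
    import marshal

    # A .pyc file is a 16-byte header (magic, flags, mtime, source size)
    # followed by one marshalled module code object.
    with open("conv.cpython-310.pyc", "rb") as f:
        header = f.read(16)
        code = marshal.loads(f.read())

    # The class bodies (and the docstrings quoted below) live in co_consts
    # of the nested code objects.
    print([c.co_name for c in code.co_consts if hasattr(c, "co_name")])
    dis.dis(code)  # full bytecode disassembly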
Two shared documentation fragments, stored in the ``convolution_notes`` dictionary, are recoverable verbatim; they are substituted at import time into the ``{groups_note}`` and ``{depthwise_separable_note}`` placeholders that appear in the class docstrings below (``{cudnn_reproducibility_note}`` comes from ``torch._torch_docs.reproducibility_notes``).

``groups_note``:

* :attr:`groups` controls the connections between inputs and outputs.
      :attr:`in_channels` and :attr:`out_channels` must both be divisible by
      :attr:`groups`. For example,

        * At groups=1, all inputs are convolved to all outputs.
        * At groups=2, the operation becomes equivalent to having two conv
          layers side by side, each seeing half the input channels
          and producing half the output channels, and both subsequently
          concatenated.
        * At groups= :attr:`in_channels`, each input channel is convolved with
          its own set of filters (of size
          :math:`\frac{\text{out\_channels}}{\text{in\_channels}}`).

``depthwise_separable_note``:

When `groups == in_channels` and `out_channels == K * in_channels`,
        where `K` is a positive integer, this operation is also known as a "depthwise convolution".

        In other words, for an input of size :math:`(N, C_{in}, L_{in})`,
        a depthwise convolution with a depthwise multiplier `K` can be performed with the arguments
        :math:`(C_\text{in}=C_\text{in}, C_\text{out}=C_\text{in} \times \text{K}, ..., \text{groups}=C_\text{in})`.
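
As a concrete illustration of the depthwise note above, a depthwise convolution with multiplier `K` is usually followed by a 1x1 "pointwise" convolution to form a depthwise-separable layer; a minimal sketch (the channel counts are made up for the example)::

    import torch
    from torch import nn

    C_in, K = 16, 2
    depthwise = nn.Conv2d(C_in, C_in * K, kernel_size=3, padding=1, groups=C_in)
    pointwise = nn.Conv2d(C_in * K, 64, kernel_size=1)  # mixes channels only

    x = torch.randn(8, C_in, 32, 32)
    y = pointwise(depthwise(x))
    print(y.shape)  # torch.Size([8, 64, 32, 32])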

``class _ConvNd(Module)`` -- the shared base class of all convolution modules. Its code is not recoverable from the bytecode, but the surviving constants show that:

* ``__constants__`` covers ``stride``, ``padding``, ``dilation``, ``groups``, ``padding_mode``, ``output_padding``, ``in_channels``, ``out_channels`` and ``kernel_size``, and an abstract ``_conv_forward(input, weight, bias)`` hook is declared.
* ``__init__`` validates its arguments ("groups must be a positive integer", "in_channels must be divisible by groups", "out_channels must be divisible by groups"), accepts the padding strings ``'valid'`` and ``'same'`` (rejecting ``'same'`` for strided convolutions) as well as the padding modes ``'zeros'``, ``'reflect'``, ``'replicate'`` and ``'circular'``, precomputes ``_reversed_padding_repeated_twice`` (including the asymmetric split needed for ``padding='same'``), and allocates the ``weight`` tensor plus the optional ``bias``.
* ``reset_parameters`` initializes ``weight`` with ``init.kaiming_uniform_(..., a=math.sqrt(5))`` and draws ``bias`` uniformly within a bound of ``1 / sqrt(fan_in)``; ``extra_repr`` and ``__setstate__`` (which restores ``padding_mode='zeros'`` on old checkpoints) are also defined.
ddf fddZdededee fddZdedefddZ  ZS )r   a  Applies a 1D convolution over an input signal composed of several input
    planes.

    In the simplest case, the output value of the layer with input size
    :math:`(N, C_{\text{in}}, L)` and output :math:`(N, C_{\text{out}}, L_{\text{out}})` can be
    precisely described as:

    .. math::
        \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
        \sum_{k = 0}^{C_{in} - 1} \text{weight}(C_{\text{out}_j}, k)
        \star \text{input}(N_i, k)

    where :math:`\star` is the valid `cross-correlation`_ operator,
    :math:`N` is a batch size, :math:`C` denotes a number of channels,
    :math:`L` is a length of signal sequence.

    This module supports :ref:`TensorFloat32<tf32_on_ampere>`.

    On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.

    * :attr:`stride` controls the stride for the cross-correlation, a single
      number or a one-element tuple.

    * :attr:`padding` controls the amount of padding applied to the input. It
      can be either a string {{'valid', 'same'}} or a tuple of ints giving the
      amount of implicit padding applied on both sides.

    * :attr:`dilation` controls the spacing between the kernel points; also
      known as the à trous algorithm. It is harder to describe, but this `link`_
      has a nice visualization of what :attr:`dilation` does.

    {groups_note}

    Note:
        {depthwise_separable_note}
    Note:
        {cudnn_reproducibility_note}

    Note:
        ``padding='valid'`` is the same as no padding. ``padding='same'`` pads
        the input so the output has the shape as the input. However, this mode
        doesn't support any stride values other than 1.

    Note:
        This module supports complex data types i.e. ``complex32, complex64, complex128``.

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int, tuple or str, optional): Padding added to both sides of
            the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel
            elements. Default: 1
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the
            output. Default: ``True``
        padding_mode (str, optional): ``'zeros'``, ``'reflect'``,
            ``'replicate'`` or ``'circular'``. Default: ``'zeros'``


    Shape:
        - Input: :math:`(N, C_{in}, L_{in})` or :math:`(C_{in}, L_{in})`
        - Output: :math:`(N, C_{out}, L_{out})` or :math:`(C_{out}, L_{out})`, where

          .. math::
              L_{out} = \left\lfloor\frac{L_{in} + 2 \times \text{padding} - \text{dilation}
                        \times (\text{kernel\_size} - 1) - 1}{\text{stride}} + 1\right\rfloor

    Attributes:
        weight (Tensor): the learnable weights of the module of shape
            :math:`(\text{out\_channels},
            \frac{\text{in\_channels}}{\text{groups}}, \text{kernel\_size})`.
            The values of these weights are sampled from
            :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
            :math:`k = \frac{groups}{C_\text{in} * \text{kernel\_size}}`
        bias (Tensor):   the learnable bias of the module of shape
            (out_channels). If :attr:`bias` is ``True``, then the values of these weights are
            sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
            :math:`k = \frac{groups}{C_\text{in} * \text{kernel\_size}}`

    Examples::

        >>> m = nn.Conv1d(16, 33, 3, stride=2)
        >>> input = torch.randn(20, 16, 50)
        >>> output = m(input)

    .. _cross-correlation:
        https://en.wikipedia.org/wiki/Cross-correlation

    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
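
The :math:`L_{out}` formula in the Shape section above can be checked directly against the module; a small sketch reusing the docstring's own example sizes::

    import math

    import torch
    from torch import nn

    L_in, kernel_size, stride, padding, dilation = 50, 3, 2, 0, 1
    L_out = math.floor(
        (L_in + 2 * padding - dilation * (kernel_size - 1) - 1) / stride + 1
    )

    m = nn.Conv1d(16, 33, kernel_size, stride=stride)
    out = m(torch.randn(20, 16, L_in))
    assert out.shape == (20, 33, L_out)  # L_out == 24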

(The Conv1d methods ``__init__``, ``_conv_forward`` and ``forward`` follow in the bytecode; ``_conv_forward`` calls ``F.conv1d``, first padding the input with ``F.pad`` and ``_reversed_padding_repeated_twice`` when ``padding_mode`` is not ``'zeros'``.)

``class Conv2d(_ConvNd)`` -- recovered docstring:

    Applies a 2D convolution over an input signal composed of several input
    planes.

    In the simplest case, the output value of the layer with input size
    :math:`(N, C_{\text{in}}, H, W)` and output :math:`(N, C_{\text{out}}, H_{\text{out}}, W_{\text{out}})`
    can be precisely described as:

    .. math::
        \text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
        \sum_{k = 0}^{C_{\text{in}} - 1} \text{weight}(C_{\text{out}_j}, k) \star \text{input}(N_i, k)


    where :math:`\star` is the valid 2D `cross-correlation`_ operator,
    :math:`N` is a batch size, :math:`C` denotes a number of channels,
    :math:`H` is a height of input planes in pixels, and :math:`W` is
    width in pixels.

    This module supports :ref:`TensorFloat32<tf32_on_ampere>`.

    On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.

    * :attr:`stride` controls the stride for the cross-correlation, a single
      number or a tuple.

    * :attr:`padding` controls the amount of padding applied to the input. It
      can be either a string {{'valid', 'same'}} or an int / a tuple of ints giving the
      amount of implicit padding applied on both sides.

    * :attr:`dilation` controls the spacing between the kernel points; also
      known as the à trous algorithm. It is harder to describe, but this `link`_
      has a nice visualization of what :attr:`dilation` does.


    {groups_note}

    The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:

        - a single ``int`` -- in which case the same value is used for the height and width dimension
        - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
          and the second `int` for the width dimension

    Note:
        {depthwise_separable_note}

    Note:
        {cudnn_reproducibility_note}

    Note:
        ``padding='valid'`` is the same as no padding. ``padding='same'`` pads
        the input so the output has the shape as the input. However, this mode
        doesn't support any stride values other than 1.

    Note:
        This module supports complex data types i.e. ``complex32, complex64, complex128``.

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int, tuple or str, optional): Padding added to all four sides of
            the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the
            output. Default: ``True``
        padding_mode (str, optional): ``'zeros'``, ``'reflect'``,
            ``'replicate'`` or ``'circular'``. Default: ``'zeros'``

    Shape:
        - Input: :math:`(N, C_{in}, H_{in}, W_{in})` or :math:`(C_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C_{out}, H_{out}, W_{out})` or :math:`(C_{out}, H_{out}, W_{out})`, where

          .. math::
              H_{out} = \left\lfloor\frac{H_{in}  + 2 \times \text{padding}[0] - \text{dilation}[0]
                        \times (\text{kernel\_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor

          .. math::
              W_{out} = \left\lfloor\frac{W_{in}  + 2 \times \text{padding}[1] - \text{dilation}[1]
                        \times (\text{kernel\_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor

    Attributes:
        weight (Tensor): the learnable weights of the module of shape
            :math:`(\text{out\_channels}, \frac{\text{in\_channels}}{\text{groups}},`
            :math:`\text{kernel\_size[0]}, \text{kernel\_size[1]})`.
            The values of these weights are sampled from
            :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
            :math:`k = \frac{groups}{C_\text{in} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`
        bias (Tensor):   the learnable bias of the module of shape
            (out_channels). If :attr:`bias` is ``True``,
            then the values of these weights are
            sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
            :math:`k = \frac{groups}{C_\text{in} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`

    Examples:

        >>> # With square kernels and equal stride
        >>> m = nn.Conv2d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
        >>> # non-square kernels and unequal stride and with padding and dilation
        >>> m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))
        >>> input = torch.randn(20, 16, 50, 100)
        >>> output = m(input)

    .. _cross-correlation:
        https://en.wikipedia.org/wiki/Cross-correlation

    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
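
The weight shape :math:`(\text{out\_channels}, \frac{\text{in\_channels}}{\text{groups}}, \ldots)` given above is easy to verify for a grouped convolution; a minimal sketch (channel counts chosen only for illustration)::

    from torch import nn

    m = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, groups=4)
    # Each of the 4 groups sees 16 / 4 = 4 input channels.
    print(m.weight.shape)  # torch.Size([32, 4, 3, 3])
    print(m.bias.shape)    # torch.Size([32])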

(The Conv2d methods ``__init__``, ``_conv_forward`` and ``forward`` follow; ``_conv_forward`` dispatches to ``F.conv2d``, with the same ``F.pad`` pre-step for non-``'zeros'`` padding modes.)

``class Conv3d(_ConvNd)`` -- recovered docstring:

    Applies a 3D convolution over an input signal composed of several input
    planes.

    In the simplest case, the output value of the layer with input size :math:`(N, C_{in}, D, H, W)`
    and output :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` can be precisely described as:

    .. math::
        out(N_i, C_{out_j}) = bias(C_{out_j}) +
                                \sum_{k = 0}^{C_{in} - 1} weight(C_{out_j}, k) \star input(N_i, k)

    where :math:`\star` is the valid 3D `cross-correlation`_ operator

    This module supports :ref:`TensorFloat32<tf32_on_ampere>`.

    On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.

    * :attr:`stride` controls the stride for the cross-correlation.

    * :attr:`padding` controls the amount of padding applied to the input. It
      can be either a string {{'valid', 'same'}} or a tuple of ints giving the
      amount of implicit padding applied on both sides.

    * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
      It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.


    {groups_note}

    The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:

        - a single ``int`` -- in which case the same value is used for the depth, height and width dimension
        - a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,
          the second `int` for the height dimension and the third `int` for the width dimension

    Note:
        {depthwise_separable_note}

    Note:
        {cudnn_reproducibility_note}

    Note:
        ``padding='valid'`` is the same as no padding. ``padding='same'`` pads
        the input so the output has the shape as the input. However, this mode
        doesn't support any stride values other than 1.

    Note:
        This module supports complex data types i.e. ``complex32, complex64, complex128``.

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int, tuple or str, optional): Padding added to all six sides of
            the input. Default: 0
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
        padding_mode (str, optional): ``'zeros'``, ``'reflect'``, ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
    a	  

    Shape:
        - Input: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})` or :math:`(C_{in}, D_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` or :math:`(C_{out}, D_{out}, H_{out}, W_{out})`,
          where

          .. math::
              D_{out} = \left\lfloor\frac{D_{in} + 2 \times \text{padding}[0] - \text{dilation}[0]
                    \times (\text{kernel\_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor

          .. math::
              H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[1] - \text{dilation}[1]
                    \times (\text{kernel\_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor

          .. math::
              W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[2] - \text{dilation}[2]
                    \times (\text{kernel\_size}[2] - 1) - 1}{\text{stride}[2]} + 1\right\rfloor

    Attributes:
        weight (Tensor): the learnable weights of the module of shape
                         :math:`(\text{out\_channels}, \frac{\text{in\_channels}}{\text{groups}},`
                         :math:`\text{kernel\_size[0]}, \text{kernel\_size[1]}, \text{kernel\_size[2]})`.
                         The values of these weights are sampled from
                         :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
                         :math:`k = \frac{groups}{C_\text{in} * \prod_{i=0}^{2}\text{kernel\_size}[i]}`
        bias (Tensor):   the learnable bias of the module of shape (out_channels). If :attr:`bias` is ``True``,
                         then the values of these weights are
                         sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
                         :math:`k = \frac{groups}{C_\text{in} * \prod_{i=0}^{2}\text{kernel\_size}[i]}`

    Examples::

        >>> # With square kernels and equal stride
        >>> m = nn.Conv3d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nn.Conv3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(4, 2, 0))
        >>> input = torch.randn(20, 16, 10, 50, 100)
        >>> output = m(input)

    .. _cross-correlation:
        https://en.wikipedia.org/wiki/Cross-correlation

    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md

(The Conv3d methods ``__init__``, ``_conv_forward`` and ``forward`` follow; ``_conv_forward`` dispatches to ``F.conv3d``.)

``class _ConvTransposeNd(_ConvNd)`` -- the shared base class of the transposed convolutions. The surviving constants show that its ``__init__`` rejects every ``padding_mode`` except ``'zeros'`` ("Only \"zeros\" padding mode is supported for ..."), and that it defines ``_output_padding``, which -- given an optional ``output_size`` -- checks that the requested size has the right number of elements ("for {n}D input, output_size must have {n} or {n+2} elements") and lies in the valid range ("requested an output size of ..., but valid sizes range from ... to ... (for an input of ...)") before returning the per-dimension output padding used by ``forward``.

``class ConvTranspose1d(_ConvTransposeNd)`` -- recovered docstring:

    Applies a 1D transposed convolution operator over an input image
    composed of several input planes.

    This module can be seen as the gradient of Conv1d with respect to its input.
    It is also known as a fractionally-strided convolution or
    a deconvolution (although it is not an actual deconvolution operation as it does
    not compute a true inverse of convolution). For more information, see the visualizations
    `here`_ and the `Deconvolutional Networks`_ paper.

    This module supports :ref:`TensorFloat32<tf32_on_ampere>`.

    On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.

    * :attr:`stride` controls the stride for the cross-correlation.

    * :attr:`padding` controls the amount of implicit zero padding on both
      sides for ``dilation * (kernel_size - 1) - padding`` number of points. See note
      below for details.

    * :attr:`output_padding` controls the additional size added to one side
      of the output shape. See note below for details.

    * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
      It is harder to describe, but the link `here`_ has a nice visualization of what :attr:`dilation` does.

    {groups_note}

    Note:
        The :attr:`padding` argument effectively adds ``dilation * (kernel_size - 1) - padding``
        amount of zero padding to both sizes of the input. This is set so that
        when a :class:`~torch.nn.Conv1d` and a :class:`~torch.nn.ConvTranspose1d`
        are initialized with same parameters, they are inverses of each other in
        regard to the input and output shapes. However, when ``stride > 1``,
        :class:`~torch.nn.Conv1d` maps multiple input shapes to the same output
        shape. :attr:`output_padding` is provided to resolve this ambiguity by
        effectively increasing the calculated output shape on one side. Note
        that :attr:`output_padding` is only used to find output shape, but does
        not actually add zero-padding to output.

    Note:
        In some circumstances when using the CUDA backend with CuDNN, this operator
        may select a nondeterministic algorithm to increase performance. If this is
        undesirable, you can try to make the operation deterministic (potentially at
        a performance cost) by setting ``torch.backends.cudnn.deterministic =
        True``.
        Please see the notes on :doc:`/notes/randomness` for background.


    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding
            will be added to both sides of the input. Default: 0
        output_padding (int or tuple, optional): Additional size added to one side
            of the output shape. Default: 0
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1

    Shape:
        - Input: :math:`(N, C_{in}, L_{in})` or :math:`(C_{in}, L_{in})`
        - Output: :math:`(N, C_{out}, L_{out})` or :math:`(C_{out}, L_{out})`, where

          .. math::
              L_{out} = (L_{in} - 1) \times \text{stride} - 2 \times \text{padding} + \text{dilation}
                        \times (\text{kernel\_size} - 1) + \text{output\_padding} + 1

    Attributes:
        weight (Tensor): the learnable weights of the module of shape
                         :math:`(\text{in\_channels}, \frac{\text{out\_channels}}{\text{groups}},`
                         :math:`\text{kernel\_size})`.
                         The values of these weights are sampled from
                         :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
                         :math:`k = \frac{groups}{C_\text{out} * \text{kernel\_size}}`
        bias (Tensor):   the learnable bias of the module of shape (out_channels).
                         If :attr:`bias` is ``True``, then the values of these weights are
                         sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
                         :math:`k = \frac{groups}{C_\text{out} * \text{kernel\_size}}`

    .. _`here`:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md

    .. _`Deconvolutional Networks`:
        https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf
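
The transposed-convolution length formula quoted in the Shape section above can likewise be checked numerically; a small sketch (sizes chosen so that it inverts the Conv1d example shape)::

    import torch
    from torch import nn

    L_in, kernel_size, stride, padding, output_padding, dilation = 24, 3, 2, 0, 1, 1
    L_out = (
        (L_in - 1) * stride
        - 2 * padding
        + dilation * (kernel_size - 1)
        + output_padding
        + 1
    )

    m = nn.ConvTranspose1d(33, 16, kernel_size, stride=stride, output_padding=output_padding)
    out = m(torch.randn(20, 33, L_in))
    assert out.shape == (20, 16, L_out)  # L_out == 50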

(The ConvTranspose1d methods ``__init__`` and ``forward`` follow; ``forward`` raises "Only `zeros` padding mode is supported for ConvTranspose1d" otherwise, resolves the output padding via ``_output_padding`` and calls ``F.conv_transpose1d``.)

``class ConvTranspose2d(_ConvTransposeNd)`` -- recovered docstring:

    Applies a 2D transposed convolution operator over an input image
    composed of several input planes.

    This module can be seen as the gradient of Conv2d with respect to its input.
    It is also known as a fractionally-strided convolution or
    a deconvolution (although it is not an actual deconvolution operation as it does
    not compute a true inverse of convolution). For more information, see the visualizations
    `here`_ and the `Deconvolutional Networks`_ paper.

    This module supports :ref:`TensorFloat32<tf32_on_ampere>`.

    On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.

    * :attr:`stride` controls the stride for the cross-correlation.

    * :attr:`padding` controls the amount of implicit zero padding on both
      sides for ``dilation * (kernel_size - 1) - padding`` number of points. See note
      below for details.

    * :attr:`output_padding` controls the additional size added to one side
      of the output shape. See note below for details.

    * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
      It is harder to describe, but the link `here`_ has a nice visualization of what :attr:`dilation` does.

    {groups_note}

    The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`output_padding`
    can either be:

        - a single ``int`` -- in which case the same value is used for the height and width dimensions
        - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
          and the second `int` for the width dimension

    Note:
        The :attr:`padding` argument effectively adds ``dilation * (kernel_size - 1) - padding``
        amount of zero padding to both sizes of the input. This is set so that
        when a :class:`~torch.nn.Conv2d` and a :class:`~torch.nn.ConvTranspose2d`
        are initialized with same parameters, they are inverses of each other in
        regard to the input and output shapes. However, when ``stride > 1``,
        :class:`~torch.nn.Conv2d` maps multiple input shapes to the same output
        shape. :attr:`output_padding` is provided to resolve this ambiguity by
        effectively increasing the calculated output shape on one side. Note
        that :attr:`output_padding` is only used to find output shape, but does
        not actually add zero-padding to output.

    Note:
        {cudnn_reproducibility_note}

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding
            will be added to both sides of each dimension in the input. Default: 0
        output_padding (int or tuple, optional): Additional size added to one side
            of each dimension in the output shape. Default: 0
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1

    Shape:
        - Input: :math:`(N, C_{in}, H_{in}, W_{in})` or :math:`(C_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C_{out}, H_{out}, W_{out})` or :math:`(C_{out}, H_{out}, W_{out})`, where

        .. math::
              H_{out} = (H_{in} - 1) \times \text{stride}[0] - 2 \times \text{padding}[0] + \text{dilation}[0]
                        \times (\text{kernel\_size}[0] - 1) + \text{output\_padding}[0] + 1
        .. math::
              W_{out} = (W_{in} - 1) \times \text{stride}[1] - 2 \times \text{padding}[1] + \text{dilation}[1]
                        \times (\text{kernel\_size}[1] - 1) + \text{output\_padding}[1] + 1

    Attributes:
        weight (Tensor): the learnable weights of the module of shape
                         :math:`(\text{in\_channels}, \frac{\text{out\_channels}}{\text{groups}},`
                         :math:`\text{kernel\_size[0]}, \text{kernel\_size[1]})`.
                         The values of these weights are sampled from
                         :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
                         :math:`k = \frac{groups}{C_\text{out} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`
        bias (Tensor):   the learnable bias of the module of shape (out_channels)
                         If :attr:`bias` is ``True``, then the values of these weights are
                         sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
                         :math:`k = \frac{groups}{C_\text{out} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`

    Examples::

        >>> # With square kernels and equal stride
        >>> m = nn.ConvTranspose2d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nn.ConvTranspose2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2))
        >>> input = torch.randn(20, 16, 50, 100)
        >>> output = m(input)
        >>> # exact output size can be also specified as an argument
        >>> input = torch.randn(1, 16, 12, 12)
        >>> downsample = nn.Conv2d(16, 16, 3, stride=2, padding=1)
        >>> upsample = nn.ConvTranspose2d(16, 16, 3, stride=2, padding=1)
        >>> h = downsample(input)
        >>> h.size()
        torch.Size([1, 16, 6, 6])
        >>> output = upsample(h, output_size=input.size())
        >>> output.size()
        torch.Size([1, 16, 12, 12])

    .. _`here`:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md

    .. _`Deconvolutional Networks`:
        https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf

(The ConvTranspose2d methods ``__init__`` and ``forward`` follow, dispatching to ``F.conv_transpose2d``.)

``class ConvTranspose3d(_ConvTransposeNd)`` -- recovered docstring:

    Applies a 3D transposed convolution operator over an input image composed of several input
    planes.
    The transposed convolution operator multiplies each input value element-wise by a learnable kernel,
    and sums over the outputs from all input feature planes.

    This module can be seen as the gradient of Conv3d with respect to its input.
    It is also known as a fractionally-strided convolution or
    a deconvolution (although it is not an actual deconvolution operation as it does
    not compute a true inverse of convolution). For more information, see the visualizations
    `here`_ and the `Deconvolutional Networks`_ paper.

    This module supports :ref:`TensorFloat32<tf32_on_ampere>`.

    On certain ROCm devices, when using float16 inputs this module will use :ref:`different precision<fp16_on_mi200>` for backward.

    * :attr:`stride` controls the stride for the cross-correlation.

    * :attr:`padding` controls the amount of implicit zero padding on both
      sides for ``dilation * (kernel_size - 1) - padding`` number of points. See note
      below for details.

    * :attr:`output_padding` controls the additional size added to one side
      of the output shape. See note below for details.

    * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
      It is harder to describe, but the link `here`_ has a nice visualization of what :attr:`dilation` does.

    {groups_note}

    The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`output_padding`
    can either be:

        - a single ``int`` -- in which case the same value is used for the depth, height and width dimensions
        - a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,
          the second `int` for the height dimension and the third `int` for the width dimension

    Note:
        The :attr:`padding` argument effectively adds ``dilation * (kernel_size - 1) - padding``
        amount of zero padding to both sizes of the input. This is set so that
        when a :class:`~torch.nn.Conv3d` and a :class:`~torch.nn.ConvTranspose3d`
        are initialized with same parameters, they are inverses of each other in
        regard to the input and output shapes. However, when ``stride > 1``,
        :class:`~torch.nn.Conv3d` maps multiple input shapes to the same output
        shape. :attr:`output_padding` is provided to resolve this ambiguity by
        effectively increasing the calculated output shape on one side. Note
        that :attr:`output_padding` is only used to find output shape, but does
        not actually add zero-padding to output.

    Note:
        {cudnn_reproducibility_note}

    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): ``dilation * (kernel_size - 1) - padding`` zero-padding
            will be added to both sides of each dimension in the input. Default: 0
        output_padding (int or tuple, optional): Additional size added to one side
            of each dimension in the output shape. Default: 0
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1

    Shape:
        - Input: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})` or :math:`(C_{in}, D_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` or
          :math:`(C_{out}, D_{out}, H_{out}, W_{out})`, where

        .. math::
              D_{out} = (D_{in} - 1) \times \text{stride}[0] - 2 \times \text{padding}[0] + \text{dilation}[0]
                        \times (\text{kernel\_size}[0] - 1) + \text{output\_padding}[0] + 1
        .. math::
              H_{out} = (H_{in} - 1) \times \text{stride}[1] - 2 \times \text{padding}[1] + \text{dilation}[1]
                        \times (\text{kernel\_size}[1] - 1) + \text{output\_padding}[1] + 1
        .. math::
              W_{out} = (W_{in} - 1) \times \text{stride}[2] - 2 \times \text{padding}[2] + \text{dilation}[2]
                        \times (\text{kernel\_size}[2] - 1) + \text{output\_padding}[2] + 1


    Attributes:
        weight (Tensor): the learnable weights of the module of shape
                         :math:`(\text{in\_channels}, \frac{\text{out\_channels}}{\text{groups}},`
                         :math:`\text{kernel\_size[0]}, \text{kernel\_size[1]}, \text{kernel\_size[2]})`.
                         The values of these weights are sampled from
                         :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
                         :math:`k = \frac{groups}{C_\text{out} * \prod_{i=0}^{2}\text{kernel\_size}[i]}`
        bias (Tensor):   the learnable bias of the module of shape (out_channels)
                         If :attr:`bias` is ``True``, then the values of these weights are
                         sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
                         :math:`k = \frac{groups}{C_\text{out} * \prod_{i=0}^{2}\text{kernel\_size}[i]}`

    Examples::

        >>> # With square kernels and equal stride
        >>> m = nn.ConvTranspose3d(16, 33, 3, stride=2)
        >>> # non-square kernels and unequal stride and with padding
        >>> m = nn.ConvTranspose3d(16, 33, (3, 5, 2), stride=(2, 1, 1), padding=(0, 4, 2))
        >>> input = torch.randn(20, 16, 10, 50, 100)
        >>> output = m(input)

    .. _`here`:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md

    .. _`Deconvolutional Networks`:
        https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf

(The ConvTranspose3d methods ``__init__`` and ``forward`` follow, dispatching to ``F.conv_transpose3d``.)

``class _ConvTransposeMixin(_ConvTransposeNd)`` -- kept only for backward compatibility and marked deprecated with ``FutureWarning``: "`_ConvTransposeMixin` is a deprecated internal class. Please consider using public APIs."; its ``__init__`` simply forwards to the parent.

``class _LazyConvXdMixin(LazyModuleMixin)`` -- the mixin behind the lazy variants. The surviving constants show that ``initialize_parameters`` infers ``in_channels`` from the first input seen, re-checks "in_channels must be divisible by groups", materializes the ``UninitializedParameter`` ``weight`` (and ``bias``) to its final shape, and then calls ``reset_parameters``; ``_get_in_channels`` raises "Expected {n}D (unbatched) or {n+1}D (batched) input to {class}, but got input of size: {shape}" for inputs of the wrong rank, and ``_get_num_spatial_dims`` is left to the subclasses.

``LazyConv1d``, ``LazyConv2d``, ``LazyConv3d``, ``LazyConvTranspose1d``, ``LazyConvTranspose2d`` and ``LazyConvTranspose3d`` combine this mixin with the corresponding eager class. Their recovered docstrings all follow the same pattern: "A :class:`torch.nn.ConvXd` (or :class:`torch.nn.ConvTransposeXd`) module with lazy initialization of the ``in_channels`` argument. The ``in_channels`` argument is inferred from ``input.size(1)``; the attributes that will be lazily initialized are `weight` and `bias`. Check :class:`torch.nn.modules.lazy.LazyModuleMixin` for further documentation on lazy modules and their limitations." The remaining arguments (``out_channels``, ``kernel_size``, ``stride``, ``padding``, ``output_padding`` for the transposed variants, ``dilation``, ``groups``, ``bias``, ``padding_mode``) keep the defaults documented for the eager classes above, each lazy class sets ``cls_to_become`` to its eager counterpart, and ``_get_num_spatial_dims`` returns 1, 2 or 3 accordingly.

(The file ends with the marshalled module trailer: the constant pool and name table of the objects listed above.)
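
As a usage illustration of the lazy variants described above (the shapes are made up for the example; ``in_channels`` is deliberately not passed)::

    import torch
    from torch import nn

    m = nn.LazyConv2d(out_channels=33, kernel_size=3)  # in_channels not given
    x = torch.randn(20, 16, 50, 100)
    out = m(x)                  # first call infers in_channels == 16

    print(type(m).__name__)     # 'Conv2d' -- cls_to_become swapped the class in
    print(m.weight.shape)       # torch.Size([33, 16, 3, 3])
    print(out.shape)            # torch.Size([20, 33, 48, 98])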