from typing import Union

from torch import Tensor
from torch.types import _size

from .module import Module

__all__ = ["Flatten", "Unflatten"]


class Flatten(Module):
    r"""
    Flattens a contiguous range of dims into a tensor.

    For use with :class:`~nn.Sequential`, see :meth:`torch.flatten` for details.

    Shape:
        - Input: :math:`(*, S_{\text{start}},..., S_{i}, ..., S_{\text{end}}, *)`,
          where :math:`S_{i}` is the size at dimension :math:`i` and :math:`*` means any
          number of dimensions including none.
        - Output: :math:`(*, \prod_{i=\text{start}}^{\text{end}} S_{i}, *)`.

    Args:
        start_dim: first dim to flatten (default = 1).
        end_dim: last dim to flatten (default = -1).

    Examples::
        >>> input = torch.randn(32, 1, 5, 5)
        >>> # With default parameters
        >>> m = nn.Flatten()
        >>> output = m(input)
        >>> output.size()
        torch.Size([32, 25])
        >>> # With non-default parameters
        >>> m = nn.Flatten(0, 2)
        >>> output = m(input)
        >>> output.size()
        torch.Size([160, 5])
    """

    __constants__ = ["start_dim", "end_dim"]
    start_dim: int
    end_dim: int

    def __init__(self, start_dim: int = 1, end_dim: int = -1) -> None:
        super().__init__()
        self.start_dim = start_dim
        self.end_dim = end_dim

    def forward(self, input: Tensor) -> Tensor:
        return input.flatten(self.start_dim, self.end_dim)

    def extra_repr(self) -> str:
        return f"start_dim={self.start_dim}, end_dim={self.end_dim}"
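
# A minimal usage sketch (illustrative only, assuming ``torch`` is importable):
# ``Flatten(start_dim, end_dim)`` produces the same result as calling
# ``torch.flatten(input, start_dim, end_dim)`` directly.
#
#     import torch
#     from torch import nn
#
#     x = torch.randn(32, 1, 5, 5)
#     assert torch.equal(nn.Flatten()(x), torch.flatten(x, 1, -1))   # shape (32, 25)
#     assert nn.Flatten(0, 2)(x).shape == torch.Size([160, 5])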


class Unflatten(Module):
    r"""
    Unflattens a tensor dim, expanding it to a desired shape. For use with :class:`~nn.Sequential`.

    * :attr:`dim` specifies the dimension of the input tensor to be unflattened, and it can
      be either `int` or `str` when `Tensor` or `NamedTensor` is used, respectively.

    * :attr:`unflattened_size` is the new shape of the unflattened dimension of the tensor, and it can be
      a `tuple` of ints, a `list` of ints, or `torch.Size` for `Tensor` input; a `NamedShape`
      (tuple of `(name, size)` tuples) for `NamedTensor` input.

    Shape:
        - Input: :math:`(*, S_{\text{dim}}, *)`, where :math:`S_{\text{dim}}` is the size at
          dimension :attr:`dim` and :math:`*` means any number of dimensions including none.
        - Output: :math:`(*, U_1, ..., U_n, *)`, where :math:`U` = :attr:`unflattened_size` and
          :math:`\prod_{i=1}^n U_i = S_{\text{dim}}`.

    Args:
        dim (Union[int, str]): Dimension to be unflattened
        unflattened_size (Union[torch.Size, Tuple, List, NamedShape]): New shape of the unflattened dimension

    Examples::
        >>> input = torch.randn(2, 50)
        >>> # With tuple of ints
        >>> m = nn.Sequential(
        >>>     nn.Linear(50, 50),
        >>>     nn.Unflatten(1, (2, 5, 5))
        >>> )
        >>> output = m(input)
        >>> output.size()
        torch.Size([2, 2, 5, 5])
        >>> # With torch.Size
        >>> m = nn.Sequential(
        >>>     nn.Linear(50, 50),
        >>>     nn.Unflatten(1, torch.Size([2, 5, 5]))
        >>> )
        >>> output = m(input)
        >>> output.size()
        torch.Size([2, 2, 5, 5])
        >>> # With namedshape (tuple of tuples)
        >>> input = torch.randn(2, 50, names=('N', 'features'))
        >>> unflatten = nn.Unflatten('features', (('C', 2), ('H', 5), ('W', 5)))
        >>> output = unflatten(input)
        >>> output.size()
        torch.Size([2, 2, 5, 5])
    """

    NamedShape = tuple[tuple[str, int]]

    __constants__ = ["dim", "unflattened_size"]
    dim: Union[int, str]
    unflattened_size: Union[_size, NamedShape]

    def __init__(self, dim: Union[int, str], unflattened_size: Union[_size, NamedShape]) -> None:
        super().__init__()

        if isinstance(dim, int):
            self._require_tuple_int(unflattened_size)
        elif isinstance(dim, str):
            self._require_tuple_tuple(unflattened_size)
        else:
            raise TypeError("invalid argument type for dim parameter")

        self.dim = dim
        self.unflattened_size = unflattened_size

    def _require_tuple_tuple(self, input):
        if isinstance(input, tuple):
            for idx, elem in enumerate(input):
                if not isinstance(elem, tuple):
                    raise TypeError(
                        "unflattened_size must be tuple of tuples, "
                        + f"but found element of type {type(elem).__name__} at pos {idx}"
                    )
            return
        raise TypeError(
            "unflattened_size must be a tuple of tuples, "
            + f"but found type {type(input).__name__}"
        )

    def _require_tuple_int(self, input):
        if isinstance(input, (tuple, list)):
            for idx, elem in enumerate(input):
                if not isinstance(elem, int):
                    raise TypeError(
                        "unflattened_size must be tuple of ints, "
                        + f"but found element of type {type(elem).__name__} at pos {idx}"
                    )
            return
        raise TypeError(
            f"unflattened_size must be a tuple of ints, but found type {type(input).__name__}"
        )

    def forward(self, input: Tensor) -> Tensor:
        return input.unflatten(self.dim, self.unflattened_size)

    def extra_repr(self) -> str:
        return f"dim={self.dim}, unflattened_size={self.unflattened_size}"