o
    hV                     @   sj   d dl mZ d dlZd dlZd dlmZ d dlmZ G dd dejZ	ej
ed ded	d
fddZdS )    )AnyN)_from_dlpack)
_to_dlpackc                   @   s4   e Zd ZdZdZdZdZdZdZdZ	dZ
d	Zd
ZdS )DLDeviceType)   )   )   )   )   )   )	   )
   )   )   N)__name__
__module____qualname__kDLCPUkDLGPUkDLCPUPinned	kDLOpenCL	kDLVulkankDLMetalkDLVPIkDLROCM	kDLExtDev	kDLOneAPI r   r   f/var/www/html/construction_image-detection-poc/venv/lib/python3.10/site-packages/torch/utils/dlpack.pyr   
   s    r   a  to_dlpack(tensor) -> PyCapsule

Returns an opaque object (a "DLPack capsule") representing the tensor.

.. note::
  ``to_dlpack`` is a legacy DLPack interface. The capsule it returns
  cannot be used for anything in Python other than use it as input to
  ``from_dlpack``. The more idiomatic use of DLPack is to call
  ``from_dlpack`` directly on the tensor object - this works when that
  object has a ``__dlpack__`` method, which PyTorch and most other
  libraries indeed have now.

.. warning::
  Only call ``from_dlpack`` once per capsule produced with ``to_dlpack``.
  Behavior when a capsule is consumed multiple times is undefined.

Args:
    tensor: a tensor to be exported

The DLPack capsule shares the tensor's memory.

ext_tensorreturnztorch.Tensorc                 C   s   t | drC|  }|d tjtjfv r;tjd|d  }|d tjk}|r.|jdkr.dn|j}| j	|d}t
|S | 	 }t
|S | }t
|S )aI  from_dlpack(ext_tensor) -> Tensor

    Converts a tensor from an external library into a ``torch.Tensor``.

    The returned PyTorch tensor will share the memory with the input tensor
    (which may have come from another library). Note that in-place operations
    will therefore also affect the data of the input tensor. This may lead to
    unexpected issues (e.g., other libraries may have read-only flags or
    immutable data structures), so the user should only do this if they know
    for sure that this is fine.

    Args:
        ext_tensor (object with ``__dlpack__`` attribute, or a DLPack capsule):
            The tensor or DLPack capsule to convert.

            If ``ext_tensor`` is a tensor (or ndarray) object, it must support
            the ``__dlpack__`` protocol (i.e., have a ``ext_tensor.__dlpack__``
            method). Otherwise ``ext_tensor`` may be a DLPack capsule, which is
            an opaque ``PyCapsule`` instance, typically produced by a
            ``to_dlpack`` function or method.

    Examples::

        >>> import torch.utils.dlpack
        >>> t = torch.arange(4)

        # Convert a tensor directly (supported in PyTorch >= 1.10)
        >>> t2 = torch.from_dlpack(t)
        >>> t2[:2] = -1  # show that memory is shared
        >>> t2
        tensor([-1, -1,  2,  3])
        >>> t
        tensor([-1, -1,  2,  3])

        # The old-style DLPack usage, with an intermediate capsule object
        >>> capsule = torch.utils.dlpack.to_dlpack(t)
        >>> capsule
        <capsule object "dltensor" at ...>
        >>> t3 = torch.from_dlpack(capsule)
        >>> t3
        tensor([-1, -1,  2,  3])
        >>> t3[0] = -9  # now we're sharing memory between 3 tensors
        >>> t3
        tensor([-9, -1,  2,  3])
        >>> t2
        tensor([-9, -1,  2,  3])
        >>> t
        tensor([-9, -1,  2,  3])

    
__dlpack__r   zcuda:r   )stream)hasattr__dlpack_device__r   r   r   torchcudacurrent_streamcuda_streamr!   r   )r   devicer"   is_cuda
stream_ptrdlpackr   r   r   from_dlpack1   s   
3r-   )typingr   r%   enumtorch._Cr   r   	to_dlpackIntEnumr   _C_add_docstrr-   r   r   r   r   <module>   s    