import ctypes

import torch
from torch._utils import _dummy_type


# Provide dummy base classes so this module can be imported on builds
# compiled without CUDA support.
if not hasattr(torch._C, "_CudaStreamBase"):
    torch._C.__dict__["_CudaStreamBase"] = _dummy_type("_CudaStreamBase")
    torch._C.__dict__["_CudaEventBase"] = _dummy_type("_CudaEventBase")


class Stream(torch._C._CudaStreamBase):
    r"""Wrapper around a CUDA stream.

    A CUDA stream is a linear sequence of execution that belongs to a specific
    device, independent from other streams. It supports the ``with`` statement
    as a context manager to ensure the operators within the ``with`` block run
    on the corresponding stream.  See :ref:`cuda-semantics` for details.

    Args:
        device(torch.device or int, optional): a device on which to allocate
            the stream. If :attr:`device` is ``None`` (default) or a negative
            integer, this will use the current device.
        priority(int, optional): priority of the stream, which can be positive, 0, or negative.
            A lower number indicates a higher priority. By default, the priority is set to 0.
            If the value falls outside of the allowed priority range, it will automatically be
            mapped to the nearest valid priority (lowest for large positive numbers or
            highest for large negative numbers).

    """

    def __new__(cls, device=None, priority=0, **kwargs):
        # Switching the device context is comparatively expensive, so avoid it
        # unless a specific device was requested.
        if device is None or ("stream_id" in kwargs and "device_index" in kwargs):
            return super().__new__(cls, priority=priority, **kwargs)
        else:
            with torch.cuda.device(device):
                return super().__new__(cls, priority=priority, **kwargs)

    def wait_event(self, event) -> None:
        r"""Make all future work submitted to the stream wait for an event.

        Args:
            event (torch.cuda.Event): an event to wait for.

        .. note:: This is a wrapper around ``cudaStreamWaitEvent()``: see
           `CUDA Stream documentation`_ for more info.

           This function returns without waiting for :attr:`event`: only future
           operations are affected.

        .. _CUDA Stream documentation:
           https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__STREAM.html
        """
        event.wait(self)

    def wait_stream(self, stream) -> None:
        r"""Synchronize with another stream.

        All future work submitted to this stream will wait until all kernels
        submitted to a given stream at the time of call complete.

        Args:
            stream (Stream): a stream to synchronize with.

        .. note:: This function returns without waiting for currently enqueued
           kernels in :attr:`stream`: only future operations are affected.
        """
        self.wait_event(stream.record_event())

    def record_event(self, event=None):
        r"""Record an event.

        Args:
            event (torch.cuda.Event, optional): event to record. If not given, a new one
                will be allocated.

        Returns:
            Recorded event.
        """
        if event is None:
            event = Event()
        event.record(self)
        return event

    def query(self) -> bool:
        r"""Check if all the work submitted has been completed.

        Returns:
            A boolean indicating if all kernels in this stream are completed.
        """
        return super().query()

    def synchronize(self) -> None:
        r"""Wait for all the kernels in this stream to complete.

        .. note:: This is a wrapper around ``cudaStreamSynchronize()``: see
           `CUDA Stream documentation`_ for more info.
        """
        super().synchronize()

    @property
    def _as_parameter_(self):
        # Expose the raw cudaStream_t so Stream objects can be passed directly
        # to ctypes-based C APIs.
        return ctypes.c_void_p(self.cuda_stream)

    def __eq__(self, o) -> bool:
        if isinstance(o, Stream):
            return super().__eq__(o)
        return False

    def __hash__(self):
        return hash((self.cuda_stream, self.device))

    def __repr__(self):
        return f"<torch.cuda.Stream device={self.device} cuda_stream={self.cuda_stream:#x}>"
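

# Illustrative sketch (not part of the original module): the Stream docstring
# above describes using a stream as a context manager so that kernels launched
# inside the ``with`` block run on that stream. The helper below is a
# hypothetical, minimal usage example assuming a CUDA-capable build; the name
# ``_stream_overlap_example`` and the tensor sizes are illustrative only.
def _stream_overlap_example():
    side_stream = Stream()  # a work queue independent of the default stream
    a = torch.randn(1024, 1024, device="cuda")
    with torch.cuda.stream(side_stream):
        # Kernels launched here are enqueued on ``side_stream``.
        b = torch.matmul(a, a)
    # Make the current (default) stream wait until ``side_stream`` finishes
    # before any later kernels that consume ``b`` are allowed to run.
    torch.cuda.current_stream().wait_stream(side_stream)
    return b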



class ExternalStream(Stream):
    r"""Wrapper around an externally allocated CUDA stream.

    This class is used to wrap streams allocated in other libraries in order
    to facilitate data exchange and multi-library interactions.

    .. note:: This class doesn't manage the stream life-cycle; it is the user's
       responsibility to keep the referenced stream alive while this class is
       being used.

    Args:
        stream_ptr(int): Integer representation of the ``cudaStream_t`` value
            allocated externally.
        device(torch.device or int, optional): the device where the stream
            was originally allocated. If the device is specified incorrectly,
            subsequent launches using this stream may fail.
    """

    def __new__(cls, stream_ptr, device=None, **kwargs):
        with torch.cuda.device(device):
            return super().__new__(cls, stream_ptr=stream_ptr, **kwargs)


class Event(torch._C._CudaEventBase):
    r"""Wrapper around a CUDA event.

    CUDA events are synchronization markers that can be used to monitor the
    device's progress, to accurately measure timing, and to synchronize CUDA
    streams.

    The underlying CUDA events are lazily initialized when the event is first
    recorded or exported to another process. After creation, only streams on the
    same device may record the event. However, streams on any device can wait on
    the event.

    Args:
        enable_timing (bool, optional): indicates if the event should measure time
            (default: ``False``)
        blocking (bool, optional): if ``True``, :meth:`wait` will be blocking (default: ``False``)
        interprocess (bool): if ``True``, the event can be shared between processes
            (default: ``False``)

    .. _CUDA Event Documentation:
       https://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__EVENT.html
    """

    def __new__(cls, enable_timing=False, blocking=False, interprocess=False):
        return super().__new__(
            cls, enable_timing=enable_timing, blocking=blocking, interprocess=interprocess
        )

    @classmethod
    def from_ipc_handle(cls, device, handle):
        r"""Reconstruct an event from an IPC handle on the given device."""
        return super().from_ipc_handle(device, handle)

    def record(self, stream=None):
        r"""Record the event in a given stream.

        Uses ``torch.cuda.current_stream()`` if no stream is specified. The
        stream's device must match the event's device.
        """
        if stream is None:
            stream = torch.cuda.current_stream()
        super().record(stream)

    def wait(self, stream=None) -> None:
        r"""Make all future work submitted to the given stream wait for this event.

        Uses ``torch.cuda.current_stream()`` if no stream is specified.

        .. note:: This is a wrapper around ``cudaStreamWaitEvent()``: see
            `CUDA Event Documentation`_ for more info.
        """
        if stream is None:
            stream = torch.cuda.current_stream()
        super().wait(stream)

    def query(self):
        r"""Check if all work currently captured by the event has completed.

        Returns:
            A boolean indicating if all work currently captured by the event
            has completed.
        """
        return super().query()

    def elapsed_time(self, end_event):
        r"""Return the time elapsed.

        Time reported in milliseconds after the event was recorded and
        before the end_event was recorded.
        """
        return super().elapsed_time(end_event)

    def synchronize(self) -> None:
        r"""Wait for the event to complete.

        Waits until the completion of all work currently captured in this event.
        This prevents the CPU thread from proceeding until the event completes.

        .. note:: This is a wrapper around ``cudaEventSynchronize()``: see
            `CUDA Event Documentation`_ for more info.
        """
        super().synchronize()

    def ipc_handle(self):
        r"""Return an IPC handle of this event.

        If not recorded yet, the event will use the current device.
        """
        return super().ipc_handle()

    @property
    def _as_parameter_(self):
        # Expose the raw cudaEvent_t so Event objects can be passed directly
        # to ctypes-based C APIs.
        return ctypes.c_void_p(self.cuda_event)

    def __repr__(self) -> str:
        if self.cuda_event:
            return f"<torch.cuda.Event {self._as_parameter_.value:#x}>"
        return "<torch.cuda.Event uninitialized>"
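

# Illustrative sketch (not part of the original module): the Event docstring
# above explains that events can measure timing between two recorded points.
# The guarded block below is a hypothetical smoke test assuming a CUDA-capable
# build; the matrix size and message text are illustrative only.
if __name__ == "__main__":
    if torch.cuda.is_available():
        start, end = Event(enable_timing=True), Event(enable_timing=True)
        x = torch.randn(2048, 2048, device="cuda")
        start.record()          # mark the start on the current stream
        y = torch.matmul(x, x)  # work to be timed
        end.record()            # mark the end on the current stream
        end.synchronize()       # block the CPU until ``end`` has completed
        print(f"matmul took {start.elapsed_time(end):.3f} ms")
    else:
        print("CUDA is not available; skipping the timing example.")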