import copyreg
import functools
import importlib.util
import logging
import sys
import traceback
import warnings
from collections import defaultdict
from types import ModuleType
from typing import Any, Callable, Generic, Optional, TYPE_CHECKING
from typing_extensions import deprecated, ParamSpec

import torch


def _type(self, dtype=None, non_blocking=False, **kwargs):
    """Returns the type if `dtype` is not provided, else casts this object to
    the specified type.

    If this is already of the correct type, no copy is performed and the
    original object is returned.

    Args:
        dtype (type or string): The desired type
        non_blocking (bool): If ``True``, and the source is in pinned memory
            and destination is on the GPU or vice versa, the copy is performed
            asynchronously with respect to the host. Otherwise, the argument
            has no effect.
        **kwargs: For compatibility, may contain the key ``async`` in place of
            the ``non_blocking`` argument. The ``async`` arg is deprecated.
    """
    non_blocking = _get_async_or_non_blocking("type", non_blocking, kwargs)
    if dtype is None:
        return self.__module__ + "." + self.__class__.__name__

    if isinstance(dtype, str):
        dtype = _import_dotted_name(dtype)
    if dtype == type(self):
        return self
    if self.is_sparse:
        if not dtype.is_sparse:
            raise RuntimeError("Cannot cast sparse tensor to dense tensor")
        new_module_name = dtype.__module__.replace(".sparse", "")
        new_values_type_name = new_module_name + "." + dtype.__name__
        new_values = torch.Tensor._values(self).type(
            new_values_type_name, non_blocking
        )
        new_indices_type_name = new_module_name + ".LongTensor"
        new_indices = torch.Tensor._indices(self).type(
            new_indices_type_name, non_blocking
        )
        return dtype(new_indices, new_values, self.size())
    if dtype.is_sparse:
        raise RuntimeError("Cannot cast dense tensor to sparse tensor")
    return dtype(self.size()).copy_(self, non_blocking)
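

# Illustrative sketch, not part of the upstream module: with no `dtype`
# argument, `_type` reports the fully qualified class name of its argument.
# The `_demo_*` name is hypothetical and exists only for illustration.
def _demo_type_name():
    # e.g. returns "torch.Tensor" when no target dtype is given
    return _type(torch.ones(2))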


def _to(self, device, non_blocking=False):
    """Returns a copy of this object in device memory.

    If this object is already on the correct device, then no copy is performed
    and the original object is returned.

    Args:
        device (int): The destination device.
        non_blocking (bool): If ``True`` and the source is in pinned memory,
            the copy will be asynchronous with respect to the host. Otherwise,
            the argument has no effect.
    """
    if self.device == device:
        return self

    if device.type == "cpu":
        pin_memory = non_blocking and self.device.type in (
            "cuda",
            torch._C._get_privateuse1_backend_name(),
        )
        untyped_storage = torch.empty(
            self.nbytes(), dtype=torch.uint8, device=device, pin_memory=pin_memory
        ).untyped_storage()
        untyped_storage.copy_(self, non_blocking)
        return untyped_storage

    device_module = getattr(torch, device.type, None)
    assert (
        device_module is not None
    ), f"{device.type.upper()} device module is not loaded"
    with device_module.device(device):
        if self.is_sparse and hasattr(device_module, "sparse"):
            new_type = getattr(device_module.sparse, self.__class__.__name__)
            indices = getattr(torch.Tensor._indices(self), device.type)(
                device, non_blocking
            )
            values = getattr(torch.Tensor._values(self), device.type)(
                device, non_blocking
            )
            return new_type(indices, values, self.size())
        else:
            assert (
                not self.is_sparse
            ), f"sparse storage is not supported for {device.type} tensors"
            untyped_storage = torch.UntypedStorage(self.size(), device=device)
            untyped_storage.copy_(self, non_blocking)
            return untyped_storage


def _get_async_or_non_blocking(function_name, non_blocking, kwargs):
    """Return the non-blocking flag given the function name and kwargs.

    Args:
        function_name (str): the name of the function being used.
        non_blocking (bool): the default value.
        **kwargs (dict): the kwargs passed to the function.
    """
    if not kwargs:
        return non_blocking
    if len(kwargs) != 1 or "async" not in kwargs:
        message = "{}() got an unexpected keyword argument '{}'"
        argument = list(kwargs.keys()).pop()
        raise TypeError(message.format(function_name, argument))
    warnings.warn("'async' is deprecated; use 'non_blocking'")
    return kwargs["async"]
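

# Illustrative sketch, not part of the upstream module: the legacy ``async``
# keyword is accepted (with a deprecation warning) and wins over the default
# ``non_blocking`` value. The `_demo_*` name is hypothetical.
def _demo_async_kwarg():
    assert _get_async_or_non_blocking("cuda", False, {}) is False
    # the deprecated spelling emits a warning and maps onto non_blocking
    return _get_async_or_non_blocking("cuda", False, {"async": True})  # True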


def _get_restore_location(device):
    """Return the map_location location.

    Used for rebuild functions where the tensor device is distinct from the storage
    """

    map_location = torch.serialization._serialization_tls.map_location
    if map_location is None:
        return device
    else:
        if isinstance(map_location, dict):
            return map_location.get(device, device)
        elif isinstance(map_location, (str, torch.device)):
            return map_location
        else:
            assert callable(map_location)
            raise RuntimeError(
                "Callable map_location not supported with "
                "_rebuild_wrapper_subclass or _rebuild_device_tensor_from_numpy"
            )


def _rebuild_tensor(storage, storage_offset, size, stride):
    # first construct a tensor with the correct dtype/device
    t = torch.empty((0,), dtype=storage.dtype, device=storage._untyped_storage.device)
    return t.set_(storage._untyped_storage, storage_offset, size, stride)


def get_tensor_metadata(tensor):
    # Tensor's metadata for serializing.  Currently this only returns a
    # dict[string, bool] specifying whether the `conj` or `neg` bit is set.
    assert isinstance(tensor, torch.Tensor)
    return torch._C._get_tensor_metadata(tensor)


def set_tensor_metadata(tensor, metadata):
    # See `get_tensor_metadata` above
    assert isinstance(metadata, dict)
    assert isinstance(tensor, torch.Tensor)
    torch._C._set_tensor_metadata(tensor, metadata)


def _restore_device_fake_mode(tensor):
    # NOTE: the helper's original name is not recoverable from the compiled
    # dump; this name is an assumption.  When deserializing under
    # FakeTensorMode, restore the fake device recorded on the storage.
    if (
        torch._guards.detect_fake_mode(None) is not None
        and tensor.untyped_storage()._fake_device is not None
    ):
        device = _get_restore_location(tensor.untyped_storage()._fake_device)
        if not isinstance(device, torch.device):
            device = torch.device(device)
        tensor.fake_device = torch.device(device)
    return tensor


def _rebuild_tensor_v2(
    storage, storage_offset, size, stride, requires_grad, backward_hooks, metadata=None
):
    tensor = _rebuild_tensor(storage, storage_offset, size, stride)
    tensor.requires_grad = requires_grad
    if metadata:
        set_tensor_metadata(tensor, metadata)
    # NB: only for backwards compatibility; backward_hooks is expected to be
    # an empty OrderedDict (see Note [Don't serialize hooks]).
    tensor._backward_hooks = backward_hooks
    tensor = _restore_device_fake_mode(tensor)
    return tensor


def _rebuild_tensor_v3(
    storage,
    storage_offset,
    size,
    stride,
    requires_grad,
    backward_hooks,
    dtype,
    metadata=None,
):
    t = torch.empty(
        (0,),
        dtype=dtype,
        device=storage._untyped_storage.device,
        requires_grad=requires_grad,
    )
    t.set_(storage._untyped_storage, storage_offset, size, stride)
    if metadata:
        set_tensor_metadata(t, metadata)
    t._backward_hooks = backward_hooks
    t = _restore_device_fake_mode(t)
    return t


_sparse_tensors_to_validate: list["torch.Tensor"] = []


# Sparse tensors rebuilt during loading are queued here and validated lazily,
# once their storages are available.
def _validate_loaded_sparse_tensors():
    try:
        for t in _sparse_tensors_to_validate:
            if t.layout is torch.sparse_coo:
                torch._validate_sparse_coo_tensor_args(
                    t._indices(), t._values(), t.size(), t.is_coalesced()
                )
            elif t.layout in {
                torch.sparse_csr,
                torch.sparse_csc,
                torch.sparse_bsr,
                torch.sparse_bsc,
            }:
                if t.layout in {torch.sparse_csr, torch.sparse_bsr}:
                    compressed_indices, plain_indices = (
                        t.crow_indices(),
                        t.col_indices(),
                    )
                else:
                    compressed_indices, plain_indices = (
                        t.ccol_indices(),
                        t.row_indices(),
                    )
                torch._validate_sparse_compressed_tensor_args(
                    compressed_indices, plain_indices, t.values(), t.size(), t.layout
                )
            else:
                raise NotImplementedError(
                    f"_validate_loaded_sparse_tensors for layout `{t.layout}`"
                )
    finally:
        _sparse_tensors_to_validate.clear()
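

# Illustrative sketch, not part of the upstream module: `_rebuild_sparse_tensor`
# (defined just below; resolved at call time) defers invariant checking, and
# `_validate_loaded_sparse_tensors` runs the queued checks afterwards.
# The `_demo_*` name is hypothetical.
def _demo_sparse_rebuild_roundtrip():
    indices = torch.tensor([[0, 1], [1, 0]])
    values = torch.tensor([1.0, 2.0])
    t = _rebuild_sparse_tensor(torch.sparse_coo, (indices, values, (2, 2)))
    _validate_loaded_sparse_tensors()  # raises if the queued tensor is malformed
    return t.to_dense()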


def _rebuild_sparse_tensor(layout, data):
    """
    Rebuilds a sparse tensor from its sparse storage representation.

    Args:
        layout (str): The sparse storage layout of the tensor.
        data (tuple): The tensor's sparse storage representation.
    """
    if layout == torch.sparse_coo:
        if len(data) == 3:
            # legacy representation without the is_coalesced flag
            indices, values, size = data
            is_coalesced = None
        else:
            indices, values, size, is_coalesced = data
        result = torch.sparse_coo_tensor(
            indices, values, size, check_invariants=False, is_coalesced=is_coalesced
        )
        _sparse_tensors_to_validate.append(result)
        return result

    elif layout in {
        torch.sparse_csr,
        torch.sparse_csc,
        torch.sparse_bsr,
        torch.sparse_bsc,
    }:
        compressed_indices, plain_indices, values, size = data
        result = torch.sparse_compressed_tensor(
            compressed_indices,
            plain_indices,
            values,
            size,
            layout=layout,
            check_invariants=False,
        )
        _sparse_tensors_to_validate.append(result)
        return result

    raise NotImplementedError(f"rebuilding sparse tensor for layout {layout}")


def _rebuild_nested_tensor(buffer, sizes, strides, storage_offsets):
    return torch._nested_view_from_buffer(buffer, sizes, strides, storage_offsets)


def _rebuild_device_tensor_from_cpu_tensor(data, dtype, device, requires_grad):
    device = _get_restore_location(device)
    tensor = data.to(dtype=dtype, device=device)
    tensor.requires_grad = requires_grad
    return tensor


def _rebuild_device_tensor_from_numpy(data, dtype, device, requires_grad):
    device = _get_restore_location(device)
    tensor = torch.from_numpy(data).to(dtype=dtype, device=device)
    tensor.requires_grad = requires_grad
    return tensor


# Should not be used; kept only to load tensors serialized with older versions.
_rebuild_xla_tensor = _rebuild_device_tensor_from_numpy


def _rebuild_meta_tensor_no_storage(dtype, size, stride, requires_grad):
    return torch.empty_strided(
        size, stride, dtype=dtype, device="meta", requires_grad=requires_grad
    )


def _rebuild_wrapper_subclass(
    cls, dtype, size, stride, storage_offset, layout, device, requires_grad
):
    device = _get_restore_location(device)
    return torch.Tensor._make_wrapper_subclass(
        cls,
        size,
        strides=stride,
        dtype=dtype,
        storage_offset=storage_offset,
        layout=layout,
        device=device,
        requires_grad=requires_grad,
    )


def _rebuild_qtensor(
    storage,
    storage_offset,
    size,
    stride,
    quantizer_params,
    requires_grad,
    backward_hooks,
):
    qscheme = quantizer_params[0]
    if qscheme == torch.per_tensor_affine:
        _, scale, zero_point = quantizer_params
        tensor = torch._empty_affine_quantized(
            size,
            scale=scale,
            zero_point=zero_point,
            dtype=storage.dtype,
            device=storage.device,
        )
    elif qscheme in (torch.per_channel_affine, torch.per_channel_affine_float_qparams):
        _, scales, zero_points, axis = quantizer_params
        if type(scales) is list and type(zero_points) is list:
            if qscheme == torch.per_channel_affine:
                scales = torch.tensor(scales, dtype=torch.double, device=storage.device)
                zero_points = torch.tensor(
                    zero_points, dtype=torch.long, device=storage.device
                )
            else:
                scales = torch.tensor(scales, dtype=torch.float, device=storage.device)
                zero_points = torch.tensor(
                    zero_points, dtype=torch.float, device=storage.device
                )
        tensor = torch._empty_per_channel_affine_quantized(
            size,
            scales=scales,
            zero_points=zero_points,
            axis=axis,
            dtype=storage.dtype,
            device=storage.device,
        )
    else:
        raise RuntimeError(f"Can't deserialize quantized tensor with qscheme {qscheme}")
    tensor.set_(storage, storage_offset, size, stride)
    tensor.requires_grad = requires_grad
    # NB: only for backwards compatibility (see Note [Don't serialize hooks]).
    tensor._backward_hooks = backward_hooks
    return tensor


def _rebuild_parameter(data, requires_grad, backward_hooks):
    param = torch.nn.Parameter(data, requires_grad)
    # NB: only for backwards compatibility (see Note [Don't serialize hooks]).
    param._backward_hooks = backward_hooks
    return param


def _rebuild_parameter_with_state(data, requires_grad, backward_hooks, state):
    param = torch.nn.Parameter(data, requires_grad)
    # NB: only for backwards compatibility (see Note [Don't serialize hooks]).
    param._backward_hooks = backward_hooks
    # Restore state on Parameter like python attr.
    param = _set_obj_state(param, state)
    return param


def _get_obj_state(obj):
    # Get the state of the python subclass.  This loosely mimics the function
    # on the object class, but since Tensor does not inherit from it, we
    # cannot call that function directly.
    getstate_fn = getattr(obj, "__getstate__", None)
    if getstate_fn:
        state = getstate_fn()
    else:
        slots_to_save = copyreg._slotnames(obj.__class__)
        if slots_to_save:
            state = (
                obj.__dict__,
                {
                    name: getattr(obj, name)
                    for name in slots_to_save
                    if hasattr(obj, name)
                },
            )
        else:
            state = obj.__dict__

    return state


def _set_obj_state(obj, state):
    if isinstance(state, tuple):
        if not len(state) == 2:
            raise RuntimeError(f"Invalid serialized state: {state}")
        dict_state = state[0]
        slots_state = state[1]
    else:
        dict_state = state
        slots_state = None

    # Starting with Python 3.11, the __dict__ attribute is lazily created
    # and is serialized as None when not needed.
    if dict_state:
        for k, v in dict_state.items():
            setattr(obj, k, v)

    if slots_state:
        for k, v in slots_state.items():
            setattr(obj, k, v)

    return obj


def _import_dotted_name(name):
    components = name.split(".")
    obj = __import__(components[0])
    for component in components[1:]:
        obj = getattr(obj, component)
    return obj
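

# Illustrative sketch, not part of the upstream module: `_import_dotted_name`
# imports the root package and then walks attribute access.  The `_demo_*`
# name is hypothetical.
def _demo_import_dotted_name():
    fn = _import_dotted_name("torch.nn.functional.relu")
    return fn(torch.tensor([-1.0, 2.0]))  # tensor([0., 2.])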


def _flatten_dense_tensors(tensors):
    """Flatten dense tensors into a contiguous 1D buffer. Assume tensors are of
    same dense type.

    Since inputs are dense, the resulting tensor will be a concatenated 1D
    buffer. Element-wise operation on this buffer will be equivalent to
    operating individually.

    Args:
        tensors (Iterable[Tensor]): dense tensors to flatten.

    Returns:
        A contiguous 1D buffer containing input tensors.
    """
    return torch._C._nn.flatten_dense_tensors(tensors)


def _flatten_sparse_tensors(tensors):
    """Flatten sparse tensors into two contiguous 1D buffers, one of indices and
    one of values. Assume tensors are of same sparse type.

    Args:
        tensors (Iterable[Tensor]): sparse tensors to flatten.

    Returns:
        A tuple of two contiguous 1D buffers, one containing input tensors'
        indices and the other containing the values.
    """
    flat_indices = torch._C._nn.flatten_dense_tensors(
        [torch.Tensor._indices(t) for t in tensors]
    )
    flat_values = torch._C._nn.flatten_dense_tensors(
        [torch.Tensor._values(t) for t in tensors]
    )
    return flat_indices, flat_values


def _unflatten_dense_tensors(flat, tensors):
    """View a flat buffer using the sizes of tensors. Assume that tensors are of
    same dense type, and that flat is given by _flatten_dense_tensors.

    Args:
        flat (Tensor): flattened dense tensors to unflatten.
        tensors (Iterable[Tensor]): dense tensors whose sizes will be used to
          unflatten flat.

    Returns:
        Unflattened dense tensors with sizes same as tensors and values from
        flat.
    """
    return torch._C._nn.unflatten_dense_tensors(flat, tensors)
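

# Illustrative sketch, not part of the upstream module: a flatten/unflatten
# round trip preserves each tensor's shape and contents.  The `_demo_*` name
# is hypothetical.
def _demo_flatten_roundtrip():
    tensors = [torch.zeros(2, 2), torch.ones(3)]
    flat = _flatten_dense_tensors(tensors)  # a single 1D buffer of 7 elements
    outs = _unflatten_dense_tensors(flat, tensors)
    return [o.shape for o in outs]  # [torch.Size([2, 2]), torch.Size([3])]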


def _unflatten_sparse_tensors(flat, tensors):
    """View flat buffer (containing indices and values) using the sizes of
    tensors. Assume that tensors are of same sparse type, and that flat is given
    by _flatten_sparse_tensors.

    Args:
        flat (tuple(Tensor, Tensor)): flattened indices and values of sparse
          tensors to unflatten.
        tensors (Iterable[Tensor]): sparse tensors whose sizes will be used to
          unflatten flat.

    Returns:
        Unflattened sparse tensors with sizes same as tensors and values from
        flat.
    """
    flat_indices, flat_values = flat
    indices = torch._C._nn.unflatten_dense_tensors(
        flat_indices, [torch.Tensor._indices(t) for t in tensors]
    )
    values = torch._C._nn.unflatten_dense_tensors(
        flat_values, [torch.Tensor._values(t) for t in tensors]
    )
    outputs = []
    for t, i, v in zip(tensors, indices, values):
        outputs.append(t.new(i, v, t.size()))
    return outputs


def _reorder_tensors_as(tensors, ordered_tensors):
    """Assume that tensors are of same order as ordered_tensors within their
    types, e.g., from _take_tensors. Reorder them to be of same order as
    ordered_tensors.

    Args:
        tensors (Iterable[Tensor]): tensors to be reordered. They should be of
          the same order as ordered_tensors within their own types.
        ordered_tensors (Iterable[Tensor]): tensors whose order will be the
          reference.

    Returns:
        Ordered tuple of tensors with contents from tensors and order of
        ordered_tensors.
    """
    type_dict = defaultdict(list)
    for tensor in tensors:
        type_dict[tensor.type()].append(tensor)
    type_dict_ = {t: iter(coll) for t, coll in type_dict.items()}
    return tuple(next(type_dict_[tensor.type()]) for tensor in ordered_tensors)
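

# Illustrative sketch, not part of the upstream module: tensors are grouped by
# their type string, so reordering follows the reference order per type.  The
# `_demo_*` name is hypothetical.
def _demo_reorder_tensors_as():
    ordered = [torch.zeros(1), torch.zeros(2, dtype=torch.int64), torch.zeros(3)]
    mixed = [torch.ones(1), torch.ones(3), torch.ones(2, dtype=torch.int64)]
    outs = _reorder_tensors_as(mixed, ordered)
    return [o.shape for o in outs]  # shapes [1], [2], [3], matching `ordered`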


def _take_tensors(tensors, size_limit):
    """Group tensors into chunks. This generator yields a chunk at each time,
    each containing tensors of same type up to certain byte limit in total size.

    Args:
        tensors (Sequence): A sequence of tensors to be separated into chunks.
        size_limit (int): The limit of each chunk in bytes.

    Yields:
        Blocks of tensors of same type and within size_limit. The yielded
        tensors are only ordered as the original sequence within its types.
    """
    buf_dict: defaultdict = defaultdict(lambda: [[], 0])
    for tensor in tensors:
        t = tensor.type()
        if tensor.is_sparse:
            indices = torch.Tensor._indices(tensor)
            values = torch.Tensor._values(tensor)
            size = (
                indices.numel() * indices.element_size()
                + values.numel() * values.element_size()
            )
        else:
            size = tensor.numel() * tensor.element_size()
        buf_and_size = buf_dict[t]
        if buf_and_size[1] + size > size_limit and buf_and_size[1] > 0:
            yield buf_and_size[0]
            buf_and_size = buf_dict[t] = [[], 0]
        buf_and_size[0].append(tensor)
        buf_and_size[1] += size
    for buf, _size in buf_dict.values():
        if len(buf) > 0:
            yield buf


def annotate(ret, **kwargs):
    def dec(fun):
        fun.__annotations__ = dict(kwargs)
        fun.__annotations__["return"] = ret
        return fun

    return dec


def render_call(fn, args, kwargs):
    str_fn = torch.overrides.resolve_name(fn)
    if str_fn is None:
        str_fn = str(fn)

    str_args = []
    with torch._tensor_str.printoptions(threshold=0, edgeitems=0):
        str_args.extend(repr(a) for a in args)
        str_args.extend(f"{k}={repr(v)}" for k, v in kwargs.items())
        r = f"{str_fn}({', '.join(str_args)})"
    return r


class KeyErrorMessage(str):
    r"""str subclass that returns itself in repr"""

    __slots__ = ()

    def __repr__(self):
        return self


class ExceptionWrapper:
    r"""Wraps an exception plus traceback to communicate across threads"""

    def __init__(self, exc_info=None, where="in background"):
        # It is important that we don't store exc_info, see
        # NOTE [ Python Traceback Reference Cycle Problem ]
        if exc_info is None:
            exc_info = sys.exc_info()
        self.exc_type = exc_info[0]
        self.exc_msg = "".join(traceback.format_exception(*exc_info))
        self.where = where

    def reraise(self):
        r"""Reraises the wrapped exception in the current thread"""
        msg = f"Caught {self.exc_type.__name__} {self.where}.\nOriginal {self.exc_msg}"
        if self.exc_type == KeyError:
            # KeyError calls repr() on its argument (usually a dict key).  This
            # makes stack traces unreadable.  It will not be changed in Python
            # (https://bugs.python.org/issue2651), so we work around it.
            msg = KeyErrorMessage(msg)
        elif getattr(self.exc_type, "message", None):
            # Some exceptions have first argument as non-str but explicitly
            # have a message field.
            raise self.exc_type(message=msg)
        try:
            exception = self.exc_type(msg)
        except TypeError:
            # If the exception takes multiple arguments, don't try to
            # instantiate since we don't know how to.
            raise RuntimeError(msg) from None
        raise exception


def _get_available_device_type():
    if torch.cuda.is_available():
        return "cuda"
    if torch.backends.mps.is_available():
        return "mps"
    if hasattr(torch, "xpu") and torch.xpu.is_available():
        return "xpu"
    if hasattr(torch, "mtia") and torch.mtia.is_available():
        return "mtia"
    custom_backend_name = torch._C._get_privateuse1_backend_name()
    custom_device_mod = getattr(torch, custom_backend_name, None)
    if custom_device_mod and custom_device_mod.is_available():
        return custom_backend_name
    # add more available device types here
    return None


def _get_device_attr(get_member):
    device_type = _get_available_device_type()
    if device_type and device_type.lower() == "cuda":
        return get_member(torch.cuda)
    if device_type and device_type.lower() == "mps":
        return get_member(torch.mps)
    if device_type and device_type.lower() == "xpu":
        return get_member(torch.xpu)
    if device_type and device_type.lower() == "mtia":
        return get_member(torch.mtia)
    if device_type == torch._C._get_privateuse1_backend_name():
        return get_member(getattr(torch, device_type))
    # add more available device types here
    return None


def _get_current_device_index():
    # current device index
    return _get_device_attr(lambda m: m.current_device())


def _get_all_device_indices():
    # all device index
    return _get_device_attr(lambda m: list(range(m.device_count())))


def _get_devices_properties(device_ids):
    # all device properties
    return [_get_device_attr(lambda m: m.get_device_properties(i)) for i in device_ids]
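

# Illustrative sketch, not part of the upstream module: chunks respect the
# byte budget per dtype, so three 400-byte float32 tensors split into a chunk
# of two and a chunk of one under a 1 KiB limit.  The `_demo_*` name is
# hypothetical.
def _demo_take_tensors():
    tensors = [torch.zeros(100) for _ in range(3)]  # 400 bytes each (float32)
    return [len(chunk) for chunk in _take_tensors(tensors, 1024)]  # [2, 1]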


def get_current_device_index() -> int:
    r"""Checks if there are CUDA devices available and
    returns the device index of the current default CUDA device.
    Returns -1 in case there are no CUDA devices available.
    Arguments: ``None``
    """
    if torch.cuda.device_count() > 0:
        return torch.cuda.current_device()
    return -1


def _get_device_index(
    device: Any, optional: bool = False, allow_cpu: bool = False
) -> int:
    r"""Gets the device index from :attr:`device`, which can be a torch.device
    object, a Python integer, or ``None``.

    If :attr:`device` is a torch.device object, returns the device index if it
    has index. Note that for a device without a specified index,
    i.e., ``torch.device('xxx')``, this will return the current default
    device of that type if :attr:`optional` is ``True``. If :attr:`allow_cpu` is ``True``,
    CPU devices will be accepted and ``-1`` will be returned in this case.

    If :attr:`device` is a Python integer, it is returned as is.

    If :attr:`device` is ``None``, this will return the current default
    device of the supported runtime platform if :attr:`optional` is ``True``.
    i.e., the current default CUDA device will be returned if CUDA runtime is supported.
    """
    if isinstance(device, str):
        device = torch.device(device)
    device_idx: Optional[int] = None
    if isinstance(device, torch.device):
        if not allow_cpu and device.type == "cpu":
            raise ValueError(f"Expected a non cpu device, but got: {device}")
        device_idx = -1 if device.type == "cpu" else device.index
    if isinstance(device, int):
        device_idx = device
    if device_idx is None:
        if optional:
            # _get_current_device_index uses `lambda` functions, which are not
            # supported in JIT; `get_current_device_index()` is the scriptable
            # equivalent, so pick based on the mode we are in.
            if torch.jit.is_scripting():
                device_idx = get_current_device_index()
            else:
                device_idx = _get_current_device_index()
        else:
            raise ValueError(
                f"Expected a torch.device with a specified index or an integer, but got:{device}"
            )
    return device_idx
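

# Illustrative sketch, not part of the upstream module: explicit indices pass
# through; CPU is only legal with allow_cpu=True and maps to -1.  The
# `_demo_*` name is hypothetical.
def _demo_device_index():
    assert _get_device_index(torch.device("cuda:1")) == 1
    assert _get_device_index("cpu", allow_cpu=True) == -1
    return _get_device_index(3)  # plain integers are returned as-is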


def _handle_complex(tensor):
    """
    Returns a real view of a tensor if complex dtype else just the tensor
    need to check if a UninitializedParameter because otherwise checking is_complex is an error for a LazyModule
    """
    return (
        torch.view_as_real(tensor)
        if not isinstance(tensor, torch.nn.UninitializedParameter)
        and tensor.is_complex()
        else tensor
    )


def _element_size(dtype):
    """
    Returns the element size for a dtype, in bytes
    """
    if not isinstance(dtype, torch.dtype):
        raise RuntimeError(f"expected torch.dtype, but got {type(dtype)}")

    if dtype.is_complex:
        return torch.finfo(dtype).bits >> 2
    elif dtype.is_floating_point:
        return torch.finfo(dtype).bits >> 3
    elif dtype == torch.bool:
        # NOTE: torch.bool is not supported in torch.iinfo()
        return 1
    else:
        return torch.iinfo(dtype).bits >> 3


class _ClassPropertyDescriptor:
    def __init__(self, fget, fset=None):
        self.fget = fget

    def __get__(self, instance, owner=None):
        if owner is None:
            owner = type(instance)
        return self.fget.__get__(instance, owner)()


def classproperty(func):
    if not isinstance(func, (classmethod, staticmethod)):
        func = classmethod(func)
    return _ClassPropertyDescriptor(func)


if TYPE_CHECKING:

    @deprecated(
        "`torch._utils.is_compiling` is deprecated. Use `torch.compiler.is_compiling` instead.",
        category=FutureWarning,
    )
    def is_compiling() -> bool:
        return torch.compiler.is_compiling()

else:

    def is_compiling() -> bool:
        """
        Indicates whether we are tracing/compiling with torch.compile() or torch.export().
        """
        warnings.warn(
            "`torch._utils.is_compiling` is deprecated. Use `torch.compiler.is_compiling` instead.",
            FutureWarning,
            stacklevel=2,
        )
        return torch.compiler.is_compiling()


def _functionalize_sync(t):
    # This code lives in python instead of C++ since conditioning on a certain
    # python subclass is much more of a pain in C++.
    from torch._subclasses.functional_tensor import FunctionalTensor

    if isinstance(t, FunctionalTensor):
        # If a FunctionalTensorMode is active while syncing, we don't want it
        # to intercept the sync() call; pop the mode temporarily.
        maybe_functional_mode = torch._C._unset_dispatch_mode(
            torch._C._TorchDispatchModeKey.FUNCTIONAL
        )
        try:
            torch._functionalize_sync(t.elem)
        finally:
            if maybe_functional_mode is not None:
                torch._C._set_dispatch_mode(maybe_functional_mode)
    else:
        torch._functionalize_sync(t)


@functools.lru_cache(2)  # NOTE: cache size is an assumption in this reconstruction
def _get_device_module(device_type: str):
    device_module = getattr(torch, device_type, None)
    if device_module is None:
        raise RuntimeError(
            f"Device '{device_type}' does not have a corresponding module registered as 'torch.{device_type}'."
        )
    return device_module


def _dummy_type(name: str) -> type:
    def get_err_fn(is_init: bool):
        def err_fn(obj, *args, **kwargs):
            if is_init:
                class_name = obj.__class__.__name__
            else:
                class_name = obj.__name__
            raise RuntimeError(f"Tried to instantiate dummy base class {class_name}")

        return err_fn

    return type(
        name, (object,), {"__init__": get_err_fn(True), "__new__": get_err_fn(False)}
    )


class _LazySeedTracker:
    # Only the latest seed callbacks are tracked; the call order is preserved
    # so lazy initialization can replay them in submission order.
    def __init__(self):
        self.manual_seed_all_cb = None
        self.manual_seed_cb = None
        self.call_order = []

    def queue_seed_all(self, cb, traceback):
        self.manual_seed_all_cb = (cb, traceback)
        # update seed_all to be latest
        self.call_order = [self.manual_seed_cb, self.manual_seed_all_cb]

    def queue_seed(self, cb, traceback):
        self.manual_seed_cb = (cb, traceback)
        # update seed to be latest
        self.call_order = [self.manual_seed_all_cb, self.manual_seed_cb]

    def get_calls(self) -> list:
        return self.call_order


logger = logging.getLogger(__name__)
P = ParamSpec("P")


class CallbackRegistry(Generic[P]):
    def __init__(self, name: str):
        self.name = name
        self.callback_list: list[Callable[P, None]] = []

    def add_callback(self, cb: Callable[P, None]) -> None:
        self.callback_list.append(cb)

    def fire_callbacks(self, *args: P.args, **kwargs: P.kwargs) -> None:
        for cb in self.callback_list:
            try:
                cb(*args, **kwargs)
            except Exception:
                logger.exception(
                    "Exception in callback for %s registered with gpu trace", self.name
                )


def try_import(module_name: str) -> Optional[ModuleType]:
    if (module := sys.modules.get(module_name)) is not None:
        return module
    if (spec := importlib.util.find_spec(module_name)) is not None:
        module = importlib.util.module_from_spec(spec)
        sys.modules[module_name] = module
        assert spec.loader is not None, "The loader attribute should always be set"
        spec.loader.exec_module(module)
        return module
    return None


# Mappings used to resolve py2-era pickle module/name references; they mirror
# CPython's Lib/_compat_pickle.py.
IMPORT_MAPPING = {
    "__builtin__": "builtins",
    "copy_reg": "copyreg",
    "Queue": "queue",
    "repr": "reprlib",
    "_abcoll": "collections.abc",
    "UserDict": "collections",
    "UserList": "collections",
    "UserString": "collections",
    "whichdb": "dbm",
    "StringIO": "io",
    "cStringIO": "io",
}

NAME_MAPPING = {
    ("__builtin__", "xrange"): ("builtins", "range"),
    ("__builtin__", "reduce"): ("functools", "reduce"),
    ("__builtin__", "intern"): ("sys", "intern"),
    ("__builtin__", "unichr"): ("builtins", "chr"),
    ("__builtin__", "unicode"): ("builtins", "str"),
    ("__builtin__", "long"): ("builtins", "int"),
    ("itertools", "izip"): ("builtins", "zip"),
    ("itertools", "imap"): ("builtins", "map"),
    ("itertools", "ifilter"): ("builtins", "filter"),
    ("itertools", "ifilterfalse"): ("itertools", "filterfalse"),
    ("itertools", "izip_longest"): ("itertools", "zip_longest"),
    ("UserDict", "IterableUserDict"): ("collections", "UserDict"),
    ("UserDict", "UserDict"): ("collections", "UserDict"),
    ("UserList", "UserList"): ("collections", "UserList"),
    ("UserString", "UserString"): ("collections", "UserString"),
    ("whichdb", "whichdb"): ("dbm", "whichdb"),
    ("__builtin__", "basestring"): ("builtins", "str"),
    ("exceptions", "StandardError"): ("builtins", "Exception"),
}
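

# Illustrative sketch, not part of the upstream module: callbacks fire in
# registration order, and a raising callback is logged rather than propagated.
# The `_demo_*` name is hypothetical.
def _demo_callback_registry():
    seen = []
    registry = CallbackRegistry("demo events")
    registry.add_callback(seen.append)
    registry.fire_callbacks(1)
    registry.fire_callbacks(2)
    return seen  # [1, 2]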