# torch/_library/utils.py
import dataclasses
import inspect
import sys
import warnings
from collections.abc import Iterable, Iterator
from typing import Any, Callable, Union

import torch
import torch.utils._pytree as pytree
from torch import _C, _utils_internal
from torch._ops import OpOverload


def warn_deploy(stacklevel=3):
    warnings.warn(
        "Python torch.library APIs do nothing under torch::deploy (multipy). "
        "Please instead use C++ custom operator registration APIs.",
        RuntimeWarning,
        stacklevel=stacklevel,
    )

@dataclasses.dataclass
class Kernel:
    """Models a (function, source location)"""

    func: Callable
    source: str

    def __call__(self, *args, **kwargs):
        return self.func(*args, **kwargs)

class RegistrationHandle:
    """Does something when someone calls .destroy() on it"""

    def __init__(self, on_destroy: Callable):
        self._on_destroy = on_destroy

    def destroy(self) -> None:
        self._on_destroy()


def get_source(stacklevel: int) -> str:
    """Get a string that represents the caller.

    Example: "/path/to/foo.py:42"

    Use stacklevel=1 to get the caller's source
    Use stacklevel=2 to get the caller's caller's source
    etc.
    """
    frame = inspect.getframeinfo(sys._getframe(stacklevel))
    source = f"{frame.filename}:{frame.lineno}"
    return source


def parse_namespace(qualname: str) -> tuple[str, str]:
    splits = qualname.split("::")
    if len(splits) != 2:
        raise ValueError(
            f"Expected `qualname` to be of the form "
            f'"namespace::name", but got {qualname}. '
            f"The qualname passed to the torch.library APIs must consist "
            f"of a namespace and a name, e.g. aten::sin"
        )
    return splits[0], splits[1]


def lookup_op(qualname: str) -> OpOverload:
    namespace, name = parse_namespace(qualname)
    if "." in name:
        name, overload = name.split(".")
    else:
        overload = "default"
    ns = getattr(torch.ops, namespace)
    packet = getattr(ns, name)
    return getattr(packet, overload)

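# Illustrative example (not part of the original module): the helpers above
# split "namespace::name[.overload]" strings and resolve them to OpOverload
# objects; the overload defaults to "default" when omitted.
#
#     parse_namespace("aten::sin")       # -> ("aten", "sin")
#     lookup_op("aten::sin")             # -> torch.ops.aten.sin.default
#     lookup_op("aten::add.Tensor")      # -> torch.ops.aten.add.Tensor
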
def is_builtin(op: OpOverload) -> bool:
    assert isinstance(op, OpOverload)
    return op.namespace in {"aten", "prim", "prims"}


def is_functional_schema(schema: Any) -> bool:
    """Check if the schema is functional.

    An operator is functional if:
    - it does not mutate any of its inputs
    - it does not return a view on any of its inputs
    - it has at least one return
    """

    def is_functional(schema):
        if schema.is_mutable:
            return False
        rets = schema.returns
        is_non_mutating_view = len(rets) > 0 and any(
            r.alias_info is not None and not r.alias_info.is_write for r in rets
        )
        if is_non_mutating_view:
            return False
        if not schema.returns:
            return False
        return True

    if isinstance(schema, torch._C.FunctionSchema):
        return is_functional(schema)

    # Lazy import: not all PyTorch builds have torchgen.
    from torchgen.model import FunctionSchema

    if isinstance(schema, str):
        schema = FunctionSchema.parse(schema)
    assert isinstance(schema, FunctionSchema)
    return is_functional(schema)

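# Illustrative example (not part of the original module): probing the check
# above with the schemas of existing aten ops. It also accepts schema strings,
# which are parsed via torchgen.
#
#     is_functional_schema(torch.ops.aten.sin.default._schema)   # True
#     is_functional_schema(torch.ops.aten.sin_.default._schema)  # False (in-place)
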
def is_tensorlist_like_type(typ: Any) -> bool:
    return (
        typ == _C.ListType(_C.TensorType.get())
        or typ == _C.ListType(_C.OptionalType(_C.TensorType.get()))
        or typ == _C.OptionalType(_C.ListType(_C.TensorType.get()))
        or typ == _C.OptionalType(_C.ListType(_C.OptionalType(_C.TensorType.get())))
    )


def is_tensor_like_type(typ: Any) -> bool:
    return typ == _C.TensorType.get() or typ == _C.OptionalType(_C.TensorType.get())


def mutates_and_returns_first_arg(op: OpOverload):
    """Check if an op is an inplace aten op, i.e. it mutates and returns the first arg.

    TODO: torchgen/model.py's FunctionSchema.parse is the source of truth for this,
    but not all PyTorch builds have torchgen (due to the yaml dependency being weird).
    Figure this out.

    Example: add_(Tensor(a!) x, Tensor y) -> Tensor(a)
    """
    if op.namespace != "aten":
        return False
    schema = op._schema
    if not len(schema.returns) == 1:
        return False
    if schema.returns[0].alias_info is None:
        return False
    alias_set = schema.returns[0].alias_info.after_set
    if len(alias_set) != 1:
        return False
    loc = next(iter(alias_set))
    if len(schema.arguments) < 1:
        return False
    first_arg = schema.arguments[0]
    if first_arg.alias_info is None:
        return False
    if not first_arg.alias_info.is_write:
        return False
    alias_set = first_arg.alias_info.after_set
    if len(alias_set) != 1:
        return False
    if loc != next(iter(alias_set)):
        return False
    for arg in schema.arguments[1:]:
        if arg.alias_info is not None:
            return False
    return True

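# Illustrative example (not part of the original module): the in-place aten
# variant mutates and returns its first argument, the functional one does not.
#
#     mutates_and_returns_first_arg(torch.ops.aten.add_.Tensor)  # True
#     mutates_and_returns_first_arg(torch.ops.aten.add.Tensor)   # False
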
def fill_defaults(schema, args, kwargs):
    new_args = []
    new_kwargs = {}
    for i in range(len(schema.arguments)):
        info = schema.arguments[i]
        if info.kwarg_only:
            if info.name in kwargs:
                new_kwargs[info.name] = kwargs[info.name]
            else:
                new_kwargs[info.name] = info.default_value
        else:
            if i < len(args):
                new_args.append(args[i])
            else:
                new_args.append(info.default_value)
    return tuple(new_args), new_kwargs


def zip_schema(
    schema: _C.FunctionSchema, args: tuple[Any, ...], kwargs: dict[str, Any]
) -> Iterable[tuple[_C.Argument, Any]]:
    """zips schema.arguments and (args, kwargs) together.

    Assumes that (args, kwargs) were the inputs to some torch._ops.OpOverload:
    that is, (args, kwargs) must be bindable to the schema (args, kwargs).
    """
    assert len(schema.arguments) >= len(args) + len(kwargs)
    for i in range(len(schema.arguments)):
        info = schema.arguments[i]
        if info.kwarg_only:
            if info.name in kwargs:
                yield info, kwargs[info.name]
            continue
        if i >= len(args):
            # A positional argument beyond len(args) may still have been
            # passed by name; otherwise it was omitted and is skipped.
            if not info.kwarg_only and info.name in kwargs:
                yield info, kwargs[info.name]
            continue
        yield info, args[i]

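# Illustrative example (not part of the original module; x and y stand for
# hypothetical Tensors): pairing a schema's arguments with the values of one
# particular call, skipping arguments that were not passed.
#
#     schema = torch.ops.aten.add.Tensor._schema
#     pairs = list(zip_schema(schema, (x, y), {"alpha": 2}))
#     # yields (self, x), (other, y), (alpha, 2) as (_C.Argument, value) pairs
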
def hop_schema_from_fx_node(node):
    from torchgen.gen_schema_utils import FunctionSchemaGen

    hop = node.target
    if not isinstance(hop, torch._ops.HigherOrderOperator):
        raise RuntimeError("fx_node's target must be a hop.")

    def _collect_example_val(node):
        meta_val = node.meta.get("val", None)
        if meta_val is None:
            assert node.op == "get_attr"
            meta_val = getattr(node.graph.owning_module, node.target)
        return meta_val

    example_inputs = []
    for arg in node.args:
        if isinstance(arg, (torch.fx.Node, torch.fx.node.Node)):
            example_inputs.append(_collect_example_val(arg))
        elif isinstance(
            arg, (torch.fx.immutable_collections.immutable_list, list, tuple)
        ):
            example_inputs.append([_collect_example_val(x) for x in arg])
        else:
            raise RuntimeError(f"Unsupported arg type {type(arg)}")

    # Bind the example inputs to check that the number of inputs is correct.
    bound_args = inspect.signature(hop.__call__).bind(*example_inputs)

    example_output = _collect_example_val(node)
    return FunctionSchemaGen.from_example(
        hop._name, tuple(bound_args.arguments.items()), (example_output,)
    )


def can_generate_trivial_fake_impl(op: OpOverload) -> bool:
    assert isinstance(op, OpOverload)
    if is_builtin(op):
        return False
    schema = op._schema
    if not schema.is_mutable:
        return False
    if len(schema.returns) > 0:
        return False
    # The op mutates its inputs and returns nothing: a no-op fake impl works.
    return True


def requires_set_python_module() -> bool:
    """If an op was defined in C++ and extended from Python using the
    torch.library APIs, returns if we require that there have been a
    m.set_python_module("mylib.ops") call from C++ that associates
    the C++ op with a python module.
    """
    return getattr(_utils_internal, "REQUIRES_SET_PYTHON_MODULE", True)


def handle_dispatch_mode(curr_mode, op_overload, *args, **kwargs):
    assert isinstance(curr_mode, torch.utils._python_dispatch.TorchDispatchMode)
    args_flattened, _ = torch.utils._pytree.tree_flatten((args, kwargs.values()))
    # Only tensors carrying the Python dispatch key participate in the
    # "types" argument of __torch_dispatch__.
    overload_types = [
        type(a)
        for a in args_flattened
        if isinstance(a, torch.Tensor)
        and torch._C._dispatch_keys(a).has(torch._C.DispatchKey.Python)
    ]
    return curr_mode.__torch_dispatch__(op_overload, overload_types, args, kwargs)


def has_kwarg_only_args(schema: _C.FunctionSchema):
    return any(a.kwarg_only for a in schema.arguments)


def has_kwarg_only_tensors(schema: _C.FunctionSchema):
    for a in schema.arguments:
        if not (is_tensor_like_type(a.type) or is_tensorlist_like_type(a.type)):
            continue
        if not a.kwarg_only:
            continue
        return True
    return False


def has_tensor_arg(schema: _C.FunctionSchema) -> bool:
    """
    Given a schema, returns True if the schema has a Tensor arg.
    A Tensor arg is any arg with a type annotation that might involve Tensor.
    """
    return any(
        is_tensor_like_type(a.type) or is_tensorlist_like_type(a.type)
        for a in schema.arguments
    )


def get_device_arg_index(schema: _C.FunctionSchema) -> Union[int, None]:
    """
    Given a schema, returns the id of the `device: torch.device` argument.
    If it does not exist, returns None.
    """
    for index, arg in enumerate(schema.arguments):
        if arg.type is _C.DeviceObjType.get() and arg.name == "device":
            return index
    return None

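# Illustrative example (not part of the original module): probing the schema
# predicates above on an existing op. aten::add.Tensor has a kwarg-only Scalar
# `alpha`, Tensor args, and no `device` argument.
#
#     s = torch.ops.aten.add.Tensor._schema
#     has_kwarg_only_args(s)      # True  (alpha is kwarg-only)
#     has_kwarg_only_tensors(s)   # False (alpha is a Scalar, not a Tensor)
#     has_tensor_arg(s)           # True
#     get_device_arg_index(s)     # None
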
def iter_tensors(
    args: tuple[Any], kwargs: dict[str, Any], allowed_nesting: int = 1
) -> Iterator[torch.Tensor]:
    def check(arg):
        if isinstance(arg, torch.Tensor):
            yield arg
        elif allowed_nesting > 0 and isinstance(arg, (tuple, list)):
            yield from iter_tensors(tuple(arg), {}, allowed_nesting - 1)

    for arg in args:
        yield from check(arg)
    for kwarg in kwargs.values():
        yield from check(kwarg)


def check_aliasing_constraint(name, prev, result, get_module=lambda: "???"):
    """
    custom operators' outputs must not alias any inputs or other outputs.
    """
    storages = {id(t.untyped_storage()) for t in prev if isinstance(t, torch.Tensor)}
    tuple_result = result
    if not isinstance(result, tuple):
        tuple_result = (result,)
    for tensor in iter_tensors(tuple_result, {}):
        key = id(tensor.untyped_storage())
        if key in storages:
            raise RuntimeError(
                f"{name} (with implementation in {get_module()}): "
                f"The output of this custom operator (1) must not also be an input to "
                f"this custom operator and (2) may not alias any inputs to this custom "
                f"operator or other returns. The most common way to trigger this error "
                f"is if we have y = custom_op(x) and y and x are the same Tensor. "
                f"Please instead return a clone of the offending output tensor(s) "
                f"(e.g. return x.clone()) or refactor the custom operator to not return y."
            )
        storages.add(key)


class MutationChecker:
    """
    Check if an operator mutated its arguments.
    Usage:

    checker = MutationChecker(op, flat_args, args_spec)
    op(*args, **kwargs)
    checker.check()
    """

    def __init__(self, op, flat_args, args_spec):
        self.op = op
        self.args_spec = args_spec
        self.flat_args = flat_args
        self.real_pre_hashes = [
            hash_tensor(a) if isinstance(a, torch.Tensor) else None for a in flat_args
        ]

    def check(self):
        real_post_hashes = [
            hash_tensor(a) if isinstance(a, torch.Tensor) else None
            for a in self.flat_args
        ]
        was_mutated = [
            not torch.equal(pre, post)
            and not (pre.isnan().all() and post.isnan().all())
            if isinstance(pre, torch.Tensor) and isinstance(post, torch.Tensor)
            else None
            for pre, post in zip(self.real_pre_hashes, real_post_hashes)
        ]
        was_mutated_args, was_mutated_kwargs = pytree.tree_unflatten(
            was_mutated, self.args_spec
        )
        for info, was_mutated_arg in zip_schema(
            self.op._schema, was_mutated_args, was_mutated_kwargs
        ):

            def check_one(info, was_mutated):
                if info.is_write == was_mutated:
                    return
                raise RuntimeError(
                    f"{self.op._name}: for argument '{info.name}': the operator's "
                    f"schema {self.op._schema} specified that the operator "
                    f"{'mutates' if info.is_write else 'does not mutate'} the argument, "
                    f"but this seems to be empirically wrong. Please make the schema "
                    f"and operator behavior consistent. You can specify that an "
                    f"operator mutates a Tensor by e.g. changing its schema type from "
                    f"'Tensor name' to 'Tensor(a!) name' (use different identifiers "
                    f"(a, b, c, ...) for different Tensors)"
                )

            if is_tensor_like_type(info.type):
                check_one(info, was_mutated_arg)
            elif is_tensorlist_like_type(info.type):
                was_any_mutated = (
                    False if was_mutated_arg is None else any(was_mutated_arg)
                )
                check_one(info, was_any_mutated)


def hash_tensor(t: torch.Tensor) -> torch.Tensor:
    """Some inexpensive hash. Used as a quick and dirty indicator for tensor mutation"""
    return t.detach().float().mean()


def has_fake_kernel(op: torch._ops.OpOverload) -> bool:
    """If an operator (that stays alive until FakeTensorMode) has a Fake kernel.
    Don't use this if the operator decomposes before FakeTensorMode.
    """
    if can_generate_trivial_fake_impl(op):
        return True
    name = op._name
    if torch._C._dispatch_has_kernel_for_dispatch_key(
        name, "CompositeImplicitAutograd"
    ):
        return True
    opdef = torch._library.custom_ops._maybe_get_opdef(name)
    if opdef is None:
        if torch._C._dispatch_has_kernel_for_dispatch_key(
            name, "CompositeExplicitAutograd"
        ):
            return True
        entry = torch._library.simple_registry.singleton.find(name)
        if entry.fake_impl.kernel is not None:
            return True
        if torch._C._dispatch_has_kernel_for_dispatch_key(name, "Meta"):
            return True
        return False
    if opdef._abstract_fn is not None:
        return True
    return False


def mutated_args_kwargs(schema: _C.FunctionSchema) -> tuple[list[int], list[str]]:
    idxs = []
    keys = []
    for i, info in enumerate(schema.arguments):
        if info.alias_info is not None and info.alias_info.is_write:
            if info.kwarg_only:
                keys.append(info.name)
            else:
                idxs.append(i)
    return idxs, keys