# torch/masked/_ops.py -- recovered from the compiled module; function bodies
# that could not be restored from the bytecode are elided with ``...``.

import warnings
from typing import Any, Callable, Optional, TYPE_CHECKING, TypeVar, Union

from typing_extensions import ParamSpec

import torch
from torch import sym_float, Tensor
from torch._prims_common import corresponding_real_dtype
from torch.masked import _docs
from torch.masked.maskedtensor.core import is_masked_tensor, MaskedTensor
from torch.masked.maskedtensor.creation import as_masked_tensor

if TYPE_CHECKING:
    from torch.types import _dtype as DType

    DimOrDims = Optional[Union[int, tuple[int], list[int]]]
else:
    # Runtime fallbacks; the precise types are only needed for type checking.
    DType = int
    DimOrDims = Optional[tuple[int]]


__all__: list[str] = []

_T = TypeVar("_T")
_P = ParamSpec("_P")


def _apply_docstring_templates(func: Callable[_P, _T]) -> Callable[_P, _T]:
    """Decorator that applies docstring templates to function docstring
    and returns the function instance.
    """
    doc_string = getattr(_docs, f"{func.__name__}_docstring", None)
    if doc_string is None:
        warnings.warn(
            f"No documentation string available for {func.__name__}."
            " PyTorch team should run `python tools/update_masked_docs.py`"
            " to generate the missing docstrings."
        )
    else:
        func.__doc__ = doc_string

    # Expose the function as a public symbol of torch.masked.
    __all__.append(func.__name__)

    return func


def _generate_docstring(func):
    """A utility function called from tools/update_masked_docs.py
    script to update the module torch.masked._docs.py
    """
    docstring_templates = dict(
        reduction_signature="""\
{function_name}(input, {operation_args}, *, {operation_kwargs}) -> Tensor""",
        reduction_descr="""\
Returns {operation name} of all the elements in the :attr:`input`
tensor along the given dimension(s) :attr:`dim` while the :attr:`input`
elements are masked out according to the boolean tensor
:attr:`mask`.""",
        reduction_args="""\
If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension(s) :attr:`dim` where it is of
size 1. Otherwise, :attr:`dim` is squeezed (see
:func:`torch.squeeze`), resulting in the output tensor having 1 (or
``len(dim)``) fewer dimension(s).

The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True
then the corresponding element in :attr:`input` tensor will be
included in {operation name} computation, otherwise the element is
ignored.

When all elements of :attr:`input` along the given dimension
:attr:`dim` are ignored (fully masked-out), the corresponding element
of the output tensor will have undefined value: it may or may not
correspond to the identity value of {operation name} operation; the
choice may correspond to the value that leads to the most efficient
storage of :attr:`output` tensor.

The mask of the output tensor can be computed as
``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim,
dtype=torch.bool)``.

The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.

Args:
    input (Tensor): the input tensor
    {args_declarations}

Keyword args:
    {kwargs_declarations}""",
        reduction_example="""\
Example::

    >>> input = {example_input}
    >>> input
    {indent_example_input}
    >>> mask = {example_mask}
    >>> mask
    {indent_example_mask}
    >>> {full_function_name}(input, {example_args}, mask=mask)
    {indent_example_output}
""",
        reduction_identity="""\
The identity value of {operation name} operation, which is used to start the reduction, is ``{identity_int32}``.""",
        reduction_identity_dtype="""\
The identity value of {operation name} operation, which is used to start the
reduction, depends on input dtype. For instance, for float32, uint8,
and int32 dtypes, the identity values are ``{identity_float32}``, ``{identity_uint8}``, and ``{identity_int32}``, respectively.""",
        normalization_signature="""\
{function_name}(input, {operation_args}, *, {operation_kwargs}) -> Tensor""",
        normalization_descr="""\
Returns {operation name} of all the slices in the :attr:`input` tensor
along :attr:`dim` while the :attr:`input` elements are masked out
according to the boolean tensor :attr:`mask`.

{definition}""",
        normalization_args="""\
The boolean tensor :attr:`mask` defines the "validity" of
:attr:`input` tensor elements: if :attr:`mask` element is True then
the corresponding element in :attr:`input` tensor will be included in
{operation name} computation, otherwise the element is ignored.

The values of masked-out elements of the output tensor have undefined
value: it may or may not be set to zero or nan; the choice may correspond to
the value that leads to the most efficient storage of :attr:`output`
tensor.

The mask of the {operation name} output tensor can be computed as
``torch.broadcast_to(mask, input.shape)``.

The shapes of the :attr:`mask` tensor and the :attr:`input` tensor
don't need to match, but they must be :ref:`broadcastable
<broadcasting-semantics>` and the dimensionality of the :attr:`mask`
tensor must not be greater than of the :attr:`input` tensor.

Args:
    input (Tensor): the input tensor
    {args_declarations}

Keyword args:
    {kwargs_declarations}""",
        normalization_example="""\
Example::

    >>> input = {example_input}
    >>> input
    {indent_example_input}
    >>> mask = {example_mask}
    >>> mask
    {indent_example_mask}
    >>> {full_function_name}(input, {example_args}, mask=mask)
    {indent_example_output}
""",
    )

    args_and_kwargs = dict(
        # operation name: (positional args, keyword args)
        sum=(("dim",), ("keepdim=False", "dtype=None", "mask=None")),
        prod=(("dim",), ("keepdim=False", "dtype=None", "mask=None")),
        cumsum=(("dim__as_int",), ("dtype=None", "mask=None")),
        cumprod=(("dim__as_int",), ("dtype=None", "mask=None")),
        amin=(("dim",), ("keepdim=False", "dtype=None", "mask=None")),
        amax=(("dim",), ("keepdim=False", "dtype=None", "mask=None")),
        argmin=(("dim__as_int",), ("keepdim=False", "dtype=None", "mask=None")),
        argmax=(("dim__as_int",), ("keepdim=False", "dtype=None", "mask=None")),
        mean=(("dim",), ("keepdim=False", "dtype=None", "mask=None")),
        median=(("dim__as_int",), ("keepdim=False", "dtype=None", "mask=None")),
        norm=(("ord", "dim"), ("keepdim=False", "dtype=None", "mask=None")),
        var=(("dim", "unbiased"), ("keepdim=False", "dtype=None", "mask=None")),
        std=(("dim", "unbiased"), ("keepdim=False", "dtype=None", "mask=None")),
        logsumexp=(("dim",), ("keepdim=False", "dtype=None", "mask=None")),
        softmax=(("dim__as_int",), ("dtype=None", "mask=None")),
        log_softmax=(("dim__as_int",), ("dtype=None", "mask=None")),
        softmin=(("dim__as_int",), ("dtype=None", "mask=None")),
        normalize=(("ord__required", "dim__as_int"), ("eps=1e-12", "dtype=None", "mask=None")),
    )

    argument_declarations = dict(
        dim="""\
dim (int or tuple of ints, optional): the dimension or dimensions to reduce.
  Default: None that is equivalent to ``tuple(range(input.ndim))``.""",
        dim__as_int="""\
dim (int): the dimension along which {operation name} is computed.""",
        ord="""\
ord (int, float, optional): the order of vector norm. Default: 2.
  See :func:`torch.linalg.vector_norm` for a list of supported norms.""",
        ord__required="""\
ord (int, float): the order of vector norm. Default: 2.
  See :func:`torch.linalg.vector_norm` for a list of supported norms.""",
        unbiased="""\
unbiased (bool): when True, use Bessel's correction, otherwise, compute
  the uncorrected sample variance.""",
        eps="""\
eps (float, optional): small value to avoid division by zero. Default: {default}.""",
        keepdim="""\
keepdim (bool, optional): whether the output tensor has
  :attr:`dim` retained or not. Default: {default}.""",
        dtype="""\
dtype (:class:`torch.dtype`, optional): the desired data type
  of returned tensor.  If specified, the input tensor is
  casted to :attr:`dtype` before the operation is
  performed. Default: {default}.""",
        mask="""\
mask (:class:`torch.Tensor`, optional): the boolean tensor
  containing the binary mask of validity of input tensor
  elements.
  Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.""",
    )

    definitions = dict(
        softmax="""\
Let ``x`` be a sequence of unmasked elements of one-dimensional slice
of the :attr:`input` tensor. Softmax of i-th element in ``x`` is
defined as ``exp(x[i])/sum(exp(x))``.""",
        log_softmax="""\
Let ``x`` be a sequence of unmasked elements of one-dimensional slice
of the :attr:`input` tensor. LogSoftmax of i-th element in ``x`` is
defined as ``log(exp(x[i])/sum(exp(x)))``.""",
        softmin="""\
Let ``x`` be a sequence of unmasked elements of one-dimensional slice
of the :attr:`input` tensor. Softmin of i-th element in ``x`` is
defined as ``exp(-x[i])/sum(exp(-x))``.""",
        normalize="""\
Let ``x`` be a sequence of unmasked elements of one-dimensional slice
of the :attr:`input` tensor. Normalize of i-th element in ``x`` is
defined as ``x[i]/max(norm(x, p), eps)``.""",
        cumsum="""\
Let ``x`` be a sequence of unmasked elements of one-dimensional slice
of the :attr:`input` tensor. Cumsum of i-th element in ``x`` is
defined as ``sum(x[:i])``.""",
        cumprod="""\
Let ``x`` be a sequence of unmasked elements of one-dimensional slice
of the :attr:`input` tensor. Cumprod of i-th element in ``x`` is
defined as ``prod(x[:i])``.""",
    )

    reduction_names = dict(
        sum="sum",
        prod="product",
        amax="maximum",
        amin="minimum",
        argmax="argmax",
        argmin="argmin",
        mean="mean",
        median="median",
        norm="norm",
        var="variance",
        std="standard_deviation",
        logsumexp="logsumexp",
    )

    normalization_names = dict(
        softmax="softmax",
        log_softmax="log_softmax",
        softmin="softmin",
        normalize="normalize",
        cumsum="cumulative_sum",
        cumprod="cumulative_prod",
    )

    operation_names = {}
    operation_names.update(reduction_names)
    operation_names.update(normalization_names)

    # Input/mask pair used to render the generated ``Example::`` sections.
    example_dim = 1
    example_input = torch.tensor([[-3, -2, -1], [0, 1, 2]])
    example_mask = torch.tensor([[True, False, True], [False, False, False]])
    example_args: tuple[Any, ...]
    if func.__name__ in {"norm", "normalize"}:
        example_args = (2.0, example_dim)
        example_input = example_input.to(dtype=torch.float32)
    elif func.__name__ in {"var", "std"}:
        example_args = (example_dim, False)
    elif func.__name__ == "median":
        example_args = (example_dim,)
        example_input = example_input.to(dtype=torch.float32)
    else:
        example_args = (example_dim,)

    # The remaining assembly -- formatting the templates above with the
    # per-operation data (argument declarations, identity values, evaluated
    # example output) and joining the selected documentation sections into
    # the final docstring -- is elided.
    ...


def _reduction_identity(op_name: str, input: Tensor, *args):
    """Return identity value as scalar tensor of a reduction operation on
    given input, or None, if the identity value cannot be uniquely
    defined for the given input.

    The identity value of the operation is defined as the initial
    value to reduction operation that has a property ``op(op_identity,
    value) == value`` for any value in the domain of the operation.
    Or put it another way, including or excluding the identity value in
    a list of operands will not change the reduction result.

    See https://github.com/pytorch/rfcs/pull/27 for more information.
    """
    dtype: DType = input.dtype
    device = input.device
    op_name = op_name.rsplit(".", 1)[-1]  # strip module name when present
    if op_name in {"sum", "cumsum"}:
        return torch.tensor(0, dtype=dtype, device=device)
    elif op_name in {"prod", "cumprod"}:
        return torch.tensor(1, dtype=dtype, device=device)
    elif op_name in {"amax", "argmax", "logaddexp"}:
        if torch.is_floating_point(input):
            return torch.tensor(-torch.inf, dtype=dtype, device=device)
        elif torch.is_signed(input) or dtype == torch.uint8:
            return torch.tensor(torch.iinfo(dtype).min, dtype=dtype, device=device)
    elif op_name in {"logsumexp"}:
        if torch.is_floating_point(input):
            return torch.tensor(-torch.inf, dtype=dtype, device=device)
        elif torch.is_complex(input):
            return torch.tensor(complex(-torch.inf, 0), dtype=dtype, device=device)
        elif torch.is_signed(input) or dtype == torch.uint8:
            return torch.tensor(torch.iinfo(dtype).min, dtype=dtype, device=device)
    elif op_name in {"amin", "argmin"}:
        if torch.is_floating_point(input):
            return torch.tensor(torch.inf, dtype=dtype, device=device)
        elif torch.is_signed(input) or dtype == torch.uint8:
            return torch.tensor(torch.iinfo(dtype).max, dtype=dtype, device=device)
    elif op_name == "mean":
        # The identity of mean cannot be defined independently of the input,
        # see the docstring of ``mean`` below.
        return None
    elif op_name == "norm":
        ord = args[0] if args else 2
        if ord == float("-inf"):
            assert torch.is_floating_point(input), input.dtype
            return torch.tensor(torch.inf, dtype=dtype, device=device)
        return torch.tensor(0, dtype=dtype, device=device)
    elif op_name == "median":
        # The masked median is computed via nanmedian, whose identity is nan
        # because nan values are ignored.
        dtype = input.dtype if torch.is_floating_point(input) else torch.float
        return torch.tensor(torch.nan, dtype=dtype, device=device)
    elif op_name in {"var", "std"}:
        return None
    raise NotImplementedError(f"identity of {op_name} on {dtype} input")
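# Illustration (example added here, not recovered from the original source):
# for float32 inputs the identities above are 0 for sum, 1 for prod, -inf for
# amax and inf for amin, and the documented output mask of a masked reduction
# is ``torch.any(torch.broadcast_to(mask, input.shape), dim, keepdim=keepdim)``.
# A masked sum over ``dim=1`` of a strided tensor can therefore be emulated as:
#
#     >>> input = torch.tensor([[-3, -2, -1], [0, 1, 2]])
#     >>> mask = torch.tensor([[True, False, True], [False, False, False]])
#     >>> torch.where(mask, input, torch.zeros_like(input)).sum(dim=1)
#     tensor([-4,  0])
#     >>> torch.any(mask, dim=1)
#     tensor([ True, False])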
def _canonical_dim(dim: DimOrDims, ndim: int) -> tuple[int, ...]:
    """Return dim argument as a tuple of sorted dim values."""
    dims: list[int] = []
    if dim == ():
        # Reducing over an empty tuple of dimensions is canonicalized to
        # reducing over all dimensions.
        dim = None
    if dim is None:
        return tuple(range(ndim))
    ndim = max(ndim, 1)
    dim_ = (dim,) if isinstance(dim, (int, torch.SymInt)) else dim
    for d in dim_:
        if d in dims:
            raise RuntimeError(f"dim={d} appears multiple times in the list of dims")
        if d >= ndim or d < -ndim:
            raise IndexError(
                f"Dimension out of range (expected to be in range of "
                f"[{-ndim}, {ndim - 1}], but got {d})"
            )
        dims.append(d % ndim)
    return tuple(sorted(dims))


def _sparse_coo_flatten_indices(indices: Tensor, shape: tuple):
    # Flatten N-D sparse COO indices to 1-D indices over the flattened shape.
    flat_indices = indices.new_zeros(indices.size(1))
    for d, sz in enumerate(shape):
        flat_indices.mul_(sz)
        flat_indices.add_(indices[d])
    return flat_indices


def _any(input: Tensor, dim: tuple, keepdim: bool):
    # Support torch.any with a tuple of dims by reducing one dimension at a time.
    r = input
    for d in reversed(dim):
        r = r.any(dim=d, keepdim=keepdim)
    return r


def _sparse_coo_where(mask: Tensor, input: Tensor, fill_value: Tensor) -> Tensor:
    """Sparse variant of torch.where. Supports sparse COO and hybrid sparse COO tensors.

    _sparse_coo_where implements the following invariant:

      _sparse_coo_where(mask, input, fill_value).to_dense(fill_value) ==
        torch.where(mask.to_dense(), input.to_dense(), torch.full(input.shape, fill_value))

    where `a == b` means `assertEqual(a, b)`, mask is boolean sparse
    tensor, and `to_dense(fill_value)` is like `to_dense()` except
    that the unspecified elements are mapped to `fill_value` rather
    than to `0`.

    Returns a sparse COO tensor with the following features:

    - all specified elements correspond to masked-in elements that
      have the values of the input tensor. If there exists a masked-in
      element (as specified by mask) that is not specified in the
      input, in the result tensor, the corresponding element has value
      0. In the dense part of the sparse tensor, the masked-out
      elements are replaced with fill_value.

    - all unspecified elements correspond to masked-out elements.
    """
    ...
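# Illustration (example added here, not recovered from the original source):
# the right-hand side of the _sparse_coo_where invariant can be evaluated
# directly with dense tensors, e.g.
#
#     >>> input = torch.tensor([[1.0, 0.0], [0.0, 2.0]]).to_sparse()
#     >>> mask = torch.tensor([[True, False], [False, False]]).to_sparse()
#     >>> torch.where(mask.to_dense(), input.to_dense(), torch.full((2, 2), -1.0))
#     tensor([[ 1., -1.],
#             [-1., -1.]])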
def _sparse_coo_scatter_reduction_helper(op, mask_input: Tensor, dims: tuple[int, ...], keepdim: bool, dtype: Optional[DType] = None) -> Tensor:
    # Reduction helper for sparse COO inputs: sparse dimensions listed in
    # ``dims`` are reduced by scattering values over flattened indices, while
    # dense dimensions are reduced with ``op`` directly.  Only sum, prod,
    # amax and amin are supported.
    ...
def _sparse_csr_segment_reduction_helper(op, mask_input: Tensor, dims: tuple[int, ...], keepdim: bool, dtype: Optional[DType] = None) -> Tensor:
    # Segment-reduction helper for sparse CSR inputs of masked sum/prod/mean/
    # amax/amin.  Sparse CSR tensors are 2D and only support reduction along
    # dim 0 or dim 1; reductions with keepdim=False are unsupported for CSR
    # inputs.
    ...


def _sparse_csr_where(mask: Tensor, input: Tensor, fill_value: Tensor) -> Tensor:
    """Sparse variant of torch.where. Supports sparse CSR tensors."""
    return _sparse_coo_where(
        mask.to_sparse_coo(), input.to_sparse_coo(), fill_value
    ).to_sparse_csr()
def _where(mask: Tensor, input: Tensor, fill_value: Tensor) -> Tensor:
    """torch.where with sparse inputs support.

    _where implements the following invariant:

      _where(mask, input, fill_value).to_dense(fill_value) ==
        torch.where(mask.to_dense(), input.to_dense(), torch.full(input.shape, fill_value))

    where `a == b` means `assertEqual(a, b)`, mask is boolean sparse
    tensor, and `to_dense(fill_value)` is like `to_dense()` except
    that the unspecified elements are mapped to `fill_value` rather
    than to `0`.

    Returns a sparse tensor with the following features:

    - all specified elements correspond to masked-in elements that
      have the values of the input tensor. If there exists a masked-in
      element (as specified by mask) that is not specified in the
      input, in the result tensor, the corresponding element has value
      0. In the dense part of the sparse tensor, the masked-out
      elements are replaced with fill_value.

    - all unspecified elements correspond to masked-out elements.
    """
    if mask.layout == torch.strided:
        return torch.where(mask, input, fill_value)
    elif mask.layout == torch.sparse_coo:
        return _sparse_coo_where(mask, input, fill_value)
    elif mask.layout == torch.sparse_csr:
        return _sparse_csr_where(mask, input, fill_value)
    else:
        raise ValueError(
            f"_where expects strided or sparse COO or sparse CSR tensor but got {mask.layout}"
        )
def _input_mask(input: Union[Tensor, MaskedTensor], *args, **kwargs) -> Tensor:
    """Return canonical input mask.

    A canonical input mask is defined as a boolean mask tensor that
    shape and layout matches with the shape and the layout of the
    input.

    The canonical input mask is computed from the :attr:`mask` tensor
    content to meet the following criteria:

    1. The shape of the canonical input mask is the same as the shape
       of :attr:`input` tensor. If the mask tensor has a smaller shape
       than the shape of the :attr:`input`, broadcasting rules will be
       applied. Downcasting of mask is not supported.

    2. The layout of the canonical input mask is the same as the
       layout of the :attr:`input` tensor. If the mask has different
       layout, it will be converted to the expected layout.  In the
       case of sparse COO layout, the canonical input mask will be
       coalesced.

    3. The dtype of the canonical input mask is torch.bool. If the
       mask dtype is not bool then it will be converted to bool dtype
       using `.to(dtype=bool)` method call.

    4. The elements of the canonical input mask have boolean values
       copied from the content of the :attr:`mask` tensor (after
       possible broadcasting and dtype conversion transforms).  In
       general, the sparsity pattern of the sparse canonical input
       mask need not to be the same as the sparsity pattern of the
       sparse :attr:`input` tensor.
    """
    if input.layout not in {torch.strided, torch.sparse_coo, torch.sparse_csr}:
        raise ValueError(
            f"_input_mask expects strided or sparse COO or sparse CSR tensor but got {input.layout}"
        )

    mask = kwargs.get("mask")
    if mask is None:
        raise ValueError("_input_mask requires explicit mask")
    # The remaining shape/layout/dtype conversion logic is elided.
    ...


def _output_mask(op, input: Tensor, *args, **kwargs) -> Tensor:
    """Return output mask of masked operation applied to given arguments."""
    ...


def _combine_input_and_mask(op, input: Union[MaskedTensor, Tensor], mask, *args) -> Tensor:
    def helper(input, mask):
        if mask is None:
            return input
        canonical_mask = _input_mask(input, mask=mask)
        if callable(op):
            fill_value = _reduction_identity(op.__name__, input, *args)
            return _where(canonical_mask, input, fill_value)
        else:
            raise ValueError(
                f"_combine_input_and_mask expected masked operation (got {type(op).__name__} object)"
            )

    class Combine(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input, mask):
            """Return input with masked-out elements eliminated for the given operations."""
            ctx.save_for_backward(mask)
            if mask is not None:
                ctx.mark_non_differentiable(mask)
            return helper(input, mask)

        @staticmethod
        def backward(ctx, grad_output):
            (mask,) = ctx.saved_tensors
            grad_data = grad_output.get_data() if is_masked_tensor(grad_output) else grad_output
            result = as_masked_tensor(grad_data, mask)
            return result, None

    return (
        Combine.apply(input.get_data(), input.get_mask())
        if is_masked_tensor(input)
        else helper(input, mask)
    )


@_apply_docstring_templates
def sum(input: Union[Tensor, MaskedTensor], dim: DimOrDims = None, *, keepdim: Optional[bool] = False, dtype: Optional[DType] = None, mask: Optional[Tensor] = None) -> Tensor:
    ...


@_apply_docstring_templates
def prod(input: Union[Tensor, MaskedTensor], dim: DimOrDims = None, *, keepdim: Optional[bool] = False, dtype: Optional[DType] = None, mask: Optional[Tensor] = None) -> Tensor:
    ...


@_apply_docstring_templates
def cumsum(input: Tensor, dim: int, *, dtype: Optional[DType] = None, mask: Optional[Tensor] = None) -> Tensor:
    ...


@_apply_docstring_templates
def cumprod(input: Tensor, dim: int, *, dtype: Optional[DType] = None, mask: Optional[Tensor] = None) -> Tensor:
    ...


@_apply_docstring_templates
def amax(input: Union[Tensor, MaskedTensor], dim: DimOrDims = None, *, keepdim: Optional[bool] = False, dtype: Optional[DType] = None, mask: Optional[Tensor] = None) -> Tensor:
    """\
{reduction_signature}

{reduction_descr}

{reduction_identity_dtype}

{reduction_args}

{reduction_example}"""
    ...


@_apply_docstring_templates
def amin(input: Union[Tensor, MaskedTensor], dim: DimOrDims = None, *, keepdim: Optional[bool] = False, dtype: Optional[DType] = None, mask: Optional[Tensor] = None) -> Tensor:
    """\
{reduction_signature}

{reduction_descr}

{reduction_identity_dtype}

{reduction_args}

{reduction_example}"""
    ...


@_apply_docstring_templates
def argmax(input: Union[Tensor, MaskedTensor], dim: Optional[int] = None, *, keepdim: Optional[bool] = False, dtype: Optional[DType] = None, mask: Optional[Tensor] = None) -> Tensor:
    """\
{reduction_signature}
{reduction_descr}
{reduction_identity_dtype}
{reduction_args}
{reduction_example}"""
    ...


@_apply_docstring_templates
def argmin(input: Union[Tensor, MaskedTensor], dim: Optional[int] = None, *, keepdim: Optional[bool] = False, dtype: Optional[DType] = None, mask: Optional[Tensor] = None) -> Tensor:
    """\
{reduction_signature}
{reduction_descr}
{reduction_identity_dtype}
{reduction_args}
{reduction_example}"""
    ...


@_apply_docstring_templates
def mean(input: Union[Tensor, MaskedTensor], dim: DimOrDims = None, *, keepdim: Optional[bool] = False, dtype: Optional[DType] = None, mask: Optional[Tensor] = None) -> Tensor:
    """\
{reduction_signature}

{reduction_descr}

By definition, the identity value of a mean operation is the mean
value of the tensor. If all elements of the input tensor along given
dimension(s) :attr:`dim` are masked-out, the identity value of the
mean is undefined.  Due to this ambiguity, the elements of output
tensor with strided layout, that correspond to fully masked-out
elements, have ``nan`` values.

{reduction_args}

{reduction_example}"""
    ...


@_apply_docstring_templates
def median(input: Union[Tensor, MaskedTensor], dim: int = -1, *, keepdim: bool = False, dtype: Optional[DType] = None, mask: Optional[Tensor] = None) -> Tensor:
    """\
{reduction_signature}
{reduction_descr}
By definition, the identity value of a median operation is the median
value of the tensor. If all elements of the input tensor along given
dimension(s) :attr:`dim` are masked-out, the identity value of the
median is undefined.  Due to this ambiguity, the elements of output
tensor with strided layout, that correspond to fully masked-out
elements, have ``nan`` values.
{reduction_args}
{reduction_example}"""
    ...


@_apply_docstring_templates
def logsumexp(input: Tensor, dim: DimOrDims = None, *, keepdim: bool = False, dtype: Optional[DType] = None, mask: Optional[Tensor] = None) -> Tensor:
    ...
    tensor. The :attr:`input` elements are masked out according to the boolean tensor
    :attr:`input_mask` and the attr:`other` elements are masked out according to the boolean tensor
    :attr:`other_mask`.

    The shapes of a mask tensor and the tensor to be masked
    don't need to match, but they must be :ref:`broadcastable
    <broadcasting-semantics>` and the dimensionality of the mask
    tensor must not be greater than of the tensor to be masked.

    Args:
        input (Tensor): the input tensor
        other (Tensor): the second input tensor

    Keyword args:
        dtype (:class:`torch.dtype`, optional): the desired data type
          of returned tensor.  If specified, the output tensor is
          casted to :attr:`dtype` after the operation is
          performed. Default: None.
        input_mask (:class:`torch.Tensor`, optional): the boolean tensor
          containing the binary mask of validity of :attr:`input` tensor elements.
          Default: None that is equivalent to ``torch.ones(input.shape, dtype=torch.bool)``.
        other_mask (:class:`torch.Tensor`, optional): the boolean tensor
          containing the binary mask of validity of :attr:`other` tensor elements.
          Default: None that is equivalent to ``torch.ones(other.shape, dtype=torch.bool)``.

    Example::

        >>> input = torch.tensor([-100.0, -200, -300])
        >>> input
        tensor([-100., -200., -300.])
        >>> other = torch.tensor([-1.0, -2, -3])
        >>> other
        tensor([-1., -2., -3.])
        >>> mask = torch.tensor([True, False, True])
        >>> mask
        tensor([ True, False,  True])
        >>> torch.masked._ops.logaddexp(input, other, input_mask=mask, other_mask=mask)
        tensor([-1., -inf, -3.])NrX   z.masked logaddexp expects strided tensors (got z tensor for input, z for other))rI   r   r   rM  rp  r   r   r  )r   r  rI   r  r  r  
mask_otherr   r   r   r     s   1r   rW   r=   c                C   s`   |du r| j }tt| ||}|jtjkr't|| j}tjj	|||t
||dS td|j d)z{reduction_signature}

{reduction_descr}

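# Illustration (example added here, not recovered from the original source):
# masked norm, documented below, reduces only the masked-in elements, so for
# ``ord=2`` it agrees with torch.linalg.vector_norm applied to the unmasked
# values:
#
#     >>> torch.linalg.vector_norm(torch.tensor([3.0, -4.0]), ord=2)
#     tensor(5.)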
@_apply_docstring_templates
def norm(input: Union[Tensor, MaskedTensor], ord: Optional[float] = 2.0, dim: DimOrDims = None, *, keepdim: Optional[bool] = False, dtype: Optional[DType] = None, mask: Optional[Tensor] = None) -> Tensor:
    """\
{reduction_signature}

{reduction_descr}

The identity value of norm operation, which is used to start the
reduction, is ``{identity_float32}``, except for ``ord=-inf`` it is
``{identity_ord_ninf}``.

{reduction_args}

{reduction_example}"""
    ...


def _std_var(input: Union[Tensor, MaskedTensor], dim: DimOrDims, unbiased: Optional[bool], *, correction_opt: Optional[Union[int, float]], keepdim: Optional[bool], dtype: Optional[DType], mask: Optional[Tensor], take_sqrt: Optional[bool]) -> Tensor:
    # Shared implementation of masked var and std; only one of ``unbiased``
    # and ``correction_opt`` may be given, and ``take_sqrt`` selects the
    # standard deviation.
    assert unbiased is None or correction_opt is None, (
        "Only one of unbiased and correction may be given"
    )
    ...


@_apply_docstring_templates
def var(input: Union[Tensor, MaskedTensor], dim: DimOrDims = None, unbiased: Optional[bool] = None, *, correction: Optional[Union[int, float]] = None, keepdim: Optional[bool] = False, dtype: Optional[DType] = None, mask: Optional[Tensor] = None) -> Tensor:
    """\
{reduction_signature}
{reduction_descr}
The identity value of sample variance operation is undefined. The
elements of output tensor with strided layout, that correspond to
fully masked-out elements, have ``nan`` values.
{reduction_args}
{reduction_example}"""
    return _std_var(
        input=input,
        dim=dim,
        unbiased=unbiased,
        correction_opt=correction,
        keepdim=keepdim,
        dtype=dtype,
        mask=mask,
        take_sqrt=False,
    )


@_apply_docstring_templates
def std(input: Union[Tensor, MaskedTensor], dim: DimOrDims = None, unbiased: Optional[bool] = None, *, correction: Optional[int] = None, keepdim: Optional[bool] = False, dtype: Optional[DType] = None, mask: Optional[Tensor] = None) -> Tensor:
    """\
{reduction_signature}
{reduction_descr}
The identity value of sample standard deviation operation is undefined. The
elements of output tensor with strided layout, that correspond to
fully masked-out elements, have ``nan`` values.
{reduction_args}
{reduction_example}"""
    return _std_var(
        input=input,
        dim=dim,
        unbiased=unbiased,
        correction_opt=correction,
        keepdim=keepdim,
        dtype=dtype,
        mask=mask,
        take_sqrt=True,
    )


@_apply_docstring_templates
def softmax(input: Union[Tensor, MaskedTensor], dim: int, *, dtype: Optional[DType] = None, mask: Optional[Tensor] = None) -> Tensor:
    ...


@_apply_docstring_templates
def log_softmax(input: Union[Tensor, MaskedTensor], dim: int, *, dtype: Optional[DType] = None, mask: Optional[Tensor] = None) -> Tensor:
    ...


@_apply_docstring_templates
def softmin(input: Union[Tensor, MaskedTensor], dim: int, *, dtype: Optional[DType] = None, mask: Optional[Tensor] = None) -> Tensor:
    ...


@_apply_docstring_templates
def normalize(input: Union[Tensor, MaskedTensor], ord: float, dim: int, *, eps: float = 1e-12, dtype: Optional[DType] = None, mask: Optional[Tensor] = None) -> Tensor:
    ...
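# Illustration (example added here, not recovered from the original source):
# per the ``definitions`` table used by _generate_docstring, masked softmax of
# a slice is ``exp(x[i])/sum(exp(x))`` over the unmasked elements only.  For
# input [0., 1., 2.] with mask [True, False, True], the masked-in values are
# [0., 2.], so the well-defined output entries follow from:
#
#     >>> torch.softmax(torch.tensor([0.0, 2.0]), dim=0)
#     tensor([0.1192, 0.8808])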