# mypy: allow-untyped-defs
r"""Implementation for the Resilient backpropagation."""
from typing import cast, Optional, Union

import torch
from torch import Tensor

from .optimizer import (
    _capturable_doc,
    _default_to_fused_or_foreach,
    _differentiable_doc,
    _disable_dynamo_if_unsupported,
    _foreach_doc,
    _get_capturable_supported_devices,
    _get_scalar_dtype,
    _maximize_doc,
    _params_doc,
    _use_grad_for_differentiable,
    _view_as_real,
    Optimizer,
    ParamsT,
)

__all__ = ["Rprop", "rprop"]


class Rprop(Optimizer):
    def __init__(
        self,
        params: ParamsT,
        lr: Union[float, Tensor] = 1e-2,
        etas: tuple[float, float] = (0.5, 1.2),
        step_sizes: tuple[float, float] = (1e-6, 50),
        *,
        capturable: bool = False,
        foreach: Optional[bool] = None,
        maximize: bool = False,
        differentiable: bool = False,
    ):
        if isinstance(lr, Tensor) and lr.numel() != 1:
            raise ValueError("Tensor lr must be 1-element")
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 < etas[0] < 1.0 < etas[1]:
            raise ValueError(f"Invalid eta values: {etas[0]}, {etas[1]}")

        defaults = dict(
            lr=lr,
            etas=etas,
            step_sizes=step_sizes,
            foreach=foreach,
            maximize=maximize,
            differentiable=differentiable,
            capturable=capturable,
        )
        super().__init__(params, defaults)

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault("foreach", None)
            group.setdefault("maximize", False)
            group.setdefault("differentiable", False)
            group.setdefault("capturable", False)
            for p in group["params"]:
                p_state = self.state.get(p, [])
                if len(p_state) != 0 and not torch.is_tensor(p_state["step"]):
                    step_val = float(p_state["step"])
                    p_state["step"] = (
                        torch.tensor(
                            step_val, dtype=_get_scalar_dtype(), device=p.device
                        )
                        if group["capturable"]
                        else torch.tensor(step_val, dtype=_get_scalar_dtype())
                    )

    def _init_group(self, group, params, grads, prevs, step_sizes, state_steps):
        has_complex = False
        for p in group["params"]:
            if p.grad is None:
                continue
            has_complex |= torch.is_complex(p)
            params.append(p)
            grad = p.grad
            if grad.is_sparse:
                raise RuntimeError("Rprop does not support sparse gradients")

            grads.append(grad)
            state = self.state[p]

            # State initialization
            if len(state) == 0:
                state["step"] = (
                    torch.zeros((), dtype=_get_scalar_dtype(), device=p.device)
                    if group["capturable"]
                    else torch.zeros((), dtype=_get_scalar_dtype())
                )

                state["prev"] = torch.zeros_like(
                    p, memory_format=torch.preserve_format
                )
                if p.dtype.is_complex:
                    # Complex numbers are handled as two independent real numbers,
                    # so the step size must not start at zero for the imaginary part.
                    state["step_size"] = torch.full_like(
                        grad, complex(group["lr"], group["lr"])
                    )
                else:
                    state["step_size"] = torch.full_like(grad, group["lr"])

            prevs.append(state["prev"])
            step_sizes.append(state["step_size"])
            state_steps.append(state["step"])

        return has_complex

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        self._cuda_graph_capture_health_check()

        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params: list[Tensor] = []
            grads: list[Tensor] = []
            prevs: list[Tensor] = []
            step_sizes: list[Tensor] = []
            state_steps: list[Tensor] = []

            etaminus, etaplus = group["etas"]
            step_size_min, step_size_max = group["step_sizes"]
            foreach = group["foreach"]
            maximize = group["maximize"]

            has_complex = self._init_group(
                group, params, grads, prevs, step_sizes, state_steps
            )

            rprop(
                params,
                grads,
                prevs,
                step_sizes,
                state_steps,
                step_size_min=step_size_min,
                step_size_max=step_size_max,
                etaminus=etaminus,
                etaplus=etaplus,
                foreach=foreach,
                maximize=maximize,
                differentiable=group["differentiable"],
                capturable=group["capturable"],
                has_complex=has_complex,
            )

        return loss


Rprop.__doc__ = (
    r"""Implements the resilient backpropagation algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \theta_0 \in \mathbf{R}^d \text{ (params)},f(\theta)
                \text{ (objective)},                                                             \\
            &\hspace{13mm}      \eta_{+/-} \text{ (etaplus, etaminus)}, \Gamma_{max/min}
                \text{ (step sizes)}                                                             \\
            &\textbf{initialize} :   g^0_{prev} \leftarrow 0,
                \: \eta_0 \leftarrow \text{lr (learning rate)}                                   \\
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\
            &\hspace{5mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})           \\
            &\hspace{5mm} \textbf{for} \text{  } i = 0, 1, \ldots, d-1 \: \mathbf{do}            \\
            &\hspace{10mm}  \textbf{if} \:   g^i_{prev} g^i_t  > 0                               \\
            &\hspace{15mm}  \eta^i_t \leftarrow \mathrm{min}(\eta^i_{t-1} \eta_{+},
                \Gamma_{max})                                                                    \\
            &\hspace{10mm}  \textbf{else if}  \:  g^i_{prev} g^i_t < 0                           \\
            &\hspace{15mm}  \eta^i_t \leftarrow \mathrm{max}(\eta^i_{t-1} \eta_{-},
                \Gamma_{min})                                                                    \\
            &\hspace{15mm}  g^i_t \leftarrow 0                                                   \\
            &\hspace{10mm}  \textbf{else}  \:                                                    \\
            &\hspace{15mm}  \eta^i_t \leftarrow \eta^i_{t-1}                                     \\
            &\hspace{5mm}\theta_t \leftarrow \theta_{t-1}- \eta_t \mathrm{sign}(g_t)             \\
            &\hspace{5mm}g_{prev} \leftarrow  g_t                                                \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    For further details regarding the algorithm we refer to the paper
    `A Direct Adaptive Method for Faster Backpropagation Learning: The RPROP Algorithm
    <http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.21.1417>`_.
    """
    + rf"""
    Args:
        {_params_doc}
        lr (float, optional): learning rate (default: 1e-2)
        etas (Tuple[float, float], optional): pair of (etaminus, etaplus), that
            are multiplicative increase and decrease factors
            (default: (0.5, 1.2))
        step_sizes (Tuple[float, float], optional): a pair of minimal and
            maximal allowed step sizes (default: (1e-6, 50))
        {_foreach_doc}
        {_capturable_doc}
        {_maximize_doc}
        {_differentiable_doc}

    """
)
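
# Usage sketch (added commentary, not part of the upstream module). The model
# and data below are hypothetical; only the Rprop constructor and step() are
# the API defined in this file. Because the update uses only the *sign* of the
# gradient, Rprop is intended for full-batch gradients: mini-batch noise flips
# the sign spuriously and keeps shrinking the per-element step sizes.
#
#   >>> import torch
#   >>> model = torch.nn.Linear(4, 1)
#   >>> opt = torch.optim.Rprop(model.parameters(), lr=0.01, etas=(0.5, 1.2),
#   ...                         step_sizes=(1e-6, 50))
#   >>> x, y = torch.randn(128, 4), torch.randn(128, 1)
#   >>> for _ in range(100):
#   ...     opt.zero_grad()
#   ...     loss = (model(x) - y).pow(2).mean()
#   ...     loss.backward()
#   ...     opt.step()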


def _single_tensor_rprop(
    params: list[Tensor],
    grads: list[Tensor],
    prevs: list[Tensor],
    step_sizes: list[Tensor],
    state_steps: list[Tensor],
    *,
    step_size_min: float,
    step_size_max: float,
    etaminus: float,
    etaplus: float,
    maximize: bool,
    capturable: bool,
    differentiable: bool,
    has_complex: bool,
):
    for i, param in enumerate(params):
        grad = grads[i]
        grad = grad if not maximize else -grad
        prev = prevs[i]
        step_size = step_sizes[i]
        step = state_steps[i]

        # If compiling, the compiler will handle cudagraph checks, see note
        # [torch.compile x capturable]
        if not torch.compiler.is_compiling() and capturable:
            capturable_supported_devices = _get_capturable_supported_devices()
            assert (
                param.device.type == step.device.type
                and param.device.type in capturable_supported_devices
            ), (
                f"If capturable=True, params and state_steps must be on supported "
                f"devices: {capturable_supported_devices}."
            )

        step += 1

        if torch.is_complex(param):
            grad = torch.view_as_real(grad)
            prev = torch.view_as_real(prev)
            param = torch.view_as_real(param)
            step_size = torch.view_as_real(step_size)
        if differentiable:
            sign = grad.mul(prev.clone()).sign()
        else:
            sign = grad.mul(prev).sign()

        # Map the sign of grad * prev onto the step-size multiplier:
        # same sign -> etaplus, flipped sign -> etaminus, zero -> 1.
        if capturable:
            sign.copy_(torch.where(sign.gt(0), etaplus, sign))
            sign.copy_(torch.where(sign.lt(0), etaminus, sign))
            sign.copy_(torch.where(sign.eq(0), 1, sign))
        else:
            sign[sign.gt(0)] = etaplus
            sign[sign.lt(0)] = etaminus
            sign[sign.eq(0)] = 1

        # update stepsizes with step size updates
        step_size.mul_(sign).clamp_(step_size_min, step_size_max)

        # for dir<0, dfdx=0
        # for dir>=0 dfdx=dfdx
        grad = grad.clone(memory_format=torch.preserve_format)
        if capturable:
            grad.copy_(torch.where(sign.eq(etaminus), 0, grad))
        else:
            grad[sign.eq(etaminus)] = 0

        # update parameters
        param.addcmul_(grad.sign(), step_size, value=-1)
        prev.copy_(grad)
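
# Worked example (added commentary): one element of the update implemented by
# _single_tensor_rprop above, with etaplus=1.2 and etaminus=0.5. The numbers
# are made up for illustration.
#
#   prev = +0.8, grad = +0.3  ->  prev * grad > 0 (no sign flip):
#       step_size <- min(step_size * 1.2, step_size_max)
#       param     <- param - step_size * sign(grad);  prev <- grad
#   prev = +0.8, grad = -0.3  ->  prev * grad < 0 (sign flip, i.e. overshoot):
#       step_size <- max(step_size * 0.5, step_size_min)
#       grad is zeroed first, so the parameter does not move this step and
#       prev <- 0, guaranteeing the "no flip" branch (no further shrink) next time
#   prev =  0,   grad = -0.3  ->  prev * grad = 0:
#       step_size unchanged; param <- param - step_size * sign(grad); prev <- grad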


def _multi_tensor_rprop(
    params: list[Tensor],
    grads: list[Tensor],
    prevs: list[Tensor],
    step_sizes: list[Tensor],
    state_steps: list[Tensor],
    *,
    step_size_min: float,
    step_size_max: float,
    etaminus: float,
    etaplus: float,
    maximize: bool,
    capturable: bool,
    differentiable: bool,
    has_complex: bool,
):
    if len(params) == 0:
        return

    assert not differentiable, "_foreach ops don't support autograd"

    # If compiling, the compiler will handle cudagraph checks, see note
    # [torch.compile x capturable]
    if not torch.compiler.is_compiling() and capturable:
        capturable_supported_devices = _get_capturable_supported_devices()
        assert all(
            p.device.type == step.device.type
            and p.device.type in capturable_supported_devices
            for p, step in zip(params, state_steps)
        ), (
            f"If capturable=True, params and state_steps must be on supported "
            f"devices: {capturable_supported_devices}."
        )

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, prevs, step_sizes, state_steps]  # type: ignore[list-item]
    )
    for (
        grouped_params_,
        grouped_grads_,
        grouped_prevs_,
        grouped_step_sizes_,
        grouped_state_steps_,
    ), _ in grouped_tensors.values():
        grouped_params = cast(list[Tensor], grouped_params_)
        grouped_grads = cast(list[Tensor], grouped_grads_)
        grouped_prevs = cast(list[Tensor], grouped_prevs_)
        grouped_step_sizes = cast(list[Tensor], grouped_step_sizes_)
        grouped_state_steps = cast(list[Tensor], grouped_state_steps_)

        # Update steps
        # If steps are on CPU, foreach will fall back to the slow path, which is a
        # for-loop calling t.add(1) over and over. 1 would then be wrapped into a
        # Tensor over and over again, which is slower than wrapping it once now.
        # The alpha is required to ensure we go to the right overload.
        if not torch.compiler.is_compiling() and grouped_state_steps[0].is_cpu:
            torch._foreach_add_(
                grouped_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
            )
        else:
            torch._foreach_add_(grouped_state_steps, 1)

        # Handle complex params
        if has_complex:
            _view_as_real(
                grouped_params, grouped_grads, grouped_prevs, grouped_step_sizes
            )
        signs = torch._foreach_mul(grouped_grads, grouped_prevs)
        if maximize:
            torch._foreach_neg_(signs)

        # At this point, signs contains the product of the old and the new grad.
        # We need a copy of the new grads for the next step, and to save memory
        # we reuse grouped_prevs' storage, since the old values are no longer
        # needed once the product has been taken.
        torch._foreach_copy_(grouped_prevs, grouped_grads)
        if maximize:
            torch._foreach_neg_(grouped_prevs)
        grouped_grads = grouped_prevs

        # Map the sign of grad * prev onto the step-size multiplier:
        # same sign -> etaplus, flipped sign -> etaminus, zero -> 1.
        torch._foreach_sign_(signs)
        if capturable:
            for sign in signs:
                sign.copy_(torch.where(sign.gt(0), etaplus, sign))
                sign.copy_(torch.where(sign.lt(0), etaminus, sign))
                sign.copy_(torch.where(sign.eq(0), 1, sign))
        else:
            for sign in signs:
                sign[sign.gt(0)] = etaplus
                sign[sign.lt(0)] = etaminus
                sign[sign.eq(0)] = 1

        # update stepsizes with step size updates
        torch._foreach_mul_(grouped_step_sizes, signs)
        for step_size in grouped_step_sizes:
            step_size.clamp_(step_size_min, step_size_max)

        # for dir<0, dfdx=0
        # for dir>=0 dfdx=dfdx
        grouped_grads = list(grouped_grads)
        for i in range(len(grouped_grads)):
            grouped_grads[i].copy_(
                torch.where(signs[i].eq(etaminus), 0, grouped_grads[i])
            )

        # signs are no longer needed after this point
        del signs

        # update parameters
        grad_signs = [grad.sign() for grad in grouped_grads]
        torch._foreach_addcmul_(
            grouped_params, grad_signs, grouped_step_sizes, value=-1
        )

        # Logically grouped_prevs should now be updated to the new grads, but
        # that has already happened: the new grads were written through
        # grouped_prevs' memory above.
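
# Design note (added commentary): the multi-tensor path above batches each
# elementwise operation across every parameter of a (device, dtype) group with
# the private torch._foreach_* kernels, replacing len(params) small kernel
# launches per op with one fused dispatch. A minimal illustration:
#
#   >>> import torch
#   >>> ts = [torch.ones(2), torch.full((3,), 2.0)]
#   >>> torch._foreach_mul_(ts, 10)  # one call updates every tensor in place
#   >>> ts
#   [tensor([10., 10.]), tensor([20., 20., 20.])]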


@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_rprop)
def rprop(
    params: list[Tensor],
    grads: list[Tensor],
    prevs: list[Tensor],
    step_sizes: list[Tensor],
    state_steps: list[Tensor],
    # kwonly args with defaults are not supported by functions compiled with
    # torchscript issue #70627; setting these as kwargs for now as the
    # functional API is compiled by torch/distributed/optim
    foreach: Optional[bool] = None,
    capturable: bool = False,
    maximize: bool = False,
    differentiable: bool = False,
    has_complex: bool = False,
    *,
    step_size_min: float,
    step_size_max: float,
    etaminus: float,
    etaplus: float,
):
    r"""Functional API that performs rprop algorithm computation.

    See :class:`~torch.optim.Rprop` for details.
    """
    if not torch.compiler.is_compiling() and not all(
        isinstance(t, torch.Tensor) for t in state_steps
    ):
        raise RuntimeError(
            "API has changed, `state_steps` argument must contain a list of singleton tensors"
        )

    if foreach is None:
        _, foreach = _default_to_fused_or_foreach(
            params, differentiable, use_fused=False
        )

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_rprop
    else:
        func = _single_tensor_rprop

    func(
        params,
        grads,
        prevs,
        step_sizes,
        state_steps,
        step_size_min=step_size_min,
        step_size_max=step_size_max,
        etaminus=etaminus,
        etaplus=etaplus,
        capturable=capturable,
        maximize=maximize,
        differentiable=differentiable,
        has_complex=has_complex,
    )
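
# Example (added commentary): driving the functional API directly rather than
# through the Rprop class. The tensors below are hypothetical stand-ins for the
# per-parameter state that Rprop._init_group normally creates and owns.
#
#   >>> import torch
#   >>> from torch.optim.rprop import rprop
#   >>> p = torch.randn(3)                   # parameter (detached)
#   >>> g = torch.randn(3)                   # its full-batch gradient
#   >>> prev = torch.zeros(3)                # previous-gradient state
#   >>> step_size = torch.full((3,), 0.01)   # per-element step sizes, start at lr
#   >>> step = torch.tensor(0.0)             # step counter state
#   >>> rprop([p], [g], [prev], [step_size], [step],
#   ...       step_size_min=1e-6, step_size_max=50.0,
#   ...       etaminus=0.5, etaplus=1.2)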