"""Implementation for the RMSprop algorithm."""
from typing import cast, Optional, Union

import torch
from torch import Tensor

from .optimizer import (
    _capturable_doc, _default_to_fused_or_foreach, _differentiable_doc,
    _disable_dynamo_if_unsupported, _foreach_doc,
    _get_capturable_supported_devices, _get_scalar_dtype, _maximize_doc,
    _params_doc, _use_grad_for_differentiable, _view_as_real, Optimizer, ParamsT,
)

__all__ = ["RMSprop", "rmsprop"]


class RMSprop(Optimizer):
    def __init__(
        self,
        params: ParamsT,
        lr: Union[float, Tensor] = 1e-2,
        alpha: float = 0.99,
        eps: float = 1e-8,
        weight_decay: float = 0,
        momentum: float = 0,
        centered: bool = False,
        capturable: bool = False,
        foreach: Optional[bool] = None,
        maximize: bool = False,
        differentiable: bool = False,
    ):
        if isinstance(lr, Tensor) and lr.numel() != 1:
            raise ValueError("Tensor lr must be 1-element")
        if not 0.0 <= lr:
            raise ValueError(f"Invalid learning rate: {lr}")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps}")
        if not 0.0 <= momentum:
            raise ValueError(f"Invalid momentum value: {momentum}")
        if not 0.0 <= weight_decay:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")
        if not 0.0 <= alpha:
            raise ValueError(f"Invalid alpha value: {alpha}")

        defaults = dict(
            lr=lr, alpha=alpha, eps=eps, momentum=momentum, centered=centered,
            weight_decay=weight_decay, capturable=capturable, foreach=foreach,
            maximize=maximize, differentiable=differentiable,
        )
        super().__init__(params, defaults)

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault("momentum", 0)
            group.setdefault("centered", False)
            group.setdefault("foreach", None)
            group.setdefault("maximize", False)
            group.setdefault("differentiable", False)
            group.setdefault("capturable", False)
            for p in group["params"]:
                p_state = self.state.get(p, [])
                if len(p_state) != 0 and not torch.is_tensor(p_state["step"]):
                    step_val = float(p_state["step"])
                    p_state["step"] = (
                        torch.tensor(step_val, dtype=_get_scalar_dtype(), device=p.device)
                        if group["capturable"]
                        else torch.tensor(step_val, dtype=_get_scalar_dtype())
                    )

    def _init_group(
        self, group, params_with_grad, grads, square_avgs,
        momentum_buffer_list, grad_avgs, state_steps,
    ):
        has_complex = False
        for p in group["params"]:
            if p.grad is None:
                continue
            has_complex |= torch.is_complex(p)
            params_with_grad.append(p)

            if p.grad.is_sparse:
                raise RuntimeError("RMSprop does not support sparse gradients")
            grads.append(p.grad)

            state = self.state[p]

            # State initialization
            if len(state) == 0:
                state["step"] = (
                    torch.zeros((), dtype=_get_scalar_dtype(), device=p.device)
                    if group["capturable"]
                    else torch.zeros((), dtype=_get_scalar_dtype())
                )
                state["square_avg"] = torch.zeros_like(
                    p, memory_format=torch.preserve_format
                )
                if group["momentum"] > 0:
                    state["momentum_buffer"] = torch.zeros_like(
                        p, memory_format=torch.preserve_format
                    )
                if group["centered"]:
                    state["grad_avg"] = torch.zeros_like(
                        p, memory_format=torch.preserve_format
                    )

            square_avgs.append(state["square_avg"])
            state_steps.append(state["step"])

            if group["momentum"] > 0:
                momentum_buffer_list.append(state["momentum_buffer"])
            if group["centered"]:
                grad_avgs.append(state["grad_avg"])

        return has_complex

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        self._cuda_graph_capture_health_check()

        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad: list[Tensor] = []
            grads: list[Tensor] = []
            square_avgs: list[Tensor] = []
            grad_avgs: list[Tensor] = []
            momentum_buffer_list: list[Tensor] = []
            state_steps: list[Tensor] = []

            has_complex = self._init_group(
                group, params_with_grad, grads, square_avgs,
                momentum_buffer_list, grad_avgs, state_steps,
            )

            rmsprop(
                params_with_grad, grads, square_avgs, grad_avgs,
                momentum_buffer_list, state_steps,
                lr=group["lr"], alpha=group["alpha"], eps=group["eps"],
                weight_decay=group["weight_decay"], momentum=group["momentum"],
                centered=group["centered"], foreach=group["foreach"],
                maximize=group["maximize"], differentiable=group["differentiable"],
                capturable=group["capturable"], has_complex=has_complex,
            )

        return loss


RMSprop.__doc__ = (
    r"""Implements RMSprop algorithm.

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \alpha \text{ (alpha)}, \: \gamma \text{ (lr)},
                \: \theta_0 \text{ (params)}, \: f(\theta) \text{ (objective)}                   \\
            &\hspace{13mm}   \lambda \text{ (weight decay)},\: \mu \text{ (momentum)},
                \: centered, \: \epsilon \text{ (epsilon)}                                       \\
            &\textbf{initialize} : v_0 \leftarrow 0 \text{ (square average)}, \:
                \textbf{b}_0 \leftarrow 0 \text{ (buffer)}, \: g^{ave}_0 \leftarrow 0     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\
            &\hspace{5mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})           \\
            &\hspace{5mm}if \: \lambda \neq 0                                                    \\
            &\hspace{10mm} g_t \leftarrow g_t + \lambda  \theta_{t-1}                            \\
            &\hspace{5mm}v_t           \leftarrow   \alpha v_{t-1} + (1 - \alpha) g^2_t
                \hspace{8mm}                                                                     \\
            &\hspace{5mm} \tilde{v_t} \leftarrow v_t                                             \\
            &\hspace{5mm}if \: centered                                                          \\
            &\hspace{10mm} g^{ave}_t \leftarrow g^{ave}_{t-1} \alpha + (1-\alpha) g_t            \\
            &\hspace{10mm} \tilde{v_t} \leftarrow \tilde{v_t} -  \big(g^{ave}_{t} \big)^2        \\
            &\hspace{5mm}if \: \mu > 0                                                           \\
            &\hspace{10mm} \textbf{b}_t\leftarrow \mu \textbf{b}_{t-1} +
                g_t/ \big(\sqrt{\tilde{v_t}} +  \epsilon \big)                                   \\
            &\hspace{10mm} \theta_t \leftarrow \theta_{t-1} - \gamma \textbf{b}_t                \\
            &\hspace{5mm} else                                                                   \\
            &\hspace{10mm}\theta_t      \leftarrow   \theta_{t-1} -
                \gamma  g_t/ \big(\sqrt{\tilde{v_t}} + \epsilon \big)  \hspace{3mm}              \\
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}
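
    In the default configuration (:math:`\mu = 0`, non-centered) the update above reduces to
    :math:`v_t = \alpha v_{t-1} + (1 - \alpha) g_t^2` followed by
    :math:`\theta_t = \theta_{t-1} - \gamma \, g_t / (\sqrt{v_t} + \epsilon)`.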

    For further details regarding the algorithm we refer to
    `lecture notes <https://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`_ by G. Hinton,
    and for the centered version to `Generating Sequences
    With Recurrent Neural Networks <https://arxiv.org/pdf/1308.0850v5.pdf>`_.
    The implementation here takes the square root of the gradient average before
    adding epsilon (note that TensorFlow interchanges these two operations). The effective
    learning rate is thus :math:`\gamma/(\sqrt{v} + \epsilon)` where :math:`\gamma`
    is the scheduled learning rate and :math:`v` is the weighted moving average
    of the squared gradient.
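
    Example (a minimal usage sketch; ``model``, ``loss_fn``, ``input`` and
    ``target`` are stand-ins for user code, not names defined by this module):
        >>> # xdoctest: +SKIP
        >>> optimizer = torch.optim.RMSprop(model.parameters(), lr=1e-2, momentum=0.9)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()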
    """
    + rf"""
    Args:
        {_params_doc}
        lr (float, Tensor, optional): learning rate (default: 1e-2)
        alpha (float, optional): smoothing constant (default: 0.99)
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        momentum (float, optional): momentum factor (default: 0)
        centered (bool, optional): if ``True``, compute the centered RMSprop,
            where the gradient is normalized by an estimation of its variance
        {_capturable_doc}
        {_foreach_doc}
        {_maximize_doc}
        {_differentiable_doc}

    """
)


def _single_tensor_rmsprop(
    params: list[Tensor], grads: list[Tensor], square_avgs: list[Tensor],
    grad_avgs: list[Tensor], momentum_buffer_list: list[Tensor],
    state_steps: list[Tensor], *, lr: float, alpha: float, eps: float,
    weight_decay: float, momentum: float, centered: bool, maximize: bool,
    differentiable: bool, capturable: bool, has_complex: bool,
):
    for i, param in enumerate(params):
        step = state_steps[i]

        # If compiling, the compiler will handle cudagraph checks
        if not torch.compiler.is_compiling() and capturable:
            capturable_supported_devices = _get_capturable_supported_devices()
            assert (
                param.device.type == step.device.type
                and param.device.type in capturable_supported_devices
            ), (
                "If capturable=True, params and state_steps must be on supported devices: "
                f"{capturable_supported_devices}."
            )

        grad = grads[i]
        grad = grad if not maximize else -grad
        square_avg = square_avgs[i]

        step += 1

        if weight_decay != 0:
            grad = grad.add(param, alpha=weight_decay)

        is_complex_param = torch.is_complex(param)
        if is_complex_param:
            param = torch.view_as_real(param)
            grad = torch.view_as_real(grad)
            square_avg = torch.view_as_real(square_avg)

        square_avg.mul_(alpha).addcmul_(grad, grad, value=1 - alpha)

        if centered:
            grad_avg = grad_avgs[i]
            if is_complex_param:
                grad_avg = torch.view_as_real(grad_avg)
            grad_avg.lerp_(grad, 1 - alpha)
            avg = square_avg.addcmul(grad_avg, grad_avg, value=-1).sqrt_()
        else:
            avg = square_avg.sqrt()

        if differentiable:
            avg = avg.add(eps)
        else:
            avg = avg.add_(eps)

        if momentum > 0:
            buf = momentum_buffer_list[i]
            if is_complex_param:
                buf = torch.view_as_real(buf)
            buf.mul_(momentum).addcdiv_(grad, avg)
            param.add_(buf, alpha=-lr)
        else:
            param.addcdiv_(grad, avg, value=-lr)


def _multi_tensor_rmsprop(
    params: list[Tensor], grads: list[Tensor], square_avgs: list[Tensor],
    grad_avgs: list[Tensor], momentum_buffer_list: list[Tensor],
    state_steps: list[Tensor], *, lr: float, alpha: float, eps: float,
    weight_decay: float, momentum: float, centered: bool, maximize: bool,
    differentiable: bool, capturable: bool, has_complex: bool,
):
    if len(params) == 0:
        return

    assert not differentiable, "_foreach ops don't support autograd"

    # If compiling, the compiler will handle cudagraph checks
    if not torch.compiler.is_compiling() and capturable:
        capturable_supported_devices = _get_capturable_supported_devices()
        assert all(
            p.device.type == step.device.type
            and p.device.type in capturable_supported_devices
            for p, step in zip(params, state_steps)
        ), (
            "If capturable=True, params and state_steps must be on supported devices: "
            f"{capturable_supported_devices}."
        )

    grouped_tensors = Optimizer._group_tensors_by_device_and_dtype(
        [params, grads, square_avgs, grad_avgs, momentum_buffer_list, state_steps]
    )
    for (
        grouped_params_,
        grouped_grads_,
        grouped_square_avgs_,
        grouped_grad_avgs_,
        grouped_momentum_buffer_list_,
        grouped_state_steps_,
    ), _ in grouped_tensors.values():
        grouped_params = cast(list[Tensor], grouped_params_)
        grouped_grads = cast(list[Tensor], grouped_grads_)
        grouped_square_avgs = cast(list[Tensor], grouped_square_avgs_)
        grouped_state_steps = cast(list[Tensor], grouped_state_steps_)

        if has_complex:
            state_and_grads = [grouped_grads, grouped_square_avgs]
            if momentum > 0:
                grouped_momentum_buffer_list = cast(
                    list[Tensor], grouped_momentum_buffer_list_
                )
                state_and_grads.append(grouped_momentum_buffer_list)
            if centered:
                grouped_grad_avgs = cast(list[Tensor], grouped_grad_avgs_)
                state_and_grads.append(grouped_grad_avgs)
            _view_as_real(grouped_params, *state_and_grads)

        if maximize:
            grouped_grads = torch._foreach_neg(grouped_grads)

        # Update steps
        # If steps are on CPU, foreach will fall back to the slow path, which is a
        # for-loop calling t.add(1) over and over. 1 will then be wrapped into a
        # Tensor over and over again, which is slower than just wrapping it once.
        # The alpha is required to assure we go to the right overload.
        if not torch.compiler.is_compiling() and grouped_state_steps[0].is_cpu:
            torch._foreach_add_(
                grouped_state_steps, torch.tensor(1.0, device="cpu"), alpha=1.0
            )
        else:
            torch._foreach_add_(grouped_state_steps, 1)

        if weight_decay != 0:
            # Re-use the intermediate memory (grouped_grads) already allocated for maximize
            if maximize:
                torch._foreach_add_(grouped_grads, grouped_params, alpha=weight_decay)
            else:
                grouped_grads = torch._foreach_add(
                    grouped_grads, grouped_params, alpha=weight_decay
                )

        torch._foreach_mul_(grouped_square_avgs, alpha)
        torch._foreach_addcmul_(
            grouped_square_avgs, grouped_grads, grouped_grads, value=1 - alpha
        )

        if centered:
            grouped_grad_avgs = cast(list[Tensor], grouped_grad_avgs_)
            torch._foreach_lerp_(grouped_grad_avgs, grouped_grads, 1 - alpha)
            avg = torch._foreach_addcmul(
                grouped_square_avgs, grouped_grad_avgs, grouped_grad_avgs, value=-1
            )
            torch._foreach_sqrt_(avg)
            torch._foreach_add_(avg, eps)
        else:
            avg = torch._foreach_sqrt(grouped_square_avgs)
            torch._foreach_add_(avg, eps)

        if momentum > 0:
            grouped_momentum_buffer_list = cast(
                list[Tensor], grouped_momentum_buffer_list_
            )
            torch._foreach_mul_(grouped_momentum_buffer_list, momentum)
            torch._foreach_addcdiv_(grouped_momentum_buffer_list, grouped_grads, avg)
            # If LR is a tensor, the else branch will internally call item()
            # which will cause silent incorrectness if we are capturing
            if capturable and isinstance(lr, torch.Tensor):
                momentum_lr = torch._foreach_mul(grouped_momentum_buffer_list, -lr)
                torch._foreach_add_(grouped_params, momentum_lr)
            else:
                torch._foreach_add_(
                    grouped_params, grouped_momentum_buffer_list, alpha=-lr
                )
        else:
            # If LR is a tensor, the else branch will internally call item()
            # which will cause silent incorrectness if we are capturing
            if capturable and isinstance(lr, torch.Tensor):
                torch._foreach_div_(avg, -lr)
                torch._foreach_addcdiv_(grouped_params, grouped_grads, avg)
            else:
                torch._foreach_addcdiv_(grouped_params, grouped_grads, avg, value=-lr)


@_disable_dynamo_if_unsupported(single_tensor_fn=_single_tensor_rmsprop)
def rmsprop(
    params: list[Tensor],
    grads: list[Tensor],
    square_avgs: list[Tensor],
    grad_avgs: list[Tensor],
    momentum_buffer_list: list[Tensor],
    state_steps: list[Tensor],
    # kwonly args with defaults are not supported by functions compiled with
    # torchscript issue #70627; kept as positional-or-keyword for now since the
    # functional API is compiled by torch/distributed/optim
    foreach: Optional[bool] = None,
    maximize: bool = False,
    differentiable: bool = False,
    capturable: bool = False,
    has_complex: bool = False,
    *,
    lr: float,
    alpha: float,
    eps: float,
    weight_decay: float,
    momentum: float,
    centered: bool,
):
    r"""Functional API that performs rmsprop algorithm computation.

    See :class:`~torch.optim.RMSprop` for details.
    """
    # this check is slow during compilation, so we skip it
    # if it's strictly needed we can add this check back in dynamo
    if not torch.compiler.is_compiling() and not all(
        isinstance(t, torch.Tensor) for t in state_steps
    ):
        raise RuntimeError(
            "API has changed, `state_steps` argument must contain a list of singleton tensors"
        )

    if foreach is None:
        _, foreach = _default_to_fused_or_foreach(
            params, differentiable, use_fused=False
        )

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_rmsprop
    else:
        func = _single_tensor_rmsprop

    func(
        params, grads, square_avgs, grad_avgs, momentum_buffer_list, state_steps,
        lr=lr, alpha=alpha, eps=eps, weight_decay=weight_decay, momentum=momentum,
        centered=centered, maximize=maximize, capturable=capturable,
        differentiable=differentiable, has_complex=has_complex,
    )
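

# Illustrative sketch (not part of the upstream module): how the functional
# entry point above can be driven directly with manually allocated state.
# `param`, `grad`, `square_avg`, and `step` below are hypothetical placeholders.
#
#     param = torch.zeros(3)
#     grad = torch.full((3,), 0.5)
#     square_avg = torch.zeros_like(param)
#     step = torch.zeros(())
#     rmsprop([param], [grad], [square_avg], [], [], [step],
#             lr=1e-2, alpha=0.99, eps=1e-8, weight_decay=0.0,
#             momentum=0.0, centered=False)
#     # param is updated in place using the freshly updated square_avg:
#     # param -= lr * grad / (sqrt(square_avg) + eps)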