"""Implementation for Stochastic Gradient Descent optimizer."""
from typing import cast, Optional, Union

import torch
from torch import Tensor

from .optimizer import (
    _default_to_fused_or_foreach,
    _device_dtype_check_for_fused,
    _differentiable_doc,
    _foreach_doc,
    _fused_doc,
    _maximize_doc,
    _params_doc,
    _use_grad_for_differentiable,
    DeviceDict,
    Optimizer,
    ParamsT,
)

__all__ = ["SGD", "sgd"]


class SGD(Optimizer):
    def __init__(
        self,
        params: ParamsT,
        lr: Union[float, Tensor] = 1e-3,
        momentum: float = 0,
        dampening: float = 0,
        weight_decay: float = 0,
        nesterov: bool = False,
        *,
        maximize: bool = False,
        foreach: Optional[bool] = None,
        differentiable: bool = False,
        fused: Optional[bool] = None,
    ) -> None:
        if isinstance(lr, Tensor) and lr.numel() != 1:
            raise ValueError("Tensor lr must be 1-element")
        if lr < 0.0:
            raise ValueError(f"Invalid learning rate: {lr}")
        if momentum < 0.0:
            raise ValueError(f"Invalid momentum value: {momentum}")
        if weight_decay < 0.0:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")

        defaults = dict(
            lr=lr,
            momentum=momentum,
            dampening=dampening,
            weight_decay=weight_decay,
            nesterov=nesterov,
            maximize=maximize,
            foreach=foreach,
            differentiable=differentiable,
            fused=fused,
        )
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError(
                "Nesterov momentum requires a momentum and zero dampening"
            )
        super().__init__(params, defaults)

        if fused:
            self._step_supports_amp_scaling = True
            self._need_device_dtype_check_for_fused = True
            if differentiable:
                raise RuntimeError("`fused` does not support `differentiable`")
            if foreach:
                raise RuntimeError("`fused` and `foreach` cannot be `True` together.")

    def __setstate__(self, state):
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault("nesterov", False)
            group.setdefault("maximize", False)
            group.setdefault("foreach", None)
            group.setdefault("differentiable", False)
            group.setdefault("fused", False)

    def _init_group(self, group, params, grads, momentum_buffer_list):
        has_sparse_grad = False

        for p in group["params"]:
            if p.grad is not None:
                if group["fused"] and getattr(
                    self, "_need_device_dtype_check_for_fused", True
                ):
                    _device_dtype_check_for_fused(p)
                    self._need_device_dtype_check_for_fused = False
                params.append(p)
                grads.append(p.grad)
                if p.grad.is_sparse:
                    has_sparse_grad = True

                if group["momentum"] != 0:
                    state = self.state[p]
                    momentum_buffer_list.append(state.get("momentum_buffer"))

        return has_sparse_grad

    @_use_grad_for_differentiable
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params: list[Tensor] = []
            grads: list[Tensor] = []
            momentum_buffer_list: list[Optional[Tensor]] = []

            has_sparse_grad = self._init_group(
                group, params, grads, momentum_buffer_list
            )

            sgd(
                params,
                grads,
                momentum_buffer_list,
                weight_decay=group["weight_decay"],
                momentum=group["momentum"],
                lr=group["lr"],
                dampening=group["dampening"],
                nesterov=group["nesterov"],
                maximize=group["maximize"],
                has_sparse_grad=has_sparse_grad,
                foreach=group["foreach"],
                fused=group["fused"],
                grad_scale=getattr(self, "grad_scale", None),
                found_inf=getattr(self, "found_inf", None),
            )

            if group["momentum"] != 0:
                # update momentum_buffers in state
                for p, momentum_buffer in zip(params, momentum_buffer_list):
                    state = self.state[p]
                    state["momentum_buffer"] = momentum_buffer

        return loss


SGD.__doc__ = (
    r"""Implements stochastic gradient descent (optionally with momentum).

    .. math::
       \begin{aligned}
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{input}      : \gamma \text{ (lr)}, \: \theta_0 \text{ (params)}, \: f(\theta)
                \text{ (objective)}, \: \lambda \text{ (weight decay)},                          \\
            &\hspace{13mm} \:\mu \text{ (momentum)}, \:\tau \text{ (dampening)},
            \:\textit{ nesterov,}\:\textit{ maximize}                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                                 \\
            &\textbf{for} \: t=1 \: \textbf{to} \: \ldots \: \textbf{do}                         \\
            &\hspace{5mm}g_t           \leftarrow   \nabla_{\theta} f_t (\theta_{t-1})           \\
            &\hspace{5mm}\textbf{if} \: \lambda \neq 0                                           \\
            &\hspace{10mm} g_t \leftarrow g_t + \lambda  \theta_{t-1}                            \\
            &\hspace{5mm}\textbf{if} \: \mu \neq 0                                               \\
            &\hspace{10mm}\textbf{if} \: t > 1                                                   \\
            &\hspace{15mm} \textbf{b}_t \leftarrow \mu \textbf{b}_{t-1} + (1-\tau) g_t           \\
            &\hspace{10mm}\textbf{else}                                                          \\
            &\hspace{15mm} \textbf{b}_t \leftarrow g_t                                           \\
            &\hspace{10mm}\textbf{if} \: \textit{nesterov}                                       \\
            &\hspace{15mm} g_t \leftarrow g_{t} + \mu \textbf{b}_t                             \\
            &\hspace{10mm}\textbf{else}                                                   \\[-1.ex]
            &\hspace{15mm} g_t  \leftarrow  \textbf{b}_t                                         \\
            &\hspace{5mm}\textbf{if} \: \textit{maximize}                                          \\
            &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} + \gamma g_t                   \\[-1.ex]
            &\hspace{5mm}\textbf{else}                                                    \\[-1.ex]
            &\hspace{10mm}\theta_t \leftarrow \theta_{t-1} - \gamma g_t                   \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
            &\bf{return} \:  \theta_t                                                     \\[-1.ex]
            &\rule{110mm}{0.4pt}                                                          \\[-1.ex]
       \end{aligned}

    Nesterov momentum is based on the formula from
    `On the importance of initialization and momentum in deep learning`__.
    """
    + rf"""
    Args:
        {_params_doc}
        lr (float, Tensor, optional): learning rate (default: 1e-3)
        momentum (float, optional): momentum factor (default: 0)
        dampening (float, optional): dampening for momentum (default: 0)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        nesterov (bool, optional): enables Nesterov momentum. Only applicable
            when momentum is non-zero. (default: False)
        {_maximize_doc}
        {_foreach_doc}
        {_differentiable_doc}
        {_fused_doc}
    """
    + r"""

    Example:
        >>> # xdoctest: +SKIP
        >>> optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()

    __ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf

    .. note::
        The implementation of SGD with Momentum/Nesterov subtly differs from
        Sutskever et al. and implementations in some other frameworks.

        Considering the specific case of Momentum, the update can be written as

        .. math::
            \begin{aligned}
                v_{t+1} & = \mu * v_{t} + g_{t+1}, \\
                p_{t+1} & = p_{t} - \text{lr} * v_{t+1},
            \end{aligned}

        where :math:`p`, :math:`g`, :math:`v` and :math:`\mu` denote the
        parameters, gradient, velocity, and momentum respectively.

        This is in contrast to Sutskever et al. and
        other frameworks which employ an update of the form

        .. math::
            \begin{aligned}
                v_{t+1} & = \mu * v_{t} + \text{lr} * g_{t+1}, \\
                p_{t+1} & = p_{t} - v_{t+1}.
            \end{aligned}

        The Nesterov version is analogously modified.

        Moreover, the initial value of the momentum buffer is set to the
        gradient value at the first step. This is in contrast to some other
        frameworks that initialize it to all zeros.
    """
)


def sgd(
    params: list[Tensor],
    d_p_list: list[Tensor],
    momentum_buffer_list: list[Optional[Tensor]],
    # keyword-only args with defaults are not supported by torchscript-compiled
    # functions, so the implementation selectors stay positional-or-keyword here
    has_sparse_grad: bool = False,
    foreach: Optional[bool] = None,
    fused: Optional[bool] = None,
    grad_scale: Optional[Tensor] = None,
    found_inf: Optional[Tensor] = None,
    *,
    weight_decay: float,
    momentum: float,
    lr: float,
    dampening: float,
    nesterov: bool,
    maximize: bool,
):
    r"""Functional API that performs SGD algorithm computation.

    See :class:`~torch.optim.SGD` for details.
    """
    # Pick an implementation only when the caller did not specify one; when
    # scripting, the Optional handling is unavailable, so fall back to the
    # single-tensor loop.
    if foreach is None and fused is None:
        if not torch.jit.is_scripting():
            fused, foreach = _default_to_fused_or_foreach(
                params, differentiable=False, use_fused=False
            )
        else:
            foreach = False
            fused = False
    if foreach is None:
        foreach = False
    if fused is None:
        fused = False

    if foreach and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with foreach optimizers")
    if fused and torch.jit.is_scripting():
        raise RuntimeError("torch.jit.script not supported with fused optimizers")

    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_sgd
    elif fused and not torch.jit.is_scripting():
        func = _fused_sgd
    else:
        func = _single_tensor_sgd

    func(
        params,
        d_p_list,
        momentum_buffer_list,
        weight_decay=weight_decay,
        momentum=momentum,
        lr=lr,
        dampening=dampening,
        nesterov=nesterov,
        has_sparse_grad=has_sparse_grad,
        maximize=maximize,
        grad_scale=grad_scale,
        found_inf=found_inf,
    )
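
# A minimal usage sketch for the implementation selectors handled above
# (hedged: `model`, `inp`, `target`, and `loss_fn` are placeholders, and
# `fused=True` additionally assumes the parameters live on a device with a
# fused kernel, e.g. CUDA):
#
#     opt = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, foreach=True)
#     loss_fn(model(inp), target).backward()
#     opt.step()
#     opt.zero_grad()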
J t | D ]\}}|
s|| n||  }|dkrAt|tr:|jr2|| |}n|j||d}n|j||d}|dkrq|| }|d u rYt| }|||< n|	|j
|d| d |	ro|j||d}n|}t|tr|jr|j||dd q|j
|| d q|j
|| d qd S )Nr   alphar   )value)	enumerater   r   requires_gradaddcmul_cloneaddr@   detachmul_add_)r   r9   r:   r>   r?   r   r   r   r   r   r   r;   iparamr4   bufr,   r,   r-   rS   <  s0   


rS   c                C   sB  |d u r|d u s
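

# Illustrative sketch (not part of the upstream API): the per-tensor update
# performed by `_single_tensor_sgd` above, written out for one dense parameter.
# The function name and signature are hypothetical; maximize and sparse
# gradients are omitted, and the dampening/nesterov handling mirrors the
# formula in the class docstring.
def _example_single_param_update(
    param: Tensor,
    grad: Tensor,
    buf: Optional[Tensor],
    *,
    lr: float,
    momentum: float,
    dampening: float,
    weight_decay: float,
    nesterov: bool,
) -> Optional[Tensor]:
    """Apply one SGD step in place on ``param``; return the momentum buffer."""
    if weight_decay != 0:
        grad = grad.add(param, alpha=weight_decay)
    if momentum != 0:
        if buf is None:
            buf = grad.clone().detach()  # first step: buffer starts as the gradient
        else:
            buf.mul_(momentum).add_(grad, alpha=1 - dampening)
        grad = grad.add(buf, alpha=momentum) if nesterov else buf
    param.add_(grad, alpha=-lr)
    return buf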
J t | dkrd S tj| ||gdd}| D ]\\}}}}ttt |}ttt |}|o?tdd |D }|
rGt	|}|dkr^|
rVtj
|||d ntj|||d}|dkrg }d}tt |D ]}|| d u rxd} n|tt||  ql|rt|| tj
||d| d n?g }tt |D ]6}|| d u rt||   } ||< ||| < ntt|| }||j|| d| d || q|	rtj
|||d n|}|st|tjrtj rt|| }t
|| q tj
||| d q tt |D ]}|| j|| | d qq d S )	Nr   Twith_indicesc                 s   s    | ]}|j V  qd S rF   )r7   ).0r4   r,   r,   r-   	<genexpr>  s    
z$_multi_tensor_sgd.<locals>.<genexpr>rU   Fr   )lenr   "_group_tensors_by_device_and_dtypevaluesr   listr   anyr@   _foreach_neg_foreach_add__foreach_addranger6   _foreach_mul_r\   r^   r_   r`   r   compileris_compiling_foreach_mul)r   r9   r:   r>   r?   r   r   r   r   r   r   r;   grouped_tensorsdevice_params_device_grads_device_momentum_buffer_listindicesdevice_paramsdevice_gradsdevice_has_sparse_gradbufsall_states_with_momentum_bufferra   rc   
grads_x_lrr,   r,   r-   rQ   s  st   

rQ   returnc                C   sP  | sd S |r
t d|d ur|j|ini }|d ur|j|ini }|dk}tdd |D o/| }|rBt|D ]\}}t|||< q6tj| ||gdd}| D ]U\\}}\\}}}}t	t
t |}t	t
t |}d\}}|d urz||||}|d ur|d ur||||}tj|||rg nt	t
t ||||||	|
|||d qPd S )	Nz.`_fused_sgd` does not support sparse gradientsr   c                 s   s    | ]}|d u V  qd S rF   r,   )rf   tr,   r,   r-   rg     s    z_fused_sgd.<locals>.<genexpr>Frd   )NN)	r   r   r   r   r   r   is_first_stepr>   r?   )r'   deviceallrY   r@   
empty_liker   ri   itemsr   rk   r   r0   to_fused_sgd_)r   r9   r:   r>   r?   r   r   r   r   r   r   r;   grad_scale_dictfound_inf_dictno_momentum_bufferr   ra   gru   r   _rv   rw   rx   rz   r{   device_grad_scaledevice_found_infr,   r,   r-   rR     s^   

rR   )FNNNN)__doc__typingr   r   r   r@   r   	optimizerr   r   r	   r
   r   r   r   r   r   r   r   __all__r   rk   rK   rJ   r   rS   rQ   rR   r,   r,   r,   r-   <module>   s  4~#	
1e
	

F
	

7
	

]
	

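
# Illustrative note (not from the upstream file): `_fused_sgd` is the only path
# that consumes `grad_scale`/`found_inf`, which `torch.cuda.amp.GradScaler`
# supplies when it sees `_step_supports_amp_scaling` set on the optimizer (done
# in `SGD.__init__` above when `fused=True`).  A hedged usage sketch, assuming
# CUDA and a `model`/`loss_fn`/`inp`/`target` defined elsewhere:
#
#     scaler = torch.cuda.amp.GradScaler()
#     opt = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, fused=True)
#     with torch.autocast("cuda"):
#         loss = loss_fn(model(inp), target)
#     scaler.scale(loss).backward()
#     scaler.step(opt)   # gradients are unscaled inside the fused kernel
#     scaler.update()
#     opt.zero_grad()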