o
    hY                     @   sh  d Z ddlZddlZddlmZ ddlmZmZ ddlZddl	m
Z
 ddlmZmZ ddlmZmZ dd	lmZ dd
lmZ ddlmZ eeZdUddZdVde
defddZde
fddZdedefddZdVde
dedefddZdededefddZ dVddZ!dededed e"fd!d"Z#	dWde
deded e"def
d$d%Z$dededed efd&d'Z%	dXde
deded edef
d(d)Z&dededed*e"d+e"d,efd-d.Z'	dYd1d2Z(dd3deded4ee fd5d6Z)	dZde
ded4ee defd7d8Z*d9d:dededed e"d;e"f
d<d"Z#	#			d[de
deded e"ded=ee" d;ee" fd>d?Z+deded@edAedBe,dCe,dDe"d e"fdEdFZ-			G	H		#	d\de
dedAedee d@ee dBe,dCe,dDe"d e"defdIdJZ.ej/e!ej0e$ej1e&ej2e(ej3eej4eej5e*ej6eej7e+ej8e.i
Z9			d]dKee,ef de
dee dee dLee: f
dMdNZ;G dOdP dPe
Z<G dQdR dReZ=d^dSdTZ>dS )_z$PyTorch optimization for BERT model.    N)partial)OptionalUnion)	Optimizer)LambdaLRReduceLROnPlateau   )LayerWiseDummyOptimizerLayerWiseDummyScheduler)SchedulerType)logging)require_versionc                 C   s   dS Nr    _r   r   m/var/www/html/construction_image-detection-poc/venv/lib/python3.10/site-packages/transformers/optimization.py_get_constant_lambda"      r   	optimizer
last_epochc                 C   s   t | t|dS )a  
    Create a schedule with a constant learning rate, using the learning rate set in optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    r   )r   r   )r   r   r   r   r   get_constant_schedule&   s   r   c                 K   s   t | fi |S )a  
    Create a schedule with a constant learning rate that decreases when a metric has stopped improving.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        kwargs (`dict`, *optional*):
            Extra parameters to be passed to the scheduler. See `torch.optim.lr_scheduler.ReduceLROnPlateau`
            for possible parameters.

    Return:
        `torch.optim.lr_scheduler.ReduceLROnPlateau` with the appropriate schedule.
    )r   )r   kwargsr   r   r   get_reduce_on_plateau_schedule7   s   r   current_stepnum_warmup_stepsc                C   s"   | |k rt | t td| S dS )N      ?floatmax)r   r   r   r   r   ,_get_constant_schedule_with_warmup_lr_lambdaI   s   r"   c                 C   s   t t|d}t| ||dS )ad  
    Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate
    increases linearly between 0 and the initial lr set in the optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase.
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    r   r   )r   r"   r   )r   r   r   	lr_lambdar   r   r   !get_constant_schedule_with_warmupO   s   r%   num_training_stepsc                C   sB   | |k rt | t td| S tdt ||  t td||  S )Nr           r   )r   r   r&   r   r   r   *_get_linear_schedule_with_warmup_lr_lambdad   s   $r(   c                 C   s   t t||d}t| ||S )a  
    Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after
    a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase.
        num_training_steps (`int`):
            The total number of training steps.
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    r   r&   )r   r(   r   )r   r   r&   r   r$   r   r   r   get_linear_schedule_with_warmupj   s   r*   
num_cyclesc             	   C   sf   | |k rt | t td| S t | | t td||  }tdddttjt | d |   S )Nr   r'         ?r          @r    r!   mathcospir   r   r&   r+   progressr   r   r   *_get_cosine_schedule_with_warmup_lr_lambda   s   *r4   r,   c                 C      t t|||d}t| ||S )a  
    Create a schedule with a learning rate that decreases following the values of the cosine function between the
    initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the
    initial lr set in the optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase.
        num_training_steps (`int`):
            The total number of training steps.
        num_cycles (`float`, *optional*, defaults to 0.5):
            The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0
            following a half-cosine).
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    r   r&   r+   )r   r4   r   r   r   r&   r+   r   r$   r   r   r   get_cosine_schedule_with_warmup   s   r8   c             	   C   sr   | |k rt | t td| S t | | t td||  }|dkr$dS tdddttjt || d    S )Nr   r   r'   r,   r.   r2   r   r   r   =_get_cosine_with_hard_restarts_schedule_with_warmup_lr_lambda   s   *r9   c                 C   r5   )a  
    Create a schedule with a learning rate that decreases following the values of the cosine function between the
    initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases
    linearly between 0 and the initial lr set in the optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase.
        num_training_steps (`int`):
            The total number of training steps.
        num_cycles (`int`, *optional*, defaults to 1):
            The number of hard restarts to use.
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    r6   )r   r9   r   r7   r   r   r   2get_cosine_with_hard_restarts_schedule_with_warmup   s   r:   lr_endpowerlr_initc          
      C   sf   | |k rt | t td| S | |kr|| S || }|| }d| | |  }|||  | }	|	| S r   r   )
r   r   r&   r;   r<   r=   lr_rangedecay_stepspct_remainingdecayr   r   r   4_get_polynomial_decay_schedule_with_warmup_lr_lambda   s   	rB   Hz>r   c                 C   sH   | j d }||kstd| d| dtt|||||d}t| ||S )a  
    Create a schedule with a learning rate that decreases as a polynomial decay from the initial lr set in the
    optimizer to end lr defined by *lr_end*, after a warmup period during which it increases linearly from 0 to the
    initial lr set in the optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase.
        num_training_steps (`int`):
            The total number of training steps.
        lr_end (`float`, *optional*, defaults to 1e-7):
            The end LR.
        power (`float`, *optional*, defaults to 1.0):
            Power factor.
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Note: *power* defaults to 1.0 as in the fairseq implementation, which in turn is based on the original BERT
    implementation at
    https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.

    lrzlr_end (z#) must be smaller than initial lr ())r   r&   r;   r<   r=   )defaults
ValueErrorr   rB   r   )r   r   r&   r;   r<   r   r=   r$   r   r   r   )get_polynomial_decay_schedule_with_warmup   s   
rH   )	timescalerI   c                C   s@   | |k rt | t td| S || }dt| | |  }|S )Nr   r   )r    r!   r/   sqrt)r   r   rI   shiftrA   r   r   r   $_get_inverse_sqrt_schedule_lr_lambda  s
   rL   c                 C   s,   |du r|pd}t t||d}t| ||dS )a  
    Create a schedule with an inverse square-root learning rate, from the initial lr set in the optimizer, after a
    warmup period which increases lr linearly from 0 to the initial lr set in the optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase.
        timescale (`int`, *optional*, defaults to `num_warmup_steps`):
            Time scale.
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    Ni'  )r   rI   r   )r   rL   r   )r   r   rI   r   r$   r   r   r   get_inverse_sqrt_schedule'  s   rM   r'   )min_lr_raterN   c                C   sz   | |k rt | t td| S t | | t td||  }ddttjt | d |   }|d|  | }td|S )Nr   r,   r   r-   r   r.   )r   r   r&   r+   rN   r3   factorr   r   r   r4   E  s   $
min_lrc                 C   s^   |dur|durt d|dur|| jd  }n|du r t dtt||||d}t| ||S )a  
    Create a schedule with a learning rate that decreases following the values of the cosine function between the
    initial lr set in the optimizer to min_lr, after a warmup period during which it increases linearly between 0 and the
    initial lr set in the optimizer.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase.
        num_training_steps (`int`):
            The total number of training steps.
        num_cycles (`float`, *optional*, defaults to 0.5):
            The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0
            following a half-cosine).
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.
        min_lr (`float`, *optional*):
            The minimum learning rate to reach after the cosine schedule.
        min_lr_rate (`float`, *optional*):
            The minimum learning rate as a ratio of the initial learning rate. If set, `min_lr` should not be set.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    Nz/Only one of min_lr or min_lr_rate should be setrD   zLOne of min_lr or min_lr_rate should be set through the `lr_scheduler_kwargs`)r   r&   r+   rN   )rG   rF   r   r4   r   )r   r   r&   r+   r   rP   rN   r$   r   r   r   +get_cosine_with_min_lr_schedule_with_warmupP  s   #rQ   num_stable_stepsnum_decay_stepswarmup_type
decay_typemin_lr_ratioc          
      C   s4  | |k rAt | t td| }|dkr|}	n|dkr'ddttj|   }	n|dkr4dtd|  }	|	d|  | }	td|	S | || k rIdS | || | k rt | | | t td| }|dkrid| }	n"|dkrddttjt | d |   }	n|dkrdt| }	|	d|  | }	td|	S |S )	Nr   linearcosiner,   r   1-sqrtr'   r-   )r    r!   r/   r0   r1   rJ   )
r   r   rR   rS   rT   rU   rV   r+   r3   rO   r   r   r   _get_wsd_scheduler_lambda  s.   

&
rZ   rW   rX   c
              
   C   s   |du r|du rt d|dur|durtd |dvr%t d| d|dvr1t d| d|du r;|| | }tt|||||||d}
t| |
|	S )	a  
    Create a schedule with a learning rate that has three stages:
    1. warmup: increase from min_lr_ratio times the initial learning rate to the initial learning rate following a warmup_type.
    2. stable: constant learning rate.
    3. decay: decrease from the initial learning rate to min_lr_ratio times the initial learning rate following a decay_type.

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (`int`):
            The number of steps for the warmup phase.
        num_decay_steps (`int`):
            The number of steps for the decay phase.
        num_training_steps (`int`, *optional*):
            The total number of training steps. This is the sum of the warmup, stable and decay steps. If `num_stable_steps` is not provided, the stable phase will be `num_training_steps - num_warmup_steps - num_decay_steps`.
        num_stable_steps (`int`, *optional*):
            The number of steps for the stable phase. Please ensure that `num_warmup_steps + num_stable_steps + num_decay_steps` equals `num_training_steps`, otherwise the other steps will default to the minimum learning rate.
        warmup_type (`str`, *optional*, defaults to "linear"):
            The type of warmup to use. Can be 'linear', 'cosine' or '1-sqrt'.
        decay_type (`str`, *optional*, defaults to "cosine"):
            The type of decay to use. Can be 'linear', 'cosine' or '1-sqrt'.
        min_lr_ratio (`float`, *optional*, defaults to 0):
            The minimum learning rate as a ratio of the initial learning rate.
        num_cycles (`float`, *optional*, defaults to 0.5):
            The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0
            following a half-cosine).
        last_epoch (`int`, *optional*, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    Nz@Either num_training_steps or num_stable_steps must be specified.zZBoth num_training_steps and num_stable_steps are specified. num_stable_steps will be used.)rW   rX   rY   zUnknown warmup type: z), expected 'linear', 'cosine' or '1-sqrt'zUnknown decay type: )r   rR   rS   rT   rU   rV   r+   )rG   warningswarnr   rZ   r   )r   r   rS   r&   rR   rT   rU   rV   r+   r   r$   r   r   r   get_wsd_schedule  s*   -

r]   namescheduler_specific_kwargsc           	         sT  t | } t|  }|durGt|trG|j}i  | D ]}t| || ||d |< q fdd}| D ]
}|jr=|| q3t	||j
d dS | t jkrP||S |du rVi }| t jkrc||fi |S |du rnt|  d| t jkry|||dS | t jkr|||dS | t jkr||f||d	|S |du rt|  d
||f||d	|S )a  
    Unified API to get any scheduler from its name.

    Args:
        name (`str` or `SchedulerType`):
            The name of the scheduler to use.
        optimizer (`torch.optim.Optimizer`):
            The optimizer that will be used during training.
        num_warmup_steps (`int`, *optional*):
            The number of warmup steps to do. This is not required by all schedulers (hence the argument being
            optional), the function will raise an error if it's unset and the scheduler type requires it.
        num_training_steps (`int``, *optional*):
            The number of training steps to do. This is not required by all schedulers (hence the argument being
            optional), the function will raise an error if it's unset and the scheduler type requires it.
        scheduler_specific_kwargs (`dict`, *optional*):
            Extra parameters for schedulers such as cosine with restarts. Mismatched scheduler types and scheduler
            parameters will cause the scheduler function to raise a TypeError.
    N)r   r   r&   c                    s    |     d S N)step)paramscheduler_dictr   r   scheduler_hook+  s   z%get_scheduler.<locals>.scheduler_hookrD   )optimizer_dictrD   z; requires `num_warmup_steps`, please provide that argument.r#   r)   z= requires `num_training_steps`, please provide that argument.)r   TYPE_TO_SCHEDULER_FUNCTION
isinstancer	   rf   keysget_schedulerrequires_grad"register_post_accumulate_grad_hookr
   rF   CONSTANTREDUCE_ON_PLATEAUrG   CONSTANT_WITH_WARMUPINVERSE_SQRTWARMUP_STABLE_DECAY)	r^   r   r   r&   r_   schedule_funcrf   rb   re   r   rc   r   rj     sb   





rj   c                       sv   e Zd ZdZ									d fd	d
	Zedd Zedd Zedd Zedd Z	e
 dddZ  ZS )	Adafactora)  
    AdaFactor pytorch implementation can be used as a drop in replacement for Adam original fairseq code:
    https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py

    Paper: *Adafactor: Adaptive Learning Rates with Sublinear Memory Cost* https://arxiv.org/abs/1804.04235 Note that
    this optimizer internally adjusts the learning rate depending on the `scale_parameter`, `relative_step` and
    `warmup_init` options. To use a manual (external) learning rate schedule you should set `scale_parameter=False` and
    `relative_step=False`.

    Arguments:
        params (`Iterable[nn.parameter.Parameter]`):
            Iterable of parameters to optimize or dictionaries defining parameter groups.
        lr (`float`, *optional*):
            The external learning rate.
        eps (`Tuple[float, float]`, *optional*, defaults to `(1e-30, 0.001)`):
            Regularization constants for square gradient and parameter scale respectively
        clip_threshold (`float`, *optional*, defaults to 1.0):
            Threshold of root mean square of final gradient update
        decay_rate (`float`, *optional*, defaults to -0.8):
            Coefficient used to compute running averages of square
        beta1 (`float`, *optional*):
            Coefficient used for computing running averages of gradient
        weight_decay (`float`, *optional*, defaults to 0.0):
            Weight decay (L2 penalty)
        scale_parameter (`bool`, *optional*, defaults to `True`):
            If True, learning rate is scaled by root mean square
        relative_step (`bool`, *optional*, defaults to `True`):
            If True, time-dependent learning rate is computed instead of external learning rate
        warmup_init (`bool`, *optional*, defaults to `False`):
            Time-dependent learning rate computation depends on whether warm-up initialization is being used

    This implementation handles low-precision (FP16, bfloat) values, but we have not thoroughly tested.

    Recommended T5 finetuning settings (https://discuss.huggingface.co/t/t5-finetuning-tips/684/3):

        - Training without LR warmup or clip_threshold is not recommended.

           - use scheduled LR warm-up to fixed LR
           - use clip_threshold=1.0 (https://arxiv.org/abs/1804.04235)
        - Disable relative updates
        - Use scale_parameter=False
        - Additional optimizer operations like gradient clipping should not be used alongside Adafactor

    Example:

    ```python
    Adafactor(model.parameters(), scale_parameter=False, relative_step=False, warmup_init=False, lr=1e-3)
    ```

    Others reported the following combination to work well:

    ```python
    Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None)
    ```

    When using `lr=None` with [`Trainer`] you will most likely need to use [`~optimization.AdafactorSchedule`]
    scheduler as following:

    ```python
    from transformers.optimization import Adafactor, AdafactorSchedule

    optimizer = Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None)
    lr_scheduler = AdafactorSchedule(optimizer)
    trainer = Trainer(..., optimizers=(optimizer, lr_scheduler))
    ```

    Usage:

    ```python
    # replace AdamW with Adafactor
    optimizer = Adafactor(
        model.parameters(),
        lr=1e-3,
        eps=(1e-30, 1e-3),
        clip_threshold=1.0,
        decay_rate=-0.8,
        beta1=None,
        weight_decay=0.0,
        relative_step=False,
        scale_parameter=False,
        warmup_init=False,
    )
    ```NgKH9gMbP?r   皙r'   TFc              
      sV   t d |d ur|	rtd|
r|	std||||||||	|
d	}t || d S )Nztorch>=1.5.0z;Cannot combine manual `lr` and `relative_step=True` optionsz0`warmup_init=True` requires `relative_step=True`)	rD   epsclip_threshold
decay_ratebeta1weight_decayscale_parameterrelative_stepwarmup_init)r   rG   super__init__)selfparamsrD   rv   rw   rx   ry   rz   r{   r|   r}   rF   	__class__r   r   r     s    zAdafactor.__init__c                 C   sj   | d }| d r | d rd|d  nd}t |dt|d  }d}| d r1t| d	 d
 |d }|| S )NrD   r|   r}   gư>ra   g{Gz?r   r{   rv   r   RMS)minr/   rJ   r!   )param_groupparam_staterel_step_szmin_stepparam_scaler   r   r   _get_lr  s   zAdafactor._get_lrc                 C   s    t |dk}| d d u}||fS )N   ry   )len)r   param_shapefactoreduse_first_momentr   r   r   _get_options  s   zAdafactor._get_optionsc                 C   s   |  d|  d  S )Nr   r,   )normnumel)tensorr   r   r   _rms  s   zAdafactor._rmsc                 C   s6   | | j ddd  d}|d }t||S )Nr   T)dimkeepdim)meanrsqrt_	unsqueezersqrttorchmul)exp_avg_sq_rowexp_avg_sq_colr_factorc_factorr   r   r   _approx_sq_grad  s   zAdafactor._approx_sq_gradc                 C   s$  d}|dur	| }| j D ]}|d D ]z}|jdu rq|j}|jtjtjhv r,| }|jr3td| j	| }|j
}| ||\}}	t|dkrd|d< |	rVt||d< |r{t|dd ||d< t|dd	 |dd  ||d
< nt||d< d|d< n)|	r|d ||d< |r|d ||d< |d
 ||d
< n	|d ||d< |}
|jtjtjhv r|
 }
|d  d7  < | |
|d< | ||}dt|d |d  }|d |d d  }|r!|d }|d
 }||j|jddd| d ||j|jd	dd| d | ||}|| n|d }||j|d| d | |}|| ||d  jdd || |	rf|d }||d j|d|d  d |}|d dkry|
j|
|d  | d |
|  |jtjtjhv r||
 qq|S )z
        Performs a single optimization step

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        Nr   z,Adafactor does not support sparse gradients.r   ra   exp_avgr   r   r   r   
exp_avg_sqr   r   r   rx   r   rv   )r   )alpharw   )r   ry   rz   )param_groupsgraddtyper   float16bfloat16r    	is_sparseRuntimeErrorstateshaper   r   
zeros_likezerostor   r   r/   powmul_add_r   r   r   div_clamp_copy_)r   closurelossgrouppr   r   
grad_shaper   r   p_data_fp32rD   beta2tupdater   r   r   r   r   r   r   ra     s|   	

*
   
 
NzAdafactor.step)	Nrt   r   ru   Nr'   TTFr`   )__name__
__module____qualname____doc__r   staticmethodr   r   r   r   r   no_gradra   __classcell__r   r   r   r   rs   ^  s,    W 




rs   c                       s*   e Zd ZdZd fdd	Zdd Z  ZS )AdafactorSchedulea8  
    Since [`~optimization.Adafactor`] performs its own scheduling, if the training loop relies on a scheduler (e.g.,
    for logging), this class creates a proxy object that retrieves the current lr values from the optimizer.

    It returns `initial_lr` during startup and the actual `lr` during stepping.
    r'   c                    sD    fdd}|j D ]} |d< q	t || |j D ]}|d= qd S )Nc                    s    S r`   r   r   
initial_lrr   r   r$   X  r   z-AdafactorSchedule.__init__.<locals>.lr_lambdar   )r   r~   r   )r   r   r   r$   r   r   r   r   r   W  s   


zAdafactorSchedule.__init__c                    s0   | j   fdd jD }t|dkr| j}|S )Nc                    s8   g | ]}|d  d j dur | j|d  d  qS )r   r   N)r   r   r   ).0r   optr   r   
<listcomp>c  s
    z,AdafactorSchedule.get_lr.<locals>.<listcomp>r   )r   r   r   base_lrs)r   lrsr   r   r   get_lra  s   
zAdafactorSchedule.get_lrr'   )r   r   r   r   r   r   r   r   r   r   r   r   O  s    
r   c                 C   s
   t | |S )aX  
    Get a proxy schedule for [`~optimization.Adafactor`]

    Args:
        optimizer ([`~torch.optim.Optimizer`]):
            The optimizer for which to schedule the learning rate.
        initial_lr (`float`, *optional*, defaults to 0.0):
            Initial lr

    Return:
        [`~optimization.Adafactor`] proxy schedule object.


    )r   )r   r   r   r   r   get_adafactor_schedulem  s   
r   r`   )r   )r,   r   )r   r   )rC   r   r   )Nr   )r,   r   NN)NNrW   rX   r   r,   r   )NNNr   )?r   r/   r[   	functoolsr   typingr   r   r   torch.optimr   torch.optim.lr_schedulerr   r   trainer_pt_utilsr	   r
   trainer_utilsr   utilsr   utils.versionsr   
get_loggerr   loggerr   intr   r   r"   r%   r(   r*   r    r4   r8   r9   r:   rB   rH   rL   rM   rQ   strrZ   r]   LINEARCOSINECOSINE_WITH_RESTARTS
POLYNOMIALrm   ro   rp   rn   COSINE_WITH_MIN_LRrq   rg   dictrj   rs   r   r   r   r   r   r   <module>   s  





"

!

 .	


4	
*	

J

] r