a
    Tqe4                     @   s   d dl Z d dlmZmZ d dlZG dd dejZG dd deZ	dd Z
G d	d
 d
eZG dd deZ	G dd deZG dd deZG dd deZdS )    N)SGDlr_schedulerc                       s8   e Zd Zd fdd	Zdd Zdd Zdd	d
Z  ZS )_LRMomentumSchedulerc                    sx   |dkr&|j D ]}|d|d  qn*t|j D ]\}}d|vr0td|q0ttdd |j | _t 	|| d S )Nr   initial_momentummomentumzXparam 'initial_momentum' is not specified in param_groups[{}] when resuming an optimizerc                 S   s   | d S )Nr    )groupr   r   I/home/opencvuniv/work/pranav/ADAS_2_LIDAR/SFA3D/sfa/utils/lr_scheduler.py<lambda>       z/_LRMomentumScheduler.__init__.<locals>.<lambda>)
param_groups
setdefault	enumerateKeyErrorformatlistmapbase_momentumssuper__init__)self	optimizer
last_epochr	   i	__class__r   r
   r      s    
z_LRMomentumScheduler.__init__c                 C   s   t d S NNotImplementedErrorr   r   r   r
   get_lr   s    z_LRMomentumScheduler.get_lrc                 C   s   t d S r   r   r    r   r   r
   get_momentum   s    z!_LRMomentumScheduler.get_momentumNc                 C   sP   |d u r| j d }|| _ t| jj|  |  D ]\}}}||d< ||d< q0d S )N   lrr   )r   zipr   r   r!   r"   )r   epochparam_groupr$   r   r   r   r
   step   s    
"z_LRMomentumScheduler.step)r   )N)__name__
__module____qualname__r   r!   r"   r(   __classcell__r   r   r   r
   r      s   r   c                   @   s    e Zd ZdZdd Zdd ZdS )ParameterUpdatea}  A callable class used to define an arbitrary schedule defined by a list.
    This object is designed to be passed to the LambdaLR or LambdaScheduler scheduler to apply
    the given schedule.

    Arguments:
        params {list or numpy.array} -- List or numpy array defining parameter schedule.
        base_param {float} -- Parameter value used to initialize the optimizer.
    c                 C   s   t |dg| _|| _d S )Nr   nphstackparams
base_paramr   r1   r2   r   r   r
   r   ,   s    ParameterUpdate.__init__c                 C   s   | j | | j S r   r1   r2   r   r&   r   r   r
   __call__0   s    ParameterUpdate.__call__Nr)   r*   r+   __doc__r   r7   r   r   r   r
   r-   "   s   	r-   c                    s    fddt ||D S )Nc                    s   g | ]\}}||  qS r   r   ).0lmbdabaser   r   r
   
<listcomp>5   r   z apply_lambda.<locals>.<listcomp>)r%   )r   basesZlambdasr   r>   r
   apply_lambda4   s    rA   c                       sP   e Zd ZdZdd dd df fdd	Zdd Zd	d
 Zdd Zdd Z  Z	S )LambdaSchedulera  Sets the learning rate and momentum of each parameter group to the initial lr and momentum
    times a given function. When last_epoch=-1, sets initial lr and momentum to the optimizer
    values.
    Args:
        optimizer (Optimizer): Wrapped optimizer.
        lr_lambda (function or list): A function which computes a multiplicative
            factor given an integer parameter epoch, or a list of such
            functions, one for each group in optimizer.param_groups.
            Default: lambda x:x.
        momentum_lambda (function or list): As for lr_lambda but applied to momentum.
            Default: lambda x:x.
        last_epoch (int): The index of last epoch. Default: -1.
    Example:
        >>> # Assuming optimizer has two groups.
        >>> lr_lambda = [
        ...     lambda epoch: epoch // 30,
        ...     lambda epoch: 0.95 ** epoch
        ... ]
        >>> mom_lambda = [
        ...     lambda epoch: max(0, (50 - epoch) // 50),
        ...     lambda epoch: 0.99 ** epoch
        ... ]
        >>> scheduler = LambdaScheduler(optimizer, lr_lambda, mom_lambda)
        >>> for epoch in range(100):
        >>>     train(...)
        >>>     validate(...)
        >>>     scheduler.step()
    c                 C   s   | S r   r   xr   r   r
   r   V   r   zLambdaScheduler.<lambda>r   c                    s   || _ t|ttfs(|gt|j | _n6t|t|jkrTtdt|jt|t|| _t|ttfs|gt|j | _	n6t|t|jkrtdt|jt|t|| _	|| _
t || d S )Nz"Expected {} lr_lambdas, but got {}z(Expected {} momentum_lambdas, but got {})r   
isinstancer   tuplelenr   
lr_lambdas
ValueErrorr   momentum_lambdasr   r   r   )r   r   	lr_lambdamomentum_lambdar   r   r   r
   r   V   s"    

zLambdaScheduler.__init__c                 C   s   dd | j  D }dgt| j |d< dgt| j |d< tt| j| jD ]H\}\}}t|tj	sx|j 
 |d |< t|tj	sN|j 
 |d |< qN|S )a>  Returns the state of the scheduler as a :class:`dict`.
        It contains an entry for every variable in self.__dict__ which
        is not the optimizer.
        The learning rate and momentum lambda functions will only be saved if they are
        callable objects and not if they are functions or lambdas.
        c                 S   s   i | ]\}}|d vr||qS ))r   rH   rJ   r   )r;   keyvaluer   r   r
   
<dictcomp>s   s   z.LambdaScheduler.state_dict.<locals>.<dictcomp>NrH   rJ   )__dict__itemsrG   rH   rJ   r   r%   rE   typesFunctionTypecopy)r   
state_dictidxZlr_fnZmom_fnr   r   r
   rU   l   s    zLambdaScheduler.state_dictc                 C   s|   | d}| d}| j| t|D ]"\}}|dur(| j| j| q(t|D ]"\}}|durT| j| j| qTdS )zLoads the schedulers state.
        Arguments:
            state_dict (dict): scheduler state. Should be an object returned
                from a call to :meth:`state_dict`.
        rH   rJ   N)poprP   updater   rH   rJ   )r   rU   rH   rJ   rV   fnr   r   r
   load_state_dict   s    

zLambdaScheduler.load_state_dictc                 C   s   t | j| j| jS r   )rA   r   base_lrsrH   r    r   r   r
   r!      s    zLambdaScheduler.get_lrc                 C   s   t | j| j| jS r   )rA   r   r   rJ   r    r   r   r
   r"      s    zLambdaScheduler.get_momentum)
r)   r*   r+   r:   r   rU   rZ   r!   r"   r,   r   r   r   r
   rB   8   s   rB   c                   @   s    e Zd ZdZdd Zdd ZdS )r-   a  A callable class used to define an arbitrary schedule defined by a list.
    This object is designed to be passed to the LambdaLR or LambdaScheduler scheduler to apply
    the given schedule. If a base_param is zero, no updates are applied.

    Arguments:
        params {list or numpy.array} -- List or numpy array defining parameter schedule.
        base_param {float} -- Parameter value used to initialize the optimizer.
    c                 C   s8   t |dg| _|| _|dk r4d| _| jd d | _d S )Nr   g-q=r#   g        g      ?r.   r3   r   r   r
   r      s
    r4   c                 C   s   | j | | j S r   r5   r6   r   r   r
   r7      s    r8   Nr9   r   r   r   r
   r-      s   	c                       s"   e Zd ZdZd fdd	Z  ZS )ListSchedulera<  Sets the learning rate and momentum of each parameter group to values defined by lists.
    When last_epoch=-1, sets initial lr and momentum to the optimizer values. One of both of lr
    and momentum schedules may be specified.
    Note that the parameters used to initialize the optimizer are overriden by those defined by
    this scheduler.
    Args:
        optimizer (Optimizer): Wrapped optimizer.
        lrs (list or numpy.ndarray): A list of learning rates, or a list of lists, one for each
            parameter group. One- or two-dimensional numpy arrays may also be passed.
        momentum (list or numpy.ndarray): A list of momentums, or a list of lists, one for each
            parameter group. One- or two-dimensional numpy arrays may also be passed.
        last_epoch (int): The index of last epoch. Default: -1.
    Example:
        >>> # Assuming optimizer has two groups.
        >>> lrs = [
        ...     np.linspace(0.01, 0.1, 100),
        ...     np.logspace(-2, 0, 100)
        ... ]
        >>> momentums = [
        ...     np.linspace(0.85, 0.95, 100),
        ...     np.linspace(0.8, 0.99, 100)
        ... ]
        >>> scheduler = ListScheduler(optimizer, lrs, momentums)
        >>> for epoch in range(100):
        >>>     train(...)
        >>>     validate(...)
        >>>     scheduler.step()
    Nr   c                    s   |j } d u rdd }nRt ttfr0t n  t jdkrV fdd|D }ndd t |D }d u r|dd }nRtttfrtntjdkrfdd|D }nd	d t|D }t	 
||| d S )
Nc                 S   s   | S r   r   rC   r   r   r
   r      r   z(ListScheduler.__init__.<locals>.<lambda>r#   c                    s   g | ]}t  |d  qS r$   r-   r;   g)lrsr   r
   r?      r   z*ListScheduler.__init__.<locals>.<listcomp>c                 S   s   g | ]\}}t ||d  qS r]   r^   r;   lr`   r   r   r
   r?      r   c                 S   s   | S r   r   rC   r   r   r
   r      r   c                    s   g | ]}t  |d  qS r   r^   r_   )	momentumsr   r
   r?      r   c                 S   s   g | ]\}}t ||d  qS rd   r^   rb   r   r   r
   r?      r   )r   rE   r   rF   r/   arrayrG   shaper%   r   r   )r   r   ra   re   r   groupsrK   rL   r   )ra   re   r
   r      s    

zListScheduler.__init__)NNr   r)   r*   r+   r:   r   r,   r   r   r   r
   r\      s   r\   c                       s    e Zd ZdZ fddZ  ZS )RangeFindera9  Scheduler class that implements the LR range search specified in:
        A disciplined approach to neural network hyper-parameters: Part 1 -- learning rate, batch
        size, momentum, and weight decay. Leslie N. Smith, 2018, arXiv:1803.09820.

    Logarithmically spaced learning rates from 1e-7 to 1 are searched. The number of increments in
    that range is determined by 'epochs'.
    Note that the parameters used to initialize the optimizer are overriden by those defined by
    this scheduler.

    Args:
        optimizer (Optimizer): Wrapped optimizer.
        epochs (int): Number of epochs over which to run test.
    Example:
        >>> scheduler = RangeFinder(optimizer, 100)
        >>> for epoch in range(100):
        >>>     train(...)
        >>>     validate(...)
        >>>     scheduler.step()
    c                    s    t dd|}t || d S )Nir   )r/   logspacer   r   )r   r   epochsra   r   r   r
   r      s    zRangeFinder.__init__ri   r   r   r   r
   rj      s   rj   c                       s*   e Zd ZdZddgdf fdd	Z  ZS )OneCyclePolicya  Scheduler class that implements the 1cycle policy search specified in:
        A disciplined approach to neural network hyper-parameters: Part 1 -- learning rate, batch
        size, momentum, and weight decay. Leslie N. Smith, 2018, arXiv:1803.09820.

    Args:
        optimizer (Optimizer): Wrapped optimizer.
        lr (float or list). Maximum learning rate in range. If a list of values is passed, they
            should correspond to parameter groups.
        epochs (int): The number of epochs to use during search.
        momentum_rng (list). Optional upper and lower momentum values (may be both equal). Set to
            None to run without momentum. Default: [0.85, 0.95]. If a list of lists is passed, they
            should correspond to parameter groups.
        phase_ratio (float): Fraction of epochs used for the increasing and decreasing phase of
            the schedule. For example, if phase_ratio=0.45 and epochs=100, the learning rate will
            increase from lr/10 to lr over 45 epochs, then decrease back to lr/10 over 45 epochs,
            then decrease to lr/100 over the remaining 10 epochs. Default: 0.45.
    g333333?gffffff?g?c           
         sZ  t |  t|ttfr0 fdd|D }nFtt|d |t||d t|d |d  d  g}|d urBt|}t|j	dkrt
|jD ]\}}|| d |d< q fdd|D }	nnt
|jD ]\}}|d |d< qtt|d |d	 t|d	 |d t|d |d  d  g}	nd }	t |||	 d S )
Nc                    sR   g | ]J}t t |d  |t ||d  t |d  |d  d  gqS )皙?{Gz?   r/   r0   linspace)r;   rc   rl   Zphase_epochsr   r
   r?     s   z+OneCyclePolicy.__init__.<locals>.<listcomp>rn   ro   rp   r#   r   c                    sZ   g | ]R}t t |d  |d t |d |d  t |d  |d   d  gqS )r#   r   rp   rq   )r;   mrs   r   r
   r?   &  s   r   )intrE   r   rF   r/   r0   rr   rf   rG   rg   r   r   r   r   )
r   r   r$   rl   momentum_rngphase_ratiora   r   r`   re   r   rs   r
   r     s6    

zOneCyclePolicy.__init__ri   r   r   r   r
   rm      s   rm   )torchZtorch.optimr   r   numpyr/   _LRSchedulerr   objectr-   rA   rB   r\   rj   rm   r   r   r   r
   <module>   s   a4