
ReduceLROnDelayScheduler
daniil-lyakhov committed Feb 10, 2022
1 parent b84ca21 commit 0ab5334
Showing 1 changed file with 46 additions and 3 deletions.
49 changes: 46 additions & 3 deletions torchreid/optim/lr_scheduler.py
@@ -11,7 +11,8 @@
from torch import optim
from torch.optim.lr_scheduler import _LRScheduler

AVAI_SCH = {'single_step', 'multi_step', 'cosine', 'warmup', 'cosine_cycle', 'reduce_on_plateau', 'onecycle'}
AVAI_SCH = {'single_step', 'multi_step', 'cosine', 'warmup', 'cosine_cycle',
'reduce_on_plateau_delayed', 'reduce_on_plateau', 'onecycle'}

def build_lr_scheduler(optimizer, lr_scheduler, base_scheduler, **kwargs):
if lr_scheduler == 'warmup':
@@ -37,7 +38,8 @@ def _build_scheduler(optimizer,
max_lr=0.1,
patience=5,
lr_decay_factor=100,
pct_start=0.3):
pct_start=0.3,
epoch_delay=0):

init_learning_rate = [param_group['lr'] for param_group in optimizer.param_groups]
if lr_scheduler not in AVAI_SCH:
@@ -96,7 +98,21 @@ def _build_scheduler(optimizer,
lb_lr = [lr / lr_decay_factor for lr in init_learning_rate]
epoch_treshold = max(int(max_epoch * 0.75) - warmup, 1) # 75% of the training - warmup epochs
scheduler = ReduceLROnPlateauV2(optimizer, epoch_treshold, factor=gamma, patience=patience,
threshold=2e-4, verbose=True, min_lr=lb_lr )
threshold=2e-4, verbose=True, min_lr=min_lr)
elif lr_scheduler == 'reduce_on_plateau_delayed':
if epoch_delay < 0:
raise ValueError(f'epoch_delay = {epoch_delay} should be non-negative')

if max_epoch < epoch_delay:
raise ValueError(f'max_epoch param = {max_epoch} should be greater than or equal to'
f' epoch_delay param = {epoch_delay}')

if epoch_delay < warmup:
raise ValueError(f'warmup param = {warmup} should be less than or equal to'
f' epoch_delay param = {epoch_delay}')
epoch_treshold = max(int(max_epoch * 0.75) - epoch_delay, 1) # 75% of the training minus the delayed epochs
scheduler = ReduceLROnPlateauV2Delayed(optimizer, epoch_treshold, epoch_delay, factor=gamma,
patience=patience, threshold=2e-4, verbose=True, min_lr=min_lr)
else:
raise ValueError('Unknown scheduler: {}'.format(lr_scheduler))
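
For illustration, a minimal sketch of how the new branch might be exercised. Only patience, lr_decay_factor, pct_start and the new epoch_delay appear in the truncated _build_scheduler signature shown above, so the remaining keyword names (lr_scheduler, max_epoch, warmup, gamma, min_lr) are assumptions inferred from the variables used inside the branch body:

import torch
from torch import nn, optim

from torchreid.optim.lr_scheduler import _build_scheduler

model = nn.Linear(10, 2)
optimizer = optim.SGD(model.parameters(), lr=0.01)

# With max_epoch=100 and epoch_delay=20 the branch computes
# epoch_treshold = max(int(100 * 0.75) - 20, 1) = 55.
scheduler = _build_scheduler(optimizer,
                             lr_scheduler='reduce_on_plateau_delayed',  # assumed keyword
                             max_epoch=100,    # assumed keyword
                             warmup=5,         # must not exceed epoch_delay
                             gamma=0.1,        # assumed keyword, used as the decay factor
                             patience=5,
                             min_lr=1e-6,      # assumed keyword, lower bound for the LR
                             epoch_delay=20)   # new parameter added by this commit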

@@ -275,3 +291,30 @@ class OneCycleLR(optim.lr_scheduler.OneCycleLR):
@property
def warmup_finished(self):
return self.last_epoch >= self._schedule_phases[0]['end_step']


class ReduceLROnPlateauV2Delayed(ReduceLROnPlateauV2):
"""
ReduceOnPlateuV2 scheduler which starts working only
after certain amount of epochs specified by epoch delay param.
Useful when compression algorithms is applying to prevent
lr drop before full model compression. Warmup included into epoch_delay.
"""
def __init__(self,
optimizer: optim.Optimizer,
epoch_treshold: int,
epoch_delay: int,
**kwargs) -> None:

super().__init__(optimizer, epoch_treshold, **kwargs)
self._epoch_delay = epoch_delay

def step(self, metrics, epoch=None):
# While fewer than self._epoch_delay epochs have passed,
# only advance the epoch counter without evaluating the plateau condition
if self.last_epoch <= self._epoch_delay:
if epoch is None:
epoch = self.last_epoch + 1
self.last_epoch = epoch
else:
super().step(metrics, epoch=epoch)
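
A usage sketch of the class on its own; train_one_epoch and validate are hypothetical placeholders for the user's training and validation routines, and constructor keywords beyond epoch_treshold and epoch_delay are simply forwarded to ReduceLROnPlateauV2:

import torch
from torch import nn, optim

from torchreid.optim.lr_scheduler import ReduceLROnPlateauV2Delayed

model = nn.Linear(10, 2)
optimizer = optim.SGD(model.parameters(), lr=0.01)
scheduler = ReduceLROnPlateauV2Delayed(optimizer, epoch_treshold=55, epoch_delay=20,
                                       factor=0.1, patience=5)

for epoch in range(100):
    train_one_epoch(model, optimizer)   # hypothetical training step
    val_loss = validate(model)          # hypothetical validation metric
    # For roughly the first 20 epochs step() only advances the internal epoch counter;
    # after that it falls through to the usual reduce-on-plateau logic.
    scheduler.step(val_loss)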
