
Commit 6545a64

Update LR params.

1 parent 7dd17d4

1 file changed: +4 -4 lines changed

model.py

Lines changed: 4 additions & 4 deletions

@@ -295,19 +295,19 @@ def test_step(self, batch, batch_idx):
 
     def configure_optimizers(self):
         # Train with a lower LR on the output layer
-        LR = 1.5e-3
+        LR = 8.75e-4
         train_params = [
             {'params' : get_parameters([self.input]), 'lr' : LR, 'gc_dim' : 0 },
             {'params' : [self.layer_stacks.l1_fact.weight], 'lr' : LR },
             {'params' : [self.layer_stacks.l1.weight], 'lr' : LR },
             {'params' : [self.layer_stacks.l1.bias], 'lr' : LR },
             {'params' : [self.layer_stacks.l2.weight], 'lr' : LR },
             {'params' : [self.layer_stacks.l2.bias], 'lr' : LR },
-            {'params' : [self.layer_stacks.output.weight], 'lr' : LR / 10 },
-            {'params' : [self.layer_stacks.output.bias], 'lr' : LR / 10 },
+            {'params' : [self.layer_stacks.output.weight], 'lr' : LR },
+            {'params' : [self.layer_stacks.output.bias], 'lr' : LR },
         ]
         # increasing the eps leads to less saturated nets with a few dead neurons
         optimizer = ranger.Ranger(train_params, betas=(.9, 0.999), eps=1.0e-7, gc_loc=False, use_gc=False)
         # Drop learning rate after 75 epochs
-        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.987)
+        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.992)
         return [optimizer], [scheduler]
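
For context, a minimal sketch (not part of the commit) of what these numbers mean in combination: StepLR with step_size=1 multiplies every parameter group's LR by gamma once per scheduler step, so the new settings start from a lower base LR (8.75e-4 vs 1.5e-3) but decay it more slowly (gamma 0.992 vs 0.987), and the output layer now trains at the full LR instead of LR / 10.

# Sketch only; assumes the scheduler is stepped once per epoch,
# as StepLR(step_size=1) in configure_optimizers above implies.
def lr_at_epoch(base_lr, gamma, epoch):
    # StepLR multiplies the current LR by gamma at every step.
    return base_lr * gamma ** epoch

for epoch in (0, 75, 150, 300):
    old = lr_at_epoch(1.5e-3, 0.987, epoch)   # before this commit
    new = lr_at_epoch(8.75e-4, 0.992, epoch)  # after this commit
    print(f"epoch {epoch:3d}: old {old:.2e}  new {new:.2e}")

Under these settings the two schedules cross at roughly epoch 107, after which the new per-epoch LR stays above the old one.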
