Commit 287aa76 (1 parent: de5ba9f)
word_language_model/main.py
@@ -160,7 +160,7 @@ def train():
         loss.backward()

         # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
-        torch.nn.utils.clip_grad_norm(model.parameters(), args.clip)
+        torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
         for p in model.parameters():
             p.data.add_(-lr, p.grad.data)
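For context, below is a minimal, self-contained sketch of the fixed call inside one training step. The model, data shapes, and hyperparameters are illustrative assumptions, not part of this commit; only the `clip_grad_norm_` call and the manual SGD update mirror the patched code.

import torch
import torch.nn as nn

# Illustrative model and data; not taken from the original commit.
model = nn.LSTM(input_size=10, hidden_size=20, num_layers=2)
criterion = nn.MSELoss()
lr, clip = 0.05, 0.25

inputs = torch.randn(5, 3, 10)    # (seq_len, batch, input_size)
targets = torch.randn(5, 3, 20)   # matches the LSTM output shape

model.zero_grad()
output, _ = model(inputs)
loss = criterion(output, targets)
loss.backward()

# Rescale gradients in place so their total norm is at most `clip`.
# The trailing underscore marks the in-place variant; the old
# `clip_grad_norm` (no underscore) was deprecated in PyTorch 0.4.
torch.nn.utils.clip_grad_norm_(model.parameters(), clip)

# Plain SGD step, mirroring the manual parameter update in the diff above.
with torch.no_grad():
    for p in model.parameters():
        p.add_(-lr * p.grad)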