8 changes: 4 additions & 4 deletions test/test_jit.py
@@ -70,10 +70,10 @@ def LSTMCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):
     gates = F.linear(input, w_ih, b_ih) + F.linear(hx, w_hh, b_hh)

     ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
-    ingate = F.sigmoid(ingate)
-    forgetgate = F.sigmoid(forgetgate)
+    ingate = torch.sigmoid(ingate)
+    forgetgate = torch.sigmoid(forgetgate)
     cellgate = F.tanh(cellgate)
-    outgate = F.sigmoid(outgate)
+    outgate = torch.sigmoid(outgate)

     cy = (forgetgate * cx) + (ingate * cellgate)
     hy = outgate * F.tanh(cy)
@@ -4476,7 +4476,7 @@ def reparameterize(self, mu, logvar):

     def decode(self, z):
         h3 = F.relu(self.fc3(z))
-        return F.sigmoid(self.fc4(h3))
+        return torch.sigmoid(self.fc4(h3))

     def forward(self, x):
         mu, logvar = self.encode(x.view(-1, 784))
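As a quick sanity check (not part of this diff), torch.sigmoid and the deprecated nn.functional.sigmoid compute identical values, so the swap in these tests is behavior-preserving:

import torch
import torch.nn.functional as F

x = torch.randn(3, 4)
# Both entry points apply the same elementwise logistic function,
# so replacing one with the other changes no test outputs.
assert torch.allclose(torch.sigmoid(x), F.sigmoid(x))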
8 changes: 4 additions & 4 deletions torch/distributions/transforms.py
@@ -6,7 +6,7 @@
 from torch.distributions import constraints
 from torch.distributions.utils import (_sum_rightmost, broadcast_all,
                                        lazy_property)
-from torch.nn.functional import pad, sigmoid
+from torch.nn.functional import pad

 __all__ = [
     'AbsTransform',
@@ -341,7 +341,7 @@ def __eq__(self, other):
         return isinstance(other, SigmoidTransform)

     def _call(self, x):
-        return sigmoid(x)
+        return torch.sigmoid(x)

     def _inverse(self, y):
         return y.log() - (-y).log1p()
@@ -483,7 +483,7 @@ def __eq__(self, other):

     def _call(self, x):
         offset = (x.shape[-1] + 1) - x.new([1]).expand(x.shape).cumsum(-1)
-        z = sigmoid(x - offset.log())
+        z = torch.sigmoid(x - offset.log())
         z_cumprod = (1 - z).cumprod(-1)
         y = pad(z, (0, 1), value=1) * pad(z_cumprod, (1, 0), value=1)
         return y
@@ -497,7 +497,7 @@ def _inverse(self, y):

     def log_abs_det_jacobian(self, x, y):
         offset = (x.shape[-1] + 1) - x.new([1]).expand(x.shape).cumsum(-1)
-        z = sigmoid(x - offset.log())
+        z = torch.sigmoid(x - offset.log())
         detJ = ((1 - z).log() + y[..., :-1].log()).sum(-1)
         return detJ
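For reference, a minimal usage sketch (not part of the diff) of SigmoidTransform, whose _call now routes through torch.sigmoid; the round trip through its inverse recovers the input up to floating-point error:

import torch
from torch.distributions.transforms import SigmoidTransform

t = SigmoidTransform()
x = torch.randn(5)
y = t(x)             # equivalent to torch.sigmoid(x)
x_back = t.inv(y)    # logit: y.log() - (-y).log1p()
assert torch.allclose(x, x_back, atol=1e-6)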
2 changes: 1 addition & 1 deletion torch/distributions/utils.py
@@ -116,7 +116,7 @@ def logits_to_probs(logits, is_binary=False):
     the log probabilities (possibly unnormalized) of the events.
     """
     if is_binary:
-        return F.sigmoid(logits)
+        return torch.sigmoid(logits)
     return F.softmax(logits, dim=-1)
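A small illustration (not part of the diff) of the two branches of logits_to_probs, assuming the helper stays importable from torch.distributions.utils:

import torch
from torch.distributions.utils import logits_to_probs

logits = torch.tensor([0.0, 1.0, -1.0])
# Binary case: elementwise sigmoid of the logits.
print(logits_to_probs(logits, is_binary=True))
# Categorical case: softmax over the last dimension.
print(logits_to_probs(logits))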
19 changes: 10 additions & 9 deletions torch/nn/_functions/rnn.py
@@ -2,6 +2,7 @@
 from torch.autograd import NestedIOFunction
 import torch.backends.cudnn as cudnn
 from .. import functional as F
+import torch
 from .thnn import rnnFusedPointwise as fusedBackend
 import itertools
 from functools import partial
@@ -18,7 +19,7 @@ def RNNReLUCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):


 def RNNTanhCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):
-    hy = F.tanh(F.linear(input, w_ih, b_ih) + F.linear(hidden, w_hh, b_hh))
+    hy = torch.tanh(F.linear(input, w_ih, b_ih) + F.linear(hidden, w_hh, b_hh))
     return hy
@@ -34,13 +35,13 @@ def LSTMCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):

     ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)

-    ingate = F.sigmoid(ingate)
-    forgetgate = F.sigmoid(forgetgate)
-    cellgate = F.tanh(cellgate)
-    outgate = F.sigmoid(outgate)
+    ingate = torch.sigmoid(ingate)
+    forgetgate = torch.sigmoid(forgetgate)
+    cellgate = torch.tanh(cellgate)
+    outgate = torch.sigmoid(outgate)

     cy = (forgetgate * cx) + (ingate * cellgate)
-    hy = outgate * F.tanh(cy)
+    hy = outgate * torch.tanh(cy)

     return hy, cy
@@ -58,9 +59,9 @@ def GRUCell(input, hidden, w_ih, w_hh, b_ih=None, b_hh=None):
     i_r, i_i, i_n = gi.chunk(3, 1)
     h_r, h_i, h_n = gh.chunk(3, 1)

-    resetgate = F.sigmoid(i_r + h_r)
-    inputgate = F.sigmoid(i_i + h_i)
-    newgate = F.tanh(i_n + resetgate * h_n)
+    resetgate = torch.sigmoid(i_r + h_r)
+    inputgate = torch.sigmoid(i_i + h_i)
+    newgate = torch.tanh(i_n + resetgate * h_n)
     hy = newgate + inputgate * (hidden - newgate)

     return hy
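To see what this slow-path LSTMCell computes, here is a sketch (not part of the diff) checking it on CPU against the public torch.nn.LSTMCell, which documents the same i, f, g, o gate layout in its weight matrices; note the import is from an internal module whose path may change:

import torch
import torch.nn as nn
from torch.nn._functions.rnn import LSTMCell  # internal module, not a stable API

cell = nn.LSTMCell(10, 20)
x = torch.randn(3, 10)
hx, cx = torch.randn(3, 20), torch.randn(3, 20)

# The functional cell takes the same tensors nn.LSTMCell holds as attributes.
hy, cy = LSTMCell(x, (hx, cx), cell.weight_ih, cell.weight_hh,
                  cell.bias_ih, cell.bias_hh)
hy_ref, cy_ref = cell(x, (hx, cx))
assert torch.allclose(hy, hy_ref, atol=1e-6)
assert torch.allclose(cy, cy_ref, atol=1e-6)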
4 changes: 2 additions & 2 deletions torch/nn/functional.py
@@ -1009,6 +1009,7 @@ def tanh(input):

     See :class:`~torch.nn.Tanh` for more details.
     """
+    warnings.warn("nn.functional.tanh is deprecated. Use torch.tanh instead.")
     return input.tanh()

@@ -1019,11 +1020,10 @@ def sigmoid(input):

     See :class:`~torch.nn.Sigmoid` for more details.
     """
+    warnings.warn("nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.")
     return input.sigmoid()


-# etc.
-
 def linear(input, weight, bias=None):
     r"""
     Applies a linear transformation to the incoming data: :math:`y = xA^T + b`.
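Taken together, the deprecation shims keep old code working while nudging callers toward the torch namespace. A small check (not part of the diff) of the new warning behavior:

import warnings
import torch
import torch.nn.functional as F

x = torch.randn(4)
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    y = F.sigmoid(x)  # still works, but now emits a deprecation warning
assert any("deprecated" in str(w.message) for w in caught)
assert torch.allclose(y, torch.sigmoid(x))  # values are unchanged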