17 changes: 17 additions & 0 deletions aten/src/ATen/native/ReduceOps.cpp
@@ -278,6 +278,23 @@ Tensor _prod(const Tensor &self, int64_t dim_, bool keepdim) {
return at::_prod_out(result, self, dim, keepdim);
}

Tensor& logsumexp_out(Tensor& result, const Tensor &self, int64_t dim_, bool keepdim) {
int64_t dim = maybe_wrap_dim(dim_, self.dim());
auto maxes = at::max_values(self, dim, true);
result = at::where((maxes == INFINITY).__or__(maxes == -INFINITY),
maxes,
maxes + at::log(at::sum(at::exp(self - maxes), dim, true)));
if (! keepdim)
result.squeeze_(dim);
return result;
}

Tensor logsumexp(const Tensor &self, int64_t dim_, bool keepdim) {
int64_t dim = maybe_wrap_dim(dim_, self.dim());
Tensor result = self.type().tensor();
return at::native::logsumexp_out(result, self, dim, keepdim);
}

// \DIM REDUCE ################################################################

// MULTI DIM REDUCE ###########################################################
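The kernel above is the standard max-subtraction trick: the per-row maximum is factored out before exponentiating so that exp() never overflows, and the at::where guard falls back to the maximum itself whenever a row's maximum is +/-infinity, since `self - maxes` would otherwise produce NaN (inf - inf). A minimal PyTorch sketch of the same logic, for illustration only (the helper name logsumexp_stable is not part of this PR):

import torch

def logsumexp_stable(x, dim, keepdim=False):
    # Factor out the per-row maximum so exp() only sees values <= 0.
    maxes = x.max(dim, keepdim=True)[0]
    # With a +/-inf maximum, (x - maxes) contains inf - inf = nan, so fall
    # back to the maximum itself -- this mirrors the at::where guard above.
    out = torch.where(torch.isinf(maxes),
                      maxes,
                      maxes + (x - maxes).exp().sum(dim, keepdim=True).log())
    return out if keepdim else out.squeeze(dim)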
5 changes: 5 additions & 0 deletions aten/src/ATen/native/native_functions.yaml
@@ -615,6 +615,11 @@
CPU: log_softmax_backward_cpu
CUDA: log_softmax_backward_cuda

- func: logsumexp(Tensor self, int64_t dim, bool keepdim=False) -> Tensor

- func: logsumexp_out(Tensor result, Tensor self, int64_t dim, bool keepdim=False) -> Tensor
variants: function

- func: margin_ranking_loss(Tensor input1, Tensor input2, Tensor target, double margin=0.0, bool size_average=true, bool reduce=true) -> Tensor
variants: function

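These two declarations are what ATen's code generation turns into the user-facing API: the first entry yields both the free function torch.logsumexp and the Tensor.logsumexp method (the test and docstring below use both), while the _out overload backs the out= keyword. A sketch of the resulting call surface, assuming this PR is applied:

import torch

a = torch.randn(3, 4)

r1 = torch.logsumexp(a, 1)         # function variant, reduces dim 1 -> shape (3,)
r2 = a.logsumexp(1, keepdim=True)  # method variant, keeps the reduced dim -> shape (3, 1)

out = torch.empty(3)
torch.logsumexp(a, 1, out=out)     # out= keyword, dispatched to logsumexp_out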
1 change: 1 addition & 0 deletions test/test_autograd.py
@@ -2738,6 +2738,7 @@ class dont_convert(tuple):
('addcdiv', (), (0.5, (S, S, 1), (1, S)), 'scalar_scale_broadcast_lhs'),
('zero_', (S, S, S), NO_ARGS),
('zero_', (), NO_ARGS, 'scalar'),
('logsumexp', (S, S), (1,)),
('norm', (S, S), (2,)),
('norm', (S, S), (0,), '0'),
('norm', (S, S), (0.5,), '0_5'),
11 changes: 11 additions & 0 deletions test/test_torch.py
@@ -766,6 +766,17 @@ def test_multidim(x, dim):
def test_dim_reduction(self):
self._test_dim_reduction(self, lambda t: t)

@unittest.skipIf(not TEST_SCIPY, "Scipy not found")
def test_logsumexp(self):
from scipy.special import logsumexp
a = torch.randn(5, 4)
a[0, 0] = float('inf')
a[1, :] = float('-inf')
actual = a.logsumexp(1)
expected = logsumexp(a.numpy(), 1)
self.assertEqual(expected.shape, actual.shape)
self.assertTrue(np.allclose(expected, actual.numpy()))

@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
def test_cpu_parallel(self):
# To use parallel branches we'll need to compare on tensors
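The test plants a +inf entry and an all -inf row on purpose: those are exactly the inputs where the max-subtraction trick alone yields inf - inf = NaN, which is what the at::where fallback in ReduceOps.cpp guards against, while a completely naive log(sum(exp(x))) instead overflows for large finite rows. A small sketch of both failure modes (the value 1000.0 is an illustrative large input, not from the test; torch.logsumexp is assumed available from this PR):

import torch

a = torch.randn(5, 4)
a[0, 0] = float('inf')
a[1, :] = float('-inf')
a[2, :] = 1000.0

naive = a.exp().sum(1).log()            # row 2 overflows to inf instead of ~1001.39

maxes = a.max(1, keepdim=True)[0]
shifted = (a - maxes).exp().sum(1).log() + maxes.squeeze(1)
# rows 0 and 1 are nan here: inf - inf inside (a - maxes)

stable = a.logsumexp(1)                 # inf, -inf and 1000 + log(4), as expected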
3 changes: 3 additions & 0 deletions tools/autograd/derivatives.yaml
@@ -369,6 +369,9 @@
- name: log_normal_(Tensor self, double mean, double std, Generator generator)
self: zeros_like(grad)

- name: logsumexp(Tensor self, int64_t dim, bool keepdim)
self: logsumexp_backward(grad, self, result, dim, keepdim)

- name: lt_(Tensor self, Scalar other)
self: zeros_like(self)

8 changes: 8 additions & 0 deletions tools/autograd/templates/Functions.cpp
@@ -357,6 +357,14 @@ Tensor cumsum_backward(const Tensor & x, int64_t dim) {
return ret;
}

Tensor logsumexp_backward(Tensor grad, const Tensor & self, Tensor result, int64_t dim, bool keepdim) {
if (! keepdim) {
grad = grad.unsqueeze(dim);
result = result.unsqueeze(dim);
}
return grad * (self - result).exp();
}

Tensor unsqueeze_to(const Tensor & self, IntList sizes) {
auto result = self;

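logsumexp_backward implements the analytic gradient: for y_i = logsumexp(x)_i, the derivative dy_i/dx_ij is exp(x_ij - y_i), i.e. the softmax of the input along dim, and the saved forward result is reused instead of recomputing it; the unsqueeze calls just restore the reduced dimension so the subtraction broadcasts when keepdim was false. A quick sanity check of that identity with autograd (not part of the diff; assumes this PR's logsumexp is available):

import torch

x = torch.randn(3, 4, dtype=torch.double, requires_grad=True)
y = x.logsumexp(1)

# With an incoming gradient of all ones (from .sum()), the gradient of
# logsumexp w.r.t. x is exactly softmax(x, dim=1).
g, = torch.autograd.grad(y.sum(), x)
print(torch.allclose(g, torch.softmax(x, dim=1)))  # True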
7 changes: 7 additions & 0 deletions torch/_tensor_docs.py
@@ -1143,6 +1143,13 @@ def add_docstr_all(method, docstr):
f(x) = \\dfrac{1}{x \\sigma \\sqrt{2\\pi}}\ e^{-\\dfrac{(\\ln x - \\mu)^2}{2\\sigma^2}}
""")

add_docstr_all('logsumexp',
r"""
logsumexp(dim, keepdim=False) -> Tensor

See :func:`torch.logsumexp`
""")

add_docstr_all('lt',
r"""
lt(other) -> Tensor
30 changes: 30 additions & 0 deletions torch/_torch_docs.py
@@ -2154,6 +2154,36 @@ def parse_kwargs(desc):
tensor([ 1.2589, 2.1135, 3.5481, 5.9566, 10.0000])
""".format(**factory_common_args))

add_docstr(torch.logsumexp,
r"""
logsumexp(input, dim, keepdim=False, out=None) -> Tensor

Returns the log of summed exponentials of each row of the :attr:`input`
tensor in the given dimension :attr:`dim`. The computation is numerically
stabilized.

For summation index :math:`j` given by `dim` and other indices :math:`i`, the result is

:math:`\text{logsumexp}(x)_{i} = \log \sum_j \exp(x_{ij})`

If :attr:`keepdim` is ``True``, the output tensor is of the same size
as :attr:`input` except in the dimension :attr:`dim` where it is of size 1.
Otherwise, :attr:`dim` is squeezed (see :func:`torch.squeeze`), resulting in
the output tensor having 1 fewer dimension than :attr:`input`.

Args:
input (Tensor): the input tensor
dim (int): the dimension to reduce
keepdim (bool): whether the output tensor has :attr:`dim` retained or not
out (Tensor, optional): the output tensor

Example::

>>> a = torch.randn(3, 3)
>>> torch.logsumexp(a, 1)
tensor([ 0.8442, 1.4322, 0.8711])
""")

add_docstr(torch.lt,
r"""
lt(input, other, out=None) -> Tensor
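As the docstring notes, keepdim only affects shapes; the values are always log(sum(exp(input), dim)), just computed stably. A short shape illustration (output values are random, so only shapes and the equivalence check are shown; assumes this PR is applied):

import torch

a = torch.randn(3, 3)

print(torch.logsumexp(a, 1).shape)                 # torch.Size([3])
print(torch.logsumexp(a, 1, keepdim=True).shape)   # torch.Size([3, 1])

# For well-scaled inputs this matches the naive formula.
print(torch.allclose(torch.logsumexp(a, 1), a.exp().sum(1).log()))  # True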