aten/src/ATen/native/native_functions.yaml: 4 additions & 2 deletions
@@ -754,12 +754,16 @@
dispatch:
CPU: _log1p__cpu
CUDA: _log1p__cuda
SparseCPU: log1p_sparse_
SparseCUDA: log1p_sparse_

- func: log1p_out(Tensor result, Tensor self) -> Tensor
variants: function
dispatch:
CPU: _log1p_out_cpu
CUDA: _log1p_out_cuda
SparseCPU: log1p_out_sparse
SparseCUDA: log1p_out_sparse

- func: log2(Tensor self) -> Tensor

@@ -1421,8 +1425,6 @@

- func: zero_(Tensor self) -> Tensor



- func: s_native_add_out(Tensor result, Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
variants: function
dispatch:
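For orientation, here is a minimal usage sketch of what the new SparseCPU/SparseCUDA dispatch entries enable. It uses `torch.sparse_coo_tensor` for construction, which is my own assumption; the tests below use the legacy `torch.sparse.DoubleTensor` constructors instead.

```python
import torch

# A 1-D sparse COO tensor with three stored values (illustrative example).
i = torch.tensor([[0, 1, 2]])        # indices, shape (ndim, nnz)
v = torch.tensor([3.0, 4.0, 5.0])    # values
x = torch.sparse_coo_tensor(i, v, (3,))

# log1p now dispatches to the sparse kernel; the result stays sparse and its
# stored values are log1p of the original stored values.
y = x.log1p()
print(y.to_dense())        # tensor([1.3863, 1.6094, 1.7918])
print(torch.log1p(v))      # same numbers, computed on a dense tensor
```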
aten/src/ATen/native/sparse/SparseTensorMath.cpp: 27 additions & 0 deletions
@@ -94,6 +94,33 @@ SparseTensor& mul_sparse_scalar_(SparseTensor& t, Scalar v) {
return mul_out_sparse_scalar(t, t, v);
}

// --------------------------------------------------------------------
// log1p(SparseTensor)
// --------------------------------------------------------------------

// TODO: add in-place variant

SparseTensor& log1p_out_sparse(SparseTensor& r, const SparseTensor& t) {
AT_ASSERT(r.is_sparse());
AT_ASSERT(t.is_sparse());

if (isSameTensor(r, t)) {
// don't have in-place log1p for uncoalesced input because coalesce() is not in-place
AT_CHECK(
r.is_coalesced(), "in-place log1p on uncoalesced tensors is not supported yet!");
}
else {
r = raw_copy_sparse_(r, t.coalesce());
}
r._values().log1p_();
return r;
}

SparseTensor& log1p_sparse_(SparseTensor& t) {
AT_CHECK(t.is_coalesced(), "in-place log1p on uncoalesced tensors is not supported yet!");
return log1p_out_sparse(t, t);
}

// --------------------------------------------------------------------
// pow(SparseTensor, Scalar)
// --------------------------------------------------------------------
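A small sketch of the branching above, again assuming the `torch.sparse_coo_tensor` constructor: the out-of-place path works from a coalesced copy of the input, while the in-place path refuses uncoalesced input because coalesce() is not in-place.

```python
import torch

# Duplicate index 0 leaves the tensor uncoalesced until .coalesce() sums duplicates.
i = torch.tensor([[0, 0, 1]])
v = torch.tensor([1.0, 2.0, 3.0])
x = torch.sparse_coo_tensor(i, v, (2,))

# Out-of-place: log1p_out_sparse copies a coalesced version first, so this is fine.
print(x.log1p().to_dense())    # log1p([3., 3.]) -> tensor([1.3863, 1.3863])

# In-place on uncoalesced input is rejected.
try:
    x.log1p_()
except RuntimeError as e:
    print(e)                   # in-place log1p on uncoalesced tensors is not supported yet!

# In-place on a coalesced tensor succeeds.
xc = x.coalesce()
xc.log1p_()
print(xc.to_dense())           # tensor([1.3863, 1.3863])
```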
test/test_sparse.py: 40 additions & 0 deletions
@@ -659,6 +659,45 @@ def _test_zeros(self, shape, out_shape_i, out_shape_v=None):
self.assertEqual(out._sparseDims(), len(shape))
self.assertEqual(out._denseDims(), 0)

def test_log1p(self):
if self.is_cuda:
input = torch.cuda.sparse.DoubleTensor(
torch.LongTensor([[0], [1], [2]]).transpose(1, 0).cuda(),
torch.FloatTensor([3, 4, 5]).cuda(),
torch.Size([3]))
else:
input = torch.sparse.DoubleTensor(
torch.LongTensor([[0], [1], [2]]).transpose(1, 0),
torch.FloatTensor([3, 4, 5]),
torch.Size([3]))

expected_output = torch.tensor([3., 4., 5.]).log1p_()
self.assertEqual(expected_output, input.log1p().to_dense())
self.assertEqual(expected_output, input.coalesce().log1p_().to_dense())

# test in-place op on uncoalesced input
with self.assertRaisesRegex(RuntimeError,
"in-place log1p on uncoalesced tensors is not supported yet!"):
input.log1p_()

input.requires_grad_()
self.assertTrue(input.requires_grad)

# test autograd
x = input.clone()
y = input.log1p()
with self.assertRaisesRegex(RuntimeError,
"log1p of a sparse tensor is made to be non-differentiable since.*"):
y.backward(x)

# test uncoalesced input
input_uncoalesced = torch.sparse.DoubleTensor(
torch.LongTensor([[0], [1], [2], [0], [1], [2]]).transpose(1, 0),
torch.FloatTensor([2, 3, 4, 1, 1, 1]),
torch.Size([3]))
self.assertEqual(expected_output, input_uncoalesced.log1p().to_dense())
self.assertEqual(expected_output, input_uncoalesced.coalesce().log1p_().to_dense())

def test_zeros(self):
i_shapes = [2, 3, 4]
v_shapes = [3, 4, 5, 6]
@@ -731,6 +770,7 @@ def test_sparse_variable_methods(self):
'_sparseDims': lambda x: x._sparseDims(),
'_denseDims': lambda x: x._denseDims(),
'norm': lambda x: x.norm(),
'log1p': lambda x: x.log1p(),
}

for test_name, test_fn in to_test_one_arg.items():
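One detail worth spelling out in the uncoalesced test case: coalesce() sums values that share an index, so the duplicated entries collapse to the same [3, 4, 5] that the dense reference uses. A quick check (using `torch.sparse_coo_tensor`, an assumption as above):

```python
import torch

# Indices [0, 1, 2, 0, 1, 2] with values [2, 3, 4, 1, 1, 1] coalesce to [3, 4, 5].
i = torch.tensor([[0, 1, 2, 0, 1, 2]])
v = torch.tensor([2.0, 3.0, 4.0, 1.0, 1.0, 1.0])
print(torch.sparse_coo_tensor(i, v, (3,)).coalesce().to_dense())  # tensor([3., 4., 5.])
```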
tools/autograd/derivatives.yaml: 1 addition & 1 deletion
@@ -371,7 +371,7 @@
self: grad / (self * 2.3025850929940456)

- name: log1p(Tensor self)
self: grad / (self + 1)
self: log1p_backward(grad, self)

- name: log2(Tensor self)
self: grad / (self * 0.6931471805599453)
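For the dense path nothing changes mathematically: d/dx log(1 + x) = 1 / (1 + x), so the gradient remains grad / (self + 1); log1p_backward only adds an error for sparse inputs. A quick numerical sanity check of the dense rule (my own sketch, not part of the PR):

```python
import torch
from torch.autograd import gradcheck

# gradcheck compares the analytic gradient grad / (x + 1) with finite differences.
x = torch.rand(5, dtype=torch.double, requires_grad=True)
print(gradcheck(torch.log1p, (x,)))  # True
```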
tools/autograd/templates/Functions.cpp: 11 additions & 0 deletions
@@ -1861,6 +1861,17 @@ std::tuple<Tensor, Tensor, Tensor> _trilinear_backward(const Tensor& grad_out, c
return std::tuple<Tensor, Tensor, Tensor>(grad_i1, grad_i2, grad_i3);
}

Tensor log1p_backward(const Tensor& grad, const Tensor& self) {
if (self.is_sparse()) {
AT_ERROR(
"log1p of a sparse tensor is made to be non-differentiable since ",
"local gradient of zero is 1 / (0 + 1) = 1 and it makes the tensor dense. ",
"Use a different mathematical operation which preserves sparsity of gradients, ",
"or report a bug if you think this is an error.");
}
return grad / (self + 1);
}

} // anonymous namespace

${autograd_function_definitions}
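The rationale in the error message can be made concrete: at an unstored entry x = 0 the local derivative is 1 / (0 + 1) = 1, so the gradient of a sparse log1p would be dense everywhere. A sketch of the resulting behavior, mirroring the test above (`torch.sparse_coo_tensor` is an assumption, as before):

```python
import torch

# Backward through sparse log1p is rejected rather than silently densifying the gradient.
i = torch.tensor([[0, 1, 2]])
v = torch.tensor([3.0, 4.0, 5.0])
x = torch.sparse_coo_tensor(i, v, (3,)).coalesce().requires_grad_()
y = x.log1p()
try:
    y.backward(x.detach())
except RuntimeError as e:
    print(e)  # log1p of a sparse tensor is made to be non-differentiable since ...
```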