
Commit 61ca0ba

weiyangfb authored and facebook-github-bot committed
Add log1p for sparse tensor (#8969)
Summary:
- Fixes log1p for sparse tensors (#8853)
- Adds log1p for sparse tensors in ATen
- Makes log1p of a sparse tensor non-differentiable and raises an error in its backward, because the local derivative of log1p at a zero element is 1 / (0 + 1) = 1, which would make the gradient tensor dense

Closes #8969

Reviewed By: ezyang

Differential Revision: D8677491

fbshipit-source-id: 8363a613519de4bc75eda087ccd20a3eb2d18126
1 parent 8d38460 commit 61ca0ba
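
As a minimal illustration of the gradient argument above, here is a dense-tensor sketch using standard torch autograd (not part of this diff): d/dx log1p(x) = 1 / (1 + x), which equals 1 at x = 0, so every implicit zero of a sparse input would receive a nonzero gradient and the gradient tensor would be dense.

import torch

# Dense sketch only: the derivative of log1p at 0 is 1, so the "zeros" of a
# sparse tensor would all pick up gradient 1 and nothing in the gradient
# would stay sparse.
x = torch.tensor([0.0, 0.0, 3.0], requires_grad=True)
x.log1p().sum().backward()
print(x.grad)  # tensor([1.0000, 1.0000, 0.2500])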

File tree: 5 files changed, +83 −3 lines


aten/src/ATen/native/native_functions.yaml

Lines changed: 4 additions & 2 deletions
@@ -754,12 +754,16 @@
   dispatch:
     CPU: _log1p__cpu
     CUDA: _log1p__cuda
+    SparseCPU: log1p_sparse_
+    SparseCUDA: log1p_sparse_
 
 - func: log1p_out(Tensor result, Tensor self) -> Tensor
   variants: function
   dispatch:
     CPU: _log1p_out_cpu
     CUDA: _log1p_out_cuda
+    SparseCPU: log1p_out_sparse
+    SparseCUDA: log1p_out_sparse
 
 - func: log2(Tensor self) -> Tensor

@@ -1421,8 +1425,6 @@
 
 - func: zero_(Tensor self) -> Tensor
 
-
-
 - func: s_native_add_out(Tensor result, Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
   variants: function
   dispatch:
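
A hedged usage sketch of what these dispatch entries enable (the legacy sparse constructor below mirrors the PR's own tests; printed values are approximate):

import torch

# With SparseCPU/SparseCUDA registered for log1p_ and log1p_out, log1p on a
# sparse COO tensor routes to the new sparse kernels instead of the dense ones.
i = torch.LongTensor([[0, 1, 2]])
v = torch.FloatTensor([3, 4, 5])
s = torch.sparse.FloatTensor(i, v, torch.Size([3]))

print(s.log1p().to_dense())              # tensor([1.3863, 1.6094, 1.7918])
print(s.coalesce().log1p_().to_dense())  # in-place variant requires a coalesced tensor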

aten/src/ATen/native/sparse/SparseTensorMath.cpp

Lines changed: 27 additions & 0 deletions
@@ -94,6 +94,33 @@ SparseTensor& mul_sparse_scalar_(SparseTensor& t, Scalar v) {
   return mul_out_sparse_scalar(t, t, v);
 }
 
+// --------------------------------------------------------------------
+// log1p(SparseTensor)
+// --------------------------------------------------------------------
+
+// TODO: add in-place variant
+
+SparseTensor& log1p_out_sparse(SparseTensor& r, const SparseTensor& t) {
+  AT_ASSERT(r.is_sparse());
+  AT_ASSERT(t.is_sparse());
+
+  if (isSameTensor(r, t)) {
+    // don't have in-place log1p for uncoalesced input because coalesce() is not in-place
+    AT_CHECK(
+      r.is_coalesced(), "in-place log1p on uncoalesced tensors is not supported yet!");
+  }
+  else {
+    r = raw_copy_sparse_(r, t.coalesce());
+  }
+  r._values().log1p_();
+  return r;
+}
+
+SparseTensor& log1p_sparse_(SparseTensor& t) {
+  AT_CHECK(t.is_coalesced(), "in-place log1p on uncoalesced tensors is not supported yet!");
+  return log1p_out_sparse(t, t);
+}
+
 // --------------------------------------------------------------------
 // pow(SparseTensor, Scalar)
 // --------------------------------------------------------------------
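
Two properties make this implementation correct: log1p(0) = 0, so operating only on _values() leaves the implicit zeros untouched, and log1p is not additive, so duplicate entries in an uncoalesced tensor must be summed (coalesced) before the elementwise op. A small numeric sketch (same duplicate values as the uncoalesced case in test_sparse.py below):

import math

a, b = 2.0, 1.0
print(math.log1p(a + b))              # 1.3862...  coalesce first, then log1p (correct)
print(math.log1p(a) + math.log1p(b))  # 1.7917...  log1p applied per duplicate (wrong)
print(math.log1p(0.0))                # 0.0        implicit zeros stay zero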

test/test_sparse.py

Lines changed: 40 additions & 0 deletions
@@ -659,6 +659,45 @@ def _test_zeros(self, shape, out_shape_i, out_shape_v=None):
         self.assertEqual(out._sparseDims(), len(shape))
         self.assertEqual(out._denseDims(), 0)
 
+    def test_log1p(self):
+        if self.is_cuda:
+            input = torch.cuda.sparse.DoubleTensor(
+                torch.LongTensor([[0], [1], [2]]).transpose(1, 0).cuda(),
+                torch.FloatTensor([3, 4, 5]).cuda(),
+                torch.Size([3]))
+        else:
+            input = torch.sparse.DoubleTensor(
+                torch.LongTensor([[0], [1], [2]]).transpose(1, 0),
+                torch.FloatTensor([3, 4, 5]),
+                torch.Size([3]))
+
+        expected_output = torch.tensor([3., 4., 5.]).log1p_()
+        self.assertEqual(expected_output, input.log1p().to_dense())
+        self.assertEqual(expected_output, input.coalesce().log1p_().to_dense())
+
+        # test in-place op on uncoalesced input
+        with self.assertRaisesRegex(RuntimeError,
+                                    "in-place log1p on uncoalesced tensors is not supported yet!"):
+            input.log1p_()
+
+        input.requires_grad_()
+        self.assertTrue(input.requires_grad)
+
+        # test autograd
+        x = input.clone()
+        y = input.log1p()
+        with self.assertRaisesRegex(RuntimeError,
+                                    "log1p of a sparse tensor is made to be non-differentiable since.*"):
+            y.backward(x)
+
+        # test uncoalesced input
+        input_uncoalesced = torch.sparse.DoubleTensor(
+            torch.LongTensor([[0], [1], [2], [0], [1], [2]]).transpose(1, 0),
+            torch.FloatTensor([2, 3, 4, 1, 1, 1]),
+            torch.Size([3]))
+        self.assertEqual(expected_output, input_uncoalesced.log1p().to_dense())
+        self.assertEqual(expected_output, input_uncoalesced.coalesce().log1p_().to_dense())
+
     def test_zeros(self):
         i_shapes = [2, 3, 4]
         v_shapes = [3, 4, 5, 6]

@@ -731,6 +770,7 @@ def test_sparse_variable_methods(self):
             '_sparseDims': lambda x: x._sparseDims(),
             '_denseDims': lambda x: x._denseDims(),
             'norm': lambda x: x.norm(),
+            'log1p': lambda x: x.log1p(),
         }
 
         for test_name, test_fn in to_test_one_arg.items():

tools/autograd/derivatives.yaml

Lines changed: 1 addition & 1 deletion
@@ -371,7 +371,7 @@
   self: grad / (self * 2.3025850929940456)
 
 - name: log1p(Tensor self)
-  self: grad / (self + 1)
+  self: log1p_backward(grad, self)
 
 - name: log2(Tensor self)
   self: grad / (self * 0.6931471805599453)

tools/autograd/templates/Functions.cpp

Lines changed: 11 additions & 0 deletions
@@ -1861,6 +1861,17 @@ std::tuple<Tensor, Tensor, Tensor> _trilinear_backward(const Tensor& grad_out, c
   return std::tuple<Tensor, Tensor, Tensor>(grad_i1, grad_i2, grad_i3);
 }
 
+Tensor log1p_backward(const Tensor& grad, const Tensor& self) {
+  if (self.is_sparse()) {
+    AT_ERROR(
+        "log1p of a sparse tensor is made to be non-differentiable since ",
+        "local gradient of zero is 1 / (0 + 1) = 1 and it makes the tensor dense. ",
+        "Use a different mathematical operation which preserves sparsity of gradients, ",
+        "or report a bug if you think this is an error.");
+  }
+  return grad / (self + 1);
+}
+
 } // anonymous namespace
 
 ${autograd_function_definitions}
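
A hedged sketch of the user-visible behavior of log1p_backward with a sparse input (it mirrors the autograd check in test_sparse.py above; the legacy constructor is only for illustration):

import torch

i = torch.LongTensor([[0, 1, 2]])
v = torch.FloatTensor([3, 4, 5])
s = torch.sparse.FloatTensor(i, v, torch.Size([3])).requires_grad_()

y = s.log1p()               # forward works (sparse kernel)
try:
    y.backward(s.detach())  # backward hits the is_sparse() branch above
except RuntimeError as e:
    print(e)  # "log1p of a sparse tensor is made to be non-differentiable since ..."

For dense inputs the backward is unchanged: grad / (self + 1), i.e. d/dx log1p(x) = 1 / (1 + x).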
