Skip to content

Commit c2f8f50

Browse files
realdoug authored and facebook-github-bot committed
add narrow() support for sparse tensors re: #8853 (#11342)
Summary: Couple questions: 1) I used the log1p implementation in #8969 as a guide especially for testing. I'm not sure what the ```skipIfROCM``` annotation is for, so unsure if I need it for my test. 2) I implemented the branching logic in the narrow function itself; is this the right place to do so? I noticed that there are a number of places where sparse-specific logic is handled with just an if statement in this file. Or should I implement a separate dispatch in native_functions.yml as in the log1p? And of course, happy to make any other updates/changes that I may have missed as well. This is my first PR to the project. Pull Request resolved: #11342 Differential Revision: D9978430 Pulled By: weiyangfb fbshipit-source-id: e73dc20302ab58925afb19e609e31f4a38c634ad
1 parent 78fe149 commit c2f8f50

File tree

8 files changed

+91
-0
lines changed

8 files changed

+91
-0
lines changed

aten/src/ATen/core/Tensor.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -533,6 +533,7 @@ class CAFFE2_API Tensor {
533533
Tensor mv(const Tensor & vec) const;
534534
Tensor mvlgamma(int64_t p) const;
535535
Tensor & mvlgamma_(int64_t p);
536+
Tensor narrow_copy(int64_t dim, int64_t start, int64_t length) const;
536537
Tensor narrow(int64_t dim, int64_t start, int64_t length) const;
537538
Tensor permute(IntList dims) const;
538539
Tensor pin_memory() const;

aten/src/ATen/core/TensorMethods.h

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -902,6 +902,9 @@ inline Tensor Tensor::mvlgamma(int64_t p) const {
902902
inline Tensor & Tensor::mvlgamma_(int64_t p) {
903903
return type().mvlgamma_(*this, p);
904904
}
905+
inline Tensor Tensor::narrow_copy(int64_t dim, int64_t start, int64_t length) const {
906+
return type().narrow_copy(*this, dim, start, length);
907+
}
905908
inline Tensor Tensor::narrow(int64_t dim, int64_t start, int64_t length) const {
906909
return type().narrow(*this, dim, start, length);
907910
}

aten/src/ATen/core/Type.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -492,6 +492,7 @@ struct CAFFE2_API Type {
492492
virtual Tensor mv(const Tensor & self, const Tensor & vec) const = 0;
493493
virtual Tensor mvlgamma(const Tensor & self, int64_t p) const = 0;
494494
virtual Tensor & mvlgamma_(Tensor & self, int64_t p) const = 0;
495+
virtual Tensor narrow_copy(const Tensor & self, int64_t dim, int64_t start, int64_t length) const = 0;
495496
virtual Tensor narrow(const Tensor & self, int64_t dim, int64_t start, int64_t length) const = 0;
496497
virtual Tensor permute(const Tensor & self, IntList dims) const = 0;
497498
virtual Tensor pin_memory(const Tensor & self) const = 0;

aten/src/ATen/native/TensorShape.cpp

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -148,6 +148,45 @@ Tensor &as_strided_(Tensor& self, IntList size, IntList stride) {
148148
return at::as_strided_(self, size, stride, self.storage_offset());
149149
}
150150

151+
// Sparse implementation of narrow_copy(): returns a new sparse tensor whose
// dimension `dim` is restricted to the index range [start, start + length).
// Handles both sparse and dense dimensions of `self`.
//
// Args:
//   self:   the sparse input tensor.
//   dim:    dimension to narrow; must satisfy 0 <= dim < self.dim().
//   start:  first index kept along `dim` (>= 0).
//   length: number of indices kept along `dim` (>= 0, start+length <= size).
//
// Returns a copy (no storage sharing with `self`).
Tensor narrow_copy_sparse(const Tensor& self, int64_t dim, int64_t start, int64_t length){
  int64_t allDim = self.dim();
  int64_t end = start + length;
  AT_CHECK(allDim > 0, "narrow() cannot be applied to a 0-dim tensor.");
  AT_CHECK(dim >= 0 && dim < allDim,
    "Dimension ", dim, " out of range. Expecting 0 <= dim < ", allDim, ".");
  // NOTE: the original diff rendered this AT_CHECK without a terminating
  // semicolon; added here so the statement is properly terminated.
  AT_CHECK(start >= 0 && length >= 0 && end <= self.size(dim),
    "Invalid range to narrow. range(start, start+length) must be a subset of range(0, ", self.size(dim), ").");
  LongTensor indices = self._indices();
  int64_t sparseDims = self._sparseDims();

  std::vector<int64_t> newSizes = self.sizes().vec();
  newSizes[dim] = length;

  Tensor newValues;
  LongTensor newIndices;
  if (dim < sparseDims) {
    // Narrowing a sparse dim: keep only the nonzeros whose coordinate along
    // `dim` lies in [start, end), then shift those coordinates down by start.
    Tensor mask = (indices[dim] >= start).__and__((indices[dim] < end));
    newIndices = indices.masked_select(mask).view({sparseDims, -1});
    newIndices[dim].add_(-start);
    Tensor nzIndices = mask.nonzero().view(-1);
    newValues = self._values().index_select(0, nzIndices);
  } else {
    /* This means we are narrowing on a dense dim, which is in effect just a
       regular narrow on _values() */
    newIndices = indices;
    // _values() carries a leading nnz dimension, hence the +1 offset.
    int64_t ddim = dim - sparseDims + 1;
    newValues = self._values().narrow_copy(ddim, start, length);
  }

  SparseTensor newTensor = at::sparse_coo_tensor(newIndices, newValues, newSizes, self.type().options());
  // Narrowing cannot introduce duplicates or break ordering, so the result
  // keeps the input's coalesced flag.
  _get_sparse_impl(newTensor)->set_coalesced(self.is_coalesced());
  return newTensor;
}
185+
186+
// Dense implementation of narrow_copy(): materialize the narrowed view as an
// independent tensor so the result shares no storage with `self`.
Tensor narrow_copy_dense(const Tensor& self, int64_t dim, int64_t start, int64_t length){
  auto narrowed_view = self.narrow(dim, start, length);
  return narrowed_view.clone();
}
189+
151190
Tensor narrow(const Tensor& self, int64_t dim, int64_t start, int64_t length) {
152191
AT_CHECK(self.dim() > 0, "narrow() cannot be applied to a 0-dim tensor.");
153192
auto cur_size = self.size(dim);

aten/src/ATen/native/native_functions.yaml

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1174,6 +1174,14 @@
11741174
- func: mvlgamma_(Tensor self, int64_t p) -> Tensor
11751175
variants: method
11761176

1177+
- func: narrow_copy(Tensor self, int64_t dim, int64_t start, int64_t length) -> Tensor
1178+
variants: method
1179+
dispatch:
1180+
CPU: narrow_copy_dense
1181+
CUDA: narrow_copy_dense
1182+
SparseCPU: narrow_copy_sparse
1183+
SparseCUDA: narrow_copy_sparse
1184+
11771185
- func: narrow(Tensor self, int64_t dim, int64_t start, int64_t length) -> Tensor
11781186
variants: function, method
11791187

docs/source/sparse.rst

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -110,6 +110,7 @@ An empty sparse tensor can be constructed by specifying its size:
110110
.. method:: mm
111111
.. method:: mul
112112
.. method:: mul_
113+
.. method:: narrow_copy
113114
.. method:: resizeAs_
114115
.. method:: size
115116
.. method:: spadd

test/test_sparse.py

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1023,6 +1023,33 @@ def test_shape(i_shapes, v_shapes, nnzs):
10231023
test_shape([0, 3, 4], [3, 4, 5, 6], [0])
10241024
test_shape([2, 3, 4], [0, 4, 5, 6], [9, 12])
10251025

1026+
def _test_narrow(self, input, narrow_args):
1027+
expected = input.to_dense().narrow(*narrow_args)
1028+
self.assertEqual(expected, input.narrow_copy(*narrow_args).to_dense())
1029+
1030+
def _all_narrow_combs(self, shape):
1031+
for dim, dim_sz in enumerate(shape):
1032+
for start in range(dim_sz):
1033+
for length in range(dim_sz - start):
1034+
yield [dim, start, length]
1035+
1036+
def test_narrow(self):
    # Exercise every valid (dim, start, length) combination on a tensor with
    # all-sparse dims, then on one mixing sparse and dense dims, and check
    # that out-of-range arguments raise.
    shape = [3, 3, 4, 2]

    all_sparse, _, _ = self._gen_sparse(4, 19, shape)
    for args in self._all_narrow_combs(shape):
        self._test_narrow(all_sparse, args)

    self.assertRaises(RuntimeError, lambda: all_sparse.narrow_copy(-1, 0, 3))  # dim < 0
    self.assertRaises(RuntimeError, lambda: all_sparse.narrow_copy(10, 0, 3))  # dim > input.dim()
    self.assertRaises(RuntimeError, lambda: all_sparse.narrow_copy(0, shape[0] + 1, 3))  # start > size of dim
    self.assertRaises(RuntimeError, lambda: all_sparse.narrow_copy(0, 2, shape[0]))  # start+length > size of dim

    with_dense, _, _ = self._gen_sparse(2, 7, shape)
    for args in self._all_narrow_combs(shape):
        self._test_narrow(with_dense, args)

    self.assertRaises(RuntimeError, lambda: with_dense.narrow_copy(10, 0, 3))  # dim > sparseDim + denseDim
1052+
10261053
def _test_log1p_tensor(self, input, dense_tensor):
10271054
expected_output = torch.tensor(dense_tensor).log1p_()
10281055
self.assertEqual(expected_output, input.log1p().to_dense())

torch/_tensor_docs.py

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1453,6 +1453,17 @@ def callable(a, b) -> number
14531453
[ 8, 9]])
14541454
""")
14551455

1456+
# Docstring for Tensor.narrow_copy. Fixes from the original diff:
# "dimemsion" -> "dimension", and the mismatched/invalid triple-backtick
# literals replaced with proper RST double-backtick inline literals.
add_docstr_all('narrow_copy',
               r"""
narrow_copy(dimension, start, length) -> Tensor

Same as :meth:`Tensor.narrow` except returning a copy rather
than shared storage. This is primarily for sparse tensors, which
do not have a shared-storage narrow method. Calling ``narrow_copy``
with ``dimension > self._sparseDims()`` will return a copy with the
relevant dense dimension narrowed, and ``self.shape`` updated accordingly.
""")
1466+
14561467
add_docstr_all('ndimension',
14571468
r"""
14581469
ndimension() -> int

0 commit comments

Comments
 (0)