Commit 56e7a2c

Better support for adding zero-filled sparse tensors (#7479)
Right now, adding a zero-filled sparse tensor to another sparse tensor requires both tensors to have the same "density" (dimI, dimV) and the same size (tensor.size()). This change relaxes that constraint: if both tensors have the same tensor.size() and at least one of them is zero-filled, they can be added successfully.

Before:

```
i = torch.LongTensor([[0, 1, 1], [2, 0, 2]])
v = torch.FloatTensor([3, 4, 5]).unsqueeze(1)
sparse_mat = torch.sparse.FloatTensor(i, v, torch.Size([2, 3, 1]))
zeros = torch.zeros(sparse_mat.size(), layout=torch.sparse_coo)
sparse_mat + zeros

RuntimeError: cadd operands have incompatible sizes or dimension types at ../src/THS/generic/THSTensorMath.c:126
```

After: no error.
1 parent f12b877 commit 56e7a2c
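For illustration, here is a minimal sketch (not part of the commit) of the behavior after this change, reusing the tensors from the example in the commit message:

```python
import torch

# Sparse 2x3x1 tensor with one dense dimension per value (dimI = 2, dimV = 1).
i = torch.LongTensor([[0, 1, 1], [2, 0, 2]])
v = torch.FloatTensor([3, 4, 5]).unsqueeze(1)
sparse_mat = torch.sparse.FloatTensor(i, v, torch.Size([2, 3, 1]))

# Zero-filled sparse tensor with the same overall size but no dense dimensions.
zeros = torch.zeros(sparse_mat.size(), layout=torch.sparse_coo)

# Previously this raised "cadd operands have incompatible sizes or dimension
# types"; with this change the addition succeeds and leaves the values unchanged.
result = sparse_mat + zeros
print(result.to_dense())  # expected to match sparse_mat.to_dense()
```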

File tree

3 files changed: +60 −4 lines

aten/src/THCS/generic/THCSTensorMath.cu
aten/src/THS/generic/THSTensorMath.c
test/test_sparse.py

aten/src/THCS/generic/THCSTensorMath.cu

Lines changed: 25 additions & 2 deletions
@@ -367,10 +367,29 @@ void THCSTensor_(div)(THCState *state, THCSTensor *r_, THCSTensor *t, real value)
   }
 }

+int THCSTensor_(isSameSizeIgnoringDensity)(THCState *state, const THCSTensor *self, const THCSTensor *src) {
+  int d;
+  if (self->nDimensionI + self->nDimensionV != src->nDimensionI + src->nDimensionV) {
+    return 0;
+  }
+  for(d = 0; d < self->nDimensionI + self->nDimensionV; ++d) {
+    if(self->size[d] != src->size[d]) {
+      return 0;
+    }
+  }
+  return 1;
+}
+
+int THCSTensor_(isSameDensity)(THCState *state, const THCSTensor *self, const THCSTensor *src) {
+  return self->nDimensionI == src->nDimensionI &&
+         self->nDimensionV == src->nDimensionV;
+}
+
 void THCSTensor_(cadd)(THCState *state, THCSTensor *r_, THCSTensor *t, real value, THCSTensor *src) {
   THCAssertSameGPU(THCSTensor_(checkGPU)(state, 3, 3, r_, t, src));
-  if(!THCSTensor_(isSameSizeAs)(state, t, src)) {
-    THError("cadd operands have incompatible sizes or dimension types");
+  if (!THCSTensor_(isSameSizeIgnoringDensity)(state, t, src)) {
+    THError("cadd operands have incompatible sizes");
   }

   if (src->nnz == 0) {
@@ -382,6 +401,10 @@ void THCSTensor_(cadd)(THCState *state, THCSTensor *r_, THCSTensor *t, real value, THCSTensor *src)
     return;
   }

+  if(!THCSTensor_(isSameDensity)(state, t, src)) {
+    THError("cadd operands have incompatible densities");
+  }
+
   // We deliberately choose to simply concat the indices and values tensors
   // rather than merging them. This removes the need to synchronously fetch nnz
   // at the end of the operation, at the cost of having a non-coalesced result.
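This CUDA path mirrors the CPU change in aten/src/THS/generic/THSTensorMath.c below. A hedged sketch of exercising it from Python, assuming a CUDA build is available (names are illustrative only):

```python
import torch

if torch.cuda.is_available():
    i = torch.LongTensor([[0, 1, 1], [2, 0, 2]]).cuda()
    v = torch.FloatTensor([3, 4, 5]).unsqueeze(1).cuda()
    sparse_mat = torch.cuda.sparse.FloatTensor(i, v, torch.Size([2, 3, 1]))

    # Zero-filled sparse tensor on the same device, same overall size, no dense dims.
    zeros = torch.zeros(sparse_mat.size(), layout=torch.sparse_coo).cuda()

    # Expected to succeed after this change, exercising THCSTensor_(cadd).
    print((sparse_mat + zeros).to_dense())
```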

aten/src/THS/generic/THSTensorMath.c

Lines changed: 24 additions & 2 deletions
@@ -121,9 +121,27 @@ void THSTensor_(div)(THSTensor *r_, THSTensor *t, real value) {
   }
 }

+int THSTensor_(isSameSizeIgnoringDensity)(const THSTensor *self, const THSTensor* src) {
+  int d;
+  if (self->nDimensionI + self->nDimensionV != src->nDimensionI + src->nDimensionV) {
+    return 0;
+  }
+  for(d = 0; d < self->nDimensionI + self->nDimensionV; ++d) {
+    if(self->size[d] != src->size[d]) {
+      return 0;
+    }
+  }
+  return 1;
+}
+
+int THSTensor_(isSameDensity)(const THSTensor *self, const THSTensor* src) {
+  return self->nDimensionI == src->nDimensionI &&
+         self->nDimensionV == src->nDimensionV;
+}
+
 void THSTensor_(cadd)(THSTensor *r_, THSTensor *t, real value, THSTensor *src) {
-  if(!THSTensor_(isSameSizeAs)(t, src)) {
-    THError("cadd operands have incompatible sizes or dimension types");
+  if (!THSTensor_(isSameSizeIgnoringDensity)(t, src)) {
+    THError("cadd operands have incompatible sizes");
   }

   if (src->nnz == 0) {
@@ -135,6 +153,10 @@ void THSTensor_(cadd)(THSTensor *r_, THSTensor *t, real value, THSTensor *src) {
     return;
   }

+  if(!THSTensor_(isSameDensity)(t, src)) {
+    THError("cadd operands have incompatible densities");
+  }
+
   // saving those because they can be overwritten when doing in-place operations
   ptrdiff_t t_nnz = t->nnz, s_nnz = src->nnz, max_nnz = t_nnz + s_nnz;
   int t_coalesced = t->coalesced, s_coalesced = src->coalesced;
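Note that the new isSameDensity check runs only after the src->nnz == 0 early return, so a density mismatch is still rejected when both operands actually carry values. A hedged sketch of that case (tensor names are hypothetical, not from the commit):

```python
import torch

i2 = torch.LongTensor([[0, 1], [1, 0]])
v2 = torch.FloatTensor([[1.0], [2.0]])   # one dense dimension: dimI = 2, dimV = 1
a = torch.sparse.FloatTensor(i2, v2, torch.Size([2, 2, 1]))

i3 = torch.LongTensor([[0, 1], [1, 0], [0, 0]])
v3 = torch.FloatTensor([3.0, 4.0])       # purely sparse: dimI = 3, dimV = 0
b = torch.sparse.FloatTensor(i3, v3, torch.Size([2, 2, 1]))

# Same tensor.size(), but both operands have nnz > 0 and different densities,
# so this is expected to raise "cadd operands have incompatible densities".
a + b
```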

test/test_sparse.py

Lines changed: 11 additions & 0 deletions
@@ -385,6 +385,17 @@ def test_transpose_coalesce_invariant(self):
         self.assertTrue(x_coalesced.is_coalesced())
         self.assertFalse(y_uncoalesced.is_coalesced())

+    def test_add_zeros(self):
+        def test_shape(sparse_dims, sizes):
+            x, _, _ = self._gen_sparse(sparse_dims, 20, sizes)
+            zeros = torch.zeros(sizes, layout=torch.sparse_coo).to(x.device)
+            self.assertEqual(zeros + x, x)
+            self.assertEqual(x + zeros, x)
+
+        test_shape(1, [1])
+        test_shape(4, [3, 17, 19, 5])
+        test_shape(2, [3, 17, 19, 5])
+
     @cpu_only
     def test_mm(self):
         def test_shape(di, dj, dk):