Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
27 changes: 25 additions & 2 deletions aten/src/THCS/generic/THCSTensorMath.cu
Original file line number Diff line number Diff line change
Expand Up @@ -367,10 +367,29 @@ void THCSTensor_(div)(THCState *state, THCSTensor *r_, THCSTensor *t, real value
}
}

/* Returns 1 if `self` and `src` have the same total number of dimensions
 * (sparse + dense) and identical sizes in every dimension, regardless of
 * how each tensor splits those dimensions between sparse (I) and dense (V).
 * Returns 0 otherwise. `state` is unused here but kept for API symmetry. */
int THCSTensor_(isSameSizeIgnoringDensity)(THCState *state, const THCSTensor *self, const THCSTensor *src) {
  int d;
  /* Hoist the loop-invariant total-dimension count out of the loop. */
  const int nDim = self->nDimensionI + self->nDimensionV;
  if (nDim != src->nDimensionI + src->nDimensionV) {
    return 0;
  }
  for (d = 0; d < nDim; ++d) {
    if (self->size[d] != src->size[d]) {
      return 0;
    }
  }
  return 1;
}

/* Returns 1 if `self` and `src` partition their dimensions into the same
 * number of sparse (I) and dense (V) dimensions; returns 0 otherwise. */
int THCSTensor_(isSameDensity)(THCState *state, const THCSTensor *self, const THCSTensor *src) {
  if (self->nDimensionI != src->nDimensionI) {
    return 0;
  }
  return self->nDimensionV == src->nDimensionV;
}


void THCSTensor_(cadd)(THCState *state, THCSTensor *r_, THCSTensor *t, real value, THCSTensor *src) {
THCAssertSameGPU(THCSTensor_(checkGPU)(state, 3, 3, r_, t, src));
if(!THCSTensor_(isSameSizeAs)(state, t, src)) {
THError("cadd operands have incompatible sizes or dimension types");
if (!THCSTensor_(isSameSizeIgnoringDensity)(state, t, src)) {
THError("cadd operands have incompatible sizes");
}

if (src->nnz == 0) {
Expand All @@ -382,6 +401,10 @@ void THCSTensor_(cadd)(THCState *state, THCSTensor *r_, THCSTensor *t, real valu
return;
}

if(!THCSTensor_(isSameDensity)(state, t, src)) {
THError("cadd operands have incompatible densities");
}

// We deliberately choose to simply concat the indices and values tensors
// rather than merging them. This removes the need to synchronously fetch nnz
// at the end of the operation, at the cost of having a non-coalesced result.
Expand Down
26 changes: 24 additions & 2 deletions aten/src/THS/generic/THSTensorMath.c
Original file line number Diff line number Diff line change
Expand Up @@ -121,9 +121,27 @@ void THSTensor_(div)(THSTensor *r_, THSTensor *t, real value) {
}
}

/* Returns 1 if `self` and `src` have the same total number of dimensions
 * (sparse + dense) and identical sizes in every dimension, regardless of
 * how each tensor splits those dimensions between sparse (I) and dense (V).
 * Returns 0 otherwise. */
int THSTensor_(isSameSizeIgnoringDensity)(const THSTensor *self, const THSTensor* src) {
  int d;
  /* Hoist the loop-invariant total-dimension count out of the loop. */
  const int nDim = self->nDimensionI + self->nDimensionV;
  if (nDim != src->nDimensionI + src->nDimensionV) {
    return 0;
  }
  for (d = 0; d < nDim; ++d) {
    if (self->size[d] != src->size[d]) {
      return 0;
    }
  }
  return 1;
}

/* Returns 1 if `self` and `src` partition their dimensions into the same
 * number of sparse (I) and dense (V) dimensions; returns 0 otherwise. */
int THSTensor_(isSameDensity)(const THSTensor *self, const THSTensor* src) {
  if (self->nDimensionI != src->nDimensionI) {
    return 0;
  }
  return self->nDimensionV == src->nDimensionV;
}

void THSTensor_(cadd)(THSTensor *r_, THSTensor *t, real value, THSTensor *src) {
if(!THSTensor_(isSameSizeAs)(t, src)) {
THError("cadd operands have incompatible sizes or dimension types");
if (!THSTensor_(isSameSizeIgnoringDensity)(t, src)) {
THError("cadd operands have incompatible sizes");
}

if (src->nnz == 0) {
Expand All @@ -135,6 +153,10 @@ void THSTensor_(cadd)(THSTensor *r_, THSTensor *t, real value, THSTensor *src) {
return;
}

if(!THSTensor_(isSameDensity)(t, src)) {
THError("cadd operands have incompatible densities");
}

// saving those because they can be overwritten when doing in-place operations
ptrdiff_t t_nnz = t->nnz, s_nnz = src->nnz, max_nnz = t_nnz + s_nnz;
int t_coalesced = t->coalesced, s_coalesced = src->coalesced;
Expand Down
11 changes: 11 additions & 0 deletions test/test_sparse.py
Original file line number Diff line number Diff line change
Expand Up @@ -385,6 +385,17 @@ def test_transpose_coalesce_invariant(self):
self.assertTrue(x_coalesced.is_coalesced())
self.assertFalse(y_uncoalesced.is_coalesced())

def test_add_zeros(self):
    # Adding an all-zero sparse tensor must be an identity operation,
    # whichever side of `+` the zero tensor appears on.
    def test_shape(sparse_dims, sizes):
        # _gen_sparse presumably returns (tensor, indices, values); only
        # the tensor is used here — TODO confirm against the helper.
        x, _, _ = self._gen_sparse(sparse_dims, 20, sizes)
        # Sparse-COO zero tensor of the same overall sizes; its sparse/dense
        # dimension split need not match x's.
        zeros = torch.zeros(sizes, layout=torch.sparse_coo).to(x.device)
        self.assertEqual(zeros + x, x)
        self.assertEqual(x + zeros, x)

    # Cover 1-D, fully-sparse 4-D, and mixed sparse/dense 4-D shapes.
    test_shape(1, [1])
    test_shape(4, [3, 17, 19, 5])
    test_shape(2, [3, 17, 19, 5])

@cpu_only
def test_mm(self):
def test_shape(di, dj, dk):
Expand Down