@@ -496,6 +496,74 @@ def test_shape(sparse_dims, nnz, with_size):
496496 test_shape (3 , 10 , [100 , 100 , 100 , 5 , 5 , 5 , 0 ])
497497 test_shape (3 , 0 , [0 , 0 , 100 , 5 , 5 , 5 , 0 ])
498498
def test_Sparse_to_Sparse_copy_(self):
    """Exercise torch.Tensor.copy_(sparse, sparse).

    Covers: plain value copy, dtype preservation of the destination,
    rejection of mismatched sizes (no broadcast), rejection of mixed
    dense/sparse copy, and autograd flow back to the copy source.
    """
    sparse_dims = 3
    nnz = 10
    sizes = [2, 3, 4, 5]  # hybrid sparse
    dst, _, _ = self._gen_sparse(sparse_dims, nnz, sizes)
    src, _, _ = self._gen_sparse(sparse_dims, nnz + 10, sizes)

    # Plain copy: destination must end up equal to the source.
    expected_dense = src.to_dense()
    dst.copy_(src)
    self.assertEqual(expected_dense, dst.to_dense())

    # dtype preservation: after dst.copy_(src), dst keeps its own dtype.
    dst = dst.to(torch.float32)
    src = src.to(torch.float64)
    dtype_before = dst.dtype
    dst.copy_(src)
    self.assertEqual(dtype_before, dst.dtype)

    # Mismatched sizes are rejected — sparse copy_ does not broadcast.
    self.assertRaises(RuntimeError, lambda: dst.copy_(src.narrow_copy(0, 0, 1)))

    # Copying a dense tensor into a sparse one raises.
    self.assertRaises(RuntimeError, lambda: dst.copy_(torch.randn(5, 5)))

    # Autograd: gradient flows to the copy source, not the destination.
    dst, _, _ = self._gen_sparse(sparse_dims, nnz, sizes)
    src, _, _ = self._gen_sparse(sparse_dims, nnz + 10, sizes)
    src.requires_grad_(True)
    dst.copy_(src)
    out = dst * 2
    grad_seed = src.clone()
    out.backward(grad_seed)
    expected_grad = grad_seed * 2
    self.assertEqual(expected_grad.to_dense(), src.grad.to_dense())
    self.assertEqual(None, dst.grad)
536+
@unittest.skipIf(torch.cuda.device_count() < 2, "no multi-GPU")
def test_Sparse_to_Sparse_copy_multi_gpu(self):
    """Exercise torch.Tensor.copy_(sparse, sparse) across devices.

    Checks GPU-to-GPU and CPU-to-GPU copies keep the destination on
    its own device, and that autograd delivers the gradient back on
    the source's device.
    """
    sparse_dims = 3
    nnz = 10
    sizes = [2, 3, 4, 5]  # hybrid sparse
    dst, _, _ = self._gen_sparse(sparse_dims, nnz, sizes)
    src, _, _ = self._gen_sparse(sparse_dims, nnz + 10, sizes)
    dst = dst.to('cuda:0')

    def check_cross_device(dst, src):
        # copy_ must bring values over without moving dst off its device.
        device_before = dst.device
        dst.copy_(src)
        self.assertEqual(src.to('cuda:0').to_dense(), dst.to_dense())
        self.assertEqual(device_before, dst.device)

    check_cross_device(dst, src.to('cuda:1'))  # across GPU devices
    check_cross_device(dst, src.to('cpu'))     # between CPU and GPU

    # Autograd across devices: gradient lands on the source's device.
    src = src.to('cuda:1')
    src.requires_grad_(True)
    dst.copy_(src)
    out = dst * 2
    grad_seed = src.clone().to('cuda:0')
    out.backward(grad_seed)
    expected_grad = grad_seed * 2
    self.assertEqual(expected_grad.to_dense(), src.grad.to('cuda:0').to_dense())
    self.assertEqual(None, dst.grad)
566+
499567 @cuda_only
500568 def test_cuda_empty (self ):
501569 def test_tensor (x ):
0 commit comments