@@ -555,6 +555,33 @@ def test_max(self):
     def test_min(self):
         self._testSelection(torch.min, min)

+    @staticmethod
+    def _test_norm(self, device):
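+        # Shared helper: a staticmethod taking `self` explicitly, so the CPU
+        # and CUDA test methods below can reuse it with their own TestCase.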
+        # full reduction
+        x = torch.randn(5, device=device)
+        xn = x.cpu().numpy()
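+        # p = 0 counts nonzero entries and p = inf is the max absolute value;
+        # both agree with np.linalg.norm's vector semantics for these inputs.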
+        for p in [0, 1, 2, 3, 4, float('inf')]:
+            res = x.norm(p).item()
+            expected = np.linalg.norm(xn, p)
+            self.assertEqual(res, expected, "full reduction failed for {}-norm".format(p))
+        # one dimension
+        x = torch.randn(5, 5, device=device)
+        xn = x.cpu().numpy()
+        for p in [0, 1, 2, 3, 4, float('inf')]:
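+            # norm(p, 1) reduces over dim 1, yielding one norm per row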
+            res = x.norm(p, 1).cpu().numpy()
+            expected = np.linalg.norm(xn, p, 1)
+            self.assertEqual(res.shape, expected.shape)
+            self.assertTrue(np.allclose(res, expected), "dim reduction failed for {}-norm".format(p))
+
+    @unittest.skipIf(not TEST_NUMPY, "Numpy not found")
+    def test_norm(self):
+        self._test_norm(self, device='cpu')
+
+    @unittest.skipIf(not TEST_NUMPY, "Numpy not found")
+    @unittest.skipIf(not torch.cuda.is_available(), 'no CUDA')
+    def test_norm_cuda(self):
+        self._test_norm(self, device='cuda')
+
     def test_dim_reduction_uint8_overflow(self):
         example = [[-1, 2, 1], [5, 3, 6]]
         x = torch.tensor(example, dtype=torch.uint8)
@@ -2056,6 +2083,23 @@ def renorm(matrix, value, dim, max_norm):
         self.assertEqual(m3, m2)
         self.assertEqual(m3.norm(2, 0), m2.norm(2, 0))

+    @staticmethod
+    def _test_renorm_ps(self, device):
+        # check renorm against a manually computed reference for several p-norms
+        x = torch.randn(5, 5, device=device)
+        for p in [1, 2, 3, 4, float('inf')]:
+            res = x.renorm(p, 1, 1)
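+            # renorm(p, dim, maxnorm) rescales each slice along `dim` whose
+            # p-norm exceeds maxnorm down to exactly maxnorm; with dim=1 and
+            # maxnorm=1, columns whose norm is already <= 1 pass through
+            # unchanged, which is what clamp(min=1) in the reference encodes.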
+            expected = x / x.norm(p, 0, keepdim=True).clamp(min=1)
+            self.assertEqual(res.cpu(), expected.cpu(), "renorm failed for {}-norm".format(p))
+
+    def test_renorm_ps(self):
+        self._test_renorm_ps(self, device='cpu')
+
+    @unittest.skipIf(not torch.cuda.is_available(), 'no CUDA')
+    def test_renorm_ps_cuda(self):
+        self._test_renorm_ps(self, device='cuda')
+
     @staticmethod
     def _test_multinomial(self, type):
         def make_prob_dist(shape, is_contiguous):