-
Notifications
You must be signed in to change notification settings - Fork 26.3k
Fixed histc return type for CUDA #20369
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -2458,58 +2458,59 @@ def test_zeros_out(self): | |
| self.assertEqual(torch.zeros(shape), torch.zeros(shape, layout=torch.strided, out=out)) | ||
| self.assertEqual(torch.zeros(shape), torch.zeros(shape, device='cpu', out=out)) | ||
|
|
||
| @staticmethod | ||
| def _test_histc(self, device): | ||
| # negative nbins throws | ||
| with self.assertRaisesRegex(RuntimeError, 'bins must be > 0'): | ||
| torch.histc(torch.tensor([1], dtype=torch.float, device=device), bins=-1) | ||
|
|
||
| # without nbins | ||
| actual = torch.histc( | ||
| torch.tensor([2, 5], dtype=torch.float, device=device)) | ||
| expected = torch.zeros(100, dtype=torch.float, device=device) | ||
| expected.data[0] = 1 | ||
| expected.data[99] = 1 | ||
| self.assertEqual(expected, actual) | ||
| # tensor with the same element | ||
| actual = torch.histc(torch.ones(5, dtype=torch.float, device=device), bins=5) | ||
| self.assertEqual( | ||
| torch.tensor([0, 0, 5, 0, 0], dtype=torch.float, device=device), | ||
| actual) | ||
| # no element falls between [min, max] | ||
| actual = torch.histc( | ||
| torch.ones(5, dtype=torch.float, device=device), bins=5, min=2, max=3) | ||
| self.assertEqual( | ||
| torch.tensor([0, 0, 0, 0, 0], dtype=torch.float, device=device), | ||
| actual) | ||
| # element falls below min + integral bin size and | ||
| actual = torch.histc( | ||
| torch.tensor([2, 4, 2, 2, 5, 4], dtype=torch.float, device=device), | ||
| bins=5, min=1, max=5) | ||
| self.assertEqual( | ||
| torch.tensor([0, 3, 0, 2, 1], dtype=torch.float, device=device), | ||
| actual) | ||
| # non-integral bin size | ||
| actual = torch.histc( | ||
| torch.tensor([1, 2, 1], dtype=torch.float, device=device), | ||
| bins=4, min=0, max=3) | ||
| self.assertEqual( | ||
| torch.tensor([0, 2, 1, 0], dtype=torch.float, device=device), | ||
| actual) | ||
| # double input | ||
| actual = torch.histc( | ||
| torch.tensor([1, 2, 1], dtype=torch.double, device=device), | ||
| bins=4, min=0, max=3) | ||
| self.assertEqual( | ||
| torch.tensor([0, 2, 1, 0], dtype=torch.double, device=device), | ||
| actual) | ||
| # mixed input | ||
| actual = torch.histc( | ||
| torch.tensor([1., 2, 1], dtype=torch.float, device=device), | ||
| bins=4, min=0, max=3) | ||
| self.assertEqual( | ||
| torch.tensor([0, 2, 1, 0], dtype=torch.float, device=device), | ||
| actual) | ||
| def test_histc(self): | ||
| for device in torch.testing.get_all_device_types(): | ||
| # negative nbins throws | ||
| with self.assertRaisesRegex(RuntimeError, 'bins must be > 0'): | ||
| torch.histc(torch.tensor([1], dtype=torch.float, device=device), bins=-1) | ||
|
|
||
| # without nbins | ||
| actual = torch.histc( | ||
| torch.tensor([2, 5], dtype=torch.float, device=device)) | ||
| expected = torch.zeros(100, dtype=torch.float, device=device) | ||
| expected.data[0] = 1 | ||
| expected.data[99] = 1 | ||
| self.assertEqual(expected, actual) | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I'm confused: the original code tested CUDA tensors by sending them through this path. Does
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Apparently it does. I just tried this:
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Thanks for checking that; that is concerning. Could you add another test to check if the dtype of the output is correct, then? |
||
| # tensor with the same element | ||
| actual = torch.histc(torch.ones(5, dtype=torch.float, device=device), bins=5) | ||
| self.assertEqual( | ||
| torch.tensor([0, 0, 5, 0, 0], dtype=torch.float, device=device), | ||
| actual) | ||
| # no element falls between [min, max] | ||
| actual = torch.histc( | ||
| torch.ones(5, dtype=torch.float, device=device), bins=5, min=2, max=3) | ||
| self.assertEqual( | ||
| torch.tensor([0, 0, 0, 0, 0], dtype=torch.float, device=device), | ||
| actual) | ||
| # element falls below min + integral bin size and | ||
| actual = torch.histc( | ||
| torch.tensor([2, 4, 2, 2, 5, 4], dtype=torch.float, device=device), | ||
| bins=5, min=1, max=5) | ||
| self.assertEqual( | ||
| torch.tensor([0, 3, 0, 2, 1], dtype=torch.float, device=device), | ||
| actual) | ||
| # non-integral bin size | ||
| actual = torch.histc( | ||
| torch.tensor([1, 2, 1], dtype=torch.float, device=device), | ||
| bins=4, min=0, max=3) | ||
| self.assertEqual( | ||
| torch.tensor([0, 2, 1, 0], dtype=torch.float, device=device), | ||
| actual) | ||
| # double input | ||
| actual = torch.histc( | ||
| torch.tensor([1, 2, 1], dtype=torch.double, device=device), bins=4, min=0, max=3) | ||
| self.assertEqual( | ||
| torch.tensor([0, 2, 1, 0], dtype=torch.double, device=device), | ||
| actual) | ||
| self.assertEqual(actual.dtype, torch.double) | ||
| # mixed input | ||
| actual = torch.histc( | ||
| torch.tensor([1., 2, 1], dtype=torch.float, device=device), | ||
| bins=4, min=0, max=3) | ||
| self.assertEqual( | ||
| torch.tensor([0, 2, 1, 0], dtype=torch.float, device=device), | ||
| actual) | ||
| self.assertEqual(actual.dtype, torch.float) | ||
|
|
||
| # test against numpy.histogram() | ||
| def test_against_np(tensor, bins=100, min=0, max=0): | ||
|
|
@@ -2540,9 +2541,6 @@ def test_against_np(tensor, bins=100, min=0, max=0): | |
| expanded = torch.randn(1, 5, 1, 2, device=device).expand(3, 5, 7, 2) | ||
| test_against_np(expanded) | ||
|
|
||
| def test_histc_cpu(self): | ||
| self._test_histc(self, 'cpu') | ||
|
|
||
| def test_ones(self): | ||
| res1 = torch.ones(100, 100) | ||
| res2 = torch.Tensor() | ||
|
|
@@ -9314,7 +9312,7 @@ def test_serialization_offset_filelike(self): | |
| i, j = 41, 43 | ||
| with BytesIOContext() as f: | ||
| pickle.dump(i, f) | ||
| torch.save(a, f) | ||
| torch.save(a, f) | ||
| pickle.dump(j, f) | ||
| torch.save(b, f) | ||
| f.seek(0) | ||
|
|
||
Uh oh!
There was an error while loading. Please reload this page.