Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 3 additions & 2 deletions aten/src/ATen/native/cuda/SummaryOps.cu
Original file line number Diff line number Diff line change
Expand Up @@ -311,7 +311,7 @@ Tensor _histc_cuda_template(
if (nbins <= 0) {
AT_ERROR("bins must be > 0");
}
Tensor output = native::zeros({nbins}, device(DeviceType::CUDA).dtype(kLong));
Tensor output = native::zeros({nbins}, device(DeviceType::CUDA).dtype(self.scalar_type()));
input_t minvalue = min;
input_t maxvalue = max;
if (min == max) {
Expand All @@ -322,7 +322,8 @@ Tensor _histc_cuda_template(
minvalue = minvalue - 1;
maxvalue = maxvalue + 1;
}
auto ret = cuda::CUDA_tensor_histogram<int64_t, input_t, false>(

auto ret = cuda::CUDA_tensor_histogram<input_t, input_t, false>(
output, self, Tensor(), nbins, minvalue, maxvalue);
return output;
}
Expand Down
3 changes: 0 additions & 3 deletions test/test_cuda.py
Original file line number Diff line number Diff line change
Expand Up @@ -2720,9 +2720,6 @@ def test_bincount_cuda(self):
self.assertEqual(t.cpu().bincount(), t.bincount())
self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))

def test_histc_cuda(self):
_TestTorchMixin._test_histc(self, device='cuda')

def test_tiny_half_norm_(self):
a = torch.arange(25).cuda().float()
a /= 100000000
Expand Down
110 changes: 54 additions & 56 deletions test/test_torch.py
Original file line number Diff line number Diff line change
Expand Up @@ -2458,58 +2458,59 @@ def test_zeros_out(self):
self.assertEqual(torch.zeros(shape), torch.zeros(shape, layout=torch.strided, out=out))
self.assertEqual(torch.zeros(shape), torch.zeros(shape, device='cpu', out=out))

@staticmethod
def _test_histc(self, device):
# negative nbins throws
with self.assertRaisesRegex(RuntimeError, 'bins must be > 0'):
torch.histc(torch.tensor([1], dtype=torch.float, device=device), bins=-1)

# without nbins
actual = torch.histc(
torch.tensor([2, 5], dtype=torch.float, device=device))
expected = torch.zeros(100, dtype=torch.float, device=device)
expected.data[0] = 1
expected.data[99] = 1
self.assertEqual(expected, actual)
# tensor with the same element
actual = torch.histc(torch.ones(5, dtype=torch.float, device=device), bins=5)
self.assertEqual(
torch.tensor([0, 0, 5, 0, 0], dtype=torch.float, device=device),
actual)
# no element falls between [min, max]
actual = torch.histc(
torch.ones(5, dtype=torch.float, device=device), bins=5, min=2, max=3)
self.assertEqual(
torch.tensor([0, 0, 0, 0, 0], dtype=torch.float, device=device),
actual)
# element falls below min + integral bin size and
actual = torch.histc(
torch.tensor([2, 4, 2, 2, 5, 4], dtype=torch.float, device=device),
bins=5, min=1, max=5)
self.assertEqual(
torch.tensor([0, 3, 0, 2, 1], dtype=torch.float, device=device),
actual)
# non-integral bin size
actual = torch.histc(
torch.tensor([1, 2, 1], dtype=torch.float, device=device),
bins=4, min=0, max=3)
self.assertEqual(
torch.tensor([0, 2, 1, 0], dtype=torch.float, device=device),
actual)
# double input
actual = torch.histc(
torch.tensor([1, 2, 1], dtype=torch.double, device=device),
bins=4, min=0, max=3)
self.assertEqual(
torch.tensor([0, 2, 1, 0], dtype=torch.double, device=device),
actual)
# mixed input
actual = torch.histc(
torch.tensor([1., 2, 1], dtype=torch.float, device=device),
bins=4, min=0, max=3)
self.assertEqual(
torch.tensor([0, 2, 1, 0], dtype=torch.float, device=device),
actual)
def test_histc(self):
for device in torch.testing.get_all_device_types():
# negative nbins throws
with self.assertRaisesRegex(RuntimeError, 'bins must be > 0'):
torch.histc(torch.tensor([1], dtype=torch.float, device=device), bins=-1)

# without nbins
actual = torch.histc(
torch.tensor([2, 5], dtype=torch.float, device=device))
expected = torch.zeros(100, dtype=torch.float, device=device)
expected.data[0] = 1
expected.data[99] = 1
self.assertEqual(expected, actual)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm confused: the original code tested CUDA tensors by sending them through this path. Does self.assertEqual ignore dtype when comparing tensors?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Apparently it does. I just tried this:
self.assertEqual(torch.tensor([1], dtype=torch.int), torch.tensor([1], dtype=torch.double)) and it didn't fail. I will create an issue for this because it's really concerning.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thanks for checking that, that is concerning.

Could you add another test to check if the dtype of the output is correct, then?

# tensor with the same element
actual = torch.histc(torch.ones(5, dtype=torch.float, device=device), bins=5)
self.assertEqual(
torch.tensor([0, 0, 5, 0, 0], dtype=torch.float, device=device),
actual)
# no element falls between [min, max]
actual = torch.histc(
torch.ones(5, dtype=torch.float, device=device), bins=5, min=2, max=3)
self.assertEqual(
torch.tensor([0, 0, 0, 0, 0], dtype=torch.float, device=device),
actual)
# element falls below min + integral bin size and
actual = torch.histc(
torch.tensor([2, 4, 2, 2, 5, 4], dtype=torch.float, device=device),
bins=5, min=1, max=5)
self.assertEqual(
torch.tensor([0, 3, 0, 2, 1], dtype=torch.float, device=device),
actual)
# non-integral bin size
actual = torch.histc(
torch.tensor([1, 2, 1], dtype=torch.float, device=device),
bins=4, min=0, max=3)
self.assertEqual(
torch.tensor([0, 2, 1, 0], dtype=torch.float, device=device),
actual)
# double input
actual = torch.histc(
torch.tensor([1, 2, 1], dtype=torch.double, device=device), bins=4, min=0, max=3)
self.assertEqual(
torch.tensor([0, 2, 1, 0], dtype=torch.double, device=device),
actual)
self.assertEqual(actual.dtype, torch.double)
# mixed input
actual = torch.histc(
torch.tensor([1., 2, 1], dtype=torch.float, device=device),
bins=4, min=0, max=3)
self.assertEqual(
torch.tensor([0, 2, 1, 0], dtype=torch.float, device=device),
actual)
self.assertEqual(actual.dtype, torch.float)

# test against numpy.histogram()
def test_against_np(tensor, bins=100, min=0, max=0):
Expand Down Expand Up @@ -2540,9 +2541,6 @@ def test_against_np(tensor, bins=100, min=0, max=0):
expanded = torch.randn(1, 5, 1, 2, device=device).expand(3, 5, 7, 2)
test_against_np(expanded)

def test_histc_cpu(self):
self._test_histc(self, 'cpu')

def test_ones(self):
res1 = torch.ones(100, 100)
res2 = torch.Tensor()
Expand Down Expand Up @@ -9314,7 +9312,7 @@ def test_serialization_offset_filelike(self):
i, j = 41, 43
with BytesIOContext() as f:
pickle.dump(i, f)
torch.save(a, f)
torch.save(a, f)
pickle.dump(j, f)
torch.save(b, f)
f.seek(0)
Expand Down