Commits (25)
20ae623  trying to fix #18408 (codexetreme, Apr 7, 2019)
033c003  fixed a small error (codexetreme, Apr 7, 2019)
fd80b91  fixed a small error in code for #18408 (codexetreme, Apr 7, 2019)
53b19d2  Merge branch 'master' of https://github.com/codexetreme/pytorch (codexetreme, Apr 9, 2019)
e89e6f8  Fixed typos, changed logic for well_formed_tensor (codexetreme, Apr 9, 2019)
6e1de27  added code to cuda version as well (codexetreme, Apr 9, 2019)
5640966  merged from upstream (codexetreme, Apr 18, 2019)
f66e2ab  Added tests to check for _unique_dim does not support empty tensor (codexetreme, Apr 18, 2019)
9fb6c48  added counts return tensor (codexetreme, Apr 18, 2019)
fc39408  Merge branch 'master' of https://github.com/pytorch/pytorch (codexetreme, Apr 23, 2019)
157c06a  Merge branch 'master' of https://github.com/codexetreme/pytorch (codexetreme, Apr 23, 2019)
085d067  Bug fix and added test for return_counts (codexetreme, Apr 23, 2019)
232d6f4  Merge branch 'master' of https://github.com/pytorch/pytorch (codexetreme, Apr 24, 2019)
6048ae3  renamed conflicting variable (codexetreme, Apr 24, 2019)
5438b2f  bug fix and fixed linting in test_torch (codexetreme, Apr 24, 2019)
343eaa0  Merge branch 'master' of https://github.com/pytorch/pytorch (codexetreme, Apr 29, 2019)
c3a3cf8  output now returns empty tensor (codexetreme, Apr 29, 2019)
62a11ca  Merge branch 'master' of https://github.com/pytorch/pytorch (codexetreme, May 4, 2019)
4123d6d  Fixed review suggestions (codexetreme, May 4, 2019)
2eea140  Fixed linting and minor issue (codexetreme, May 5, 2019)
4877df1  removed redundant condition check and fixed linting (codexetreme, May 6, 2019)
c150085  Merge remote-tracking branch 'origin/master' into HEAD (May 12, 2019)
d6fe5e7  fixed the typos and tests (codexetreme, Jun 22, 2019)
73dd22d  Merge branch 'master' of https://www.github.com/codexetreme/pytorch (codexetreme, Jun 22, 2019)
84ce13e  removed trailing white spaces (codexetreme, May 23, 2019)
21 changes: 21 additions & 0 deletions aten/src/ATen/native/Unique.cpp
@@ -154,6 +154,27 @@ std::tuple<Tensor, Tensor, Tensor> _unique_dim_cpu_template(
const bool consecutive,
const bool return_inverse,
const bool return_counts) {

auto sizes = self.sizes().vec();
// check how many zero dimensions exist
auto num_zero_dims = std::count(sizes.begin(), sizes.end(), 0);

  // if the selected dimension is zero-sized, it must be the tensor's only
  // zero-sized dimension; in that case return empty results
  if (self.size(dim) == 0) {
    AT_CHECK(
        num_zero_dims == 1,
        "Number of zero sized dimensions is more than one, so unique cannot be applied");
    Tensor output = at::empty({0}, self.options());
    Tensor inverse_indices = at::empty({0}, self.options().dtype(kLong));
    Tensor counts = at::empty({0}, self.options().dtype(kLong));

    return std::make_tuple(output, inverse_indices, counts);
  }

  AT_CHECK(num_zero_dims == 0,
      "There are 0 sized dimensions, and they aren't selected, so unique cannot be applied");

// reshape tensor as [dim, -1]
Tensor input_flat = self.transpose(dim, 0);
auto orig_sizes = input_flat.sizes().vec();
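Taken together, the two AT_CHECKs in this hunk define the contract for empty inputs: if the selected dimension is the tensor's only zero-sized dimension, unique returns three empty tensors; any other zero-sized dimension is an error. A minimal sketch of what callers observe, assuming a PyTorch build that includes this patch (it mirrors the tests added to test_torch.py below):

import torch

x_empty = torch.empty(5, 0)  # dim 1 is zero-sized and is the one we select
output, inverse, counts = torch.unique(
    x_empty, return_inverse=True, return_counts=True, dim=1)
# all three results are empty; inverse and counts are int64
assert output.numel() == inverse.numel() == counts.numel() == 0

# more than one zero-sized dimension is rejected ...
try:
    torch.unique(torch.empty(5, 0, 0), dim=1)
except RuntimeError as e:
    print("rejected:", e)

# ... and so is a zero-sized dimension other than the selected one
try:
    torch.unique(torch.empty(5, 0, 5), dim=2)
except RuntimeError as e:
    print("rejected:", e)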
20 changes: 20 additions & 0 deletions aten/src/ATen/native/cuda/Unique.cu
@@ -138,6 +138,26 @@ std::tuple<Tensor, Tensor, Tensor> unique_dim_cuda_template(
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA());
auto policy = thrust::cuda::par(allocator).on(stream);

auto sizes = self.sizes().vec();
// check how many zero dimensions exist
auto num_zero_dims = std::count(sizes.begin(), sizes.end(), 0);

  // if the selected dimension is zero-sized, it must be the tensor's only
  // zero-sized dimension; in that case return empty results
  if (self.size(dim) == 0) {
    AT_CHECK(
        num_zero_dims == 1,
        "Number of zero sized dimensions is more than one, so unique cannot be applied");
    Tensor output = at::empty({0}, self.options());
    Tensor inverse_indices = at::empty({0}, self.options().dtype(kLong));
    Tensor counts = at::empty({0}, self.options().dtype(kLong));

    return std::make_tuple(output, inverse_indices, counts);
  }

  AT_CHECK(num_zero_dims == 0,
      "There are 0 sized dimensions, and they aren't selected, so unique cannot be applied");

int64_t num_inp = self.size(dim);
auto options = self.options().dtype(kLong);
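The CUDA hunk repeats the CPU early-exit verbatim, so the empty-tensor contract is device-independent. A short sketch of that parity, assuming a CUDA-enabled build with this patch:

import torch

if torch.cuda.is_available():
    x_empty = torch.empty(5, 0, device='cuda')
    output, inverse, counts = torch.unique(
        x_empty, return_inverse=True, return_counts=True, dim=1)
    # same contract as the CPU path: three empty tensors, indices as int64
    assert output.numel() == inverse.numel() == counts.numel() == 0
    assert inverse.dtype == counts.dtype == torch.long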
34 changes: 33 additions & 1 deletion test/test_torch.py
@@ -10848,6 +10848,9 @@ def run_test(dtype=torch.float, device=torch.device('cpu')):
[0., 1.]]],
dtype=dtype,
device=device)
x_empty = torch.empty(5, 0, dtype=dtype, device=device)
x_ill_formed_empty = torch.empty(5, 0, 0, dtype=dtype, device=device)
x_ill_formed_empty_another = torch.empty(5, 0, 5, dtype=dtype, device=device)
expected_unique_dim0 = torch.tensor([[[1., 1.],
[0., 1.],
[2., 1.],
@@ -10878,7 +10881,9 @@ def run_test(dtype=torch.float, device=torch.device('cpu')):
device=device)
expected_inverse_dim2 = torch.tensor([0, 1])
expected_counts_dim2 = torch.tensor([1, 1])

expected_unique_empty = torch.tensor([], dtype=dtype, device=device)
expected_inverse_empty = torch.tensor([], dtype=torch.long, device=device)
expected_counts_empty = torch.tensor([], dtype=torch.long, device=device)
# dim0
x_unique = torch.unique(x, dim=0)
self.assertEqual(expected_unique_dim0, x_unique)
@@ -10963,6 +10968,33 @@ def run_test(dtype=torch.float, device=torch.device('cpu')):
self.assertEqual(expected_inverse_dim2, x_inverse)
self.assertEqual(expected_counts_dim2, x_counts)

# test empty tensor
x_unique, x_inverse, x_counts = torch.unique(
x_empty,
return_inverse=True,
return_counts=True,
dim=1)
self.assertEqual(expected_unique_empty, x_unique)
self.assertEqual(expected_inverse_empty, x_inverse)
self.assertEqual(expected_counts_empty, x_counts)

# test a tensor that is not well formed (extra zero-sized dimension);
# a RuntimeError is the expected behaviour
with self.assertRaises(RuntimeError):
torch.unique(
x_ill_formed_empty,
return_inverse=True,
return_counts=True,
dim=1)

# test along dim 2, with a zero-sized dimension that is not selected
with self.assertRaises(RuntimeError):
torch.unique(
x_ill_formed_empty_another,
return_inverse=True,
return_counts=True,
dim=2)

# test consecutive version
y = torch.tensor(
[[0, 1],