1 change: 1 addition & 0 deletions aten/src/ATen/core/Tensor.h
@@ -702,6 +702,7 @@ class CAFFE2_API Tensor {
Tensor index_select(int64_t dim, const Tensor & index) const;
Tensor masked_select(const Tensor & mask) const;
Tensor nonzero() const;
std::vector<Tensor> nonzero_numpy() const;
Tensor gather(int64_t dim, const Tensor & index, bool sparse_grad=false) const;
Tensor addcmul(const Tensor & tensor1, const Tensor & tensor2, Scalar value=1) const;
Tensor addcdiv(const Tensor & tensor1, const Tensor & tensor2, Scalar value=1) const;
3 changes: 3 additions & 0 deletions aten/src/ATen/core/TensorMethods.h
@@ -1151,6 +1151,9 @@ inline Tensor Tensor::masked_select(const Tensor & mask) const {
inline Tensor Tensor::nonzero() const {
return dispatch_type().nonzero(*this);
}
inline std::vector<Tensor> Tensor::nonzero_numpy() const {
return dispatch_type().nonzero_numpy(*this);
}
inline Tensor Tensor::gather(int64_t dim, const Tensor & index, bool sparse_grad) const {
return dispatch_type().gather(*this, dim, index, sparse_grad);
}
1 change: 1 addition & 0 deletions aten/src/ATen/core/Type.h
@@ -508,6 +508,7 @@ struct CAFFE2_API Type {
virtual Tensor index_select(const Tensor & self, int64_t dim, const Tensor & index) const = 0;
virtual Tensor masked_select(const Tensor & self, const Tensor & mask) const = 0;
virtual Tensor nonzero(const Tensor & self) const = 0;
virtual std::vector<Tensor> nonzero_numpy(const Tensor & self) const = 0;
virtual Tensor gather(const Tensor & self, int64_t dim, const Tensor & index, bool sparse_grad) const = 0;
virtual Tensor addcmul(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) const = 0;
virtual Tensor addcdiv(const Tensor & self, const Tensor & tensor1, const Tensor & tensor2, Scalar value) const = 0;
15 changes: 15 additions & 0 deletions aten/src/ATen/native/Indexing.cpp
@@ -568,4 +568,19 @@ Tensor _gather_sparse_backward(const Tensor& self, int64_t dim, const Tensor& in
return at::_sparse_coo_tensor_unsafe(sparse_ind, grad.reshape(-1), self.sizes());
}

std::vector<Tensor> nonzero_numpy(const Tensor& self) {
// special case scalar for compatibility with numpy:
//
// >>> np.array(5).nonzero()
// (array([0]),)
// >>> np.array(0).nonzero()
// (array([], dtype=int64),)

if (self.dim() == 0) {
return self.unsqueeze(0).nonzero().unbind(1);
}

return self.nonzero().unbind(1);
}

}} // at::native
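
As a reference for the C++ above, a minimal Python-level sketch of what the unbind-based implementation produces once the `as_tuple=True` binding below is wired up (shapes match the tests in test_torch.py):

    import torch

    t = torch.tensor([[1, 0], [0, 1]])
    # nonzero() yields a (z x n) index matrix; unbind(1) splits it into
    # one 1-D index tensor per input dimension.
    torch.nonzero(t, as_tuple=True)                # (tensor([0, 1]), tensor([0, 1]))

    # Zero-dim tensors are unsqueezed first, for numpy compatibility:
    torch.nonzero(torch.tensor(5), as_tuple=True)  # (tensor([0]),)
    torch.nonzero(torch.tensor(0), as_tuple=True)  # (tensor([], dtype=torch.int64),)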
3 changes: 3 additions & 0 deletions aten/src/ATen/native/native_functions.yaml
@@ -3433,6 +3433,9 @@
CPU: legacy::cpu::_th_nonzero
CUDA: legacy::cuda::_th_nonzero

- func: nonzero_numpy(Tensor self) -> Tensor[]
variants: method, function

- func: gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)
dispatch:
CPU: gather_out_cpu
30 changes: 29 additions & 1 deletion test/test_torch.py
@@ -9034,6 +9034,11 @@ def gen_nontrivial_input(num_src, dtype, device):
dst2 = tensor.nonzero()
dst3 = torch.LongTensor().to(device)
torch.nonzero(tensor, out=dst3)

self.assertRaisesRegex(
TypeError,
"received an invalid combination of arguments",
lambda: torch.nonzero(tensor, as_tuple=True, out=dst3))
if len(shape) == 1:
dst = []
for i in range(num_src):
@@ -9055,21 +9060,44 @@ def gen_nontrivial_input(num_src, dtype, device):
self.assertNotEqual(tensor[dst1[i, 0], dst1[i, 1], dst1[i, 2]].item(), 0)
lex = is_lexicographically_sorted(dst1)
self.assertEqual(torch.ones_like(lex), lex)
if TEST_NUMPY:
tup1 = torch.nonzero(tensor, as_tuple=True)
tup2 = tensor.nonzero(as_tuple=True)
np1 = tensor.cpu().numpy().nonzero()
for t in (tup1, tup2):
self.assertEqual(len(t), len(np1))
for i in range(len(t)):
self.assertEqual(t[i].cpu().numpy(), np1[i])

def test_nonzero_empty(self):
def assert_tuple_empty(tup, dim):
self.assertEqual(dim, len(tup))
for t in tup:
self.assertEqual(torch.Size([0]), t.shape)
for device in torch.testing.get_all_device_types():
x = torch.randn(0, 2, 0, 5, 0, device=device)
y = torch.nonzero(x)
z = torch.nonzero(x, as_tuple=True)

self.assertEqual(0, y.numel())
self.assertEqual(torch.Size([0, 5]), y.shape)
assert_tuple_empty(z, 5)

x = torch.tensor(0.5, device=device)
y = torch.nonzero(x)
self.assertEqual(torch.Size([1, 0]), y.shape)
# nonzero with as_tuple returns a
# tuple of len 1 for a zero-dim tensor.
# This is done to match NumPy behavior.
z = torch.nonzero(x, as_tuple=True)
self.assertEqual(1, len(z))
self.assertEqual(torch.zeros(1, dtype=torch.long), z[0])

x = torch.zeros((), device=device)
y = torch.nonzero(x)
z = torch.nonzero(x, as_tuple=True)
self.assertEqual(torch.Size([0, 0]), y.shape)
self.assertEqual(1, len(z))
self.assertEqual(torch.empty(0, dtype=torch.long), z[0])

def test_deepcopy(self):
from copy import deepcopy
1 change: 1 addition & 0 deletions tools/autograd/gen_python_functions.py
@@ -31,6 +31,7 @@
'slice', 'randint(_out)?',
'item', '_local_scalar_dense', 'to',
'copy_sparse_to_sparse_', 'copy_',
'nonzero(_(out|numpy))?',
]

# These function signatures are not exposed to Python. Note that this signature
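
This entry keeps `nonzero`, `nonzero_out`, and `nonzero_numpy` out of the auto-generated Python bindings, so the hand-written THPVariable_nonzero added below takes over. A quick illustrative check of what the new pattern covers:

    import re

    pat = re.compile(r'nonzero(_(out|numpy))?')
    # 'nonzero', 'nonzero_out' and 'nonzero_numpy' are all skipped
    assert all(pat.fullmatch(n) for n in ('nonzero', 'nonzero_out', 'nonzero_numpy'))
    assert pat.fullmatch('nonzero_cuda') is None  # not covered by this pattern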
44 changes: 44 additions & 0 deletions tools/autograd/templates/python_torch_functions.cpp
@@ -362,6 +362,49 @@ static PyObject * THPVariable__promote_types(PyObject* self, PyObject* args, PyO
END_HANDLE_TH_ERRORS
}

static Tensor dispatch_nonzero(const Tensor & self) {
AutoNoGIL no_gil;
OptionalDeviceGuard device_guard(device_of(self));
return self.nonzero();
}

static Tensor dispatch_nonzero(const Tensor & self, Tensor out) {
AutoNoGIL no_gil;
OptionalDeviceGuard device_guard(device_of(self));
return at::nonzero_out(out, self);
}

static std::vector<Tensor> dispatch_nonzero_numpy(const Tensor & self) {
AutoNoGIL no_gil;
OptionalDeviceGuard device_guard(device_of(self));
return self.nonzero_numpy();
}

static PyObject * THPVariable_nonzero(PyObject* self, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"nonzero(Tensor input, *, Tensor out=None)|deprecated",
"nonzero(Tensor input, *, bool as_tuple)",
});
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0) {
if (r.isNone(1)) {
return wrap(dispatch_nonzero(r.tensor(0)));
} else {
return wrap(dispatch_nonzero(r.tensor(0), r.tensor(1)));
}
} else {
if (r.toBool(1)) {
return wrap(dispatch_nonzero_numpy(r.tensor(0)));
} else {
return wrap(dispatch_nonzero(r.tensor(0)));
}
}
END_HANDLE_TH_ERRORS
}
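
The parser accepts either the deprecated out= overload or the new as_tuple overload, never both at once; a sketch of the resulting Python-level behavior (the TypeError is the one asserted in test_torch.py):

    import torch

    t = torch.tensor([1, 0, 1])
    torch.nonzero(t)                    # tensor([[0], [2]])
    torch.nonzero(t, as_tuple=True)     # (tensor([0, 2]),)

    out = torch.empty(0, dtype=torch.long)
    torch.nonzero(t, out=out)           # resizes `out` and writes the index matrix
    # torch.nonzero(t, as_tuple=True, out=out)
    #   -> TypeError: ... received an invalid combination of arguments ...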

static PyObject * THPVariable_sparse_coo_tensor(PyObject* self, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
@@ -405,6 +448,7 @@ static PyMethodDef torch_functions[] = {
{"from_numpy", (PyCFunction)THPVariable_from_numpy, METH_STATIC | METH_O, NULL},
{"hsmm", (PyCFunction)THPVariable_hspmm, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL},
{"_promote_types", (PyCFunction)THPVariable__promote_types, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL},
{"nonzero", (PyCFunction)THPVariable_nonzero, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL},
{"randint", (PyCFunction)THPVariable_randint, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL},
{"range", (PyCFunction)THPVariable_range, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL},
{"saddmm", (PyCFunction)THPVariable_sspaddmm, METH_VARARGS | METH_KEYWORDS | METH_STATIC, NULL},
31 changes: 31 additions & 0 deletions tools/autograd/templates/python_variable_methods.cpp
@@ -326,6 +326,36 @@ static PyObject * THPVariable_cpu(PyObject* self, PyObject* args)
END_HANDLE_TH_ERRORS
}

static Tensor dispatch_nonzero(const Tensor & self) {
AutoNoGIL no_gil;
OptionalDeviceGuard device_guard(device_of(self));
return self.nonzero();
}

static std::vector<Tensor> dispatch_nonzero_numpy(const Tensor & self) {
AutoNoGIL no_gil;
OptionalDeviceGuard device_guard(device_of(self));
return self.nonzero_numpy();
}

static PyObject * THPVariable_nonzero(PyObject* self, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"nonzero()|deprecated",
"nonzero(*, bool as_tuple=False)",
});
auto& self_ = reinterpret_cast<THPVariable*>(self)->cdata;
ParsedArgs<2> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.idx == 0 || (r.idx == 1 && !r.toBool(0))) {
return wrap(dispatch_nonzero(self_));
} else {
return wrap(dispatch_nonzero_numpy(self_));
}
END_HANDLE_TH_ERRORS
}
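
The method form mirrors the function form, minus the out= overload; a short sketch:

    import torch

    t = torch.tensor([[0, 3], [4, 0]])
    t.nonzero()                # tensor([[0, 1], [1, 0]])
    t.nonzero(as_tuple=True)   # (tensor([0, 1]), tensor([1, 0]))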

static PyObject * THPVariable_cuda(PyObject* self, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
@@ -734,6 +764,7 @@ PyMethodDef variable_methods[] = {
{"new_ones", (PyCFunction)THPVariable_new_ones, METH_VARARGS | METH_KEYWORDS, NULL},
{"new_tensor", (PyCFunction)THPVariable_new_tensor, METH_VARARGS | METH_KEYWORDS, NULL},
{"new_zeros", (PyCFunction)THPVariable_new_zeros, METH_VARARGS | METH_KEYWORDS, NULL},
{"nonzero", (PyCFunction)THPVariable_nonzero, METH_VARARGS | METH_KEYWORDS, NULL},
{"numpy", (PyCFunction)THPVariable_numpy, METH_NOARGS, NULL},
{"record_stream", (PyCFunction)THPVariable_record_stream, METH_O, NULL},
{"requires_grad_", (PyCFunction)THPVariable_requires_grad_, METH_VARARGS | METH_KEYWORDS, NULL},
6 changes: 6 additions & 0 deletions torch/__init__.pyi.in
@@ -82,6 +82,7 @@ class Tensor:
def share_memory_(self) -> None: ...
# TODO: fill in the types for these, or otherwise figure out some
# way to not have to write these out again...
def nonzero(self, *, as_tuple=False): ...
def norm(self, p="fro", dim=None, keepdim=False): ...
def stft(self, n_fft, hop_length=None, win_length=None, window=None,
center=True, pad_mode='reflect', normalized=False, onesided=True): ...
@@ -105,3 +106,8 @@ def set_default_tensor_type(type) -> None: ... # ick, what a bad legacy API
def set_default_dtype(d : _dtype) -> None: ...
def manager_path() -> str: ...
def compiled_with_cxx11_abi() -> bool: ...

# The return value of this function depends on the value of `as_tuple`
# (similar to `unique`, `lu`, etc.); as such, it is not possible to
# type it correctly.
def nonzero(input: Tensor, *, out: Optional[Tensor]=None, as_tuple: Optional[bool]=None): ...
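
For illustration only: with a literal-bool discriminator (typing_extensions.Literal at the time, typing.Literal in 3.8+), the dependence could in principle be spelled out via @overload; a hypothetical sketch of what the stub deliberately avoids committing to:

    from typing import Optional, Tuple, overload
    from typing_extensions import Literal

    # Tensor as defined in this stub.
    @overload
    def nonzero(input: Tensor, *, out: Optional[Tensor] = ...,
                as_tuple: Literal[False] = ...) -> Tensor: ...
    @overload
    def nonzero(input: Tensor, *, as_tuple: Literal[True]) -> Tuple[Tensor, ...]: ...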
32 changes: 31 additions & 1 deletion torch/_torch_docs.py
@@ -3468,7 +3469,9 @@ def merge_dicts(*dicts):

add_docstr(torch.nonzero,
r"""
nonzero(input, out=None) -> LongTensor
nonzero(input, *, out=None, as_tuple=False) -> LongTensor or tuple of LongTensors

**When** :attr:`as_tuple` **is false or unspecified:**

Returns a tensor containing the indices of all non-zero elements of
:attr:`input`. Each row in the result contains the indices of a non-zero
Expand All @@ -3479,10 +3481,29 @@ def merge_dicts(*dicts):
:attr:`out` is of size :math:`(z \times n)`, where :math:`z` is the total number of
non-zero elements in the :attr:`input` tensor.

**When** :attr:`as_tuple` **is true:**

Returns a tuple of 1-D tensors, one for each dimension in :attr:`input`,
each containing the indices (in that dimension) of all non-zero elements of
:attr:`input`.

If :attr:`input` has `n` dimensions, then the resulting tuple contains `n` tensors
of size `z`, where `z` is the total number of
non-zero elements in the :attr:`input` tensor.

As a special case, when :attr:`input` has zero dimensions and a nonzero scalar
value, it is treated as a one-dimensional tensor with one element.

Args:
input (Tensor): the input tensor
out (LongTensor, optional): the output tensor containing indices

Returns:
LongTensor or tuple of LongTensor: If :attr:`as_tuple` is false, the output
tensor containing indices. If :attr:`as_tuple` is true, one 1-D tensor for
each dimension, containing the indices of each nonzero element along that
dimension.

Example::

>>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]))
@@ -3498,6 +3519,15 @@ def merge_dicts(*dicts):
[ 1, 1],
[ 2, 2],
[ 3, 3]])
>>> torch.nonzero(torch.tensor([1, 1, 1, 0, 1]), as_tuple=True)
(tensor([0, 1, 2, 4]),)
>>> torch.nonzero(torch.tensor([[0.6, 0.0, 0.0, 0.0],
...                             [0.0, 0.4, 0.0, 0.0],
...                             [0.0, 0.0, 1.2, 0.0],
...                             [0.0, 0.0, 0.0,-0.4]]), as_tuple=True)
(tensor([0, 1, 2, 3]), tensor([0, 1, 2, 3]))
>>> torch.nonzero(torch.tensor(5), as_tuple=True)
(tensor([0]),)
""")

add_docstr(torch.normal,
Expand Down