2 changes: 0 additions & 2 deletions aten/src/ATen/core/aten_interned_strings.h
@@ -42,7 +42,6 @@ _(aten, _cast_Long) \
_(aten, _cast_Short) \
_(aten, _cat) \
_(aten, _ceil) \
_(aten, _clamp) \
_(aten, _clamp_max) \
_(aten, _clamp_min) \
_(aten, _convolution) \
@@ -243,7 +242,6 @@ _(aten, cholesky) \
_(aten, cholesky_inverse) \
_(aten, cholesky_solve) \
_(aten, chunk) \
_(aten, clamp) \
_(aten, clamp_max) \
_(aten, clamp_min) \
_(aten, clone) \
4 changes: 4 additions & 0 deletions aten/src/ATen/core/interned_strings.h
@@ -143,6 +143,10 @@ namespace c10 {
_(aten, abs_) \
_(aten, absolute) \
_(aten, absolute_) \
_(aten, clamp) \
_(aten, clamp_) \
_(aten, clip) \
_(aten, clip_) \
_(aten, append) \
_(aten, item) \
_(aten, format) \
13 changes: 13 additions & 0 deletions aten/src/ATen/native/UnaryOps.cpp
@@ -466,6 +466,19 @@ Tensor& clamp_min_(Tensor& self, Scalar min) {
return at::clamp_min_out(self, self, min);
}

// Implements the "clip" alias for clamp
Tensor& clip_out(Tensor& result, const Tensor& self, optional<Scalar> min, optional<Scalar> max) {
return at::clamp_out(result, self, min, max);
}

Tensor clip(const Tensor& self, optional<Scalar> min, optional<Scalar> max) {
return at::clamp(self, min, max);
}

Tensor& clip_(Tensor& self, optional<Scalar> min, optional<Scalar> max) {
return at::clamp_(self, min, max);
}

Tensor polygamma(int64_t n, const Tensor& self) {
Tensor result = at::empty({0}, self.options());
at::polygamma_out(result, n, self);
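Because each of these clip entry points simply redispatches to its clamp counterpart, the observable behavior should match exactly. A minimal sketch of that equivalence from Python (illustrative values, not part of this PR's tests):

import torch

t = torch.randn(10)
out_clip, out_clamp = torch.empty(10), torch.empty(10)

# functional, in-place, and out= forms all route to the clamp kernels
assert torch.equal(torch.clip(t, -0.5, 0.5), torch.clamp(t, -0.5, 0.5))

a, b = t.clone(), t.clone()
a.clip_(-0.5, 0.5)
b.clamp_(-0.5, 0.5)
assert torch.equal(a, b)

torch.clip(t, -0.5, 0.5, out=out_clip)
torch.clamp(t, -0.5, 0.5, out=out_clamp)
assert torch.equal(out_clip, out_clamp)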
25 changes: 21 additions & 4 deletions aten/src/ATen/native/native_functions.yaml
@@ -231,10 +231,17 @@
# original function's name with their own.
# 2) Implement the corresponding functions and have them redispatch to the
# original function.
# 3) Add docstrings to the new functions that reference the original function.
# 4) Update the alias_map in torch/csrc/jit/passes/normalize_ops.cpp.
# 5) Update the op_alias_mappings in torch/testing/_internal/jit_utils.py.
# 6) Add a test validating the alias's behavior is the same as the original
# 3) Add entries for the alias (and original function, if needed) to
# aten/src/ATen/interned_strings.h
# 4) Add docstrings to the new functions that reference the original function.
# 5) Update torch/overrides.py consistent with the original function.
# 6) Update the alias_map in torch/csrc/jit/passes/normalize_ops.cpp.
# 7) Update the op_alias_mappings in torch/testing/_internal/jit_utils.py.
# 8) Add entries in
# torch/testing/_internal/common_method_invocations.py's method_tests
# for both the alias and the in-place version of the alias, with
# should_check_autodiff=False.
# 9) Add a test validating the alias's behavior is the same as the original
# function's.
#
# See torch.absolute, an alias for torch.abs, as an example.
@@ -798,6 +805,16 @@

- func: clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!)

# clip is an alias for clamp
- func: clip(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor
use_c10_dispatcher: full
variants: function, method

- func: clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)
variants: function, method

- func: clip.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)

- func: cudnn_is_acceptable(Tensor self) -> bool
use_c10_dispatcher: full
device_guard: False
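The schema above determines what the generated bindings accept: `variants: function, method` yields both torch.clip and Tensor.clip, the `Scalar?` bounds are optional with None defaults, and omitting both bounds is expected to raise the same error the test further down checks for. A rough sketch:

import torch

t = torch.randn(4)
t.clip(max=1.0)              # method variant, only an upper bound
torch.clip(t, min=-1.0)      # function variant, only a lower bound

try:
    t.clip()                 # neither bound given
except RuntimeError as e:
    assert "At least one of 'min' or 'max' must not be None" in str(e)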
2 changes: 2 additions & 0 deletions docs/source/tensors.rst
@@ -233,6 +233,8 @@ view of a storage and defines numeric operations on it.
.. automethod:: chunk
.. automethod:: clamp
.. automethod:: clamp_
.. automethod:: clip
.. automethod:: clip_
.. automethod:: clone
.. automethod:: contiguous
.. automethod:: copy_
1 change: 1 addition & 0 deletions docs/source/torch.rst
@@ -261,6 +261,7 @@ Pointwise Ops
bitwise_xor
ceil
clamp
clip
conj
cos
cosh
5 changes: 3 additions & 2 deletions test/jit/test_op_normalization.py
@@ -9,7 +9,7 @@
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, op_alias_mappings
from torch.testing._internal.jit_metaprogramming_utils import create_script_fn, create_traced_fn
from torch.testing._internal.common_methods_invocations import method_tests, create_input
from torch.testing._internal.common_methods_invocations import method_tests, create_input, dont_convert

if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
@@ -26,7 +26,8 @@ def get_defaults(
skipTestIf=(),
output_process_fn=lambda x: x,
kwargs=None):
args = tuple(enumerate(args))
if not isinstance(args, dont_convert):
args = tuple(enumerate(args))
kwargs = kwargs if kwargs else {}
return name, self_size, args, kwargs, output_process_fn

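The `dont_convert` import above is what lets the new clip entries in method_tests pass their positional arguments through untouched; as far as this diff shows, it is just a tuple marker type (the one-line class body below is an assumption, not code from this PR):

class dont_convert(tuple):
    """Marker: leave these test args as plain Python values."""

args = dont_convert((0, 1))
if not isinstance(args, dont_convert):   # mirrors the check added in get_defaults above
    args = tuple(enumerate(args))
# args stays (0, 1) instead of becoming ((0, 0), (1, 1))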
139 changes: 72 additions & 67 deletions test/test_torch.py
@@ -7034,80 +7034,85 @@ def test_logical_and(self, device):
def test_logical_or(self, device):
self._test_logical(device, 'logical_or', [10, 0, 1, 0], [1, 0, 0, 10], [1, 0, 1, 1])

# Tests clamp and its alias, clip
def test_clamp(self, device):
m1 = torch.rand(100, device=device).mul(5).add(-2.5) # uniform in [-2.5, 2.5]
# just in case we're extremely lucky.
min_val = -1
max_val = 1
m1[1] = min_val
m1[2] = max_val
op_list = ((torch.clamp, torch.Tensor.clamp, torch.Tensor.clamp_),
(torch.clip, torch.Tensor.clip, torch.Tensor.clip_))
for op, method_op, inplace_op in op_list:

res1 = m1.clone()
res1.clamp_(min_val, max_val)
res2 = m1.clone()
for i in iter_indices(res2):
res2[i] = max(min_val, min(max_val, res2[i]))
self.assertEqual(res1, res2)
m1 = torch.rand(100, device=device).mul(5).add(-2.5) # uniform in [-2.5, 2.5]
# just in case we're extremely lucky.
min_val = -1
max_val = 1
m1[1] = min_val
m1[2] = max_val
Reviewer comment (Collaborator): what are you trying to achieve here? Even if you are extremely lucky and your input tensor is bound by [min_val, max_val], these assignments are not going to change anything, and output will be equal to input.


out = m1.clone()
torch.clamp(m1, min=min_val, max=max_val, out=out)
self.assertEqual(out, res1)
res1 = m1.clone()
inplace_op(res1, min_val, max_val)
res2 = m1.clone()
for i in iter_indices(res2):
Reviewer comment (Collaborator): maybe compare_with_numpy instead of this loop?

res2[i] = max(min_val, min(max_val, res2[i]))
self.assertEqual(res1, res2)

res1 = torch.clamp(m1, min=min_val)
res2 = m1.clone()
for i in iter_indices(res2):
res2[i] = max(min_val, res2[i])
self.assertEqual(res1, res2)
out = m1.clone()
op(m1, min=min_val, max=max_val, out=out)
self.assertEqual(out, res1)

torch.clamp(m1, min=min_val, out=out)
self.assertEqual(out, res1)
res1 = op(m1, min=min_val)
res2 = m1.clone()
for i in iter_indices(res2):
res2[i] = max(min_val, res2[i])
self.assertEqual(res1, res2)

res1 = torch.clamp(m1, max=max_val)
res2 = m1.clone()
for i in iter_indices(res2):
res2[i] = min(max_val, res2[i])
self.assertEqual(res1, res2)
op(m1, min=min_val, out=out)
self.assertEqual(out, res1)

res1 = op(m1, max=max_val)
res2 = m1.clone()
for i in iter_indices(res2):
res2[i] = min(max_val, res2[i])
self.assertEqual(res1, res2)

torch.clamp(m1, max=max_val, out=out)
self.assertEqual(out, res1)

# if the tensor contains nan case
test_tens = torch.tensor([nan], device=device)

res1 = test_tens.clone()
res1.clamp_(min_val, max_val)
res2 = test_tens.clone()
for i in iter_indices(res2):
res2[i] = max(min(res2[i], max_val), min_val)
self.assertEqual(torch.isnan(res1), torch.isnan(res2))

out = test_tens.clone()
torch.clamp(test_tens, min=min_val, max=max_val, out=out)
self.assertEqual(torch.isnan(out), torch.isnan(res1))

res1 = torch.clamp(test_tens, min=min_val)
res2 = test_tens.clone()
for i in iter_indices(res2):
res2[i] = max(res2[i], min_val)
self.assertEqual(torch.isnan(res1), torch.isnan(res2))

torch.clamp(test_tens, min=min_val, out=out)
self.assertEqual(torch.isnan(out), torch.isnan(res1))

res1 = torch.clamp(test_tens, max=max_val)
res2 = test_tens.clone()
for i in iter_indices(res2):
res2[i] = min(res2[i], max_val)
self.assertEqual(torch.isnan(res1), torch.isnan(res2))

torch.clamp(test_tens, max=max_val, out=out)
self.assertEqual(torch.isnan(out), torch.isnan(res1))

error_msg = 'At least one of \'min\' or \'max\' must not be None'
with self.assertRaisesRegex(RuntimeError, error_msg):
m1.clamp()
with self.assertRaisesRegex(RuntimeError, error_msg):
m1.clamp_()
op(m1, max=max_val, out=out)
self.assertEqual(out, res1)

# if the tensor contains nan case
test_tens = torch.tensor([nan], device=device)

res1 = test_tens.clone()
inplace_op(res1, min_val, max_val)
res2 = test_tens.clone()
for i in iter_indices(res2):
Reviewer comment (Collaborator): this loop not needed, just compare with expected [nan] here

res2[i] = max(min(res2[i], max_val), min_val)
self.assertEqual(torch.isnan(res1), torch.isnan(res2))

out = test_tens.clone()
Reviewer comment (Collaborator): given that you expect out to be equal to test_tens, this is not the best way to initialize out

op(test_tens, min=min_val, max=max_val, out=out)
self.assertEqual(torch.isnan(out), torch.isnan(res1))

res1 = op(test_tens, min=min_val)
res2 = test_tens.clone()
for i in iter_indices(res2):
Reviewer comment (Collaborator): loop is not needed

res2[i] = max(res2[i], min_val)
self.assertEqual(torch.isnan(res1), torch.isnan(res2))

op(test_tens, min=min_val, out=out)
self.assertEqual(torch.isnan(out), torch.isnan(res1))

res1 = op(test_tens, max=max_val)
res2 = test_tens.clone()
for i in iter_indices(res2):
res2[i] = min(res2[i], max_val)
self.assertEqual(torch.isnan(res1), torch.isnan(res2))

op(test_tens, max=max_val, out=out)
self.assertEqual(torch.isnan(out), torch.isnan(res1))

error_msg = 'At least one of \'min\' or \'max\' must not be None'
with self.assertRaisesRegex(RuntimeError, error_msg):
method_op(m1)
Reviewer comment (Collaborator): I don't see method_op tested anywhere other than here, where it raises an error

with self.assertRaisesRegex(RuntimeError, error_msg):
inplace_op(m1)
Reviewer comment (Collaborator): Is nan propagation in clamp tested anywhere else? Vectorized and non-vectorized paths (could) propagate nan differently, so testing 1-element and 32-element tensors is needed to make sure that everything is ok.
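A rough sketch of what these review comments ask for, comparing both ops against NumPy on a 32-element tensor containing a NaN so a vectorized path is also exercised (sizes and values here are illustrative):

import numpy as np
import torch

t = torch.randn(32)
t[0] = float('nan')
expected = np.clip(t.numpy(), -1.0, 1.0)   # NumPy propagates the NaN
for op in (torch.clamp, torch.clip):
    np.testing.assert_allclose(op(t, -1.0, 1.0).numpy(), expected, equal_nan=True)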


def test_cat_empty_legacy(self, device):
# FIXME: this is legacy behavior and should be removed
12 changes: 12 additions & 0 deletions torch/_tensor_docs.py
@@ -787,6 +787,18 @@ def add_docstr_all(method, docstr):
In-place version of :meth:`~Tensor.clamp`
""")

add_docstr_all('clip', r"""
clip(min, max) -> Tensor

Alias for :meth:`~Tensor.clamp`.
""")

add_docstr_all('clip_', r"""
clip_(min, max) -> Tensor

Alias for :meth:`~Tensor.clamp_`.
""")

add_docstr_all('clone',
r"""
clone(memory_format=torch.preserve_format) -> Tensor
9 changes: 7 additions & 2 deletions torch/_torch_docs.py
@@ -1425,8 +1425,7 @@ def merge_dicts(*dicts):
[-0.0889, 0.2122, 0.1412]])
""")

add_docstr(torch.clamp,
r"""
add_docstr(torch.clamp, r"""
clamp(input, min, max, out=None) -> Tensor

Clamp all elements in :attr:`input` into the range `[` :attr:`min`, :attr:`max` `]` and return
@@ -1497,6 +1496,12 @@ def merge_dicts(*dicts):
tensor([ 0.5000, -0.4702, -0.4599, 0.5000])
""".format(**common_args))

add_docstr(torch.clip, r"""
clip(input, min, max, *, out=None) -> Tensor
Reviewer comment (Collaborator): why is there "*" here but not in clamp declaration?


Alias for :func:`torch.clamp`.
""".format(**common_args))

add_docstr(torch.conj,
r"""
conj(input, out=None) -> Tensor
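On the review question above: in a docstring signature the `*` marks the arguments after it as keyword-only, which matches the `clip.out` schema in native_functions.yaml (`*, Tensor(a!) out`); either way, out is passed by keyword for both ops. A quick illustration of the calling convention (buffer names here are illustrative):

import torch

t = torch.randn(3)
buf = torch.empty(3)
torch.clip(t, -1.0, 1.0, out=buf)    # out passed by keyword
torch.clamp(t, -1.0, 1.0, out=buf)   # same calling convention for the original op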
3 changes: 2 additions & 1 deletion torch/csrc/jit/passes/normalize_ops.cpp
@@ -10,7 +10,8 @@ namespace {
static const std::unordered_map<Symbol, Symbol> alias_map = {
{aten::absolute, aten::abs},
{aten::absolute_, aten::abs_},
};
{aten::clip, aten::clamp},
{aten::clip_, aten::clamp_}};

void replaceNodeWithNewSymbol(Node* node, Symbol new_symbol) {
WithInsertPoint insert_guard{node};
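These map entries let the op-normalization pass rewrite aten::clip / aten::clip_ nodes to aten::clamp / aten::clamp_, so downstream JIT passes only ever see the canonical op. A sketch of how this might be observed, assuming the normalization pass runs as part of scripting (as the JIT test earlier in this diff exercises):

import torch

@torch.jit.script
def f(x):
    return x.clip(-1.0, 1.0)

# After normalization the graph is expected to contain aten::clamp, not aten::clip.
print(f.graph)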
3 changes: 2 additions & 1 deletion torch/overrides.py
@@ -270,6 +270,7 @@ def get_testing_overrides() -> Dict[Callable, Callable]:
torch.cholesky_solve: lambda input1, input2, upper=False, out=None: -1,
torch.chunk: lambda input, chunks, dim=0: -1,
torch.clamp: lambda input, min=None, max=None, out=None: -1,
torch.clip: lambda input, min=None, max=None, out=None: -1,
torch.clamp_min: lambda input, min, out=None: -1,
torch.clamp_max: lambda input, max, out=None: -1,
torch.clone: lambda input: -1,
@@ -1064,7 +1065,7 @@ def get_overridable_functions() -> Dict[Any, List[Callable]]:
for func_name in ns_funcs:
# ignore private functions or functions that are deleted in torch.__init__
if namespace is not torch.Tensor:
if func_name.startswith('_'):
if func_name.startswith('_'):
continue
elif func_name.endswith('_'):
continue
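The new torch.clip entry mirrors torch.clamp's dummy lambda so the override-testing machinery knows clip's Python signature. A small sketch, assuming the helper is importable from the module shown here:

import torch
from torch.overrides import get_testing_overrides

dummies = get_testing_overrides()
assert torch.clip in dummies and torch.clamp in dummies
# both dummy lambdas accept the same (input, min, max, out) signature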
2 changes: 2 additions & 0 deletions torch/testing/_internal/common_methods_invocations.py
@@ -276,6 +276,8 @@ def method_tests():
('clamp', (), (None, 0.5), 'min_scalar', (True,)),
('clamp', (), (0.5, None), 'max_scalar', (True,)),
('clamp', (S, S), (), 'max_scalar_kwarg', (True,), (), (), ident, {'max': 1}),
('clip', (S, S, S), dont_convert((0, 1)), '', (False,)),
('clip_', (S, S, S), dont_convert((0, 1)), '', (False,)),
Reviewer comment (Collaborator): before your additions of absolute_ and clip_, it looks like in-place versions weren't tested here, so maybe they shouldn't be?
Author reply: The alias test requires an in-place entry.

('sqrt', torch.rand(S, S, S) + 5e-4, NO_ARGS, '', (True,)),
('sqrt', uniform_scalar(5e-4, requires_grad=True), NO_ARGS, 'scalar', (True,)),
('sin', (S, S, S), NO_ARGS, '', (True,)),
2 changes: 2 additions & 0 deletions torch/testing/_internal/jit_utils.py
@@ -679,4 +679,6 @@ def attrs_with_prefix(module, prefix):
op_alias_mappings = {
"absolute" : "abs",
"absolute_" : "abs_",
"clip" : "clamp",
"clip_" : "clamp_",
}
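op_alias_mappings is consumed by test_op_normalization.py (imported near the top of that file) to check that scripted and traced graphs no longer contain the alias op. A hypothetical helper sketching that check (not the actual test code):

def is_normalized(graph_str: str, alias: str) -> bool:
    # the alias should be gone from the normalized graph and its canonical op present
    original = op_alias_mappings[alias]
    return f"aten::{alias}" not in graph_str and f"aten::{original}" in graph_str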