Skip to content

Commit e2eb0cb

Browse files
Mike Ruberry and facebook-github-bot
authored and committed
Adds arccosh alias for acosh and adds an alias consistency test (#43107)
Summary: This adds the torch.arccosh alias and updates alias testing to validate the consistency of the aliased and original operations. The alias testing is also updated to run on CPU and CUDA, which revealed a memory leak when tracing (see #43119). Pull Request resolved: #43107 Reviewed By: ngimel Differential Revision: D23156472 Pulled By: mruberry fbshipit-source-id: 6155fac7954fcc49b95e7c72ed917c85e0eabfcd
1 parent 4ae832e commit e2eb0cb

File tree

13 files changed

+197
-122
lines changed

13 files changed

+197
-122
lines changed

aten/src/ATen/core/aten_interned_strings.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,6 @@ _(aten, _acos) \
2424
_(aten, _addmv) \
2525
_(aten, _addr) \
2626
_(aten, _arange) \
27-
_(aten, _acosh) \
2827
_(aten, _asinh) \
2928
_(aten, _atanh) \
3029
_(aten, _argmax) \

aten/src/ATen/core/interned_strings.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -143,6 +143,10 @@ namespace c10 {
143143
_(aten, abs_) \
144144
_(aten, absolute) \
145145
_(aten, absolute_) \
146+
_(aten, acosh) \
147+
_(aten, acosh_) \
148+
_(aten, arccosh) \
149+
_(aten, arccosh_) \
146150
_(aten, clamp) \
147151
_(aten, clamp_) \
148152
_(aten, clip) \

aten/src/ATen/native/UnaryOps.cpp

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -290,6 +290,11 @@ Tensor& acosh_out(Tensor& result, const Tensor& self) { return unary_op_impl_out
290290
Tensor acosh(const Tensor& self) { return unary_op_impl(self, at::acosh_out); }
291291
Tensor& acosh_(Tensor& self) { return unary_op_impl_(self, at::acosh_out); }
292292

293+
// arccosh, alias for acosh
294+
Tensor& arccosh_out(Tensor& result, const Tensor& self) { return at::acosh_out(result, self); }
295+
Tensor arccosh(const Tensor& self) { return at::acosh(self); }
296+
Tensor& arccosh_(Tensor& self) { return at::acosh_(self); }
297+
293298
Tensor& asinh_out(Tensor& result, const Tensor& self) { return unary_op_impl_out(result, self, asinh_stub); }
294299
Tensor asinh(const Tensor& self) { return unary_op_impl(self, at::asinh_out); }
295300
Tensor& asinh_(Tensor& self) { return unary_op_impl_(self, at::asinh_out); }

aten/src/ATen/native/native_functions.yaml

Lines changed: 15 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -245,9 +245,7 @@
245245
# adding an alias in a namespace.)
246246
# 5) Update torch/overrides.py consistent with the original function.
247247
# 6) Update the alias_map in torch/csrc/jit/passes/normalize_ops.cpp.
248-
# 8) Add entries to test/test_op_normalization.py's "alias_infos"
249-
# 9) Add a test or modify an existing test to validate that the alias's behavior
250-
# is the same as the original function's.
248+
# 7) Add entries to test/test_op_aliases.py's "alias_infos"
251249
#
252250
# See torch.absolute, an alias for torch.abs, as an example.
253251

@@ -471,6 +469,20 @@
471469
- func: acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
472470
supports_named_tensor: True
473471

472+
# arccosh, alias for acosh
473+
- func: arccosh(Tensor self) -> Tensor
474+
use_c10_dispatcher: full
475+
supports_named_tensor: True
476+
variants: function, method
477+
478+
- func: arccosh_(Tensor(a!) self) -> Tensor(a!)
479+
use_c10_dispatcher: full
480+
supports_named_tensor: True
481+
variants: function, method
482+
483+
- func: arccosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
484+
supports_named_tensor: True
485+
474486
- func: asinh(Tensor self) -> Tensor
475487
use_c10_dispatcher: full
476488
supports_named_tensor: True

docs/source/tensors.rst

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -246,6 +246,8 @@ view of a storage and defines numeric operations on it.
246246
.. automethod:: count_nonzero
247247
.. automethod:: acosh
248248
.. automethod:: acosh_
249+
.. automethod:: arccosh
250+
.. automethod:: arccosh_
249251
.. automethod:: cpu
250252
.. automethod:: cross
251253
.. automethod:: cuda

docs/source/torch.rst

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -251,6 +251,7 @@ Pointwise Ops
251251
absolute
252252
acos
253253
acosh
254+
arccosh
254255
add
255256
addcdiv
256257
addcmul

test/run_test.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,7 @@
7272
'test_type_promotion',
7373
'test_jit_disabled',
7474
'test_function_schema',
75-
'test_op_normalization.py',
75+
'test_op_aliases.py',
7676
'test_overrides',
7777
'test_jit_fuser_te',
7878
'test_tensorexpr',

test/test_op_aliases.py

Lines changed: 144 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,144 @@
1+
import torch
2+
from torch.testing import FileCheck
3+
4+
from torch.testing._internal.common_utils import \
5+
(run_tests)
6+
from torch.testing._internal.jit_utils import JitTestCase
7+
from torch.testing._internal.common_device_type import \
8+
(instantiate_device_type_tests, skipCPUIfNoLapack, skipCUDAIfNoMagma, onlyCPU)
9+
10+
# Information for generating an alias test
# NOTE: ending the alias_name with an underscore will interpret the test
# as the test for an inplace method of that name
class AliasInfo(object):
    """Bundles an aliased op with its original op plus input builders.

    get_input and get_args are callables mapping a device string to the
    first tensor argument and to any extra positional arguments.
    """

    __slots__ = ['alias_name', 'alias_op', 'original_name', 'original_op',
                 'get_input', 'get_args', 'decorators']

    def __init__(self,
                 alias_name,  # the name of the alias
                 alias_op,  # the aliased op
                 original_name,  # the name of the original function
                 original_op,  # the original op
                 get_input,  # callable (device)->tensor that returns the first tensor argument
                 *,
                 get_args=lambda d: (),  # callable (device)->tuple of additional positional arguments
                 decorators=()):  # decorators to apply to the generated test
        # Group the alias/original halves of each pair together.
        self.alias_name, self.alias_op = alias_name, alias_op
        self.original_name, self.original_op = original_name, original_op
        self.get_input = get_input
        self.get_args = get_args
        self.decorators = decorators
33+
34+
# One entry per alias: pairs the alias with its original op and the
# callables that build its inputs on a given device.
alias_infos = (
    AliasInfo('absolute', torch.absolute, 'abs', torch.abs,
              lambda d: torch.randn(20, device=d)),
    AliasInfo('absolute_', torch.Tensor.absolute_, 'abs_', torch.Tensor.abs_,
              lambda d: torch.randn(20, device=d)),
    AliasInfo('clip', torch.clip, 'clamp', torch.clamp,
              lambda d: torch.randn(20, device=d), get_args=lambda d: (.4, .6)),
    AliasInfo('clip_', torch.Tensor.clip_, 'clamp_', torch.Tensor.clamp_,
              lambda d: torch.randn(20, device=d), get_args=lambda d: (.4, .6)),
    AliasInfo('linalg.det', torch.linalg.det, 'det', torch.det,
              lambda d: torch.randn(10, 10, device=d),
              decorators=(skipCPUIfNoLapack, skipCUDAIfNoMagma)),
    # NOTE: only runs on CPU because it leaks CUDA memory
    # (see https://github.com/pytorch/pytorch/issues/43119)
    AliasInfo('outer', torch.outer, 'ger', torch.ger,
              lambda d: torch.randn(20, device=d), get_args=lambda d: (torch.randn(20, device=d),),
              decorators=(onlyCPU,)),
    # + 2 biases inputs toward acosh's domain (x >= 1)
    AliasInfo('arccosh', torch.arccosh, 'acosh', torch.acosh,
              lambda d: torch.randn(20, device=d) + 2),
    AliasInfo('arccosh_', torch.Tensor.arccosh_, 'acosh_', torch.Tensor.acosh_,
              lambda d: torch.randn(20, device=d) + 2),
)
56+
57+
# Placeholder test class: the alias tests are generated and attached to it
# at import time, then instantiated per device.
class TestOpNormalization(JitTestCase):
    pass
61+
62+
# Generates alias tests and adds them to the specified class (cls).
# Two tests are produced per AliasInfo:
#   1) a JIT test verifying that scripting and tracing remap the alias to
#      its original op in the emitted graph, and
#   2) a computation test verifying the alias and the original op produce
#      identical results.
def create_alias_tests(cls):
    for info in alias_infos:

        # Tests that the JIT remaps aliases to their original ops
        def _test_jit_op_alias_normalization(self, device, info=info):
            # NOTE(review): `tensor` is bound so that stringified args that
            # spell `tensor(...)` resolve when the generated script is
            # compiled — confirm against alias_infos entries.
            tensor = torch.tensor
            op = info.alias_op
            is_inplace = info.alias_name.endswith('_')

            # Checks that scripting converts aliases
            # NOTE: the code to test scripting must be generated since
            # scripting does not support splatting args or directly
            # calling torch.Tensor methods. The following
            # splats args after the first tensor by inlining them as constants.
            if is_inplace:
                fn_template = '''
                    def _fn(t):
                        return t.{alias_name}({args})
                '''
                arg_string = ', '.join((str(arg) for arg in info.get_args(device)))
                script = fn_template.format(alias_name=info.alias_name, args=arg_string)
            else:
                fn_template = '''
                    def _fn(t):
                        return op(t{args})
                '''
                # leading ", " so args append after the tensor argument
                arg_string = ", " + ', '.join((str(arg) for arg in info.get_args(device)))
                script = fn_template.format(args=arg_string)

            # Compiles script
            scripted = torch.jit.CompilationUnit(script)._fn

            # Acquires and checks the graph remaps the alias
            inp = info.get_input(device)
            scripted(inp.clone())
            graph = scripted.graph_for(inp.clone())
            FileCheck().check(info.original_name).check_not(info.alias_name).run(graph)

            # Checks that tracing converts aliases
            # NOTE: tracing has no problem splatting args
            args = info.get_args(device)

            def _fn(t, info=info, args=args):
                return info.alias_op(t, *args)

            traced = torch.jit.trace(_fn, (inp.clone(),))
            traced(inp.clone())
            graph = traced.graph_for(inp.clone())
            FileCheck().check(info.original_name).check_not(info.alias_name).run(graph)

        # Applies decorators
        for decorator in info.decorators:
            _test_jit_op_alias_normalization = decorator(_test_jit_op_alias_normalization)

        test_name = "test_jit_op_alias_normalization_" + info.alias_name
        setattr(cls, test_name, _test_jit_op_alias_normalization)

        # Tests that the alias functions perform the same operation as the original
        def _test_alias_computation(self, device, info=info):
            alias_op = info.alias_op
            original_op = info.original_op

            inp = info.get_input(device)
            args = info.get_args(device)
            alias_result = alias_op(inp.clone(), *args)
            # BUG FIX: previously this called alias_op a second time, so the
            # test compared the alias against itself and could never detect a
            # divergence from the original op.
            original_result = original_op(inp.clone(), *args)

            # atol=0, rtol=0 requires bitwise-identical results
            self.assertEqual(alias_result, original_result, atol=0, rtol=0)

        # Applies decorators
        for decorator in info.decorators:
            _test_alias_computation = decorator(_test_alias_computation)

        test_name = "test_alias_computation_" + info.alias_name
        setattr(cls, test_name, _test_alias_computation)
138+
139+
140+
# Attach the generated tests, then create the per-device variants
# (e.g. TestOpNormalizationCPU / TestOpNormalizationCUDA).
create_alias_tests(TestOpNormalization)
instantiate_device_type_tests(TestOpNormalization, globals())

if __name__ == '__main__':
    run_tests()

test/test_op_normalization.py

Lines changed: 0 additions & 111 deletions
This file was deleted.

torch/_tensor_docs.py

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -216,6 +216,18 @@ def add_docstr_all(method, docstr):
216216
In-place version of :meth:`~Tensor.acosh`
217217
""")
218218

219+
add_docstr_all('arccosh', r"""
220+
arccosh() -> Tensor
221+
222+
See :func:`torch.arccosh`
223+
""")
224+
225+
add_docstr_all('arccosh_', r"""
226+
arccosh_() -> Tensor
227+
228+
In-place version of :meth:`~Tensor.arccosh`
229+
""")
230+
219231
add_docstr_all('add',
220232
r"""
221233
add(other, *, alpha=1) -> Tensor

0 commit comments

Comments
 (0)