Commit 005d6ea

smessmer authored and facebook-github-bot committed
Fix overload names (#28182)
Summary:
Pull Request resolved: #28182

They haven't been unique. Fixing it...

ghstack-source-id: 92436985

Test Plan: waitforsandcastle

Differential Revision: D17969010

fbshipit-source-id: 1aacbfb3c18a75ca6743b03cc2eea5fc4d3685c9
1 parent a94bf1d commit 005d6ea

3 files changed: 31 additions & 22 deletions
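The point of the commit message is that every entry in native_functions.yaml is registered under an (operator name, overload name) pair, and some entries had been sharing the same pair; for example, both Dimname variants of index_fill_ in the diff below were named dimname_Scalar. A rough way to scan for such collisions from Python is sketched here; it assumes the internal torch._C._jit_get_all_schemas() helper and name/overload_name attributes on schema objects, which may differ across PyTorch builds.

```python
# Illustrative only: look for duplicate (operator name, overload name) pairs
# among the registered schemas -- the condition this commit fixes.
# Assumes torch._C._jit_get_all_schemas() and .name / .overload_name exist.
from collections import Counter

import torch

pairs = Counter(
    (schema.name, schema.overload_name)
    for schema in torch._C._jit_get_all_schemas()
)
duplicates = [pair for pair, count in pairs.items() if count > 1]
print(duplicates)  # expected to be empty once every overload name is unique
```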

aten/src/ATen/native/native_functions.yaml

Lines changed: 20 additions & 20 deletions
@@ -70,7 +70,7 @@
   variants: method
   supports_named_tensor: True
 
-- func: align_to(Tensor(a) self, Dimname[] order, int ellipsis_idx) -> Tensor(a)
+- func: align_to.ellipsis_idx(Tensor(a) self, Dimname[] order, int ellipsis_idx) -> Tensor(a)
   variants: method
   supports_named_tensor: True
 
@@ -87,11 +87,11 @@
   variants: method
   supports_named_tensor: True
 
-- func: unflatten(Tensor self, Dimname dim, int[] sizes, Dimname[] names) -> Tensor
+- func: unflatten.Dimname(Tensor self, Dimname dim, int[] sizes, Dimname[] names) -> Tensor
   variants: method
   supports_named_tensor: True
 
-- func: unflatten(Tensor self, int dim, int[] sizes, Dimname[] names) -> Tensor
+- func: unflatten.int(Tensor self, int dim, int[] sizes, Dimname[] names) -> Tensor
   variants: method
   supports_named_tensor: True
 
@@ -1659,11 +1659,11 @@
     CUDA: logspace_cuda_out
 
 # log_softmax allows positional dtype, unlike most operators, because kwonly is BC-breaking when loading jit models.
-- func: log_softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
+- func: log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
   variants: function, method
   supports_named_tensor: True
 
-- func: log_softmax(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
+- func: log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
   variants: function, method
   supports_named_tensor: True
 
@@ -2554,11 +2554,11 @@
   variants: function, method
 
 # softmax allows positional dtype, unlike most operators, because kwonly is BC-breaking when loading jit models.
-- func: softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
+- func: softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor
   variants: function, method
   supports_named_tensor: True
 
-- func: softmax(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
+- func: softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor
   variants: function, method
   supports_named_tensor: True
 
@@ -4006,45 +4006,45 @@
 - func: index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor
   variants: function, method
 
-- func: index_fill_.Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)
+- func: index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)
   use_c10_dispatcher: unboxed_only
   variants: method
   supports_named_tensor: True
   dispatch:
     CPU: legacy::cpu::_th_index_fill_
     CUDA: legacy::cuda::_th_index_fill_
 
-- func: index_fill.Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
+- func: index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor
   use_c10_dispatcher: full
   supports_named_tensor: True
   variants: function, method
 
-- func: index_fill_.Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!)
+- func: index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!)
   use_c10_dispatcher: unboxed_only
   variants: method
   dispatch:
     CPU: legacy::cpu::_th_index_fill_
     CUDA: legacy::cuda::_th_index_fill_
   supports_named_tensor: True
 
-- func: index_fill.Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor
+- func: index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor
   use_c10_dispatcher: full
   variants: function, method
   supports_named_tensor: True
 
-- func: index_fill_.dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!)
+- func: index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!)
   variants: method
   supports_named_tensor: True
 
-- func: index_fill_.dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!)
+- func: index_fill_.Dimname_Tensor(Tensor(a!) self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!)
   variants: method
   supports_named_tensor: True
 
-- func: index_fill.dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor
+- func: index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor
   variants: function, method
   supports_named_tensor: True
 
-- func: index_fill.dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor
+- func: index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor
   variants: function, method
   supports_named_tensor: True
 
@@ -6681,7 +6681,7 @@
     CPU: slow_conv_transpose2d_cpu
     CUDA: slow_conv_transpose2d_cuda
 
-- func: slow_conv_transpose2d_backward.grad_output(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] output_padding, int[2] dilation, Tensor columns, Tensor ones, *, Tensor?(a!) grad_input, Tensor?(b!) grad_weight, Tensor?(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+- func: slow_conv_transpose2d_backward.grad_output(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] output_padding, int[2] dilation, Tensor columns, Tensor ones, *, Tensor(a!)? grad_input, Tensor(b!)? grad_weight, Tensor(c!)? grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
   python_module: nn
   dispatch:
     CPU: slow_conv_transpose2d_backward_out_cpu
 
@@ -6706,7 +6706,7 @@
     CPU: slow_conv_transpose3d_cpu
     CUDA: slow_conv_transpose3d_cuda
 
-- func: slow_conv_transpose3d_backward.grad_output(Tensor grad_output, Tensor self, Tensor weight, int[3] kernel_size, int[3] stride, int[3] padding, int[3] output_padding, int[3] dilation, Tensor finput, Tensor fgrad_input, *, Tensor?(a!) grad_input, Tensor?(b!) grad_weight, Tensor?(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+- func: slow_conv_transpose3d_backward.grad_output(Tensor grad_output, Tensor self, Tensor weight, int[3] kernel_size, int[3] stride, int[3] padding, int[3] output_padding, int[3] dilation, Tensor finput, Tensor fgrad_input, *, Tensor(a!)? grad_input, Tensor(b!)? grad_weight, Tensor(c!)? grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
   python_module: nn
   dispatch:
     CPU: slow_conv_transpose3d_backward_out_cpu
 
@@ -6737,7 +6737,7 @@
     CPU: legacy::cpu::_thnn_conv2d_forward
     CUDA: legacy::cuda::_thnn_conv2d_forward
 
-- func: thnn_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, Tensor finput, Tensor fgrad_input, *, Tensor?(a!) grad_input, Tensor?(b!) grad_weight, Tensor?(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+- func: thnn_conv2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, Tensor finput, Tensor fgrad_input, *, Tensor(a!)? grad_input, Tensor(b!)? grad_weight, Tensor(c!)? grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
   python_module: nn
   dispatch:
     CPU: legacy::cpu::_thnn_conv2d_backward_out
 
@@ -6766,7 +6766,7 @@
   dispatch:
     CUDA: legacy::cuda::_thnn_conv_depthwise2d_forward
 
-- func: thnn_conv_depthwise2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, *, Tensor?(a!) grad_input, Tensor?(b!) grad_weight) -> (Tensor(a!), Tensor(b!))
+- func: thnn_conv_depthwise2d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[2] kernel_size, int[2] stride, int[2] padding, int[2] dilation, *, Tensor(a!)? grad_input, Tensor(b!)? grad_weight) -> (Tensor(a!), Tensor(b!))
   python_module: nn
   dispatch:
     CUDA: legacy::cuda::_thnn_conv_depthwise2d_backward_out
 
@@ -6793,7 +6793,7 @@
   dispatch:
     CPU: legacy::cpu::_thnn_conv3d_forward
 
-- func: thnn_conv3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[3] kernel_size, int[3] stride, int[3] padding, Tensor finput, Tensor fgrad_input, *, Tensor?(a!) grad_input, Tensor?(b!) grad_weight, Tensor?(c!) grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
+- func: thnn_conv3d_backward.grad_input(Tensor grad_output, Tensor self, Tensor weight, int[3] kernel_size, int[3] stride, int[3] padding, Tensor finput, Tensor fgrad_input, *, Tensor(a!)? grad_input, Tensor(b!)? grad_weight, Tensor(c!)? grad_bias) -> (Tensor(a!), Tensor(b!), Tensor(c!))
   python_module: nn
   dispatch:
     CPU: legacy::cpu::_thnn_conv3d_backward_out
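Renaming the overloads does not change the Python surface: which schema runs (for example softmax.int versus softmax.Dimname) is still resolved from the argument types, and, as the comment carried through the hunks above notes, softmax and log_softmax keep accepting dtype positionally. A small illustrative sketch of that user-visible behaviour (not part of this change):

```python
# Sketch: the same public call sites exercise the renamed overloads.
# The overload that runs is picked from the type of `dim`; the overload
# name itself never appears in the call.
import torch

x = torch.randn(2, 3, names=('N', 'C'))

a = torch.softmax(x, 1)    # int dim      -> the ".int" overload
b = torch.softmax(x, 'C')  # Dimname dim  -> the ".Dimname" overload
assert torch.allclose(a.rename(None), b.rename(None))

# Per the schema comment, dtype may be passed positionally here,
# unlike most operators where it is keyword-only.
y = torch.log_softmax(torch.randn(4), 0, torch.float64)
assert y.dtype == torch.float64
```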

test/backward_compatibility/check_backward_compatibility.py

Lines changed: 9 additions & 0 deletions
@@ -13,6 +13,15 @@
     ('q_per_channel_axis', datetime.date(2019, 10, 1)),
     ('fbgemm_is_cpu_supported', datetime.date(2019, 10, 1)),
     ('c10_experimental', datetime.date(2020, 1, 1)),
+    ('index_fill', datetime.date(2019, 10, 30)),
+    ('align_to', datetime.date(2019, 10, 30)),
+    ('unflatten', datetime.date(2019, 10, 30)),
+    ('softmax', datetime.date(2019, 10, 30)),
+    ('slow_conv_transpose2d_backward', datetime.date(2019, 10, 30)),
+    ('slow_conv_transpose3d_backward', datetime.date(2019, 10, 30)),
+    ('thnn_conv2d_backward', datetime.date(2019, 10, 30)),
+    ('thnn_conv_depthwise2d_backward', datetime.date(2019, 10, 30)),
+    ('thnn_conv3d_backward', datetime.date(2019, 10, 30))
 ]
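Each whitelist entry added above pairs an operator-name prefix with an expiry date: schemas matching a listed prefix are exempted from the backward-compatibility check until that date, which is what lets these schema renames land. A minimal, hypothetical sketch of that kind of lookup (the real script's matching logic may differ):

```python
# Hypothetical helper showing how a (prefix, expiry date) whitelist like the
# one above could be consulted; names and matching rules are illustrative.
import datetime

white_list = [
    ('index_fill', datetime.date(2019, 10, 30)),
    ('softmax', datetime.date(2019, 10, 30)),
]

def skip_bc_check(schema_name, today):
    """True if the schema is temporarily exempt from the BC check."""
    return any(
        schema_name.startswith(prefix) and today < expiry
        for prefix, expiry in white_list
    )

print(skip_bc_check('softmax.int', datetime.date(2019, 10, 20)))  # True
print(skip_bc_check('softmax.int', datetime.date(2019, 11, 5)))   # False
```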

tools/autograd/derivatives.yaml

Lines changed: 2 additions & 2 deletions
@@ -402,11 +402,11 @@
   source: grad.index_select(dim, index)
   index: non_differentiable
 
-- name: index_fill_.Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)
+- name: index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)
   self: grad.clone().index_fill_(dim, index, 0)
   index: non_differentiable
 
-- name: index_fill_.Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!)
+- name: index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!)
   self: grad.clone().index_fill_(dim, index, 0)
   value: grad.index_select(dim, index).sum()
   index: non_differentiable
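Only the schema keys change here; the gradient formulas are untouched: the gradient with respect to self zeroes out the filled positions, and for the Tensor-valued overload the gradient with respect to value sums the incoming grad at the selected indices. A quick eager-mode sanity check of those two formulas (an illustrative sketch; it assumes the out-of-place index_fill routes its gradients through these in-place formulas):

```python
# Sketch checking the index_fill_ derivative formulas from derivatives.yaml
# against autograd. Shapes and values here are arbitrary examples.
import torch

dim, index = 0, torch.tensor([1, 3])
x = torch.randn(5, requires_grad=True)
value = torch.tensor(2.0, requires_grad=True)  # Tensor-valued overload needs a 0-dim tensor

out = x.index_fill(dim, index, value)
grad = torch.randn(5)
out.backward(grad)

# self: grad.clone().index_fill_(dim, index, 0)
assert torch.allclose(x.grad, grad.clone().index_fill_(dim, index, 0))
# value: grad.index_select(dim, index).sum()
assert torch.allclose(value.grad, grad.index_select(dim, index).sum())
```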
