Skip to content

Commit 21ba320

Browse files
smessmer authored and facebook-github-bot committed
Fix CI (#26250)
Summary: Pull Request resolved: #26250 Exclude some ops from the c10 dispatcher that don't work with it yet. ghstack-source-id: 90138046 Test Plan: waitforsandcastle Reviewed By: zou3519 Differential Revision: D17390117 fbshipit-source-id: a87fb3048aeba2c3293b95d610ddb8e94369f8fe
1 parent a2e5445 commit 21ba320

File tree

4 files changed

+34
-42
lines changed

4 files changed

+34
-42
lines changed

aten/src/ATen/core/OpsAlreadyMovedToC10.cpp

Lines changed: 16 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -16,9 +16,6 @@ const std::unordered_set<c10::OperatorName>& aten_ops_already_moved_to_c10() {
1616
{"aten::_cast_Long", ""},
1717
{"aten::_cast_Short", ""},
1818
{"aten::_cast_Half", ""},
19-
#ifdef BUILD_NAMEDTENSOR
20-
{"aten::names_", ""},
21-
#endif
2219
{"aten::_cudnn_ctc_loss", ""},
2320
{"aten::_cudnn_rnn_flatten_weight", ""},
2421
{"aten::_debug_has_internal_overlap", ""},
@@ -239,9 +236,6 @@ const std::unordered_set<c10::OperatorName>& aten_ops_already_moved_to_c10() {
239236
{"aten::hardshrink", ""},
240237
{"aten::hardshrink_backward", ""},
241238
{"aten::rsqrt", ""},
242-
#ifdef BUILD_NAMEDTENSOR
243-
{"aten::select", "Dimname"},
244-
#endif
245239
{"aten::select", "int"},
246240
{"aten::selu", ""},
247241
{"aten::celu", ""},
@@ -251,9 +245,6 @@ const std::unordered_set<c10::OperatorName>& aten_ops_already_moved_to_c10() {
251245
{"aten::sinh", ""},
252246
{"aten::detach", ""},
253247
{"aten::size", "int"},
254-
#ifdef BUILD_NAMEDTENSOR
255-
{"aten::size", "Dimname"},
256-
#endif
257248
{"aten::slice", "Tensor"},
258249
{"aten::slogdet", ""},
259250
{"aten::smm", ""},
@@ -267,9 +258,6 @@ const std::unordered_set<c10::OperatorName>& aten_ops_already_moved_to_c10() {
267258
{"aten::sspaddmm", ""},
268259
{"aten::stack", ""},
269260
{"aten::stride", "int"},
270-
#ifdef BUILD_NAMEDTENSOR
271-
{"aten::stride", "Dimname"},
272-
#endif
273261
{"aten::sum_to_size", ""},
274262
{"aten::sqrt", ""},
275263
{"aten::std", ""},
@@ -281,7 +269,7 @@ const std::unordered_set<c10::OperatorName>& aten_ops_already_moved_to_c10() {
281269
{"aten::threshold", ""},
282270
{"aten::threshold_", ""},
283271
{"aten::threshold_backward", ""},
284-
{"aten::transpose", ""},
272+
{"aten::transpose", "int"},
285273
{"aten::_mkldnn_transpose", ""},
286274
{"aten::transpose_", ""},
287275
{"aten::_mkldnn_transpose_", ""},
@@ -357,7 +345,7 @@ const std::unordered_set<c10::OperatorName>& aten_ops_already_moved_to_c10() {
357345
{"aten::hspmm", ""},
358346
{"aten::copy_sparse_to_sparse_", ""},
359347
{"aten::numel", ""},
360-
{"aten::unbind", ""},
348+
{"aten::unbind", "int"},
361349
{"aten::to_sparse", "sparse_dim"},
362350
{"aten::to_sparse", ""},
363351
{"aten::to_mkldnn", ""},
@@ -602,6 +590,9 @@ const std::unordered_set<c10::OperatorName>& aten_ops_not_moved_to_c10_yet() {
602590
{"aten::backward", ""},
603591
{"aten::set_data", ""},
604592
{"aten::data", ""},
593+
#ifdef BUILD_NAMEDTENSOR
594+
{"aten::names_", ""},
595+
#endif
605596
#ifdef BUILD_NAMEDTENSOR
606597
{"aten::renamed", ""},
607598
#endif
@@ -937,6 +928,9 @@ const std::unordered_set<c10::OperatorName>& aten_ops_not_moved_to_c10_yet() {
937928
{"aten::relu_", ""},
938929
{"aten::rsqrt_", ""},
939930
{"aten::rsqrt", "out"},
931+
#ifdef BUILD_NAMEDTENSOR
932+
{"aten::select", "Dimname"},
933+
#endif
940934
{"aten::selu_", ""},
941935
{"aten::sigmoid_", ""},
942936
{"aten::sigmoid", "out"},
@@ -945,6 +939,9 @@ const std::unordered_set<c10::OperatorName>& aten_ops_not_moved_to_c10_yet() {
945939
{"aten::sinh_", ""},
946940
{"aten::sinh", "out"},
947941
{"aten::detach_", ""},
942+
#ifdef BUILD_NAMEDTENSOR
943+
{"aten::size", "Dimname"},
944+
#endif
948945
{"aten::softmax", ""},
949946
#ifdef BUILD_NAMEDTENSOR
950947
{"aten::softmax", ""},
@@ -960,6 +957,9 @@ const std::unordered_set<c10::OperatorName>& aten_ops_not_moved_to_c10_yet() {
960957
{"aten::sspaddmm", "out"},
961958
{"aten::stack", "out"},
962959
{"aten::stft", ""},
960+
#ifdef BUILD_NAMEDTENSOR
961+
{"aten::stride", "Dimname"},
962+
#endif
963963
{"aten::sum", ""},
964964
{"aten::sum", "dim_IntList"},
965965
#ifdef BUILD_NAMEDTENSOR
@@ -999,7 +999,7 @@ const std::unordered_set<c10::OperatorName>& aten_ops_not_moved_to_c10_yet() {
999999
{"aten::tanh", "out"},
10001000
{"aten::threshold", "out"},
10011001
#ifdef BUILD_NAMEDTENSOR
1002-
{"aten::transpose", ""},
1002+
{"aten::transpose", "Dimname"},
10031003
#endif
10041004
{"aten::roll", ""},
10051005
{"aten::trunc_", ""},
@@ -1063,7 +1063,7 @@ const std::unordered_set<c10::OperatorName>& aten_ops_not_moved_to_c10_yet() {
10631063
{"aten::_sparse_coo_tensor_with_dims_and_tensors", ""},
10641064
{"aten::hspmm", "out"},
10651065
#ifdef BUILD_NAMEDTENSOR
1066-
{"aten::unbind", ""},
1066+
{"aten::unbind", "Dimname"},
10671067
#endif
10681068
{"aten::mkldnn_reorder_conv2d_weight", ""},
10691069
{"aten::quantize_linear", ""},

aten/src/ATen/core/TensorMethods.h

Lines changed: 12 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -85,9 +85,8 @@ inline Tensor & Tensor::names_(c10::optional<DimnameList> names) const {
8585
#ifdef USE_STATIC_DISPATCH
8686
return TypeDefault::names_(const_cast<Tensor&>(*this), names);
8787
#else
88-
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchema({"aten::names_", ""}).value();
89-
return c10::Dispatcher::singleton().lookup(op, impl::dispatchTypeId(type_set()))
90-
.callUnboxed<Tensor &, Tensor &, c10::optional<DimnameList>>(const_cast<Tensor&>(*this), names);
88+
static auto table = globalATenDispatch().getOpTable("aten::names_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)");
89+
return table->getOp<Tensor & (Tensor &, c10::optional<DimnameList>)>(type_set())(const_cast<Tensor&>(*this), names);
9190
#endif
9291
}
9392
#endif
@@ -1934,9 +1933,8 @@ inline Tensor Tensor::select(Dimname dim, int64_t index) const {
19341933
#ifdef USE_STATIC_DISPATCH
19351934
return TypeDefault::select(const_cast<Tensor&>(*this), dim, index);
19361935
#else
1937-
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchema({"aten::select", "Dimname"}).value();
1938-
return c10::Dispatcher::singleton().lookup(op, impl::dispatchTypeId(type_set()))
1939-
.callUnboxed<Tensor, const Tensor &, Dimname, int64_t>(const_cast<Tensor&>(*this), dim, index);
1936+
static auto table = globalATenDispatch().getOpTable("aten::select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a)");
1937+
return table->getOp<Tensor (const Tensor &, Dimname, int64_t)>(type_set())(const_cast<Tensor&>(*this), dim, index);
19401938
#endif
19411939
}
19421940
#endif
@@ -2055,9 +2053,8 @@ inline int64_t Tensor::size(Dimname dim) const {
20552053
#ifdef USE_STATIC_DISPATCH
20562054
return TypeDefault::size(const_cast<Tensor&>(*this), dim);
20572055
#else
2058-
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchema({"aten::size", "Dimname"}).value();
2059-
return c10::Dispatcher::singleton().lookup(op, impl::dispatchTypeId(type_set()))
2060-
.callUnboxed<int64_t, const Tensor &, Dimname>(const_cast<Tensor&>(*this), dim);
2056+
static auto table = globalATenDispatch().getOpTable("aten::size.Dimname(Tensor self, Dimname dim) -> int");
2057+
return table->getOp<int64_t (const Tensor &, Dimname)>(type_set())(const_cast<Tensor&>(*this), dim);
20612058
#endif
20622059
}
20632060
#endif
@@ -2190,9 +2187,8 @@ inline int64_t Tensor::stride(Dimname dim) const {
21902187
#ifdef USE_STATIC_DISPATCH
21912188
return TypeDefault::stride(const_cast<Tensor&>(*this), dim);
21922189
#else
2193-
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchema({"aten::stride", "Dimname"}).value();
2194-
return c10::Dispatcher::singleton().lookup(op, impl::dispatchTypeId(type_set()))
2195-
.callUnboxed<int64_t, const Tensor &, Dimname>(const_cast<Tensor&>(*this), dim);
2190+
static auto table = globalATenDispatch().getOpTable("aten::stride.Dimname(Tensor self, Dimname dim) -> int");
2191+
return table->getOp<int64_t (const Tensor &, Dimname)>(type_set())(const_cast<Tensor&>(*this), dim);
21962192
#endif
21972193
}
21982194
#endif
@@ -2374,7 +2370,7 @@ inline Tensor Tensor::transpose(int64_t dim0, int64_t dim1) const {
23742370
#ifdef USE_STATIC_DISPATCH
23752371
return TypeDefault::transpose(const_cast<Tensor&>(*this), dim0, dim1);
23762372
#else
2377-
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchema({"aten::transpose", ""}).value();
2373+
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchema({"aten::transpose", "int"}).value();
23782374
return c10::Dispatcher::singleton().lookup(op, impl::dispatchTypeId(type_set()))
23792375
.callUnboxed<Tensor, const Tensor &, int64_t, int64_t>(const_cast<Tensor&>(*this), dim0, dim1);
23802376
#endif
@@ -2384,7 +2380,7 @@ inline Tensor Tensor::transpose(Dimname dim0, Dimname dim1) const {
23842380
#ifdef USE_STATIC_DISPATCH
23852381
return TypeDefault::transpose(const_cast<Tensor&>(*this), dim0, dim1);
23862382
#else
2387-
static auto table = globalATenDispatch().getOpTable("aten::transpose(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a)");
2383+
static auto table = globalATenDispatch().getOpTable("aten::transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a)");
23882384
return table->getOp<Tensor (const Tensor &, Dimname, Dimname)>(type_set())(const_cast<Tensor&>(*this), dim0, dim1);
23892385
#endif
23902386
}
@@ -2965,7 +2961,7 @@ inline std::vector<Tensor> Tensor::unbind(int64_t dim) const {
29652961
#ifdef USE_STATIC_DISPATCH
29662962
return TypeDefault::unbind(const_cast<Tensor&>(*this), dim);
29672963
#else
2968-
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchema({"aten::unbind", ""}).value();
2964+
static c10::OperatorHandle op = c10::Dispatcher::singleton().findSchema({"aten::unbind", "int"}).value();
29692965
return c10::Dispatcher::singleton().lookup(op, impl::dispatchTypeId(type_set()))
29702966
.callUnboxed<std::vector<Tensor>, const Tensor &, int64_t>(const_cast<Tensor&>(*this), dim);
29712967
#endif
@@ -2975,7 +2971,7 @@ inline std::vector<Tensor> Tensor::unbind(Dimname dim) const {
29752971
#ifdef USE_STATIC_DISPATCH
29762972
return TypeDefault::unbind(const_cast<Tensor&>(*this), dim);
29772973
#else
2978-
static auto table = globalATenDispatch().getOpTable("aten::unbind(Tensor(a) self, Dimname dim) -> Tensor(a)[]");
2974+
static auto table = globalATenDispatch().getOpTable("aten::unbind.Dimname(Tensor(a) self, Dimname dim) -> Tensor(a)[]");
29792975
return table->getOp<std::vector<Tensor> (const Tensor &, Dimname)>(type_set())(const_cast<Tensor&>(*this), dim);
29802976
#endif
29812977
}

aten/src/ATen/native/native_functions.yaml

Lines changed: 4 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,6 @@
4747
variants: method
4848

4949
- func: names_(Tensor(a!) self, Dimname[]? names) -> Tensor(a!)
50-
use_c10_dispatcher: True
5150
variants: method
5251
named_guard: False
5352

@@ -2193,7 +2192,6 @@
21932192
CUDA: _rsqrt_out_cuda
21942193

21952194
- func: select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a)
2196-
use_c10_dispatcher: True
21972195
variants: function, method
21982196
device_guard: False
21992197
named_guard: False
@@ -2288,7 +2286,6 @@
22882286
named_guard: False
22892287

22902288
- func: size.Dimname(Tensor self, Dimname dim) -> int
2291-
use_c10_dispatcher: True
22922289
variants: function, method
22932290
device_guard: False
22942291
named_guard: False
@@ -2426,7 +2423,6 @@
24262423
named_guard: False
24272424

24282425
- func: stride.Dimname(Tensor self, Dimname dim) -> int
2429-
use_c10_dispatcher: True
24302426
variants: function, method
24312427
device_guard: False
24322428
named_guard: False
@@ -2591,13 +2587,13 @@
25912587
use_c10_dispatcher: True
25922588
variants: function
25932589

2594-
- func: transpose(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
2590+
- func: transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
25952591
use_c10_dispatcher: True
25962592
variants: function, method
25972593
device_guard: False
25982594
named_guard: False
25992595

2600-
- func: transpose(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a)
2596+
- func: transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a)
26012597
variants: function, method
26022598
device_guard: False
26032599
named_guard: False
@@ -3371,12 +3367,12 @@
33713367
device_guard: False
33723368
named_guard: False
33733369

3374-
- func: unbind(Tensor(a) self, int dim=0) -> Tensor(a)[]
3370+
- func: unbind.int(Tensor(a) self, int dim=0) -> Tensor(a)[]
33753371
use_c10_dispatcher: True
33763372
variants: function, method
33773373
named_guard: False
33783374

3379-
- func: unbind(Tensor(a) self, Dimname dim) -> Tensor(a)[]
3375+
- func: unbind.Dimname(Tensor(a) self, Dimname dim) -> Tensor(a)[]
33803376
variants: function, method
33813377
named_guard: False
33823378

tools/autograd/derivatives.yaml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -849,7 +849,7 @@
849849
- name: trace(Tensor self) -> Tensor
850850
self: trace_backward(grad, self.sizes())
851851

852-
- name: transpose(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
852+
- name: transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)
853853
self: grad.transpose(dim0, dim1)
854854

855855
- name: transpose_(Tensor(a!) self, int dim0, int dim1) -> Tensor(a!)
@@ -1504,7 +1504,7 @@
15041504
- name: _fft_with_size(Tensor self, int signal_ndim, bool complex_input, bool complex_output, bool inverse, int[] checked_signal_sizes, bool normalized, bool onesided, int[] output_sizes) -> Tensor
15051505
self: fft_backward(self, grad, signal_ndim, complex_input, complex_output, inverse, checked_signal_sizes, normalized, onesided, output_sizes)
15061506

1507-
- name: unbind(Tensor(a) self, int dim=0) -> Tensor(a)[]
1507+
- name: unbind.int(Tensor(a) self, int dim=0) -> Tensor(a)[]
15081508
self: unbind_backward(grads, dim)
15091509

15101510
- name: stack(Tensor[] tensors, int dim=0) -> Tensor

0 commit comments

Comments (0)