
Commit fe69a2b

janeyx99 authored and pytorchmergebot committed
Move from/to to torch::stable::detail (#164956)

To not pollute the global namespace, we should move the `from`/`to` APIs into torch::stable::detail. We are also following our normal deprecation cycle and choosing to continue exposing the global `from`/`to` for the time being, as people who onboarded their extensions onto 2.9 would otherwise not be able to build with 2.10.

Note that this means that within libtorch, we do not get the luxury of tacking on a `using torch::stable::detail::from`, because that leads to ambiguous calls at build time: both the global and the namespaced APIs are exposed, so which one do I want? That is why every local call site is updated. Note that the update is _not_ necessary from a custom op writer's point of view; FA3 can continue to build on torch nightlies without changing any code. (Since this is a header change, this PR has no implication on runtime: a previously built FA3 ABI-stable wheel will continue to work fine with newer torch versions after this PR.)

Once TORCH_BOX lands, we would be free to remove these global APIs when the deprecation cycle is up (April 2026) and encourage people to use TORCH_BOX and avoid `from`/`to` entirely.

Pull Request resolved: #164956
Approved by: https://github.com/malfet
ghstack dependencies: #164882
1 parent 0be0de4 commit fe69a2b
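For a custom op writer, migrating just means qualifying the conversion calls. A minimal sketch (the kernel `my_exp` and the exact set of includes are illustrative assumptions, not code from this PR), mirroring the updated test below:

    #include <torch/csrc/inductor/aoti_torch/c/shim.h>
    #include <torch/csrc/stable/stableivalue_conversions.h>
    #include <torch/csrc/stable/tensor.h>

    // Box the input, dispatch through the stable shim, and unbox the result,
    // using the namespaced conversions instead of the global from/to.
    torch::stable::Tensor my_exp(const torch::stable::Tensor& x) {
      StableIValue stack[1];
      stack[0] = torch::stable::detail::from(x); // was: from(x)
      aoti_torch_call_dispatcher("aten::exp", "", stack);
      return torch::stable::detail::to<torch::stable::Tensor>(stack[0]); // was: to<...>(stack[0])
    }

The unqualified spellings keep building for now via the deprecation wrappers added in stableivalue_conversions.h below, so existing 2.9-era extensions compile unchanged, just with deprecation warnings.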

File tree: 5 files changed, +107 / -60 lines


test/test_cpp_extensions_jit.py

Lines changed: 4 additions & 4 deletions
@@ -1240,18 +1240,18 @@ def test_aoti_torch_call_dispatcher(self):
         at::Tensor my_abs(at::Tensor x) {
           StableIValue stack[1];
           RAIIATH raii(torch::aot_inductor::new_tensor_handle(std::move(x)));
-          stack[0] = from(raii.release());
+          stack[0] = torch::stable::detail::from(raii.release());
           aoti_torch_call_dispatcher("aten::abs", "", stack);
-          RAIIATH res(to<AtenTensorHandle>(stack[0]));
+          RAIIATH res(torch::stable::detail::to<AtenTensorHandle>(stack[0]));
           return *reinterpret_cast<at::Tensor*>(res.release());
         }

         at::Tensor my_floor(at::Tensor x) {
           StableIValue stack[1];
           RAIIATH raii(torch::aot_inductor::new_tensor_handle(std::move(x)));
-          stack[0] = from(raii.release());
+          stack[0] = torch::stable::detail::from(raii.release());
           aoti_torch_call_dispatcher("aten::floor", "", stack);
-          RAIIATH res(to<AtenTensorHandle>(stack[0]));
+          RAIIATH res(torch::stable::detail::to<AtenTensorHandle>(stack[0]));
           return *reinterpret_cast<at::Tensor*>(res.release());
         }
        """

torch/csrc/inductor/aoti_torch/shim_common.cpp

Lines changed: 30 additions & 26 deletions
@@ -1413,28 +1413,28 @@ static StableIValue from_ivalue(
     case c10::TypeKind::TensorType: {
       AtenTensorHandle ath = torch::aot_inductor::new_tensor_handle(
           std::move(const_cast<at::Tensor&>(ivalue.toTensor())));
-      return from(ath);
+      return torch::stable::detail::from(ath);
     }
     case c10::TypeKind::IntType: {
-      return from(ivalue.toInt());
+      return torch::stable::detail::from(ivalue.toInt());
     }
     case c10::TypeKind::FloatType: {
-      return from(ivalue.toDouble());
+      return torch::stable::detail::from(ivalue.toDouble());
     }
     case c10::TypeKind::BoolType: {
-      return from(ivalue.toBool());
+      return torch::stable::detail::from(ivalue.toBool());
     }
     case c10::TypeKind::ScalarTypeType: {
-      return from(ivalue.toScalarType());
+      return torch::stable::detail::from(ivalue.toScalarType());
     }
     case c10::TypeKind::DeviceObjType: {
-      return from(ivalue.toDevice());
+      return torch::stable::detail::from(ivalue.toDevice());
     }
     case c10::TypeKind::LayoutType: {
-      return from(ivalue.toLayout());
+      return torch::stable::detail::from(ivalue.toLayout());
     }
     case c10::TypeKind::MemoryFormatType: {
-      return from(ivalue.toMemoryFormat());
+      return torch::stable::detail::from(ivalue.toMemoryFormat());
     }
     case c10::TypeKind::OptionalType: {
       auto inner_type = type->castRaw<at::OptionalType>()->getElementType();
@@ -1444,17 +1444,18 @@ static StableIValue from_ivalue(
       // able to follow the patterned semantic of every other case here in one
       // line:
       //
-      // return from<std::optional<inner_type::t>>(ivalue.toInnerTypeT()));
+      // return
+      // torch::stable::detail::from<std::optional<inner_type::t>>(ivalue.toInnerTypeT()));
       //
       // BUT we do NOT have that type inner_type::t readily available, so we
       // will manually unwrap and recursively call. This implementation MUST
-      // be kept in sync with from<std::optional<T>> function in
-      // torch/csrc/stable/library.h
+      // be kept in sync with torch::stable::detail::from<std::optional<T>>
+      // function in torch/csrc/stable/stableivalue_conversions.h
       if (ivalue.isNone()) {
-        return from(std::nullopt);
+        return torch::stable::detail::from(std::nullopt);
       }
       StableIValue* sivp = new StableIValue(from_ivalue(inner_type, ivalue));
-      return from(sivp);
+      return torch::stable::detail::from(sivp);
     }
     default: {
       TORCH_CHECK(
@@ -1471,30 +1472,32 @@ static c10::IValue to_ivalue(
   switch (type->kind()) {
     case c10::TypeKind::TensorType: {
       auto ret_raiiath = torch::aot_inductor::RAIIAtenTensorHandle(
-          to<AtenTensorHandle>(stable_ivalue));
+          torch::stable::detail::to<AtenTensorHandle>(stable_ivalue));
       return (c10::IValue(*torch::aot_inductor::tensor_handle_to_tensor_pointer(
           ret_raiiath.get())));
     }
     case c10::TypeKind::IntType: {
-      return c10::IValue(to<int64_t>(stable_ivalue));
+      return c10::IValue(torch::stable::detail::to<int64_t>(stable_ivalue));
     }
     case c10::TypeKind::FloatType: {
-      return c10::IValue(to<double>(stable_ivalue));
+      return c10::IValue(torch::stable::detail::to<double>(stable_ivalue));
    }
     case c10::TypeKind::BoolType: {
-      return c10::IValue(to<bool>(stable_ivalue));
+      return c10::IValue(torch::stable::detail::to<bool>(stable_ivalue));
    }
     case c10::TypeKind::ScalarTypeType: {
-      return c10::IValue(to<c10::ScalarType>(stable_ivalue));
+      return c10::IValue(
+          torch::stable::detail::to<c10::ScalarType>(stable_ivalue));
    }
     case c10::TypeKind::DeviceObjType: {
-      return c10::IValue(to<c10::Device>(stable_ivalue));
+      return c10::IValue(torch::stable::detail::to<c10::Device>(stable_ivalue));
    }
     case c10::TypeKind::LayoutType: {
-      return c10::IValue(to<c10::Layout>(stable_ivalue));
+      return c10::IValue(torch::stable::detail::to<c10::Layout>(stable_ivalue));
    }
     case c10::TypeKind::MemoryFormatType: {
-      return c10::IValue(to<c10::MemoryFormat>(stable_ivalue));
+      return c10::IValue(
+          torch::stable::detail::to<c10::MemoryFormat>(stable_ivalue));
    }
     case c10::TypeKind::OptionalType: {
       auto inner_type = type->castRaw<at::OptionalType>()->getElementType();
@@ -1504,16 +1507,17 @@ static c10::IValue to_ivalue(
       // able to follow the patterned semantic of every other case here in one
       // line:
      //
-      // return c10::IValue(to<std::optional<inner_type::t>>(stable_ivalue));
+      // return
+      // c10::IValue(torch::stable::detail::to<std::optional<inner_type::t>>(stable_ivalue));
      //
       // BUT we do NOT have that type inner_type::t readily available, so we
       // will manually unwrap and recursively call. This implementation MUST
-      // be kept in sync with the to<T> function in
-      // torch/csrc/stable/library.h
-      if (stable_ivalue == from(std::nullopt)) {
+      // be kept in sync with the torch::stable::detail::to<T> function in
+      // torch/csrc/stable/stableivalue_conversions.h
+      if (stable_ivalue == torch::stable::detail::from(std::nullopt)) {
        return c10::IValue();
      }
-      auto sivp = to<StableIValue*>(stable_ivalue);
+      auto sivp = torch::stable::detail::to<StableIValue*>(stable_ivalue);
       auto ival = to_ivalue(inner_type, *sivp);
       delete sivp;
       return ival;

torch/csrc/stable/ops.h

Lines changed: 26 additions & 17 deletions
@@ -18,15 +18,15 @@ namespace torch::stable {
 inline torch::stable::Tensor empty_like(const torch::stable::Tensor& self) {
   const auto num_args = 6;
   std::array<StableIValue, num_args> stack{
-      from(self),
-      from(std::nullopt),
-      from(std::nullopt),
-      from(std::nullopt),
-      from(std::nullopt),
-      from(std::nullopt)};
+      torch::stable::detail::from(self),
+      torch::stable::detail::from(std::nullopt),
+      torch::stable::detail::from(std::nullopt),
+      torch::stable::detail::from(std::nullopt),
+      torch::stable::detail::from(std::nullopt),
+      torch::stable::detail::from(std::nullopt)};
   TORCH_ERROR_CODE_CHECK(
       aoti_torch_call_dispatcher("aten::empty_like", "", stack.data()));
-  return to<torch::stable::Tensor>(stack[0]);
+  return torch::stable::detail::to<torch::stable::Tensor>(stack[0]);
 }

 // We expect this to be the stable version of the fill_.Scalar op
@@ -71,7 +71,8 @@ inline torch::stable::Tensor new_empty(

   int32_t target_dtype;
   if (dtype.has_value()) {
-    target_dtype = to<int32_t>(from(dtype.value()));
+    target_dtype = torch::stable::detail::to<int32_t>(
+        torch::stable::detail::from(dtype.value()));
   } else {
     TORCH_ERROR_CODE_CHECK(aoti_torch_get_dtype(self.get(), &target_dtype));
   }
@@ -109,7 +110,8 @@ inline torch::stable::Tensor new_zeros(

   int32_t target_dtype;
   if (dtype.has_value()) {
-    target_dtype = to<int32_t>(from(dtype.value()));
+    target_dtype = torch::stable::detail::to<int32_t>(
+        torch::stable::detail::from(dtype.value()));
   } else {
     TORCH_ERROR_CODE_CHECK(aoti_torch_get_dtype(self.get(), &target_dtype));
   }
@@ -194,21 +196,24 @@ inline torch::stable::Tensor transpose(
     int64_t dim0,
     int64_t dim1) {
   const auto num_args = 3;
-  std::array<StableIValue, num_args> stack{from(self), from(dim0), from(dim1)};
+  std::array<StableIValue, num_args> stack{
+      torch::stable::detail::from(self),
+      torch::stable::detail::from(dim0),
+      torch::stable::detail::from(dim1)};
   TORCH_ERROR_CODE_CHECK(
       aoti_torch_call_dispatcher("aten::transpose", "int", stack.data()));
-  return to<torch::stable::Tensor>(stack[0]);
+  return torch::stable::detail::to<torch::stable::Tensor>(stack[0]);
 }

 // We expect this to be the stable version of the zero_ op with identical
 // semantics to the existing zero_ op (except that it will not be called as
 // a tensor method but only as a function i.e. zero_(t) not t.zero_()).
 inline torch::stable::Tensor zero_(torch::stable::Tensor& self) {
   const auto num_args = 1;
-  std::array<StableIValue, num_args> stack{from(self)};
+  std::array<StableIValue, num_args> stack{torch::stable::detail::from(self)};
   TORCH_ERROR_CODE_CHECK(
       aoti_torch_call_dispatcher("aten::zero_", "", stack.data()));
-  return to<torch::stable::Tensor>(stack[0]);
+  return torch::stable::detail::to<torch::stable::Tensor>(stack[0]);
 }

 // We expect this to be the stable version of the copy_ op with
@@ -219,20 +224,24 @@ inline torch::stable::Tensor copy_(
     std::optional<bool> non_blocking = std::nullopt) {
   const auto num_args = 3;
   std::array<StableIValue, num_args> stack{
-      from(self), from(src), from(non_blocking.value_or(false))};
+      torch::stable::detail::from(self),
+      torch::stable::detail::from(src),
+      torch::stable::detail::from(non_blocking.value_or(false))};
   TORCH_ERROR_CODE_CHECK(
       aoti_torch_call_dispatcher("aten::copy_", "", stack.data()));
-  return to<torch::stable::Tensor>(stack[0]);
+  return torch::stable::detail::to<torch::stable::Tensor>(stack[0]);
 }

 // We expect this to be the stable version of the clone op. We will
 // add optional memory_format kwarg support in the future.
 inline torch::stable::Tensor clone(const torch::stable::Tensor& self) {
   const auto num_args = 2;
-  std::array<StableIValue, num_args> stack{from(self), from(std::nullopt)};
+  std::array<StableIValue, num_args> stack{
+      torch::stable::detail::from(self),
+      torch::stable::detail::from(std::nullopt)};
   TORCH_ERROR_CODE_CHECK(
       aoti_torch_call_dispatcher("aten::clone", "", stack.data()));
-  return to<torch::stable::Tensor>(stack[0]);
+  return torch::stable::detail::to<torch::stable::Tensor>(stack[0]);
 }

 } // namespace torch::stable
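Callers of these stable ops are unaffected by the change, since the `from`/`to` calls above are internal to the header. A hedged usage sketch (assuming `transpose` keeps its usual `(const Tensor&, int64_t, int64_t)` signature; `flip_01` is a hypothetical helper, not part of this PR):

    #include <torch/csrc/stable/ops.h>

    // The op signatures are unchanged; only their internals were re-qualified.
    torch::stable::Tensor flip_01(const torch::stable::Tensor& t) {
      torch::stable::Tensor copy = torch::stable::clone(t);
      return torch::stable::transpose(copy, /*dim0=*/0, /*dim1=*/1);
    }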

torch/csrc/stable/stableivalue_conversions.h

Lines changed: 45 additions & 12 deletions
@@ -9,6 +9,8 @@

 #include <optional>

+namespace torch::stable::detail {
+
 // forward declare so that the from/to() implementations in the detail
 // namespace of library.h where the real work is done can compile.
 template <typename T>
@@ -17,15 +19,8 @@ template <typename T>
 T to(StableIValue val);

 // =============================================================================
-// helpers for converting between StableIValue and T
+// Below are the helpers for converting between StableIValue and T
 // =============================================================================
-
-// note that the signatures for from and to are forward declared in
-// stable/stableivalue_conversions.h but defined below to avoid circular
-// dependencies where other headers (like tensor-inl.h) will need to/from.
-
-namespace detail {
-
 // =============================================================================
 // FROM CONVERSIONS (T -> StableIValue)
 // =============================================================================
@@ -314,7 +309,9 @@ struct ToImpl<torch::stable::Tensor> {
   }
 };

-} // namespace detail
+// =============================================================================
+// end to helpers for converting between StableIValue and T
+// =============================================================================

 // Expose the partially templated class functions through single functions
 template <typename T>
@@ -338,6 +335,42 @@ inline T to(StableIValue val) {
   return detail::ToImpl<T>::call(val);
 }

-// =============================================================================
-// end to helpers for converting between StableIValue and T
-// =============================================================================
+} // namespace torch::stable::detail
+
+// [global from/to deprecation note]
+// WARNING! the following APIs will be removed!! We deprecated global from/to
+// (in 2.10) in favor of torch::stable::detail from/to to not pollute the global
+// namespace. We are only including the following wrappers for backwards
+// compatibility.
+
+// WARNING! Will be removed. Only exists for BC. See [global from/to deprecation
+// note]
+template <typename T>
+[[deprecated("Use torch::stable::detail::from instead.")]]
+inline StableIValue from(T val) {
+  return torch::stable::detail::from(val);
+}
+
+// WARNING! Will be removed. Only exists for BC. See [global from/to deprecation
+// note]
+template <typename T>
+[[deprecated("Use torch::stable::detail::from instead.")]]
+inline StableIValue from(const std::optional<T>& val) {
+  return torch::stable::detail::from(val);
+}
+
+// WARNING! Will be removed. Only exists for BC. See [global from/to deprecation
+// note]
+[[deprecated(
+    "Use torch::stable::detail::from instead.")]] [[maybe_unused]] inline StableIValue
+from(const torch::stable::Tensor& val) {
+  return torch::stable::detail::from(val);
+}
+
+// WARNING! Will be removed. Only exists for BC. See [global from/to deprecation
+// note]
+template <typename T>
+[[deprecated("Use torch::stable::detail::to instead.")]]
+inline T to(StableIValue val) {
+  return torch::stable::detail::to<T>(val);
+}
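With the backwards-compatibility wrappers above, an unqualified `from`/`to` call still resolves but now trips `[[deprecated]]`. A small sketch of what an extension sees (the helper `pack_int` is hypothetical, only there to show the warning):

    #include <torch/csrc/stable/stableivalue_conversions.h>

    StableIValue pack_int(int64_t v) {
      // return from(v);                     // still builds, but warns:
      //                                     // "Use torch::stable::detail::from instead."
      return torch::stable::detail::from(v); // warning-free going forward
    }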

torch/csrc/stable/tensor_inl.h

Lines changed: 2 additions & 1 deletion
@@ -17,7 +17,8 @@ using torch::headeronly::ScalarType;
 inline ScalarType Tensor::scalar_type() const {
   int32_t dtype;
   TORCH_ERROR_CODE_CHECK(aoti_torch_get_dtype(ath_.get(), &dtype));
-  return to<ScalarType>(from(dtype));
+  return torch::stable::detail::to<ScalarType>(
+      torch::stable::detail::from(dtype));
 }

 } // namespace torch::stable
