
Commit 41adec3

Revert "Switch to native functional collective by default (#120370)"
This reverts commit 1f1bc0e. Reverted #120370 on behalf of https://github.com/yifuwang because it broke CI (see the comment on #120370).
1 parent 7b1cc14 commit 41adec3
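For background, the "functional collectives" in question are the value-semantic collective ops exposed via torch.distributed._functional_collectives; when a model using them is traced or exported, each collective appears as a collective node paired with a wait_tensor node, which is what the test assertions in the diff below count. The following is only a hedged sketch of a call site (process-group setup elided; the helper name allreduce_mean is illustrative, not part of this commit):

```python
import torch
import torch.distributed as dist
import torch.distributed._functional_collectives as funcol


# Assumes a default process group has already been initialized elsewhere
# (e.g. via dist.init_process_group); shown only to illustrate the API shape.
def allreduce_mean(t: torch.Tensor) -> torch.Tensor:
    out = funcol.all_reduce(t, reduceOp="sum", group=dist.group.WORLD)
    # The result is an async tensor that synchronizes on first use; in a
    # traced/exported graph this shows up as an all_reduce node followed by a
    # wait_tensor node, matching the counts asserted in the test diff below.
    return out / dist.get_world_size()
```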


2 files changed: +7 −21 lines changed


test/distributed/_tensor/experimental/test_tp_transform.py

Lines changed: 6 additions & 6 deletions
@@ -92,8 +92,8 @@ def test_tp_transform_with_uncovered_op(self):
         self.assert_has_c10d_ops(
             tp_exported_program.graph_module,
             {
-                "_c10d_functional.all_gather_into_tensor.default": 1,
-                "_c10d_functional.wait_tensor.default": 1,
+                "c10d_functional.all_gather_into_tensor.default": 1,
+                "c10d_functional.wait_tensor.default": 1,
             },
         )

@@ -131,8 +131,8 @@ def test_tp_transform_e2e(self):
         self.assert_has_c10d_ops(
             tp_exported_program.graph_module,
             {
-                "_c10d_functional.all_reduce.default": 2,
-                "_c10d_functional.wait_tensor.default": 2,
+                "c10d_functional.all_reduce.default": 2,
+                "c10d_functional.wait_tensor.default": 2,
             },
         )

@@ -167,8 +167,8 @@ def test_tp_transform_no_bias(self):
         self.assert_has_c10d_ops(
             tp_exported_program.graph_module,
             {
-                "_c10d_functional.all_reduce.default": 1,
-                "_c10d_functional.wait_tensor.default": 1,
+                "c10d_functional.all_reduce.default": 1,
+                "c10d_functional.wait_tensor.default": 1,
             },
         )
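For context on what these assertions verify: assert_has_c10d_ops checks that the exported FX graph contains each listed collective op the expected number of times. The sketch below is a hypothetical reimplementation (the helper name count_c10d_ops and the matching logic are assumptions, not the actual test utility); it walks the graph and compares call_function target names against the expected counts.

```python
from collections import defaultdict
from typing import Dict

import torch.fx


def count_c10d_ops(graph_module: torch.fx.GraphModule, expected: Dict[str, int]) -> None:
    """Hypothetical stand-in for assert_has_c10d_ops: count collective ops by name."""
    actual: Dict[str, int] = defaultdict(int)
    for node in graph_module.graph.nodes:
        if node.op == "call_function":
            # str() of an op overload yields names such as
            # "c10d_functional.all_reduce.default", matching the dict keys above.
            actual[str(node.target)] += 1
    for op_name, count in expected.items():
        assert actual[op_name] == count, (
            f"{op_name}: expected {count}, found {actual[op_name]}"
        )
```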

torch/distributed/_functional_collectives_impl.py

Lines changed: 1 addition & 15 deletions
@@ -7,7 +7,6 @@
 import torch
 import torch.distributed as dist
 import torch.distributed.distributed_c10d as c10d
-from torch._dynamo import assume_constant_result

 """
 Moved eager kernel implementations to a separate file partly for readability and partly as it is currently

@@ -25,23 +24,10 @@

 """

-_use_native_funcol: Optional[bool] = None
+_use_native_funcol = "_USE_NATIVE_C10D_FUNCTIONAL" in os.environ


-@assume_constant_result
 def native_funcol_enabled():
-    global _use_native_funcol
-    if _use_native_funcol is None:
-        try:
-            # Disable native funcol when torch_xla is installed. This check
-            # will be removed once torch_xla adopts the native_funcol IR.
-            import torch_xla  # noqa: F401
-
-            _use_native_funcol = False
-        except Exception:
-            # When TORCH_DISABLE_NATIVE_FUNCOL is set, fallback to py funcol
-            _use_native_funcol = os.environ.get("TORCH_DISABLE_NATIVE_FUNCOL") != "1"
-
     return _use_native_funcol

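The net effect of this hunk is that native functional collectives go back to being opt-in: _use_native_funcol is computed once at module import time from the presence of the _USE_NATIVE_C10D_FUNCTIONAL environment variable, instead of defaulting to on with TORCH_DISABLE_NATIVE_FUNCOL and a torch_xla check as opt-outs. A minimal usage sketch, assuming the variable is set before the module is first imported and that callers branch on native_funcol_enabled():

```python
import os

# The flag is a presence check (any value works) evaluated at import time, so
# it must be set before torch.distributed._functional_collectives_impl is
# first imported in this process.
os.environ["_USE_NATIVE_C10D_FUNCTIONAL"] = "1"

from torch.distributed._functional_collectives_impl import native_funcol_enabled

if native_funcol_enabled():
    # Native path: traced collectives use the _c10d_functional.* op names
    # that the reverted test changes referred to.
    print("native funcol enabled")
else:
    # Legacy py funcol path: ops appear as c10d_functional.* (the names
    # restored by the test diff above).
    print("py funcol (legacy) in use")
```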
