4 changes: 2 additions & 2 deletions .ci/pytorch/multigpu-test.sh
@@ -43,8 +43,8 @@ if [[ "${SHARD_NUMBER:-2}" == "2" ]]; then
time python test/run_test.py --verbose -i distributed/test_functional_api

# DTensor tests
-time python test/run_test.py --verbose -i distributed/_tensor/test_random_ops
-time python test/run_test.py --verbose -i distributed/_tensor/test_dtensor_compile
+time python test/run_test.py --verbose -i distributed/tensor/test_random_ops
+time python test/run_test.py --verbose -i distributed/tensor/test_dtensor_compile

# DeviceMesh test
time python test/run_test.py --verbose -i distributed/test_device_mesh
2 changes: 1 addition & 1 deletion .ci/pytorch/test.sh
@@ -329,7 +329,7 @@ test_inductor_distributed() {
python test/run_test.py -i inductor/test_aot_inductor.py -k test_non_default_cuda_device --verbose
python test/run_test.py -i inductor/test_aot_inductor.py -k test_replicate_on_devices --verbose
python test/run_test.py -i distributed/test_c10d_functional_native.py --verbose
-python test/run_test.py -i distributed/_tensor/test_dtensor_compile.py --verbose
+python test/run_test.py -i distributed/tensor/test_dtensor_compile.py --verbose
python test/run_test.py -i distributed/tensor/parallel/test_micro_pipeline_tp.py --verbose
python test/run_test.py -i distributed/_composable/test_replicate_with_compiler.py --verbose
python test/run_test.py -i distributed/_composable/fsdp/test_fully_shard_comm.py --verbose
4 changes: 2 additions & 2 deletions .github/labeler.yml
@@ -30,9 +30,9 @@
- torch/fx/experimental/sym_node.py
- torch/fx/experimental/validator.py
- torch/fx/experimental/proxy_tensor.py
-- test/distributed/_tensor/test_dtensor_compile.py
+- test/distributed/tensor/test_dtensor_compile.py
- test/distributed/tensor/parallel/test_fsdp_2d_parallel.py
-- torch/distributed/_tensor/**
+- torch/distributed/tensor/**
- torch/distributed/fsdp/**
- torch/csrc/inductor/**
- torch/csrc/dynamo/**
11 changes: 0 additions & 11 deletions test/distributed/_tensor/README.md

This file was deleted.

10 changes: 10 additions & 0 deletions test/distributed/tensor/README.md
@@ -0,0 +1,10 @@
+## Run distributed tensor tests:
+
+From the repo root, run (either CPU or GPU):
+
+`pytest test/distributed/tensor/test_dtensor.py`
+
+
+To run specific test cases and print stdout/stderr:
+
+`pytest test/distributed/tensor/test_dtensor.py -s -k test_from_local`
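
The README above only lists the pytest invocations; as a rough, single-process sketch of the API that `test_from_local` exercises, the snippet below builds a 1-rank CPU (`gloo`) mesh and wraps a local tensor with `DTensor.from_local` / `distribute_tensor`. The import paths assume the renamed `torch.distributed.tensor` namespace this PR moves the tests to; the port, shapes, and placements are arbitrary choices for illustration.

```python
# Minimal single-process sketch (assumptions: PyTorch with the public
# torch.distributed.tensor namespace, CPU/gloo backend, world size 1).
import os

import torch
import torch.distributed as dist
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.tensor import DTensor, Shard, distribute_tensor

# Single-rank "world" so the example runs without torchrun.
os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
os.environ.setdefault("MASTER_PORT", "29500")
dist.init_process_group("gloo", rank=0, world_size=1)

mesh = init_device_mesh("cpu", (1,))  # 1-D mesh over this single rank
local = torch.randn(4, 4)

# DTensor.from_local wraps an already-sharded local tensor into a DTensor;
# distribute_tensor shards a full tensor across the mesh.
dt = DTensor.from_local(local, mesh, [Shard(0)])
dt2 = distribute_tensor(torch.randn(4, 4), mesh, [Shard(0)])

print(dt.placements, dt2.to_local().shape)
dist.destroy_process_group()
```

On a multi-rank setup the same code would be launched via `torchrun`, with each rank passing its own local shard to `from_local`.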
@@ -82,7 +82,7 @@ def wrapped(fn):

# Re-generate this failed list, turn on dry_run of the below func
# check_dtensor_func(self, test, op, dry_run=True), then run sth
-# like python test/distributed/_tensor/test_dtensor_ops.py > failed.expect
+# like python test/distributed/tensor/test_dtensor_ops.py > failed.expect
dtensor_fails = {
# these sometimes pass and sometimes fail
# we need to remove many of them from list once op
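
For context on the regeneration workflow the comment in this hunk describes, here is an illustrative-only sketch of the dry-run pattern; the real `check_dtensor_func` signature and the exact xfail format live in `test_dtensor_ops.py`, so the names and output below are hypothetical stand-ins.

```python
# Hypothetical sketch of the dry_run pattern (not the real helper from
# test_dtensor_ops.py): in dry-run mode, print one list entry per op instead
# of running the test, so redirecting stdout rebuilds failed.expect.
def check_dtensor_func(run_test, op_name, dry_run=False):
    if dry_run:
        print(f'xfail("{op_name}"),')  # entry to paste into the failed list
        return
    run_test()


def run_op_test():
    pass  # placeholder for a real per-op DTensor test body


for op in ["add", "mul", "nonzero"]:
    # With dry_run=True, `python this_script.py > failed.expect` regenerates the list.
    check_dtensor_func(run_op_test, op, dry_run=True)
```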
2 changes: 1 addition & 1 deletion test/inductor/test_compiled_autograd.py
@@ -3629,7 +3629,7 @@ def wrap_test_class(orig_cls):
TestAutogradWithCompiledAutograd = wrap_test_class(test_autograd.TestAutograd)
TestCustomOpWithCompiledAutograd = wrap_test_class(test_custom_ops.TestCustomOp)
if torch.distributed.is_available() and HAS_CUDA:
test_dtensor = load_test_module("distributed/_tensor/test_dtensor_compile")
test_dtensor = load_test_module("distributed/tensor/test_dtensor_compile")
TestDTensorCompileWithCompiledAutograd = wrap_test_class(
test_dtensor.TestDTensorCompile
)
2 changes: 1 addition & 1 deletion test/run_test.py
@@ -182,7 +182,7 @@ def __contains__(self, item):
"test_jit_legacy",
"test_cuda_nvml_based_avail",
"test_jit_cuda_fuser",
"distributed/_tensor/test_attention",
"distributed/tensor/test_attention",
]

# whitelist of tests for s390x
2 changes: 1 addition & 1 deletion torch/fx/experimental/symbolic_shapes.py
@@ -837,7 +837,7 @@ def free_symbols(val: IterateExprs) -> OrderedSet[sympy.Symbol]:
return OrderedSet()

# TODO: Apparently, returning an OrderedSet here breaks
-# python test/distributed/_tensor/test_dtensor_compile.py TestDTensorCompile.test_dtensor_dynamic
+# python test/distributed/tensor/test_dtensor_compile.py TestDTensorCompile.test_dtensor_dynamic
return first_expr.free_symbols.union(*(e.free_symbols for e in itr)) # type: ignore[return-value]
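
As a standalone illustration of the union in the return statement above, the sketch below collects the free symbols of a first sympy expression and of the remaining iterator in one pass; the symbols and expressions are made up for the example.

```python
# Toy reproduction of the free-symbol union pattern (illustrative inputs only).
import sympy

x, y, z = sympy.symbols("x y z")
exprs = iter([x + y, y * z, sympy.Integer(3)])

first_expr = next(exprs)
# Plain set union over the remaining expressions, matching the return type
# the TODO above says must stay a regular set rather than an OrderedSet.
symbols = first_expr.free_symbols.union(*(e.free_symbols for e in exprs))
print(symbols)  # {x, y, z} (order may vary)
```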

