We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent 0113f21 · commit 2072a64 — Copy full SHA for 2072a64
test/distributed/test_data_parallel.py
@@ -96,7 +96,7 @@ def test_parallel_apply(self):
96
for out, expected in zip(outputs, expected_outputs):
97
self.assertEqual(out, expected)
98
99
-@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
+ @unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
100
def test_parallel_apply_autocast(self):
101
l1 = nn.Linear(10, 5).to("cuda:0", torch.float)
102
l2 = nn.Linear(10, 5).to("cuda:1", torch.float)
0 commit comments