Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion aten/src/ATen/native/cuda/group_norm_kernel.cu
Original file line number Diff line number Diff line change
Expand Up @@ -489,7 +489,7 @@ void GroupNormBackwardKernelImplInternal(
ComputeInternalGradientsCUDAKernel<T>
<<<N * C, cuda_utils::kCUDABlockReduceNumThreads, 0, cuda_stream>>>(
HxW, dY_data, X_data, ds_data, db_data);
if (dX != nullptr) {
if (dX_data != nullptr) {
Tensor c1 = at::empty({N, C}, X.options().dtype(kAccType));
Tensor c2 = at::empty({N, G}, X.options().dtype(kAccType));
Tensor c3 = at::empty({N, G}, X.options().dtype(kAccType));
Expand Down Expand Up @@ -525,6 +525,7 @@ void GroupNormBackwardKernelImplInternal(
<<<N * C, kCUDANumThreads, 0, cuda_stream>>>(
C, HxW, G, dY_data, X_data, c1_data, c2_data, c3_data, dX_data);
}
AT_CUDA_CHECK(cudaGetLastError());
}
if (dgamma->defined() || dbeta->defined()) {
T* dgamma_data = dgamma->defined() ? dgamma->data_ptr<T>() : nullptr;
Expand Down
19 changes: 14 additions & 5 deletions test/test_nn.py
Original file line number Diff line number Diff line change
Expand Up @@ -9199,8 +9199,10 @@ def _test_GroupNorm_general(self, device, dtype=torch.float):
(2, 6, 4, 2, 2): 3,
(1, 256, 1, 1): 32,
}
for shape, g in good_shape_g.items():
for shape_g, grad in product(good_shape_g.items(), [True, False]):
shape, g = shape_g
x = torch.empty(*shape, device=device, dtype=dtype).uniform_(0, 10)
x.requires_grad_(grad)
b = shape[0]
c = shape[1]

Expand All @@ -9212,8 +9214,13 @@ def _test_GroupNorm_general(self, device, dtype=torch.float):
out_reshaped = output.view(b, g, -1)
mean = out_reshaped.mean(-1)
var = out_reshaped.var(-1, unbiased=False)
self.assertEqual(torch.abs(mean).mean(), 0, atol=1e-5, rtol=0)
self.assertEqual(torch.abs(var).mean(), 1, atol=1e-5, rtol=0)
# TODO: fix numerical issue. See #44863
self.assertEqual(torch.abs(mean).mean(), 0, atol=1e-3, rtol=1e-3)
self.assertEqual(torch.abs(var).mean(), 1, atol=1e-3, rtol=1e-3)

output.backward(torch.randn_like(output))
if output.is_cuda:
torch.cuda.synchronize()

# test that GN applies weight and bias correctly
scale = torch.empty(c, device=device, dtype=dtype).uniform_(0.2, 2)
Expand All @@ -9226,8 +9233,9 @@ def _test_GroupNorm_general(self, device, dtype=torch.float):
out_normed_reshaped = out_normed.view(b, g, -1)
mean = out_normed_reshaped.mean(-1)
var = out_normed_reshaped.var(-1, unbiased=False)
self.assertEqual(torch.abs(mean).mean(), 0, atol=1e-5, rtol=0)
self.assertEqual(torch.abs(var).mean(), 1, atol=1e-5, rtol=0)
# TODO: fix numerical issue. See #44863
self.assertEqual(torch.abs(mean).mean(), 0, atol=1e-3, rtol=1e-3)
self.assertEqual(torch.abs(var).mean(), 1, atol=1e-3, rtol=1e-3)

bad_shape_g = {
(1, 2, 3, 4): 3,
Expand Down Expand Up @@ -9568,6 +9576,7 @@ def test_LayerNorm_general(self, device):
if self.device_type == 'cuda':
self._test_LayerNorm_cuda_half(device)

@onlyOnCPUAndCUDA
def test_GroupNorm_general(self, device):
self._test_GroupNorm_general(device)

Expand Down