Skip to content

Commit 2b14f2d

Browse files
XiaobingSuper authored and
facebook-github-bot committed
[reland][DNNL]:enable max_pool3d and avg_pool3d (#40996)
Summary: Pull Request resolved: #40996 Test Plan: Imported from OSS Differential Revision: D22440766 Pulled By: VitalyFedyunin fbshipit-source-id: 242711612920081eb4a7e5a7e80bc8b2d4c9f978
1 parent 45c5bac commit 2b14f2d

File tree

5 files changed

+149
-22
lines changed

5 files changed

+149
-22
lines changed

aten/src/ATen/native/Pooling.cpp

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -154,9 +154,14 @@ Tensor max_pool3d(
154154
IntArrayRef padding,
155155
IntArrayRef dilation,
156156
bool ceil_mode) {
157+
if (self.is_mkldnn()) {
158+
return at::mkldnn_max_pool3d(
159+
self, kernel_size, stride, padding, dilation, ceil_mode);
160+
}
157161
auto output_and_indices = at::max_pool3d_with_indices(
158162
self, kernel_size, stride, padding, dilation, ceil_mode);
159163
return std::get<0>(output_and_indices);
160164
}
165+
161166
} // namespace native
162167
} // namespace at

aten/src/ATen/native/mkldnn/Pooling.cpp

Lines changed: 101 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -17,8 +17,17 @@ Tensor mkldnn_max_pool2d(
1717
IntArrayRef padding,
1818
IntArrayRef dilation,
1919
bool ceil_mode) {
20-
AT_ERROR(
21-
"mkldnn_max_pool2d: ATen not compiled with MKLDNN support");
20+
TORCH_CHECK(false, "mkldnn_max_pool2d: ATen not compiled with MKLDNN support");
21+
}
22+
23+
Tensor mkldnn_max_pool3d(
24+
const Tensor& self,
25+
IntArrayRef kernel_size,
26+
IntArrayRef stride,
27+
IntArrayRef padding,
28+
IntArrayRef dilation,
29+
bool ceil_mode) {
30+
TORCH_CHECK(false, "mkldnn_max_pool3d: ATen not compiled with MKLDNN support");
2231
}
2332

2433
Tensor mkldnn_avg_pool2d(
@@ -29,7 +38,7 @@ Tensor mkldnn_avg_pool2d(
2938
bool ceil_mode,
3039
bool count_include_pad,
3140
c10::optional<int64_t> divisor_override) {
32-
AT_ERROR("mkldnn_avg_pool2d: ATen not compiled with MKLDNN support");
41+
TORCH_CHECK(false, "mkldnn_avg_pool2d: ATen not compiled with MKLDNN support");
3342
}
3443

3544
Tensor& mkldnn_avg_pool2d_out(
@@ -41,19 +50,41 @@ Tensor& mkldnn_avg_pool2d_out(
4150
bool ceil_mode,
4251
bool count_include_pad,
4352
c10::optional<int64_t> divisor_override) {
44-
AT_ERROR("mkldnn_avg_pool2d_out: ATen not compiled with MKLDNN support");
53+
TORCH_CHECK(false, "mkldnn_avg_pool2d_out: ATen not compiled with MKLDNN support");
54+
}
55+
56+
Tensor mkldnn_avg_pool3d(
57+
const Tensor& self,
58+
IntArrayRef kernel_size,
59+
IntArrayRef stride,
60+
IntArrayRef padding,
61+
bool ceil_mode,
62+
bool count_include_pad,
63+
c10::optional<int64_t> divisor_override) {
64+
TORCH_CHECK(false, "mkldnn_avg_pool3d: ATen not compiled with MKLDNN support");
65+
}
66+
67+
Tensor& mkldnn_avg_pool3d_out(
68+
Tensor& output,
69+
const Tensor& self,
70+
IntArrayRef kernel_size,
71+
IntArrayRef stride,
72+
IntArrayRef padding,
73+
bool ceil_mode,
74+
bool count_include_pad,
75+
c10::optional<int64_t> divisor_override) {
76+
TORCH_CHECK(false, "mkldnn_avg_pool3d_out: ATen not compiled with MKLDNN support");
4577
}
4678

4779
Tensor mkldnn_adaptive_avg_pool2d(Tensor const& input, IntArrayRef output_size) {
48-
AT_ERROR("mkldnn_adaptive_avg_pool2d: ATen not compiled with MKLDNN support");
80+
TORCH_CHECK(false, "mkldnn_adaptive_avg_pool2d: ATen not compiled with MKLDNN support");
4981
}
5082

5183
Tensor& mkldnn_adaptive_avg_pool2d_out(
5284
Tensor& output,
5385
const Tensor& input,
5486
IntArrayRef output_size) {
55-
AT_ERROR(
56-
"mkldnn_adaptive_avg_pool2d_out: ATen not compiled with MKLDNN support");
87+
TORCH_CHECK(false, "mkldnn_adaptive_avg_pool2d_out: ATen not compiled with MKLDNN support");
5788
}
5889

5990
} // namespace native
@@ -67,21 +98,22 @@ Tensor& mkldnn_adaptive_avg_pool2d_out(
6798
namespace at {
6899
namespace native {
69100

70-
static Tensor _mkldnn_pool2d(
101+
static Tensor _mkldnn_pooling(
71102
const Tensor& input,
72103
IntArrayRef kernel_size,
73104
IntArrayRef stride,
74105
IntArrayRef padding,
75106
IntArrayRef dilation,
76107
bool ceil_mode,
77108
ideep::algorithm algo) {
78-
auto kernel_size_vec = expand_param_if_needed(kernel_size, "kernel_size", 2);
109+
const int64_t dims = input.dim() - 2;
110+
auto kernel_size_vec = expand_param_if_needed(kernel_size, "kernel_size", dims);
79111
if (stride.empty()) stride = kernel_size;
80-
auto stride_vec = expand_param_if_needed(stride, "stride", 2);
81-
auto padding_vec = expand_param_if_needed(padding, "padding", 2);
112+
auto stride_vec = expand_param_if_needed(stride, "stride", dims);
113+
auto padding_vec = expand_param_if_needed(padding, "padding", dims);
82114
auto padding_vec_l = padding_vec;
83115
auto padding_vec_r = padding_vec;
84-
auto dilation_vec = expand_param_if_needed(dilation, "dilation", 2);
116+
auto dilation_vec = expand_param_if_needed(dilation, "dilation", dims);
85117

86118
const ideep::tensor& x = itensor_from_mkldnn(input);
87119
std::vector<int64_t> output_sizes;
@@ -152,7 +184,24 @@ Tensor mkldnn_max_pool2d(
152184
IntArrayRef padding,
153185
IntArrayRef dilation,
154186
bool ceil_mode) {
155-
return _mkldnn_pool2d(
187+
return _mkldnn_pooling(
188+
input,
189+
kernel_size,
190+
stride,
191+
padding,
192+
dilation,
193+
ceil_mode,
194+
ideep::algorithm::pooling_max);
195+
}
196+
197+
Tensor mkldnn_max_pool3d(
198+
const Tensor& input,
199+
IntArrayRef kernel_size,
200+
IntArrayRef stride,
201+
IntArrayRef padding,
202+
IntArrayRef dilation,
203+
bool ceil_mode) {
204+
return _mkldnn_pooling(
156205
input,
157206
kernel_size,
158207
stride,
@@ -172,7 +221,7 @@ Tensor mkldnn_avg_pool2d(
172221
c10::optional<int64_t> divisor_override) {
173222
TORCH_CHECK(!divisor_override.has_value(),
174223
"mkldnn_avg_pool2d operator does not support divisor");
175-
return _mkldnn_pool2d(
224+
return _mkldnn_pooling(
176225
input,
177226
kernel_size,
178227
stride,
@@ -192,28 +241,59 @@ Tensor& mkldnn_avg_pool2d_out(
192241
bool ceil_mode,
193242
bool count_include_pad,
194243
c10::optional<int64_t> divisor_override) {
195-
AT_ERROR(
196-
"mkldnn_avg_pool2d_out: in-place mkldnn operations are not supported yet");
244+
TORCH_CHECK(false, "mkldnn_avg_pool2d_out: in-place mkldnn operations are not supported yet");
245+
}
246+
247+
Tensor mkldnn_avg_pool3d(
248+
const Tensor& input,
249+
IntArrayRef kernel_size,
250+
IntArrayRef stride,
251+
IntArrayRef padding,
252+
bool ceil_mode,
253+
bool count_include_pad,
254+
c10::optional<int64_t> divisor_override) {
255+
TORCH_CHECK(!divisor_override.has_value(), "mkldnn_avg_pool3d operator does not support divisor");
256+
return _mkldnn_pooling(
257+
input,
258+
kernel_size,
259+
stride,
260+
padding,
261+
/*dilation*/ std::vector<int64_t>{1, 1, 1},
262+
ceil_mode,
263+
count_include_pad ? ideep::algorithm::pooling_avg_include_padding
264+
: ideep::algorithm::pooling_avg_exclude_padding);
265+
}
266+
267+
Tensor& mkldnn_avg_pool3d_out(
268+
Tensor& output,
269+
const Tensor& input,
270+
IntArrayRef kernel_size,
271+
IntArrayRef stride,
272+
IntArrayRef padding,
273+
bool ceil_mode,
274+
bool count_include_pad,
275+
c10::optional<int64_t> divisor_override) {
276+
TORCH_CHECK(false, "mkldnn_avg_pool3d_out: in-place mkldnn operations are not supported yet");
197277
}
198278

199279
Tensor mkldnn_adaptive_avg_pool2d(
200280
Tensor const& input,
201281
IntArrayRef output_size) {
202-
AT_ASSERTM(input.dim() == 4, "mkldnn_adaptive_avg_pool2d: Expect 2D input");
282+
TORCH_CHECK(input.dim() == 4, "mkldnn_adaptive_avg_pool2d: Expect 2D input");
203283

204284
auto output_size_vec =
205285
expand_param_if_needed(output_size, "output_size", input.dim() - 2);
206286
std::vector<int64_t> kernel_size(input.dim() - 2);
207287
for (int64_t i = 2; i < input.dim(); ++i) {
208288
auto s1 = input.size(i);
209289
auto s2 = output_size_vec[i - 2];
210-
AT_ASSERTM(s2 != 0, "output size can not be zero");
211-
AT_ASSERTM(
290+
TORCH_CHECK(s2 != 0, "output size can not be zero");
291+
TORCH_CHECK(
212292
s1 % s2 == 0,
213293
"input size is not divisible by the output size is not supported yet");
214294
kernel_size[i - 2] = s1 / s2;
215295
}
216-
return _mkldnn_pool2d(
296+
return _mkldnn_pooling(
217297
input,
218298
kernel_size,
219299
/*stride*/ kernel_size,
@@ -227,8 +307,7 @@ Tensor& mkldnn_adaptive_avg_pool2d_out(
227307
Tensor& output,
228308
const Tensor& input,
229309
IntArrayRef output_size) {
230-
AT_ERROR(
231-
"mkldnn_adaptive_avg_pool2d_out: in-place mkldnn operations are not supported yet");
310+
TORCH_CHECK(false, "mkldnn_adaptive_avg_pool2d_out: in-place mkldnn operations are not supported yet");
232311
}
233312

234313

aten/src/ATen/native/native_functions.yaml

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1794,6 +1794,11 @@
17941794
dispatch:
17951795
MkldnnCPU: mkldnn_max_pool2d
17961796

1797+
- func: mkldnn_max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor
1798+
use_c10_dispatcher: full
1799+
dispatch:
1800+
MkldnnCPU: mkldnn_max_pool3d
1801+
17971802
- func: quantized_max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor
17981803
use_c10_dispatcher: full
17991804
dispatch:
@@ -5851,13 +5856,15 @@
58515856
dispatch:
58525857
CPU: avg_pool3d_out_cpu
58535858
CUDA: avg_pool3d_out_cuda
5859+
MkldnnCPU: mkldnn_avg_pool3d_out
58545860

58555861
- func: avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor
58565862
use_c10_dispatcher: full
58575863
python_module: nn
58585864
dispatch:
58595865
CPU: avg_pool3d_cpu
58605866
CUDA: avg_pool3d_cuda
5867+
MkldnnCPU: mkldnn_avg_pool3d
58615868
QuantizedCPU: quantized_avg_pool3d
58625869

58635870
- func: avg_pool3d_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)

test/test_mkldnn.py

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -254,6 +254,25 @@ def test_max_pool2d_stride_none(self):
254254

255255
self.assertEqual(y1, y2.to_dense())
256256

257+
def test_max_pool3d(self):
258+
N = torch.randint(3, 10, (1,)).item()
259+
C = torch.randint(3, 10, (1,)).item()
260+
261+
for stride in [1, 2, 3]:
262+
for D, H, W in [(64, 64, 64), (35, 39, 35), (16, 19, 20), [7, 8, 9]]:
263+
x = torch.randn(N, C, D, H, W, dtype=torch.float32) * 10
264+
265+
for ceil_mode in [False, True]:
266+
max_pool3d = torch.nn.MaxPool3d(
267+
kernel_size=3 if not ceil_mode else 7,
268+
stride=stride,
269+
padding=1,
270+
ceil_mode=ceil_mode)
271+
272+
self.assertEqual(
273+
max_pool3d(x),
274+
max_pool3d(x.to_mkldnn()).to_dense())
275+
257276
def test_avg_pool2d(self):
258277
N = torch.randint(3, 10, (1,)).item()
259278
C = torch.randint(3, 10, (1,)).item()
@@ -291,6 +310,22 @@ def test_avg_pool2d_stride_none(self):
291310

292311
self.assertEqual(y1, y2.to_dense())
293312

313+
def test_avg_pool3d(self):
314+
N = torch.randint(3, 10, (1,)).item()
315+
C = torch.randint(3, 10, (1,)).item()
316+
x = torch.randn(N, C, 64, 64, 64, dtype=torch.float32) * 10
317+
318+
for count_include_pad in [True, False]:
319+
avg_pool3d = torch.nn.AvgPool3d(
320+
kernel_size=3,
321+
stride=2,
322+
padding=1,
323+
count_include_pad=count_include_pad)
324+
325+
self.assertEqual(
326+
avg_pool3d(x),
327+
avg_pool3d(x.to_mkldnn()).to_dense())
328+
294329
def test_adaptive_avg_pool2d(self):
295330
N = torch.randint(3, 10, (1,)).item()
296331
C = torch.randint(3, 10, (1,)).item()

torch/_overrides.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -114,6 +114,7 @@ def get_ignored_functions():
114114
torch.mkldnn_convolution,
115115
torch.mkldnn_convolution_backward_weights,
116116
torch.mkldnn_max_pool2d,
117+
torch.mkldnn_max_pool3d,
117118
torch.ones,
118119
torch.promote_types,
119120
torch.rand,

0 commit comments

Comments
 (0)