aten/src/ATen/native/DilatedMaxPool2d.cpp (14 changes: 6 additions & 8 deletions)

@@ -129,9 +129,8 @@ void max_pool2d_with_indices_out_cpu_template(
   IntArrayRef dilation,
   bool ceil_mode)
 {
-  // XXX JIT: Pooling.cpp allows stride.empty().
-  // XXX IntegrationTest.MNIST: padding.size() == 1 && dilation.size() == 1.
-  TORCH_CHECK(kernel_size.size() == 2 &&
+  // #20866, #22032: Guarantee this for the official C++ API?
+  TORCH_CHECK((kernel_size.size() == 1 || kernel_size.size() == 2) &&
     (stride.empty() || stride.size() == 2) &&
     (padding.size() == 1 || padding.size() == 2) &&
     (dilation.size() == 1 || dilation.size() == 2),
@@ -141,7 +140,7 @@ void max_pool2d_with_indices_out_cpu_template(
     "non-empty 3D or 4D (batch mode) tensor expected for input");
 
   const int kH = safe_downcast<int, int64_t>(kernel_size[0]);
-  const int kW = safe_downcast<int, int64_t>(kernel_size[1]);
+  const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]);
 
   const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]);
   const int dW = stride.empty() ? kW : safe_downcast<int, int64_t>(stride[1]);
@@ -303,9 +302,8 @@ Tensor& max_pool2d_with_indices_backward_out_cpu_template(
   IntArrayRef dilation,
   bool ceil_mode)
 {
-  // XXX JIT: Pooling.cpp allows stride.empty().
-  // XXX IntegrationTest.MNIST: padding.size() == 1 && dilation.size() == 1.
-  TORCH_CHECK(kernel_size.size() == 2 &&
+  // #20866, #22032: Guarantee this for the official C++ API?
+  TORCH_CHECK((kernel_size.size() == 1 || kernel_size.size() == 2) &&
     (stride.empty() || stride.size() == 2) &&
     (padding.size() == 1 || padding.size() == 2) &&
     (dilation.size() == 1 || dilation.size() == 2),
@@ -315,7 +313,7 @@ Tensor& max_pool2d_with_indices_backward_out_cpu_template(
     "non-empty 3D or 4D (batch mode) tensor expected for input");
 
   const int kH = safe_downcast<int, int64_t>(kernel_size[0]);
-  const int kW = safe_downcast<int, int64_t>(kernel_size[1]);
+  const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]);
 
   const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]);
   const int dW = stride.empty() ? kW : safe_downcast<int, int64_t>(stride[1]);
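Net effect of these hunks: the CPU forward and backward templates now accept a one-element kernel_size and broadcast it to both spatial dimensions (kW falls back to kH), while an empty stride still defaults to the kernel size. A minimal sketch of the resulting call-site behavior, assuming a libtorch build that includes this patch (the shapes and pool size of 2 are illustrative):

#include <ATen/ATen.h>

int main() {
  at::Tensor input = at::randn({1, 3, 32, 32});  // NCHW
  // Two-element form: accepted both before and after this change.
  auto full = at::max_pool2d_with_indices(input, /*kernel_size=*/{2, 2});
  // One-element form: newly accepted; 2 is broadcast to kH and kW.
  auto bcast = at::max_pool2d_with_indices(input, /*kernel_size=*/{2});
  // Both forms should produce identical outputs and indices.
  TORCH_CHECK(std::get<0>(full).equal(std::get<0>(bcast)));
  TORCH_CHECK(std::get<1>(full).equal(std::get<1>(bcast)));
  return 0;
}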
aten/src/ATen/native/DilatedMaxPool3d.cpp (18 changes: 8 additions & 10 deletions)

@@ -148,9 +148,8 @@ void max_pool3d_with_indices_out_cpu_template(
   IntArrayRef dilation,
   bool ceil_mode)
 {
-  // XXX [JIT] Pooling.cpp allows stride.empty().
-  // XXX [LIBTORCH] IntegrationTest.MNIST: padding.size() == 1 && dilation.size() == 1.
-  TORCH_CHECK(kernel_size.size() == 3 &&
+  // #20866, #22032: Guarantee this for the official C++ API?
+  TORCH_CHECK((kernel_size.size() == 1 || kernel_size.size() == 3) &&
     (stride.empty() || stride.size() == 3) &&
     (padding.size() == 1 || padding.size() == 3) &&
     (dilation.size() == 1 || dilation.size() == 3),
@@ -160,8 +159,8 @@ void max_pool3d_with_indices_out_cpu_template(
     "non-empty 4D or 5D (batch mode) tensor expected for input");
 
   const int kT = safe_downcast<int, int64_t>(kernel_size[0]);
-  const int kH = safe_downcast<int, int64_t>(kernel_size[1]);
-  const int kW = safe_downcast<int, int64_t>(kernel_size[2]);
+  const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]);
+  const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]);
 
   const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]);
   const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[1]);
@@ -353,9 +352,8 @@ Tensor& max_pool3d_with_indices_backward_out_cpu_template(
   IntArrayRef dilation,
   bool ceil_mode)
 {
-  // XXX [JIT] Pooling.cpp allows stride.empty().
-  // XXX [LIBTORCH] IntegrationTest.MNIST: padding.size() == 1 && dilation.size() == 1.
-  TORCH_CHECK(kernel_size.size() == 3 &&
+  // #20866, #22032: Guarantee this for the official C++ API?
+  TORCH_CHECK((kernel_size.size() == 1 || kernel_size.size() == 3) &&
     (stride.empty() || stride.size() == 3) &&
     (padding.size() == 1 || padding.size() == 3) &&
     (dilation.size() == 1 || dilation.size() == 3),
@@ -365,8 +363,8 @@ Tensor& max_pool3d_with_indices_backward_out_cpu_template(
     "non-empty 4D or 5D (batch mode) tensor expected for input");
 
   const int kT = safe_downcast<int, int64_t>(kernel_size[0]);
-  const int kH = safe_downcast<int, int64_t>(kernel_size[1]);
-  const int kW = safe_downcast<int, int64_t>(kernel_size[2]);
+  const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]);
+  const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]);
 
   const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]);
   const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[1]);
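The 3-D templates get the same relaxation, with a single kernel_size element broadcast to kT, kH, and kW. A hedged sketch under the same assumptions (5-D NCDHW input, patched build; shapes illustrative):

#include <ATen/ATen.h>

int main() {
  at::Tensor input = at::randn({1, 2, 8, 16, 16});  // NCDHW
  auto full  = at::max_pool3d_with_indices(input, /*kernel_size=*/{2, 2, 2});
  auto bcast = at::max_pool3d_with_indices(input, /*kernel_size=*/{2});  // kT = kH = kW = 2
  // The one-element and three-element forms should agree exactly.
  TORCH_CHECK(std::get<0>(full).equal(std::get<0>(bcast)));
  TORCH_CHECK(std::get<1>(full).equal(std::get<1>(bcast)));
  return 0;
}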
aten/src/ATen/native/cuda/DilatedMaxPool2d.cu (14 changes: 6 additions & 8 deletions)

@@ -146,9 +146,8 @@ void max_pool2d_with_indices_out_cuda_template(
   checkAllSameGPU("max_pool2d_with_indices_out_cuda",
                   {output_arg, indices_arg, input_arg});
 
-  // XXX JIT: Pooling.cpp allows stride.empty().
-  // XXX IntegrationTest.MNIST: padding.size() == 1 && dilation.size() == 1.
-  TORCH_CHECK(kernel_size.size() == 2 &&
+  // #20866, #22032: Guarantee this for the official C++ API?
+  TORCH_CHECK((kernel_size.size() == 1 || kernel_size.size() == 2) &&
     (stride.empty() || stride.size() == 2) &&
     (padding.size() == 1 || padding.size() == 2) &&
     (dilation.size() == 1 || dilation.size() == 2),
@@ -158,7 +157,7 @@ void max_pool2d_with_indices_out_cuda_template(
     "non-empty 3D or 4D (batch mode) tensor expected for input");
 
   const int kH = safe_downcast<int, int64_t>(kernel_size[0]);
-  const int kW = safe_downcast<int, int64_t>(kernel_size[1]);
+  const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]);
 
   const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]);
   const int dW = stride.empty() ? kW : safe_downcast<int, int64_t>(stride[1]);
@@ -237,9 +236,8 @@ void max_pool2d_with_indices_backward_out_cuda_template(
   checkAllSameGPU("max_pool2d_with_indices_out_cuda",
                   {gradInput_arg, gradOutput_arg, input_arg, indices_arg});
 
-  // XXX JIT: Pooling.cpp allows stride.empty().
-  // XXX IntegrationTest.MNIST: padding.size() == 1 && dilation.size() == 1.
-  TORCH_CHECK(kernel_size.size() == 2 &&
+  // #20866, #22032: Guarantee this for the official C++ API?
+  TORCH_CHECK((kernel_size.size() == 1 || kernel_size.size() == 2) &&
     (stride.empty() || stride.size() == 2) &&
     (padding.size() == 1 || padding.size() == 2) &&
     (dilation.size() == 1 || dilation.size() == 2),
@@ -249,7 +247,7 @@ void max_pool2d_with_indices_backward_out_cuda_template(
     "non-empty 3D or 4D (batch mode) tensor expected for input");
 
   const int kH = safe_downcast<int, int64_t>(kernel_size[0]);
-  const int kW = safe_downcast<int, int64_t>(kernel_size[1]);
+  const int kW = kernel_size.size() == 1 ? kH : safe_downcast<int, int64_t>(kernel_size[1]);
 
   const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[0]);
   const int dW = stride.empty() ? kW : safe_downcast<int, int64_t>(stride[1]);
aten/src/ATen/native/cuda/DilatedMaxPool3d.cu (18 changes: 8 additions & 10 deletions)

@@ -290,9 +290,8 @@ void max_pool3d_with_indices_out_cuda_template(
   checkAllSameGPU("max_pool3d_with_indices_out_cuda",
                   {output_arg, indices_arg, input_arg});
 
-  // XXX [JIT] Pooling.cpp allows stride.empty().
-  // XXX [LIBTORCH] IntegrationTest.MNIST: padding.size() == 1 && dilation.size() == 1.
-  TORCH_CHECK(kernel_size.size() == 3 &&
+  // #20866, #22032: Guarantee this for the official C++ API?
+  TORCH_CHECK((kernel_size.size() == 1 || kernel_size.size() == 3) &&
     (stride.empty() || stride.size() == 3) &&
     (padding.size() == 1 || padding.size() == 3) &&
     (dilation.size() == 1 || dilation.size() == 3),
@@ -302,8 +301,8 @@ void max_pool3d_with_indices_out_cuda_template(
     "non-empty 4D or 5D (batch mode) tensor expected for input");
 
   const int kT = safe_downcast<int, int64_t>(kernel_size[0]);
-  const int kH = safe_downcast<int, int64_t>(kernel_size[1]);
-  const int kW = safe_downcast<int, int64_t>(kernel_size[2]);
+  const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]);
+  const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]);
 
   const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]);
   const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[1]);
@@ -395,9 +394,8 @@ void max_pool3d_with_indices_backward_out_cuda_template(
   checkAllSameGPU("max_pool3d_with_indices_backward_out_cuda",
                   {gradInput_arg, gradOutput_arg, input_arg, indices_arg});
 
-  // XXX [JIT] Pooling.cpp allows stride.empty().
-  // XXX [LIBTORCH] IntegrationTest.MNIST: padding.size() == 1 && dilation.size() == 1.
-  TORCH_CHECK(kernel_size.size() == 3 &&
+  // #20866, #22032: Guarantee this for the official C++ API?
+  TORCH_CHECK((kernel_size.size() == 1 || kernel_size.size() == 3) &&
     (stride.empty() || stride.size() == 3) &&
     (padding.size() == 1 || padding.size() == 3) &&
     (dilation.size() == 1 || dilation.size() == 3),
@@ -414,8 +412,8 @@ void max_pool3d_with_indices_backward_out_cuda_template(
   gradInput.zero_();
 
   const int kT = safe_downcast<int, int64_t>(kernel_size[0]);
-  const int kH = safe_downcast<int, int64_t>(kernel_size[1]);
-  const int kW = safe_downcast<int, int64_t>(kernel_size[2]);
+  const int kH = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[1]);
+  const int kW = kernel_size.size() == 1 ? kT : safe_downcast<int, int64_t>(kernel_size[2]);
 
   const int dT = stride.empty() ? kT : safe_downcast<int, int64_t>(stride[0]);
   const int dH = stride.empty() ? kH : safe_downcast<int, int64_t>(stride[1]);
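The CUDA templates receive the identical relaxation, so the one-element form should behave the same on GPU as on CPU. A sketch assuming a CUDA-enabled build of this patch (the device guard only keeps it runnable on CPU-only machines):

#include <ATen/ATen.h>

int main() {
  if (!at::hasCUDA()) return 0;  // skip on CPU-only builds
  at::Tensor input = at::randn({1, 3, 32, 32}, at::kCUDA);
  auto full  = at::max_pool2d_with_indices(input, /*kernel_size=*/{2, 2});
  auto bcast = at::max_pool2d_with_indices(input, /*kernel_size=*/{2});
  TORCH_CHECK(std::get<0>(full).equal(std::get<0>(bcast)),
              "CUDA results should match for both call forms");
  return 0;
}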