Commit ab1d30e

Update on "not for land: Adding a version serialization type to ConvPackedParam"
Note: this is a debug copy of #40003 [ghstack-poisoned]
1 parent: b7ffc31

2 files changed: 24 additions, 21 deletions
aten/src/ATen/native/quantized/cpu/fbgemm_utils.cpp (2 additions, 1 deletion)

@@ -278,8 +278,9 @@ CAFFE2_API torch::class_<ConvPackedParamsBase<kSpatialDim>> register_conv_params
     params_vec.push_back(dilation[0].item<int64_t>());
   }
   params_vec.push_back(groups[0].item<int64_t>());
+  int64_t vec_size = params_vec.size();
   at::Tensor params_tensor = at::from_blob(
-      params_vec.data(), {params_vec.size()},
+      params_vec.data(), {vec_size},
       at::TensorOptions().dtype(at::kLong));
 
   // non_optional.emplace_back(std::move(params_tensor));
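The new vec_size temporary most likely works around a compile error: std::vector::size() returns size_t, and an unsigned value inside the braced shape list {params_vec.size()} is a narrowing conversion to int64_t that stricter compilers reject, whereas hoisting it into an explicit int64_t local is accepted. For readers following along from the Python side, a rough analog of what this serialization code does — flattening the conv parameters into a single int64 tensor — is sketched below; the parameter values and layout are illustrative, not the exact ConvPackedParams format.

import torch

# Illustrative conv parameters; the real code pulls these out of a packed
# ConvPackedParamsBase<kSpatialDim> object.
stride, padding, dilation, groups = [1, 1], [0, 0], [1, 1], 1

params_vec = []
params_vec.extend(stride)
params_vec.extend(padding)
params_vec.extend(dilation)  # mirrors params_vec.push_back(dilation[0].item<int64_t>())
params_vec.append(groups)    # mirrors params_vec.push_back(groups[0].item<int64_t>())

# at::from_blob(params_vec.data(), {vec_size}, at::kLong) corresponds roughly to:
params_tensor = torch.tensor(params_vec, dtype=torch.long)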

test/quantization/test_quantize_jit.py (22 additions, 20 deletions)
@@ -1609,15 +1609,17 @@ def forward(self, x, y):
                  (NonQuantizedInplaceAdd(), False)]:
             for tracing in [True, False]:
                 op = "quantized::add" if quantized else "aten::add"
-                m = self.checkGraphModeOp(m, data, op, tracing)
+                # TODO before land: investigate and fix more properly
+                # context: https://app.circleci.com/pipelines/github/pytorch/pytorch/203747/workflows/f6f8e383-4dcd-4e21-8f1e-af0dc5f6cdb3/jobs/6894794
+                m2 = self.checkGraphModeOp(m, data, op, tracing)
                 # TODO: remove after refactor of checkGraphModeOp
                 if quantized:
                     FileCheck().check_not("aten::add") \
                                .check_not("aten::add_") \
-                               .run(m.graph)
+                               .run(m2.graph)
                 else:
                     FileCheck().check_not("quantized::add") \
-                               .run(m.graph)
+                               .run(m2.graph)
 
     @skipIfNoFBGEMM
     def test_quantized_add_scalar(self):
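Every hunk below repeats the same rename, m to m2. The hedged reading (the TODO above says the root cause is still under investigation) is that assigning checkGraphModeOp's result back to m rebinds the loop variable, so the tracing=False pass of the inner loop receives an already-processed module instead of the original; a fresh name side-steps that. A minimal sketch of the difference, where transform and the module names are hypothetical stand-ins for checkGraphModeOp and the test modules:

def transform(module):
    # Hypothetical stand-in for checkGraphModeOp: returns a new object.
    return module + "-checked"

# Rebinding the loop variable: the second inner iteration sees the
# already-transformed value.
for m in ["AddModule", "MulModule"]:
    for tracing in [True, False]:
        m = transform(m)
        print(m)   # AddModule-checked, AddModule-checked-checked, ...

# A fresh name leaves m untouched, so each inner iteration starts from the
# original module.
for m in ["AddModule", "MulModule"]:
    for tracing in [True, False]:
        m2 = transform(m)
        print(m2)  # AddModule-checked on both iterations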
@@ -1664,15 +1666,15 @@ def forward(self, x):
                 op = "quantized::add_scalar" if quantized else "aten::add"
                 # we don't check the numerical consistency for add_scalar
                 # since it's not supported
-                m = self.checkGraphModeOp(m, data, op, tracing, check=False)
+                m2 = self.checkGraphModeOp(m, data, op, tracing, check=False)
                 # TODO: remove after refactor of checkGraphModeOp
                 if quantized:
                     FileCheck().check_not("aten::add") \
                                .check_not("aten::add_") \
-                               .run(m.graph)
+                               .run(m2.graph)
                 else:
                     FileCheck().check_not("quantized::add_scalar") \
-                               .run(m.graph)
+                               .run(m2.graph)
 
     @skipIfNoFBGEMM
     def test_quantized_add_relu(self):
@@ -1757,14 +1759,14 @@ def forward(self, x, y):
                   AddFunctionalRelu(), InplaceAddFunctionalRelu(),
                   AddInplaceFunctionalRelu(), InplaceAddInplaceFunctionalRelu()]:
             for tracing in [True, False]:
-                m = self.checkGraphModeOp(m, data, "quantized::add_relu(", tracing)
+                m2 = self.checkGraphModeOp(m, data, "quantized::add_relu(", tracing)
                 FileCheck().check_not("aten::add(") \
                            .check_not("aten::add_(") \
                            .check_not("aten::relu(") \
                            .check_not("aten::relu_(") \
                            .check_not("quantized::add(") \
                            .check_not("quantized::relu(") \
-                           .run(m.graph)
+                           .run(m2.graph)
 
     @skipIfNoFBGEMM
     def test_quantized_add_scalar_relu(self):
@@ -1837,14 +1839,14 @@ def forward(self, x):
             for tracing in [True, False]:
                 # quantized::add_scalar_relu or quantized::add_scalar_relu_out
                 # TODO: split this after refactor of checkGraphModeOp
-                m = self.checkGraphModeOp(m, data, "quantized::add_scalar_relu", tracing, check=False)
+                m2 = self.checkGraphModeOp(m, data, "quantized::add_scalar_relu", tracing, check=False)
                 FileCheck().check_not("aten::add(") \
                            .check_not("aten::add_(") \
                            .check_not("aten::relu(") \
                            .check_not("aten::relu_(") \
                            .check_not("quantized::add_scalar(") \
                            .check_not("quantized::relu(") \
-                           .run(m.graph)
+                           .run(m2.graph)
 
     @skipIfNoFBGEMM
     def test_quantized_cat(self):
@@ -1991,15 +1993,15 @@ def forward(self, x, y):
                  (NonQuantizedInplaceMul(), False)]:
             for tracing in [True, False]:
                 op = "quantized::mul" if quantized else "aten::mul"
-                m = self.checkGraphModeOp(m, data, op, tracing)
+                m2 = self.checkGraphModeOp(m, data, op, tracing)
                 # TODO: remove after refactor of checkGraphModeOp
                 if quantized:
                     FileCheck().check_not("aten::mul") \
                                .check_not("aten::mul_") \
-                               .run(m.graph)
+                               .run(m2.graph)
                 else:
                     FileCheck().check_not("quantized::mul") \
-                               .run(m.graph)
+                               .run(m2.graph)
 
     @skipIfNoFBGEMM
     def test_quantized_mul_scalar(self):
@@ -2046,15 +2048,15 @@ def forward(self, x):
                 op = "quantized::mul_scalar" if quantized else "aten::mul"
                 # we don't check the numerical consistency for add_scalar
                 # since it's not supported
-                m = self.checkGraphModeOp(m, data, op, tracing, check=False)
+                m2 = self.checkGraphModeOp(m, data, op, tracing, check=False)
                 # TODO: remove after refactor of checkGraphModeOp
                 if quantized:
                     FileCheck().check_not("aten::mul") \
                                .check_not("aten::mul_") \
-                               .run(m.graph)
+                               .run(m2.graph)
                 else:
                     FileCheck().check_not("quantized::mul_scalar") \
-                               .run(m.graph)
+                               .run(m2.graph)
 
     @skipIfNoFBGEMM
     def test_quantized_mul_relu(self):
@@ -2139,14 +2141,14 @@ def forward(self, x, y):
                   MulFunctionalRelu(), InplaceMulFunctionalRelu(),
                   MulInplaceFunctionalRelu(), InplaceMulInplaceFunctionalRelu()]:
             for tracing in [True, False]:
-                m = self.checkGraphModeOp(m, data, "quantized::mul_relu(", tracing)
+                m2 = self.checkGraphModeOp(m, data, "quantized::mul_relu(", tracing)
                 FileCheck().check_not("aten::mul(") \
                            .check_not("aten::mul_(") \
                            .check_not("aten::relu(") \
                            .check_not("aten::relu_(") \
                            .check_not("quantized::mul(") \
                            .check_not("quantized::relu(") \
-                           .run(m.graph)
+                           .run(m2.graph)
 
     @skipIfNoFBGEMM
     def test_quantized_mul_scalar_relu(self):
@@ -2218,14 +2220,14 @@ def forward(self, x):
                   InplaceMulScalarInplaceFunctionalRelu()]:
             for tracing in [True, False]:
                 # quantized::mul_scalar_relu or quantized::mul_scalar_relu_out
-                m = self.checkGraphModeOp(m, data, "quantized::mul_scalar_relu", tracing, check=False)
+                m2 = self.checkGraphModeOp(m, data, "quantized::mul_scalar_relu", tracing, check=False)
                 FileCheck().check_not("aten::mul(") \
                            .check_not("aten::mul_(") \
                            .check_not("aten::relu(") \
                            .check_not("aten::relu_(") \
                            .check_not("quantized::mul_scalar(") \
                            .check_not("quantized::relu(") \
-                           .run(m.graph)
+                           .run(m2.graph)
 
     def test_hardswish(self):
         class FunctionalHardswish(torch.nn.Module):
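All of these assertions share one FileCheck idiom: chain check_not patterns and run them against the module's TorchScript graph. A self-contained sketch of that usage outside the quantization harness (the module M is made up for illustration):

import torch
from torch.testing import FileCheck

class M(torch.nn.Module):
    def forward(self, x):
        return torch.relu(x + x)

m = torch.jit.script(M())
# check() asserts the pattern appears in the graph IR (in order);
# check_not() asserts it never does. run() raises if any check fails.
FileCheck().check("aten::add") \
           .check("aten::relu") \
           .check_not("quantized::add") \
           .run(m.graph)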
