@@ -1609,15 +1609,17 @@ def forward(self, x, y):
                      (NonQuantizedInplaceAdd(), False)]:
             for tracing in [True, False]:
                 op = "quantized::add" if quantized else "aten::add"
-                m = self.checkGraphModeOp(m, data, op, tracing)
+                # TODO before land: investigate and fix more properly
+                # context: https://app.circleci.com/pipelines/github/pytorch/pytorch/203747/workflows/f6f8e383-4dcd-4e21-8f1e-af0dc5f6cdb3/jobs/6894794
+                m2 = self.checkGraphModeOp(m, data, op, tracing)
                 # TODO: remove after refactor of checkGraphModeOp
                 if quantized:
                     FileCheck().check_not("aten::add") \
                                .check_not("aten::add_") \
-                               .run(m.graph)
+                               .run(m2.graph)
                 else:
                     FileCheck().check_not("quantized::add") \
-                               .run(m.graph)
+                               .run(m2.graph)
 
     @skipIfNoFBGEMM
     def test_quantized_add_scalar(self):
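
For context on the FileCheck assertions used throughout this diff: FileCheck
(from torch.testing) is PyTorch's pattern-matching test utility; check(s)
fails unless the string s appears in the text passed to run(), and
check_not(s) fails if it does. A minimal self-contained sketch, not part of
this commit (the function f below is made up for illustration):

    import torch
    from torch.testing import FileCheck

    @torch.jit.script
    def f(x, y):
        return x + y

    # Passes: the scripted graph contains an aten::add node...
    FileCheck().check("aten::add").run(f.graph)
    # ...and no quantized::add node.
    FileCheck().check_not("quantized::add").run(f.graph)
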
@@ -1664,15 +1666,15 @@ def forward(self, x):
                 op = "quantized::add_scalar" if quantized else "aten::add"
                 # we don't check the numerical consistency for add_scalar
                 # since it's not supported
-                m = self.checkGraphModeOp(m, data, op, tracing, check=False)
+                m2 = self.checkGraphModeOp(m, data, op, tracing, check=False)
                 # TODO: remove after refactor of checkGraphModeOp
                 if quantized:
                     FileCheck().check_not("aten::add") \
                                .check_not("aten::add_") \
-                               .run(m.graph)
+                               .run(m2.graph)
                 else:
                     FileCheck().check_not("quantized::add_scalar") \
-                               .run(m.graph)
+                               .run(m2.graph)
 
     @skipIfNoFBGEMM
     def test_quantized_add_relu(self):
@@ -1757,14 +1759,14 @@ def forward(self, x, y):
                   AddFunctionalRelu(), InplaceAddFunctionalRelu(),
                   AddInplaceFunctionalRelu(), InplaceAddInplaceFunctionalRelu()]:
             for tracing in [True, False]:
-                m = self.checkGraphModeOp(m, data, "quantized::add_relu(", tracing)
+                m2 = self.checkGraphModeOp(m, data, "quantized::add_relu(", tracing)
                 FileCheck().check_not("aten::add(") \
                            .check_not("aten::add_(") \
                            .check_not("aten::relu(") \
                            .check_not("aten::relu_(") \
                            .check_not("quantized::add(") \
                            .check_not("quantized::relu(") \
-                           .run(m.graph)
+                           .run(m2.graph)
 
     @skipIfNoFBGEMM
     def test_quantized_add_scalar_relu(self):
@@ -1837,14 +1839,14 @@ def forward(self, x):
             for tracing in [True, False]:
                 # quantized::add_scalar_relu or quantized::add_scalar_relu_out
                 # TODO: split this after refactor of checkGraphModeOp
-                m = self.checkGraphModeOp(m, data, "quantized::add_scalar_relu", tracing, check=False)
+                m2 = self.checkGraphModeOp(m, data, "quantized::add_scalar_relu", tracing, check=False)
                 FileCheck().check_not("aten::add(") \
                            .check_not("aten::add_(") \
                            .check_not("aten::relu(") \
                            .check_not("aten::relu_(") \
                            .check_not("quantized::add_scalar(") \
                            .check_not("quantized::relu(") \
-                           .run(m.graph)
+                           .run(m2.graph)
 
     @skipIfNoFBGEMM
     def test_quantized_cat(self):
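
A note on checkGraphModeOp, the helper every hunk touches: its real body
lives in the quantization test utilities, so the outline below is only a
hypothetical reconstruction inferred from the call sites, with made-up
stand-in helpers (quantize_graph_mode, assert_numerics_match):

    def checkGraphModeOp(self, module, data, quantized_op, tracing, check=True):
        # script or trace the eager module (data: example inputs, assumption)
        scripted = torch.jit.trace(module, data[0]) if tracing \
            else torch.jit.script(module)
        converted = quantize_graph_mode(scripted)       # stand-in pass
        # the op named by the caller must appear in the converted graph
        FileCheck().check(quantized_op).run(converted.graph)
        if check:
            assert_numerics_match(module, converted, data)  # stand-in check
        return converted

The key detail for this commit is the return value: callers get back the
converted model and then run their own FileCheck assertions on its graph.
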
@@ -1991,15 +1993,15 @@ def forward(self, x, y):
                      (NonQuantizedInplaceMul(), False)]:
             for tracing in [True, False]:
                 op = "quantized::mul" if quantized else "aten::mul"
-                m = self.checkGraphModeOp(m, data, op, tracing)
+                m2 = self.checkGraphModeOp(m, data, op, tracing)
                 # TODO: remove after refactor of checkGraphModeOp
                 if quantized:
                     FileCheck().check_not("aten::mul") \
                                .check_not("aten::mul_") \
-                               .run(m.graph)
+                               .run(m2.graph)
                 else:
                     FileCheck().check_not("quantized::mul") \
-                               .run(m.graph)
+                               .run(m2.graph)
 
     @skipIfNoFBGEMM
     def test_quantized_mul_scalar(self):
@@ -2046,15 +2048,15 @@ def forward(self, x):
                 op = "quantized::mul_scalar" if quantized else "aten::mul"
                 # we don't check the numerical consistency for add_scalar
                 # since it's not supported
-                m = self.checkGraphModeOp(m, data, op, tracing, check=False)
+                m2 = self.checkGraphModeOp(m, data, op, tracing, check=False)
                 # TODO: remove after refactor of checkGraphModeOp
                 if quantized:
                     FileCheck().check_not("aten::mul") \
                                .check_not("aten::mul_") \
-                               .run(m.graph)
+                               .run(m2.graph)
                 else:
                     FileCheck().check_not("quantized::mul_scalar") \
-                               .run(m.graph)
+                               .run(m2.graph)
 
     @skipIfNoFBGEMM
     def test_quantized_mul_relu(self):
@@ -2139,14 +2141,14 @@ def forward(self, x, y):
                   MulFunctionalRelu(), InplaceMulFunctionalRelu(),
                   MulInplaceFunctionalRelu(), InplaceMulInplaceFunctionalRelu()]:
             for tracing in [True, False]:
-                m = self.checkGraphModeOp(m, data, "quantized::mul_relu(", tracing)
+                m2 = self.checkGraphModeOp(m, data, "quantized::mul_relu(", tracing)
                 FileCheck().check_not("aten::mul(") \
                            .check_not("aten::mul_(") \
                            .check_not("aten::relu(") \
                            .check_not("aten::relu_(") \
                            .check_not("quantized::mul(") \
                            .check_not("quantized::relu(") \
-                           .run(m.graph)
+                           .run(m2.graph)
 
     @skipIfNoFBGEMM
     def test_quantized_mul_scalar_relu(self):
@@ -2218,14 +2220,14 @@ def forward(self, x):
                   InplaceMulScalarInplaceFunctionalRelu()]:
             for tracing in [True, False]:
                 # quantized::mul_scalar_relu or quantized::mul_scalar_relu_out
-                m = self.checkGraphModeOp(m, data, "quantized::mul_scalar_relu", tracing, check=False)
+                m2 = self.checkGraphModeOp(m, data, "quantized::mul_scalar_relu", tracing, check=False)
                 FileCheck().check_not("aten::mul(") \
                            .check_not("aten::mul_(") \
                            .check_not("aten::relu(") \
                            .check_not("aten::relu_(") \
                            .check_not("quantized::mul_scalar(") \
                            .check_not("quantized::relu(") \
-                           .run(m.graph)
+                           .run(m2.graph)
 
     def test_hardswish(self):
         class FunctionalHardswish(torch.nn.Module):
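
Why the rename from m to m2 matters here (the linked CI job is the
authoritative context; this is a reading of the loop structure): because
checkGraphModeOp returns a converted model, binding the result back to m
clobbers the loop variable, so the tracing=False iteration receives the
already-converted module instead of the original eager one. A standalone
reproduction of the shadowing pattern, with all names made up:

    def convert(model):              # stand-in for checkGraphModeOp
        return ("converted", model)

    # buggy: the second inner iteration converts an already-converted model
    for m in ["module_a", "module_b"]:
        for tracing in [True, False]:
            m = convert(m)
            print(m)

    # fixed: the converted model gets its own name; m stays the eager module
    for m in ["module_a", "module_b"]:
        for tracing in [True, False]:
            m2 = convert(m)
            print(m2)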