@@ -319,7 +319,6 @@ def is_inplace(op, variant):
 
 vjp_fail = {
     xfail('tensor_split'),  # data_ptr composite compliance
-    xfail('NumpyExpMarkDirtyAutogradFunction'),  # https://github.com/pytorch/pytorch/issues/90225
 }
 
 aliasing_ops = {
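
For context: NumpyExpMarkDirtyAutogradFunction (from autograd_function_db) exercises a custom torch.autograd.Function whose forward mutates its input in place and declares that mutation via ctx.mark_dirty. A minimal sketch of the pattern, assuming only what the name implies; ExpMarkDirty here is a hypothetical stand-in, not the actual test operator:

import torch

class ExpMarkDirty(torch.autograd.Function):
    # Hypothetical stand-in: in-place exp that reports its mutation to autograd.
    @staticmethod
    def forward(ctx, x):
        x.exp_()                   # mutate the input buffer in place
        ctx.mark_dirty(x)          # required whenever an input is modified in place
        ctx.save_for_backward(x)   # x now holds exp(x), which backward reuses
        return x

    @staticmethod
    def backward(ctx, grad_output):
        result, = ctx.saved_tensors
        return grad_output * result  # d(exp(x))/dx = exp(x)

# In-place mutation of a leaf that requires grad is disallowed, so clone first:
x = torch.randn(3, requires_grad=True)
y = ExpMarkDirty.apply(x.clone())
y.sum().backward()
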
@@ -462,7 +461,7 @@ def wrapped_fn(*args, **kwargs):
     xfail('nn.functional._scaled_dot_product_attention', device_type='cuda'),
 
     xfail('nn.functional.rrelu'),  # in-place test errors out with no formula implemented
-    xfail('NumpyExpMarkDirtyAutogradFunction'),  # https://github.com/pytorch/pytorch/issues/90225
+    xfail('NumpyExpMarkDirtyAutogradFunction'),  # TODO: https://github.com/pytorch/pytorch/issues/91280
 
     # --- Non-Contiguous Failures! ---
     # This is expected to fail as the operator
@@ -966,6 +965,7 @@ def test_vmapvjp(self, device, dtype, op):
     # skip because this is flaky depending on what the max_norm is!
     skip('nn.functional.embedding', ''),
     skip('to'),  # RuntimeError: required rank 4 tensor to use channels_last format
+    xfail('NumpyExpMarkDirtyAutogradFunction'),  # vmap: inplace into a regular tensor
     # ----------------------------------------------------------------------
 
     # ---------------------------- BUGS ------------------------------------
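
The comment on the new xfail quotes the vmap failure class: an in-place write whose destination is a plain (non-batched) tensor cannot be expressed under vmap, because each batch element would need its own copy of the destination. A rough repro of that general failure mode, assuming functorch-era imports (torch.func.vmap is the newer spelling); this is not the exact path the test takes:

import torch
from functorch import vmap

def f(x):
    # Under vmap, x is a BatchedTensor while buf is a regular tensor;
    # copy_ would need a distinct result per batch element, so vmap raises.
    buf = torch.zeros(3)
    buf.copy_(x)
    return buf

x = torch.randn(5, 3)
try:
    vmap(f)(x)
except RuntimeError as err:
    print(err)  # vmap rejects the in-place write into the regular tensor
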
@@ -1003,7 +1003,6 @@ def test_vmapvjp(self, device, dtype, op):
     xfail("_native_batch_norm_legit"),
 
     xfail('nn.functional.prelu'),
-    xfail('NumpyExpMarkDirtyAutogradFunction'),  # https://github.com/pytorch/pytorch/issues/90225
     # ----------------------------------------------------------------------
 }
 
@@ -1475,6 +1474,7 @@ def reference(primals, cotangents, primals_tangents, cotangents_tangents):
 
     # Not actually a problem
     xfail('NumpyCubeNotComposableAutogradFunction'),  # not composable
+    xfail('NumpyExpMarkDirtyAutogradFunction'),  # vmap: inplace into a regular tensor
 
     # Potential bugs/errors
     xfail('as_strided'),  # AssertionError: Tensor-likes are not close!
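
The test names in the hunks below compose transforms outermost-first (test_vmapvjpvmap roughly exercises vmap(vjp(vmap(op)))), which is why NumpyCubeNotComposableAutogradFunction stays xfailed throughout. A rough sketch of the simplest such composition, vmap over a vjp, assuming functorch-era imports and a toy op:

import torch
from functorch import vjp, vmap

def op(x):
    return x.sin()

x = torch.randn(3)
batched_cotangents = torch.randn(5, 3)  # five cotangents for one primal

out, vjp_fn = vjp(op, x)
grads, = vmap(vjp_fn)(batched_cotangents)  # shape (5, 3)
assert torch.allclose(grads, batched_cotangents * x.cos())
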
@@ -1948,7 +1948,6 @@ def f(x):
     @ops(autograd_function_db, allowed_dtypes=(torch.float32,))
     @skipOps('TestOperators', 'test_vmapvjpvmap', {
         xfail('NumpyCubeNotComposableAutogradFunction'),  # Not composable
-        xfail('NumpyExpMarkDirtyAutogradFunction'),  # https://github.com/pytorch/pytorch/issues/90225
     })
     def test_vmapvjpvmap(self, device, dtype, op):
         samples = op.sample_inputs(device, dtype, requires_grad=True)
@@ -1993,7 +1992,6 @@ def inner(primals, cotangents):
     @ops(autograd_function_db, allowed_dtypes=(torch.float32,))
     @skipOps('TestOperators', 'test_vjpvmapvmap', {
         xfail('NumpyCubeNotComposableAutogradFunction'),  # Not composable
-        xfail('NumpyExpMarkDirtyAutogradFunction'),  # https://github.com/pytorch/pytorch/issues/90225
     })
     def test_vjpvmapvmap(self, device, dtype, op):
         samples = op.sample_inputs(device, dtype, requires_grad=True)
@@ -2032,7 +2030,6 @@ def test_vjpvmapvmap(self, device, dtype, op):
     @ops(autograd_function_db, allowed_dtypes=(torch.float32,))
     @skipOps('TestOperators', 'test_vjpvjpvmap', {
         xfail('NumpyCubeNotComposableAutogradFunction'),  # Not composable
-        xfail('NumpyExpMarkDirtyAutogradFunction'),  # https://github.com/pytorch/pytorch/issues/90225
     })
     def test_vjpvjpvmap(self, device, dtype, op):
         samples = op.sample_inputs(device, dtype, requires_grad=True)
@@ -2063,7 +2060,6 @@ def test_vjpvjpvmap(self, device, dtype, op):
     @ops(autograd_function_db, allowed_dtypes=(torch.float32,))
     @skipOps('TestOperators', 'test_jvpvmap', {
         xfail('NumpyCubeNotComposableAutogradFunction'),  # Not composable
-        xfail('NumpyExpMarkDirtyAutogradFunction'),  # https://github.com/pytorch/pytorch/issues/90225
     })
     def test_jvpvmap(self, device, dtype, op):
         samples = op.sample_inputs(device, dtype, requires_grad=True)
@@ -2092,7 +2088,6 @@ def test_jvpvmap(self, device, dtype, op):
     @ops(autograd_function_db, allowed_dtypes=(torch.float32,))
     @skipOps('TestOperators', 'test_jvpvmapvmap', {
         xfail('NumpyCubeNotComposableAutogradFunction'),  # Not composable
-        xfail('NumpyExpMarkDirtyAutogradFunction'),  # https://github.com/pytorch/pytorch/issues/90225
     })
     def test_jvpvmapvmap(self, device, dtype, op):
         samples = op.sample_inputs(device, dtype, requires_grad=True)
@@ -2127,7 +2122,6 @@ def test_jvpvmapvmap(self, device, dtype, op):
     @ops(autograd_function_db, allowed_dtypes=(torch.float32,))
     @skipOps('TestOperators', 'test_vmapjvpvmap', {
         xfail('NumpyCubeNotComposableAutogradFunction'),  # Not composable
-        xfail('NumpyExpMarkDirtyAutogradFunction'),  # https://github.com/pytorch/pytorch/issues/90225
     })
     def test_vmapjvpvmap(self, device, dtype, op):
         samples = op.sample_inputs(device, dtype, requires_grad=True)
@@ -2163,7 +2157,6 @@ def test_vmapjvpvmap(self, device, dtype, op):
     @ops(autograd_function_db, allowed_dtypes=(torch.float32,))
     @skipOps('TestOperators', 'test_jvpjvpvmap', {
         xfail('NumpyCubeNotComposableAutogradFunction'),  # Not composable
-        xfail('NumpyExpMarkDirtyAutogradFunction'),  # https://github.com/pytorch/pytorch/issues/90225
     })
     def test_jvpjvpvmap(self, device, dtype, op):
         samples = op.sample_inputs(device, dtype, requires_grad=True)
@@ -2193,7 +2186,6 @@ def test_jvpjvpvmap(self, device, dtype, op):
     @ops(autograd_function_db, allowed_dtypes=(torch.float32,))
     @skipOps('TestOperators', 'test_jvpvjpvmap', {
         xfail('NumpyCubeNotComposableAutogradFunction'),  # Not composable
-        xfail('NumpyExpMarkDirtyAutogradFunction'),  # https://github.com/pytorch/pytorch/issues/90225
     })
     def test_jvpvjpvmap(self, device, dtype, op):
         samples = op.sample_inputs(device, dtype, requires_grad=True)