
Commit 42a6874

t-vi authored and ezyang committed
einsum: don't inplace modify arguments (fixes: #7763) (#7765)
Thank you, Pierce Freeman, for the report and minimal example!
1 parent fb23e62 commit 42a6874
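
For context, a minimal sketch (hypothetical repro, not the example from the report) of the property this commit restores: after the change, einsum no longer alters its argument tensors, so their shapes and version counters are left untouched by the call.

```python
import torch

# Hypothetical sketch: before this fix, einsum's preprocessing used
# unsqueeze_() and could modify its argument tensors in place; with the
# fix the inputs come back untouched.
x = torch.randn(5)
y = torch.randn(4)

out = torch.einsum('i,j->ij', (x, y))        # outer product, shape (5, 4)

assert x.shape == (5,) and y.shape == (4,)   # shapes are unchanged
assert x._version == 0 and y._version == 0   # no in-place ops touched the inputs
```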

2 files changed (+2, -1 lines changed)

aten/src/ATen/native/Linear.cpp (1 addition, 1 deletion)

@@ -299,7 +299,7 @@ Tensor einsum(std::string eqn, TensorList tensors) {
     // finally, we insert dimensions for idxes not in the operand
     for (size_t dim = 0; dim < idx_to_dim.size(); dim++) {
       if (idx_to_dim[dim] == -1) {
-        preprocessed_op.unsqueeze_(dim);
+        preprocessed_op = preprocessed_op.unsqueeze(dim);
       }
     }
     preprocessed_operands.push_back(preprocessed_op);
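
For readers less familiar with the naming convention: the trailing underscore marks the in-place variant, so `unsqueeze_` mutates the tensor it is called on while `unsqueeze` returns a new view and leaves the original alone. A small Python sketch of the same distinction (illustrative only, not part of the patch):

```python
import torch

t = torch.randn(3, 4)

v = t.unsqueeze(0)        # out-of-place: returns a new view of t
assert t.shape == (3, 4)  # t itself is unchanged
assert v.shape == (1, 3, 4)

t.unsqueeze_(0)           # in-place: mutates t's own shape
assert t.shape == (1, 3, 4)
# In-place ops are also what bump tensor._version, which is exactly what
# the new assertion in test_torch.py below checks stays at 0.
```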

test/test_torch.py (1 addition, 0 deletions)

@@ -1508,6 +1508,7 @@ def test_einsum(self):
             def do_einsum(*args):
                 return torch.einsum(test[0], args)
             self.assertTrue(torch.autograd.gradcheck(do_einsum, test[1:]))
+            self.assertTrue(A._version == 0)  # check that we do not use inplace ops
 
     def test_sum_all(self):
         def check_sum_all(tensor):
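
The new assertion leans on autograd's tensor version counter: in-place operations on a tensor bump `tensor._version` (freshly created tensors start at 0), so a counter that is still 0 after the einsum/gradcheck round trip means none of the arguments were modified in place. A short illustration of the counter itself (variable names are made up for the example):

```python
import torch

a = torch.randn(3)
assert a._version == 0   # freshly created tensors start at version 0

a.add_(1)                # an in-place op bumps the counter
assert a._version == 1

b = a + 1                # out-of-place ops allocate a new result and
assert a._version == 1   # leave the original's counter untouched
```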
