Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions test/test_jit.py
Original file line number Diff line number Diff line change
Expand Up @@ -3863,12 +3863,17 @@ def func(a, b):
def func2(a, b, c, d):
return c + a ** b ** d

def func3(a, b):
    # type: (int, float) -> float
    # TorchScript type comment above must stay directly under the signature;
    # it declares an int base with a float exponent so checkScript exercises
    # the mixed int/float aten::pow overload (including a negative exponent
    # via the (4, -0.5) call site below).
    return a ** b

a = torch.rand(1, requires_grad=True)
b = torch.rand(1, requires_grad=True)
c = torch.rand(1, requires_grad=True)
d = torch.rand(1, requires_grad=True)
self.checkScript(func, (a, b), optimize=True)
self.checkScript(func2, (a, b, c, d), optimize=True)
self.checkScript(func3, (4, -0.5), optimize=True)

@unittest.skipIf(not RUN_CUDA, "device tests require CUDA")
def test_pow_scalar_backward_cuda(self):
Expand Down
20 changes: 1 addition & 19 deletions torch/csrc/jit/register_prim_ops.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1962,7 +1962,7 @@ RegisterOperators reg2({
DEFINE_BINARY_OP(aten::add, a + b),
DEFINE_BINARY_OP(aten::sub, a - b),
DEFINE_BINARY_OP(aten::mul, a* b),
DEFINE_BINARY_OP(aten::pow, static_cast<decltype(a)>(pow(a, b))),
DEFINE_BINARY_OP(aten::pow, pow(a, b)),
// min and max are in prim:: because there is a difference between
// the python builtin 'min' and 'torch.min'
DEFINE_BINARY_OP(prim::min, a < b ? a : b),
Expand Down Expand Up @@ -2037,24 +2037,6 @@ RegisterOperators reg2({
return 0;
}),

Operator(
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why do you get to remove this?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

DEFINE_BINARY_OP already emits the int/float mixed ops (via DEFINE_INT_FLOAT_OP here), so this explicit registration is no longer necessary. It was left over from a merge conflict that we were not aware of before.

"aten::pow(float a, float b) -> float",
[](Stack& stack) {
double a, b;
pop(stack, a, b);
push(stack, std::pow(a, b));
return 0;
}),
Operator(
"aten::pow(float a, int b) -> float",
[](Stack& stack) {
double a;
int b;
pop(stack, a, b);
push(stack, std::pow(a, b));
return 0;
}),

Operator(
"aten::floor(float a) -> float",
[](Stack& stack) {
Expand Down