Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions aten/src/ATen/Declarations.cwrap
Original file line number Diff line number Diff line change
Expand Up @@ -1117,7 +1117,7 @@
- CPU
- CUDA
cname: logicalall
return: bool
return: real
arguments:
- THTensor* self
]]
Expand All @@ -1129,7 +1129,7 @@
- CPU
- CUDA
cname: logicalany
return: bool
return: real
arguments:
- THTensor* self
]]
Expand Down
2 changes: 1 addition & 1 deletion aten/src/ATen/native/LinearAlgebra.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ std::tuple<Tensor, Tensor, Tensor, Tensor> _det_with_svd(const Tensor& self) {
qr_det = -qr_det;
}
det = qr_det; // QR is more stable than svd, so use it anyways
if ((qr_det < 0).any() ^ (det < 0).any()) { // if different sign
if ((qr_det < 0).any().toCByte() ^ (det < 0).any().toCByte()) { // if different sign
u.narrow(1, 0, 1).mul_(-1);
sigma.narrow(0, 0, 1).mul_(-1);
}
Expand Down
2 changes: 1 addition & 1 deletion aten/src/ATen/native/TensorCompare.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ namespace at {
namespace native {

bool allclose(const Tensor& self, const Tensor& other, double rtol, double atol) {
if (!self.sub(other).abs().le(other.abs().mul(rtol).add(atol)).all()) {
if (!self.sub(other).abs().le(other.abs().mul(rtol).add(atol)).all().toCByte()) {
return false;
}

Expand Down
2 changes: 1 addition & 1 deletion tools/autograd/templates/Functions.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -254,7 +254,7 @@ Tensor cumprod_backward(const Tensor &grad, const Tensor &input, int64_t dim) {
}

// Simple case with nonzero elements in the input
if ((input != 0).all()) {
if ((input != 0).all().toCByte()) {
Tensor result = at::cumprod(input, dim);
return sum_scan_exclusive(result * grad, dim) / input;
}
Expand Down
4 changes: 2 additions & 2 deletions torch/lib/THD/master_worker/worker/dispatch/TensorMath.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -593,14 +593,14 @@ static void tensorLogicalall(rpc::RPCMessage& raw_message) {
at::Tensor tensor = unpackRetrieveTensor(raw_message);
finalize(raw_message);

int64_t response = tensor.all();
int64_t response = tensor.all().toCLong();
sendValueToMaster(response);
}

// Worker-side RPC handler for the logical-any reduction.
// Unpacks the target tensor from the message, reduces it with a
// logical OR over all elements, and ships the scalar result back
// to the master process.
static void tensorLogicalany(rpc::RPCMessage& raw_message) {
  at::Tensor tensor = unpackRetrieveTensor(raw_message);
  finalize(raw_message);

  // any() returns a 0-dim Tensor (not a bool/int), so it must be
  // converted explicitly to a C integer before going on the wire.
  int64_t response = tensor.any().toCLong();
  sendValueToMaster(response);
}