2 changes: 1 addition & 1 deletion test/test_autograd.py
@@ -1774,7 +1774,7 @@ def backward(self, grad_output):
self.assertEqual(x.grad.data, torch.ones(x.size()))

def test_set_grad_enabled(self):
- x = torch.tensor([1], requires_grad=True)
+ x = torch.tensor([1.], requires_grad=True)
with torch.set_grad_enabled(False):
y = x * 2
self.assertFalse(y.requires_grad)
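The switch from `[1]` to `[1.]` is needed because this change restricts `requires_grad` to floating-point tensors. A minimal sketch of the behavior the updated test relies on (the error text is the message introduced in `python_variables.h` further down):

```python
import torch

# Floating-point tensors may require gradients.
x = torch.tensor([1.], requires_grad=True)
print(x.requires_grad)  # True

# Integer tensors may not, so the old test input now fails at construction.
try:
    torch.tensor([1], requires_grad=True)
except RuntimeError as err:
    print(err)  # only Tensors of floating point dtype can require gradients
```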
4 changes: 3 additions & 1 deletion test/test_distributions.py
@@ -1877,7 +1877,9 @@ def test_cdf_log_prob(self):
for Dist, params in EXAMPLES:
for i, param in enumerate(params):
dist = Dist(**param)
- samples = torch.tensor(dist.sample().data, requires_grad=True)
+ samples = torch.tensor(dist.sample().data)
+ if samples.dtype.is_floating_point:
+     samples.requires_grad_()
try:
cdfs = dist.cdf(samples)
pdfs = dist.log_prob(samples).exp()
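The distributions test now turns gradients on only for floating-point samples, since integer-typed samples can no longer carry `requires_grad`. A small sketch of the same guard, using `torch.randn` and `torch.randint` purely as stand-ins for continuous and discrete samples:

```python
import torch

def maybe_requires_grad(samples):
    # Mirror the test's guard: only floating-point samples are marked as
    # requiring grad; integer samples are returned unchanged.
    if samples.dtype.is_floating_point:
        samples.requires_grad_()
    return samples

print(maybe_requires_grad(torch.randn(3)).requires_grad)             # True
print(maybe_requires_grad(torch.randint(0, 2, (3,))).requires_grad)  # False
```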
2 changes: 1 addition & 1 deletion test/test_jit.py
@@ -458,7 +458,7 @@ def forward(ctx, x):
def backward(ctx, go):
return go

- x = torch.tensor([0], requires_grad=True)
+ x = torch.tensor([0.], requires_grad=True)

def fn(x):
y = RegularFn.apply(x)
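Same fix as in `test_autograd.py`: the leaf fed to the custom `Function` must be floating point to require grad. A self-contained sketch in the spirit of the test; the `forward` body here is assumed, since the diff only shows `backward` returning `go`:

```python
import torch
from torch.autograd import Function

class RegularFn(Function):
    @staticmethod
    def forward(ctx, x):
        # Assumed body for illustration; not shown in the diff.
        return x + 1

    @staticmethod
    def backward(ctx, go):
        return go

x = torch.tensor([0.], requires_grad=True)  # must be floating point to require grad
y = RegularFn.apply(x)
y.backward(torch.ones_like(y))
print(x.grad)  # tensor([1.])
```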
2 changes: 1 addition & 1 deletion test/test_nn.py
@@ -2046,7 +2046,7 @@ def test_pad(self):
lambda: F.pad(torch.randn(1, 1, 2), (2, 1), mode='reflect'))

def test_pad_scalar_error(self):
- inputs = torch.tensor(0, requires_grad=True)
+ inputs = torch.tensor(0., requires_grad=True)
self.assertRaises(AssertionError, lambda: F.pad(inputs, (1, 1)))
self.assertRaises(AssertionError, lambda: F.pad(inputs, (1,)))

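Zero-dim tensors follow the same rule, so the scalar input is built as a float. A rough sketch mirroring the assertion the test keeps; note the test pins `AssertionError`, while other versions of `F.pad` may reject zero-dim inputs with a different error type:

```python
import torch
import torch.nn.functional as F

# The scalar must be a float to carry requires_grad; F.pad still rejects
# zero-dim inputs either way.
inputs = torch.tensor(0., requires_grad=True)
try:
    F.pad(inputs, (1, 1))
except (AssertionError, RuntimeError) as err:
    print("zero-dim input rejected:", err)
```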
18 changes: 7 additions & 11 deletions test/test_torch.py
@@ -1903,8 +1903,8 @@ def get_int64_dtype(dtype):
check_value(torch.empty(shape, out=out, device=device, layout=layout, requires_grad=rg),
dtype, layout, device, None, rg)
check_value(v.new_empty(shape), dtype, layout, device, None, False)
- check_value(v.new_empty(shape, dtype=int64_dtype, device=device, requires_grad=rg),
-             int64_dtype, layout, device, None, rg)
+ check_value(v.new_empty(shape, dtype=int64_dtype, device=device, requires_grad=False),
+             int64_dtype, layout, device, None, False)
check_value(torch.empty_like(v), dtype, layout, device, None, False)
check_value(torch.empty_like(v, dtype=int64_dtype, layout=layout, device=device, requires_grad=False),
int64_dtype, layout, device, None, False)
@@ -1917,8 +1917,8 @@ def get_int64_dtype(dtype):
out = v.new()
check_value(torch.full(shape, fv + 2, out=out, device=device, layout=layout, requires_grad=rg),
dtype, layout, device, fv + 2, rg)
- check_value(v.new_full(shape, fv + 3, dtype=int64_dtype, device=device, requires_grad=rg),
-             int64_dtype, layout, device, fv + 3, rg)
+ check_value(v.new_full(shape, fv + 3, dtype=int64_dtype, device=device, requires_grad=False),
+             int64_dtype, layout, device, fv + 3, False)
check_value(torch.full_like(v, fv + 4), dtype, layout, device, fv + 4, False)
check_value(torch.full_like(v, fv + 5,
dtype=int64_dtype, layout=layout, device=device, requires_grad=False),
@@ -2697,12 +2697,12 @@ def test_contiguous(self):

def test_scalars_as_floats(self):
"zero-dim variables that don't require grad should bind to scalar arguments"
- x = torch.tensor(2)
- y = torch.tensor(3)
+ x = torch.tensor(2.)
+ y = torch.tensor(3.)
# 3 + (3 * 3) * 2
self.assertEqual(y.addcmul(y, y, value=x), 21)

- x = torch.tensor(2, requires_grad=True)
+ x = torch.tensor(2., requires_grad=True)
self.assertRaises(Exception, lambda: y.addcmul(y, y, value=x))

@staticmethod
Expand Down Expand Up @@ -6123,8 +6123,6 @@ def test_parsing_int64(self):
self.assertEqual(x, torch.cumsum(torch.ones(5, 5), torch.tensor(0)))
# doesn't accept floating point variables
self.assertRaises(TypeError, lambda: torch.cumsum(torch.ones(5, 5), torch.tensor(0.)))
- # doesn't accept variables with requires_grad
- self.assertRaises(TypeError, lambda: torch.cumsum(torch.ones(5, 5), torch.tensor(0, requires_grad=True)))

def test_parsing_double(self):
# accepts floating point and integer arguments
@@ -6136,8 +6134,6 @@ def test_parsing_double(self):
self.assertTrue(torch.isclose(x, x, torch.tensor(1), torch.tensor(1)).all())
self.assertTrue(torch.isclose(x, x, torch.tensor(1.5), torch.tensor(1.)).all())
# doesn't accept variables with requires_grad
- self.assertRaises(TypeError,
-                   lambda: torch.isclose(x, x, torch.tensor(1, requires_grad=True), torch.tensor(1)).all())
self.assertRaises(TypeError,
lambda: torch.isclose(x, x, torch.tensor(1.5), torch.tensor(1., requires_grad=True)).all())

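The `new_empty`/`new_full` checks stop requesting `requires_grad=rg` for the `int64_dtype` variants, and the `cumsum`/`isclose` parsing tests for integer tensors with `requires_grad` are dropped, because such tensors can no longer be constructed at all. A short sketch of the factory-side consequence, with `new_full` chosen just as an example:

```python
import torch

v = torch.ones(3)

# Requesting grad together with an integer dtype is now an error ...
try:
    v.new_full((2, 2), 7, dtype=torch.int64, requires_grad=True)
except RuntimeError as err:
    print(err)

# ... so the int64 variants now pass requires_grad=False, while the
# floating-point variants keep exercising requires_grad=True.
t = v.new_full((2, 2), 7.0, requires_grad=True)
print(t.dtype, t.requires_grad)  # torch.float32 True
```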
11 changes: 3 additions & 8 deletions tools/autograd/templates/python_torch_functions.cpp
@@ -12,6 +12,7 @@
#include "torch/csrc/DynamicTypes.h"
#include "torch/csrc/Exceptions.h"
#include "torch/csrc/autograd/python_variable.h"
#include "torch/csrc/autograd/utils/python_variables.h"
#include "torch/csrc/autograd/utils/wrap_outputs.h"
#include "torch/csrc/utils/python_arg_parser.h"
#include "torch/csrc/utils/tensor_new.h"
@@ -26,18 +27,12 @@ using at::Tensor;
using at::Scalar;
using at::ScalarType;
using at::Backend;
+ using torch::autograd::utils::set_requires_grad;

using namespace torch::autograd::utils;

namespace torch { namespace autograd {

- static Tensor set_requires_grad(Tensor self, bool requires_grad) {
-   if (requires_grad && !at::isFloatingType(self.type().scalarType())) {
-     throw std::runtime_error("only Tensors of floating point dtype can require gradients");
-   }
-   as_variable_ref(self).set_requires_grad(requires_grad);
-   return self;
- }

static void check_out_type_matches(Tensor result,
ScalarType scalarType, bool scalarType_is_none,
const THPLayout& layout, bool layout_is_none,
16 changes: 16 additions & 0 deletions torch/csrc/autograd/utils/python_variables.h
@@ -0,0 +1,16 @@
+ #pragma once
+
+ #include <ATen/ATen.h>
+ #include "torch/csrc/autograd/python_variable.h"
+
+ namespace torch { namespace autograd { namespace utils {
+
+ inline at::Tensor set_requires_grad(at::Tensor self, bool requires_grad) {
+   if (requires_grad && !at::isFloatingType(self.type().scalarType())) {
+     throw std::runtime_error("only Tensors of floating point dtype can require gradients");
+   }
+   as_variable_ref(self).set_requires_grad(requires_grad);
+   return self;
+ }
+
+ }}} // namespace torch::autograd::utils
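This header is now the single home of `set_requires_grad`, shared by the generated `torch.*` bindings and the tensor constructors. From Python the check surfaces as a `RuntimeError`; a minimal sketch, using `torch.ones` as an arbitrary factory:

```python
import torch

# Floating-point factory calls may request gradients ...
w = torch.ones(2, 3, requires_grad=True)
print(w.requires_grad)  # True

# ... while integer-dtype calls now hit the check in set_requires_grad.
try:
    torch.ones(2, 3, dtype=torch.int64, requires_grad=True)
except RuntimeError as err:
    print(err)  # only Tensors of floating point dtype can require gradients
```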
7 changes: 2 additions & 5 deletions torch/csrc/utils/tensor_new.cpp
@@ -9,6 +9,7 @@
#include "torch/csrc/Exceptions.h"
#include "torch/csrc/Size.h"
#include "torch/csrc/autograd/variable.h"
#include "torch/csrc/autograd/utils/python_variables.h"
#include "torch/csrc/utils/auto_gil.h"
#include "torch/csrc/utils/auto_gpu.h"
#include "torch/csrc/utils/cuda_lazy_init.h"
@@ -23,6 +24,7 @@
static const int MAX_DIMS = 128;

using namespace at;
+ using torch::autograd::utils::set_requires_grad;

namespace torch { namespace utils {

@@ -386,11 +388,6 @@ static const Type& typeWithDefault(PythonArgs& r, int64_t dtype_idx, int64_t dev
return torch::getType(scalartype, *torch::getLayout(type.backend()), device_type);
}

- static Tensor set_requires_grad(Tensor self, bool requires_grad) {
-   static_cast<torch::autograd::Variable&>(self).set_requires_grad(requires_grad);
-   return self;
- }

Tensor sparse_coo_tensor_ctor(const Type& type, PyObject* args, PyObject* kwargs) {
Backend sparse_backend = type.is_cuda() ? kSparseCUDA : kSparseCPU;
const auto& default_sparse_type = type.toBackend(sparse_backend);