16 changes: 8 additions & 8 deletions test/test_autograd.py
@@ -638,7 +638,7 @@ def backward(self, dy):
self.assertFalse(y.requires_grad)

def test_indexing(self):
- x = torch.arange(1, 17).view(4, 4)
+ x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)

def compare(x, y, idx, indexed_tensor, indexed_var):
@@ -681,7 +681,7 @@ def check_index(x, y, idx):
check_index(x, y, ([0]))
check_index(x, y, ([0], ))

- x = torch.arange(1, 49).view(4, 3, 4)
+ x = torch.arange(1., 49).view(4, 3, 4)
y = Variable(x, requires_grad=True)

check_index(x, y, (slice(None), [0], [0]))
@@ -717,7 +717,7 @@ def check_index(x, y, idx):
compare(x, y, seq, indexed_tensor, indexed_var)

def test_indexing_duplicates(self):
- x = torch.arange(1, 17).view(4, 4)
+ x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)

idx = torch.LongTensor([1, 1, 3, 2, 1, 2])
@@ -728,7 +728,7 @@ def test_indexing_duplicates(self):
self.assertEqual(y.grad.data, expected_grad)

# with advanced indexing
- x = torch.arange(1, 17).view(4, 4)
+ x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)

idx = [[1, 1, 3, 2, 1, 2], [0]]
@@ -740,7 +740,7 @@ def test_indexing_duplicates(self):

self.assertEqual(y.grad.data, expected_grad)

- x = torch.arange(1, 17).view(4, 4)
+ x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
idx = [[[1, 2], [0, 0]], [[0, 1], [1, 1]]]
y[idx].sum().backward()
@@ -750,7 +750,7 @@ def test_indexing_duplicates(self):
[0, 0, 0, 0]])
self.assertEqual(y.grad.data, expected_grad)

- x = torch.arange(1, 65).view(4, 4, 4)
+ x = torch.arange(1., 65).view(4, 4, 4)
y = Variable(x, requires_grad=True)

idx = [[1, 1, 1], slice(None), slice(None)]
@@ -1952,7 +1952,7 @@ def test_dir(self):
self.assertTrue(hasattr(x, key))

def test_as_strided(self):
- x = Variable(torch.arange(0, 25).view(5, 5), requires_grad=True)
+ x = Variable(torch.arange(0., 25).view(5, 5), requires_grad=True)

def as_strided(x):
return x.as_strided([3, 3], [6, 2], 2)
@@ -2253,7 +2253,7 @@ def make_nonzero_det(A, sign=None, min_singular_value=0.1):
def random_fullrank_matrix_distinct_singular_value(l):
A = torch.randn(l, l)
u, _, v = A.svd()
- s = torch.arange(1, l + 1).mul_(1.0 / (l + 1))
+ s = torch.arange(1., l + 1).mul_(1.0 / (l + 1))
return u.mm(torch.diag(s)).mm(v.t())


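Why these literals change: every tensor above is wrapped in a Variable with requires_grad=True, and with this PR torch.arange(1, 17) infers torch.int64 instead of the default float type, while autograd only supports floating-point tensors. A minimal sketch of the distinction, assuming the post-PR inference rules:

import torch

f = torch.arange(1., 17)  # any float argument -> default dtype (torch.float32)
i = torch.arange(1, 17)   # all-integer arguments -> torch.int64

f.requires_grad_(True)    # fine: floating-point tensor
# i.requires_grad_(True)  # raises: only floating-point tensors can require gradients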
4 changes: 2 additions & 2 deletions test/test_indexing.py
@@ -218,7 +218,7 @@ def test_int_assignment(self):
self.assertEqual(x.tolist(), [[0, 1], [5, 6]])

def test_byte_tensor_assignment(self):
- x = torch.arange(0, 16).view(4, 4)
+ x = torch.arange(0., 16).view(4, 4)
b = torch.ByteTensor([True, False, True, False])
value = torch.tensor([3., 4., 5., 6.])
x[b] = value
@@ -475,7 +475,7 @@ def test_index_is_larger(self):

def test_broadcast_subspace(self):
a = torch.zeros((100, 100))
- v = torch.arange(0, 100)[:, None]
+ v = torch.arange(0., 100)[:, None]
b = torch.arange(99, -1, -1).long()
a[b] = v
expected = b.double().unsqueeze(1).expand(100, 100)
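The same inference change drives these edits: indexed assignment expects the source and destination dtypes to match (as it does in later PyTorch releases), so writing float values into a tensor that now infers torch.int64 would no longer work. A minimal sketch under that assumption:

import torch

x = torch.arange(0., 16).view(4, 4)    # float destination
b = torch.ByteTensor([1, 0, 1, 0])     # mask selecting rows 0 and 2
x[b] = torch.tensor([3., 4., 5., 6.])  # ok: float source into float destination
# With torch.arange(0, 16) the destination would be torch.int64 and the
# float source would be rejected.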
6 changes: 3 additions & 3 deletions test/test_jit.py
@@ -1890,8 +1890,8 @@ def func(x, y):
w = -q
return w * w

- x = torch.arange(4, requires_grad=True)
- y = torch.arange(0, 8, 2, requires_grad=True)
+ x = torch.arange(4., requires_grad=True)
+ y = torch.arange(0., 8, 2, requires_grad=True)
self.checkScript(func, [x, y], optimize=True, capture_output=True)

def test_multiple_assignment(self):
@@ -2041,7 +2041,7 @@ def fn(x, slope):
c = F.prelu(x, slope)
return a, b, c

- x = torch.arange(-3, 4)
+ x = torch.arange(-3., 4)
slope = torch.tensor([0.5])
self.checkScript(fn, [x, slope], optimize=True)

10 changes: 5 additions & 5 deletions test/test_multiprocessing.py
@@ -92,7 +92,7 @@ def autograd_sharing(queue, ready, master_modified):
ready.set()
master_modified.wait()

- expected_var = torch.arange(1, 26).view(5, 5)
+ expected_var = torch.arange(1., 26).view(5, 5)
expected_var[0, 0] = 1000
is_ok = var.data.equal(expected_var)
var.data[:] = torch.ones(5, 5)
@@ -314,7 +314,7 @@ def test_cuda_small_tensors(self):
tensors = []
for i in range(5):
device = i % 2
- tensors += [torch.arange(i * 5, (i + 1) * 5).cuda(device)]
+ tensors += [torch.arange(i * 5., (i + 1) * 5).cuda(device)]

inq = ctx.Queue()
outq = ctx.Queue()
@@ -329,7 +329,7 @@ def test_cuda_small_tensors(self):

for i, tensor in enumerate(tensors):
v, device, tensor_size, storage_size = results[i]
- self.assertEqual(v, torch.arange(i * 5, (i + 1) * 5).sum())
+ self.assertEqual(v, torch.arange(i * 5., (i + 1) * 5).sum())
self.assertEqual(device, i % 2)
self.assertEqual(tensor_size, 5)
self.assertEqual(storage_size, 5)
@@ -412,12 +412,12 @@ def _test_autograd_sharing(self, var):

def test_variable_sharing(self):
for requires_grad in [True, False]:
- var = Variable(torch.arange(1, 26).view(5, 5),
+ var = Variable(torch.arange(1., 26).view(5, 5),
requires_grad=requires_grad)
self._test_autograd_sharing(var)

def test_parameter_sharing(self):
- param = Parameter(torch.arange(1, 26).view(5, 5))
+ param = Parameter(torch.arange(1., 26).view(5, 5))
self._test_autograd_sharing(param)

def test_empty_shared(self):
14 changes: 7 additions & 7 deletions test/test_nn.py
@@ -1237,7 +1237,7 @@ def compare_scaling(grads):
self.assertEqual(scale.std(), 0)
return scale[0]

- grads = torch.arange(1, 101).view(10, 10), torch.ones(10).div(1000)
+ grads = torch.arange(1., 101).view(10, 10), torch.ones(10).div(1000)
for norm_type in [0.5, 1.5, 2, 4, 'inf']:
for p, g in zip(l.parameters(), grads):
p._grad = Variable(g.clone().view_as(p.data))
@@ -1267,7 +1267,7 @@ def test_clip_grad_value(self):
l = nn.Linear(10, 10)
clip_value = 2.5

- grad_w, grad_b = torch.arange(-50, 50).view(10, 10).div_(5), torch.ones(10).mul_(2)
+ grad_w, grad_b = torch.arange(-50., 50).view(10, 10).div_(5), torch.ones(10).mul_(2)
for grad_list in [[grad_w, grad_b], [grad_w, None]]:
for p, g in zip(l.parameters(), grad_list):
p._grad = g.clone().view_as(p.data) if g is not None else g
@@ -1290,7 +1290,7 @@ def test_vector_to_parameters(self):
fc1 = nn.Linear(10, 20)
model = nn.Sequential(conv1, fc1)

- vec = Variable(torch.arange(0, 980))
+ vec = Variable(torch.arange(0., 980))
vector_to_parameters(vec, model.parameters())

sample = next(model.parameters())[0, 0, 0]
@@ -3191,10 +3191,10 @@ def pad(tensor, length):
max_length = lengths[0]
batch_sizes = [sum(map(bool, filter(lambda x: x >= i, lengths))) for i in range(1, max_length + 1)]
offset = 0
- padded = torch.cat([pad(i * 100 + torch.arange(1, 5 * l + 1).view(l, 1, 5), max_length)
+ padded = torch.cat([pad(i * 100 + torch.arange(1., 5 * l + 1).view(l, 1, 5), max_length)
for i, l in enumerate(lengths, 1)], 1)
padded = torch.tensor(padded, requires_grad=True)
- expected_data = [[torch.arange(1, 6) + (i + 1) * 100 + 5 * n for i in range(batch_size)]
+ expected_data = [[torch.arange(1., 6) + (i + 1) * 100 + 5 * n for i in range(batch_size)]
for n, batch_size in enumerate(batch_sizes)]
expected_data = list(itertools.chain.from_iterable(expected_data))
expected_data = torch.stack(expected_data, dim=0)
@@ -4320,7 +4320,7 @@ def test_shape(N, C, IH, IW, H, W, padding_mode):
# test known input on CPU
for padding_mode in ['zeros', 'border']:

- input = Variable(torch.arange(1, 11).view(1, 1, 2, 5))
+ input = Variable(torch.arange(1., 11).view(1, 1, 2, 5))
grid = Variable(torch.Tensor(
[[-0.9, -1.4, 0, 0.2, 1],
[-1, -0.333, 0, 0.5, 1],
@@ -4430,7 +4430,7 @@ def test_shape(N, C, ID, IH, IW, D, H, W, padding_mode):

def test_affine_grid(self):
# test known input on CPU
- input = Variable(torch.arange(1, 7).view(1, 2, 3))
+ input = Variable(torch.arange(1., 7).view(1, 2, 3))
output = F.affine_grid(input, torch.Size([1, 1, 2, 2]))
groundtruth = torch.Tensor(
[[[0, -3], [2, 5]], [[4, 7], [6, 15]]]).view(1, 2, 2, 2)
55 changes: 44 additions & 11 deletions test/test_torch.py
@@ -998,7 +998,7 @@ def test_remainder(self):
long_m1 = torch.LongTensor(10, 10).random_(-10, 10)
long_res1 = long_m1.clone()
long_res2 = long_m1.clone()
- long_qs = torch.arange(-5, 5).long()
+ long_qs = torch.arange(-5, 5)
long_qs[5] = 5 # Can't handle the divisor=0 case
for col_idx, long_q in enumerate(long_qs):
# Reference
@@ -2313,6 +2313,39 @@ def test_arange(self):
self.assertEqual(r1, r2, 0)
self.assertEqual(r2, r3[:-1], 0)

+ def test_arange_inference(self):
+ saved_dtype = torch.get_default_dtype()
+ torch.set_default_dtype(torch.float32)
+ # end only
+ self.assertIs(torch.float32, torch.arange(1.).dtype)
+ self.assertIs(torch.float32, torch.arange(torch.tensor(1.)).dtype)
+ self.assertIs(torch.float32, torch.arange(torch.tensor(1., dtype=torch.float64)).dtype)
+
+ self.assertIs(torch.int64, torch.arange(1).dtype)
+ self.assertIs(torch.int64, torch.arange(torch.tensor(1)).dtype)
+ self.assertIs(torch.int64, torch.arange(torch.tensor(1, dtype=torch.int16)).dtype)
+
+ # start, end, [step]
+ self.assertIs(torch.float32, torch.arange(1., 3).dtype)
+ self.assertIs(torch.float32, torch.arange(torch.tensor(1., dtype=torch.float64), 3).dtype)
+ self.assertIs(torch.float32, torch.arange(1, 3.).dtype)
+ self.assertIs(torch.float32, torch.arange(torch.tensor(1, dtype=torch.int16), torch.tensor(3.)).dtype)
+ self.assertIs(torch.float32, torch.arange(1, 3, 1.).dtype)
+ self.assertIs(torch.float32,
+ torch.arange(torch.tensor(1),
+ torch.tensor(3, dtype=torch.int16),
+ torch.tensor(1., dtype=torch.float64)).dtype)
+
+ self.assertIs(torch.int64, torch.arange(1, 3).dtype)
+ self.assertIs(torch.int64, torch.arange(torch.tensor(1), 3).dtype)
+ self.assertIs(torch.int64, torch.arange(torch.tensor(1), torch.tensor(3, dtype=torch.int16)).dtype)
+ self.assertIs(torch.int64, torch.arange(1, 3, 1).dtype)
+ self.assertIs(torch.int64,
+ torch.arange(torch.tensor(1),
+ torch.tensor(3),
+ torch.tensor(1, dtype=torch.int16)).dtype)
+ torch.set_default_dtype(saved_dtype)
+
@staticmethod
def _select_broadcastable_dims(dims_full=None):
# select full dimensionality
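The new test pins down the inference rule this PR introduces: arange returns the default (floating-point) dtype when any of start, end, or step is a float, given either as a Python number or as a tensor, and torch.int64 when all arguments are integral; a tensor argument's own dtype (float64, int16) does not override the result. This is also why test_remainder above can drop the explicit .long() cast. A condensed sketch of the rule, assuming the default dtype is torch.float32:

import torch

torch.arange(5).dtype                                   # torch.int64
torch.arange(5.).dtype                                  # torch.float32
torch.arange(0, 5, 1.).dtype                            # torch.float32: one float suffices
torch.arange(torch.tensor(5, dtype=torch.int16)).dtype  # torch.int64
torch.arange(torch.tensor(5., dtype=torch.float64)).dtype  # torch.float32, not float64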
@@ -2883,7 +2916,7 @@ def test_median(self):
self.assertEqual(x, x0, 0)

def test_mode(self):
- x = torch.arange(1, SIZE * SIZE + 1).clone().resize_(SIZE, SIZE)
+ x = torch.arange(1., SIZE * SIZE + 1).clone().resize_(SIZE, SIZE)
x[:2] = 1
x[:, :2] = 1
x0 = x.clone()
@@ -3119,7 +3152,7 @@ def test_randn(self):

def test_slice(self):
empty = torch.Tensor()
- x = torch.arange(0, 16).view(4, 4)
+ x = torch.arange(0., 16).view(4, 4)
self.assertEqual(x.slice(), x)
self.assertEqual(x.slice(0, 0, 4), x)
# start and stop are clamped to the size of dim
@@ -3914,7 +3947,7 @@ def naive_stft(x, frame_length, hop, fft_size=None, normalized=False,
return_size = fft_size
result = x.new(batch, int((length - frame_length) / float(hop)) + 1, return_size, 2)
for w in range(return_size): # freq
- radians = torch.arange(frame_length) * w * 2 * math.pi / fft_size
+ radians = torch.arange(float(frame_length)) * w * 2 * math.pi / fft_size
radians = radians.type_as(x)
re_kernel = radians.cos().mul_(window)
im_kernel = -radians.sin().mul_(window)
@@ -4576,7 +4609,7 @@ def ri(indices):
# strided is [[1 3 5 7],
# [9 11 13 15]]

- reference = conv_fn(torch.arange(0, 24).view(3, 8))
+ reference = conv_fn(torch.arange(0., 24).view(3, 8))
strided = conv_fn(torch.Tensor())
strided.set_(reference.storage(), 1, size=torch.Size([2, 4]),
stride=[8, 2])
@@ -4614,15 +4647,15 @@ def ri(indices):
# strided is [[10, 11],
# [17, 18]]

- reference = conv_fn(torch.arange(0, 24).view(3, 8))
+ reference = conv_fn(torch.arange(0., 24).view(3, 8))
strided = conv_fn(torch.Tensor())
strided.set_(reference.storage(), 10, size=torch.Size([2, 2]),
stride=[7, 1])
self.assertEqual(strided[ri([0]), ri([1])], torch.Tensor([11]))
strided[ri([0]), ri([1])] = -1
self.assertEqual(strided[ri([0]), ri([1])], torch.Tensor([-1]))

- reference = conv_fn(torch.arange(0, 24).view(3, 8))
+ reference = conv_fn(torch.arange(0., 24).view(3, 8))
strided = conv_fn(torch.Tensor())
strided.set_(reference.storage(), 10, size=torch.Size([2, 2]),
stride=[7, 1])
@@ -4632,7 +4665,7 @@ def ri(indices):
self.assertEqual(strided[ri([0, 1]), ri([1, 0])], torch.Tensor([-1,
2]))

- reference = conv_fn(torch.arange(0, 24).view(3, 8))
+ reference = conv_fn(torch.arange(0., 24).view(3, 8))
strided = conv_fn(torch.Tensor())
strided.set_(reference.storage(), 10, size=torch.Size([2, 2]),
stride=[7, 1])
@@ -4727,7 +4760,7 @@ def get_set_tensor(indexed, indexer):
# 5 6 7 8 9
# 10 11 12 13 14
# 15 16 17 18 19
- reference = conv_fn(torch.arange(0, 20).view(4, 5))
+ reference = conv_fn(torch.arange(0., 20).view(4, 5))

indices_to_test = [
# grab the second, fourth columns
@@ -4753,7 +4786,7 @@ def get_set_tensor(indexed, indexer):
indexer,
get_set_tensor(reference, indexer))

- reference = conv_fn(torch.arange(0, 160).view(4, 8, 5))
+ reference = conv_fn(torch.arange(0., 160).view(4, 8, 5))

indices_to_test = [
[slice(None), slice(None), [0, 3, 4]],
@@ -4804,7 +4837,7 @@ def get_set_tensor(indexed, indexer):
indexer,
get_set_tensor(reference, indexer))

- reference = conv_fn(torch.arange(0, 1296).view(3, 9, 8, 6))
+ reference = conv_fn(torch.arange(0., 1296).view(3, 9, 8, 6))

indices_to_test = [
[slice(None), slice(None), slice(None), [0, 3, 4]],
2 changes: 1 addition & 1 deletion tools/autograd/gen_python_functions.py
@@ -19,7 +19,7 @@
'.*_forward_out', 'sparse_raw_resize_', '_unsafe_view', 'tensor',
'sparse_coo_tensor', '_arange.*', '_range.*', '_linspace.*', '_logspace.*',
'_indexCopy_', 'max_values', 'min_values', 'argmax', 'argmin',
- '_cumsum.*', '_cumprod.*', '_sum.*', '_prod.*', '_th_sum.*', '_th_prod.*',
+ '_cumsum.*', '_cumprod.*', '_sum.*', '_prod.*', '_th_sum.*', '_th_prod.*', 'arange.*',
]

PY_VARIABLE_METHODS_CPP = CodeTemplate.from_file(template_path + '/python_variable_methods.cpp')
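Adding 'arange.*' to this skip list keeps arange out of the auto-generated Python bindings; presumably it receives a hand-written binding instead so that it can implement the dtype inference exercised by test_arange_inference above.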