Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 8 additions & 0 deletions test/test_nn.py
Original file line number Diff line number Diff line change
Expand Up @@ -811,12 +811,20 @@ def test_type(self):
# Exercise Module type-casting: floating point parameters are converted by
# float()/double()/type(), while an integer buffer keeps its type.
net.l = l
net.l2 = l
net.add_module('empty', None)
# Register an integer-typed buffer; it must survive the float()/double()
# casts below (only floating point tensors are converted).
net.register_buffer('indices', torch.LongTensor(1))
net.float()
self.assertIsInstance(l.weight.data, torch.FloatTensor)
self.assertIsInstance(l.bias.data, torch.FloatTensor)
# The Long buffer was not cast to float.
self.assertIsInstance(net.indices, torch.LongTensor)
net.double()
self.assertIsInstance(l.weight.data, torch.DoubleTensor)
self.assertIsInstance(l.bias.data, torch.DoubleTensor)
# Still Long after the double() cast.
self.assertIsInstance(net.indices, torch.LongTensor)
if TEST_CUDA:
# cuda() moves every tensor, including integer buffers, to the GPU,
# but the Long dtype is preserved.
net.float().cuda()
self.assertIsInstance(l.weight.data, torch.cuda.FloatTensor)
self.assertIsInstance(l.bias.data, torch.cuda.FloatTensor)
self.assertIsInstance(net.indices, torch.cuda.LongTensor)
# An explicit type(...) cast is unconditional (unlike float()/double()).
net.type(torch.FloatTensor)
self.assertIsInstance(l.weight.data, torch.FloatTensor)
self.assertIsInstance(l.bias.data, torch.FloatTensor)
Expand Down
3 changes: 3 additions & 0 deletions torch/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -266,6 +266,9 @@ def storage_type(cls):
CharTensor, ByteTensor, HalfTensor
}

# Tensor classes that hold integer data (as opposed to floating point).
# Module.float()/double()/half() consult this set so integer buffers are
# left untouched when a module is cast.
_integer_tensor_classes = {
    LongTensor,
    IntTensor,
    ShortTensor,
    CharTensor,
    ByteTensor,
}

set_default_tensor_type('torch.FloatTensor')

Expand Down
6 changes: 6 additions & 0 deletions torch/cuda/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -522,6 +522,12 @@ def storage_type():
torch._tensor_classes.add(ByteTensor)
torch._tensor_classes.add(HalfTensor)

# Register the CUDA integer tensor classes as well, so that
# Module.float()/double()/half() also skip integer buffers living on the GPU.
torch._integer_tensor_classes.update({
    LongTensor,
    IntTensor,
    ShortTensor,
    CharTensor,
    ByteTensor,
})

from . import sparse
from . import profiler
from . import nvtx
Expand Down
6 changes: 6 additions & 0 deletions torch/cuda/sparse.py
Original file line number Diff line number Diff line change
Expand Up @@ -86,3 +86,9 @@ def is_signed(self):
_sparse_tensor_classes.add(ByteTensor)
_sparse_tensor_classes.add(HalfTensor)
torch._tensor_classes.update(_sparse_tensor_classes)

# CUDA sparse integer tensor classes must also be skipped by the
# Module casting helpers (float/double/half).
torch._integer_tensor_classes.update({
    LongTensor,
    IntTensor,
    ShortTensor,
    CharTensor,
    ByteTensor,
})
6 changes: 6 additions & 0 deletions torch/distributed/remote_types.py
Original file line number Diff line number Diff line change
Expand Up @@ -129,6 +129,12 @@ def storage_type(cls):
torch._tensor_classes.add(CharTensor)
torch._tensor_classes.add(ByteTensor)

# Remote (distributed) integer tensor classes join the global registry so
# the Module casting helpers treat them like their local counterparts.
torch._integer_tensor_classes.update({
    LongTensor,
    IntTensor,
    ShortTensor,
    CharTensor,
    ByteTensor,
})

_type_names = ['Double', 'Float', 'Long', 'Int', 'Short', 'Char', 'Byte']
_locals = locals()
_tensors = [_locals[t + 'Tensor'] for t in _type_names]
Expand Down
12 changes: 6 additions & 6 deletions torch/nn/modules/module.py
Original file line number Diff line number Diff line change
Expand Up @@ -235,28 +235,28 @@ def type(self, dst_type):
return self._apply(lambda t: t.type(dst_type))

def float(self):
    """Casts all floating point parameters and buffers to float datatype.

    Integer-typed tensors (e.g. index buffers registered with
    ``register_buffer``) are left untouched, since casting them to a
    floating point type would corrupt their contents.

    Returns:
        Module: self
    """
    # PEP 8: use `x not in y` rather than `not x in y`.
    return self._apply(
        lambda t: t if type(t) in torch._integer_tensor_classes else t.float())

def double(self):
    """Casts all floating point parameters and buffers to double datatype.

    Integer-typed tensors (e.g. index buffers registered with
    ``register_buffer``) are left untouched, since casting them to a
    floating point type would corrupt their contents.

    Returns:
        Module: self
    """
    # PEP 8: use `x not in y` rather than `not x in y`.
    return self._apply(
        lambda t: t if type(t) in torch._integer_tensor_classes else t.double())

def half(self):
    """Casts all floating point parameters and buffers to half datatype.

    Integer-typed tensors (e.g. index buffers registered with
    ``register_buffer``) are left untouched, since casting them to a
    floating point type would corrupt their contents.

    Returns:
        Module: self
    """
    # PEP 8: use `x not in y` rather than `not x in y`.
    return self._apply(
        lambda t: t if type(t) in torch._integer_tensor_classes else t.half())

def register_backward_hook(self, hook):
"""Registers a backward hook on the module.
Expand Down
6 changes: 6 additions & 0 deletions torch/sparse/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -178,4 +178,10 @@ def is_signed(self):
_sparse_tensor_classes.add(ByteTensor)
torch._tensor_classes.update(_sparse_tensor_classes)

# Sparse integer tensor classes are registered too, so the Module casting
# helpers (float/double/half) leave sparse integer buffers untouched.
torch._integer_tensor_classes.update({
    LongTensor,
    IntTensor,
    ShortTensor,
    CharTensor,
    ByteTensor,
})

_C._sparse_init()