
Commit b69beed

update split doc
1 parent 21be376 commit b69beed

2 files changed: +25 −25 lines changed

torch/functional.py

Lines changed: 1 addition & 1 deletion

@@ -9,7 +9,7 @@


 def split(tensor, split_size, dim=0):
-    """Splits the tensor into chunks all of a certain size (if possible).
+    """Splits the tensor into chunks all of size :attr:`split_size` (if possible).

     Last chunk will be smaller if the tensor size along a given dimension
     is not divisible by :attr:`split_size`.
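For reference, a quick sketch of the behavior the updated docstring describes (illustrative values, not part of the commit):

import torch

x = torch.arange(0, 10)             # 10 elements along dim 0
chunks = torch.split(x, 4)          # split_size=4, dim=0
print([c.size(0) for c in chunks])  # [4, 4, 2] -- the last chunk is smaller
                                    # because 10 is not divisible by 4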

torch/tensor.py

Lines changed: 24 additions & 24 deletions
@@ -14,7 +14,7 @@ class _TensorBase(object):
     # CUDA case, which handles constructing the tensor on the same GPU
     # as this tensor.
     def new(self, *args, **kwargs):
-        """Constructs a new tensor of the same data type as :attr:`self` tensor.
+        r"""Constructs a new tensor of the same data type as :attr:`self` tensor.

         Any valid argument combination to the tensor constructor is accepted by
         this method, including sizes, :class:`torch.Storage`, NumPy ndarray,
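The r prefix added throughout this file makes each docstring a raw string literal, so backslashes survive verbatim; this matters once Sphinx markup containing backslash sequences lands in a docstring. A minimal illustration of the difference (not from the commit):

plain = "a \tab here"    # '\t' is interpreted as a tab character
raw = r"a \tab here"     # backslash is kept literally for Sphinx
print(plain)             # 'a  ab here' (with an embedded tab)
print(raw)               # 'a \tab here'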
@@ -27,7 +27,7 @@ def new(self, *args, **kwargs):
         return self.__class__(*args, **kwargs)

     def type_as(self, tensor):
-        """Returns this :attr:`self` tensor cast to the type of the given
+        r"""Returns this :attr:`self` tensor cast to the type of the given
         tensor.

         This is a no-op if the :attr:`self` tensor is already of the correct
@@ -41,48 +41,48 @@ def type_as(self, tensor):
         return self.type(tensor.type())

     def cpu(self):
-        """Returns a CPU copy of this tensor if it's not already on the CPU"""
+        r"""Returns a CPU copy of this tensor if it's not already on the CPU"""
         return self.type(getattr(torch, self.__class__.__name__))

     def double(self):
-        """Casts this tensor to double type"""
+        r"""Casts this tensor to double type"""
         return self.type(type(self).__module__ + '.DoubleTensor')

     def float(self):
-        """Casts this tensor to float type"""
+        r"""Casts this tensor to float type"""
         return self.type(type(self).__module__ + '.FloatTensor')

     def half(self):
-        """Casts this tensor to half-precision float type"""
+        r"""Casts this tensor to half-precision float type"""
         return self.type(type(self).__module__ + '.HalfTensor')

     def long(self):
-        """Casts this tensor to long type"""
+        r"""Casts this tensor to long type"""
         return self.type(type(self).__module__ + '.LongTensor')

     def int(self):
-        """Casts this tensor to int type"""
+        r"""Casts this tensor to int type"""
         return self.type(type(self).__module__ + '.IntTensor')

     def short(self):
-        """Casts this tensor to short type"""
+        r"""Casts this tensor to short type"""
         return self.type(type(self).__module__ + '.ShortTensor')

     def char(self):
-        """Casts this tensor to char type"""
+        r"""Casts this tensor to char type"""
         return self.type(type(self).__module__ + '.CharTensor')

     def byte(self):
-        """Casts this tensor to byte type"""
+        r"""Casts this tensor to byte type"""
         return self.type(type(self).__module__ + '.ByteTensor')

     def is_pinned(self):
-        """Returns true if this tensor resides in pinned memory"""
+        r"""Returns true if this tensor resides in pinned memory"""
         storage = self.storage()
         return storage.is_pinned() if storage else False

     def pin_memory(self):
-        """Copies the tensor to pinned memory, if it's not already pinned."""
+        r"""Copies the tensor to pinned memory, if it's not already pinned."""
         if self.is_cuda:
             raise TypeError("cannot pin '{0}' only CPU memory can be pinned"
                             .format(self.type()))
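A short usage sketch of the casting helpers documented in this hunk (illustrative values only):

x = torch.randn(3)             # torch.FloatTensor by default
x.double()                     # -> torch.DoubleTensor
x.int()                        # -> torch.IntTensor
x.type_as(torch.LongTensor())  # same effect as x.long()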
@@ -92,7 +92,7 @@ def pin_memory(self):
         return type(self)().set_(storage.pin_memory()).view_as(self)

     def share_memory_(self):
-        """Moves the underlying storage to shared memory.
+        r"""Moves the underlying storage to shared memory.

         This is a no-op if the underlying storage is already in shared memory
         and for CUDA tensors. Tensors in shared memory cannot be resized.
@@ -101,15 +101,15 @@ def share_memory_(self):
         return self

     def is_shared(self):
-        """Checks if tensor is in shared memory.
+        r"""Checks if tensor is in shared memory.

         This is always ``True`` for CUDA tensors.
         """
         return self.storage().is_shared()

     @property
     def shape(self):
-        """Alias for .size()
+        r"""Alias for .size()

         Returns a torch.Size object, containing the dimensions of the
         :attr:`self` Tensor.
@@ -173,27 +173,27 @@ def __iter__(self):
         return iter([])

     def split(self, split_size, dim=0):
-        """Splits this tensor into tensor chunks of :attr:`split_size` size.
+        r"""Splits this tensor into tensor chunks of :attr:`split_size` size.

         See :func:`torch.split`.
         """
         return torch.split(self, split_size, dim)

     def chunk(self, n_chunks, dim=0):
-        """Splits this tensor into a certain number of tensor chunks.
+        r"""Splits this tensor into a certain number of tensor chunks.

         See :func:`torch.chunk`.
         """
         return torch.chunk(self, n_chunks, dim)

     def matmul(self, other):
-        """Matrix product of two tensors.
+        r"""Matrix product of two tensors.

         See :func:`torch.matmul`."""
         return torch.matmul(self, other)

     def tolist(self):
-        """Returns a nested list representation of this tensor."""
+        r"""Returns a nested list representation of this tensor."""
         dim = self.dim()
         if dim == 1:
             return [v for v in self]
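To make the split/chunk distinction in this hunk concrete: split takes a size per chunk, chunk takes a number of chunks. A sketch with illustrative values:

x = torch.randn(5, 3)
x.split(2)   # 3 pieces of sizes 2, 2, 1 along dim 0 (argument is chunk size)
x.chunk(3)   # 3 pieces as well, but the argument is the number of chunks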
@@ -202,7 +202,7 @@ def tolist(self):
         return []

     def view_as(self, tensor):
-        """Returns this tensor viewed as the size of the specified tensor.
+        r"""Returns this tensor viewed as the size of the specified tensor.

         This is equivalent to::

@@ -211,7 +211,7 @@ def view_as(self, tensor):
         return self.view(tensor.size())

     def permute(self, *dims):
-        """Permute the dimensions of this tensor.
+        r"""Permute the dimensions of this tensor.

         Args:
             *dims (int...): The desired ordering of dimensions
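A usage sketch for permute (illustrative values, not from the commit):

x = torch.randn(2, 3, 5)
x.size()                   # torch.Size([2, 3, 5])
x.permute(2, 0, 1).size()  # torch.Size([5, 2, 3])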
@@ -240,7 +240,7 @@ def permute(self, *dims):
         return tensor

     def expand_as(self, tensor):
-        """Expands this tensor to the size of the specified tensor.
+        r"""Expands this tensor to the size of the specified tensor.

         This is equivalent to::

@@ -249,7 +249,7 @@ def expand_as(self, tensor):
         return self.expand(tensor.size())

     def repeat(self, *sizes):
-        """Repeats this tensor along the specified dimensions.
+        r"""Repeats this tensor along the specified dimensions.

         Unlike :meth:`expand`, this function copies the tensor's data.

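A usage sketch contrasting repeat with expand (illustrative values):

x = torch.Tensor([1, 2, 3])
x.repeat(4, 2)         # new 4x6 tensor; the data is copied
x.repeat(4, 2).size()  # torch.Size([4, 6])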
