
Commit ad782ff

Erotemic authored and pytorchmergebot committed
Enable xdoctest runner in CI for real this time (#83816)
Builds on #83317 and enables running the doctests. Just need to figure out what is causing the failures.

Pull Request resolved: #83816
Approved by: https://github.com/ezyang, https://github.com/malfet
1 parent fb4fc0d commit ad782ff

90 files changed (+456, -262 lines)


.circleci/docker/requirements-ci.txt

Lines changed: 2 additions & 2 deletions
@@ -179,9 +179,9 @@ pytest-rerunfailures
 #Pinned versions:
 #test that import:

-xdoctest==1.0.2
+xdoctest==1.1.0
 #Description: runs doctests in pytest
-#Pinned versions: 1.0.2
+#Pinned versions: 1.1.0
 #test that import:

 pygments==2.12.0

.github/requirements/pip-requirements-macOS.txt

Lines changed: 1 addition & 1 deletion
@@ -19,4 +19,4 @@ pytest-shard==0.1.2
 scipy==1.9.0
 sympy==1.11.1
 unittest-xml-reporting<=3.2.0,>=2.0.0
-xdoctest==1.0.2
+xdoctest==1.1.0

test/run_doctests.sh

Lines changed: 6 additions & 7 deletions
@@ -4,9 +4,12 @@ This script simply runs the torch doctests via the xdoctest runner.

 This must be run from the root of the torch repo, as it needs the path to the
 torch source code.
-"

-#xdoctest -m torch --style=google list
+This script is provided as a developer convenience. On the CI the doctests are
+invoked in 'run_test.py'
+"
+# To simply list tests
+# xdoctest -m torch --style=google list

 # Reference: https://stackoverflow.com/questions/59895/bash-script-dir
 SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
@@ -16,14 +19,10 @@ echo "TORCH_MODPATH = $TORCH_MODPATH"
 if [[ ! -d "$TORCH_MODPATH" ]] ; then
     echo "Could not find the path to the torch module"
 else
-
-    # Next version of xdoctest will support environment variables that overlo
-
-
     export XDOCTEST_GLOBAL_EXEC="from torch import nn\nimport torch.nn.functional as F\nimport torch"
     export XDOCTEST_OPTIONS="+IGNORE_WHITESPACE"
     # Note: google wont catch numpy style docstrings (a few exist) but it also wont fail
     # on things not intended to be doctests.
     export XDOCTEST_STYLE="google"
-    xdoctest "$TORCH_MODPATH" --style="$XDOCTEST_STYLE" --global-exec "$XDOCTEST_GLOBAL_EXEC" --options="$XDOCTEST_OPTIONS"
+    xdoctest torch "$TORCH_MODPATH" --style="$XDOCTEST_STYLE" --global-exec "$XDOCTEST_GLOBAL_EXEC" --options="$XDOCTEST_OPTIONS"
 fi
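
Note (not part of the diff): a rough Python-API equivalent of the listing command mentioned in the script's comment, useful when iterating locally. This is a sketch that assumes xdoctest >= 1.1.0 and an importable torch; on CI the doctests are driven through run_test.py instead.

    # Sketch only: list torch doctests from Python instead of the xdoctest CLI.
    import os
    import torch
    import xdoctest

    # Point the runner at the installed torch package, like run_doctests.sh does.
    modpath = os.path.dirname(torch.__file__)

    # command='list' only parses and enumerates doctests; 'all' would execute them.
    # run_test.py additionally passes a config dict (style, global_exec, options).
    xdoctest.doctest_module(modpath, command='list', argv=[])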

test/run_test.py

Lines changed: 26 additions & 3 deletions
@@ -659,10 +659,9 @@ def run_doctests(test_module, test_directory, options):
     import pathlib
     pkgpath = pathlib.Path(torch.__file__).parent

-    #
     enabled = {
         # TODO: expose these options to the user
-        # Temporary disable all feature-conditional tests
+        # For now disable all feature-conditional tests
         # 'lapack': 'auto',
         # 'cuda': 'auto',
         # 'cuda1': 'auto',
@@ -671,6 +670,9 @@ def run_doctests(test_module, test_directory, options):
         'cuda': 0,
         'cuda1': 0,
         'qengine': 0,
+        'autograd_profiler': 0,
+        'cpp_ext': 0,
+        'monitor': 0,
     }

     # Resolve "auto" based on a test to determine if the feature is available.
@@ -707,13 +709,34 @@ def run_doctests(test_module, test_directory, options):
     if enabled['qengine']:
         os.environ['TORCH_DOCTEST_QENGINE'] = '1'

+    if enabled['autograd_profiler']:
+        os.environ['TORCH_DOCTEST_AUTOGRAD_PROFILER'] = '1'
+
+    if enabled['cpp_ext']:
+        os.environ['TORCH_DOCTEST_CPP_EXT'] = '1'
+
+    if enabled['monitor']:
+        os.environ['TORCH_DOCTEST_MONITOR'] = '1'
+
+    if 0:
+        # TODO: could try to enable some of these
+        os.environ['TORCH_DOCTEST_QUANTIZED_DYNAMIC'] = '1'
+        os.environ['TORCH_DOCTEST_ANOMOLY'] = '1'
+        os.environ['TORCH_DOCTEST_AUTOGRAD'] = '1'
+        os.environ['TORCH_DOCTEST_HUB'] = '1'
+        os.environ['TORCH_DOCTEST_DATALOADER'] = '1'
+        os.environ['TORCH_DOCTEST_ONNX'] = '1'
+        os.environ['TORCH_DOCTEST_FUTURES'] = '1'
+
     pkgpath = os.path.dirname(torch.__file__)
+
     xdoctest_config = {
         'global_exec': r'\n'.join([
             'from torch import nn',
             'import torch.nn.functional as F',
             'import torch',
         ]),
+        'analysis': 'static',  # set to "auto" to test doctests in compiled modules
         'style': 'google',
         'options': '+IGNORE_WHITESPACE',
     }
@@ -1016,7 +1039,7 @@ def parse_args():
     )
     parser.add_argument(
         "--xdoctest-command",
-        default='list',
+        default='all',
         help=(
             "Control the specific doctest action. "
             "Use 'list' to simply parse doctests and check syntax. "

torch/__init__.py

Lines changed: 2 additions & 2 deletions
@@ -427,7 +427,7 @@ def is_tensor(obj):
         obj (Object): Object to test
     Example::

-        >>> x=torch.tensor([1,2,3])
+        >>> x = torch.tensor([1, 2, 3])
         >>> torch.is_tensor(x)
         True

@@ -627,10 +627,10 @@ def use_deterministic_algorithms(mode, *, warn_only=False):

     Example::

+        >>> # xdoctest: +SKIP
         >>> torch.use_deterministic_algorithms(True)

         # Forward mode nondeterministic error
-        >>> # xdoctest: +SKIP
         >>> torch.randn(10, device='cuda').kthvalue(0)
         ...
         RuntimeError: kthvalue CUDA does not have a deterministic implementation...
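
Note (not part of the diff): moving '# xdoctest: +SKIP' above the first statement matters because a directive on its own '>>>' line acts as a block directive and applies to the rest of the example, whereas a directive appended to a statement applies to that line only. A hypothetical docstring illustrating the two scopes:

    # Hypothetical example, not from this commit.
    def directive_scope_demo():
        """
        Example::

            >>> x = 1  # xdoctest: +SKIP
            >>> # the line above skips only itself (inline directive)
            >>> # xdoctest: +SKIP
            >>> print("everything after the block directive above is skipped")
        """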

torch/_functorch/eager_transforms.py

Lines changed: 14 additions & 7 deletions
@@ -251,6 +251,7 @@ def vjp(func: Callable, *primals, has_aux: bool = False):

     Case 2: Using ``vjp`` inside ``torch.no_grad`` context manager:

+        >>> # xdoctest: +SKIP(failing)
         >>> with torch.no_grad():
         >>>     vjp(f)(x)

@@ -1286,6 +1287,7 @@ def grad(func: Callable, argnums: argnums_t = 0, has_aux: bool = False) -> Calla

     Example of using ``grad``:

+        >>> # xdoctest: +SKIP
         >>> from torch.func import grad
         >>> x = torch.randn([])
         >>> cos_x = grad(lambda x: torch.sin(x))(x)
@@ -1297,6 +1299,7 @@ def grad(func: Callable, argnums: argnums_t = 0, has_aux: bool = False) -> Calla

     When composed with ``vmap``, ``grad`` can be used to compute per-sample-gradients:

+        >>> # xdoctest: +SKIP
         >>> from torch.func import grad, vmap
         >>> batch_size, feature_size = 3, 5
         >>>
@@ -1317,6 +1320,7 @@ def grad(func: Callable, argnums: argnums_t = 0, has_aux: bool = False) -> Calla

     Example of using ``grad`` with ``has_aux`` and ``argnums``:

+        >>> # xdoctest: +SKIP
         >>> from torch.func import grad
         >>> def my_loss_func(y, y_pred):
         >>>     loss_per_sample = (0.5 * y_pred - y) ** 2
@@ -1327,13 +1331,14 @@ def grad(func: Callable, argnums: argnums_t = 0, has_aux: bool = False) -> Calla
         >>> y_true = torch.rand(4)
         >>> y_preds = torch.rand(4, requires_grad=True)
         >>> out = fn(y_true, y_preds)
-        >>> > output is ((grads w.r.t y_true, grads w.r.t y_preds), (y_pred, loss_per_sample))
+        >>> # > output is ((grads w.r.t y_true, grads w.r.t y_preds), (y_pred, loss_per_sample))

     .. note::
         Using PyTorch ``torch.no_grad`` together with ``grad``.

         Case 1: Using ``torch.no_grad`` inside a function:

+            >>> # xdoctest: +SKIP
             >>> def f(x):
             >>>     with torch.no_grad():
             >>>         c = x ** 2
@@ -1343,6 +1348,7 @@ def grad(func: Callable, argnums: argnums_t = 0, has_aux: bool = False) -> Calla

         Case 2: Using ``grad`` inside ``torch.no_grad`` context manager:

+            >>> # xdoctest: +SKIP
             >>> with torch.no_grad():
             >>>     grad(f)(x)

@@ -1433,11 +1439,12 @@ def functionalize(func: Callable, *, remove: str = 'mutations') -> Callable:

     Example::

+        >>> # xdoctest: +SKIP
         >>> import torch
         >>> from torch.fx.experimental.proxy_tensor import make_fx
         >>> from torch.func import functionalize
         >>>
-        >>> A function that uses mutations and views, but only on intermediate tensors.
+        >>> # A function that uses mutations and views, but only on intermediate tensors.
         >>> def f(a):
         ...     b = a + 1
         ...     c = b.view(-1)
@@ -1490,17 +1497,17 @@ def forward(self, a_1):
         return view_copy_1


-        >>> A function that mutates its input tensor
+        >>> # A function that mutates its input tensor
         >>> def f(a):
         ...     b = a.view(-1)
         ...     b.add_(1)
         ...     return a
         ...
         >>> f_no_mutations_and_views_traced = make_fx(functionalize(f, remove='mutations_and_views'))(inpt)
-        >>>
-        >>> All mutations and views have been removed,
-        >>> but there is an extra copy_ in the graph to correctly apply the mutation to the input
-        >>> after the function has completed.
+        >>> #
+        >>> # All mutations and views have been removed,
+        >>> # but there is an extra copy_ in the graph to correctly apply the mutation to the input
+        >>> # after the function has completed.
         >>> print(f_no_mutations_and_views_traced.code)

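
Note (not part of the diff): the narrative '>>> A function ...' lines above become comments because the runner executes every '>>>' line as Python, so bare prose is a SyntaxError once the doctests actually run. A minimal illustration with a hypothetical docstring:

    # Hypothetical example, not from this commit.
    def prose_in_doctest():
        """
        >>> # A leading '#' turns the prose into a valid, no-op Python comment
        >>> 1 + 1
        2
        """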

torch/_functorch/fx_minifier.py

Lines changed: 1 addition & 0 deletions
@@ -69,6 +69,7 @@ def minifier(fail_f: fx.GraphModule, inps, module_fails, dump_state: Callable =
     2. Delta Debugging: Tries replacing half of the graph with inputs. If fails,
        tries replacing quarter of the graph, etc.

+    >>> # xdoctest: +SKIP(failing)
     >>> failing_function = fx.symbolic_trace(f)
     >>> minimize(failing_function, [torch.randn(5)], lambda fx_g, inps: fx_g(*inps))


torch/_namedtensor_internals.py

Lines changed: 3 additions & 0 deletions
@@ -122,10 +122,12 @@ def update_names(tensor, names, rename_map, inplace):

     For example,
     ```
+    >>> # xdoctest: +SKIP
     >>> x = torch.empty(2, 3, 5, 7, names=('N', 'C', 'H', 'W'))
     >>> x.rename('...', 'height', 'width').names
     ('N', 'C', 'height', 'width')

+    >>> # xdoctest: +SKIP
     >>> x.rename('batch', '...', 'width').names
     ('batch', 'C', 'H', 'width')

@@ -136,6 +138,7 @@ def update_names(tensor, names, rename_map, inplace):

     For example,
     ```
+    >>> # xdoctest: +SKIP
     >>> x = torch.empty(2, 3, 5, 7, names=('N', 'C', 'H', 'W'))
     >>> x.rename(W='width', H='height').names
     ('N', 'C', 'height', 'width')

torch/_prims_common/__init__.py

Lines changed: 1 addition & 0 deletions
@@ -1496,6 +1496,7 @@ def compute_required_storage_length(
     >>> compute_required_storage_length(t.shape, t.stride(), t.storage_offset())
     200

+    >>> # xdoctest: +SKIP(failing)
     >>> t2 = torch.empty_strided((1, 2, 3), (5, 7, 11))
     >>> size = compute_required_storage_length(t2.shape, t2.stride(), t2.storage_offset())
     >>> size == t.storage().size()

torch/_tensor_str.py

Lines changed: 0 additions & 1 deletion
@@ -215,7 +215,6 @@ def _vector_str(self, indent, summarize, formatter1, formatter2=None):
     elements_per_line = max(
         1, int(math.floor((PRINT_OPTS.linewidth - indent) / (element_length)))
     )
-    # char_per_line = element_length * elements_per_line  # unused

     def _val_formatter(val, formatter1=formatter1, formatter2=formatter2):
         if formatter2 is not None:
