Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
36 changes: 29 additions & 7 deletions test/test_autograd.py
Original file line number Diff line number Diff line change
Expand Up @@ -2441,10 +2441,6 @@ def get_id():

events.populate_cpu_children()

print()
for event in events:
print(event)

# Note that [1, 3] pushes out [0, 2] first. Then we record [1, 2]
# as a child of [1, 3]
res = [[], [], [], [], [4]]
Expand All @@ -2454,10 +2450,34 @@ def get_children_ids(event):

assert([get_children_ids(event) for event in events] == res)

def test_profiler_shapes(self):
    """Profile a forward pass through two stacked Linear layers and
    verify that the autograd profiler records per-op input shapes when
    ``record_shapes=True``.

    Also exercises table rendering (plain and grouped by input shape)
    as a smoke check that formatting does not raise.
    """
    layer1 = torch.nn.Linear(20, 30)
    layer2 = torch.nn.Linear(30, 40)
    # 'inputs' (not 'input') to avoid shadowing the builtin.
    inputs = torch.randn(128, 20)
    with profile(record_shapes=True) as prof:
        layer2(layer1(inputs))

    # NOTE(review): the event indices below assume a fixed profiler event
    # ordering — a weight type-conversion event followed by the addmm for
    # each layer. This is tied to the profiler implementation of the
    # torch version under test; confirm on upgrade.
    # type conversion (layer1 weight, shape [30, 20])
    self.assertEqual(prof.function_events[0].input_shapes, [[30, 20]])
    # fc (addmm): bias, input, transposed weight, plus two scalar args
    self.assertEqual(
        prof.function_events[1].input_shapes,
        [[30], [128, 20], [20, 30], [], []],
    )
    # type conversion (layer2 weight, shape [40, 30])
    self.assertEqual(prof.function_events[2].input_shapes, [[40, 30]])
    self.assertEqual(
        prof.function_events[3].input_shapes,
        [[40], [128, 30], [30, 40], [], []],
    )
    # Render the tables without printing to stdout (see review thread:
    # stdout output is noise in unit tests); this is purely a smoke
    # check that formatting succeeds.
    prof.table()
    prof.key_averages(group_by_input_shape=True).table()

def test_profiler_aggregation_lstm(self):
print("")
rnn = torch.nn.LSTM(10, 20, 2)
total_time_s = 0
with profile() as prof:
with profile(record_shapes=True) as prof:
for i in range(20):
input = torch.randn(5, 3, 10)
h = torch.randn(2, 3, 20)
Expand All @@ -2467,8 +2487,10 @@ def test_profiler_aggregation_lstm(self):
end = time.time()
total_time_s += end - start

print(prof.table(sort_by="self_cpu_time_total", row_limit=10))
print(prof.key_averages().table(sort_by="self_cpu_time_total", row_limit=10))
print(prof.table(
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Actually, I am not sure why we were printing to stdout before; it's not useful in the unit test. Can you change it to just call __repr__ on it for the sanity check?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@soumith, is there a specific issue with printing to stdout? Normally a Python test runner would capture stdout, but when you run a test manually the output is useful. Is this not the case for some of the OSS CIs?

No strong feelings about this though :)

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The default test runner for Python doesn't capture stdout, but the default one in Buck eats it properly.
Anyway, this is a nit, and it wasn't introduced in your diff.

sort_by="self_cpu_time_total", row_limit=10, header="TEST"))
print(prof.key_averages(group_by_input_shape=True).table(
sort_by="self_cpu_time_total", row_limit=10))

total_time_us = total_time_s * 1000.0 * 1000.0 # make it us which is profiler default
print(
Expand Down
Loading