import json

import plotly.graph_objects as go
import plotly.io as pio
# This script parses JMH benchmark results into charts built with plotly (https://plotly.com/).
# It expects the JMH benchmark results to be dumped to a file named
# "scijava-ops-benchmarks_results.json" within this script's directory.
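# Each entry in that file is expected to follow the standard JMH JSON output
# format; roughly (illustrative sketch, package and values made up):
#   {
#     "benchmark": "org.scijava.ops.benchmarks.BenchmarkMatching.sjOps",
#     "primaryMetric": {
#       "score": 12.3,
#       "scorePercentiles": {"0.0": 11.8, "50.0": 12.2, "100.0": 13.1}
#     }
#   }
# Only the fields read below ("benchmark", "score", "scorePercentiles") matter here.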
# To add a plotly chart, add an entry to the `figures` list below.
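# Colored HTML tags used in bar labels and chart subtitles:
# [A] = Adaptation, [C] = Conversion, [AC] = Adaptation & Conversion.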
A = "<b style=\"color:black\">[<b style=\"color:#009E73\">A</b>]</b>"
C = "<b style=\"color:black\">[<b style=\"color:#E69F00\">C</b>]</b>"
AC = "<b style=\"color:black\">[<b style=\"color:#CC79A7\">AC</b>]</b>"
figures = [
    {
        "name": "BenchmarkMatching",
        "title": "Basic Op Matching Performance",
        "bars": {
            "noOps": "Static Method",
            "noOpsAdapted": f"Static Method {A}",
            "sjOps": "SciJava Ops",
            "sjOpsAdapted": f"SciJava Ops {A}",
        },
    },
    {
        "name": "BenchmarkCaching",
        "title": "Caching Effects on Op Matching Performance",
        "bars": {
            "noOps": "Static Method",
            "sjOps": "SciJava Ops",
            "sjOpsWithCache": "SciJava Ops (cached)",
        },
    },
    {
        "name": "BenchmarkConversion",
        "title": "Parameter Conversion Performance",
        "bars": {
            "noOpsConverted": f"Static Method {C}",
            "noOpsAdaptedAndConverted": f"Static Method {AC}",
            "sjOpsConverted": f"SciJava Ops {C}",
            "sjOpsConvertedAndAdapted": f"SciJava Ops {AC}",
        },
    },
    {
        "name": "BenchmarkFrameworks",
        "title": "Algorithm Execution Performance by Framework",
        "bars": {
            "noOps": "Static Method",
            "sjOps": "SciJava Ops",
            "ijOps": "ImageJ Ops",
        },
    },
    {
        "name": "BenchmarkCombined",
        "title": "Combined Performance Metrics",
        "bars": {
            "noOps": "Static Method",
            "noOpsAdapted": f"Static Method {A}",
            "noOpsConverted": f"Static Method {C}",
            "noOpsAdaptedAndConverted": f"Static Method {AC}",
            "sjOpsWithCache": "SciJava Ops (cached)",
            "sjOps": "SciJava Ops",
            "sjOpsAdapted": f"SciJava Ops {A}",
            "sjOpsConverted": f"SciJava Ops {C}",
            "sjOpsConvertedAndAdapted": f"SciJava Ops {AC}",
            "ijOps": "ImageJ Ops",
        },
    },
]
# Read in the benchmark results.
with open("scijava-ops-benchmarks_results.json") as f:
    data = json.load(f)
# Construct a mapping from test method to scores.
results = {}
for row in data:
    test = row["benchmark"].split(".")[-1]
    score = row["primaryMetric"]["score"]
    percentiles = row["primaryMetric"]["scorePercentiles"]
    # Use the 0th and 100th percentiles as the minimum and maximum scores.
    minmax = [percentiles["0.0"], percentiles["100.0"]]
    results[test] = {"score": score, "minmax": minmax}
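# For example, after this loop results["sjOps"] might map to something like
# {"score": 12.3, "minmax": [11.8, 13.1]} (values illustrative only).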
# Build charts and dump them to JSON.
for figure in figures:
    name = figure["name"]
    print(f"Generating figure for {name}", end="")
    x = []
    y = []
    error_y = []
    error_y_minus = []
    # Add a bar for each benchmark in the class.
    for test, label in figure["bars"].items():
        print(".", end="")
        result = results[test]
        x.append(label)
        y.append(result["score"])
        # Asymmetric error bars span from the minimum to the maximum score.
        error_y.append(result["minmax"][1] - result["score"])
        error_y_minus.append(result["score"] - result["minmax"][0])
    # Create a bar chart.
    fig = go.Figure()
    fig.add_bar(
        x=x,
        y=y,
        error_y=dict(type="data", array=error_y, arrayminus=error_y_minus),
    )
    fig.update_layout(
        title_text=figure["title"] + f"<br><sup style=\"color: gray\">{A}=Adaptation, {C}=Conversion, {AC}=Adaptation & Conversion</sup>",
        yaxis_title="<b>Performance (μs/execution)</b>",
    )
    # Convert the figure to JSON and dump it to the images directory.
    with open(f"images/{name}.json", "w") as f:
        f.write(pio.to_json(fig))
    print()
print("Done!")