-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathgraph_results.py
More file actions
163 lines (145 loc) · 5.13 KB
/
graph_results.py
File metadata and controls
163 lines (145 loc) · 5.13 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
import json
import math
import statistics
import plotly.graph_objects as go
import plotly.io as io
from plotly.subplots import make_subplots
# This script parses JMH benchmarking results into charts developed using plot.ly (https://plotly.com/)
# It expects JMH benchmark results to be dumped to a file "scijava-ops-benchmarks_results.json", within its directory.
# If you'd like to add a plotly chart, add an entry to the "figures" list below.
def _badge(letters, color):
    """Return the bracketed, colored HTML badge embedded in bar labels."""
    return f"<b style=\"color:black\">[<b style=\"color:{color}\">{letters}</b>]</b>"

# Badges marking how each benchmark reached its Op:
# [A] = adaptation, [C] = conversion, [AC] = adaptation + conversion.
A = _badge("A", "#009E73")
C = _badge("C", "#E69F00")
AC = _badge("AC", "#CC79A7")

# One entry per chart. "name" is the JMH benchmark class (also the output
# file stem), "title" is the chart heading, and "bars" maps each benchmark
# method name to the label shown on its bar.
figures = [
    {
        "name": "BenchmarkMatching",
        "title": "Basic Op Matching Performance",
        "bars": {
            "sjOpsAdapted": f"SciJava Ops {A}",
            "sjOps": "SciJava Ops",
            "noOpsAdapted": f"Static Method {A}",
            "noOps": "Static Method",
        },
    },
    {
        "name": "BenchmarkCaching",
        "title": "Caching Effects on Op Matching Performance",
        "bars": {
            "sjOpsWithCache": "SciJava Ops (cached)",
            "sjOps": "SciJava Ops",
            "noOps": "Static Method",
        },
    },
    {
        "name": "BenchmarkConversion",
        "title": "Parameter Conversion Performance",
        "bars": {
            "sjOpsConvertedAndAdapted": f"SciJava Ops {AC}",
            "sjOpsConverted": f"SciJava Ops {C}",
            "noOpsAdaptedAndConverted": f"Static Method {AC}",
            "noOpsConverted": f"Static Method {C}",
        },
    },
    {
        "name": "BenchmarkFrameworks",
        "title": "Algorithm Execution Performance by Framework",
        "bars": {
            "ijOps": "ImageJ Ops",
            "sjOps": "SciJava Ops",
            "noOps": "Static Method",
        },
    },
    {
        "name": "BenchmarkCombined",
        "title": "Combined Performance Metrics",
        "bars": {
            "ijOps": "ImageJ Ops",
            "sjOpsConvertedAndAdapted": f"SciJava Ops {AC}",
            "sjOpsConverted": f"SciJava Ops {C}",
            "sjOpsAdapted": f"SciJava Ops {A}",
            "sjOps": "SciJava Ops",
            "sjOpsWithCache": "SciJava Ops (cached)",
            "noOpsAdaptedAndConverted": f"Static Method {AC}",
            "noOpsConverted": f"Static Method {C}",
            "noOpsAdapted": f"Static Method {A}",
            "noOps": "Static Method",
        },
    },
]
# Read in the benchmark results.
with open("scijava-ops-benchmarks_results.json") as f:
    benchmark_rows = json.load(f)

# Map each test method name to its score plus the [min, max] spread,
# taken from the 0th and 100th score percentiles.
results = {}
for entry in benchmark_rows:
    metric = entry["primaryMetric"]
    percentiles = metric["scorePercentiles"]
    method = entry["benchmark"].rsplit(".", 1)[-1]
    results[method] = {
        "score": metric["score"],
        "minmax": [percentiles["0.0"], percentiles["100.0"]],
    }
# Build charts and dump them to JSON (one file per figure, under images/).
for figure in figures:
    name = figure["name"]
    print(f"Generating figure for {name}", end="")
    labels = []
    values = []
    errors = []
    # Gather the score and asymmetric error bars for each benchmark method
    # in this chart. Each error entry is [distance above score, distance
    # below score], derived from the min/max percentiles.
    for test, label in figure["bars"].items():
        print(".", end="")
        result = results[test]
        labels.append(label)
        score = result["score"]
        values.append(score)
        error = [result["minmax"][1] - score, score - result["minmax"][0]]
        errors.append(error)
    # Two horizontal bar charts sharing the y-axis: log scale on the left,
    # linear scale on the right.
    fig = make_subplots(rows=1, cols=2, shared_yaxes=True, horizontal_spacing=0.02)
    # Add log scale bars (left side)
    fig.add_trace(
        go.Bar(
            y=labels,
            x=values,
            orientation='h',
            error_x=dict(type='data', symmetric=False, array=[e[0] for e in errors], arrayminus=[e[1] for e in errors]),
            name="Log Scale",
            marker_color='blue'
        ),
        row=1, col=1
    )
    # Add linear scale bars (right side)
    fig.add_trace(
        go.Bar(
            y=labels,
            x=values,
            orientation='h',
            error_x=dict(type='data', symmetric=False, array=[e[0] for e in errors], arrayminus=[e[1] for e in errors]),
            name="Linear Scale",
            marker_color='red'
        ),
        row=1, col=2
    )
    # Update layout
    fig.update_layout(
        title_text=figure["title"] + f"<br><sup style=\"color: gray\">{A}=Adaptation, {C}=Conversion, {AC}=Adaptation & Conversion</sup>",
        barmode='relative',
        yaxis=dict(title=""),
        xaxis=dict(
            title="Log Scale (μs/execution)",
            type="log",
            # Plotly expects log-axis ranges in log10 units, not data units;
            # passing raw scores here silently mis-scales the axis.
            range=[math.log10(min(values)), math.log10(max(values))],
        ),
        xaxis2=dict(title="Linear Scale (μs/execution)"),
        height=max(500, 50 * len(labels)),  # Adjust height based on number of bars
        showlegend=False
    )
    # Add a vertical line at x=0 (the seam between the two subplots).
    fig.add_vline(x=0, line_width=2, line_color="black")
    # Reverse the log scale axis so the two charts meet in the middle.
    # NOTE(review): autorange="reversed" recomputes the axis range and may
    # override the explicit log range set above -- confirm intended behavior.
    fig.update_xaxes(autorange="reversed", row=1, col=1)
    # Convert to JSON and dump
    with open(f"images/{name}.json", "w") as f:
        f.write(io.to_json(fig))
    print()
print("Done!")