potassco-benchmark-tool 2.1.1 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- benchmarktool/__init__.py +0 -0
- benchmarktool/entry_points.py +417 -0
- benchmarktool/init/programs/gcat.sh +24 -0
- benchmarktool/init/runscripts/runscript-all.xml +49 -0
- benchmarktool/init/runscripts/runscript-dist.xml +20 -0
- benchmarktool/init/runscripts/runscript-example.xml +31 -0
- benchmarktool/init/runscripts/runscript-seq.xml +27 -0
- benchmarktool/init/templates/seq-generic-single.sh +27 -0
- benchmarktool/init/templates/seq-generic-zip.sh +14 -0
- benchmarktool/init/templates/seq-generic.sh +12 -0
- benchmarktool/init/templates/single.dist +25 -0
- benchmarktool/result/__init__.py +0 -0
- benchmarktool/result/ipynb_gen.py +477 -0
- benchmarktool/result/ods_config.py +42 -0
- benchmarktool/result/ods_gen.py +714 -0
- benchmarktool/result/parser.py +167 -0
- benchmarktool/result/result.py +453 -0
- benchmarktool/resultparser/__init__.py +0 -0
- benchmarktool/resultparser/clasp.py +88 -0
- benchmarktool/runscript/__init__.py +0 -0
- benchmarktool/runscript/parser.py +477 -0
- benchmarktool/runscript/runscript.py +1481 -0
- benchmarktool/tools.py +82 -0
- potassco_benchmark_tool-2.1.1.dist-info/METADATA +112 -0
- potassco_benchmark_tool-2.1.1.dist-info/RECORD +29 -0
- potassco_benchmark_tool-2.1.1.dist-info/WHEEL +5 -0
- potassco_benchmark_tool-2.1.1.dist-info/entry_points.txt +2 -0
- potassco_benchmark_tool-2.1.1.dist-info/licenses/LICENSE +21 -0
- potassco_benchmark_tool-2.1.1.dist-info/top_level.txt +1 -0
benchmarktool/result/parser.py
@@ -0,0 +1,167 @@
"""
Created on Jan 19, 2010

@author: Roland Kaminski
"""

from typing import Any, Optional

from lxml import etree  # type: ignore[import-untyped]

from benchmarktool import tools
from benchmarktool.result.result import (
    Benchmark,
    Class,
    ClassResult,
    Config,
    DistJob,
    Instance,
    InstanceResult,
    Machine,
    Project,
    Result,
    Run,
    Runspec,
    SeqJob,
    Setting,
    System,
)


# pylint: disable=too-many-instance-attributes
class Parser:
    """
    A parser to parse XML result files.
    """

    def __init__(self) -> None:
        """
        Initializes the parser.
        """

        self.system_order = 0
        self.result = Result()
        self.setting_order = 0
        self.benchscope = False

        self.system: Optional[System] = None
        self.benchmark: Optional[Benchmark] = None
        self.benchclass: Optional[Class] = None
        self.classresult: Optional[ClassResult] = None
        self.instresult: Optional[InstanceResult] = None
        self.runspec: Optional[Runspec] = None
        self.project: Optional[Project] = None
        self.run: Optional[Run] = None

    def parse(self, infile: Any) -> Result:
        """
        Parse a given result file and return its representation
        in form of an instance of class Result.

        Attributes:
            infile (Any): The file to parse.
        """
        # to reduce memory consumption especially for large result files
        # do not use the full blown etree representation
        parser = etree.XMLParser(target=self)
        etree.parse(infile, parser)
        assert isinstance(self.result, Result)
        return self.result

    # pylint: disable=too-many-statements,too-many-branches
    def start(self, tag: str, attrib: dict[str, Any]) -> None:
        """
        This method is called for every opening XML tag.

        Attributes:
            tag (str): The name of the tag.
            attrib (dict[str, Any]): The attributes of the tag.
        """
        match tag:
            case "machine":
                machine = Machine(attrib["name"], attrib["cpu"], attrib["memory"])
                self.result.machines[machine.name] = machine
            case "config":
                config = Config(attrib["name"], attrib["template"])
                self.result.configs[config.name] = config
            case "system":
                self.system = System(
                    attrib["name"], attrib["version"], attrib["config"], attrib["measures"], self.system_order
                )
                self.result.systems[(self.system.name, self.system.version)] = self.system
                self.system_order += 1
                self.setting_order = 0
            case "setting":
                tag = attrib.pop("tag", None)
                name = attrib.pop("name")
                cmdline = attrib.pop("cmdline")
                assert self.system is not None
                setting = Setting(self.system, name, cmdline, tag, self.setting_order, attrib)
                self.system.settings[name] = setting
                self.setting_order += 1
            case "seqjob":
                name = attrib.pop("name")
                timeout = tools.xml_to_seconds_time(attrib.pop("timeout"))
                runs = int(attrib.pop("runs"))
                parallel = int(attrib.pop("parallel"))
                seq_job = SeqJob(name, timeout, runs, attrib, parallel)
                self.result.jobs[seq_job.name] = seq_job
            case "distjob":
                name = attrib.pop("name")
                timeout = tools.xml_to_seconds_time(attrib.pop("timeout"))
                runs = int(attrib.pop("runs"))
                script_mode = attrib.pop("script_mode")
                walltime = attrib.pop("walltime")
                partition = attrib.pop("partition")
                dist_job = DistJob(name, timeout, runs, attrib, script_mode, walltime, partition)
                self.result.jobs[dist_job.name] = dist_job
            case "benchmark":
                self.benchscope = True
                self.benchmark = Benchmark(attrib["name"])
                self.result.benchmarks[self.benchmark.name] = self.benchmark
            case "project":
                self.project = Project(attrib["name"], attrib["job"])
                self.result.projects[self.project.name] = self.project
            case "runspec":
                self.benchscope = False
                self.runspec = Runspec(
                    self.result.systems[(attrib["system"], attrib["version"])],
                    self.result.machines[attrib["machine"]],
                    self.result.benchmarks[attrib["benchmark"]],
                    self.result.systems[(attrib["system"], attrib["version"])].settings[attrib["setting"]],
                )
                assert self.project is not None
                self.project.runspecs.append(self.runspec)
            case "class":
                if self.benchscope:
                    assert self.benchmark is not None
                    self.benchclass = Class(self.benchmark, attrib["name"], int(attrib["id"]))
                    self.benchmark.classes[self.benchclass.id] = self.benchclass
                else:
                    assert self.runspec is not None
                    benchclass = self.runspec.benchmark.classes[int(attrib["id"])]
                    self.classresult = ClassResult(benchclass)
                    self.runspec.classresults.append(self.classresult)
            case "instance":
                if self.benchscope:
                    assert self.benchclass is not None
                    instance = Instance(self.benchclass, attrib["name"], int(attrib["id"]))
                    self.benchclass.instances[instance.id] = instance
                else:
                    assert self.classresult is not None
                    benchinst = self.classresult.benchclass.instances[int(attrib["id"])]
                    self.instresult = InstanceResult(benchinst)
                    self.classresult.instresults.append(self.instresult)
            case "run":
                if not self.benchscope:
                    assert self.instresult is not None
                    self.run = Run(self.instresult, int(attrib["number"]))
                    self.instresult.runs.append(self.run)
            case "measure":
                assert self.run is not None
                self.run.measures[attrib["name"]] = (attrib["type"], attrib["val"])

    def close(self) -> None:
        """
        This method is called for every closing XML tag.
        """
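For orientation, a minimal usage sketch of the parser added in this release (not part of the package diff itself); the result-file name "result.xml" is an assumption:

# Hypothetical sketch, not part of the package: parse a result file with the
# event-driven Parser above. The file name "result.xml" is an assumption.
from benchmarktool.result.parser import Parser

result = Parser().parse("result.xml")
print(sorted(result.projects))      # project names found in the file
print(sorted(result.benchmarks))    # benchmark names
for (name, version), system in result.systems.items():
    print(name, version, sorted(system.settings))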
benchmarktool/result/result.py
@@ -0,0 +1,453 @@
"""
Created on Jan 19, 2010

@author: Roland Kaminski
"""

from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Iterator, Optional

from benchmarktool.result.ods_gen import ODSDoc


class Result:
    """
    Stores the benchmark description and its results.
    """

    def __init__(self) -> None:
        """
        Initializes an empty result.
        """
        self.machines: dict[str, Machine] = {}
        self.configs: dict[str, Config] = {}
        self.systems: dict[tuple[str, str], System] = {}
        self.jobs: dict[str, SeqJob | DistJob] = {}
        self.benchmarks: dict[str, Benchmark] = {}
        self.projects: dict[str, Project] = {}

    def merge(self, projects: list["Project"]) -> "BenchmarkMerge":
        """
        Concatenates the benchmarks in the given projects into one benchmark set.

        Attributes:
            projects (list[Project]): The projects to merge with.
        """
        benchmarks: set[Benchmark] = set()
        for project in projects:
            for runspec in project:
                for classresult in runspec:
                    for instresult in classresult.instresults:
                        instresult.instance.values["max_runs"] = max(
                            instresult.instance.values["max_runs"], len(instresult.runs)
                        )
                benchmarks.add(runspec.benchmark)
        return BenchmarkMerge(benchmarks)

    def gen_office(
        self, out: str, sel_projects: set[str], measures: list[tuple[str, Any]], export: bool = False
    ) -> Optional[str]:
        """
        Prints the current result in open office spreadsheet format.
        Returns the name of the export file if values are exported.

        Attributes:
            out (str): The output file to write to.
            sel_projects (set[str]): The selected projects ("" for all).
            measures (list[tuple[str, Any]]): The measures to extract.
        """
        projects: list[Project] = []
        for project in self.projects.values():
            if len(sel_projects) == 0 or project.name in sel_projects:
                projects.append(project)
        benchmark_merge = self.merge(projects)

        sheet = ODSDoc(benchmark_merge, measures)
        for project in projects:
            for runspec in project:
                sheet.add_runspec(runspec)
        sheet.finish()
        sheet.make_ods(out)

        if export:
            # as_posix() for windows compatibility
            ex_file = Path(out).absolute().as_posix().replace(".ods", ".parquet")
            timeout_meta = {}
            for project in projects:
                for runspec in project.runspecs:
                    timeout_meta[
                        "_to_"
                        + runspec.setting.system.name
                        + "-"
                        + runspec.setting.system.version
                        + "/"
                        + runspec.setting.name
                    ] = [self.jobs[project.job].timeout]
            sheet.inst_sheet.export_values(ex_file, timeout_meta)
            return ex_file
        return None


class BenchmarkMerge:
    """
    Represents an (ordered) set of benchmark sets.
    """

    def __init__(self, benchmarks: set["Benchmark"]):
        """
        Initializes using the given set of benchmarks.

        Attributes:
            benchmarks (set[Benchmark]): Benchmarks to merge.
        """
        self.benchmarks = benchmarks
        inst_num = 0
        class_num = 0
        for benchclass in self:
            benchclass.values["row"] = class_num
            benchclass.values["inst_start"] = inst_num
            for instance in benchclass:
                instance.values["row"] = inst_num
                inst_num += max(instance.values["max_runs"], 1)
            benchclass.values["inst_end"] = inst_num - 1
            class_num += 1

    def __iter__(self) -> Iterator["Class"]:
        """
        Creates an iterator over all benchmark classes in all benchmarks.
        """
        for benchmark in sorted(self.benchmarks):
            yield from benchmark


@dataclass(order=True, frozen=True)
class Machine:
    """
    Represents a machine.

    Attributes:
        name (str): The name of the machine.
        cpu (str): String describing the CPU.
        memory (str): String describing the memory.
    """

    name: str
    cpu: str = field(compare=False)
    memory: str = field(compare=False)


@dataclass(order=True, frozen=True)
class Config:
    """
    Represents a config.

    Attributes:
        name (str): The name of the config.
        template (str): A path to the template file.
    """

    name: str
    template: str = field(compare=False)


@dataclass(order=True, frozen=True)
class System:
    """
    Represents a system.

    Attributes:
        name (str): The name of the system.
        version (str): The version.
        config (str): The config (a string).
        measures (str): The measurement function (a string).
        order (int): An integer denoting the occurrence in the XML file.
        settings (dict[str, Setting]): Dictionary of all system settings.
    """

    name: str
    version: str
    config: str = field(compare=False)
    measures: str = field(compare=False)
    order: int
    settings: dict[str, "Setting"] = field(default_factory=dict, compare=False)


@dataclass(order=True, frozen=True)
class Setting:
    """
    Represents a setting.

    Attributes:
        system (System): The system associated with the setting.
        name (str): The name of the setting.
        cmdline (str): Command line parameters.
        tag (str): Tags of the setting.
        order (int): An integer denoting the occurrence in the XML file.
        attr (dict[str, Any]): Arbitrary extra arguments.
    """

    system: "System"
    name: str
    cmdline: str = field(compare=False)
    tag: str = field(compare=False)
    order: int
    attr: dict[str, Any] = field(compare=False)


@dataclass(order=True, frozen=True)
class Job:
    """
    Represents a job.

    Attributes:
        name (str): The name of the job.
        timeout (int): Timeout of the job.
        runs (int): Number of repetitions per instance.
        attr (dict[str, Any]): Arbitrary extra arguments.
    """

    name: str
    timeout: int = field(compare=False)
    runs: int = field(compare=False)
    attr: dict[str, Any] = field(compare=False)


@dataclass(order=True, frozen=True)
class SeqJob(Job):
    """
    Represents a sequential job.

    Attributes:
        name (str): The name of the job.
        timeout (int): Timeout of the job.
        runs (int): Number of repetitions per instance.
        attr (dict[str, Any]): Arbitrary extra arguments.
        parallel (int): Number of processes to start in parallel.
    """

    parallel: int = field(compare=False)


@dataclass(order=True, frozen=True)
class DistJob(Job):
    """
    Represents a dist job.

    Attributes:
        name (str): The name of the job.
        timeout (int): Timeout of the job.
        runs (int): Number of repetitions per instance.
        attr (dict[str, Any]): Arbitrary extra arguments.
        script_mode (str): Specifies the script generation mode.
        walltime (str): The walltime for a distributed job.
        partition (str): The partition for a distributed job.
    """

    script_mode: str = field(compare=False)
    walltime: str = field(compare=False)
    partition: str = field(compare=False)


@dataclass(order=True, frozen=True)
class Benchmark:
    """
    Represents a benchmark, i.e., a set of instances.

    Attributes:
        name (str): The name of the benchmark.
        classes (dict[int, Class]): Benchmark classes in this benchmark.
    """

    name: str
    classes: dict[int, "Class"] = field(default_factory=dict, compare=False)

    def __iter__(self) -> Iterator["Class"]:
        """
        Creates an iterator over all benchmark classes.
        """
        yield from sorted(self.classes.values())


@dataclass(order=True, frozen=True)
class Class:
    """
    Represents a benchmark class.

    Attributes:
        benchmark (Benchmark): The benchmark associated with this class.
        name (str): The name of the benchmark class.
        id (int): A unique id (in the scope of the benchmark).
        instances (dict[int, Instance]): Instances belonging to this benchmark class.
        values (dict[str, Any]): Mutable dict with helper values.
    """

    benchmark: Benchmark
    name: str
    id: int = field(compare=False)
    instances: dict[int, "Instance"] = field(default_factory=dict, compare=False)
    values: dict[str, int] = field(default_factory=dict, compare=False)

    def __post_init__(self) -> None:
        """
        Initialize mutable helper variables.
        """
        self.values["row"] = 0
        self.values["inst_start"] = 0
        self.values["inst_end"] = 0

    def __iter__(self) -> Iterator["Instance"]:
        """
        Creates an iterator over all instances in the benchmark class.
        """
        yield from sorted(self.instances.values())


@dataclass(order=True, frozen=True)
class Instance:
    """
    Represents a benchmark instance.

    Attributes:
        benchclass (Class): The class of the instance.
        name (str): The name of the instance.
        id (int): A unique id (in the scope of the benchmark).
        max_runs (int): Max number of runs.
        values (dict[str, Any]): Mutable dict with helper values.
    """

    benchclass: Class
    name: str
    id: int = field(compare=False)
    values: dict[str, int] = field(default_factory=dict, compare=False)

    def __post_init__(self) -> None:
        """
        Initialize mutable helper variables.
        """
        self.values["max_runs"] = 0
        self.values["row"] = 0


@dataclass(order=True, frozen=True)
class Project:
    """
    Describes a project, i.e., a collection of run specifications.

    Attributes:
        name (str): The name of the project.
        job (str): The name of the associated job.
        runspecs (list['Runspec']): Run specifications of the project.
    """

    name: str
    job: str = field(compare=False)
    runspecs: list["Runspec"] = field(default_factory=list, compare=False)

    def __iter__(self) -> Iterator["Runspec"]:
        """
        Creates an iterator over all run specifications in the project.
        """
        yield from self.runspecs


@dataclass(order=True, frozen=True)
class Runspec:
    """
    Describes a run specification, i.e., how to run individual systems
    on a set of instances.

    Attributes:
        system (System): The system to evaluate.
        machine (Machine): The machine to run on.
        benchmark (Benchmark): The benchmark set to evaluate.
        setting (Setting): The setting to run with.
        classresults (list[ClassResult]): The benchmark results.
    """

    system: "System"
    machine: "Machine"
    benchmark: "Benchmark"
    setting: "Setting"
    classresults: list["ClassResult"] = field(default_factory=list, compare=False)

    def __iter__(self) -> Iterator["ClassResult"]:
        """
        Creates an iterator over all results (grouped by benchmark class).
        """
        yield from self.classresults


@dataclass(order=True, frozen=True)
class ClassResult:
    """
    Represents the results of all instances of a benchmark class.

    Attributes:
        benchclass (Class): The benchmark class for the results.
        instresults (list[InstanceResult]): Results of instances belonging to the benchmark class.
    """

    benchclass: "Class"
    instresults: list["InstanceResult"] = field(default_factory=list, compare=False)

    def __iter__(self) -> Iterator["InstanceResult"]:
        """
        Creates an iterator over all the individual results per instance.
        """
        yield from self.instresults


@dataclass(order=True, frozen=True)
class InstanceResult:
    """
    Represents the result of an individual instance (with possibly multiple runs).

    Attributes:
        instance (Instance): The instance for the results.
        runs (list[Run]): Results of runs belonging to the instance.
    """

    instance: "Instance"
    runs: list["Run"] = field(default_factory=list, compare=False)

    def __iter__(self) -> Iterator["Run"]:
        """
        Creates an iterator over the result of all runs.
        """
        yield from self.runs


@dataclass(order=True, frozen=True)
class Run:
    """
    Represents the result of an individual run of a benchmark instance.

    Attributes:
        instresult (InstanceResult): The associated instance result.
        number (int): The number of the run.
        measures (dict[str, tuple[str, str]]): Concrete measurements.
    """

    instresult: "InstanceResult"
    number: int
    measures: dict[str, tuple[str, str]] = field(default_factory=dict, compare=False)

    def iter(self, measures: list[tuple[str, Any]]) -> Iterator[tuple[str, str, str]]:
        """
        Creates an iterator over all measures captured during the run.
        Measures can be filtered by giving a selection of measure names.
        If this selection is empty, all measures are returned instead,
        sorted by their keys.

        Attributes:
            measures (list[tuple[str, Any]]): Selected measures.
        """
        if len(measures) == 0:
            for name in sorted(self.measures.keys()):
                yield name, self.measures[name][0], self.measures[name][1]
        else:
            for name, _ in measures:
                if name in self.measures:
                    yield name, self.measures[name][0], self.measures[name][1]
                else:
                    yield name, "None", "NaN"
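The dataclasses above form a nested result hierarchy (Project → Runspec → ClassResult → InstanceResult → Run), each level iterable. Below is a minimal sketch of walking that hierarchy and generating a spreadsheet; the file names and the use of an empty measure selection are assumptions:

# Hypothetical sketch, not part of the package: walk a parsed Result and
# write an ODS spreadsheet. "result.xml" and "results.ods" are assumed names.
from benchmarktool.result.parser import Parser

result = Parser().parse("result.xml")

for project in result.projects.values():
    for runspec in project:                 # one per system/setting/machine/benchmark
        for classresult in runspec:         # results grouped by benchmark class
            for instresult in classresult:  # one entry per instance
                for run in instresult:      # one entry per run
                    # an empty selection yields every measure, sorted by name
                    for name, vtype, value in run.iter([]):
                        print(runspec.setting.name, instresult.instance.name, name, vtype, value)

# sel_projects=set() selects all projects; with export=True the values are
# additionally exported next to the .ods file and the export path is returned.
# Passing an empty measure list is assumed to select all measures here.
exported = result.gen_office("results.ods", sel_projects=set(), measures=[], export=True)
print(exported)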