potassco-benchmark-tool 2.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- benchmarktool/__init__.py +0 -0
- benchmarktool/entry_points.py +417 -0
- benchmarktool/init/programs/gcat.sh +24 -0
- benchmarktool/init/runscripts/runscript-all.xml +49 -0
- benchmarktool/init/runscripts/runscript-dist.xml +20 -0
- benchmarktool/init/runscripts/runscript-example.xml +31 -0
- benchmarktool/init/runscripts/runscript-seq.xml +27 -0
- benchmarktool/init/templates/seq-generic-single.sh +27 -0
- benchmarktool/init/templates/seq-generic-zip.sh +14 -0
- benchmarktool/init/templates/seq-generic.sh +12 -0
- benchmarktool/init/templates/single.dist +25 -0
- benchmarktool/result/__init__.py +0 -0
- benchmarktool/result/ipynb_gen.py +477 -0
- benchmarktool/result/ods_config.py +42 -0
- benchmarktool/result/ods_gen.py +714 -0
- benchmarktool/result/parser.py +167 -0
- benchmarktool/result/result.py +453 -0
- benchmarktool/resultparser/__init__.py +0 -0
- benchmarktool/resultparser/clasp.py +88 -0
- benchmarktool/runscript/__init__.py +0 -0
- benchmarktool/runscript/parser.py +477 -0
- benchmarktool/runscript/runscript.py +1481 -0
- benchmarktool/tools.py +82 -0
- potassco_benchmark_tool-2.1.1.dist-info/METADATA +112 -0
- potassco_benchmark_tool-2.1.1.dist-info/RECORD +29 -0
- potassco_benchmark_tool-2.1.1.dist-info/WHEEL +5 -0
- potassco_benchmark_tool-2.1.1.dist-info/entry_points.txt +2 -0
- potassco_benchmark_tool-2.1.1.dist-info/licenses/LICENSE +21 -0
- potassco_benchmark_tool-2.1.1.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,1481 @@
|
|
|
1
|
+
"""
|
|
2
|
+
This module contains classes that describe a run script.
|
|
3
|
+
It can be used to create scripts to start a benchmark
|
|
4
|
+
specified by the run script.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
__author__ = "Roland Kaminski"
|
|
8
|
+
|
|
9
|
+
import importlib
|
|
10
|
+
import importlib.util
|
|
11
|
+
import os
|
|
12
|
+
import re
|
|
13
|
+
import sys
|
|
14
|
+
from dataclasses import dataclass, field
|
|
15
|
+
from functools import total_ordering
|
|
16
|
+
from types import ModuleType
|
|
17
|
+
from typing import Any, Iterator, Optional
|
|
18
|
+
|
|
19
|
+
from benchmarktool import tools
|
|
20
|
+
|
|
21
|
+
# pylint: disable=too-many-lines
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
@dataclass(order=True, frozen=True)
class Machine:
    """
    Description of a benchmark machine.

    Instances are immutable and ordered/compared by ``name`` alone; the
    descriptive fields are excluded from comparison.

    Attributes:
        name (str): Name of the machine.
        cpu (str): Some cpu description.
        memory (str): Some memory description.
    """

    name: str
    cpu: str = field(compare=False)
    memory: str = field(compare=False)

    def to_xml(self, out: Any, indent: str) -> None:
        """
        Dump the (pretty-printed) XML-representation of the machine.

        Attributes:
            out (Any): Output stream to write to.
            indent (str): Amount of indentation.
        """
        out.write(f'{indent}<machine name="{self.name}" cpu="{self.cpu}" memory="{self.memory}"/>\n')
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
@dataclass(order=True, frozen=True)
class System:
    """
    Describes a system: a solver description together with a set of
    settings.

    Ordering/equality consider ``name`` and ``version`` only.

    Attributes:
        name (str): The name of the system.
        version (str): The version of the system.
        measures (str): A string specifying the measurement function.
            This must be a function given in the config.
        order (int): An integer used to order different system.
            This integer should denote the occurrence in
            the run specification.
        config (Config): The system configuration.
        settings (dict[str, Setting]): Settings used with the system.
    """

    name: str
    version: str
    measures: str = field(compare=False)
    order: int = field(compare=False)
    config: "Config" = field(compare=False)
    settings: dict[str, "Setting"] = field(default_factory=dict, compare=False)

    def add_setting(self, setting: "Setting") -> None:
        """
        Adds a given setting to the system (keyed by its name).
        """
        self.settings[setting.name] = setting

    def to_xml(self, out: Any, indent: str, settings: Optional[list["Setting"]] = None) -> None:
        """
        Dump the (pretty-printed) XML-representation of the system.

        Attributes:
            out (Any): Output stream to write to.
            indent (str): Amount of indentation.
            settings (Optional[list["Setting"]]): If None all the settings of the system
                are printed, otherwise the given settings are printed.
        """
        assert isinstance(self.config, Config)
        out.write(
            '{1}<system name="{0.name}" version="{0.version}" '
            'measures="{0.measures}" config="{0.config.name}">\n'.format(self, indent)
        )
        selected = list(self.settings.values()) if settings is None else settings
        # settings are emitted in their declared order
        for current in sorted(selected, key=lambda s: s.order):
            current.to_xml(out, indent + "\t")
        out.write(f"{indent}</system>\n")
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
# pylint: disable=too-many-instance-attributes, too-many-positional-arguments
@dataclass(order=True, frozen=True)
class Setting:
    """
    A setting for a system: the command line options passed to the
    system, plus optional tags, encodings, and dist-related extras.

    Ordering/equality consider ``name`` only.

    Attributes:
        name (str): A name uniquely identifying a setting.
            (In the scope of a system)
        cmdline (str): A string of command line options.
        tag (set[str]): A set of tags.
        order (int): An integer specifying the order of settings.
            (This should denote the occurrence in the job specification.
            Again in the scope of a system.)
        disttemplate (str): Path to dist-template file. (dist only, related to mpi-version)
        attr (dict[str, Any]): A dictionary of additional optional attributes.
        dist_options (Optional[str]): Additional dist options for this setting.
        encodings (dict[str, set[str]]): Encodings used with this setting, keyed with tags.
    """

    name: str
    cmdline: str = field(compare=False)
    tag: set[str] = field(compare=False)
    order: int = field(compare=False)
    disttemplate: str = field(compare=False)
    attr: dict[str, Any] = field(compare=False)

    dist_options: str = field(default="", compare=False)
    encodings: dict[str, set[str]] = field(compare=False, default_factory=dict)

    def to_xml(self, out: Any, indent: str) -> None:
        """
        Dump a (pretty-printed) XML-representation of the setting.

        Attributes:
            out (Any): Output stream to write to.
            indent (str): Amount of indentation.
        """
        joined_tags = " ".join(sorted(self.tag))
        out.write(f'{indent}<setting name="{self.name}" cmdline="{self.cmdline}" tag="{joined_tags}"')
        # NOTE(review): `disttemplate` is annotated as str, so this check
        # should always hold; kept as-is to preserve original behavior.
        if self.disttemplate is not None:
            out.write(f' disttemplate="{self.disttemplate}"')
        for key, val in self.attr.items():
            out.write(f' {key}="{val}"')
        if self.dist_options != "":
            out.write(f' distopts="{self.dist_options}"')
        out.write(">\n")
        inner = indent + "\t"
        for enctag, encs in self.encodings.items():
            for enc in sorted(encs):
                # "_default_" marks untagged encodings; the tag attribute is omitted
                if enctag == "_default_":
                    out.write(f'{inner}<encoding file="{enc}"/>\n')
                else:
                    out.write(f'{inner}<encoding file="{enc}" tag="{enctag}"/>\n')
        out.write(f"{indent}</setting>\n")
|
|
160
|
+
|
|
161
|
+
|
|
162
|
+
@total_ordering
@dataclass(eq=False, frozen=True)
class Job:
    """
    Base class for all jobs.

    Jobs are hashed, compared, and ordered by ``name`` only; the remaining
    comparison operators are derived via ``functools.total_ordering``.

    Attributes:
        name (str): A unique name for a job.
        timeout (int): A timeout in seconds for individual benchmark runs.
        runs (int): The number of runs per benchmark.
        attr (dict[str, Any]): A dictionary of arbitrary attributes.
    """

    name: str
    timeout: int = field(compare=False)
    runs: int = field(compare=False)
    attr: dict[str, Any] = field(compare=False)

    @staticmethod
    def _require_job(other: Any) -> None:
        """Raise RuntimeError unless *other* is a Job."""
        if not isinstance(other, Job):
            raise RuntimeError("Cannot compare Job to non-Job")

    def __eq__(self, other: Any) -> bool:
        Job._require_job(other)
        return self.name == other.name

    def __lt__(self, other: Any) -> bool:
        Job._require_job(other)
        return self.name < other.name

    def __hash__(self) -> int:
        return hash(self.name)

    def _to_xml(self, out: Any, indent: str, xmltag: str, extra: str) -> None:
        """
        Helper function to dump a (pretty-printed) XML-representation of a job.

        Attributes:
            out (Any): Output stream to write to.
            indent (str): Amount of indentation.
            xmltag (str): Tag name for the job.
            extra (str): Additional arguments for the job.
        """
        out.write(f'{indent}<{xmltag} name="{self.name}" timeout="{self.timeout}" runs="{self.runs}"{extra}')
        # arbitrary extra attributes are appended verbatim
        for key, val in self.attr.items():
            out.write(f' {key}="{val}"')
        out.write("/>\n")

    def script_gen(self) -> Any:
        """
        Has to be overwritten by subclasses.
        """
        raise NotImplementedError
|
|
215
|
+
|
|
216
|
+
|
|
217
|
+
# pylint: disable=too-few-public-methods
@dataclass
class Run:
    """
    Base class for all runs.

    Attributes:
        path (str): Path that holds the target location for start scripts.
        root (str): directory relative to the location of the run's path.
    """

    path: str

    root: str = field(init=False)

    def __post_init__(self) -> None:
        # root is the way back from the run's directory to the working directory
        self.root = os.path.relpath(os.curdir, self.path)
|
|
234
|
+
|
|
235
|
+
|
|
236
|
+
# pylint: disable=too-many-instance-attributes, too-many-positional-arguments, too-few-public-methods
@dataclass
class SeqRun(Run):
    """
    Describes a sequential run.

    The derived fields (files, encodings, args, solver, timeout, memout)
    are computed in ``__post_init__`` from the job, run specification,
    and instance.

    Attributes:
        path (str): Path that holds the target location for start scripts.
        run (int): The number of the run.
        job (Job): A reference to the job description.
        runspec (Runspec): A reference to the run description.
        instance (Benchmark.Instance): A reference to the instance to benchmark.
        root (str): Directory relative to the location of the run's path.
        files (str): Relative paths to all instances.
        encodings (str): Relative paths to all encodings.
        args (str): The command line arguments for this run.
        solver (str): The solver for this run.
        timeout (int): The timeout of this run.
        memout (int): The memory limit of this run.
    """

    run: int
    job: "Job"
    runspec: "Runspec"
    instance: "Benchmark.Instance"

    files: str = field(init=False)
    encodings: str = field(init=False)
    args: str = field(init=False)
    solver: str = field(init=False)
    timeout: int = field(init=False)
    memout: int = field(init=False)

    def __post_init__(self) -> None:
        super().__post_init__()
        # instance files, made relative to the run directory and double-quoted
        self.files = " ".join(f'"{os.path.relpath(p, self.path)}"' for p in sorted(self.instance.paths()))

        # gather encodings: the instance's own, the setting's untagged
        # defaults, and every setting encoding matching an instance tag
        selected = self.instance.encodings.union(self.runspec.setting.encodings.get("_default_", set()))
        for enctag in self.instance.enctags:
            selected = selected.union(self.runspec.setting.encodings.get(enctag, set()))
        self.encodings = " ".join(f'"{os.path.relpath(e, self.path)}"' for e in sorted(selected))

        self.args = self.runspec.setting.cmdline
        self.solver = f"{self.runspec.system.name}-{self.runspec.system.version}"
        self.timeout = self.job.timeout
        # memory limit in MB; falls back to 20000 when the job carries no "memout"
        self.memout = int(self.job.attr.get("memout", 20000))
|
|
283
|
+
|
|
284
|
+
|
|
285
|
+
class ScriptGen:
    """
    A class providing basic functionality to generate
    start scripts for arbitrary jobs and evaluation of results.
    """

    def __init__(self, job: "Job"):
        """
        Initializes the script generator.

        Attributes:
            job (Job): A reference to the associated job.
        """
        # when True, runs with a ".finished" marker file are not regenerated
        self.skip = False
        self.job = job
        # (runspec, directory, script name) for every generated start script
        self.startfiles: list[tuple["Runspec", str, str]] = []

    def set_skip(self, skip: bool) -> None:
        """
        Set whether to skip already finished runs.

        Attributes:
            skip (bool): Whether to skip.
        """
        self.skip = skip

    def _path(self, runspec: "Runspec", instance: "Benchmark.Instance", run: int) -> str:
        """
        Returns the relative path to the start script location.

        Attributes:
            runspec (Runspec): The run specification for the start script.
            instance (Benchmark.Instance): The benchmark instance for the start script.
            run (int): The number of the run for the start script.
        """
        return os.path.join(runspec.path(), instance.benchclass.name, instance.name, "run%d" % run)

    def add_to_script(self, runspec: "Runspec", instance: "Benchmark.Instance") -> None:
        """
        Creates a new start script for the given instance.

        Attributes:
            runspec (Runspec): The run specification for the start script.
            instance (Benchmark.Instance): The benchmark instance for the start script.
        """
        skip = self.skip
        if runspec.system.config:
            for run in range(1, self.job.runs + 1):
                path = self._path(runspec, instance, run)
                tools.mkdir_p(path)
                startpath = os.path.join(path, "start.sh")
                finish = os.path.join(path, ".finished")
                # honor the skip flag: a ".finished" marker means this run already completed
                if skip and os.path.isfile(finish):
                    continue
                with open(runspec.system.config.template, "r", encoding="utf8") as f:
                    template = f.read()
                # the template's {run.*} placeholders are filled from a SeqRun
                with open(startpath, "w", encoding="utf8") as startfile:
                    startfile.write(template.format(run=SeqRun(path, run, self.job, runspec, instance)))
                self.startfiles.append((runspec, path, "start.sh"))
                tools.set_executable(startpath)

    def eval_results(
        self, out: Any, indent: str, runspec: "Runspec", instance: "Benchmark.Instance", parx: int = 2
    ) -> None:
        """
        Parses the results of a given benchmark instance and outputs them as XML.

        Attributes:
            out (Any): Output stream to write to.
            indent (str): Amount of indentation.
            runspec (Runspec): The run specification of the benchmark.
            instance (Benchmark.Instance): The benchmark instance.
            parx (int): Factor for penalized-average-runtime score.
        """

        def import_from_path(module_name: str, file_path: str) -> ModuleType:  # nocoverage
            """
            Helper function to import modules from path.

            Attributes:
                module_name (str): Name of the module.
                file_path (str): Path to the module.
            """
            spec = importlib.util.spec_from_file_location(module_name, file_path)
            assert spec is not None
            module = importlib.util.module_from_spec(spec)
            sys.modules[module_name] = module
            assert spec.loader is not None
            spec.loader.exec_module(module)
            return module

        result_parser: Optional[ModuleType] = None
        # dynamically import result parser;
        # prioritize local resultparsers over those included in the package
        rp_name = "{0}".format(runspec.system.measures)
        try:
            result_parser = import_from_path(
                rp_name, os.path.join(os.getcwd(), "resultparsers", "{0}.py".format(runspec.system.measures))
            )
        except FileNotFoundError:
            try:
                result_parser = importlib.import_module(f"benchmarktool.resultparser.{rp_name}")
            except ModuleNotFoundError:
                # fixed: the message previously ended "!\n." with a stray
                # period after the newline
                sys.stderr.write(
                    f"*** ERROR: Result parser import failed: {rp_name}! "
                    "All runs using this parser will have no measures recorded!\n"
                )
        for run in range(1, self.job.runs + 1):
            out.write('{0}<run number="{1}">\n'.format(indent, run))
            # result parser call; if the parser failed to import,
            # result_parser is None and the AttributeError below makes the
            # run come out empty (no measures)
            try:
                result: dict[str, tuple[str, Any]] = result_parser.parse(  # type: ignore
                    self._path(runspec, instance, run), runspec, instance
                )
                # penalized-average-runtime score: timeouts count parx times the timeout
                if all(key in result for key in ["time", "timeout"]):
                    value = parx * self.job.timeout if result["timeout"][1] else result["time"][1]
                    result[f"par{parx}"] = ("float", value)

                for key, (valtype, val) in sorted(result.items()):
                    out.write(
                        '{0}<measure name="{1}" type="{2}" val="{3}"/>\n'.format(indent + "\t", key, valtype, val)
                    )
            except AttributeError:
                # NOTE(review): this also swallows AttributeErrors raised
                # inside a parser; kept for backward compatibility
                pass

            out.write("{0}</run>\n".format(indent))
|
|
412
|
+
|
|
413
|
+
|
|
414
|
+
class SeqScriptGen(ScriptGen):
    """
    A class that generates and evaluates start scripts for sequential runs.

    The generated "start.py" driver runs the collected start scripts on a
    fixed number of cores in parallel (see SeqJob.parallel).
    """

    def __init__(self, seq_job: "SeqJob"):
        """
        Initializes the script generator.

        Attributes:
            seq_job (SeqJob): A reference to the associated sequential job.
        """
        ScriptGen.__init__(self, seq_job)

    # pylint: disable=line-too-long
    def gen_start_script(self, path: str) -> None:
        """
        Generates a start script that can be used to start all scripts
        generated using add_to_script().

        Writes an executable "start.py" into *path*. The embedded template
        receives two format slots: {0} is the comma-separated, repr-quoted
        queue of start-script paths (relative to *path*), {1} is the number
        of parallel cores taken from the job. Double braces ({{...}}) in
        the template survive formatting as literal braces in the output.

        Attributes:
            path (str): The target location for the script.
        """
        assert isinstance(self.job, SeqJob)
        tools.mkdir_p(path)
        with open(os.path.join(path, "start.py"), "w", encoding="utf8") as startfile:
            # build the Python list literal for the queue, one repr-quoted
            # relative path per start script collected by add_to_script()
            queue = ""
            comma = False
            for _, instpath, instname in self.startfiles:
                relpath = os.path.relpath(instpath, path)
                if comma:
                    queue += ","
                else:
                    comma = True
                queue += repr(os.path.join(relpath, instname))
            startfile.write(
                """\
#!/usr/bin/python -u

import optparse
import threading
import subprocess
import os
import sys
import signal
import time

queue = [{0}]

class Main:
    def __init__(self):
        self.running = set()
        self.cores = set()
        self.started = 0
        self.total = None
        self.finished = threading.Condition()
        self.coreLock = threading.Lock()
        c = 0
        while len(self.cores) < {1}:
            self.cores.add(c)
            c += 1

    def finish(self, thread):
        self.finished.acquire()
        self.running.remove(thread)
        with self.coreLock:
            self.cores.add(thread.core)
        self.finished.notify()
        self.finished.release()

    def start(self, cmd):
        core = 0
        with self.coreLock:
            core = self.cores.pop()
        thread = Run(cmd, self, core)
        self.started += 1
        self.running.add(thread)
        print("({{0}}/{{1}}/{{2}}/{{4}}) {{3}}".format(len(self.running), self.started, self.total, cmd, core))
        thread.start()

    def run(self, queue):
        signal.signal(signal.SIGTERM, self.exit)
        signal.signal(signal.SIGINT, self.exit)
        signal.signal(signal.SIGHUP, signal.SIG_IGN)
        self.finished.acquire()
        self.total = len(queue)
        for cmd in queue:
            while len(self.running) >= {1}:
                self.finished.wait()
            self.start(cmd)
        while len(self.running) != 0:
            self.finished.wait()
        self.finished.release()

    def exit(self, *args):
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        signal.signal(signal.SIGTERM, signal.SIG_IGN)
        print("WARNING: it is not guaranteed that all processes will be terminated!")
        print("sending sigterm ...")
        os.killpg(os.getpgid(0), signal.SIGTERM)
        print("waiting 10s...")
        time.sleep(10)
        print("sending sigkill ...")
        os.killpg(os.getpgid(0), signal.SIGKILL)

class Run(threading.Thread):
    def __init__(self, cmd, main, core):
        threading.Thread.__init__(self)
        self.cmd = cmd
        self.main = main
        self.core = core
        self.proc = None

    def run(self):
        path, script = os.path.split(self.cmd)
        openArgs = dict(cwd=path)
        if sys.version_info[:3] >= (3,2,0):
            openArgs["start_new_session"] = True
        else:
            openArgs["preexec_fn"] = os.setsid
        self.proc = subprocess.Popen(["bash", script, str(self.core)], **openArgs)
        self.proc.wait()
        self.main.finish(self)

def gui():
    import Tkinter
    class App:
        def __init__(self):
            root = Tkinter.Tk()
            frame = Tkinter.Frame(root)
            scrollx = Tkinter.Scrollbar(frame, orient=Tkinter.HORIZONTAL)
            scrolly = Tkinter.Scrollbar(frame)
            list = Tkinter.Listbox(frame, selectmode=Tkinter.MULTIPLE)

            for script in queue:
                list.insert(Tkinter.END, script)

            scrolly.config(command=list.yview)
            scrollx.config(command=list.xview)
            list.config(yscrollcommand=scrolly.set)
            list.config(xscrollcommand=scrollx.set)

            scrolly.pack(side=Tkinter.RIGHT, fill=Tkinter.Y)
            scrollx.pack(side=Tkinter.BOTTOM, fill=Tkinter.X)
            list.pack(fill=Tkinter.BOTH, expand=1)

            button = Tkinter.Button(root, text='Run', command=self.pressed)

            frame.pack(fill=Tkinter.BOTH, expand=1)
            button.pack(side=Tkinter.BOTTOM, fill=Tkinter.X)

            self.root = root
            self.list = list
            self.run = False
            self.queue = []

        def pressed(self):
            sel = self.list.curselection()
            for index in sel:
                global queue
                self.queue.append(queue[int(index)])
            self.root.destroy()

        def start(self):
            self.root.mainloop()
            return self.queue

    global queue
    queue.sort()
    queue = App().start()

if __name__ == '__main__':
    usage = "usage: %prog [options] <runscript>"
    parser = optparse.OptionParser(usage=usage)
    parser.add_option("-g", "--gui", action="store_true", dest="gui", default=False, help="start gui to selectively start benchmarks")

    opts, args = parser.parse_args(sys.argv[1:])
    if len(args) > 0: parser.error("no arguments expected")

    os.chdir(os.path.dirname(sys.argv[0]))
    if opts.gui: gui()

    m = Main()
    m.run(queue)
""".format(
                    queue, self.job.parallel
                )
            )
        # the driver script must be directly executable
        tools.set_executable(os.path.join(path, "start.py"))
|
|
603
|
+
|
|
604
|
+
|
|
605
|
+
class DistScriptGen(ScriptGen):
    """
    A class that generates and evaluates start scripts for dist runs.

    Start scripts are batched into "*.dist" submission files (one batch per
    unique combination of dist template, dist options, walltime, cpt, and
    partition) plus a "start.sh" that sbatch-es every batch file.
    """

    class DistScript:
        """
        Class realizing a dist script.

        Accumulates start scripts for one batch and writes them out via a
        dist template; ``write()`` flushes the current batch, ``next()``
        flushes and resets the accumulators.
        """

        def __init__(self, runspec: "Runspec", path: str, queue: list[str]):
            """
            Initializes dist script.

            Attributes:
                runspec (Runspec): Associated runspecification.
                path (str): Target location for the script.
                queue (list[str]): Script queue (shared; written batch files are appended to it).
            """
            self.runspec = runspec
            self.path = path
            self.queue = queue
            # number of start scripts accumulated in the current batch
            self.num = 0
            self.time = 0  # = None, next() sets start values anyway,
            # None only causes issues with typecheck
            self.startscripts = ""  # = None
            self.next()

        def write(self) -> None:
            """
            Write script.

            No-op when the current batch is empty; otherwise renders the
            dist template with the accumulated start scripts and appends
            the resulting file to the shared queue.
            """
            if self.num > 0:
                assert isinstance(self.runspec.project, Project)
                assert isinstance(self.runspec.project.job, DistJob)
                self.num = 0
                with open(self.runspec.setting.disttemplate, "r", encoding="utf8") as f:
                    template = f.read()
                # batch files are numbered by their position in the queue
                script = os.path.join(self.path, "start{0:04}.dist".format(len(self.queue)))
                # comma-separated dist options become one option per line
                if self.runspec.setting.dist_options != "":
                    distopts = "\n".join(self.runspec.setting.dist_options.split(",")) + "\n"
                else:
                    distopts = ""
                with open(script, "w", encoding="utf8") as f:
                    f.write(
                        template.format(
                            walltime=tools.seconds_to_slurm_time(self.runspec.project.job.walltime),
                            jobs=self.startscripts,
                            cpt=self.runspec.project.job.cpt,
                            partition=self.runspec.project.job.partition,
                            dist_options=distopts,
                        )
                    )
                self.queue.append(script)

        def next(self) -> None:
            """
            Switch to and setup next script.

            Flushes the current batch (if any) and resets all accumulators.
            """
            self.write()
            self.startscripts = ""
            self.num = 0
            self.time = 0

        def append(self, startfile: str) -> None:
            """
            Add startfile to list of jobs.

            Attributes:
                startfile (str): Start script.
            """
            self.num += 1
            self.startscripts += startfile + "\n"

    def __init__(self, dist_job: "DistJob"):
        """
        Initializes the script generator.

        Attributes:
            dist_job (DistJob): A reference to the associated dist job.
        """
        ScriptGen.__init__(self, dist_job)

    def gen_start_script(self, path: str) -> None:
        """
        Generates a start script that can be used to start all scripts
        generated using add_to_script().

        Attributes:
            path (str): The target location for the script.
        """
        assert isinstance(self.job, DistJob)
        tools.mkdir_p(path)
        queue: list[str] = []
        # one DistScript per unique submission configuration
        dist_scripts: dict[tuple[str, Optional[str], int, int, str], DistScriptGen.DistScript] = {}
        for runspec, instpath, instname in self.startfiles:
            assert isinstance(runspec.project, Project)
            assert isinstance(runspec.project.job, DistJob)
            relpath = os.path.relpath(instpath, path)
            job_script = os.path.join(relpath, instname)
            dist_key = (
                runspec.setting.disttemplate,
                runspec.setting.dist_options,
                runspec.project.job.walltime,
                runspec.project.job.cpt,
                runspec.project.job.partition,
            )

            if dist_key not in dist_scripts:
                dist_script = DistScriptGen.DistScript(runspec, path, queue)
                dist_scripts[dist_key] = dist_script
            else:
                dist_script = dist_scripts[dist_key]

            if self.job.script_mode == "multi":
                # "multi": one batch file per start script
                if dist_script.num > 0:
                    dist_script.next()
                dist_script.append(job_script)
            elif self.job.script_mode == "timeout":
                # "timeout": pack scripts until the summed timeouts
                # (plus 300s slack each) would exceed the walltime
                if dist_script.time + runspec.project.job.timeout + 300 >= runspec.project.job.walltime:
                    dist_script.next()
                dist_script.time += runspec.project.job.timeout + 300
                dist_script.append(job_script)

        # flush any partially filled batches
        for dist_script in dist_scripts.values():
            dist_script.write()

        # "start.sh" submits every generated batch file via sbatch
        with open(os.path.join(path, "start.sh"), "w", encoding="utf8") as startfile:
            startfile.write(
                """#!/bin/bash\n\ncd "$(dirname $0)"\n"""
                + "\n".join(['sbatch "{0}"'.format(os.path.basename(x)) for x in queue])
            )
        tools.set_executable(os.path.join(path, "start.sh"))
|
|
738
|
+
|
|
739
|
+
|
|
740
|
+
@dataclass(eq=False, frozen=True)
class SeqJob(Job):
    """
    Describes a sequential job.

    Attributes:
        name (str): A unique name for a job.
        timeout (int): A timeout in seconds for individual benchmark runs.
        runs (int): The number of runs per benchmark.
        attr (dict[str,Any]): A dictionary of arbitrary attributes.
        parallel (int): The number of runs that can be started in parallel.
    """

    parallel: int = field(compare=False)

    def script_gen(self) -> "SeqScriptGen":
        """
        Returns a class that can generate start scripts and evaluate benchmark results.
        (see SeqScriptGen)
        """
        return SeqScriptGen(self)

    def to_xml(self, out: Any, indent: str) -> None:
        """
        Dump a (pretty-printed) XML-representation of the sequential job.

        Attributes:
            out (Any): Output stream to write to.
            indent (str): Amount of indentation.
        """
        self._to_xml(out, indent, "seqjob", f' parallel="{self.parallel}"')
|
|
772
|
+
|
|
773
|
+
|
|
774
|
+
@dataclass(eq=False, frozen=True)
class DistJob(Job):
    """
    A job that is distributed over a cluster (via slurm).

    Attributes:
        name (str): A unique name for a job.
        timeout (int): A timeout in seconds for individual benchmark runs.
        runs (int): The number of runs per benchmark.
        attr (dict[str,Any]): A dictionary of arbitrary attributes.
        script_mode (str): Specifies the script generation mode.
        walltime (int): The walltime for a distributed job.
        cpt (int): Number of cpus per task for distributed jobs.
        partition (str): Partition to be used in the clusters (kr by default).
    """

    script_mode: str = field(compare=False)
    walltime: int = field(compare=False)
    cpt: int = field(compare=False)
    partition: str = field(compare=False)

    def script_gen(self) -> "DistScriptGen":
        """
        Returns a class that can generate start scripts and evaluate benchmark results.
        (see DistScriptGen)
        """
        return DistScriptGen(self)

    def to_xml(self, out: Any, indent: str) -> None:
        """
        Dump a (pretty-printed) XML-representation of the distributed job.

        Attributes:
            out (Any): Output stream to write to
            indent (str): Amount of indentation
        """
        attrs = (
            f' script_mode="{self.script_mode}" walltime="{self.walltime}"'
            f' cpt="{self.cpt}" partition="{self.partition}"'
        )
        Job._to_xml(self, out, indent, "distjob", attrs)
|
|
814
|
+
|
|
815
|
+
|
|
816
|
+
@dataclass(order=True, frozen=True)
class Config:
    """
    A named configuration. Currently this only carries the template
    that is used for start script generation.

    Attributes:
        name (str): A name uniquely identifying the configuration.
        template (str): A path to the template for start script generation.
    """

    name: str
    template: str = field(compare=False)

    def to_xml(self, out: Any, indent: str) -> None:
        """
        Dump a (pretty-printed) XML-representation of the configuration.

        Attributes:
            out (Any): Output stream to write to.
            indent (str): Amount of indentation.
        """
        out.write(f'{indent}<config name="{self.name}" template="{self.template}"/>\n')
|
|
839
|
+
|
|
840
|
+
|
|
841
|
+
@dataclass(order=True, unsafe_hash=True)
class Benchmark:
    """
    Describes a benchmark. This includes a set of classes
    that describe where to find particular instances.

    Attributes:
        name (str): The name of the benchmark set.
        elements (list[Any]): A list of all benchmark elements (Folder/Files objects).
        instances (dict[Benchmark.Class, set[Benchmark.Instance]]): Benchmark instances grouped by class.
        initialized (bool): Whether the benchmark set is initialized or not.
    """

    name: str
    elements: list[Any] = field(default_factory=list, compare=False)
    instances: dict["Benchmark.Class", set["Benchmark.Instance"]] = field(default_factory=dict, compare=False)
    initialized: bool = field(default=False, compare=False)

    @dataclass(order=True, frozen=True)
    class Class:
        """
        Describes a benchmark class.

        Attributes:
            name (str): A name uniquely identifying a benchmark class (per benchmark).
            id (Optional[int]): A numeric identifier (assigned during Benchmark.init).
        """

        name: str
        id: Optional[int] = field(default=None, compare=False)

    @dataclass(order=True, frozen=True)
    class Instance:
        """
        Describes a benchmark instance.

        Attributes:
            location (str): The location of the benchmark instance.
            benchclass (Benchmark.Class): The class of the instance.
            name (str): The name of the instance.
            files (set[str]): Instance files associated with the instance.
            encodings (set[str]): Encodings associated with the instance.
            enctags (set[str]): Encoding tags associated with the instance.
            id (Optional[int]): A numeric identifier (assigned during Benchmark.init).
        """

        location: str = field(compare=False)
        benchclass: "Benchmark.Class" = field(compare=False)
        name: str
        files: set[str] = field(compare=False)
        encodings: set[str] = field(compare=False)
        enctags: set[str] = field(compare=False)
        id: Optional[int] = field(default=None, compare=False)

        def to_xml(self, out: Any, indent: str) -> None:
            """
            Dump a (pretty-printed) XML-representation of the instance.

            Attributes:
                out (Any): Output stream to write to
                indent (str): Amount of indentation
            """
            out.write('{1}<instance name="{0.name}" id="{0.id}">\n'.format(self, indent))
            # sort for deterministic XML output
            for instance in sorted(self.files):
                out.write('{1}<file name="{0}"/>\n'.format(instance, indent + "\t"))
            out.write("{0}</instance>\n".format(indent))

        def paths(self) -> Iterator[str]:
            """
            Yields the locations of the instance files by concatenating
            location, class name and file name.
            """
            for file in self.files:
                yield os.path.join(self.location, self.benchclass.name, file)

    class Folder:
        """
        Describes a folder that should recursively be scanned for benchmarks.
        """

        def __init__(self, path: str, group: bool = False):
            """
            Initializes a benchmark folder.

            Attributes:
                path (str): The location of the folder.
                group (bool): Whether to group instances by their file name prefix.
            """
            self.path = path
            self.group = group
            # path prefixes (relative to self.path) to skip while scanning
            self.prefixes: set[str] = set()
            self.encodings: set[str] = set()
            self.enctags: set[str] = set()

        def add_ignore(self, prefix: str) -> None:
            """
            Can be used to ignore certain sub-folders or instances
            by giving a path prefix that shall be ignored.

            Attributes:
                prefix (str): The prefix to be ignored.
            """
            self.prefixes.add(os.path.normpath(prefix))

        def add_encoding(self, file: str) -> None:
            """
            Can be used to add encodings, which will be called together
            with all instances in this folder.

            Attributes:
                file (str): The encoding file.
            """
            self.encodings.add(os.path.normpath(file))

        def add_enctags(self, tags: set[str]) -> None:
            """
            Can be used to add encoding tags, which refer to encodings
            specified by the setting, which will be called together
            with all instances in this folder.

            Attributes:
                tags (set[str]): The encoding tags.
            """
            self.enctags = self.enctags.union(tags)

        def _skip(self, root: str, path: str) -> bool:
            """
            Returns whether a given path should be ignored.

            Attributes:
                root (str): The root path.
                path (str): Some path relative to the root path.
            """
            if path == ".svn":
                return True
            path = os.path.normpath(os.path.join(root, path))
            return path in self.prefixes

        def init(self, benchmark: "Benchmark") -> None:
            """
            Recursively scans the folder and adds all instances found to the given benchmark.

            Attributes:
                benchmark (Benchmark): The benchmark to be populated.
            """
            for root, dirs, files in os.walk(self.path):
                relroot = os.path.relpath(root, self.path)
                sub = []
                instances: dict[str, set[str]] = {}
                for dirname in dirs:
                    if self._skip(relroot, dirname):
                        continue
                    sub.append(dirname)
                # prune ignored directories in place so os.walk does not descend into them
                dirs[:] = sub
                for filename in files:
                    if self._skip(relroot, filename):
                        continue
                    # group(1): name without the last extension; group(2): dot-free prefix
                    m = re.match(r"^(([^\.]+).*)\.[^.]+$", filename)
                    if m is None:
                        raise RuntimeError("Invalid file name.")
                    if self.group:
                        # remove all file extensions, file.1.txt -> file
                        group = m.group(2)
                    else:
                        # remove last file extension, file.1.txt -> file.1
                        group = m.group(1)
                    if group not in instances:
                        instances[group] = set()
                    instances[group].add(filename)
                for group, instfiles in instances.items():
                    benchmark.add_instance(self.path, relroot, (group, instfiles), self.encodings, self.enctags)

    class Files:
        """
        Describes a set of individual files in a benchmark.
        """

        def __init__(self, path: str):
            """
            Initializes to the empty set of files.

            Attributes:
                path (str): Root path, all file paths are relative to this path.
            """
            self.path = path
            # maps instance group name -> set of file paths belonging to it
            self.files: dict[str, set[str]] = {}
            self.encodings: set[str] = set()
            self.enctags: set[str] = set()

        def add_file(self, path: str, group: Optional[str] = None) -> None:
            """
            Adds a file to the set of files.

            Attributes:
                path (str): Location of the file.
                group (Optional[str]): Instance group; derived from the file name if omitted.
            """
            if group is None:
                m = re.match(r"^(([^\.]+).*)\.[^.]+$", os.path.basename(path))
                if m is None:
                    raise RuntimeError("Invalid file name.")
                # remove last file extension, file.1.txt -> file.1
                group = m.group(1)
            if group not in self.files:
                self.files[group] = set()
            self.files[group].add(os.path.normpath(path))

        def add_encoding(self, file: str) -> None:
            """
            Can be used to add encodings, which will be called together
            with all instances in these files.

            Attributes:
                file (str): The encoding file.
            """
            self.encodings.add(os.path.normpath(file))

        def add_enctags(self, tags: set[str]) -> None:
            """
            Can be used to add encoding tags, which refer to encodings
            specified by the setting, which will be called together
            with all instances in this file set.

            Attributes:
                tags (set[str]): The encoding tags.
            """
            self.enctags = self.enctags.union(tags)

        def init(self, benchmark: "Benchmark") -> None:
            """
            Adds all files in the set to the given benchmark (if they exist).

            Attributes:
                benchmark (Benchmark): The benchmark to be populated.

            Raises:
                FileNotFoundError: If a registered file does not exist.
                RuntimeError: If files of one group live in different directories.
            """
            for group, files in self.files.items():
                for file in files:
                    if not os.path.exists(os.path.join(self.path, file)):
                        raise FileNotFoundError("Specified instance file does not exist.")
                paths = list(map(os.path.split, sorted(files)))
                # all files of a group must share a single parent directory
                if len(set(map(lambda x: x[0], paths))) != 1:
                    raise RuntimeError("Instances of the same group must be in the same directory.")
                relroot = paths[0][0]
                benchmark.add_instance(
                    self.path, relroot, (group, set(map(lambda x: x[1], paths))), self.encodings, self.enctags
                )

    def add_element(self, element: Any) -> None:
        """
        Adds elements to the benchmark, e.g, files or folders.

        Attributes:
            element (Any): The element to add.
        """
        self.elements.append(element)

    def add_instance(
        self, root: str, relroot: str, files: tuple[str, set[str]], encodings: set[str], enctags: set[str]
    ) -> None:
        """
        Adds an instance to the benchmark set. (This function
        is called during initialization by the benchmark elements)

        Attributes:
            root (str): The root folder of the instance.
            relroot (str): The folder relative to the root folder (used as the class name).
            files (tuple[str,set[str]]): The name and files of the instance.
            encodings (set[str]): The encodings associated to the instance.
            enctags (set[str]): The encoding tags associated to the instance.
        """
        classname = Benchmark.Class(relroot)
        if classname not in self.instances:
            self.instances[classname] = set()
        self.instances[classname].add(Benchmark.Instance(root, classname, files[0], files[1], encodings, enctags))

    def init(self) -> None:
        """
        Populates the benchmark set with instances specified by the
        benchmark elements added; assigns stable numeric ids to classes
        and instances (in sorted order). Idempotent.
        """
        if not self.initialized:
            for element in self.elements:
                element.init(self)
            # rebuild the instance map with ids assigned by sorted position
            id_instances: dict["Benchmark.Class", set["Benchmark.Instance"]] = {}
            classid = 0
            for classname in sorted(self.instances.keys()):
                id_class = Benchmark.Class(classname.name, classid)
                id_instances[id_class] = set()
                classid += 1
                instanceid = 0
                for instance in sorted(self.instances[classname]):
                    id_instances[id_class].add(
                        Benchmark.Instance(
                            instance.location,
                            id_class,
                            instance.name,
                            instance.files,
                            instance.encodings,
                            instance.enctags,
                            instanceid,
                        )
                    )
                    instanceid += 1
            self.instances = id_instances
            self.initialized = True

    def to_xml(self, out: Any, indent: str) -> None:
        """
        Dump the (pretty-printed) XML-representation of the benchmark set.

        Attributes:
            out (Any): Output stream to write to.
            indent (str): Amount of indentation.
        """
        self.init()
        out.write('{1}<benchmark name="{0}">\n'.format(self.name, indent))
        for classname in sorted(self.instances.keys()):
            instances = self.instances[classname]
            out.write('{1}<class name="{0.name}" id="{0.id}">\n'.format(classname, indent + "\t"))
            for instance in sorted(instances):
                instance.to_xml(out, indent + "\t\t")
            out.write("{0}</class>\n".format(indent + "\t"))
        out.write("{0}</benchmark>\n".format(indent))
|
|
1163
|
+
|
|
1164
|
+
|
|
1165
|
+
@dataclass(order=True, frozen=True)
class Runspec:
    """
    A run specification: the machine, system, setting, and project
    with which a benchmark is run.

    Attributes:
        machine (Machine): The machine to run on.
        system (System): The system to run with.
        setting (Setting): The setting to run with.
        benchmark (Benchmark): The benchmark.
        project (Project): The associated project.
    """

    machine: "Machine"
    system: "System"
    setting: "Setting"
    benchmark: "Benchmark"
    project: "Project"

    def path(self) -> str:
        """
        Returns an output path under which start scripts
        and benchmark results are stored.
        """
        leaf = "-".join((self.system.name, self.system.version, self.setting.name))
        return os.path.join(self.project.path(), self.machine.name, "results", self.benchmark.name, leaf)

    def gen_scripts(self, script_gen: "ScriptGen") -> None:
        """
        Generates start scripts needed to start the benchmark described
        by this run specification by feeding every instance of the
        associated benchmark to the given script generator.

        Attributes:
            script_gen (ScriptGen): A generator that is responsible for the start script generation.
        """
        self.benchmark.init()
        for instance_set in self.benchmark.instances.values():
            for inst in instance_set:
                script_gen.add_to_script(self, inst)
|
|
1206
|
+
|
|
1207
|
+
|
|
1208
|
+
@dataclass(order=True, frozen=True)
class Project:
    """
    Describes a benchmark project, i.e., a set of run specifications
    that belong together.

    Attributes:
        name (str): The name of the project.
        runscript (Runscript): Associated runscript.
        job (Job): Associated job.
        runspecs (dict[str, list['Runspec']]): Run specifications of the project, keyed by machine name.
    """

    name: str
    runscript: "Runscript" = field(compare=False)
    job: Job = field(compare=False)
    runspecs: dict[str, list["Runspec"]] = field(default_factory=dict, compare=False)

    def add_runtag(self, machine_name: str, benchmark_name: str, tag: str) -> None:
        """
        Adds a run tag to the project, i.e., a set of run specifications
        identified by certain tags.

        Attributes:
            machine_name (str): The machine to run on.
            benchmark_name (str): The benchmark set to evaluate.
            tag (str): The tags of systems+settings to run.
        """
        disj = TagDisj(tag)
        assert isinstance(self.runscript, Runscript)
        for system in self.runscript.systems.values():
            for setting in system.settings.values():
                if disj.match(setting.tag):
                    self.add_runspec(machine_name, system.name, system.version, setting.name, benchmark_name)

    def add_runspec(
        self, machine_name: str, system_name: str, version: str, setting_name: str, benchmark_name: str
    ) -> None:
        """
        Adds a run specification, described by machine, system+settings, and benchmark set,
        to the project.

        Attributes:
            machine_name (str): The machine to run on.
            system_name (str): The system to evaluate.
            version (str): The version of the system.
            setting_name (str): The settings to run the system with.
            benchmark_name (str): The benchmark set to evaluate.
        """
        # look up the system once instead of repeating the dict access
        system = self.runscript.systems[(system_name, version)]
        runspec = Runspec(
            self.runscript.machines[machine_name],
            system,
            system.settings[setting_name],
            self.runscript.benchmarks[benchmark_name],
            self,
        )
        # setdefault replaces the non-idiomatic "if not key in dict" guard
        self.runspecs.setdefault(machine_name, []).append(runspec)

    def path(self) -> str:
        """
        Returns an output path under which start scripts
        and benchmark results are stored for this project.
        """
        return os.path.join(self.runscript.path(), self.name)

    def gen_scripts(self, skip: bool) -> None:
        """
        Generates start scripts for this project, one script generator
        per machine.

        Attributes:
            skip (bool): Whether existing results should be skipped.
        """
        for machine, runspecs in self.runspecs.items():
            script_gen = self.job.script_gen()
            script_gen.set_skip(skip)
            for runspec in runspecs:
                runspec.gen_scripts(script_gen)
            script_gen.gen_start_script(os.path.join(self.path(), machine))
|
|
1284
|
+
|
|
1285
|
+
|
|
1286
|
+
class Runscript:
    """
    Describes a run script, i.e., everything that is needed
    to start and evaluate a set of benchmarks.
    """

    def __init__(self, output: str):
        """
        Initializes an empty run script.

        Attributes:
            output (str): The output folder to store start scripts and result files.
        """
        self.output = output
        self.jobs: dict[str, Job] = {}
        self.projects: dict[str, Project] = {}
        self.machines: dict[str, Machine] = {}
        # systems are keyed by (name, version) pairs
        self.systems: dict[tuple[str, str], System] = {}
        self.configs: dict[str, Config] = {}
        self.benchmarks: dict[str, Benchmark] = {}

    def add_machine(self, machine: "Machine") -> None:
        """
        Adds a given machine to the run script.

        Attributes:
            machine (Machine): The machine to be added.
        """
        self.machines[machine.name] = machine

    def add_system(self, system: "System") -> None:
        """
        Adds a given system to the run script.

        Attributes:
            system (System): The system to add.
        """
        self.systems[(system.name, system.version)] = system

    def add_config(self, config: "Config") -> None:
        """
        Adds a configuration to the run script.

        Attributes:
            config (Config): The config to be added.
        """
        self.configs[config.name] = config

    def add_benchmark(self, benchmark: "Benchmark") -> None:
        """
        Adds a benchmark to the run script.

        Attributes:
            benchmark (Benchmark): The benchmark to be added.
        """
        self.benchmarks[benchmark.name] = benchmark

    def add_job(self, job: "Job") -> None:
        """
        Adds a job to the runscript.

        Attributes:
            job (Job): The job to be added.
        """
        self.jobs[job.name] = job

    def add_project(self, project: "Project") -> None:
        """
        Adds a project to the run script.

        Attributes:
            project (Project): The project to add.
        """
        self.projects[project.name] = project

    def gen_scripts(self, skip: bool) -> None:
        """
        Generates the start scripts for all benchmarks described by
        this run script.
        """
        for project in self.projects.values():
            project.gen_scripts(skip)

    def path(self) -> str:
        """
        Returns the output path of this run script.
        """
        return self.output

    # pylint: disable=too-many-branches
    def eval_results(self, out: Any, parx: int = 2) -> None:
        """
        Evaluates and prints the results of all benchmarks described
        by this run script. (Start scripts have to be run first.)

        Attributes:
            out (Any): Output stream for xml output.
            parx (int): Factor for penalized-average-runtime score.
        """
        # first pass: collect the distinct machines/jobs/configs/systems/benchmarks
        # actually referenced by some run specification
        machines: set[Machine] = set()
        jobs: set[SeqJob | DistJob] = set()
        configs: set[Config] = set()
        systems: dict[System, list[Setting]] = {}
        benchmarks: set[Benchmark] = set()

        for project in self.projects.values():
            assert isinstance(project.job, (SeqJob, DistJob))
            jobs.add(project.job)
            for runspecs in project.runspecs.values():
                for runspec in runspecs:
                    assert isinstance(runspec.system.config, Config)
                    machines.add(runspec.machine)
                    configs.add(runspec.system.config)
                    if not runspec.system in systems:
                        systems[runspec.system] = []
                    systems[runspec.system].append(runspec.setting)
                    benchmarks.add(runspec.benchmark)

        out.write("<result>\n")

        # emit declaration sections in a deterministic (sorted) order
        for machine in sorted(machines):
            machine.to_xml(out, "\t")
        for config in sorted(configs):
            config.to_xml(out, "\t")
        for system in sorted(systems.keys(), key=lambda s: s.order):
            system.to_xml(out, "\t", systems[system])
        for job in sorted(jobs):
            job.to_xml(out, "\t")
        for benchmark in sorted(benchmarks):
            benchmark.to_xml(out, "\t")

        # second pass: emit per-project run results, delegating the
        # per-instance evaluation to the job's script generator
        for project in self.projects.values():
            assert isinstance(project.job, (SeqJob, DistJob))
            out.write('\t<project name="{0.name}" job="{0.job.name}">\n'.format(project))
            job_gen = project.job.script_gen()
            jobs.add(project.job)
            for runspecs in project.runspecs.values():
                for runspec in runspecs:
                    out.write(
                        (
                            '\t\t<runspec machine="{0.machine.name}" system="{0.system.name}" '
                            'version="{0.system.version}" benchmark="{0.benchmark.name}" '
                            'setting="{0.setting.name}">\n'
                        ).format(runspec)
                    )
                    for classname in sorted(runspec.benchmark.instances):
                        out.write('\t\t\t<class id="{0.id}">\n'.format(classname))
                        instances = runspec.benchmark.instances[classname]
                        for instance in instances:
                            out.write('\t\t\t\t<instance id="{0.id}">\n'.format(instance))
                            job_gen.eval_results(out, "\t\t\t\t\t", runspec, instance, parx)
                            out.write("\t\t\t\t</instance>\n")
                        out.write("\t\t\t</class>\n")
                    out.write("\t\t</runspec>\n")
            out.write("\t</project>\n")
        out.write("</result>\n")
|
|
1442
|
+
|
|
1443
|
+
|
|
1444
|
+
class TagDisj:
    """Represents tags in form of a disjunctive normal form."""

    # sentinel stored in self.tag when the formula matches everything
    ALL = 1

    def __init__(self, tag: str):
        """
        Transforms a string into a disjunctive normal form of tags.
        Spaces between tags are interpreted as conjunctions and |
        is interpreted as disjunction. The special value "*all*"
        matches everything.

        Attributes:
            tag (str): a string representing a disjunctive normal form of tags.
        """
        self.tag: Any  # either ALL or a list of frozensets (one per conjunction)
        if tag == "*all*":
            self.tag = self.ALL
        else:
            self.tag = [frozenset(conj.split()) for conj in tag.split("|")]

    def match(self, tag: Any) -> bool:
        """
        Checks whether a given set of tags is subsumed.

        Attributes:
            tag (Any): a set of tags.
        """
        if self.tag == self.ALL:
            return True
        return any(conj.issubset(tag) for conj in self.tag)
|