potassco-benchmark-tool 2.1.1 → 2.2.1 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -35,37 +35,41 @@ def btool_conv(subparsers: "_SubParsersAction[ArgumentParser]") -> None:
     def run(args: Any) -> None:
         p = ResParser()
         if args.resultfile:
-            with open(args.resultfile, encoding="utf-8") as in_file:
-                res = p.parse(in_file)
+            try:
+                with open(args.resultfile, encoding="utf-8") as in_file:
+                    res = p.parse(in_file)
+            except FileNotFoundError:
+                sys.stderr.write(f"*** ERROR: Result file '{args.resultfile}' not found.\n")
+                sys.exit(1)
         else:
             res = p.parse(sys.stdin)
         export: bool = args.export
         if args.jupyter_notebook is not None:
             export = True
-        ex_file = res.gen_office(args.output, args.projects, args.measures, export)
+        ex_file = res.gen_spreadsheet(args.output, args.projects, args.measures, export, args.max_col_width)
         if args.jupyter_notebook is not None and ex_file is not None:
             gen_ipynb(ex_file, args.jupyter_notebook)

     def parse_set(s: str) -> set[str]:
         return set(filter(None, (x.strip() for x in s.split(","))))

-    def parse_measures(s: str) -> list[tuple[str, str | None]]:
-        measures = []
+    def parse_measures(s: str) -> dict[str, Any]:
+        measures = {}
         if s != "all":  # empty list = select all measures
             for x in s.split(","):
                 parts = x.split(":", 1)
                 if not parts[0]:
                     raise ArgumentTypeError(f"Invalid measure: '{x}'")
-                measures.append((parts[0], parts[1] if len(parts) > 1 else None))
+                measures[parts[0]] = parts[1] if len(parts) > 1 else None
         return measures

     conv_parser = subparsers.add_parser(
         "conv",
-        help="Convert results to ODS or other formats",
+        help="Convert results to XLSX or other formats",
         description=dedent(
             """\
-            Convert previously collected benchmark results to ODS file
-            and optionally generate Jupyter notebook.
+            Convert previously collected benchmark results to XLSX
+            spreadsheet and optionally generate Jupyter notebook.
             """
         ),
         formatter_class=formatter,
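
Side note on the reworked parse_measures above: it now returns a mapping from measure name to an optional coloring flag instead of a list of tuples. A minimal standalone sketch of that behavior (the sample measure string is illustrative only):

    from argparse import ArgumentTypeError
    from typing import Any


    def parse_measures(s: str) -> dict[str, Any]:
        # Mirrors the hunk above: "all" yields an empty dict, which selects every measure.
        measures: dict[str, Any] = {}
        if s != "all":
            for x in s.split(","):
                parts = x.split(":", 1)
                if not parts[0]:
                    raise ArgumentTypeError(f"Invalid measure: '{x}'")
                measures[parts[0]] = parts[1] if len(parts) > 1 else None
        return measures


    print(parse_measures("time:t,timeout:to,choices"))
    # -> {'time': 't', 'timeout': 'to', 'choices': None}
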
@@ -76,7 +80,20 @@ def btool_conv(subparsers: "_SubParsersAction[ArgumentParser]") -> None:

     conv_parser.add_argument("resultfile", nargs="?", type=str, help="Result file (default: stdin)")
     conv_parser.add_argument(
-        "-o", "--output", default="out.ods", help="Name of generated ods file (default: out.ods)", metavar="<file.ods>"
+        "-o",
+        "--output",
+        type=str,
+        default="out.xlsx",
+        help="Name of generated xlsx file (default: %(default)s)",
+        metavar="<file.xlsx>",
+    )
+    conv_parser.add_argument(
+        "--max-col-width",
+        type=int,
+        default=300,
+        help="Maximum column width for spreadsheet (default: %(default)d)",
+        metavar="<n>",
+        dest="max_col_width",
     )
     conv_parser.add_argument(
         "-p",
@@ -96,7 +113,7 @@ def btool_conv(subparsers: "_SubParsersAction[ArgumentParser]") -> None:
             Measures to display
             Comma separated list of form 'name[:{t,to,-}]' (optional argument determines coloring)
             Use '-m all' to display all measures
-            (default: time:t,timeout:to)
+            (default: %(default)s)
             """
         ),
         metavar="<measure[:{t,to,-}][,measure[:{t,to,-}],...]>",
@@ -105,13 +122,12 @@ def btool_conv(subparsers: "_SubParsersAction[ArgumentParser]") -> None:
         "-e",
         "--export",
         action="store_true",
-        help="Export instance data to parquet file (same name as ods file)",
+        help="Export instance data to parquet file (same name as .xlsx file)",
     )
     conv_parser.add_argument(
         "-j",
         "--jupyter-notebook",
         type=str,
-        nargs="?",
         help=dedent(
             """\
             Name of generated .ipynb file
@@ -146,7 +162,7 @@ def btool_eval(subparsers: "_SubParsersAction[ArgumentParser]") -> None:
         type=int,
         default=2,
         dest="par_x",
-        help="Add penalized-average-runtime score factor as measure (default: 2)",
+        help="Add penalized-average-runtime score factor as measure (default: %(default)d)",
         metavar="<n>",
     )
     eval_parser.set_defaults(func=run)
@@ -160,7 +176,7 @@ def btool_gen(subparsers: "_SubParsersAction[ArgumentParser]") -> None:
     def run(args: Any) -> None:
         p = RunParser()
         run = p.parse(args.runscript)
-        run.gen_scripts(args.exclude)
+        run.gen_scripts(args.exclude, args.force)

     gen_parser = subparsers.add_parser(
         "gen",
@@ -170,6 +186,12 @@ def btool_gen(subparsers: "_SubParsersAction[ArgumentParser]") -> None:
     )
     gen_parser.add_argument("runscript", type=str, help="Runscript file", metavar="<runscript.xml>")
     gen_parser.add_argument("-e", "--exclude", action="store_true", help="Exclude finished runs")
+    gen_parser.add_argument(
+        "-f",
+        "--force",
+        action="store_true",
+        help="Overwrite existing files",
+    )
     gen_parser.set_defaults(func=run)


@@ -178,7 +200,7 @@ def btool_init(subparsers: "_SubParsersAction[ArgumentParser]") -> None: # noco
     Register init subcommand.
     """

-    def copy_dir(src_dir: str, dst_dir: str, overwrite: bool = False) -> None:
+    def copy_dir(src_dir: str, dst_dir: str, force: bool = False) -> None:
         """
         Copy directory src_dir to dst_dir.
         By default existing files are not overwritten.
@@ -199,33 +221,36 @@ def btool_init(subparsers: "_SubParsersAction[ArgumentParser]") -> None: # noco
             if not os.path.isdir(target_dir):
                 os.mkdir(target_dir)
             else:
-                sys.stderr.write(f"INFO: Directory already exists:\t{target_dir}\n")
+                sys.stderr.write(f"*** INFO: Directory already exists:\t{target_dir}\n")
             # Files
             for file in files:
                 source_name = os.path.join(root, file)
                 target_name = os.path.join(target_root, file)
                 if os.path.isfile(target_name):
-                    sys.stderr.write(f"INFO: File already exists:\t{target_name}\n")
-                    if not overwrite:
+                    sys.stderr.write(f"*** INFO: File already exists:\t{target_name}\n")
+                    if not force:
                         continue
                 shutil.copy(source_name, target_name)

     def run(args: Any) -> None:
         src_dir = os.path.join(os.path.dirname(__file__), "init")
         if not os.path.isdir(src_dir):
-            raise SystemExit(f"Resources missing: '{src_dir}' does not exist.\nTry reinstalling the package.")
+            sys.stderr.write(
+                f"*** ERROR: Resources missing: '{src_dir}' does not exist.\nTry reinstalling the package.\n"
+            )
+            sys.exit(1)
         cwd = os.getcwd()
-        copy_dir(src_dir, cwd, args.overwrite)
+        copy_dir(src_dir, cwd, args.force)
         rp_dir = os.path.join(cwd, "resultparsers")
         if not os.path.isdir(rp_dir):
             os.mkdir(rp_dir)
         else:
-            sys.stderr.write(f"INFO: Directory already exists:\t{rp_dir}\n")
+            sys.stderr.write(f"*** INFO: Directory already exists:\t{rp_dir}\n")
         if args.resultparser_template:
             rp_tmp = os.path.join(rp_dir, "rp_tmp.py")
             if os.path.isfile(rp_tmp):
-                sys.stderr.write(f"INFO: File already exists:\t{rp_tmp}\n")
-                if not args.overwrite:
+                sys.stderr.write(f"*** INFO: File already exists:\t{rp_tmp}\n")
+                if not args.force:
                     return
             shutil.copy(os.path.join(os.path.dirname(__file__), "resultparser", "clasp.py"), rp_tmp)

@@ -236,14 +261,14 @@ def btool_init(subparsers: "_SubParsersAction[ArgumentParser]") -> None: # noco
             """\
             Initialize the benchmark environment with the necessary directory structure
             and example runscript and templates.
-            By default existing files are not overwritten; use --overwrite to change this behavior.
+            By default existing files are not overwritten; use --force to change this behavior.
             """
         ),
         formatter_class=formatter,
     )
     parser.add_argument(
-        "-o",
-        "--overwrite",
+        "-f",
+        "--force",
         action="store_true",
         help="Overwrite existing files",
     )
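
The hunk above renames init's --overwrite option to --force (matching the new -f/--force added to gen), while the skip-existing-files behavior stays the same. A minimal standalone sketch of that copy policy for a generic directory tree; this is an illustration, not the package's exact copy_dir:

    import os
    import shutil


    def copy_tree(src_dir: str, dst_dir: str, force: bool = False) -> None:
        # Mirror src_dir under dst_dir; existing files are kept unless force is set.
        for root, _dirs, files in os.walk(src_dir):
            target_root = os.path.join(dst_dir, os.path.relpath(root, src_dir))
            os.makedirs(target_root, exist_ok=True)
            for name in files:
                target_name = os.path.join(target_root, name)
                if os.path.isfile(target_name) and not force:
                    continue  # keep the existing file
                shutil.copy(os.path.join(root, name), target_name)
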
@@ -270,9 +295,15 @@ def btool_run_dist(subparsers: "_SubParsersAction[ArgumentParser]") -> None: #
         return len([f for f in result.stdout.strip().splitlines() if f])

     def run(args: Any) -> None:
-        pending = [
-            f for f in os.listdir(args.folder) if os.path.isfile(os.path.join(args.folder, f)) and f.endswith(".dist")
-        ]
+        try:
+            pending = [
+                f
+                for f in os.listdir(args.folder)
+                if os.path.isfile(os.path.join(args.folder, f)) and f.endswith(".dist")
+            ]
+        except FileNotFoundError:
+            sys.stderr.write(f"*** ERROR: Folder '{args.folder}' not found.\n")
+            sys.exit(1)
         print(f"Found {len(pending)} jobs to dispatch.")
         while pending:
             jobs = running_jobs(args.user)
@@ -311,17 +342,17 @@ def btool_run_dist(subparsers: "_SubParsersAction[ArgumentParser]") -> None: #
     parser.add_argument(
         "-j",
         "--jobs",
-        help="Maximum number of jobs running at once (default: 100)",
         type=int,
         default=100,
+        help="Maximum number of jobs running at once (default: %(default)d)",
         metavar="<n>",
     )
     parser.add_argument(
         "-w",
         "--wait",
-        help="Time to wait between checks in seconds (default: 1)",
         type=int,
         default=1,
+        help="Time to wait between checks in seconds (default: %(default)d)",
         metavar="<n>",
     )
     parser.set_defaults(func=run)
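
Several help strings in this release (here and in the conv options earlier) switch from hard-coded default values to argparse's %(default)s / %(default)d placeholders, which argparse substitutes from the default= value when rendering help. A small self-contained example of that standard argparse behavior (the prog name and option are illustrative):

    from argparse import ArgumentParser

    parser = ArgumentParser(prog="demo")
    parser.add_argument(
        "-w",
        "--wait",
        type=int,
        default=1,
        help="Time to wait between checks in seconds (default: %(default)d)",
        metavar="<n>",
    )
    parser.print_help()  # the --wait line renders as "... (default: 1)"
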
@@ -340,15 +371,22 @@ def btool_verify(subparsers: Any) -> None: # nocoverage
             for file in files:
                 if file == "runsolver.watcher":
                     watcher_path = os.path.join(root, file)
+                    if os.path.getsize(watcher_path) == 0:
+                        sys.stderr.write(f"*** WARNING: Empty watcher file: {watcher_path}\n")
+                        continue
                     with open(watcher_path, encoding="utf-8") as f:
                         if "runlim error" in f.read():
                             error_files.append(watcher_path)
+                elif file == "runsolver.solver":
+                    solver_path = os.path.join(root, file)
+                    if os.path.getsize(solver_path) == 0:
+                        sys.stderr.write(f"*** WARNING: Empty solver file: {solver_path}\n")
         return error_files

     def run(args: Any) -> None:
         folder = args.folder
         if not os.path.isdir(folder):
-            print("Error: provided folder doesn't exist", file=sys.stderr)
+            sys.stderr.write(f"*** ERROR: Folder '{folder}' not found.\n")
             sys.exit(1)

         if error_files := find_runlim_errors(folder):
@@ -16,12 +16,12 @@

     <config name="dist-generic" template="templates/seq-generic.sh"/>
     <system name="clingo" version="5.8.0" measures="clasp" config="dist-generic">
-        <setting name="one-as" tag="one-as" cmdline="--stats 1" disttemplate="templates/single.dist" distopts="#SBATCH --hint=compute_bound"/>
+        <setting name="one-as" tag="one-as" cmdline="--stats 1" dist_template="templates/single.dist" dist_options="#SBATCH --hint=compute_bound"/>
         <setting name="all-as" tag="all-as" cmdline="--stats -q 0"/>
     </system>

     <seqjob name="seq-generic" timeout="120s" runs="1" memout="1000" parallel="8"/>
-    <distjob name="dist-generic" timeout="120s" runs="1" memout="1000" script_mode="timeout" walltime="23h 59m 59s" cpt="4" partition="short"/>
+    <distjob name="dist-generic" timeout="120s" runs="1" memout="1000" template_options="--single" script_mode="timeout" walltime="23h 59m 59s" cpt="4" partition="short"/>

     <benchmark name="seq-suite">
         <folder path="benchmarks/clasp" group="true">
@@ -4,10 +4,10 @@

     <config name="dist-generic" template="templates/seq-generic.sh"/>
     <system name="clasp" version="3.4.0" measures="clasp" config="dist-generic">
-        <setting name="one-as" tag="one-as" cmdline="--stats 1" disttemplate="templates/single.dist"/>
+        <setting name="one-as" tag="one-as" cmdline="--stats 1" dist_template="templates/single.dist"/>
     </system>

-    <distjob name="dist-generic" timeout="1200s" runs="1" script_mode="timeout" walltime="23h 59m 59s" cpt="4"/>
+    <distjob name="dist-generic" timeout="1200s" runs="1" template_options="--single" script_mode="timeout" walltime="23h 59m 59s" cpt="4"/>

     <benchmark name="dist-suite">
         <folder path="benchmarks/clasp"/>
@@ -12,7 +12,7 @@

     <seqjob name="seq-gen" timeout="900s" runs="1" parallel="1"/>

-    <distjob name="dist-gen" timeout="1200s" runs="1" script_mode="timeout" walltime="23h 59m 59s" cpt="4"/>
+    <distjob name="dist-gen" timeout="1200s" runs="1" template_options="--single" script_mode="timeout" walltime="23h 59m 59s" cpt="4"/>

     <benchmark name="no-pigeons">
         <folder path="benchmarks/clasp/">
@@ -1,12 +1,27 @@
 #!/bin/bash
 # https://github.com/arminbiere/runlim

+CAT="{root}/programs/gcat.sh"
+
 cd "$(dirname $0)"

-[[ -e .finished ]] || "{run.root}/programs/runlim" \
-    --space-limit={run.memout} \
-    --output-file=runsolver.watcher \
-    --real-time-limit={run.timeout} \
-    "{run.root}/programs/{run.solver}" {run.args} {run.files} {run.encodings} > runsolver.solver
+runner=( "{root}/programs/runlim" \
+    {options} \
+    --space-limit={memout} \
+    --output-file=runsolver.watcher \
+    --real-time-limit={timeout} \
+    "{root}/programs/{solver}" {args})
+
+input=( {files} {encodings} )
+
+if [[ ! -e .finished ]]; then
+    {{
+        if file -b --mime-type -L "${{input[@]}}" | grep -qv "text/"; then
+            "$CAT" "${{input[@]}}" | "${{runner[@]}}"
+        else
+            "${{runner[@]}}" "${{input[@]}}"
+        fi
+    }} > runsolver.solver
+fi

 touch .finished
@@ -464,6 +464,8 @@ display(out_plot)
         nbf.v4.new_code_cell(plot_code),
     ]
     fname = file_name
+    if not fname.lower().endswith(".ipynb"):
+        fname += ".ipynb"
     nb.cells[1]["metadata"]["jp-MarkdownHeadingCollapsed"] = True
     nb.cells[3]["metadata"]["jp-MarkdownHeadingCollapsed"] = True
     # nb.cells[6]["metadata"]["jupyter"] = {"source_hidden": True}
@@ -8,7 +8,7 @@ from dataclasses import dataclass, field
 from pathlib import Path
 from typing import Any, Iterator, Optional

-from benchmarktool.result.ods_gen import ODSDoc
+from benchmarktool.result.xlsx_gen import XLSXDoc


 class Result:
@@ -45,17 +45,25 @@ class Result:
             benchmarks.add(runspec.benchmark)
         return BenchmarkMerge(benchmarks)

-    def gen_office(
-        self, out: str, sel_projects: set[str], measures: list[tuple[str, Any]], export: bool = False
+    # pylint: disable=too-many-positional-arguments
+    def gen_spreadsheet(
+        self,
+        out: str,
+        sel_projects: set[str],
+        measures: dict[str, Any],
+        export: bool = False,
+        max_col_width: int = 300,
     ) -> Optional[str]:
         """
-        Prints the current result in open office spreadsheet format.
+        Prints the current result in Microsoft Excel Spreadsheet format (XLSX).
         Returns the name of the export file if values are exported.

         Attributes:
             out (str): The output file to write to.
             sel_projects (set[str]): The selected projects ("" for all).
-            measures (list[tuple[str, Any]]): The measures to extract.
+            measures (dict[str, Any]): The measures to extract.
+            export (bool): Whether to export the raw values as parquet file.
+            max_col_width (int): The maximum column width for spreadsheet.
         """
         projects: list[Project] = []
         for project in self.projects.values():
@@ -63,16 +71,18 @@ class Result:
                 projects.append(project)
         benchmark_merge = self.merge(projects)

-        sheet = ODSDoc(benchmark_merge, measures)
+        doc = XLSXDoc(benchmark_merge, measures, max_col_width)
         for project in projects:
             for runspec in project:
-                sheet.add_runspec(runspec)
-        sheet.finish()
-        sheet.make_ods(out)
+                doc.add_runspec(runspec)
+        doc.finish()
+        if not out.lower().endswith(".xlsx"):
+            out += ".xlsx"
+        doc.make_xlsx(out)

         if export:
             # as_posix() for windows compatibility
-            ex_file = Path(out).absolute().as_posix().replace(".ods", ".parquet")
+            ex_file = Path(out).absolute().as_posix().replace(".xlsx", ".parquet")
             timeout_meta = {}
             for project in projects:
                 for runspec in project.runspecs:
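
Two details in the hunk above are easy to miss: gen_spreadsheet normalizes the output name to end in .xlsx before writing, and the optional parquet export reuses that name with the suffix swapped. A minimal standalone sketch of just this path handling (the sample output name is illustrative):

    from pathlib import Path

    out = "results"  # e.g. the value passed via -o/--output
    if not out.lower().endswith(".xlsx"):
        out += ".xlsx"
    # Exported instance data gets the same name with a .parquet suffix; as_posix() keeps Windows paths portable.
    ex_file = Path(out).absolute().as_posix().replace(".xlsx", ".parquet")
    print(out, "->", ex_file)
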
@@ -84,7 +94,7 @@
                         + "/"
                         + runspec.setting.name
                     ] = [self.jobs[project.job].timeout]
-            sheet.inst_sheet.export_values(ex_file, timeout_meta)
+            doc.inst_sheet.export_values(ex_file, timeout_meta)
             return ex_file
         return None

@@ -397,7 +407,7 @@ class ClassResult:
         yield from self.instresults


-@dataclass(order=True, frozen=True)
+@dataclass(order=True, frozen=True, eq=True)
 class InstanceResult:
     """
     Represents the result of an individual instance (with possibly multiple runs).
@@ -432,7 +442,7 @@ class Run:
     number: int
     measures: dict[str, tuple[str, str]] = field(default_factory=dict, compare=False)

-    def iter(self, measures: list[tuple[str, Any]]) -> Iterator[tuple[str, str, str]]:
+    def iter(self, measures: dict[str, Any]) -> Iterator[tuple[str, str, str]]:
         """
         Creates an iterator over all measures captured during the run.
         Measures can be filter by giving a string set of measure names.
@@ -440,13 +450,13 @@ class Run:
         will be returned.

         Attributes:
-            measures (list[tuple[str, Any]]): Selected measures.
+            measures (dict[str, Any]): Selected measures.
         """
-        if len(measures) == 0:
+        if len(measures.keys()) == 0:
             for name in sorted(self.measures.keys()):
                 yield name, self.measures[name][0], self.measures[name][1]
         else:
-            for name, _ in measures:
+            for name in measures.keys():
                 if name in self.measures:
                     yield name, self.measures[name][0], self.measures[name][1]
                 else: