potassco-benchmark-tool 2.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
File without changes
@@ -0,0 +1,417 @@
1
+ """
2
+ Entry points for different components.
3
+ """
4
+
5
+ import importlib.metadata
6
+ import os
7
+ import shutil
8
+ import subprocess
9
+ import sys
10
+ import time
11
+ from argparse import ArgumentParser, ArgumentTypeError, RawTextHelpFormatter, _SubParsersAction
12
+ from textwrap import dedent
13
+ from typing import Any
14
+
15
+ from benchmarktool.result.ipynb_gen import gen_ipynb
16
+ from benchmarktool.result.parser import Parser as ResParser
17
+ from benchmarktool.runscript.parser import Parser as RunParser
18
+
19
+
20
def formatter(prog: str) -> RawTextHelpFormatter:
    """
    Build the argparse help formatter shared by all subcommands.

    Attributes:
        prog (str): The program name.
    """
    # Raw formatting keeps the hand-wrapped help texts intact.
    return RawTextHelpFormatter(prog, width=100, max_help_position=15)
28
+
29
+
30
def btool_conv(subparsers: "_SubParsersAction[ArgumentParser]") -> None:
    """
    Register conv subcommand.
    """

    def run(args: Any) -> None:
        # Read results either from the given file or from stdin.
        result_parser = ResParser()
        if args.resultfile:
            with open(args.resultfile, encoding="utf-8") as in_file:
                res = result_parser.parse(in_file)
        else:
            res = result_parser.parse(sys.stdin)
        # A notebook needs the exported parquet data, so force export then.
        export: bool = args.export or args.jupyter_notebook is not None
        ex_file = res.gen_office(args.output, args.projects, args.measures, export)
        if args.jupyter_notebook is not None and ex_file is not None:
            gen_ipynb(ex_file, args.jupyter_notebook)

    def parse_set(s: str) -> set[str]:
        # Comma-separated names; whitespace is stripped, empties dropped.
        return {item.strip() for item in s.split(",") if item.strip()}

    def parse_measures(s: str) -> list[tuple[str, str | None]]:
        # Empty list = select all measures.
        if s == "all":
            return []
        measures: list[tuple[str, str | None]] = []
        for entry in s.split(","):
            name, sep, color = entry.partition(":")
            if not name:
                raise ArgumentTypeError(f"Invalid measure: '{entry}'")
            measures.append((name, color if sep else None))
        return measures

    sub = subparsers.add_parser(
        "conv",
        help="Convert results to ODS or other formats",
        description=dedent(
            """\
            Convert previously collected benchmark results to ODS file
            and optionally generate Jupyter notebook.
            """
        ),
        formatter_class=formatter,
    )

    # Named type converters; argparse also applies them to string defaults.
    sub.register("type", "project_set", parse_set)
    sub.register("type", "measure_list", parse_measures)

    sub.add_argument("resultfile", nargs="?", type=str, help="Result file (default: stdin)")
    sub.add_argument(
        "-o", "--output", default="out.ods", help="Name of generated ods file (default: out.ods)", metavar="<file.ods>"
    )
    sub.add_argument(
        "-p",
        "--projects",
        type="project_set",
        default=set(),
        help="Projects to display (comma separated)\nBy default all projects are shown",
        metavar="<project[,project,...]>",
    )
    sub.add_argument(
        "-m",
        "--measures",
        type="measure_list",
        default="time:t,timeout:to",
        help=dedent(
            """\
            Measures to display
            Comma separated list of form 'name[:{t,to,-}]' (optional argument determines coloring)
            Use '-m all' to display all measures
            (default: time:t,timeout:to)
            """
        ),
        metavar="<measure[:{t,to,-}][,measure[:{t,to,-}],...]>",
    )
    sub.add_argument(
        "-e",
        "--export",
        action="store_true",
        help="Export instance data to parquet file (same name as ods file)",
    )
    sub.add_argument(
        "-j",
        "--jupyter-notebook",
        type=str,
        nargs="?",
        help=dedent(
            """\
            Name of generated .ipynb file
            Can be started using 'jupyter notebook <notebook>'
            All dependencies for the notebook can be installed using 'pip install .[plot]'
            """
        ),
        metavar="<file.ipynb>",
    )
    sub.set_defaults(func=run)
125
+
126
+
127
def btool_eval(subparsers: "_SubParsersAction[ArgumentParser]") -> None:
    """
    Register eval subcommand.
    """

    def run(args: Any) -> None:
        # Parse the runscript and write collected results to stdout.
        runscript = RunParser().parse(args.runscript)
        runscript.eval_results(sys.stdout, args.par_x)

    sub = subparsers.add_parser(
        "eval",
        help="Collect results",
        description="Collect benchmark results belonging to a runscript.",
        formatter_class=formatter,
    )
    sub.add_argument("runscript", type=str, help="Runscript file", metavar="<runscript.xml>")
    sub.add_argument(
        "--par-x",
        type=int,
        default=2,
        dest="par_x",
        help="Add penalized-average-runtime score factor as measure (default: 2)",
        metavar="<n>",
    )
    sub.set_defaults(func=run)
153
+
154
+
155
def btool_gen(subparsers: "_SubParsersAction[ArgumentParser]") -> None:
    """
    Register gen subcommand.
    """

    def run(args: Any) -> None:
        # Parse the runscript and emit the benchmark start scripts.
        runscript = RunParser().parse(args.runscript)
        runscript.gen_scripts(args.exclude)

    sub = subparsers.add_parser(
        "gen",
        help="Generate scripts from runscript",
        description="Generate benchmark scripts defined by a runscript.",
        formatter_class=formatter,
    )
    sub.add_argument("runscript", type=str, help="Runscript file", metavar="<runscript.xml>")
    sub.add_argument("-e", "--exclude", action="store_true", help="Exclude finished runs")
    sub.set_defaults(func=run)
174
+
175
+
176
def btool_init(subparsers: "_SubParsersAction[ArgumentParser]") -> None:  # nocoverage
    """
    Register init subcommand.
    """

    def copy_dir(src_dir: str, dst_dir: str, overwrite: bool = False) -> None:
        """
        Copy directory src_dir to dst_dir.
        By default existing files are not overwritten.

        Attributes:
            src_dir (str): Source directory path.
            dst_dir (str): Destination directory path.
            overwrite (bool): Whether to overwrite existing files.
        """
        if not os.path.isdir(src_dir) or not os.path.isdir(dst_dir):
            raise SystemExit("Source and target must be directories.")
        # Top-down walk guarantees parent directories are created before children.
        for root, dirs, files in os.walk(src_dir):
            target_root = os.path.join(dst_dir, os.path.relpath(root, src_dir))
            # Directories
            for name in dirs:
                target_dir = os.path.join(target_root, name)
                if os.path.isdir(target_dir):
                    sys.stderr.write(f"INFO: Directory already exists:\t{target_dir}\n")
                else:
                    os.mkdir(target_dir)
            # Files
            for name in files:
                target_name = os.path.join(target_root, name)
                if os.path.isfile(target_name):
                    sys.stderr.write(f"INFO: File already exists:\t{target_name}\n")
                    if not overwrite:
                        continue
                shutil.copy(os.path.join(root, name), target_name)

    def run(args: Any) -> None:
        # The template tree ships inside the package next to this module.
        src_dir = os.path.join(os.path.dirname(__file__), "init")
        if not os.path.isdir(src_dir):
            raise SystemExit(f"Resources missing: '{src_dir}' does not exist.\nTry reinstalling the package.")
        cwd = os.getcwd()
        copy_dir(src_dir, cwd, args.overwrite)
        rp_dir = os.path.join(cwd, "resultparsers")
        if os.path.isdir(rp_dir):
            sys.stderr.write(f"INFO: Directory already exists:\t{rp_dir}\n")
        else:
            os.mkdir(rp_dir)
        if args.resultparser_template:
            rp_tmp = os.path.join(rp_dir, "rp_tmp.py")
            if os.path.isfile(rp_tmp):
                sys.stderr.write(f"INFO: File already exists:\t{rp_tmp}\n")
                if not args.overwrite:
                    return
            # The bundled clasp parser doubles as the editable template.
            shutil.copy(os.path.join(os.path.dirname(__file__), "resultparser", "clasp.py"), rp_tmp)

    sub = subparsers.add_parser(
        "init",
        help="Initialize benchmark environment",
        description=dedent(
            """\
            Initialize the benchmark environment with the necessary directory structure
            and example runscript and templates.
            By default existing files are not overwritten; use --overwrite to change this behavior.
            """
        ),
        formatter_class=formatter,
    )
    sub.add_argument(
        "-o",
        "--overwrite",
        action="store_true",
        help="Overwrite existing files",
    )
    sub.add_argument(
        "--resultparser-template",
        action="store_true",
        help="Also create a copy of the default 'clasp' resultparser as 'rp_tmp.py'",
    )
    sub.set_defaults(func=run)
256
+
257
+
258
def btool_run_dist(subparsers: "_SubParsersAction[ArgumentParser]") -> None:  # nocoverage
    """
    Run distributed jobs from a folder.
    """

    def running_jobs(user: str) -> int:
        # Count the user's queued/running slurm jobs (one job name per line).
        result = subprocess.run(
            ["squeue", "-u", user, "-h", "-o", "%j"],
            stdout=subprocess.PIPE,
            text=True,
            check=True,
        )
        return sum(1 for line in result.stdout.strip().splitlines() if line)

    def run(args: Any) -> None:
        pending = [
            f for f in os.listdir(args.folder) if os.path.isfile(os.path.join(args.folder, f)) and f.endswith(".dist")
        ]
        print(f"Found {len(pending)} jobs to dispatch.")
        while pending:
            active = running_jobs(args.user)
            # Fill the job budget; on a failed submit, back off and retry later.
            while active < args.jobs and pending:
                candidate = pending[0]
                proc = subprocess.run(["sbatch", candidate], cwd=args.folder, check=False)
                if proc.returncode != 0:
                    print(f"Failed to submit {candidate}, try again later.")
                    break
                print(f"Submitted {candidate}")
                pending.pop(0)
                active += 1
            time.sleep(args.wait)
        print("All jobs submitted.")

    sub = subparsers.add_parser(
        "run-dist",
        help="Run distributed jobs",
        description="Dispatch all distributed jobs (*.dist files) in a given folder.",
        formatter_class=formatter,
    )
    sub.add_argument(
        "folder",
        help="Folder with *.dist files to dispatch",
        type=str,
        metavar="<folder>",
    )
    sub.add_argument(
        "-u",
        "--user",
        type=str,
        default=os.environ.get("USER", "unknown"),
        help="Username for job querying (default: current user)",
        metavar="<user>",
    )
    sub.add_argument(
        "-j",
        "--jobs",
        help="Maximum number of jobs running at once (default: 100)",
        type=int,
        default=100,
        metavar="<n>",
    )
    sub.add_argument(
        "-w",
        "--wait",
        help="Time to wait between checks in seconds (default: 1)",
        type=int,
        default=1,
        metavar="<n>",
    )
    sub.set_defaults(func=run)
328
+
329
+
330
def btool_verify(subparsers: Any) -> None:  # nocoverage
    """
    Register verify subcommand.

    Checks benchmark results for runlim errors and re-runs such instances.
    """

    def find_runlim_errors(folder: str) -> list[str]:
        # Collect every runsolver.watcher file whose content reports a runlim error.
        hits: list[str] = []
        for root, _, files in os.walk(folder):
            for name in files:
                if name != "runsolver.watcher":
                    continue
                watcher_path = os.path.join(root, name)
                with open(watcher_path, encoding="utf-8") as f:
                    if "runlim error" in f.read():
                        hits.append(watcher_path)
        return hits

    def run(args: Any) -> None:
        folder = args.folder
        if not os.path.isdir(folder):
            print("Error: provided folder doesn't exist", file=sys.stderr)
            sys.exit(1)

        error_files = find_runlim_errors(folder)
        if not error_files:
            print("No runlim errors found")
            return
        for watcher_file in error_files:
            # Dropping '.finished' makes 'btool gen -e' re-schedule the run.
            finished_file = os.path.join(os.path.dirname(watcher_file), ".finished")
            if os.path.isfile(finished_file):
                os.remove(finished_file)
                print(f"Removed: {finished_file}")
            else:
                print(f"Pending: {os.path.dirname(finished_file)}")

    sub = subparsers.add_parser(
        "verify",
        help="Check for runlim errors",
        description=dedent(
            """\
            Checks benchmark results in the given folder for runlim errors
            and removes '.finished' files for affected instances.
            Use 'btool gen -e <runscript.xml>' to re-generate new start scripts
            which exclude finished/valid runs.
            """
        ),
        formatter_class=formatter,
    )
    sub.add_argument("folder", type=str, help="Folder containing the benchmark results", metavar="<folder>")
    sub.set_defaults(func=run)
381
+
382
+
383
def get_parser() -> ArgumentParser:
    """
    Get parser.
    """
    parser = ArgumentParser(
        prog="btool",
        description="Benchmark Tool CLI",
        formatter_class=formatter,
    )
    parser.add_argument(
        "-v",
        "--version",
        action="version",
        version=f"potassco-benchmark-tool {importlib.metadata.version('potassco-benchmark-tool')}",
    )
    subparsers = parser.add_subparsers(dest="command", required=True)

    # Each registrar attaches one subcommand to the shared subparser group.
    for register in (btool_conv, btool_eval, btool_gen, btool_init, btool_run_dist, btool_verify):
        register(subparsers)

    return parser
408
+
409
+
410
def main() -> None:  # nocoverage
    """
    Entry point for benchmark tool CLI.
    """
    # Dispatch to the handler registered via set_defaults(func=...).
    args = get_parser().parse_args()
    args.func(args)
@@ -0,0 +1,24 @@
1
#!/bin/bash
# Print the given files to stdout, transparently decompressing
# bzip2/gzip/xz archives based on their detected MIME type.

bat() {
    local f mime
    for f in "$@"; do
        # -b: brief output, -L: follow symlinks
        mime=$(file -b --mime-type -L "${f}")
        case ${mime} in
            application/*bzip2*)
                bzcat "${f}"
                ;;
            application/*gzip*)
                zcat "${f}"
                ;;
            application/*xz*)
                xzcat "${f}"
                ;;
            *)
                cat "${f}"
                ;;
        esac
    done
}

bat "$@"
@@ -0,0 +1,49 @@
1
+ <runscript output="outputdist">
2
+
3
+ <machine name="houat" cpu="8xE5520@2.27GHz" memory="24GB"/>
4
+ <machine name="hpc" cpu="24x8xE5520@2.27GHz" memory="24GB"/>
5
+
6
+ <config name="seq-generic" template="templates/seq-generic.sh"/>
7
+ <system name="clasp" version="3.4.0" measures="clasp" config="seq-generic" cmdline="--stats">
8
+ <setting name="default" tag="seq" cmdline="1"/>
9
+ <setting name="vsids" tag="seq" cmdline="--heu=vsids 1"/>
10
+ <setting name="progress" tag="seq" cmdline="--save-progress 1"/>
11
+ <setting name="estimate" tag="seq" cmdline="--estimate 1"/>
12
+ <setting name="recstr" tag="seq" cmdline="--recursive-str 1"/>
13
+ <setting name="nodel" tag="seq" cmdline="--del=no 1"/>
14
+ <setting name="norest" tag="seq" cmdline="--restarts=no 1"/>
15
+ </system>
16
+
17
+ <config name="dist-generic" template="templates/seq-generic.sh"/>
18
+ <system name="clingo" version="5.8.0" measures="clasp" config="dist-generic">
19
+ <setting name="one-as" tag="one-as" cmdline="--stats 1" disttemplate="templates/single.dist" distopts="#SBATCH --hint=compute_bound"/>
20
+ <setting name="all-as" tag="all-as" cmdline="--stats -q 0"/>
21
+ </system>
22
+
23
+ <seqjob name="seq-generic" timeout="120s" runs="1" memout="1000" parallel="8"/>
24
+ <distjob name="dist-generic" timeout="120s" runs="1" memout="1000" script_mode="timeout" walltime="23h 59m 59s" cpt="4" partition="short"/>
25
+
26
+ <benchmark name="seq-suite">
27
+ <folder path="benchmarks/clasp" group="true">
28
+ <ignore prefix="pigeons"/>
29
+ </folder>
30
+ <files path="benchmarks/clasp">
31
+ <add file="pigeons/pigeonhole10-unsat.lp"/>
32
+ <add file="pigeons/pigeonhole11-unsat.lp"/>
33
+ </files>
34
+ </benchmark>
35
+ <benchmark name="dist-suite">
36
+ <folder path="benchmarks/clasp"/>
37
+ </benchmark>
38
+
39
+ <project name="clasp-seq" job="seq-generic">
40
+ <runtag machine="houat" benchmark="seq-suite" tag="seq"/>
41
+ </project>
42
+ <project name="clingo-all-as" job="dist-generic">
43
+ <runtag machine="houat" benchmark="dist-suite" tag="par one-as"/>
44
+ </project>
45
+ <project name="clingo-one-as" job="dist-generic">
46
+ <runspec machine="hpc" benchmark="dist-suite" system="clingo" version="5.8.0" setting="one-as"/>
47
+ </project>
48
+
49
+ </runscript>
@@ -0,0 +1,20 @@
1
+ <runscript output="output">
2
+
3
+ <machine name="hpc" cpu="24x8xE5520@2.27GHz" memory="24GB"/>
4
+
5
+ <config name="dist-generic" template="templates/seq-generic.sh"/>
6
+ <system name="clasp" version="3.4.0" measures="clasp" config="dist-generic">
7
+ <setting name="one-as" tag="one-as" cmdline="--stats 1" disttemplate="templates/single.dist"/>
8
+ </system>
9
+
10
+ <distjob name="dist-generic" timeout="1200s" runs="1" script_mode="timeout" walltime="23h 59m 59s" cpt="4"/>
11
+
12
+ <benchmark name="dist-suite">
13
+ <folder path="benchmarks/clasp"/>
14
+ </benchmark>
15
+
16
+ <project name="clasp-one-as" job="dist-generic">
17
+ <runtag machine="hpc" benchmark="dist-suite" tag="*all*"/>
18
+ </project>
19
+
20
+ </runscript>
@@ -0,0 +1,31 @@
1
+ <runscript output="output-folder">
2
+
3
+ <machine name="hpc" cpu="24x8xE5520@2.27GHz" memory="24GB"/>
4
+
5
+ <config name="seq-generic" template="templates/seq-generic.sh"/>
6
+
7
+ <system name="clasp" version="3.4.0" measures="clasp" config="seq-generic">
8
+
9
+ <setting name="setting-1" cmdline="--stats --quiet=1,0" tag="basic" />
10
+
11
+ </system>
12
+
13
+ <seqjob name="seq-gen" timeout="900s" runs="1" parallel="1"/>
14
+
15
+ <distjob name="dist-gen" timeout="1200s" runs="1" script_mode="timeout" walltime="23h 59m 59s" cpt="4"/>
16
+
17
+ <benchmark name="no-pigeons">
18
+ <folder path="benchmarks/clasp/">
19
+ <ignore prefix="pigeons"/>
20
+ </folder>
21
+ </benchmark>
22
+
23
+ <project name="clasp-seq-job" job="seq-gen">
24
+ <runtag machine="hpc" benchmark="no-pigeons" tag="basic"/>
25
+ </project>
26
+
27
+ <project name="clasp-dist-job" job="dist-gen">
28
+ <runtag machine="hpc" benchmark="no-pigeons" tag="basic"/>
29
+ </project>
30
+
31
+ </runscript>
@@ -0,0 +1,27 @@
1
+ <runscript output="output">
2
+
3
+ <machine name="houat" cpu="8xE5520@2.27GHz" memory="24GB"/>
4
+
5
+ <config name="seq-generic" template="templates/seq-generic.sh"/>
6
+ <system name="clasp" version="3.4.0" measures="clasp" config="seq-generic" cmdline="--stats">
7
+ <setting name="default" cmdline="1" tag="basic"/>
8
+ <setting name="vsids" cmdline="--heu=vsids 1"/>
9
+ </system>
10
+
11
+ <seqjob name="seq-generic" timeout="120s" runs="1" parallel="4"/>
12
+
13
+ <benchmark name="seq-suite">
14
+ <folder path="benchmarks/clasp">
15
+ <ignore prefix="pigeons"/>
16
+ </folder>
17
+ <files path="benchmarks/clasp">
18
+ <add file="pigeons/pigeonhole10-unsat.lp"/>
19
+ <add file="pigeons/pigeonhole11-unsat.lp"/>
20
+ </files>
21
+ </benchmark>
22
+
23
+ <project name="clasp-big" job="seq-generic">
24
+ <runtag machine="houat" benchmark="seq-suite" tag="*all*"/>
25
+ </project>
26
+
27
+ </runscript>
@@ -0,0 +1,27 @@
1
+ #!/bin/bash
2
+ # https://github.com/arminbiere/runlim
3
+
4
+ CAT="{run.root}/programs/gcat.sh"
5
+
6
+ cd "$(dirname $0)"
7
+
8
+ runner=( "{run.root}/programs/runlim" \
9
+ --single \
10
+ --space-limit={run.memout} \
11
+ --output-file=runsolver.watcher \
12
+ --real-time-limit={run.timeout} \
13
+ "{run.root}/programs/{run.solver}" {run.args})
14
+
15
+ input=( {run.files} {run.encodings} )
16
+
17
+ if [[ ! -e .finished ]]; then
18
+ {{
19
+ if file -b --mime-type -L "${{input[@]}}" | grep -qv "text/"; then
20
+ "$CAT" "${{input[@]}}" | "${{runner[@]}}"
21
+ else
22
+ "${{runner[@]}}" "${{input[@]}}"
23
+ fi
24
+ }} > runsolver.solver
25
+ fi
26
+
27
+ touch .finished
@@ -0,0 +1,14 @@
1
+ #!/bin/bash
2
+ # https://github.com/arminbiere/runlim
3
+
4
+ CAT="{run.root}/programs/gcat.sh"
5
+
6
+ cd "$(dirname $0)"
7
+
8
+ [[ -e .finished ]] || $CAT {run.files} {run.encodings} | "{run.root}/programs/runlim" \
9
+ --space-limit={run.memout} \
10
+ --output-file=runsolver.watcher \
11
+ --real-time-limit={run.timeout} \
12
+ "{run.root}/programs/{run.solver}" {run.args} > runsolver.solver
13
+
14
+ touch .finished
@@ -0,0 +1,12 @@
1
+ #!/bin/bash
2
+ # https://github.com/arminbiere/runlim
3
+
4
+ cd "$(dirname $0)"
5
+
6
+ [[ -e .finished ]] || "{run.root}/programs/runlim" \
7
+ --space-limit={run.memout} \
8
+ --output-file=runsolver.watcher \
9
+ --real-time-limit={run.timeout} \
10
+ "{run.root}/programs/{run.solver}" {run.args} {run.files} {run.encodings} > runsolver.solver
11
+
12
+ touch .finished
@@ -0,0 +1,25 @@
1
+ #!/bin/bash
2
+ #SBATCH --output=out.%j
3
+ #SBATCH --error=err.%j
4
+ #SBATCH --time={walltime} # walltime
5
+ #SBATCH --cpus-per-task={cpt} # number of processor cores (i.e. tasks)
6
+ #SBATCH --partition={partition}
7
+ {dist_options} # additional dist options (specified via runscript setting)
8
+
9
+ # Good Idea to stop operation on first error.
10
+ set -e
11
+
12
+ # Load environment modules for your application here.
13
+ source ~/.bashrc
14
+
15
+ # Run jobs
16
+ jobs="{jobs}"
17
+ echo running jobs@$(hostname)...
18
+ echo ""
19
+ echo "jobs:" $jobs
20
+ echo ""
21
+ for i in $jobs
22
+ do
23
+ echo "running " $i
24
+ $i
25
+ done
File without changes