potassco-benchmark-tool 2.1.1__tar.gz → 2.2.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (122)
  1. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/PKG-INFO +24 -11
  2. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/README.md +22 -9
  3. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/examples/index.md +11 -8
  4. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/getting_started/conv/index.md +17 -9
  5. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/getting_started/gen/index.md +3 -0
  6. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/getting_started/gen/runscript.md +25 -11
  7. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/getting_started/gen/templates.md +9 -8
  8. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/getting_started/index.md +10 -5
  9. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/getting_started/init/index.md +3 -3
  10. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/getting_started/workflow/index.md +5 -6
  11. potassco_benchmark_tool-2.1.1/docs/reference/api/result/ods_gen.md → potassco_benchmark_tool-2.2.0/docs/reference/api/result/xlsx_gen.md +2 -2
  12. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/mkdocs.yml +1 -1
  13. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/pyproject.toml +2 -2
  14. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/src/benchmarktool/entry_points.py +71 -33
  15. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/src/benchmarktool/init/runscripts/runscript-all.xml +2 -2
  16. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/src/benchmarktool/init/runscripts/runscript-dist.xml +2 -2
  17. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/src/benchmarktool/init/runscripts/runscript-example.xml +1 -1
  18. potassco_benchmark_tool-2.1.1/src/benchmarktool/init/templates/seq-generic-single.sh → potassco_benchmark_tool-2.2.0/src/benchmarktool/init/templates/seq-generic.sh +7 -7
  19. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/src/benchmarktool/result/ipynb_gen.py +2 -0
  20. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/src/benchmarktool/result/result.py +26 -16
  21. potassco_benchmark_tool-2.2.0/src/benchmarktool/result/xlsx_gen.py +935 -0
  22. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/src/benchmarktool/resultparser/clasp.py +20 -9
  23. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/src/benchmarktool/runscript/parser.py +235 -134
  24. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/src/benchmarktool/runscript/runscript.py +190 -191
  25. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/src/benchmarktool/tools.py +22 -2
  26. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/src/potassco_benchmark_tool.egg-info/PKG-INFO +24 -11
  27. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/src/potassco_benchmark_tool.egg-info/SOURCES.txt +9 -8
  28. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/src/potassco_benchmark_tool.egg-info/requires.txt +1 -1
  29. potassco_benchmark_tool-2.2.0/tests/ref/runscripts/invalid_runscript.xml +3 -0
  30. potassco_benchmark_tool-2.2.0/tests/ref/runscripts/invalid_xml.xml +5 -0
  31. {potassco_benchmark_tool-2.1.1/tests/ref → potassco_benchmark_tool-2.2.0/tests/ref/runscripts}/test_runscript.xml +6 -6
  32. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/tests/ref/test_eval.xml +2 -1
  33. potassco_benchmark_tool-2.2.0/tests/ref/test_template.sh +1 -0
  34. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/tests/result/test_ipynb_gen.py +3 -3
  35. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/tests/result/test_result_classes.py +18 -18
  36. potassco_benchmark_tool-2.2.0/tests/result/test_xlsx_gen.py +1099 -0
  37. potassco_benchmark_tool-2.2.0/tests/resultparser/__init__.py +0 -0
  38. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/tests/resultparser/test_result_parser.py +30 -5
  39. potassco_benchmark_tool-2.2.0/tests/runscript/__init__.py +0 -0
  40. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/tests/runscript/test_runscript_classes.py +161 -113
  41. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/tests/runscript/test_runscript_parser.py +82 -8
  42. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/tests/test_entry.py +43 -16
  43. potassco_benchmark_tool-2.1.1/src/benchmarktool/init/templates/seq-generic-zip.sh +0 -14
  44. potassco_benchmark_tool-2.1.1/src/benchmarktool/init/templates/seq-generic.sh +0 -12
  45. potassco_benchmark_tool-2.1.1/src/benchmarktool/result/ods_config.py +0 -42
  46. potassco_benchmark_tool-2.1.1/src/benchmarktool/result/ods_gen.py +0 -714
  47. potassco_benchmark_tool-2.1.1/tests/ref/test_template.sh +0 -1
  48. potassco_benchmark_tool-2.1.1/tests/result/test_ods_gen.py +0 -644
  49. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/.envrc +0 -0
  50. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/.github/workflows/deploy.yml +0 -0
  51. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/.github/workflows/doc.yml +0 -0
  52. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/.github/workflows/test.yml +0 -0
  53. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/.gitignore +0 -0
  54. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/.pre-commit-config.yaml +0 -0
  55. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/CONTRIBUTING.md +0 -0
  56. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/LICENSE +0 -0
  57. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/_custom/css/extra.css +0 -0
  58. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/_custom/css/mkdoclingo.css +0 -0
  59. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/_custom/css/mkdocstrings.css +0 -0
  60. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/_custom/javascripts/mathjax.js +0 -0
  61. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/_custom/overrides/.icons/potassco-full-logo.svg +0 -0
  62. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/_custom/overrides/.icons/potassco-logo.svg +0 -0
  63. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/_custom/overrides/partials/logo.html +0 -0
  64. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/assets/images/potassco-full-logo.svg +0 -0
  65. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/assets/images/potassco-logo-dark.svg +0 -0
  66. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/assets/images/potassco-logo.svg +0 -0
  67. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/community/CONTRIBUTING.md +0 -0
  68. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/community/help.md +0 -0
  69. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/community/index.md +0 -0
  70. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/getting_started/eval/index.md +0 -0
  71. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/getting_started/run_dist/index.md +0 -0
  72. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/getting_started/verify/index.md +0 -0
  73. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/index.md +0 -0
  74. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/reference/api/entry_points.md +0 -0
  75. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/reference/api/index.md +0 -0
  76. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/reference/api/result/index.md +0 -0
  77. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/reference/api/result/parser.md +0 -0
  78. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/reference/api/result/result.md +0 -0
  79. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/reference/api/resultparser.md +0 -0
  80. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/reference/api/runscript/index.md +0 -0
  81. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/reference/api/runscript/parser.md +0 -0
  82. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/reference/api/runscript/runscript.md +0 -0
  83. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/reference/api/tools.md +0 -0
  84. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/reference/encoding_support.md +0 -0
  85. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/reference/index.md +0 -0
  86. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/docs/reference/resultparser.md +0 -0
  87. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/mount-zip +0 -0
  88. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/noxfile.py +0 -0
  89. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/setup.cfg +0 -0
  90. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/src/benchmarktool/__init__.py +0 -0
  91. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/src/benchmarktool/init/programs/gcat.sh +0 -0
  92. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/src/benchmarktool/init/runscripts/runscript-seq.xml +0 -0
  93. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/src/benchmarktool/init/templates/single.dist +0 -0
  94. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/src/benchmarktool/result/__init__.py +0 -0
  95. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/src/benchmarktool/result/parser.py +0 -0
  96. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/src/benchmarktool/resultparser/__init__.py +0 -0
  97. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/src/benchmarktool/runscript/__init__.py +0 -0
  98. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/src/potassco_benchmark_tool.egg-info/dependency_links.txt +0 -0
  99. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/src/potassco_benchmark_tool.egg-info/entry_points.txt +0 -0
  100. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/src/potassco_benchmark_tool.egg-info/top_level.txt +0 -0
  101. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/tests/__init__.py +0 -0
  102. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/tests/py.typed +0 -0
  103. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/tests/ref/README.md +0 -0
  104. /potassco_benchmark_tool-2.1.1/tests/ref/test_bench/.invalid.file → /potassco_benchmark_tool-2.2.0/tests/ref/out/x.txt +0 -0
  105. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/tests/ref/results/clasp_error/runsolver.solver +0 -0
  106. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/tests/ref/results/clasp_error/runsolver.watcher +0 -0
  107. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/tests/ref/results/finished/runsolver.solver +0 -0
  108. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/tests/ref/results/finished/runsolver.watcher +0 -0
  109. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/tests/ref/results/memout/runsolver.solver +0 -0
  110. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/tests/ref/results/memout/runsolver.watcher +0 -0
  111. /potassco_benchmark_tool-2.1.1/tests/ref/test_bench/test_f1.1.lp → /potassco_benchmark_tool-2.2.0/tests/ref/results/missing/x.txt +0 -0
  112. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/tests/ref/results/timeout/runsolver.solver +0 -0
  113. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/tests/ref/results/timeout/runsolver.watcher +0 -0
  114. /potassco_benchmark_tool-2.1.1/tests/ref/test_bench/test_f1.2.1.lp → /potassco_benchmark_tool-2.2.0/tests/ref/test_bench/.invalid.file +0 -0
  115. /potassco_benchmark_tool-2.1.1/tests/ref/test_bench/test_f2.lp → /potassco_benchmark_tool-2.2.0/tests/ref/test_bench/test_f1.2.1.lp +0 -0
  116. /potassco_benchmark_tool-2.1.1/tests/ref/test_bench/test_folder/test_foldered.lp → /potassco_benchmark_tool-2.2.0/tests/ref/test_bench/test_f1.2.2.lp +0 -0
  117. /potassco_benchmark_tool-2.1.1/tests/result/__init__.py → /potassco_benchmark_tool-2.2.0/tests/ref/test_bench/test_f2.lp +0 -0
  118. /potassco_benchmark_tool-2.1.1/tests/resultparser/__init__.py → /potassco_benchmark_tool-2.2.0/tests/ref/test_bench/test_folder/test_foldered.lp +0 -0
  119. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/tests/ref/test_disttemplate.dist +0 -0
  120. {potassco_benchmark_tool-2.1.1/tests/runscript → potassco_benchmark_tool-2.2.0/tests/result}/__init__.py +0 -0
  121. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/tests/result/test_result_parser.py +0 -0
  122. {potassco_benchmark_tool-2.1.1 → potassco_benchmark_tool-2.2.0}/tests/test_tools.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: potassco-benchmark-tool
- Version: 2.1.1
+ Version: 2.2.0
  Summary: A benchmark-tool for ASP based systems.
  Author: Roland Kaminski, Tom Schmidt
  License: MIT License
@@ -34,7 +34,7 @@ Description-Content-Type: text/markdown
  License-File: LICENSE
  Requires-Dist: lxml
  Requires-Dist: pandas
- Requires-Dist: odswriter
+ Requires-Dist: xlsxwriter
  Requires-Dist: pyarrow
  Requires-Dist: nbformat
  Provides-Extra: format
@@ -67,16 +67,29 @@ A tool to easier generate, run and evaluate benchmarks.
  
  ## Installation
  
- The `setuptools` package is required to run the commands below. We recommend
- the usage of conda, which already includes `setuptools` in its default python
- installation. Any python version newer than 3.10 is supported.
+ The benchmark tool can be installed with any Python version newer than 3.10
+ using pip:
  
  ```bash
- $ git clone https://github.com/potassco/benchmark-tool
- $ cd benchmark-tool
- $ conda create -n <env-name> python=3.10
- $ conda activate <env-name>
- $ pip install .
+ pip install potassco-benchmark-tool
+ ```
+
+ To access the latest updates and fixes you can either use:
+
+ ```bash
+ pip install git+https://github.com/potassco/benchmark-tool
+ ```
+
+ Or alternatively build the tool yourself, which requires the `setuptools`
+ package. We recommend using conda, which includes `setuptools` in its default
+ Python installation. To build the tool manually run the following commands:
+
+ ```bash
+ git clone https://github.com/potassco/benchmark-tool
+ cd benchmark-tool
+ conda create -n <env-name> python=3.10
+ conda activate <env-name>
+ pip install .
  ```
  
  The documentation can be accessed [here](https://potassco.org/benchmark-tool/)
@@ -104,7 +117,7 @@ Supported subcommands in order of use:
  - `run-dist` Run distributed jobs
  - `verify` Check for runlim errors and re-run failed instances
  - `eval` Collect results
- - `conv` Convert results to ODS or other formats
+ - `conv` Convert results to spreadsheet and more
  
  For more information and examples check the documentation.
  
@@ -4,16 +4,29 @@ A tool to easier generate, run and evaluate benchmarks.
  
  ## Installation
  
- The `setuptools` package is required to run the commands below. We recommend
- the usage of conda, which already includes `setuptools` in its default python
- installation. Any python version newer than 3.10 is supported.
+ The benchmark tool can be installed with any Python version newer than 3.10
+ using pip:
  
  ```bash
- $ git clone https://github.com/potassco/benchmark-tool
- $ cd benchmark-tool
- $ conda create -n <env-name> python=3.10
- $ conda activate <env-name>
- $ pip install .
+ pip install potassco-benchmark-tool
+ ```
+
+ To access the latest updates and fixes you can either use:
+
+ ```bash
+ pip install git+https://github.com/potassco/benchmark-tool
+ ```
+
+ Or alternatively build the tool yourself, which requires the `setuptools`
+ package. We recommend using conda, which includes `setuptools` in its default
+ Python installation. To build the tool manually run the following commands:
+
+ ```bash
+ git clone https://github.com/potassco/benchmark-tool
+ cd benchmark-tool
+ conda create -n <env-name> python=3.10
+ conda activate <env-name>
+ pip install .
  ```
  
  The documentation can be accessed [here](https://potassco.org/benchmark-tool/)
@@ -41,7 +54,7 @@ Supported subcommands in order of use:
  - `run-dist` Run distributed jobs
  - `verify` Check for runlim errors and re-run failed instances
  - `eval` Collect results
- - `conv` Convert results to ODS or other formats
+ - `conv` Convert results to spreadsheet and more
  
  For more information and examples check the documentation.
  
@@ -7,29 +7,32 @@ hide:
  
  ## Sequential Benchmark
  
- The example assumes that you want to run a benchmark that shall be started using simple bash scripts. All the following instruction assume that the current working directory is the root directory of the benchmark-tool project. To begin, the two executables [clasp-3.4.0][1] and [runlim][2] have to be copied (or symlinked) into the `./programs` folder.
+ The example assumes that you want to run a benchmark that shall be started using simple bash scripts. To begin, call `btool init` and copy (or symlink) the two executables [clasp-3.4.0][1] and [runlim][2]
+ into the `./programs` folder.
  Now, run:
  `$ btool gen ./runscripts/runscript-seq.xml`
  This creates a set of start scripts in the `./output` folder.
  To start the benchmark, run:
  `$ ./output/clasp-big/houat/start.py`
  Once the benchmark is finished, run:
- `$ btool eval ./runscripts/runscript-seq.xml | btool conv -o result.ods`
- Finally, open the file:
- `$ soffice result.ods`
+ `$ btool eval ./runscripts/runscript-seq.xml | btool conv -o result.xlsx`
+ Finally, open the file in your favourite spreadsheet tool:
+ `$ xdg-open result.xlsx`
  
  ## Cluster Benchmark
  
- This example assumes that you want to run a benchmark on a cluster, i.g. on the [HPC][3] cluster at the university of Potsdam. Again, all the following instruction assume that the current working directory is the root directory of the benchmark-tool project. Once again make sure, the two executables [clasp-3.4.0][1] and [runlim][2] have been copied (or symlinked) into the `./programs` folder.
+ This example assumes that you want to run a benchmark on a cluster. Once again,
+ call `btool init` and make sure that the two executables [clasp-3.4.0][1]
+ and [runlim][2] have been copied (or symlinked) into the `./programs` folder.
  Now, run:
  `$ btool gen ./runscripts/runscript-dist.xml`
  This creates a set of start scripts in the `./output` folder.
  To start the benchmark, run (on the cluster):
  `$ ./output/clasp-one-as/hpc/start.sh`
  Once the benchmark is finished, run:
- `$ btool eval ./runscripts/runscript-dist.xml | btool conv -o result.ods`
- Finally, open the file:
- `$ soffice result.ods`
+ `$ btool eval ./runscripts/runscript-dist.xml | btool conv -o result.xlsx`
+ Finally, open the file in your favourite spreadsheet tool:
+ `$ xdg-open result.xlsx`
  
  ## Runscripts
  This tool comes with a [collection](https://github.com/potassco/benchmark-tool/blob/master/runscripts) of example runscripts to help you get started.
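
Taken together, the updated sequential example amounts to the following shell session. This is a sketch assembled from the commands shown in the hunk above; the `clasp-big/houat` path comes from the example runscript, and the executable paths are placeholders.

```bash
# Sequential benchmark end to end (sketch; adjust paths to your setup)
btool init                                  # create ./programs, ./runscripts, ...
ln -s /path/to/clasp-3.4.0 ./programs/      # or copy the executables
ln -s /path/to/runlim ./programs/
btool gen ./runscripts/runscript-seq.xml    # writes start scripts to ./output
./output/clasp-big/houat/start.py           # run the benchmark
btool eval ./runscripts/runscript-seq.xml | btool conv -o result.xlsx
xdg-open result.xlsx                        # open in your spreadsheet tool
```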
@@ -3,28 +3,36 @@ title: "Converting Benchmark Results to Spreadsheets"
  icon: "material/play-outline"
  ---
  
- The `conv` subcommand allows you to convert results generated by `btool eval` into an ODS
- spreadsheet, which can be opened with LibreOffice, OpenOffice, or Excel.
+ The `conv` subcommand allows you to convert results generated by `btool eval` into an XLSX
+ spreadsheet, which should be used with Excel. Other programs, such as LibreOffice
+ or OpenOffice, should also work. Keep in mind that by default LibreOffice does not automatically
+ recalculate formulas. Therefore, after opening the spreadsheet, use `CTRL + SHIFT + F9` to
+ manually recalculate all formulas. You can also enable automatic recalculation in the settings.
  
  To convert your benchmark results to a spreadsheet, use the following command:
  
  ```bash
- btool conv benchmark-results.xml -m "time:t,choices" -o results.ods
+ btool conv benchmark-results.xml -m "time:t,choices" -o results.xlsx
  ```
  
- The name of the resulting `.ods` file is set using the `-o, --output` option (default `out.ods`).
+ The name of the resulting `.xlsx` file is set using the `-o, --output` option (default `out.xlsx`).
+ In case of a missing or wrong file extension, the correct file extension is added.
  
  Which benchmark projects to include in the output can be selected via the `-p, --project`
  option. By default all projects are selected.
  
  The `-m, --measures` option specifies which measures to include in the table (default: time:t,timeout:to;
- `-m all` selects all measures). Available measures depend on the [result parser] used during evaluation. Each measure can optionally include a formatting argument after a `:`. Currently,
+ `-m all` selects all measures). Available measures depend on the [result parser] used during evaluation.
+ Each measure can optionally include a formatting argument after a `:`. Currently,
  the supported formatting options are `t` and `to`. Both highlight best and worst values for
  float measures. Use `t` for most measures, and `to` for float measures representing booleans,
  such as `timeout`.
  
+ The `--max-col-width` option can be used to set the maximum column width of the spreadsheet
+ in pixels. The default is 300.
+
  You can choose to export the instance data to a `.parquet` file using the `-e, --export`
- option. The name of the file will be the same as the specified output, i.e. `-o res.ods -e`
+ option. The name of the file will be the same as the specified output, i.e. `-o res.xlsx -e`
  -> `res.parquet`.
  
  The `-j, --jupyter-notebook` option can be used to generate a `.ipynb` file, which contains
@@ -34,7 +42,7 @@ the `-e` option.
  
  ## Spreadsheet Generation
  
- When generating a spreadsheet in ODS format, two sheets are created:
+ When generating a spreadsheet in XLSX format, two sheets are created:
  
  1. **Instance Sheet**
  - The instance sheet lists all runs for each benchmark instance (rows) and
@@ -49,11 +57,11 @@ When generating a spreadsheet in ODS format, two sheets are created:
  comparisons between classes.
  
  !!! info
- All summaries are written as formulas in the ODS file. The calculated
+ All summaries are written as formulas in the .xlsx file. The calculated
  values are also accessible via the *content* attribute of the *Sheet*
  object.
  
- Both the ODS representation and the actual content are stored in [pandas]
+ Both the XLSX representation and the actual content are stored in [pandas]
  DataFrames for easier handling and future modifications.
  
  [result parser]: ../../reference/resultparser.md
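
The `conv` options documented in the hunks above can be combined in a single call. The sketch below assumes the flags compose freely (the docs do not show them used together); the project name and measure list are placeholders.

```bash
# Hypothetical combined invocation of the documented conv options:
#   -p selects a project, -m picks measures (with :t/:to formatting),
#   --max-col-width caps column width in pixels, -e exports a .parquet
#   file next to the output, -j additionally emits a Jupyter notebook.
btool conv benchmark-results.xml \
    -p my-project \
    -m "time:t,timeout:to,choices" \
    --max-col-width 250 \
    -e -j \
    -o results.xlsx   # also yields results.parquet and a notebook
```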
@@ -17,6 +17,9 @@ btool gen ./runscripts/runscript-example.xml
  You can use the `-e, --exclude` option to exclude previously finished benchmarks
  in the start script, thus avoiding running them again.
  
+ If the output directory specified in the runscript already exists, the program
+ is interrupted. The `-f, --force` option can be used to disable this behaviour
+ and overwrite existing files.
  
  After generation, start your benchmarks by executing either the `start.sh` or
  `start.py` file found in the `machine` subfolder of the generated structure.
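
For instance, combining the two options documented above re-generates the scripts while skipping finished runs and overwriting the existing output directory (a sketch using the example runscript):

```bash
# Skip finished benchmarks but overwrite the existing output directory
btool gen ./runscripts/runscript-example.xml --exclude --force
```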
@@ -103,6 +103,8 @@ benchmark result evaluation. This does not affect benchmark script generation.
  system.
  - The `cmdline` attribute is optional and can be any string, which will be passed
  to the system regardless of the setting.
+ - The `cmdline_post` attribute is similar but is placed after `setting.cmdline`
+ in the order of arguments.
  
  A runscript can contain any number of systems, each with any number of
  settings.
@@ -120,7 +122,9 @@ encodings used by the system.
  ```
  
  - The `cmdline` attribute can be any valid string, which will be passed to the
- system via the run template when this setting is selected.
+ system after `system.cmdline` when this setting is selected.
+ - The `cmdline_post` attribute is similar but is placed after `system.cmdline_post`
+ in the order of arguments.
  - The `tag` attribute is a space separated identifier used within the runscript
  to select multiple settings at once.
  - Each setting can contain any number of encoding elements.
@@ -130,24 +134,31 @@ to select multiple settings at once.
  instances when this setting is selected.
  - If a `tag` is given, encoding usage is instance-dependent. Multiple
  encodings can be selected by using the same tag.
- - The setting element also supports an optional `disttemplate` attribute. The
+ - The setting element also supports an optional `dist_template` attribute. The
  default value is `templates/single.dist`, which refers to [single.dist]. This
  attribute is only relevant for distributed jobs. More information about dist
  templates can be found on the [templates] page.
- - Another optional attribute for distributed jobs is `distopts`, which allows
- you to add additional options for distributed jobs. `distopts` expects a
- comma-separated string of options. For example, `distopts="#SBATCH
- --hint=compute_bound,#SBATCH --job-name=\"my_benchmark_run\""` results in
+ - Another optional attribute for distributed jobs is `dist_options`, which allows
+ you to add additional options for distributed jobs. `dist_options` expects a
+ comma-separated string of options. For example,
+ `dist_options="#SBATCH --hint=compute_bound,#SBATCH -J=%x.%j.out"` results in
  the following lines being added to the script:
  
  ```bash
  #SBATCH --hint=compute_bound
- #SBATCH --job-name="my_benchmark_run"
+ #SBATCH -J=%x.%j.out
  ```
  
  The default template for distributed jobs uses SLURM; a comprehensive list
  of available options is provided in the [SLURM documentation].
  
+ To summarize, the commandline arguments will always be given to the
+ system-under-test in the following order:
+
+ ```
+ system.cmdline setting.cmdline system.cmdline_post setting.cmdline_post
+ ```
+
  ## Job
  
  A job defines additional arguments for individual runs. You can define any
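
To make the argument order summarized above concrete, here is a sketch with made-up attribute values (the clasp options are illustrative, not taken from the package):

```bash
# Hypothetical runscript attributes:
#   system:  cmdline="--stats"      cmdline_post="--quiet=1"
#   setting: cmdline="--models 0"   cmdline_post="--time-limit=0"
# Resulting invocation (system.cmdline, setting.cmdline,
# system.cmdline_post, setting.cmdline_post, then the instance):
clasp --stats --models 0 --quiet=1 --time-limit=0 instance.lp
```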
@@ -160,19 +171,22 @@ A sequential job is identified by its `name` and sets the `timeout` (in
  seconds) for a single run, the number of `runs` for each instance, and
  the number of solver processes executed in `parallel`. The optional
  attribute `memout` sets a memory limit (in MB) for each run. If no limit
- is set, a default limit of 2000 MB is used:
+ is set, a default limit of 20 GB is used. Additional options, which will be
+ passed to the runlim call, can be set using the optional `template_options` attribute.
+ `template_options` expects a comma-separated string of options, e.g.
+ `template_options="--single,--report-rate=2000"`.
  
  ```xml
- <seqjob name="seq-gen" timeout="900" runs="1" memout="1000" parallel="1"/>
+ <seqjob name="seq-gen" timeout="900" runs="1" memout="1000" template_options="--single" parallel="1"/>
  ```
  
  ### Distributed Jobs
  
  A distributed job is also identified by its `name` and defines a `timeout`,
- the number of `runs` and an optional `memout`:
+ the number of `runs` and an optional `memout` and `template_options`:
  
  ```xml
- <distjob name="dist-gen" timeout="900" runs="1" memout="1000"
+ <distjob name="dist-gen" timeout="900" runs="1" memout="1000" template_options="--single"
  script_mode="timeout" walltime="23h 59m 59s" cpt="4"/>
  ```
  
@@ -12,18 +12,19 @@ repository].
  ## Run Templates
  
  Run templates define how each benchmark instance is executed. During script
- generation, references within the template (e.g., `run.files`) are replaced
+ generation, references within the template (e.g., `{files}`) are replaced
  with corresponding values.
  
  The following references are available:
  
- - `run.files`: instance files
- - `run.encodings`: encoding files used for this instance
- - `run.root`: path to the benchmark-tool folder
- - `run.timeout`: walltime for this run
- - `run.memout`: memory limit for this run in MB (default: 20000)
- - `run.solver`: solver or program used for this run
- - `run.args`: additional arguments for the solver/program
+ - `files`: instance files
+ - `encodings`: encoding files used for this instance
+ - `root`: path to the benchmark-tool folder
+ - `timeout`: walltime for this run
+ - `memout`: memory limit for this run in MB (default: 20000)
+ - `solver`: solver or program used for this run
+ - `args`: additional arguments for the solver/program
+ - `options`: additional options
  
  Most templates use the [runlim] program to supervise benchmark runs.
  
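A minimal run-template sketch using the renamed references could look as follows. This is not one of the shipped templates (such as `seq-generic.sh`), and the runlim flag spellings are assumptions; the `runsolver.watcher`/`runsolver.solver` file names follow the test fixtures listed above.

```bash
#!/bin/bash
# Sketch of a run template; the {references} are substituted by `btool gen`.
cd "$(dirname "$0")"
"{root}/programs/runlim" \
    --output-file=runsolver.watcher \
    --time-limit={timeout} \
    --space-limit={memout} \
    {options} \
    {solver} {args} {encodings} {files} > runsolver.solver
```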
@@ -8,10 +8,15 @@ The benchmark tool can be installed with any Python version newer than 3.10 usin
  pip install potassco-benchmark-tool
  ```
  
- To access the latest updates and fixes you can alternatively
- build the tool yourself, which requires the `setuptools` package.
- We recommend using conda, which includes `setuptools` in its default
- Python installation. To build the tool manually run the following commands:
+ To access the latest updates and fixes you can either use:
+
+ ```bash
+ pip install git+https://github.com/potassco/benchmark-tool
+ ```
+
+ Or alternatively build the tool yourself, which requires the `setuptools` package.
+ We recommend using conda, which includes `setuptools` in its default Python
+ installation. To build the tool manually run the following commands:
  
  ```bash
  git clone https://github.com/potassco/benchmark-tool
@@ -40,7 +45,7 @@ Supported subcommands in order of use:
  - `run-dist`: Run distributed jobs
  - `verify`: Check for runlim errors
  - `eval`: Collect results
- - `conv`: Convert results to ODS or other formats
+ - `conv`: Convert results to spreadsheet and more
  
  
  Each subcommand has its own help page, which you can access using:
@@ -4,14 +4,14 @@ icon: "material/play-outline"
  ---
  
  The `init` subcommand can be used to prepare the necessary folder structure to run
- the benchmarktool and provide some example [runscripts] and script [templates]. By
- default existing files are not overwritten.
+ the benchmarktool and provide some example [runscripts] and script [templates].
  
  ```bash
  btool init
  ```
  
- The `-o, --overwrite` option can be used to overwrite existing files.
+ By default existing files are not overwritten. This can be changed using
+ the `-f, --force` option.
  
  You can use the `--resultparser-template` option to create a copy of the `clasp` resultparser
  called `rp_tmp.py`, which you can use as a base to create your own. You can overwrite the
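
Combining the options described in this hunk, a re-initialization that also drops in the resultparser skeleton might look like this (a sketch; both flags as documented above):

```bash
# Overwrite existing files and create the rp_tmp.py resultparser skeleton
btool init --force --resultparser-template
```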
@@ -57,16 +57,15 @@ To evaluate your benchmarks and collect the results use the [eval] subcommand:
  btool eval <runscript.xml> > <results.xml>
  ```
  
- This newly created .xml file can then be used as input for the [conv] subcommand to generate an .ods file
- and optionally an .ipynb jupyter notebook. By default only the time and timeout measures are displayed.
- Further measures can be selected using the -m option.
+ This newly created .xml file can then be used as input for the [conv] subcommand to generate an .xlsx
+ file and optionally an .ipynb jupyter notebook. By default only the time and timeout measures are displayed. Further measures can be selected using the -m option.
  
  ```
- btool conv -o <out.ods> <result.xml>
+ btool conv -o <out.xlsx> <result.xml>
  ```
  
- [runscripts]: ./gen/runscript.md
- [templates]: ./gen/templates.md
+ [runscripts]: ../gen/runscript.md
+ [templates]: ../gen/templates.md
  [dispatcher]: ../run_dist/index.md
  [verify]: ../verify/index.md
  [init]: ../init/index.md
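
As on the examples page, the two steps can also be chained with a pipe; a sketch with a placeholder runscript path:

```bash
btool eval runscripts/runscript-example.xml | btool conv -m all -o results.xlsx
```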
@@ -1,10 +1,10 @@
  ---
- title: "ODS Gen"
+ title: "XLSX Gen"
  ---
  
  # ODS Gen
  
- ::: benchmarktool.result.ods_gen
+ ::: benchmarktool.result.xlsx_gen
  handler: python
  options:
  filters: public
@@ -152,7 +152,7 @@ nav:
  - reference/api/result/index.md
  - Classes: reference/api/result/result.md
  - Parser: reference/api/result/parser.md
- - ODS Gen: reference/api/result/ods_gen.md
+ - XLSX Gen: reference/api/result/xlsx_gen.md
  - Tools: reference/api/tools.md
  - Entry Points: reference/api/entry_points.md
  - Community:
@@ -11,7 +11,7 @@ dynamic = ["version"]
  dependencies = [
  "lxml",
  "pandas",
- "odswriter",
+ "xlsxwriter",
  "pyarrow",
  "nbformat"
  ]
@@ -84,7 +84,7 @@ variable-rgx = "^[a-z][a-z0-9]*((_[a-z0-9]+)*_?)?$"
  good-names = ["_"]
  
  [tool.pylint."messages control"]
- disable = ["consider-using-f-string", "duplicate-code"]
+ disable = ["duplicate-code"]
  extension-pkg-allow-list = ["lxml"]
  
  [tool.coverage.run]