sierra-research 1.3.6__py3-none-any.whl → 1.5.0__py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their public registries, and is provided for informational purposes only.
- sierra/__init__.py +3 -3
- sierra/core/__init__.py +3 -3
- sierra/core/batchroot.py +223 -0
- sierra/core/cmdline.py +681 -1057
- sierra/core/compare.py +11 -0
- sierra/core/config.py +96 -88
- sierra/core/engine.py +306 -0
- sierra/core/execenv.py +380 -0
- sierra/core/expdef.py +11 -0
- sierra/core/experiment/__init__.py +1 -0
- sierra/core/experiment/bindings.py +150 -101
- sierra/core/experiment/definition.py +414 -245
- sierra/core/experiment/spec.py +83 -85
- sierra/core/exproot.py +44 -0
- sierra/core/generators/__init__.py +10 -0
- sierra/core/generators/experiment.py +528 -0
- sierra/core/generators/generator_factory.py +138 -137
- sierra/core/graphs/__init__.py +23 -0
- sierra/core/graphs/bcbridge.py +94 -0
- sierra/core/graphs/heatmap.py +245 -324
- sierra/core/graphs/pathset.py +27 -0
- sierra/core/graphs/schema.py +77 -0
- sierra/core/graphs/stacked_line.py +341 -0
- sierra/core/graphs/summary_line.py +506 -0
- sierra/core/logging.py +3 -2
- sierra/core/models/__init__.py +3 -1
- sierra/core/models/info.py +19 -0
- sierra/core/models/interface.py +52 -122
- sierra/core/pipeline/__init__.py +2 -5
- sierra/core/pipeline/pipeline.py +228 -126
- sierra/core/pipeline/stage1/__init__.py +10 -0
- sierra/core/pipeline/stage1/pipeline_stage1.py +45 -31
- sierra/core/pipeline/stage2/__init__.py +10 -0
- sierra/core/pipeline/stage2/pipeline_stage2.py +8 -11
- sierra/core/pipeline/stage2/runner.py +401 -0
- sierra/core/pipeline/stage3/__init__.py +12 -0
- sierra/core/pipeline/stage3/gather.py +321 -0
- sierra/core/pipeline/stage3/pipeline_stage3.py +37 -84
- sierra/core/pipeline/stage4/__init__.py +12 -2
- sierra/core/pipeline/stage4/pipeline_stage4.py +36 -354
- sierra/core/pipeline/stage5/__init__.py +12 -0
- sierra/core/pipeline/stage5/pipeline_stage5.py +33 -208
- sierra/core/pipeline/yaml.py +48 -0
- sierra/core/plugin.py +529 -62
- sierra/core/proc.py +11 -0
- sierra/core/prod.py +11 -0
- sierra/core/ros1/__init__.py +5 -1
- sierra/core/ros1/callbacks.py +22 -21
- sierra/core/ros1/cmdline.py +59 -88
- sierra/core/ros1/generators.py +159 -175
- sierra/core/ros1/variables/__init__.py +3 -0
- sierra/core/ros1/variables/exp_setup.py +122 -116
- sierra/core/startup.py +106 -76
- sierra/core/stat_kernels.py +4 -5
- sierra/core/storage.py +13 -32
- sierra/core/trampoline.py +30 -0
- sierra/core/types.py +116 -71
- sierra/core/utils.py +103 -106
- sierra/core/variables/__init__.py +1 -1
- sierra/core/variables/base_variable.py +12 -17
- sierra/core/variables/batch_criteria.py +387 -481
- sierra/core/variables/builtin.py +135 -0
- sierra/core/variables/exp_setup.py +19 -39
- sierra/core/variables/population_size.py +72 -76
- sierra/core/variables/variable_density.py +44 -68
- sierra/core/vector.py +1 -1
- sierra/main.py +256 -88
- sierra/plugins/__init__.py +119 -0
- sierra/plugins/compare/__init__.py +14 -0
- sierra/plugins/compare/graphs/__init__.py +19 -0
- sierra/plugins/compare/graphs/cmdline.py +120 -0
- sierra/plugins/compare/graphs/comparator.py +291 -0
- sierra/plugins/compare/graphs/inter_controller.py +531 -0
- sierra/plugins/compare/graphs/inter_scenario.py +297 -0
- sierra/plugins/compare/graphs/namecalc.py +53 -0
- sierra/plugins/compare/graphs/outputroot.py +73 -0
- sierra/plugins/compare/graphs/plugin.py +147 -0
- sierra/plugins/compare/graphs/preprocess.py +172 -0
- sierra/plugins/compare/graphs/schema.py +37 -0
- sierra/plugins/engine/__init__.py +14 -0
- sierra/plugins/engine/argos/__init__.py +18 -0
- sierra/plugins/{platform → engine}/argos/cmdline.py +144 -151
- sierra/plugins/{platform/argos/variables → engine/argos/generators}/__init__.py +5 -0
- sierra/plugins/engine/argos/generators/engine.py +394 -0
- sierra/plugins/engine/argos/plugin.py +393 -0
- sierra/plugins/{platform/argos/generators → engine/argos/variables}/__init__.py +5 -0
- sierra/plugins/engine/argos/variables/arena_shape.py +183 -0
- sierra/plugins/engine/argos/variables/cameras.py +240 -0
- sierra/plugins/engine/argos/variables/constant_density.py +112 -0
- sierra/plugins/engine/argos/variables/exp_setup.py +82 -0
- sierra/plugins/{platform → engine}/argos/variables/physics_engines.py +83 -87
- sierra/plugins/engine/argos/variables/population_constant_density.py +178 -0
- sierra/plugins/engine/argos/variables/population_size.py +115 -0
- sierra/plugins/engine/argos/variables/population_variable_density.py +123 -0
- sierra/plugins/engine/argos/variables/rendering.py +108 -0
- sierra/plugins/engine/ros1gazebo/__init__.py +18 -0
- sierra/plugins/engine/ros1gazebo/cmdline.py +175 -0
- sierra/plugins/{platform/ros1robot → engine/ros1gazebo}/generators/__init__.py +5 -0
- sierra/plugins/engine/ros1gazebo/generators/engine.py +125 -0
- sierra/plugins/engine/ros1gazebo/plugin.py +404 -0
- sierra/plugins/engine/ros1gazebo/variables/__init__.py +15 -0
- sierra/plugins/engine/ros1gazebo/variables/population_size.py +214 -0
- sierra/plugins/engine/ros1robot/__init__.py +18 -0
- sierra/plugins/engine/ros1robot/cmdline.py +159 -0
- sierra/plugins/{platform/ros1gazebo → engine/ros1robot}/generators/__init__.py +4 -0
- sierra/plugins/engine/ros1robot/generators/engine.py +95 -0
- sierra/plugins/engine/ros1robot/plugin.py +410 -0
- sierra/plugins/{hpc/local → engine/ros1robot/variables}/__init__.py +5 -0
- sierra/plugins/engine/ros1robot/variables/population_size.py +146 -0
- sierra/plugins/execenv/__init__.py +11 -0
- sierra/plugins/execenv/hpc/__init__.py +18 -0
- sierra/plugins/execenv/hpc/adhoc/__init__.py +18 -0
- sierra/plugins/execenv/hpc/adhoc/cmdline.py +30 -0
- sierra/plugins/execenv/hpc/adhoc/plugin.py +131 -0
- sierra/plugins/execenv/hpc/cmdline.py +137 -0
- sierra/plugins/execenv/hpc/local/__init__.py +18 -0
- sierra/plugins/execenv/hpc/local/cmdline.py +31 -0
- sierra/plugins/execenv/hpc/local/plugin.py +145 -0
- sierra/plugins/execenv/hpc/pbs/__init__.py +18 -0
- sierra/plugins/execenv/hpc/pbs/cmdline.py +30 -0
- sierra/plugins/execenv/hpc/pbs/plugin.py +121 -0
- sierra/plugins/execenv/hpc/slurm/__init__.py +18 -0
- sierra/plugins/execenv/hpc/slurm/cmdline.py +30 -0
- sierra/plugins/execenv/hpc/slurm/plugin.py +133 -0
- sierra/plugins/execenv/prefectserver/__init__.py +18 -0
- sierra/plugins/execenv/prefectserver/cmdline.py +66 -0
- sierra/plugins/execenv/prefectserver/dockerremote/__init__.py +18 -0
- sierra/plugins/execenv/prefectserver/dockerremote/cmdline.py +66 -0
- sierra/plugins/execenv/prefectserver/dockerremote/plugin.py +132 -0
- sierra/plugins/execenv/prefectserver/flow.py +66 -0
- sierra/plugins/execenv/prefectserver/local/__init__.py +18 -0
- sierra/plugins/execenv/prefectserver/local/cmdline.py +29 -0
- sierra/plugins/execenv/prefectserver/local/plugin.py +133 -0
- sierra/plugins/{hpc/adhoc → execenv/robot}/__init__.py +1 -0
- sierra/plugins/execenv/robot/turtlebot3/__init__.py +18 -0
- sierra/plugins/execenv/robot/turtlebot3/plugin.py +204 -0
- sierra/plugins/expdef/__init__.py +14 -0
- sierra/plugins/expdef/json/__init__.py +14 -0
- sierra/plugins/expdef/json/plugin.py +504 -0
- sierra/plugins/expdef/xml/__init__.py +14 -0
- sierra/plugins/expdef/xml/plugin.py +386 -0
- sierra/{core/hpc → plugins/proc}/__init__.py +1 -1
- sierra/plugins/proc/collate/__init__.py +15 -0
- sierra/plugins/proc/collate/cmdline.py +47 -0
- sierra/plugins/proc/collate/plugin.py +271 -0
- sierra/plugins/proc/compress/__init__.py +18 -0
- sierra/plugins/proc/compress/cmdline.py +47 -0
- sierra/plugins/proc/compress/plugin.py +123 -0
- sierra/plugins/proc/decompress/__init__.py +18 -0
- sierra/plugins/proc/decompress/plugin.py +96 -0
- sierra/plugins/proc/imagize/__init__.py +15 -0
- sierra/plugins/proc/imagize/cmdline.py +49 -0
- sierra/plugins/proc/imagize/plugin.py +270 -0
- sierra/plugins/proc/modelrunner/__init__.py +16 -0
- sierra/plugins/proc/modelrunner/plugin.py +250 -0
- sierra/plugins/proc/statistics/__init__.py +15 -0
- sierra/plugins/proc/statistics/cmdline.py +64 -0
- sierra/plugins/proc/statistics/plugin.py +390 -0
- sierra/plugins/{hpc → prod}/__init__.py +1 -0
- sierra/plugins/prod/graphs/__init__.py +18 -0
- sierra/plugins/prod/graphs/cmdline.py +269 -0
- sierra/plugins/prod/graphs/collate.py +279 -0
- sierra/plugins/prod/graphs/inter/__init__.py +13 -0
- sierra/plugins/prod/graphs/inter/generate.py +83 -0
- sierra/plugins/prod/graphs/inter/heatmap.py +86 -0
- sierra/plugins/prod/graphs/inter/line.py +134 -0
- sierra/plugins/prod/graphs/intra/__init__.py +15 -0
- sierra/plugins/prod/graphs/intra/generate.py +202 -0
- sierra/plugins/prod/graphs/intra/heatmap.py +74 -0
- sierra/plugins/prod/graphs/intra/line.py +114 -0
- sierra/plugins/prod/graphs/plugin.py +103 -0
- sierra/plugins/prod/graphs/targets.py +63 -0
- sierra/plugins/prod/render/__init__.py +18 -0
- sierra/plugins/prod/render/cmdline.py +72 -0
- sierra/plugins/prod/render/plugin.py +282 -0
- sierra/plugins/storage/__init__.py +5 -0
- sierra/plugins/storage/arrow/__init__.py +18 -0
- sierra/plugins/storage/arrow/plugin.py +38 -0
- sierra/plugins/storage/csv/__init__.py +9 -0
- sierra/plugins/storage/csv/plugin.py +12 -5
- sierra/version.py +3 -2
- sierra_research-1.5.0.dist-info/METADATA +238 -0
- sierra_research-1.5.0.dist-info/RECORD +186 -0
- {sierra_research-1.3.6.dist-info → sierra_research-1.5.0.dist-info}/WHEEL +1 -2
- sierra/core/experiment/xml.py +0 -454
- sierra/core/generators/controller_generator_parser.py +0 -34
- sierra/core/generators/exp_creator.py +0 -351
- sierra/core/generators/exp_generators.py +0 -142
- sierra/core/graphs/scatterplot2D.py +0 -109
- sierra/core/graphs/stacked_line_graph.py +0 -249
- sierra/core/graphs/stacked_surface_graph.py +0 -220
- sierra/core/graphs/summary_line_graph.py +0 -369
- sierra/core/hpc/cmdline.py +0 -142
- sierra/core/models/graphs.py +0 -87
- sierra/core/pipeline/stage2/exp_runner.py +0 -286
- sierra/core/pipeline/stage3/imagizer.py +0 -149
- sierra/core/pipeline/stage3/run_collator.py +0 -317
- sierra/core/pipeline/stage3/statistics_calculator.py +0 -478
- sierra/core/pipeline/stage4/graph_collator.py +0 -319
- sierra/core/pipeline/stage4/inter_exp_graph_generator.py +0 -240
- sierra/core/pipeline/stage4/intra_exp_graph_generator.py +0 -317
- sierra/core/pipeline/stage4/model_runner.py +0 -168
- sierra/core/pipeline/stage4/rendering.py +0 -283
- sierra/core/pipeline/stage4/yaml_config_loader.py +0 -103
- sierra/core/pipeline/stage5/inter_scenario_comparator.py +0 -328
- sierra/core/pipeline/stage5/intra_scenario_comparator.py +0 -989
- sierra/core/platform.py +0 -493
- sierra/core/plugin_manager.py +0 -369
- sierra/core/root_dirpath_generator.py +0 -241
- sierra/plugins/hpc/adhoc/plugin.py +0 -125
- sierra/plugins/hpc/local/plugin.py +0 -81
- sierra/plugins/hpc/pbs/__init__.py +0 -9
- sierra/plugins/hpc/pbs/plugin.py +0 -126
- sierra/plugins/hpc/slurm/__init__.py +0 -9
- sierra/plugins/hpc/slurm/plugin.py +0 -130
- sierra/plugins/platform/__init__.py +0 -9
- sierra/plugins/platform/argos/__init__.py +0 -9
- sierra/plugins/platform/argos/generators/platform_generators.py +0 -383
- sierra/plugins/platform/argos/plugin.py +0 -337
- sierra/plugins/platform/argos/variables/arena_shape.py +0 -145
- sierra/plugins/platform/argos/variables/cameras.py +0 -243
- sierra/plugins/platform/argos/variables/constant_density.py +0 -136
- sierra/plugins/platform/argos/variables/exp_setup.py +0 -113
- sierra/plugins/platform/argos/variables/population_constant_density.py +0 -175
- sierra/plugins/platform/argos/variables/population_size.py +0 -102
- sierra/plugins/platform/argos/variables/population_variable_density.py +0 -132
- sierra/plugins/platform/argos/variables/rendering.py +0 -104
- sierra/plugins/platform/ros1gazebo/__init__.py +0 -9
- sierra/plugins/platform/ros1gazebo/cmdline.py +0 -213
- sierra/plugins/platform/ros1gazebo/generators/platform_generators.py +0 -137
- sierra/plugins/platform/ros1gazebo/plugin.py +0 -335
- sierra/plugins/platform/ros1gazebo/variables/__init__.py +0 -10
- sierra/plugins/platform/ros1gazebo/variables/population_size.py +0 -204
- sierra/plugins/platform/ros1robot/__init__.py +0 -9
- sierra/plugins/platform/ros1robot/cmdline.py +0 -175
- sierra/plugins/platform/ros1robot/generators/platform_generators.py +0 -112
- sierra/plugins/platform/ros1robot/plugin.py +0 -373
- sierra/plugins/platform/ros1robot/variables/__init__.py +0 -10
- sierra/plugins/platform/ros1robot/variables/population_size.py +0 -146
- sierra/plugins/robot/__init__.py +0 -9
- sierra/plugins/robot/turtlebot3/__init__.py +0 -9
- sierra/plugins/robot/turtlebot3/plugin.py +0 -194
- sierra_research-1.3.6.data/data/share/man/man1/sierra-cli.1 +0 -2349
- sierra_research-1.3.6.data/data/share/man/man7/sierra-examples.7 +0 -488
- sierra_research-1.3.6.data/data/share/man/man7/sierra-exec-envs.7 +0 -331
- sierra_research-1.3.6.data/data/share/man/man7/sierra-glossary.7 +0 -285
- sierra_research-1.3.6.data/data/share/man/man7/sierra-platforms.7 +0 -358
- sierra_research-1.3.6.data/data/share/man/man7/sierra-usage.7 +0 -725
- sierra_research-1.3.6.data/data/share/man/man7/sierra.7 +0 -78
- sierra_research-1.3.6.dist-info/METADATA +0 -500
- sierra_research-1.3.6.dist-info/RECORD +0 -133
- sierra_research-1.3.6.dist-info/top_level.txt +0 -1
- {sierra_research-1.3.6.dist-info → sierra_research-1.5.0.dist-info}/entry_points.txt +0 -0
- {sierra_research-1.3.6.dist-info → sierra_research-1.5.0.dist-info/licenses}/LICENSE +0 -0
sierra/plugins/engine/ros1robot/variables/population_size.py
@@ -0,0 +1,146 @@
+# Copyright 2020 John Harwell, All rights reserved.
+#
+# SPDX-License-Identifier: MIT
+"""Classes for the population size batch criteria.
+
+See :ref:`plugins/engine/ros1robot/bc/population-size` for usage
+documentation.
+
+"""
+
+# Core packages
+import typing as tp
+import logging
+import pathlib
+
+# 3rd party packages
+import implements
+
+# Project packages
+from sierra.core.variables import batch_criteria as bc
+from sierra.core import types
+from sierra.core.variables import population_size
+from sierra.core.experiment import definition
+from sierra.core.graphs import bcbridge
+
+
+@implements.implements(bcbridge.IGraphable)
+@implements.implements(bc.IQueryableBatchCriteria)
+class PopulationSize(population_size.PopulationSize):
+    """A univariate range of system sizes used to define batch experiments.
+
+    This class is a base class which should (almost) never be used on its
+    own. Instead, the ``factory()`` function should be used to dynamically
+    create derived classes expressing the user's desired size distribution.
+
+    Note: Usage of this class assumes homogeneous systems.
+
+    Attributes:
+        size_list: List of integer system sizes defining the range of the
+                   variable for the batch experiment.
+
+    """
+
+    def __init__(
+        self,
+        cli_arg: str,
+        main_config: types.YAMLDict,
+        batch_input_root: pathlib.Path,
+        robot: str,
+        sizes: tp.List[int],
+    ) -> None:
+        population_size.PopulationSize.__init__(
+            self, cli_arg, main_config, batch_input_root
+        )
+        self.sizes = sizes
+        self.robot = robot
+        self.logger = logging.getLogger(__name__)
+        self.element_adds = []  # type: tp.List[definition.ElementAddList]
+
+    def gen_element_addlist(self) -> tp.List[definition.ElementAddList]:
+        """
+        Generate XML modifications to set system sizes.
+        """
+        if not self.element_adds:
+            robot_config = self.main_config["ros"]["robots"][self.robot]
+            prefix = robot_config["prefix"]
+
+            for s in self.sizes:
+                per_robot = definition.ElementAddList()
+                per_robot.append(definition.ElementAdd(".", "master", {}, True))
+                per_robot.append(
+                    definition.ElementAdd("./master", "group", {"ns": "sierra"}, False)
+                )
+                per_robot.append(
+                    definition.ElementAdd(
+                        "./master/group/[@ns='sierra']",
+                        "param",
+                        {"name": "experiment/n_agents", "value": str(s)},
+                        False,
+                    )
+                )
+
+                for i in range(0, s):
+
+                    # Note that we don't try to do any of the robot bringup
+                    # here--we can't know the exact node/package names without
+                    # using a lot of (brittle) config.
+                    ns = f"{prefix}{i}"
+                    per_robot.append(
+                        definition.ElementAdd("./robot", "group", {"ns": ns}, True)
+                    )
+
+                    per_robot.append(
+                        definition.ElementAdd(
+                            f"./robot/group/[@ns='{ns}']",
+                            "param",
+                            {"name": "tf_prefix", "value": ns},
+                            True,
+                        )
+                    )
+
+                self.element_adds.append(per_robot)
+
+        return self.element_adds
+
+    def n_agents(self, exp_num: int) -> int:
+        return self.sizes[exp_num]
+
+    def graph_info(
+        self,
+        cmdopts: types.Cmdopts,
+        batch_output_root: tp.Optional[pathlib.Path] = None,
+        exp_names: tp.Optional[tp.List[str]] = None,
+    ) -> bcbridge.GraphInfo:
+        info = bcbridge.GraphInfo(
+            cmdopts,
+            batch_output_root,
+            exp_names if exp_names else self.gen_exp_names(),
+        )
+
+        info.xlabel = super().graph_xlabel(info.cmdopts)
+        info.xticklabels = super().graph_xticklabels(
+            info.cmdopts, info.batch_output_root, info.exp_names
+        )
+        info.xticks = super().graph_xticks(
+            info.cmdopts, info.batch_output_root, info.exp_names
+        )
+        return info
+
+
+def factory(
+    cli_arg: str,
+    main_config: types.YAMLDict,
+    cmdopts: types.Cmdopts,
+    batch_input_root: pathlib.Path,
+    **kwargs,
+) -> PopulationSize:
+    """Create a :class:`PopulationSize` derived class from the cmdline definition."""
+    max_sizes = population_size.parse(cli_arg)
+
+    return PopulationSize(
+        cli_arg, main_config, batch_input_root, cmdopts["robot"], max_sizes
+    )
+
+
+__all__ = ["PopulationSize"]
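
Illustrative sketch (not part of the diff): the element-add structure that gen_element_addlist() above builds for a single example system size, to make the nested ElementAdd paths concrete. It assumes sierra-research 1.5.0 is installed; the size (2) and robot prefix ("tb3_") are made-up example values standing in for main_config["ros"]["robots"][<robot>]["prefix"].

# Sketch only: mirrors the loop body of gen_element_addlist() for one size.
from sierra.core.experiment import definition

size = 2
prefix = "tb3_"  # hypothetical value of the configured robot prefix

adds = definition.ElementAddList()

# A <master>/<group ns="sierra"> subtree carrying the system size...
adds.append(definition.ElementAdd(".", "master", {}, True))
adds.append(definition.ElementAdd("./master", "group", {"ns": "sierra"}, False))
adds.append(
    definition.ElementAdd(
        "./master/group/[@ns='sierra']",
        "param",
        {"name": "experiment/n_agents", "value": str(size)},
        False,
    )
)

# ...plus one namespaced <group> per robot under <robot>, each setting tf_prefix,
# i.e. roughly <group ns="tb3_0"><param name="tf_prefix" value="tb3_0"/></group>.
for i in range(size):
    ns = f"{prefix}{i}"
    adds.append(definition.ElementAdd("./robot", "group", {"ns": ns}, True))
    adds.append(
        definition.ElementAdd(
            f"./robot/group/[@ns='{ns}']",
            "param",
            {"name": "tf_prefix", "value": ns},
            True,
        )
    )

print(adds)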
sierra/plugins/execenv/hpc/__init__.py
@@ -0,0 +1,18 @@
+# Copyright 2021 John Harwell, All rights reserved.
+#
+# SPDX-License-Identifier: MIT
+"""
+Container module for plugins related to execution environments (HPC flavor).
+
+Driven by ``--execenv``.
+"""
+
+# Core packages
+
+# 3rd party packages
+
+# Project packages
+from . import cmdline
+
+
+__all__ = ["cmdline"]
sierra/plugins/execenv/hpc/adhoc/__init__.py
@@ -0,0 +1,18 @@
+# Copyright 2021 John Harwell, All rights reserved.
+#
+# SPDX-License-Identifier: MIT
+"""
+Container module for the adhoc execution environment.
+
+See :ref:`plugins/execenv/hpc/adhoc`.
+"""
+
+# Core packages
+
+# 3rd party packages
+
+# Project packages
+
+
+def sierra_plugin_type() -> str:
+    return "pipeline"
sierra/plugins/execenv/hpc/adhoc/cmdline.py
@@ -0,0 +1,30 @@
+#
+# Copyright 2025 John Harwell, All rights reserved.
+#
+# SPDX-License Identifier: MIT
+#
+"""Command line definitions for the :ref:`plugins/execenv/hpc/PBS`."""
+
+# Core packages
+import typing as tp
+import argparse
+
+# 3rd party packages
+
+# Project packages
+from sierra.plugins.execenv import hpc
+from sierra.core import types
+from sierra.plugins import PluginCmdline
+
+
+def build(
+    parents: tp.List[argparse.ArgumentParser], stages: tp.List[int]
+) -> PluginCmdline:
+    """
+    Get a cmdline parser supporting the ``hpc.adhoc`` execution environment.
+    """
+    return hpc.cmdline.HPCCmdline(parents, stages)
+
+
+def to_cmdopts(args: argparse.Namespace) -> types.Cmdopts:
+    return hpc.cmdline.to_cmdopts(args)
sierra/plugins/execenv/hpc/adhoc/plugin.py
@@ -0,0 +1,131 @@
+# Copyright 2020 John Harwell, All rights reserved.
+#
+# SPDX-License-Identifier: MIT
+"""HPC plugin for running experiments with an ad-hoc set of compute nodes.
+
+E.g., whatever computers you happen to have laying around in the lab.
+
+"""
+
+# Core packages
+import os
+import typing as tp
+import argparse
+import shutil
+import pathlib
+
+# 3rd party packages
+import implements
+
+# Project packages
+from sierra.core import types, utils
+from sierra.core.experiment import bindings
+
+
+def cmdline_postparse_configure(args: argparse.Namespace) -> argparse.Namespace:
+    """
+    Configure SIERRA for ad-hoc HPC.
+
+    May use the following environment variables:
+
+    - :envvar:`SIERRA_NODEFILE` - If this is not defined ``--nodefile`` must be
+      passed.
+
+    """
+
+    if args.nodefile is None:
+        assert "SIERRA_NODEFILE" in os.environ, (
+            "Non-hpc.adhoc environment detected: --nodefile not "
+            "passed and 'SIERRA_NODEFILE' not found"
+        )
+        args.nodefile = os.environ["SIERRA_NODEFILE"]
+
+    assert utils.path_exists(
+        args.nodefile
+    ), f"SIERRA_NODEFILE '{args.nodefile}' does not exist"
+
+    assert not args.engine_vc, "Engine visual capture not supported on Adhoc"
+
+    return args
+
+
+@implements.implements(bindings.IExpShellCmdsGenerator)
+class ExpShellCmdsGenerator:
+    """Generate the cmd to invoke GNU Parallel in the ad-hoc HPC environment."""
+
+    def __init__(self, cmdopts: types.Cmdopts, exp_num: int) -> None:
+        self.cmdopts = cmdopts
+
+    def pre_exp_cmds(self) -> tp.List[types.ShellCmdSpec]:
+        shell = shutil.which("bash")
+
+        return [
+            # Since parallel doesn't export any envvars to child processes by
+            # default, we add some common ones.
+            types.ShellCmdSpec(
+                cmd='export PARALLEL="${PARALLEL} --env LD_LIBRARY_PATH --env PYTHONPATH"',
+                shell=True,
+                wait=True,
+                env=True,
+            ),
+            # Make sure GNU parallel uses the right shell, because it seems to
+            # defaults to /bin/sh since all cmds are run in a python shell which
+            # does not have $SHELL set.
+            types.ShellCmdSpec(
+                cmd=f"export PARALLEL_SHELL={shell}", shell=True, wait=True, env=True
+            ),
+        ]
+
+    def post_exp_cmds(self) -> tp.List[types.ShellCmdSpec]:
+        return []
+
+    def exec_exp_cmds(self, exec_opts: types.StrDict) -> tp.List[types.ShellCmdSpec]:
+        jobid = os.getpid()
+
+        # Even if we are passed --nodelist, we still make our own copy of it, so
+        # that the user can safely modify it (if they want to) after running
+        # stage 1.
+        nodelist = pathlib.Path(exec_opts["exp_input_root"], f"{jobid}-nodelist.txt")
+
+        resume = ""
+        # This can't be --resume, because then GNU parallel looks at the results
+        # directory, and if there is stuff in it, (apparently) assumes that the
+        # job finished...
+        if exec_opts["exec_resume"]:
+            resume = "--resume-failed"
+
+        # Make sure there are no duplicate nodes
+        unique_nodes = types.ShellCmdSpec(
+            cmd="sort -u {0} > {1}".format(exec_opts["nodefile"], nodelist),
+            shell=True,
+            wait=True,
+        )
+        # GNU parallel cmd
+        parallel = (
+            "env && parallel {2} "
+            "--jobs {1} "
+            "--results {4} "
+            "--joblog {3} "
+            "--sshloginfile {0} "
+            '--workdir {4} < "{5}"'
+        )
+
+        log = pathlib.Path(exec_opts["exp_scratch_root"], "parallel.log")
+        parallel = parallel.format(
+            nodelist,
+            exec_opts["n_jobs"],
+            resume,
+            log,
+            exec_opts["exp_scratch_root"],
+            exec_opts["cmdfile_stem_path"] + exec_opts["cmdfile_ext"],
+        )
+
+        parallel_spec = types.ShellCmdSpec(cmd=parallel, shell=True, wait=True)
+
+        return [unique_nodes, parallel_spec]
+
+
+__all__ = [
+    "cmdline_postparse_configure",
+    "ExpShellCmdsGenerator",
+]
sierra/plugins/execenv/hpc/cmdline.py
@@ -0,0 +1,137 @@
+# Copyright 2020 John Harwell, All rights reserved.
+#
+# SPDX-License-Identifier: MIT
+"""
+Common cmdline classes for the various HPC plugins.
+"""
+
+# Core packages
+import argparse
+import typing as tp
+
+# 3rd party packages
+
+# Project packages
+from sierra.core import types
+from sierra.plugins import PluginCmdline
+
+
+class HPCCmdline(PluginCmdline):
+    def __init__(
+        self, parents: tp.List[argparse.ArgumentParser], stages: tp.List[int]
+    ) -> None:
+        super().__init__(parents, stages)
+
+    def init_stage2(self) -> None:
+        """Add HPC cmdline options.
+
+        Options may be interpreted differently between :term:`Engines
+        <Engine>`, or ignored, depending. These include:
+
+        - ``--exec-jobs-per-node``
+
+        - ``--exec-no-devnull``
+
+        - ``--exec-resume``
+
+        - ``--exec-strict``
+
+        """
+        self.stage2.add_argument(
+            "--exec-jobs-per-node",
+            help="""
+
+            Specify the maximum number of parallel jobs to run
+            per allocated node. By default this is computed
+            from the selected HPC environment for maximum
+            throughput given the desired ``--n-runs`` and CPUs
+            per allocated node. However, for some environments
+            being able to override the computed default can be
+            useful.

+            """
+            + self.stage_usage_doc([2]),
+            type=int,
+            default=None,
+        )
+
+        self.stage2.add_argument(
+            "--exec-devnull",
+            help="""
+
+            Redirect ALL output from simulations to
+            /dev/null. Useful for engine where you can't
+            disable all INFO messages at compile time, and
+            don't want to have to grep through lots of
+            redundant stdout files to see if there were any
+            errors.
+
+            """
+            + self.stage_usage_doc([1, 2]),
+            action="store_true",
+            dest="exec_devnull",
+            default=True,
+        )
+
+        self.stage2.add_argument(
+            "--exec-no-devnull",
+            help="""
+
+            Don't redirect ALL output from simulations to
+            /dev/null. Useful for engines where you can't
+            disable all INFO messages at compile time, and
+            don't want to have to grep through lots of
+            redundant stdout files to see if there were any
+            errors.
+
+            """
+            + self.stage_usage_doc([1, 2]),
+            action="store_false",
+            dest="exec_devnull",
+        )
+
+        self.stage2.add_argument(
+            "--exec-resume",
+            help="""
+            Resume a batch experiment that was killed/stopped/etc last time
+            SIERRA was run.
+            """
+            + self.stage_usage_doc([2]),
+            action="store_true",
+            default=False,
+        )
+
+        self.stage2.add_argument(
+            "--exec-strict",
+            help="""
+            If passed, then if any experimental commands fail during stage
+            2 SIERRA will exit, rather than try to keep going and execute
+            the rest of the experiments.
+
+            Useful for:
+
+            - "Correctness by construction" experiments, where you know
+              if SIERRA doesn't crash and it makes it to the end of
+              your batch experiment then none of the individual
+              experiments crashed.
+
+            - CI pipelines.
+            """,
+            action="store_true",
+        )
+
+
+def to_cmdopts(args: argparse.Namespace) -> types.Cmdopts:
+    """Update cmdopts dictionary with the HPC-specific cmdline options."""
+    return {
+        # Multistage
+        "exec_devnull": args.exec_devnull,
+        "exec_jobs_per_node": args.exec_jobs_per_node,
+        "exec_resume": args.exec_resume,
+        "exec_strict": args.exec_strict,
+    }
+
+
+__all__ = [
+    "HPCCmdline",
+]
sierra/plugins/execenv/hpc/local/__init__.py
@@ -0,0 +1,18 @@
+# Copyright 2021 John Harwell, All rights reserved.
+#
+# SPDX-License-Identifier: MIT
+"""
+Container module for the local execution environment.
+
+See :ref:`plugins/execenv/hpc/local`.
+"""
+
+# Core packages
+
+# 3rd party packages
+
+# Project packages
+
+
+def sierra_plugin_type() -> str:
+    return "pipeline"
sierra/plugins/execenv/hpc/local/cmdline.py
@@ -0,0 +1,31 @@
+#
+# Copyright 2025 John Harwell, All rights reserved.
+#
+# SPDX-License Identifier: MIT
+#
+"""
+Command line definitions for the :ref:`plugins/execenv/hpc/local`.
+"""
+# Core packages
+import typing as tp
+import argparse
+
+# 3rd party packages
+
+# Project packages
+from sierra.plugins.execenv import hpc
+from sierra.core import types
+from sierra.plugins import PluginCmdline
+
+
+def build(
+    parents: tp.List[argparse.ArgumentParser], stages: tp.List[int]
+) -> PluginCmdline:
+    """
+    Get a cmdline parser supporting the ``hpc.local`` execution environment.
+    """
+    return hpc.cmdline.HPCCmdline(parents, stages)
+
+
+def to_cmdopts(args: argparse.Namespace) -> types.Cmdopts:
+    return hpc.cmdline.to_cmdopts(args)
sierra/plugins/execenv/hpc/local/plugin.py
@@ -0,0 +1,145 @@
+# Copyright 2020 John Harwell, All rights reserved.
+#
+# SPDX-License-Identifier: MIT
+"""HPC plugin for running SIERRA locally.
+
+Not necessarily HPC, but it fits well enough under that semantic umbrella.
+
+"""
+
+# Core packages
+import typing as tp
+import shutil
+import pathlib
+
+# 3rd party packages
+import implements
+
+# Project packages
+from sierra.core import types
+from sierra.core.experiment import bindings
+
+
+@implements.implements(bindings.IExpShellCmdsGenerator)
+class ExpShellCmdsGenerator:
+    """
+    Generate the commands for local HPC (experiment-level parallelism).
+    """
+
+    def __init__(self, cmdopts: types.Cmdopts, exp_num: int) -> None:
+        self.cmdopts = cmdopts
+
+    def pre_exp_cmds(self) -> tp.List[types.ShellCmdSpec]:
+        shell = shutil.which("bash")
+
+        return [
+            # Since parallel doesn't export any envvars to child processes by
+            # default, we add some common ones.
+            types.ShellCmdSpec(
+                cmd='export PARALLEL="${PARALLEL} --env LD_LIBRARY_PATH --env PYTHONPATH"',
+                shell=True,
+                wait=True,
+                env=True,
+            ),
+            # Make sure GNU parallel uses the right shell, because it seems to
+            # defaults to /bin/sh since all cmds are run in a python shell which
+            # does not have $SHELL set.
+            types.ShellCmdSpec(
+                cmd=f"export PARALLEL_SHELL={shell}", shell=True, wait=True, env=True
+            ),
+        ]
+
+    def post_exp_cmds(self) -> tp.List[types.ShellCmdSpec]:
+        return []
+
+    def exec_exp_cmds(self, exec_opts: types.StrDict) -> tp.List[types.ShellCmdSpec]:
+        resume = ""
+
+        # This can't be --resume, because then GNU parallel looks at the results
+        # directory, and if there is stuff in it, (apparently) assumes that the
+        # job finished...
+        if exec_opts["exec_resume"]:
+            resume = "--resume-failed"
+
+        parallel = (
+            "parallel {1} "
+            "--jobs {2} "
+            "--results {0} "
+            "--joblog {3} "
+            '--no-notice < "{4}"'
+        )
+
+        log = pathlib.Path(exec_opts["exp_scratch_root"], "parallel.log")
+        parallel = parallel.format(
+            exec_opts["exp_scratch_root"],
+            resume,
+            exec_opts["n_jobs"],
+            log,
+            exec_opts["cmdfile_stem_path"] + exec_opts["cmdfile_ext"],
+        )
+
+        return [types.ShellCmdSpec(cmd=parallel, shell=True, wait=True)]
+
+
+@implements.implements(bindings.IBatchShellCmdsGenerator)
+class BatchShellCmdsGenerator:
+    """
+    Generate the commands for local HPC (batch-level parallelism).
+    """
+
+    def __init__(self, cmdopts: types.Cmdopts) -> None:
+        self.cmdopts = cmdopts
+
+    def pre_batch_cmds(self) -> tp.List[types.ShellCmdSpec]:
+        shell = shutil.which("bash")
+
+        return [
+            # Since parallel doesn't export any envvars to child processes by
+            # default, we add some common ones.
+            types.ShellCmdSpec(
+                cmd='export PARALLEL="${PARALLEL} --env LD_LIBRARY_PATH"',
+                shell=True,
+                wait=True,
+                env=True,
+            ),
+            # Make sure GNU parallel uses the right shell, because it seems to
+            # defaults to /bin/sh since all cmds are run in a python shell which
+            # does not have $SHELL set.
+            types.ShellCmdSpec(
+                cmd=f"export PARALLEL_SHELL={shell}", shell=True, wait=True, env=True
+            ),
+        ]
+
+    def post_batch_cmds(self) -> tp.List[types.ShellCmdSpec]:
+        return []
+
+    def exec_batch_cmds(self, exec_opts: types.StrDict) -> tp.List[types.ShellCmdSpec]:
+        resume = ""
+
+        # This can't be --resume, because then GNU parallel looks at the results
+        # directory, and if there is stuff in it, (apparently) assumes that the
+        # job finished...
+        if exec_opts["exec_resume"]:
+            resume = "--resume-failed"
+
+        parallel = (
+            "env && parallel {1} "
+            "--jobs {2} "
+            "--results {0} "
+            "--joblog {3} "
+            '--no-notice < "{4}"'
+        )
+
+        log = pathlib.Path(exec_opts["batch_scratch_root"], "parallel.log")
+        parallel = parallel.format(
+            exec_opts["batch_scratch_root"],
+            resume,
+            exec_opts["n_jobs"],
+            log,
+            exec_opts["cmdfile_stem_path"] + exec_opts["cmdfile_ext"],
+        )
+
+        return [types.ShellCmdSpec(cmd=parallel, shell=True, wait=True)]
+
+
+__all__ = ["ExpShellCmdsGenerator", "BatchShellCmdsGenerator"]
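
Illustrative sketch (not part of the diff): what the GNU parallel command rendered by ExpShellCmdsGenerator.exec_exp_cmds() in the local plugin above looks like once the template placeholders are filled in. The exec_opts values here are made-up example values; only the template and the argument order come from the code shown above.

# Sketch only: reproduces the template formatting from exec_exp_cmds() with
# hypothetical exec_opts values, without importing sierra.
exec_opts = {
    "exp_scratch_root": "/tmp/sierra/scratch/exp0",   # hypothetical path
    "exec_resume": True,
    "n_jobs": 4,
    "cmdfile_stem_path": "/tmp/sierra/input/exp0/commands",  # hypothetical path
    "cmdfile_ext": ".txt",
}

resume = "--resume-failed" if exec_opts["exec_resume"] else ""
log = f"{exec_opts['exp_scratch_root']}/parallel.log"

parallel = (
    "parallel {1} "
    "--jobs {2} "
    "--results {0} "
    "--joblog {3} "
    '--no-notice < "{4}"'
).format(
    exec_opts["exp_scratch_root"],
    resume,
    exec_opts["n_jobs"],
    log,
    exec_opts["cmdfile_stem_path"] + exec_opts["cmdfile_ext"],
)

print(parallel)
# parallel --resume-failed --jobs 4 --results /tmp/sierra/scratch/exp0
#   --joblog /tmp/sierra/scratch/exp0/parallel.log --no-notice
#   < "/tmp/sierra/input/exp0/commands.txt"
# (printed as a single line)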