hpcflow-new2 0.2.0a189__py3-none-any.whl → 0.2.0a199__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package versions exactly as they appear in their respective public registries.
- hpcflow/__pyinstaller/hook-hpcflow.py +9 -6
- hpcflow/_version.py +1 -1
- hpcflow/app.py +1 -0
- hpcflow/data/scripts/bad_script.py +2 -0
- hpcflow/data/scripts/do_nothing.py +2 -0
- hpcflow/data/scripts/env_specifier_test/input_file_generator_pass_env_spec.py +4 -0
- hpcflow/data/scripts/env_specifier_test/main_script_test_pass_env_spec.py +8 -0
- hpcflow/data/scripts/env_specifier_test/output_file_parser_pass_env_spec.py +4 -0
- hpcflow/data/scripts/env_specifier_test/v1/input_file_generator_basic.py +4 -0
- hpcflow/data/scripts/env_specifier_test/v1/main_script_test_direct_in_direct_out.py +7 -0
- hpcflow/data/scripts/env_specifier_test/v1/output_file_parser_basic.py +4 -0
- hpcflow/data/scripts/env_specifier_test/v2/main_script_test_direct_in_direct_out.py +7 -0
- hpcflow/data/scripts/input_file_generator_basic.py +3 -0
- hpcflow/data/scripts/input_file_generator_basic_FAIL.py +3 -0
- hpcflow/data/scripts/input_file_generator_test_stdout_stderr.py +8 -0
- hpcflow/data/scripts/main_script_test_direct_in.py +3 -0
- hpcflow/data/scripts/main_script_test_direct_in_direct_out_2.py +6 -0
- hpcflow/data/scripts/main_script_test_direct_in_direct_out_2_fail_allowed.py +6 -0
- hpcflow/data/scripts/main_script_test_direct_in_direct_out_2_fail_allowed_group.py +7 -0
- hpcflow/data/scripts/main_script_test_direct_in_direct_out_3.py +6 -0
- hpcflow/data/scripts/main_script_test_direct_in_group_direct_out_3.py +6 -0
- hpcflow/data/scripts/main_script_test_direct_in_group_one_fail_direct_out_3.py +6 -0
- hpcflow/data/scripts/main_script_test_hdf5_in_obj.py +1 -1
- hpcflow/data/scripts/main_script_test_hdf5_in_obj_2.py +12 -0
- hpcflow/data/scripts/main_script_test_hdf5_out_obj.py +1 -1
- hpcflow/data/scripts/main_script_test_json_out_FAIL.py +3 -0
- hpcflow/data/scripts/main_script_test_shell_env_vars.py +12 -0
- hpcflow/data/scripts/main_script_test_std_out_std_err.py +6 -0
- hpcflow/data/scripts/output_file_parser_basic.py +3 -0
- hpcflow/data/scripts/output_file_parser_basic_FAIL.py +7 -0
- hpcflow/data/scripts/output_file_parser_test_stdout_stderr.py +8 -0
- hpcflow/data/scripts/script_exit_test.py +5 -0
- hpcflow/data/template_components/environments.yaml +1 -1
- hpcflow/sdk/__init__.py +26 -15
- hpcflow/sdk/app.py +2192 -768
- hpcflow/sdk/cli.py +506 -296
- hpcflow/sdk/cli_common.py +105 -7
- hpcflow/sdk/config/__init__.py +1 -1
- hpcflow/sdk/config/callbacks.py +115 -43
- hpcflow/sdk/config/cli.py +126 -103
- hpcflow/sdk/config/config.py +674 -318
- hpcflow/sdk/config/config_file.py +131 -95
- hpcflow/sdk/config/errors.py +125 -84
- hpcflow/sdk/config/types.py +148 -0
- hpcflow/sdk/core/__init__.py +25 -1
- hpcflow/sdk/core/actions.py +1771 -1059
- hpcflow/sdk/core/app_aware.py +24 -0
- hpcflow/sdk/core/cache.py +139 -79
- hpcflow/sdk/core/command_files.py +263 -287
- hpcflow/sdk/core/commands.py +145 -112
- hpcflow/sdk/core/element.py +828 -535
- hpcflow/sdk/core/enums.py +192 -0
- hpcflow/sdk/core/environment.py +74 -93
- hpcflow/sdk/core/errors.py +455 -52
- hpcflow/sdk/core/execute.py +207 -0
- hpcflow/sdk/core/json_like.py +540 -272
- hpcflow/sdk/core/loop.py +751 -347
- hpcflow/sdk/core/loop_cache.py +164 -47
- hpcflow/sdk/core/object_list.py +370 -207
- hpcflow/sdk/core/parameters.py +1100 -627
- hpcflow/sdk/core/rule.py +59 -41
- hpcflow/sdk/core/run_dir_files.py +21 -37
- hpcflow/sdk/core/skip_reason.py +7 -0
- hpcflow/sdk/core/task.py +1649 -1339
- hpcflow/sdk/core/task_schema.py +308 -196
- hpcflow/sdk/core/test_utils.py +191 -114
- hpcflow/sdk/core/types.py +440 -0
- hpcflow/sdk/core/utils.py +485 -309
- hpcflow/sdk/core/validation.py +82 -9
- hpcflow/sdk/core/workflow.py +2544 -1178
- hpcflow/sdk/core/zarr_io.py +98 -137
- hpcflow/sdk/data/workflow_spec_schema.yaml +2 -0
- hpcflow/sdk/demo/cli.py +53 -33
- hpcflow/sdk/helper/cli.py +18 -15
- hpcflow/sdk/helper/helper.py +75 -63
- hpcflow/sdk/helper/watcher.py +61 -28
- hpcflow/sdk/log.py +122 -71
- hpcflow/sdk/persistence/__init__.py +8 -31
- hpcflow/sdk/persistence/base.py +1360 -606
- hpcflow/sdk/persistence/defaults.py +6 -0
- hpcflow/sdk/persistence/discovery.py +38 -0
- hpcflow/sdk/persistence/json.py +568 -188
- hpcflow/sdk/persistence/pending.py +382 -179
- hpcflow/sdk/persistence/store_resource.py +39 -23
- hpcflow/sdk/persistence/types.py +318 -0
- hpcflow/sdk/persistence/utils.py +14 -11
- hpcflow/sdk/persistence/zarr.py +1337 -433
- hpcflow/sdk/runtime.py +44 -41
- hpcflow/sdk/submission/{jobscript_info.py → enums.py} +39 -12
- hpcflow/sdk/submission/jobscript.py +1651 -692
- hpcflow/sdk/submission/schedulers/__init__.py +167 -39
- hpcflow/sdk/submission/schedulers/direct.py +121 -81
- hpcflow/sdk/submission/schedulers/sge.py +170 -129
- hpcflow/sdk/submission/schedulers/slurm.py +291 -268
- hpcflow/sdk/submission/schedulers/utils.py +12 -2
- hpcflow/sdk/submission/shells/__init__.py +14 -15
- hpcflow/sdk/submission/shells/base.py +150 -29
- hpcflow/sdk/submission/shells/bash.py +283 -173
- hpcflow/sdk/submission/shells/os_version.py +31 -30
- hpcflow/sdk/submission/shells/powershell.py +228 -170
- hpcflow/sdk/submission/submission.py +1014 -335
- hpcflow/sdk/submission/types.py +140 -0
- hpcflow/sdk/typing.py +182 -12
- hpcflow/sdk/utils/arrays.py +71 -0
- hpcflow/sdk/utils/deferred_file.py +55 -0
- hpcflow/sdk/utils/hashing.py +16 -0
- hpcflow/sdk/utils/patches.py +12 -0
- hpcflow/sdk/utils/strings.py +33 -0
- hpcflow/tests/api/test_api.py +32 -0
- hpcflow/tests/conftest.py +27 -6
- hpcflow/tests/data/multi_path_sequences.yaml +29 -0
- hpcflow/tests/data/workflow_test_run_abort.yaml +34 -35
- hpcflow/tests/schedulers/sge/test_sge_submission.py +36 -0
- hpcflow/tests/schedulers/slurm/test_slurm_submission.py +5 -2
- hpcflow/tests/scripts/test_input_file_generators.py +282 -0
- hpcflow/tests/scripts/test_main_scripts.py +866 -85
- hpcflow/tests/scripts/test_non_snippet_script.py +46 -0
- hpcflow/tests/scripts/test_ouput_file_parsers.py +353 -0
- hpcflow/tests/shells/wsl/test_wsl_submission.py +12 -4
- hpcflow/tests/unit/test_action.py +262 -75
- hpcflow/tests/unit/test_action_rule.py +9 -4
- hpcflow/tests/unit/test_app.py +33 -6
- hpcflow/tests/unit/test_cache.py +46 -0
- hpcflow/tests/unit/test_cli.py +134 -1
- hpcflow/tests/unit/test_command.py +71 -54
- hpcflow/tests/unit/test_config.py +142 -16
- hpcflow/tests/unit/test_config_file.py +21 -18
- hpcflow/tests/unit/test_element.py +58 -62
- hpcflow/tests/unit/test_element_iteration.py +50 -1
- hpcflow/tests/unit/test_element_set.py +29 -19
- hpcflow/tests/unit/test_group.py +4 -2
- hpcflow/tests/unit/test_input_source.py +116 -93
- hpcflow/tests/unit/test_input_value.py +29 -24
- hpcflow/tests/unit/test_jobscript_unit.py +757 -0
- hpcflow/tests/unit/test_json_like.py +44 -35
- hpcflow/tests/unit/test_loop.py +1396 -84
- hpcflow/tests/unit/test_meta_task.py +325 -0
- hpcflow/tests/unit/test_multi_path_sequences.py +229 -0
- hpcflow/tests/unit/test_object_list.py +17 -12
- hpcflow/tests/unit/test_parameter.py +29 -7
- hpcflow/tests/unit/test_persistence.py +237 -42
- hpcflow/tests/unit/test_resources.py +20 -18
- hpcflow/tests/unit/test_run.py +117 -6
- hpcflow/tests/unit/test_run_directories.py +29 -0
- hpcflow/tests/unit/test_runtime.py +2 -1
- hpcflow/tests/unit/test_schema_input.py +23 -15
- hpcflow/tests/unit/test_shell.py +23 -2
- hpcflow/tests/unit/test_slurm.py +8 -7
- hpcflow/tests/unit/test_submission.py +38 -89
- hpcflow/tests/unit/test_task.py +352 -247
- hpcflow/tests/unit/test_task_schema.py +33 -20
- hpcflow/tests/unit/test_utils.py +9 -11
- hpcflow/tests/unit/test_value_sequence.py +15 -12
- hpcflow/tests/unit/test_workflow.py +114 -83
- hpcflow/tests/unit/test_workflow_template.py +0 -1
- hpcflow/tests/unit/utils/test_arrays.py +40 -0
- hpcflow/tests/unit/utils/test_deferred_file_writer.py +34 -0
- hpcflow/tests/unit/utils/test_hashing.py +65 -0
- hpcflow/tests/unit/utils/test_patches.py +5 -0
- hpcflow/tests/unit/utils/test_redirect_std.py +50 -0
- hpcflow/tests/workflows/__init__.py +0 -0
- hpcflow/tests/workflows/test_directory_structure.py +31 -0
- hpcflow/tests/workflows/test_jobscript.py +334 -1
- hpcflow/tests/workflows/test_run_status.py +198 -0
- hpcflow/tests/workflows/test_skip_downstream.py +696 -0
- hpcflow/tests/workflows/test_submission.py +140 -0
- hpcflow/tests/workflows/test_workflows.py +160 -15
- hpcflow/tests/workflows/test_zip.py +18 -0
- hpcflow/viz_demo.ipynb +6587 -3
- {hpcflow_new2-0.2.0a189.dist-info → hpcflow_new2-0.2.0a199.dist-info}/METADATA +8 -4
- hpcflow_new2-0.2.0a199.dist-info/RECORD +221 -0
- hpcflow/sdk/core/parallel.py +0 -21
- hpcflow_new2-0.2.0a189.dist-info/RECORD +0 -158
- {hpcflow_new2-0.2.0a189.dist-info → hpcflow_new2-0.2.0a199.dist-info}/LICENSE +0 -0
- {hpcflow_new2-0.2.0a189.dist-info → hpcflow_new2-0.2.0a199.dist-info}/WHEEL +0 -0
- {hpcflow_new2-0.2.0a189.dist-info → hpcflow_new2-0.2.0a199.dist-info}/entry_points.txt +0 -0
hpcflow/tests/scripts/test_main_scripts.py
@@ -1,3 +1,7 @@
+import json
+import os
+from pathlib import Path
+import shutil
 import time
 import pytest
 
@@ -10,7 +14,8 @@ from hpcflow.sdk.core.test_utils import P1_parameter_cls as P1
 
 @pytest.mark.integration
 @pytest.mark.skipif("hf.run_time_info.is_frozen")
-def test_script_direct_in_direct_out(null_config, tmp_path):
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_direct_in_direct_out(null_config, tmp_path: Path, combine_scripts: bool):
     s1 = hf.TaskSchema(
         objective="t1",
         inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
@@ -28,18 +33,24 @@ def test_script_direct_in_direct_out(null_config, tmp_path):
     p1_val = 101
     t1 = hf.Task(schema=s1, inputs={"p1": p1_val})
     wk = hf.Workflow.from_template_data(
-        tasks=[t1], template_name="main_script_test", path=tmp_path
+        tasks=[t1],
+        template_name="main_script_test",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
     )
     wk.submit(wait=True, add_to_known=False)
-
-
-
-    assert
+
+    p2 = wk.tasks[0].elements[0].outputs.p2
+    assert isinstance(p2, hf.ElementParameter)
+    assert p2.value == p1_val + 100
 
 
 @pytest.mark.integration
 @pytest.mark.skipif("hf.run_time_info.is_frozen")
-def test_script_direct_sub_param_in_direct_out(null_config, tmp_path):
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_direct_sub_param_in_direct_out(
+    null_config, tmp_path: Path, combine_scripts: bool
+):
     s1 = hf.TaskSchema(
         objective="t1",
         inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
@@ -57,18 +68,24 @@ def test_script_direct_sub_param_in_direct_out(null_config, tmp_path):
     p1_val = {"a": 101}
     t1 = hf.Task(schema=s1, inputs={"p1": p1_val})
     wk = hf.Workflow.from_template_data(
-        tasks=[t1], template_name="main_script_test", path=tmp_path
+        tasks=[t1],
+        template_name="main_script_test",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
     )
     wk.submit(wait=True, add_to_known=False)
-
-
-
-    assert
+
+    p2 = wk.tasks[0].elements[0].outputs.p2
+    assert isinstance(p2, hf.ElementParameter)
+    assert p2.value == p1_val["a"] + 100
 
 
 @pytest.mark.integration
 @pytest.mark.skipif("hf.run_time_info.is_frozen")
-def test_script_direct_in_direct_out_single_label(null_config, tmp_path):
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_direct_in_direct_out_single_label(
+    null_config, tmp_path: Path, combine_scripts: bool
+):
     """This uses the same test script as the `test_script_direct_in_direct_out` test;
     single labels are trivial and need not be referenced in the script."""
     p1_label = "one"
@@ -89,18 +106,24 @@ def test_script_direct_in_direct_out_single_label(null_config, tmp_path):
     p1_val = 101
     t1 = hf.Task(schema=s1, inputs={f"p1[{p1_label}]": p1_val})
     wk = hf.Workflow.from_template_data(
-        tasks=[t1], template_name="main_script_test", path=tmp_path
+        tasks=[t1],
+        template_name="main_script_test",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
     )
     wk.submit(wait=True, add_to_known=False)
-
-
-
-    assert
+
+    p2 = wk.tasks[0].elements[0].outputs.p2
+    assert isinstance(p2, hf.ElementParameter)
+    assert p2.value == p1_val + 100
 
 
 @pytest.mark.integration
 @pytest.mark.skipif("hf.run_time_info.is_frozen")
-def test_script_direct_in_direct_out_labels(null_config, tmp_path):
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_direct_in_direct_out_labels(
+    null_config, tmp_path: Path, combine_scripts: bool
+):
     p1_label_1 = "one"
     p1_label_2 = "two"
     s1 = hf.TaskSchema(
@@ -133,18 +156,22 @@ def test_script_direct_in_direct_out_labels(null_config, tmp_path):
         },
     )
     wk = hf.Workflow.from_template_data(
-        tasks=[t1], template_name="main_script_test", path=tmp_path
+        tasks=[t1],
+        template_name="main_script_test",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
     )
     wk.submit(wait=True, add_to_known=False)
-
-
-
-    assert
+
+    p2 = wk.tasks[0].elements[0].outputs.p2
+    assert isinstance(p2, hf.ElementParameter)
+    assert p2.value == p1_1_val + p1_2_val
 
 
 @pytest.mark.integration
 @pytest.mark.skipif("hf.run_time_info.is_frozen")
-def test_script_json_in_json_out(null_config, tmp_path):
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_json_in_json_out(null_config, tmp_path: Path, combine_scripts: bool):
     s1 = hf.TaskSchema(
         objective="t1",
         inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
@@ -156,24 +183,31 @@ def test_script_json_in_json_out(null_config, tmp_path):
                 script_data_out="json",
                 script_exe="python_script",
                 environments=[hf.ActionEnvironment(environment="python_env")],
+                requires_dir=True,
             )
         ],
     )
     p1_val = 101
     t1 = hf.Task(schema=s1, inputs={"p1": p1_val})
     wk = hf.Workflow.from_template_data(
-        tasks=[t1], template_name="main_script_test", path=tmp_path
+        tasks=[t1],
+        template_name="main_script_test",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
     )
     wk.submit(wait=True, add_to_known=False)
-
-
-
-    assert
+
+    p2 = wk.tasks[0].elements[0].outputs.p2
+    assert isinstance(p2, hf.ElementParameter)
+    assert p2.value == p1_val + 100
 
 
 @pytest.mark.integration
 @pytest.mark.skipif("hf.run_time_info.is_frozen")
-def test_script_json_in_json_out_labels(null_config, tmp_path):
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_json_in_json_out_labels(
+    null_config, tmp_path: Path, combine_scripts: bool
+):
     p1_label_1 = "one"
     p1_label_2 = "two"
     s1 = hf.TaskSchema(
@@ -193,6 +227,7 @@ def test_script_json_in_json_out_labels(null_config, tmp_path):
                 script_data_out="json",
                 script_exe="python_script",
                 environments=[hf.ActionEnvironment(environment="python_env")],
+                requires_dir=True,
             )
         ],
     )
@@ -206,18 +241,24 @@ def test_script_json_in_json_out_labels(null_config, tmp_path):
         },
     )
     wk = hf.Workflow.from_template_data(
-        tasks=[t1], template_name="main_script_test", path=tmp_path
+        tasks=[t1],
+        template_name="main_script_test",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
     )
     wk.submit(wait=True, add_to_known=False)
-
-
-
-    assert
+
+    p2 = wk.tasks[0].elements[0].outputs.p2
+    assert isinstance(p2, hf.ElementParameter)
+    assert p2.value == p1_1_val + p1_2_val
 
 
 @pytest.mark.integration
 @pytest.mark.skipif("hf.run_time_info.is_frozen")
-def test_script_json_sub_param_in_json_out_labels(null_config, tmp_path):
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_json_sub_param_in_json_out_labels(
+    null_config, tmp_path: Path, combine_scripts: bool
+):
     p1_label_1 = "one"
     p1_label_2 = "two"
     s1 = hf.TaskSchema(
@@ -237,6 +278,7 @@ def test_script_json_sub_param_in_json_out_labels(null_config, tmp_path):
                 script_data_out="json",
                 script_exe="python_script",
                 environments=[hf.ActionEnvironment(environment="python_env")],
+                requires_dir=True,
             )
         ],
     )
@@ -250,18 +292,24 @@ def test_script_json_sub_param_in_json_out_labels(null_config, tmp_path):
         },
     )
     wk = hf.Workflow.from_template_data(
-        tasks=[t1], template_name="main_script_test", path=tmp_path
+        tasks=[t1],
+        template_name="main_script_test",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
    )
     wk.submit(wait=True, add_to_known=False)
-
-
-
-    assert
+
+    p2 = wk.tasks[0].elements[0].outputs.p2
+    assert isinstance(p2, hf.ElementParameter)
+    assert p2.value == a_val + p1_2_val
 
 
 @pytest.mark.integration
 @pytest.mark.skipif("hf.run_time_info.is_frozen")
-def test_script_json_and_direct_in_json_out(null_config, tmp_path):
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_json_and_direct_in_json_out(
+    null_config, tmp_path: Path, combine_scripts: bool
+):
     s1 = hf.TaskSchema(
         objective="t1",
         inputs=[
@@ -276,6 +324,7 @@ def test_script_json_and_direct_in_json_out(null_config, tmp_path):
                 script_data_out="json",
                 script_exe="python_script",
                 environments=[hf.ActionEnvironment(environment="python_env")],
+                requires_dir=True,
             )
         ],
     )
@@ -283,18 +332,24 @@ def test_script_json_and_direct_in_json_out(null_config, tmp_path):
     p2_val = 201
     t1 = hf.Task(schema=s1, inputs={"p1": p1_val, "p2": p2_val})
     wk = hf.Workflow.from_template_data(
-        tasks=[t1], template_name="main_script_test", path=tmp_path
+        tasks=[t1],
+        template_name="main_script_test",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
     )
     wk.submit(wait=True, add_to_known=False)
-
-
-
-    assert
+
+    p3 = wk.tasks[0].elements[0].outputs.p3
+    assert isinstance(p3, hf.ElementParameter)
+    assert p3.value == p1_val + p2_val
 
 
 @pytest.mark.integration
 @pytest.mark.skipif("hf.run_time_info.is_frozen")
-def test_script_json_in_json_and_direct_out(null_config, tmp_path):
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_json_in_json_and_direct_out(
+    null_config, tmp_path: Path, combine_scripts: bool
+):
     s1 = hf.TaskSchema(
         objective="t1",
         inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
@@ -309,25 +364,32 @@ def test_script_json_in_json_and_direct_out(null_config, tmp_path):
                 script_data_out={"p2": "json", "p3": "direct"},
                 script_exe="python_script",
                 environments=[hf.ActionEnvironment(environment="python_env")],
+                requires_dir=True,
             )
         ],
     )
     p1_val = 101
     t1 = hf.Task(schema=s1, inputs={"p1": p1_val})
     wk = hf.Workflow.from_template_data(
-        tasks=[t1], template_name="main_script_test", path=tmp_path
+        tasks=[t1],
+        template_name="main_script_test",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
     )
     wk.submit(wait=True, add_to_known=False)
-
-
-
-
-    assert
+
+    p2 = wk.tasks[0].elements[0].outputs.p2
+    assert isinstance(p2, hf.ElementParameter)
+    p3 = wk.tasks[0].elements[0].outputs.p3
+    assert isinstance(p3, hf.ElementParameter)
+    assert p2.value == p1_val + 100
+    assert p3.value == p1_val + 200
 
 
 @pytest.mark.integration
 @pytest.mark.skipif("hf.run_time_info.is_frozen")
-def test_script_json_in_obj(null_config, tmp_path):
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_json_in_obj(null_config, tmp_path: Path, combine_scripts: bool):
     """Use a custom JSON dumper defined in the P1 class."""
     s1 = hf.TaskSchema(
         objective="t1",
@@ -340,6 +402,7 @@ def test_script_json_in_obj(null_config, tmp_path):
                 script_data_out="direct",
                 script_exe="python_script",
                 environments=[hf.ActionEnvironment(environment="python_env")],
+                requires_dir=True,
             )
         ],
         parameter_class_modules=["hpcflow.sdk.core.test_utils"],
@@ -347,18 +410,22 @@ def test_script_json_in_obj(null_config, tmp_path):
     a_val = 1
     t1 = hf.Task(schema=s1, inputs={"p1c": P1(a=a_val)})
     wk = hf.Workflow.from_template_data(
-        tasks=[t1], template_name="main_script_test", path=tmp_path
+        tasks=[t1],
+        template_name="main_script_test",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
     )
     wk.submit(wait=True, add_to_known=False)
-
-
-
-    assert
+
+    p2 = wk.tasks[0].elements[0].outputs.p2
+    assert isinstance(p2, hf.ElementParameter)
+    assert p2.value == a_val + 100
 
 
 @pytest.mark.integration
 @pytest.mark.skipif("hf.run_time_info.is_frozen")
-def test_script_hdf5_in_obj(null_config, tmp_path):
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_hdf5_in_obj(null_config, tmp_path: Path, combine_scripts: bool):
     """Use a custom HDF5 dumper defined in the P1 class."""
     s1 = hf.TaskSchema(
         objective="t1",
@@ -371,6 +438,7 @@ def test_script_hdf5_in_obj(null_config, tmp_path):
                 script_data_out="direct",
                 script_exe="python_script",
                 environments=[hf.ActionEnvironment(environment="python_env")],
+                requires_dir=True,
             )
         ],
         parameter_class_modules=["hpcflow.sdk.core.test_utils"],
@@ -378,18 +446,22 @@ def test_script_hdf5_in_obj(null_config, tmp_path):
     a_val = 1
     t1 = hf.Task(schema=s1, inputs={"p1c": P1(a=a_val)})
     wk = hf.Workflow.from_template_data(
-        tasks=[t1], template_name="main_script_test", path=tmp_path
+        tasks=[t1],
+        template_name="main_script_test",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
     )
     wk.submit(wait=True, add_to_known=False)
-
-
-
-    assert
+
+    p2 = wk.tasks[0].elements[0].outputs.p2
+    assert isinstance(p2, hf.ElementParameter)
+    assert p2.value == a_val + 100
 
 
 @pytest.mark.integration
 @pytest.mark.skipif("hf.run_time_info.is_frozen")
-def test_script_json_out_obj(null_config, tmp_path):
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_json_out_obj(null_config, tmp_path: Path, combine_scripts: bool):
     """Use a custom JSON saver defined in the P1 class."""
     s1 = hf.TaskSchema(
         objective="t1",
@@ -402,6 +474,7 @@ def test_script_json_out_obj(null_config, tmp_path):
                 script_data_out="json",
                 script_exe="python_script",
                 environments=[hf.ActionEnvironment(environment="python_env")],
+                requires_dir=True,
             )
         ],
         parameter_class_modules=["hpcflow.sdk.core.test_utils"],
@@ -409,18 +482,22 @@ def test_script_json_out_obj(null_config, tmp_path):
     p1_val = 1
     t1 = hf.Task(schema=s1, inputs={"p1": p1_val})
     wk = hf.Workflow.from_template_data(
-        tasks=[t1], template_name="main_script_test", path=tmp_path
+        tasks=[t1],
+        template_name="main_script_test",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
     )
     wk.submit(wait=True, add_to_known=False)
-
-
-
-    assert
+
+    p1c = wk.tasks[0].elements[0].outputs.p1c
+    assert isinstance(p1c, hf.ElementParameter)
+    assert p1c.value == P1(a=p1_val + 100)
 
 
 @pytest.mark.integration
 @pytest.mark.skipif("hf.run_time_info.is_frozen")
-def test_script_hdf5_out_obj(null_config, tmp_path):
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_hdf5_out_obj(null_config, tmp_path: Path, combine_scripts: bool):
     """Use a custom HDF5 saver defined in the P1 class."""
     s1 = hf.TaskSchema(
         objective="t1",
@@ -433,6 +510,7 @@ def test_script_hdf5_out_obj(null_config, tmp_path):
                 script_data_out="hdf5",
                 script_exe="python_script",
                 environments=[hf.ActionEnvironment(environment="python_env")],
+                requires_dir=True,
             )
         ],
         parameter_class_modules=["hpcflow.sdk.core.test_utils"],
@@ -440,18 +518,24 @@ def test_script_hdf5_out_obj(null_config, tmp_path):
     p1_val = 1
     t1 = hf.Task(schema=s1, inputs={"p1": p1_val})
     wk = hf.Workflow.from_template_data(
-        tasks=[t1], template_name="main_script_test", path=tmp_path
+        tasks=[t1],
+        template_name="main_script_test",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
    )
     wk.submit(wait=True, add_to_known=False)
-
-
-
-    assert
+
+    p1c = wk.tasks[0].elements[0].outputs.p1c
+    assert isinstance(p1c, hf.ElementParameter)
+    assert p1c.value == P1(a=p1_val + 100)
 
 
 @pytest.mark.integration
 @pytest.mark.skipif("hf.run_time_info.is_frozen")
-def test_script_direct_in_pass_env_spec(new_null_config, tmp_path):
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_direct_in_pass_env_spec(
+    new_null_config, tmp_path: Path, combine_scripts: bool
+):
 
     vers_spec = {"version": "1.2"}
     env = hf.Environment(
@@ -462,7 +546,7 @@ def test_script_direct_in_pass_env_spec(new_null_config, tmp_path):
                 label="python_script",
                 instances=[
                     hf.ExecutableInstance(
-                        command="python <<
+                        command="python <<script_path>> <<args>>",
                         num_cores=1,
                         parallel_mode=None,
                     )
@@ -498,12 +582,709 @@ def test_script_direct_in_pass_env_spec(new_null_config, tmp_path):
         tasks=[t1],
         template_name="main_script_test",
         path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
     )
     wk.submit(wait=True, add_to_known=False)
-
-
-
-    assert
+
+    p2 = wk.tasks[0].elements[0].outputs.p2
+    assert isinstance(p2, hf.ElementParameter)
+    assert p2.value == {
         "name": "python_env_with_specifiers",
         **vers_spec,
     }
+    hf.reload_template_components()  # remove extra envs
+
+
+@pytest.mark.integration
+@pytest.mark.skipif("hf.run_time_info.is_frozen")
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_std_stream_redirect_on_exception(
+    new_null_config, tmp_path: Path, combine_scripts: bool
+):
+    """Test exceptions raised by the app during execution of a script are printed to the
+    std-stream redirect file (and not the jobscript's standard error file)."""
+
+    # define a custom python environment which redefines the `WK_PATH` shell variable to
+    # a nonsense value so the app cannot load the workflow and thus raises an exception
+    app_caps = hf.package_name.upper()
+    if os.name == "nt":
+        env_cmd = f'$env:{app_caps}_WK_PATH = "nonsense_path"'
+    else:
+        env_cmd = f'export {app_caps}_WK_PATH="nonsense_path"'
+
+    env_cmd += "; python <<script_path>> <<args>>"
+    bad_env = hf.Environment(
+        name="bad_python_env",
+        executables=[
+            hf.Executable(
+                label="python_script",
+                instances=[
+                    hf.ExecutableInstance(
+                        command=env_cmd,
+                        num_cores=1,
+                        parallel_mode=None,
+                    )
+                ],
+            )
+        ],
+    )
+    hf.envs.add_object(bad_env, skip_duplicates=True)
+
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_direct_in_direct_out.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="bad_python_env")],
+            )
+        ],
+    )
+    p1_val = 101
+    t1 = hf.Task(schema=s1, inputs={"p1": p1_val})
+    wk = hf.Workflow.from_template_data(
+        tasks=[t1],
+        template_name="main_script_test",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+
+    # jobscript stderr should be empty
+    assert not wk.submissions[0].jobscripts[0].direct_stderr_path.read_text()
+
+    # std stream file has workflow not found traceback
+    if combine_scripts:
+        std_stream_path = wk.submissions[0].jobscripts[0].get_app_std_path()
+    else:
+        run = wk.get_all_EARs()[0]
+        std_stream_path = run.get_app_std_path()
+    assert std_stream_path.is_file()
+    assert "WorkflowNotFoundError" in std_stream_path.read_text()
+
+    hf.reload_template_components()  # remove extra envs
+
+
+@pytest.mark.integration
+@pytest.mark.skipif("hf.run_time_info.is_frozen")
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_std_out_std_err_not_redirected(
+    null_config, tmp_path: Path, combine_scripts: bool
+):
+    """Test that standard error and output streams from a script are written to the jobscript
+    standard error and output files."""
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[
+            hf.SchemaInput(parameter=hf.Parameter("stdout_msg")),
+            hf.SchemaInput(parameter=hf.Parameter("stderr_msg")),
+        ],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_std_out_std_err.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            )
+        ],
+    )
+    stdout_msg = "hello stdout!"
+    stderr_msg = "hello stderr!"
+    t1 = hf.Task(schema=s1, inputs={"stdout_msg": stdout_msg, "stderr_msg": stderr_msg})
+    wk = hf.Workflow.from_template_data(
+        tasks=[t1],
+        template_name="main_script_test",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
+    )
+    wk.submit(wait=True, add_to_known=False)
+
+    if wk.submissions[0].jobscripts[0].resources.combine_jobscript_std:
+        std_out_err = wk.submissions[0].jobscripts[0].direct_std_out_err_path.read_text()
+        assert std_out_err.strip() == f"{stdout_msg}\n{stderr_msg}"
+    else:
+        std_out = wk.submissions[0].jobscripts[0].direct_stdout_path.read_text()
+        std_err = wk.submissions[0].jobscripts[0].direct_stderr_path.read_text()
+        assert std_out.strip() == stdout_msg
+        assert std_err.strip() == stderr_msg
+
+
+@pytest.mark.integration
+@pytest.mark.skipif("hf.run_time_info.is_frozen")
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_pass_env_spec(null_config, tmp_path: Path, combine_scripts: bool):
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
+        actions=[
+            hf.Action(
+                script="<<script:env_specifier_test/main_script_test_pass_env_spec.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                script_pass_env_spec=True,
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            )
+        ],
+    )
+    p1_val = 101
+    t1 = hf.Task(schema=s1, inputs={"p1": p1_val})
+    wk = hf.Workflow.from_template_data(
+        tasks=[t1],
+        template_name="main_script_test_pass_env_spec",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+
+    std_out = wk.submissions[0].jobscripts[0].direct_stdout_path.read_text().strip()
+    p2 = wk.tasks[0].elements[0].outputs.p2
+    assert isinstance(p2, hf.ElementParameter)
+    assert p2.value == p1_val + 100
+    assert std_out == "{'name': 'python_env'}"
+
+
+@pytest.mark.integration
+@pytest.mark.skipif("hf.run_time_info.is_frozen")
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_env_specifier_in_main_script_path(
+    new_null_config, tmp_path: Path, combine_scripts: bool
+):
+    py_env = hf.Environment(
+        name="python_env",
+        specifiers={"version": "v1"},
+        executables=[
+            hf.Executable(
+                label="python_script",
+                instances=[
+                    hf.ExecutableInstance(
+                        command="python <<script_path>> <<args>>",
+                        num_cores=1,
+                        parallel_mode=None,
+                    )
+                ],
+            )
+        ],
+    )
+    hf.envs.add_object(py_env, skip_duplicates=True)
+
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
+        actions=[
+            hf.Action(
+                script="<<script:env_specifier_test/<<env:version>>/main_script_test_direct_in_direct_out.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            )
+        ],
+    )
+
+    p1_val = 101
+    t1 = hf.Task(
+        schema=s1,
+        inputs={"p1": p1_val},
+        environments={"python_env": {"version": "v1"}},
+    )
+    wk = hf.Workflow.from_template_data(
+        tasks=[t1],
+        template_name="main_script_test_env_spec_script_path",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+
+    p2 = wk.tasks[0].elements[0].outputs.p2
+    assert isinstance(p2, hf.ElementParameter)
+    assert p2.value == p1_val + 100
+
+    hf.reload_template_components()  # remove extra envs
+
+
+@pytest.mark.integration
+@pytest.mark.skipif("hf.run_time_info.is_frozen")
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_env_specifier_in_main_script_path_multiple_scripts(
+    new_null_config, tmp_path: Path, combine_scripts: bool
+):
+    """Test two elements with different environment specifiers use two distinct scripts"""
+    py_env_v1 = hf.Environment(
+        name="python_env",
+        specifiers={"version": "v1"},
+        executables=[
+            hf.Executable(
+                label="python_script",
+                instances=[
+                    hf.ExecutableInstance(
+                        command="python <<script_path>> <<args>>",
+                        num_cores=1,
+                        parallel_mode=None,
+                    )
+                ],
+            )
+        ],
+    )
+    py_env_v2 = hf.Environment(
+        name="python_env",
+        specifiers={"version": "v2"},
+        executables=[
+            hf.Executable(
+                label="python_script",
+                instances=[
+                    hf.ExecutableInstance(
+                        command="python <<script_path>> <<args>>",
+                        num_cores=1,
+                        parallel_mode=None,
+                    )
+                ],
+            )
+        ],
+    )
+    hf.envs.add_objects([py_env_v1, py_env_v2], skip_duplicates=True)
+
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
+        actions=[
+            hf.Action(
+                script="<<script:env_specifier_test/<<env:version>>/main_script_test_direct_in_direct_out.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            )
+        ],
+    )
+
+    p1_val = 101
+    t1 = hf.Task(
+        schema=s1,
+        inputs={"p1": p1_val},
+        environments={"python_env": {"version": "v1"}},
+        sequences=[
+            hf.ValueSequence(
+                path="environments.python_env.version",
+                values=["v1", "v2"],
+            )
+        ],
+    )
+    wk = hf.Workflow.from_template_data(
+        tasks=[t1],
+        template_name="main_script_test_multiple_env_spec_script",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+
+    # v1 and v2 scripts output different values:
+    e1, e2 = wk.tasks.t1.elements
+    e1_p2 = e1.outputs.p2
+    e2_p2 = e2.outputs.p2
+    assert isinstance(e1_p2, hf.ElementParameter)
+    assert isinstance(e2_p2, hf.ElementParameter)
+    assert e1_p2.value == 201
+    assert e2_p2.value == 301
+
+    hf.reload_template_components()  # remove extra envs
+
+
+@pytest.mark.integration
+@pytest.mark.skipif("hf.run_time_info.is_frozen")
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_direct_in_direct_out_multi_element(
+    null_config, tmp_path: Path, combine_scripts: bool
+):
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_direct_in_direct_out.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            )
+        ],
+    )
+    p1_vals = (101, 102, 103)
+    t1 = hf.Task(
+        schema=s1, sequences=[hf.ValueSequence(path="inputs.p1", values=p1_vals)]
+    )
+    wk = hf.Workflow.from_template_data(
+        tasks=[t1],
+        template_name="main_script_test_multi_element",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+
+    e0_p2 = wk.tasks[0].elements[0].outputs.p2
+    e1_p2 = wk.tasks[0].elements[1].outputs.p2
+    e2_p2 = wk.tasks[0].elements[2].outputs.p2
+
+    assert isinstance(e0_p2, hf.ElementParameter)
+    assert isinstance(e1_p2, hf.ElementParameter)
+    assert isinstance(e2_p2, hf.ElementParameter)
+
+    assert e0_p2.value == p1_vals[0] + 100
+    assert e1_p2.value == p1_vals[1] + 100
+    assert e2_p2.value == p1_vals[2] + 100
+
+    # check only one script generated, and its name:
+    script_name, _ = t1.schema.actions[0].get_script_artifact_name(env_spec={}, act_idx=0)
+    script_files = list(i.name for i in wk.submissions[0].scripts_path.glob("*"))
+    assert len(script_files) == 1
+    assert script_files[0] == script_name if not combine_scripts else "js_0.py"
+
+
+@pytest.mark.integration
+@pytest.mark.skipif("hf.run_time_info.is_frozen")
+def test_repeated_action_in_schema(null_config, tmp_path: Path):
+    # TODO: cannot currently use same Action object multiple times in a schema
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_direct_in_direct_out.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            ),
+            hf.Action(
+                script="<<script:main_script_test_direct_in_direct_out.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            ),
+        ],
+    )
+    p1_val = 101
+    t1 = hf.Task(schema=s1, inputs={"p1": p1_val})
+    wk = hf.Workflow.from_template_data(
+        tasks=[t1],
+        template_name="test_repeated_action_in_schema",
+        path=tmp_path,
+        resources={"any": {"write_app_logs": True}},
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+
+    # check scripts generated for act 0 and 1 have the same contents
+    act_0_script, _ = wk.tasks.t1.template.schema.actions[0].get_script_artifact_name(
+        env_spec={}, act_idx=0
+    )
+    act_1_script, _ = wk.tasks.t1.template.schema.actions[1].get_script_artifact_name(
+        env_spec={}, act_idx=1
+    )
+    act_0_script_path = wk.submissions[0].scripts_path / act_0_script
+    act_1_script_path = wk.submissions[0].scripts_path / act_1_script
+    assert act_0_script_path.read_text() == act_1_script_path.read_text()
+
+    # the two files will be symlinked if not on Windows (may be symlinked on Windows,
+    # depending on if user is admin)
+    if os.name != "nt":
+        assert act_1_script_path.is_symlink()
+
+    # output will be taken from second action
+    p2 = wk.tasks[0].elements[0].outputs.p2
+    assert isinstance(p2, hf.ElementParameter)
+    assert p2.value == p1_val + 100
+
+
+# TODO: same action with different env spec path (v1/v2) in same schema (check contents
+# different!). Cannot yet do this because it is not possible to set environment spec
+# for diferrent "main" actions within the same task.
+
+
+@pytest.mark.integration
+@pytest.mark.skipif("hf.run_time_info.is_frozen")
+def test_main_script_two_schemas_same_action(null_config, tmp_path: Path):
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_direct_in_direct_out.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            ),
+        ],
+    )
+    s2 = hf.TaskSchema(
+        objective="t2",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_direct_in_direct_out.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            ),
+        ],
+    )
+    p1_val = 101
+    t1 = hf.Task(schema=s1, inputs={"p1": p1_val})
+    t2 = hf.Task(schema=s2, inputs={"p1": p1_val})
+    wk = hf.Workflow.from_template_data(
+        tasks=[t1, t2],
+        template_name="main_script_test_two_schemas_same_action",
+        path=tmp_path,
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+
+    # check scripts generated for t1 and t2 have the same contents
+    t1_script, _ = wk.tasks.t1.template.schema.actions[0].get_script_artifact_name(
+        env_spec={}, act_idx=0
+    )
+    t2_script, _ = wk.tasks.t2.template.schema.actions[0].get_script_artifact_name(
+        env_spec={}, act_idx=0
+    )
+    t1_script_path = wk.submissions[0].scripts_path / t1_script
+    t2_script_path = wk.submissions[0].scripts_path / t2_script
+    assert t1_script_path.read_text() == t2_script_path.read_text()
+
+    # the two files will be symlinked if not on Windows (may be symlinked on Windows,
+    # depending on if user is admin)
+    if os.name != "nt":
+        assert t2_script_path.is_symlink()
+
+    # check output
+    t0_p2 = wk.tasks[0].elements[0].outputs.p2
+    t1_p2 = wk.tasks[1].elements[0].outputs.p2
+    assert isinstance(t0_p2, hf.ElementParameter)
+    assert isinstance(t1_p2, hf.ElementParameter)
+    assert t0_p2.value == p1_val + 100
+    assert t1_p2.value == p1_val + 100
+
+    # now copy the workflow elsewhere and check the symlink between the scripts still
+    # works:
+    wk_path = Path(wk.path)
+    copy_path = wk_path.parent.joinpath(wk_path.with_suffix(".copy"))
+    shutil.copytree(wk.path, copy_path, symlinks=True)
+    t2_script_path_copy = Path(str(t2_script_path).replace(wk.path, f"{wk.path}.copy"))
+    assert t1_script_path.read_text() == t2_script_path_copy.read_text()
+
+
+@pytest.mark.integration
+@pytest.mark.skipif("hf.run_time_info.is_frozen")
+def test_main_script_two_actions_same_schema(null_config, tmp_path: Path):
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_direct_in_direct_out.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            ),
+            hf.Action(
+                script="<<script:main_script_test_json_in_json_out.py>>",
+                script_data_in="json",
+                script_data_out="json",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+                requires_dir=True,
+            ),
+        ],
+    )
+    p1_val = 101
+    t1 = hf.Task(schema=s1, inputs={"p1": p1_val})
+    wk = hf.Workflow.from_template_data(
+        tasks=[t1],
+        template_name="main_script_test_distinct_actions_same_schema",
+        path=tmp_path,
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+
+    # check scripts generated for act 0 and 1 have different contents
+    act_0_script, _ = wk.tasks.t1.template.schema.actions[0].get_script_artifact_name(
+        env_spec={}, act_idx=0
+    )
+    act_1_script, _ = wk.tasks.t1.template.schema.actions[1].get_script_artifact_name(
+        env_spec={}, act_idx=1
+    )
+    act_0_script_path = wk.submissions[0].scripts_path / act_0_script
+    act_1_script_path = wk.submissions[0].scripts_path / act_1_script
+    assert act_0_script_path.read_text() != act_1_script_path.read_text()
+
+
+@pytest.mark.integration
+@pytest.mark.skipif("hf.run_time_info.is_frozen")
+def test_shell_env_vars(null_config, tmp_path: Path):
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p1"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_shell_env_vars.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+                requires_dir=True,
+            )
+        ],
+    )
+    tasks = [
+        hf.Task(
+            schema=s1,
+            inputs={"p1": 1},
+            repeats=3,
+        ),
+        hf.Task(
+            schema=s1,
+            inputs={"p1": 1},
+        ),
+        hf.Task(
+            schema=s1,
+            inputs={"p1": 1},
+            repeats=2,
+        ),
+    ]
+    loops = [
+        hf.Loop(
+            tasks=[2],
+            num_iterations=2,
+        )
+    ]
+    wk = hf.Workflow.from_template_data(
+        tasks=tasks,
+        loops=loops,
+        template_name="main_script_test_shell_env",
+        path=tmp_path,
+    )
+    wk.add_submission(tasks=[0, 1])
+    wk.submit(wait=True, add_to_known=False, status=False)  # first submission
+
+    wk.submit(wait=True, add_to_known=False, status=False)  # outstanding runs
+
+    for run in wk.get_all_EARs():
+        run_dir = run.get_directory()
+        assert run_dir
+        with run_dir.joinpath("env_vars.json").open("rt") as fp:
+            env_dat = json.load(fp)
+
+        assert env_dat["HPCFLOW_WK_PATH"] == str(run.workflow.path)
+        assert env_dat["HPCFLOW_WK_PATH_ARG"] == str(run.workflow.path)
+
+        assert run.submission_idx is not None
+        for js in wk.submissions[run.submission_idx].jobscripts:
+            js_funcs_path = str(js.jobscript_functions_path)
+            for block in js.blocks:
+                for run_i in block.all_EARs:
+                    if run_i.id_ == run.id_:
+                        assert int(env_dat["HPCFLOW_JS_IDX"]) == js.index
+                        assert env_dat["HPCFLOW_JS_FUNCS_PATH"] == js_funcs_path
+
+        assert int(env_dat["HPCFLOW_RUN_ID"]) == run.id_
+        assert int(env_dat["HPCFLOW_RUN_IDX"]) == run.index
+        assert int(env_dat["HPCFLOW_RUN_PORT"]) == run.port_number
+
+        script_name = run.get_script_artifact_name()
+        sub_scripts_dir = wk.submissions[run.submission_idx].scripts_path
+        script_path = sub_scripts_dir.joinpath(script_name)
+
+        assert env_dat["HPCFLOW_SUB_SCRIPTS_DIR"] == str(sub_scripts_dir)
+        assert int(env_dat["HPCFLOW_SUB_IDX"]) == run.submission_idx
+
+        assert env_dat["HPCFLOW_RUN_SCRIPT_DIR"] == str(script_path.parent)
+        assert env_dat["HPCFLOW_RUN_SCRIPT_PATH"] == str(script_path)
+        assert env_dat["HPCFLOW_RUN_SCRIPT_NAME"] == script_name
+        assert env_dat["HPCFLOW_RUN_SCRIPT_NAME_NO_EXT"] == script_path.stem
+
+        assert env_dat["HPCFLOW_RUN_STD_PATH"] == str(run.get_app_std_path())
+        assert (
+            env_dat["HPCFLOW_RUN_LOG_PATH"]
+            == env_dat["HPCFLOW_LOG_PATH"]
+            == str(run.get_app_log_path())
+            if run.resources.write_app_logs
+            else " "
+        )
+
+        assert env_dat["HPCFLOW_ELEMENT_ID"] == str(run.element.id_)
+        assert env_dat["HPCFLOW_ELEMENT_IDX"] == str(run.element.index)
+
+        assert env_dat["HPCFLOW_ELEMENT_ITER_ID"] == str(run.element_iteration.id_)
+        assert env_dat["HPCFLOW_ELEMENT_ITER_IDX"] == str(run.element_iteration.index)
+
+
+@pytest.mark.integration
+@pytest.mark.skipif("hf.run_time_info.is_frozen")
+def test_combine_scripts_script_data_multiple_input_file_formats(
+    null_config, tmp_path: Path
+):
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_json_in_json_out.py>>",
+                script_data_in="json",
+                script_data_out="json",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+                requires_dir=True,
+            ),
+        ],
+    )
+    s2 = hf.TaskSchema(
+        objective="t2",
+        inputs=[
+            hf.SchemaInput(parameter=hf.Parameter("p2")),
+            hf.SchemaInput(parameter=hf.Parameter("p1c")),
+        ],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p3"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_hdf5_in_obj_2.py>>",
+                script_data_in={"p2": "direct", "p1c": "hdf5"},
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+                requires_dir=True,
+            ),
+        ],
+        parameter_class_modules=["hpcflow.sdk.core.test_utils"],
+    )
+    p1_val = 101
+    t1 = hf.Task(schema=s1, inputs={"p1": p1_val})
+    t2 = hf.Task(schema=s2, inputs={"p1c": P1(a=p1_val)})
+    wk = hf.Workflow.from_template_data(
+        tasks=[t1, t2],
+        template_name="main_script_test",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": True}},
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+
+    t0_p2 = wk.tasks[0].elements[0].outputs.p2
+    t1_p3 = wk.tasks[1].elements[0].outputs.p3
+    assert isinstance(t0_p2, hf.ElementParameter)
+    assert isinstance(t1_p3, hf.ElementParameter)
+    assert t0_p2.value == p1_val + 100
+    assert t1_p3.value == p1_val + 100