hpcflow-new2 0.2.0a190__py3-none-any.whl → 0.2.0a199__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hpcflow/__pyinstaller/hook-hpcflow.py +1 -0
- hpcflow/_version.py +1 -1
- hpcflow/data/scripts/bad_script.py +2 -0
- hpcflow/data/scripts/do_nothing.py +2 -0
- hpcflow/data/scripts/env_specifier_test/input_file_generator_pass_env_spec.py +4 -0
- hpcflow/data/scripts/env_specifier_test/main_script_test_pass_env_spec.py +8 -0
- hpcflow/data/scripts/env_specifier_test/output_file_parser_pass_env_spec.py +4 -0
- hpcflow/data/scripts/env_specifier_test/v1/input_file_generator_basic.py +4 -0
- hpcflow/data/scripts/env_specifier_test/v1/main_script_test_direct_in_direct_out.py +7 -0
- hpcflow/data/scripts/env_specifier_test/v1/output_file_parser_basic.py +4 -0
- hpcflow/data/scripts/env_specifier_test/v2/main_script_test_direct_in_direct_out.py +7 -0
- hpcflow/data/scripts/input_file_generator_basic.py +3 -0
- hpcflow/data/scripts/input_file_generator_basic_FAIL.py +3 -0
- hpcflow/data/scripts/input_file_generator_test_stdout_stderr.py +8 -0
- hpcflow/data/scripts/main_script_test_direct_in.py +3 -0
- hpcflow/data/scripts/main_script_test_direct_in_direct_out_2.py +6 -0
- hpcflow/data/scripts/main_script_test_direct_in_direct_out_2_fail_allowed.py +6 -0
- hpcflow/data/scripts/main_script_test_direct_in_direct_out_2_fail_allowed_group.py +7 -0
- hpcflow/data/scripts/main_script_test_direct_in_direct_out_3.py +6 -0
- hpcflow/data/scripts/main_script_test_direct_in_group_direct_out_3.py +6 -0
- hpcflow/data/scripts/main_script_test_direct_in_group_one_fail_direct_out_3.py +6 -0
- hpcflow/data/scripts/main_script_test_hdf5_in_obj_2.py +12 -0
- hpcflow/data/scripts/main_script_test_json_out_FAIL.py +3 -0
- hpcflow/data/scripts/main_script_test_shell_env_vars.py +12 -0
- hpcflow/data/scripts/main_script_test_std_out_std_err.py +6 -0
- hpcflow/data/scripts/output_file_parser_basic.py +3 -0
- hpcflow/data/scripts/output_file_parser_basic_FAIL.py +7 -0
- hpcflow/data/scripts/output_file_parser_test_stdout_stderr.py +8 -0
- hpcflow/data/scripts/script_exit_test.py +5 -0
- hpcflow/data/template_components/environments.yaml +1 -1
- hpcflow/sdk/__init__.py +5 -0
- hpcflow/sdk/app.py +150 -89
- hpcflow/sdk/cli.py +263 -84
- hpcflow/sdk/cli_common.py +99 -5
- hpcflow/sdk/config/callbacks.py +38 -1
- hpcflow/sdk/config/config.py +102 -13
- hpcflow/sdk/config/errors.py +19 -5
- hpcflow/sdk/config/types.py +3 -0
- hpcflow/sdk/core/__init__.py +25 -1
- hpcflow/sdk/core/actions.py +914 -262
- hpcflow/sdk/core/cache.py +76 -34
- hpcflow/sdk/core/command_files.py +14 -128
- hpcflow/sdk/core/commands.py +35 -6
- hpcflow/sdk/core/element.py +122 -50
- hpcflow/sdk/core/errors.py +58 -2
- hpcflow/sdk/core/execute.py +207 -0
- hpcflow/sdk/core/loop.py +408 -50
- hpcflow/sdk/core/loop_cache.py +4 -4
- hpcflow/sdk/core/parameters.py +382 -37
- hpcflow/sdk/core/run_dir_files.py +13 -40
- hpcflow/sdk/core/skip_reason.py +7 -0
- hpcflow/sdk/core/task.py +119 -30
- hpcflow/sdk/core/task_schema.py +68 -0
- hpcflow/sdk/core/test_utils.py +66 -27
- hpcflow/sdk/core/types.py +54 -1
- hpcflow/sdk/core/utils.py +78 -7
- hpcflow/sdk/core/workflow.py +1538 -336
- hpcflow/sdk/data/workflow_spec_schema.yaml +2 -0
- hpcflow/sdk/demo/cli.py +7 -0
- hpcflow/sdk/helper/cli.py +1 -0
- hpcflow/sdk/log.py +42 -15
- hpcflow/sdk/persistence/base.py +405 -53
- hpcflow/sdk/persistence/json.py +177 -52
- hpcflow/sdk/persistence/pending.py +237 -69
- hpcflow/sdk/persistence/store_resource.py +3 -2
- hpcflow/sdk/persistence/types.py +15 -4
- hpcflow/sdk/persistence/zarr.py +928 -81
- hpcflow/sdk/submission/jobscript.py +1408 -489
- hpcflow/sdk/submission/schedulers/__init__.py +40 -5
- hpcflow/sdk/submission/schedulers/direct.py +33 -19
- hpcflow/sdk/submission/schedulers/sge.py +51 -16
- hpcflow/sdk/submission/schedulers/slurm.py +44 -16
- hpcflow/sdk/submission/schedulers/utils.py +7 -2
- hpcflow/sdk/submission/shells/base.py +68 -20
- hpcflow/sdk/submission/shells/bash.py +222 -129
- hpcflow/sdk/submission/shells/powershell.py +200 -150
- hpcflow/sdk/submission/submission.py +852 -119
- hpcflow/sdk/submission/types.py +18 -21
- hpcflow/sdk/typing.py +24 -5
- hpcflow/sdk/utils/arrays.py +71 -0
- hpcflow/sdk/utils/deferred_file.py +55 -0
- hpcflow/sdk/utils/hashing.py +16 -0
- hpcflow/sdk/utils/patches.py +12 -0
- hpcflow/sdk/utils/strings.py +33 -0
- hpcflow/tests/api/test_api.py +32 -0
- hpcflow/tests/conftest.py +19 -0
- hpcflow/tests/data/multi_path_sequences.yaml +29 -0
- hpcflow/tests/data/workflow_test_run_abort.yaml +34 -35
- hpcflow/tests/schedulers/sge/test_sge_submission.py +36 -0
- hpcflow/tests/scripts/test_input_file_generators.py +282 -0
- hpcflow/tests/scripts/test_main_scripts.py +821 -70
- hpcflow/tests/scripts/test_non_snippet_script.py +46 -0
- hpcflow/tests/scripts/test_ouput_file_parsers.py +353 -0
- hpcflow/tests/shells/wsl/test_wsl_submission.py +6 -0
- hpcflow/tests/unit/test_action.py +176 -0
- hpcflow/tests/unit/test_app.py +20 -0
- hpcflow/tests/unit/test_cache.py +46 -0
- hpcflow/tests/unit/test_cli.py +133 -0
- hpcflow/tests/unit/test_config.py +122 -1
- hpcflow/tests/unit/test_element_iteration.py +47 -0
- hpcflow/tests/unit/test_jobscript_unit.py +757 -0
- hpcflow/tests/unit/test_loop.py +1332 -27
- hpcflow/tests/unit/test_meta_task.py +325 -0
- hpcflow/tests/unit/test_multi_path_sequences.py +229 -0
- hpcflow/tests/unit/test_parameter.py +13 -0
- hpcflow/tests/unit/test_persistence.py +190 -8
- hpcflow/tests/unit/test_run.py +109 -3
- hpcflow/tests/unit/test_run_directories.py +29 -0
- hpcflow/tests/unit/test_shell.py +20 -0
- hpcflow/tests/unit/test_submission.py +5 -76
- hpcflow/tests/unit/utils/test_arrays.py +40 -0
- hpcflow/tests/unit/utils/test_deferred_file_writer.py +34 -0
- hpcflow/tests/unit/utils/test_hashing.py +65 -0
- hpcflow/tests/unit/utils/test_patches.py +5 -0
- hpcflow/tests/unit/utils/test_redirect_std.py +50 -0
- hpcflow/tests/workflows/__init__.py +0 -0
- hpcflow/tests/workflows/test_directory_structure.py +31 -0
- hpcflow/tests/workflows/test_jobscript.py +332 -0
- hpcflow/tests/workflows/test_run_status.py +198 -0
- hpcflow/tests/workflows/test_skip_downstream.py +696 -0
- hpcflow/tests/workflows/test_submission.py +140 -0
- hpcflow/tests/workflows/test_workflows.py +142 -2
- hpcflow/tests/workflows/test_zip.py +18 -0
- hpcflow/viz_demo.ipynb +6587 -3
- {hpcflow_new2-0.2.0a190.dist-info → hpcflow_new2-0.2.0a199.dist-info}/METADATA +7 -4
- hpcflow_new2-0.2.0a199.dist-info/RECORD +221 -0
- hpcflow_new2-0.2.0a190.dist-info/RECORD +0 -165
- {hpcflow_new2-0.2.0a190.dist-info → hpcflow_new2-0.2.0a199.dist-info}/LICENSE +0 -0
- {hpcflow_new2-0.2.0a190.dist-info → hpcflow_new2-0.2.0a199.dist-info}/WHEEL +0 -0
- {hpcflow_new2-0.2.0a190.dist-info → hpcflow_new2-0.2.0a199.dist-info}/entry_points.txt +0 -0
hpcflow/tests/scripts/test_main_scripts.py
@@ -1,4 +1,7 @@
+import json
+import os
 from pathlib import Path
+import shutil
 import time
 import pytest
 
@@ -11,7 +14,8 @@ from hpcflow.sdk.core.test_utils import P1_parameter_cls as P1
 
 @pytest.mark.integration
 @pytest.mark.skipif("hf.run_time_info.is_frozen")
-def test_script_direct_in_direct_out(null_config, tmp_path: Path):
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_direct_in_direct_out(null_config, tmp_path: Path, combine_scripts: bool):
     s1 = hf.TaskSchema(
         objective="t1",
         inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
@@ -29,12 +33,13 @@ def test_script_direct_in_direct_out(null_config, tmp_path: Path):
     p1_val = 101
     t1 = hf.Task(schema=s1, inputs={"p1": p1_val})
     wk = hf.Workflow.from_template_data(
-        tasks=[t1], template_name="main_script_test", path=tmp_path
+        tasks=[t1],
+        template_name="main_script_test",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
     )
     wk.submit(wait=True, add_to_known=False)
-
-    # to be later Python versions):
-    time.sleep(10)
+
     p2 = wk.tasks[0].elements[0].outputs.p2
     assert isinstance(p2, hf.ElementParameter)
     assert p2.value == p1_val + 100
@@ -42,7 +47,10 @@ def test_script_direct_in_direct_out(null_config, tmp_path: Path):
 
 @pytest.mark.integration
 @pytest.mark.skipif("hf.run_time_info.is_frozen")
-def test_script_direct_sub_param_in_direct_out(null_config, tmp_path: Path):
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_direct_sub_param_in_direct_out(
+    null_config, tmp_path: Path, combine_scripts: bool
+):
     s1 = hf.TaskSchema(
         objective="t1",
         inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
@@ -60,12 +68,13 @@ def test_script_direct_sub_param_in_direct_out(null_config, tmp_path: Path):
     p1_val = {"a": 101}
     t1 = hf.Task(schema=s1, inputs={"p1": p1_val})
     wk = hf.Workflow.from_template_data(
-        tasks=[t1], template_name="main_script_test", path=tmp_path
+        tasks=[t1],
+        template_name="main_script_test",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
     )
     wk.submit(wait=True, add_to_known=False)
-
-    # to be later Python versions):
-    time.sleep(10)
+
     p2 = wk.tasks[0].elements[0].outputs.p2
     assert isinstance(p2, hf.ElementParameter)
     assert p2.value == p1_val["a"] + 100
@@ -73,7 +82,10 @@ def test_script_direct_sub_param_in_direct_out(null_config, tmp_path: Path):
 
 @pytest.mark.integration
 @pytest.mark.skipif("hf.run_time_info.is_frozen")
-def test_script_direct_in_direct_out_single_label(null_config, tmp_path: Path):
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_direct_in_direct_out_single_label(
+    null_config, tmp_path: Path, combine_scripts: bool
+):
     """This uses the same test script as the `test_script_direct_in_direct_out` test;
     single labels are trivial and need not be referenced in the script."""
     p1_label = "one"
@@ -94,12 +106,13 @@ def test_script_direct_in_direct_out_single_label(null_config, tmp_path: Path):
     p1_val = 101
     t1 = hf.Task(schema=s1, inputs={f"p1[{p1_label}]": p1_val})
     wk = hf.Workflow.from_template_data(
-        tasks=[t1], template_name="main_script_test", path=tmp_path
+        tasks=[t1],
+        template_name="main_script_test",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
     )
     wk.submit(wait=True, add_to_known=False)
-
-    # to be later Python versions):
-    time.sleep(10)
+
     p2 = wk.tasks[0].elements[0].outputs.p2
     assert isinstance(p2, hf.ElementParameter)
     assert p2.value == p1_val + 100
@@ -107,7 +120,10 @@ def test_script_direct_in_direct_out_single_label(null_config, tmp_path: Path):
 
 @pytest.mark.integration
 @pytest.mark.skipif("hf.run_time_info.is_frozen")
-def test_script_direct_in_direct_out_labels(null_config, tmp_path: Path):
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_direct_in_direct_out_labels(
+    null_config, tmp_path: Path, combine_scripts: bool
+):
     p1_label_1 = "one"
     p1_label_2 = "two"
     s1 = hf.TaskSchema(
@@ -140,12 +156,13 @@ def test_script_direct_in_direct_out_labels(null_config, tmp_path: Path):
         },
     )
     wk = hf.Workflow.from_template_data(
-        tasks=[t1], template_name="main_script_test", path=tmp_path
+        tasks=[t1],
+        template_name="main_script_test",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
     )
     wk.submit(wait=True, add_to_known=False)
-
-    # to be later Python versions):
-    time.sleep(10)
+
     p2 = wk.tasks[0].elements[0].outputs.p2
     assert isinstance(p2, hf.ElementParameter)
     assert p2.value == p1_1_val + p1_2_val
@@ -153,7 +170,8 @@ def test_script_direct_in_direct_out_labels(null_config, tmp_path: Path):
 
 @pytest.mark.integration
 @pytest.mark.skipif("hf.run_time_info.is_frozen")
-def test_script_json_in_json_out(null_config, tmp_path: Path):
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_json_in_json_out(null_config, tmp_path: Path, combine_scripts: bool):
     s1 = hf.TaskSchema(
         objective="t1",
         inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
@@ -165,18 +183,20 @@ def test_script_json_in_json_out(null_config, tmp_path: Path):
                 script_data_out="json",
                 script_exe="python_script",
                 environments=[hf.ActionEnvironment(environment="python_env")],
+                requires_dir=True,
             )
         ],
     )
     p1_val = 101
     t1 = hf.Task(schema=s1, inputs={"p1": p1_val})
     wk = hf.Workflow.from_template_data(
-        tasks=[t1], template_name="main_script_test", path=tmp_path
+        tasks=[t1],
+        template_name="main_script_test",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
     )
     wk.submit(wait=True, add_to_known=False)
-
-    # to be later Python versions):
-    time.sleep(10)
+
     p2 = wk.tasks[0].elements[0].outputs.p2
     assert isinstance(p2, hf.ElementParameter)
     assert p2.value == p1_val + 100
@@ -184,7 +204,10 @@ def test_script_json_in_json_out(null_config, tmp_path: Path):
 
 @pytest.mark.integration
 @pytest.mark.skipif("hf.run_time_info.is_frozen")
-def test_script_json_in_json_out_labels(null_config, tmp_path: Path):
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_json_in_json_out_labels(
+    null_config, tmp_path: Path, combine_scripts: bool
+):
     p1_label_1 = "one"
     p1_label_2 = "two"
     s1 = hf.TaskSchema(
@@ -204,6 +227,7 @@ def test_script_json_in_json_out_labels(null_config, tmp_path: Path):
                 script_data_out="json",
                 script_exe="python_script",
                 environments=[hf.ActionEnvironment(environment="python_env")],
+                requires_dir=True,
             )
         ],
     )
@@ -217,12 +241,13 @@ def test_script_json_in_json_out_labels(null_config, tmp_path: Path):
         },
     )
     wk = hf.Workflow.from_template_data(
-        tasks=[t1], template_name="main_script_test", path=tmp_path
+        tasks=[t1],
+        template_name="main_script_test",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
     )
     wk.submit(wait=True, add_to_known=False)
-
-    # to be later Python versions):
-    time.sleep(10)
+
     p2 = wk.tasks[0].elements[0].outputs.p2
     assert isinstance(p2, hf.ElementParameter)
     assert p2.value == p1_1_val + p1_2_val
@@ -230,7 +255,10 @@ def test_script_json_in_json_out_labels(null_config, tmp_path: Path):
 
 @pytest.mark.integration
 @pytest.mark.skipif("hf.run_time_info.is_frozen")
-def test_script_json_sub_param_in_json_out_labels(null_config, tmp_path: Path):
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_json_sub_param_in_json_out_labels(
+    null_config, tmp_path: Path, combine_scripts: bool
+):
     p1_label_1 = "one"
     p1_label_2 = "two"
     s1 = hf.TaskSchema(
@@ -250,6 +278,7 @@ def test_script_json_sub_param_in_json_out_labels(null_config, tmp_path: Path):
                 script_data_out="json",
                 script_exe="python_script",
                 environments=[hf.ActionEnvironment(environment="python_env")],
+                requires_dir=True,
             )
         ],
     )
@@ -263,12 +292,13 @@ def test_script_json_sub_param_in_json_out_labels(null_config, tmp_path: Path):
         },
     )
     wk = hf.Workflow.from_template_data(
-        tasks=[t1], template_name="main_script_test", path=tmp_path
+        tasks=[t1],
+        template_name="main_script_test",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
     )
     wk.submit(wait=True, add_to_known=False)
-
-    # to be later Python versions):
-    time.sleep(10)
+
     p2 = wk.tasks[0].elements[0].outputs.p2
     assert isinstance(p2, hf.ElementParameter)
     assert p2.value == a_val + p1_2_val
@@ -276,7 +306,10 @@ def test_script_json_sub_param_in_json_out_labels(null_config, tmp_path: Path):
 
 @pytest.mark.integration
 @pytest.mark.skipif("hf.run_time_info.is_frozen")
-def test_script_json_and_direct_in_json_out(null_config, tmp_path: Path):
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_json_and_direct_in_json_out(
+    null_config, tmp_path: Path, combine_scripts: bool
+):
     s1 = hf.TaskSchema(
         objective="t1",
         inputs=[
@@ -291,6 +324,7 @@ def test_script_json_and_direct_in_json_out(null_config, tmp_path: Path):
                 script_data_out="json",
                 script_exe="python_script",
                 environments=[hf.ActionEnvironment(environment="python_env")],
+                requires_dir=True,
             )
         ],
     )
@@ -298,12 +332,13 @@ def test_script_json_and_direct_in_json_out(null_config, tmp_path: Path):
     p2_val = 201
     t1 = hf.Task(schema=s1, inputs={"p1": p1_val, "p2": p2_val})
     wk = hf.Workflow.from_template_data(
-        tasks=[t1], template_name="main_script_test", path=tmp_path
+        tasks=[t1],
+        template_name="main_script_test",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
     )
     wk.submit(wait=True, add_to_known=False)
-
-    # to be later Python versions):
-    time.sleep(10)
+
     p3 = wk.tasks[0].elements[0].outputs.p3
     assert isinstance(p3, hf.ElementParameter)
     assert p3.value == p1_val + p2_val
@@ -311,7 +346,10 @@ def test_script_json_and_direct_in_json_out(null_config, tmp_path: Path):
 
 @pytest.mark.integration
 @pytest.mark.skipif("hf.run_time_info.is_frozen")
-def test_script_json_in_json_and_direct_out(null_config, tmp_path: Path):
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_json_in_json_and_direct_out(
+    null_config, tmp_path: Path, combine_scripts: bool
+):
     s1 = hf.TaskSchema(
         objective="t1",
         inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
@@ -326,18 +364,20 @@ def test_script_json_in_json_and_direct_out(null_config, tmp_path: Path):
                 script_data_out={"p2": "json", "p3": "direct"},
                 script_exe="python_script",
                 environments=[hf.ActionEnvironment(environment="python_env")],
+                requires_dir=True,
             )
         ],
     )
     p1_val = 101
     t1 = hf.Task(schema=s1, inputs={"p1": p1_val})
     wk = hf.Workflow.from_template_data(
-        tasks=[t1], template_name="main_script_test", path=tmp_path
+        tasks=[t1],
+        template_name="main_script_test",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
     )
     wk.submit(wait=True, add_to_known=False)
-
-    # to be later Python versions):
-    time.sleep(10)
+
     p2 = wk.tasks[0].elements[0].outputs.p2
     assert isinstance(p2, hf.ElementParameter)
     p3 = wk.tasks[0].elements[0].outputs.p3
@@ -348,7 +388,8 @@ def test_script_json_in_json_and_direct_out(null_config, tmp_path: Path):
 
 @pytest.mark.integration
 @pytest.mark.skipif("hf.run_time_info.is_frozen")
-def test_script_json_in_obj(null_config, tmp_path: Path):
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_json_in_obj(null_config, tmp_path: Path, combine_scripts: bool):
     """Use a custom JSON dumper defined in the P1 class."""
     s1 = hf.TaskSchema(
         objective="t1",
@@ -361,6 +402,7 @@ def test_script_json_in_obj(null_config, tmp_path: Path):
                 script_data_out="direct",
                 script_exe="python_script",
                 environments=[hf.ActionEnvironment(environment="python_env")],
+                requires_dir=True,
             )
         ],
         parameter_class_modules=["hpcflow.sdk.core.test_utils"],
@@ -368,12 +410,13 @@ def test_script_json_in_obj(null_config, tmp_path: Path):
     a_val = 1
     t1 = hf.Task(schema=s1, inputs={"p1c": P1(a=a_val)})
     wk = hf.Workflow.from_template_data(
-        tasks=[t1], template_name="main_script_test", path=tmp_path
+        tasks=[t1],
+        template_name="main_script_test",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
    )
     wk.submit(wait=True, add_to_known=False)
-
-    # to be later Python versions):
-    time.sleep(10)
+
     p2 = wk.tasks[0].elements[0].outputs.p2
     assert isinstance(p2, hf.ElementParameter)
     assert p2.value == a_val + 100
@@ -381,7 +424,8 @@ def test_script_json_in_obj(null_config, tmp_path: Path):
 
 @pytest.mark.integration
 @pytest.mark.skipif("hf.run_time_info.is_frozen")
-def test_script_hdf5_in_obj(null_config, tmp_path: Path):
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_hdf5_in_obj(null_config, tmp_path: Path, combine_scripts: bool):
     """Use a custom HDF5 dumper defined in the P1 class."""
     s1 = hf.TaskSchema(
         objective="t1",
@@ -394,6 +438,7 @@ def test_script_hdf5_in_obj(null_config, tmp_path: Path):
                 script_data_out="direct",
                 script_exe="python_script",
                 environments=[hf.ActionEnvironment(environment="python_env")],
+                requires_dir=True,
             )
         ],
         parameter_class_modules=["hpcflow.sdk.core.test_utils"],
@@ -401,12 +446,13 @@ def test_script_hdf5_in_obj(null_config, tmp_path: Path):
     a_val = 1
     t1 = hf.Task(schema=s1, inputs={"p1c": P1(a=a_val)})
     wk = hf.Workflow.from_template_data(
-        tasks=[t1], template_name="main_script_test", path=tmp_path
+        tasks=[t1],
+        template_name="main_script_test",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
     )
     wk.submit(wait=True, add_to_known=False)
-
-    # to be later Python versions):
-    time.sleep(10)
+
     p2 = wk.tasks[0].elements[0].outputs.p2
     assert isinstance(p2, hf.ElementParameter)
     assert p2.value == a_val + 100
@@ -414,7 +460,8 @@ def test_script_hdf5_in_obj(null_config, tmp_path: Path):
 
 @pytest.mark.integration
 @pytest.mark.skipif("hf.run_time_info.is_frozen")
-def test_script_json_out_obj(null_config, tmp_path: Path):
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_json_out_obj(null_config, tmp_path: Path, combine_scripts: bool):
     """Use a custom JSON saver defined in the P1 class."""
     s1 = hf.TaskSchema(
         objective="t1",
@@ -427,6 +474,7 @@ def test_script_json_out_obj(null_config, tmp_path: Path):
                 script_data_out="json",
                 script_exe="python_script",
                 environments=[hf.ActionEnvironment(environment="python_env")],
+                requires_dir=True,
             )
         ],
         parameter_class_modules=["hpcflow.sdk.core.test_utils"],
@@ -434,12 +482,13 @@ def test_script_json_out_obj(null_config, tmp_path: Path):
     p1_val = 1
     t1 = hf.Task(schema=s1, inputs={"p1": p1_val})
     wk = hf.Workflow.from_template_data(
-        tasks=[t1], template_name="main_script_test", path=tmp_path
+        tasks=[t1],
+        template_name="main_script_test",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
     )
     wk.submit(wait=True, add_to_known=False)
-
-    # to be later Python versions):
-    time.sleep(10)
+
     p1c = wk.tasks[0].elements[0].outputs.p1c
     assert isinstance(p1c, hf.ElementParameter)
     assert p1c.value == P1(a=p1_val + 100)
@@ -447,7 +496,8 @@ def test_script_json_out_obj(null_config, tmp_path: Path):
 
 @pytest.mark.integration
 @pytest.mark.skipif("hf.run_time_info.is_frozen")
-def test_script_hdf5_out_obj(null_config, tmp_path: Path):
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_hdf5_out_obj(null_config, tmp_path: Path, combine_scripts: bool):
     """Use a custom HDF5 saver defined in the P1 class."""
     s1 = hf.TaskSchema(
         objective="t1",
@@ -460,6 +510,7 @@ def test_script_hdf5_out_obj(null_config, tmp_path: Path):
                 script_data_out="hdf5",
                 script_exe="python_script",
                 environments=[hf.ActionEnvironment(environment="python_env")],
+                requires_dir=True,
             )
         ],
         parameter_class_modules=["hpcflow.sdk.core.test_utils"],
@@ -467,12 +518,13 @@ def test_script_hdf5_out_obj(null_config, tmp_path: Path):
     p1_val = 1
     t1 = hf.Task(schema=s1, inputs={"p1": p1_val})
     wk = hf.Workflow.from_template_data(
-        tasks=[t1], template_name="main_script_test", path=tmp_path
+        tasks=[t1],
+        template_name="main_script_test",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
     )
     wk.submit(wait=True, add_to_known=False)
-
-    # to be later Python versions):
-    time.sleep(10)
+
     p1c = wk.tasks[0].elements[0].outputs.p1c
     assert isinstance(p1c, hf.ElementParameter)
     assert p1c.value == P1(a=p1_val + 100)
@@ -480,7 +532,11 @@ def test_script_hdf5_out_obj(null_config, tmp_path: Path):
 
 @pytest.mark.integration
 @pytest.mark.skipif("hf.run_time_info.is_frozen")
-def test_script_direct_in_pass_env_spec(new_null_config, tmp_path: Path):
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_direct_in_pass_env_spec(
+    new_null_config, tmp_path: Path, combine_scripts: bool
+):
+
     vers_spec = {"version": "1.2"}
     env = hf.Environment(
         name="python_env_with_specifiers",
@@ -490,7 +546,7 @@ def test_script_direct_in_pass_env_spec(new_null_config, tmp_path: Path):
                 label="python_script",
                 instances=[
                     hf.ExecutableInstance(
-                        command="python <<
+                        command="python <<script_path>> <<args>>",
                         num_cores=1,
                         parallel_mode=None,
                     )
@@ -526,14 +582,709 @@ def test_script_direct_in_pass_env_spec(new_null_config, tmp_path: Path):
         tasks=[t1],
         template_name="main_script_test",
         path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
     )
     wk.submit(wait=True, add_to_known=False)
-
-    # to be later Python versions):
-    time.sleep(10)
+
     p2 = wk.tasks[0].elements[0].outputs.p2
     assert isinstance(p2, hf.ElementParameter)
     assert p2.value == {
         "name": "python_env_with_specifiers",
         **vers_spec,
     }
+    hf.reload_template_components()  # remove extra envs
+
+
+@pytest.mark.integration
+@pytest.mark.skipif("hf.run_time_info.is_frozen")
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_std_stream_redirect_on_exception(
+    new_null_config, tmp_path: Path, combine_scripts: bool
+):
+    """Test exceptions raised by the app during execution of a script are printed to the
+    std-stream redirect file (and not the jobscript's standard error file)."""
+
+    # define a custom python environment which redefines the `WK_PATH` shell variable to
+    # a nonsense value so the app cannot load the workflow and thus raises an exception
+    app_caps = hf.package_name.upper()
+    if os.name == "nt":
+        env_cmd = f'$env:{app_caps}_WK_PATH = "nonsense_path"'
+    else:
+        env_cmd = f'export {app_caps}_WK_PATH="nonsense_path"'
+
+    env_cmd += "; python <<script_path>> <<args>>"
+    bad_env = hf.Environment(
+        name="bad_python_env",
+        executables=[
+            hf.Executable(
+                label="python_script",
+                instances=[
+                    hf.ExecutableInstance(
+                        command=env_cmd,
+                        num_cores=1,
+                        parallel_mode=None,
+                    )
+                ],
+            )
+        ],
+    )
+    hf.envs.add_object(bad_env, skip_duplicates=True)
+
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_direct_in_direct_out.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="bad_python_env")],
+            )
+        ],
+    )
+    p1_val = 101
+    t1 = hf.Task(schema=s1, inputs={"p1": p1_val})
+    wk = hf.Workflow.from_template_data(
+        tasks=[t1],
+        template_name="main_script_test",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+
+    # jobscript stderr should be empty
+    assert not wk.submissions[0].jobscripts[0].direct_stderr_path.read_text()
+
+    # std stream file has workflow not found traceback
+    if combine_scripts:
+        std_stream_path = wk.submissions[0].jobscripts[0].get_app_std_path()
+    else:
+        run = wk.get_all_EARs()[0]
+        std_stream_path = run.get_app_std_path()
+    assert std_stream_path.is_file()
+    assert "WorkflowNotFoundError" in std_stream_path.read_text()
+
+    hf.reload_template_components()  # remove extra envs
+
+
+@pytest.mark.integration
+@pytest.mark.skipif("hf.run_time_info.is_frozen")
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_std_out_std_err_not_redirected(
+    null_config, tmp_path: Path, combine_scripts: bool
+):
+    """Test that standard error and output streams from a script are written to the jobscript
+    standard error and output files."""
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[
+            hf.SchemaInput(parameter=hf.Parameter("stdout_msg")),
+            hf.SchemaInput(parameter=hf.Parameter("stderr_msg")),
+        ],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_std_out_std_err.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            )
+        ],
+    )
+    stdout_msg = "hello stdout!"
+    stderr_msg = "hello stderr!"
+    t1 = hf.Task(schema=s1, inputs={"stdout_msg": stdout_msg, "stderr_msg": stderr_msg})
+    wk = hf.Workflow.from_template_data(
+        tasks=[t1],
+        template_name="main_script_test",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
+    )
+    wk.submit(wait=True, add_to_known=False)
+
+    if wk.submissions[0].jobscripts[0].resources.combine_jobscript_std:
+        std_out_err = wk.submissions[0].jobscripts[0].direct_std_out_err_path.read_text()
+        assert std_out_err.strip() == f"{stdout_msg}\n{stderr_msg}"
+    else:
+        std_out = wk.submissions[0].jobscripts[0].direct_stdout_path.read_text()
+        std_err = wk.submissions[0].jobscripts[0].direct_stderr_path.read_text()
+        assert std_out.strip() == stdout_msg
+        assert std_err.strip() == stderr_msg
+
+
+@pytest.mark.integration
+@pytest.mark.skipif("hf.run_time_info.is_frozen")
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_pass_env_spec(null_config, tmp_path: Path, combine_scripts: bool):
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
+        actions=[
+            hf.Action(
+                script="<<script:env_specifier_test/main_script_test_pass_env_spec.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                script_pass_env_spec=True,
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            )
+        ],
+    )
+    p1_val = 101
+    t1 = hf.Task(schema=s1, inputs={"p1": p1_val})
+    wk = hf.Workflow.from_template_data(
+        tasks=[t1],
+        template_name="main_script_test_pass_env_spec",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+
+    std_out = wk.submissions[0].jobscripts[0].direct_stdout_path.read_text().strip()
+    p2 = wk.tasks[0].elements[0].outputs.p2
+    assert isinstance(p2, hf.ElementParameter)
+    assert p2.value == p1_val + 100
+    assert std_out == "{'name': 'python_env'}"
+
+
+@pytest.mark.integration
+@pytest.mark.skipif("hf.run_time_info.is_frozen")
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_env_specifier_in_main_script_path(
+    new_null_config, tmp_path: Path, combine_scripts: bool
+):
+    py_env = hf.Environment(
+        name="python_env",
+        specifiers={"version": "v1"},
+        executables=[
+            hf.Executable(
+                label="python_script",
+                instances=[
+                    hf.ExecutableInstance(
+                        command="python <<script_path>> <<args>>",
+                        num_cores=1,
+                        parallel_mode=None,
+                    )
+                ],
+            )
+        ],
+    )
+    hf.envs.add_object(py_env, skip_duplicates=True)
+
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
+        actions=[
+            hf.Action(
+                script="<<script:env_specifier_test/<<env:version>>/main_script_test_direct_in_direct_out.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            )
+        ],
+    )
+
+    p1_val = 101
+    t1 = hf.Task(
+        schema=s1,
+        inputs={"p1": p1_val},
+        environments={"python_env": {"version": "v1"}},
+    )
+    wk = hf.Workflow.from_template_data(
+        tasks=[t1],
+        template_name="main_script_test_env_spec_script_path",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+
+    p2 = wk.tasks[0].elements[0].outputs.p2
+    assert isinstance(p2, hf.ElementParameter)
+    assert p2.value == p1_val + 100
+
+    hf.reload_template_components()  # remove extra envs
+
+
+@pytest.mark.integration
+@pytest.mark.skipif("hf.run_time_info.is_frozen")
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_env_specifier_in_main_script_path_multiple_scripts(
+    new_null_config, tmp_path: Path, combine_scripts: bool
+):
+    """Test two elements with different environment specifiers use two distinct scripts"""
+    py_env_v1 = hf.Environment(
+        name="python_env",
+        specifiers={"version": "v1"},
+        executables=[
+            hf.Executable(
+                label="python_script",
+                instances=[
+                    hf.ExecutableInstance(
+                        command="python <<script_path>> <<args>>",
+                        num_cores=1,
+                        parallel_mode=None,
+                    )
+                ],
+            )
+        ],
+    )
+    py_env_v2 = hf.Environment(
+        name="python_env",
+        specifiers={"version": "v2"},
+        executables=[
+            hf.Executable(
+                label="python_script",
+                instances=[
+                    hf.ExecutableInstance(
+                        command="python <<script_path>> <<args>>",
+                        num_cores=1,
+                        parallel_mode=None,
+                    )
+                ],
+            )
+        ],
+    )
+    hf.envs.add_objects([py_env_v1, py_env_v2], skip_duplicates=True)
+
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
+        actions=[
+            hf.Action(
+                script="<<script:env_specifier_test/<<env:version>>/main_script_test_direct_in_direct_out.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            )
+        ],
+    )
+
+    p1_val = 101
+    t1 = hf.Task(
+        schema=s1,
+        inputs={"p1": p1_val},
+        environments={"python_env": {"version": "v1"}},
+        sequences=[
+            hf.ValueSequence(
+                path="environments.python_env.version",
+                values=["v1", "v2"],
+            )
+        ],
+    )
+    wk = hf.Workflow.from_template_data(
+        tasks=[t1],
+        template_name="main_script_test_multiple_env_spec_script",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+
+    # v1 and v2 scripts output different values:
+    e1, e2 = wk.tasks.t1.elements
+    e1_p2 = e1.outputs.p2
+    e2_p2 = e2.outputs.p2
+    assert isinstance(e1_p2, hf.ElementParameter)
+    assert isinstance(e2_p2, hf.ElementParameter)
+    assert e1_p2.value == 201
+    assert e2_p2.value == 301
+
+    hf.reload_template_components()  # remove extra envs
+
+
+@pytest.mark.integration
+@pytest.mark.skipif("hf.run_time_info.is_frozen")
+@pytest.mark.parametrize("combine_scripts", [False, True])
+def test_script_direct_in_direct_out_multi_element(
+    null_config, tmp_path: Path, combine_scripts: bool
+):
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_direct_in_direct_out.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            )
+        ],
+    )
+    p1_vals = (101, 102, 103)
+    t1 = hf.Task(
+        schema=s1, sequences=[hf.ValueSequence(path="inputs.p1", values=p1_vals)]
+    )
+    wk = hf.Workflow.from_template_data(
+        tasks=[t1],
+        template_name="main_script_test_multi_element",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": combine_scripts}},
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+
+    e0_p2 = wk.tasks[0].elements[0].outputs.p2
+    e1_p2 = wk.tasks[0].elements[1].outputs.p2
+    e2_p2 = wk.tasks[0].elements[2].outputs.p2
+
+    assert isinstance(e0_p2, hf.ElementParameter)
+    assert isinstance(e1_p2, hf.ElementParameter)
+    assert isinstance(e2_p2, hf.ElementParameter)
+
+    assert e0_p2.value == p1_vals[0] + 100
+    assert e1_p2.value == p1_vals[1] + 100
+    assert e2_p2.value == p1_vals[2] + 100
+
+    # check only one script generated, and its name:
+    script_name, _ = t1.schema.actions[0].get_script_artifact_name(env_spec={}, act_idx=0)
+    script_files = list(i.name for i in wk.submissions[0].scripts_path.glob("*"))
+    assert len(script_files) == 1
+    assert script_files[0] == script_name if not combine_scripts else "js_0.py"
+
+
+@pytest.mark.integration
+@pytest.mark.skipif("hf.run_time_info.is_frozen")
+def test_repeated_action_in_schema(null_config, tmp_path: Path):
+    # TODO: cannot currently use same Action object multiple times in a schema
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_direct_in_direct_out.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            ),
+            hf.Action(
+                script="<<script:main_script_test_direct_in_direct_out.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            ),
+        ],
+    )
+    p1_val = 101
+    t1 = hf.Task(schema=s1, inputs={"p1": p1_val})
+    wk = hf.Workflow.from_template_data(
+        tasks=[t1],
+        template_name="test_repeated_action_in_schema",
+        path=tmp_path,
+        resources={"any": {"write_app_logs": True}},
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+
+    # check scripts generated for act 0 and 1 have the same contents
+    act_0_script, _ = wk.tasks.t1.template.schema.actions[0].get_script_artifact_name(
+        env_spec={}, act_idx=0
+    )
+    act_1_script, _ = wk.tasks.t1.template.schema.actions[1].get_script_artifact_name(
+        env_spec={}, act_idx=1
+    )
+    act_0_script_path = wk.submissions[0].scripts_path / act_0_script
+    act_1_script_path = wk.submissions[0].scripts_path / act_1_script
+    assert act_0_script_path.read_text() == act_1_script_path.read_text()
+
+    # the two files will be symlinked if not on Windows (may be symlinked on Windows,
+    # depending on if user is admin)
+    if os.name != "nt":
+        assert act_1_script_path.is_symlink()
+
+    # output will be taken from second action
+    p2 = wk.tasks[0].elements[0].outputs.p2
+    assert isinstance(p2, hf.ElementParameter)
+    assert p2.value == p1_val + 100
+
+
+# TODO: same action with different env spec path (v1/v2) in same schema (check contents
+# different!). Cannot yet do this because it is not possible to set environment spec
+# for diferrent "main" actions within the same task.
+
+
+@pytest.mark.integration
+@pytest.mark.skipif("hf.run_time_info.is_frozen")
+def test_main_script_two_schemas_same_action(null_config, tmp_path: Path):
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_direct_in_direct_out.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            ),
+        ],
+    )
+    s2 = hf.TaskSchema(
+        objective="t2",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_direct_in_direct_out.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            ),
+        ],
+    )
+    p1_val = 101
+    t1 = hf.Task(schema=s1, inputs={"p1": p1_val})
+    t2 = hf.Task(schema=s2, inputs={"p1": p1_val})
+    wk = hf.Workflow.from_template_data(
+        tasks=[t1, t2],
+        template_name="main_script_test_two_schemas_same_action",
+        path=tmp_path,
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+
+    # check scripts generated for t1 and t2 have the same contents
+    t1_script, _ = wk.tasks.t1.template.schema.actions[0].get_script_artifact_name(
+        env_spec={}, act_idx=0
+    )
+    t2_script, _ = wk.tasks.t2.template.schema.actions[0].get_script_artifact_name(
+        env_spec={}, act_idx=0
+    )
+    t1_script_path = wk.submissions[0].scripts_path / t1_script
+    t2_script_path = wk.submissions[0].scripts_path / t2_script
+    assert t1_script_path.read_text() == t2_script_path.read_text()
+
+    # the two files will be symlinked if not on Windows (may be symlinked on Windows,
+    # depending on if user is admin)
+    if os.name != "nt":
+        assert t2_script_path.is_symlink()
+
+    # check output
+    t0_p2 = wk.tasks[0].elements[0].outputs.p2
+    t1_p2 = wk.tasks[1].elements[0].outputs.p2
+    assert isinstance(t0_p2, hf.ElementParameter)
+    assert isinstance(t1_p2, hf.ElementParameter)
+    assert t0_p2.value == p1_val + 100
+    assert t1_p2.value == p1_val + 100
+
+    # now copy the workflow elsewhere and check the symlink between the scripts still
+    # works:
+    wk_path = Path(wk.path)
+    copy_path = wk_path.parent.joinpath(wk_path.with_suffix(".copy"))
+    shutil.copytree(wk.path, copy_path, symlinks=True)
+    t2_script_path_copy = Path(str(t2_script_path).replace(wk.path, f"{wk.path}.copy"))
+    assert t1_script_path.read_text() == t2_script_path_copy.read_text()
+
+
+@pytest.mark.integration
+@pytest.mark.skipif("hf.run_time_info.is_frozen")
+def test_main_script_two_actions_same_schema(null_config, tmp_path: Path):
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_direct_in_direct_out.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            ),
+            hf.Action(
+                script="<<script:main_script_test_json_in_json_out.py>>",
+                script_data_in="json",
+                script_data_out="json",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+                requires_dir=True,
+            ),
+        ],
+    )
+    p1_val = 101
+    t1 = hf.Task(schema=s1, inputs={"p1": p1_val})
+    wk = hf.Workflow.from_template_data(
+        tasks=[t1],
+        template_name="main_script_test_distinct_actions_same_schema",
+        path=tmp_path,
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+
+    # check scripts generated for act 0 and 1 have different contents
+    act_0_script, _ = wk.tasks.t1.template.schema.actions[0].get_script_artifact_name(
+        env_spec={}, act_idx=0
+    )
+    act_1_script, _ = wk.tasks.t1.template.schema.actions[1].get_script_artifact_name(
+        env_spec={}, act_idx=1
+    )
+    act_0_script_path = wk.submissions[0].scripts_path / act_0_script
+    act_1_script_path = wk.submissions[0].scripts_path / act_1_script
+    assert act_0_script_path.read_text() != act_1_script_path.read_text()
+
+
+@pytest.mark.integration
+@pytest.mark.skipif("hf.run_time_info.is_frozen")
+def test_shell_env_vars(null_config, tmp_path: Path):
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p1"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_shell_env_vars.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+                requires_dir=True,
+            )
+        ],
+    )
+    tasks = [
+        hf.Task(
+            schema=s1,
+            inputs={"p1": 1},
+            repeats=3,
+        ),
+        hf.Task(
+            schema=s1,
+            inputs={"p1": 1},
+        ),
+        hf.Task(
+            schema=s1,
+            inputs={"p1": 1},
+            repeats=2,
+        ),
+    ]
+    loops = [
+        hf.Loop(
+            tasks=[2],
+            num_iterations=2,
+        )
+    ]
+    wk = hf.Workflow.from_template_data(
+        tasks=tasks,
+        loops=loops,
+        template_name="main_script_test_shell_env",
+        path=tmp_path,
+    )
+    wk.add_submission(tasks=[0, 1])
+    wk.submit(wait=True, add_to_known=False, status=False)  # first submission
+
+    wk.submit(wait=True, add_to_known=False, status=False)  # outstanding runs
+
+    for run in wk.get_all_EARs():
+        run_dir = run.get_directory()
+        assert run_dir
+        with run_dir.joinpath("env_vars.json").open("rt") as fp:
+            env_dat = json.load(fp)
+
+        assert env_dat["HPCFLOW_WK_PATH"] == str(run.workflow.path)
+        assert env_dat["HPCFLOW_WK_PATH_ARG"] == str(run.workflow.path)
+
+        assert run.submission_idx is not None
+        for js in wk.submissions[run.submission_idx].jobscripts:
+            js_funcs_path = str(js.jobscript_functions_path)
+            for block in js.blocks:
+                for run_i in block.all_EARs:
+                    if run_i.id_ == run.id_:
+                        assert int(env_dat["HPCFLOW_JS_IDX"]) == js.index
+                        assert env_dat["HPCFLOW_JS_FUNCS_PATH"] == js_funcs_path
+
+        assert int(env_dat["HPCFLOW_RUN_ID"]) == run.id_
+        assert int(env_dat["HPCFLOW_RUN_IDX"]) == run.index
+        assert int(env_dat["HPCFLOW_RUN_PORT"]) == run.port_number
+
+        script_name = run.get_script_artifact_name()
+        sub_scripts_dir = wk.submissions[run.submission_idx].scripts_path
+        script_path = sub_scripts_dir.joinpath(script_name)
+
+        assert env_dat["HPCFLOW_SUB_SCRIPTS_DIR"] == str(sub_scripts_dir)
+        assert int(env_dat["HPCFLOW_SUB_IDX"]) == run.submission_idx
+
+        assert env_dat["HPCFLOW_RUN_SCRIPT_DIR"] == str(script_path.parent)
+        assert env_dat["HPCFLOW_RUN_SCRIPT_PATH"] == str(script_path)
+        assert env_dat["HPCFLOW_RUN_SCRIPT_NAME"] == script_name
+        assert env_dat["HPCFLOW_RUN_SCRIPT_NAME_NO_EXT"] == script_path.stem
+
+        assert env_dat["HPCFLOW_RUN_STD_PATH"] == str(run.get_app_std_path())
+        assert (
+            env_dat["HPCFLOW_RUN_LOG_PATH"]
+            == env_dat["HPCFLOW_LOG_PATH"]
+            == str(run.get_app_log_path())
+            if run.resources.write_app_logs
+            else " "
+        )
+
+        assert env_dat["HPCFLOW_ELEMENT_ID"] == str(run.element.id_)
+        assert env_dat["HPCFLOW_ELEMENT_IDX"] == str(run.element.index)
+
+        assert env_dat["HPCFLOW_ELEMENT_ITER_ID"] == str(run.element_iteration.id_)
+        assert env_dat["HPCFLOW_ELEMENT_ITER_IDX"] == str(run.element_iteration.index)
+
+
+@pytest.mark.integration
+@pytest.mark.skipif("hf.run_time_info.is_frozen")
+def test_combine_scripts_script_data_multiple_input_file_formats(
+    null_config, tmp_path: Path
+):
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_json_in_json_out.py>>",
+                script_data_in="json",
+                script_data_out="json",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+                requires_dir=True,
+            ),
+        ],
+    )
+    s2 = hf.TaskSchema(
+        objective="t2",
+        inputs=[
+            hf.SchemaInput(parameter=hf.Parameter("p2")),
+            hf.SchemaInput(parameter=hf.Parameter("p1c")),
+        ],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p3"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_hdf5_in_obj_2.py>>",
+                script_data_in={"p2": "direct", "p1c": "hdf5"},
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+                requires_dir=True,
+            ),
+        ],
+        parameter_class_modules=["hpcflow.sdk.core.test_utils"],
+    )
+    p1_val = 101
+    t1 = hf.Task(schema=s1, inputs={"p1": p1_val})
+    t2 = hf.Task(schema=s2, inputs={"p1c": P1(a=p1_val)})
+    wk = hf.Workflow.from_template_data(
+        tasks=[t1, t2],
+        template_name="main_script_test",
+        path=tmp_path,
+        resources={"any": {"combine_scripts": True}},
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+
+    t0_p2 = wk.tasks[0].elements[0].outputs.p2
+    t1_p3 = wk.tasks[1].elements[0].outputs.p3
+    assert isinstance(t0_p2, hf.ElementParameter)
+    assert isinstance(t1_p3, hf.ElementParameter)
+    assert t0_p2.value == p1_val + 100
+    assert t1_p3.value == p1_val + 100
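The recurring change throughout these test hunks is the new `combine_scripts` resource option (alongside the new `requires_dir` action flag and the `<<script_path>>` executable placeholder). Below is a minimal sketch of that usage pattern, distilled from the tests above; it is illustrative and not part of the diff. The template name and input value are hypothetical, the import style follows the test suite, and it assumes hpcflow's bundled test environment ("python_env") and snippet script are available:

```python
# Illustrative sketch (not part of the diff): the `combine_scripts` resource
# pattern that the new tests are parametrized over. Assumes hpcflow's bundled
# test environment ("python_env") and snippet script, as in the test suite.
from pathlib import Path

from hpcflow.app import app as hf  # import style used by the test suite

s1 = hf.TaskSchema(
    objective="t1",
    inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
    outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
    actions=[
        hf.Action(
            script="<<script:main_script_test_direct_in_direct_out.py>>",
            script_data_in="direct",
            script_data_out="direct",
            script_exe="python_script",
            environments=[hf.ActionEnvironment(environment="python_env")],
        )
    ],
)
t1 = hf.Task(schema=s1, inputs={"p1": 101})
wk = hf.Workflow.from_template_data(
    tasks=[t1],
    template_name="combine_scripts_demo",  # hypothetical template name
    path=Path("."),
    # new in this release range: combine a jobscript's snippet scripts into a
    # single generated script (e.g. "js_0.py", per the multi-element test above)
    resources={"any": {"combine_scripts": True}},
)
wk.submit(wait=True, add_to_known=False)
# the test script adds 100 to its direct input, so p2 == 201 per the tests above
assert wk.tasks[0].elements[0].outputs.p2.value == 201
```

The diff also exercises related submission-level additions, notably `wk.add_submission(tasks=[0, 1])` in `test_shell_env_vars` for submitting a subset of tasks, and a set of `HPCFLOW_*` shell environment variables exposed to runs.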