hpcflow-new2 0.2.0a190__py3-none-any.whl → 0.2.0a199__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as published to a supported registry. It is provided for informational purposes only and reflects the package contents as they appear in the registry.
- hpcflow/__pyinstaller/hook-hpcflow.py +1 -0
- hpcflow/_version.py +1 -1
- hpcflow/data/scripts/bad_script.py +2 -0
- hpcflow/data/scripts/do_nothing.py +2 -0
- hpcflow/data/scripts/env_specifier_test/input_file_generator_pass_env_spec.py +4 -0
- hpcflow/data/scripts/env_specifier_test/main_script_test_pass_env_spec.py +8 -0
- hpcflow/data/scripts/env_specifier_test/output_file_parser_pass_env_spec.py +4 -0
- hpcflow/data/scripts/env_specifier_test/v1/input_file_generator_basic.py +4 -0
- hpcflow/data/scripts/env_specifier_test/v1/main_script_test_direct_in_direct_out.py +7 -0
- hpcflow/data/scripts/env_specifier_test/v1/output_file_parser_basic.py +4 -0
- hpcflow/data/scripts/env_specifier_test/v2/main_script_test_direct_in_direct_out.py +7 -0
- hpcflow/data/scripts/input_file_generator_basic.py +3 -0
- hpcflow/data/scripts/input_file_generator_basic_FAIL.py +3 -0
- hpcflow/data/scripts/input_file_generator_test_stdout_stderr.py +8 -0
- hpcflow/data/scripts/main_script_test_direct_in.py +3 -0
- hpcflow/data/scripts/main_script_test_direct_in_direct_out_2.py +6 -0
- hpcflow/data/scripts/main_script_test_direct_in_direct_out_2_fail_allowed.py +6 -0
- hpcflow/data/scripts/main_script_test_direct_in_direct_out_2_fail_allowed_group.py +7 -0
- hpcflow/data/scripts/main_script_test_direct_in_direct_out_3.py +6 -0
- hpcflow/data/scripts/main_script_test_direct_in_group_direct_out_3.py +6 -0
- hpcflow/data/scripts/main_script_test_direct_in_group_one_fail_direct_out_3.py +6 -0
- hpcflow/data/scripts/main_script_test_hdf5_in_obj_2.py +12 -0
- hpcflow/data/scripts/main_script_test_json_out_FAIL.py +3 -0
- hpcflow/data/scripts/main_script_test_shell_env_vars.py +12 -0
- hpcflow/data/scripts/main_script_test_std_out_std_err.py +6 -0
- hpcflow/data/scripts/output_file_parser_basic.py +3 -0
- hpcflow/data/scripts/output_file_parser_basic_FAIL.py +7 -0
- hpcflow/data/scripts/output_file_parser_test_stdout_stderr.py +8 -0
- hpcflow/data/scripts/script_exit_test.py +5 -0
- hpcflow/data/template_components/environments.yaml +1 -1
- hpcflow/sdk/__init__.py +5 -0
- hpcflow/sdk/app.py +150 -89
- hpcflow/sdk/cli.py +263 -84
- hpcflow/sdk/cli_common.py +99 -5
- hpcflow/sdk/config/callbacks.py +38 -1
- hpcflow/sdk/config/config.py +102 -13
- hpcflow/sdk/config/errors.py +19 -5
- hpcflow/sdk/config/types.py +3 -0
- hpcflow/sdk/core/__init__.py +25 -1
- hpcflow/sdk/core/actions.py +914 -262
- hpcflow/sdk/core/cache.py +76 -34
- hpcflow/sdk/core/command_files.py +14 -128
- hpcflow/sdk/core/commands.py +35 -6
- hpcflow/sdk/core/element.py +122 -50
- hpcflow/sdk/core/errors.py +58 -2
- hpcflow/sdk/core/execute.py +207 -0
- hpcflow/sdk/core/loop.py +408 -50
- hpcflow/sdk/core/loop_cache.py +4 -4
- hpcflow/sdk/core/parameters.py +382 -37
- hpcflow/sdk/core/run_dir_files.py +13 -40
- hpcflow/sdk/core/skip_reason.py +7 -0
- hpcflow/sdk/core/task.py +119 -30
- hpcflow/sdk/core/task_schema.py +68 -0
- hpcflow/sdk/core/test_utils.py +66 -27
- hpcflow/sdk/core/types.py +54 -1
- hpcflow/sdk/core/utils.py +78 -7
- hpcflow/sdk/core/workflow.py +1538 -336
- hpcflow/sdk/data/workflow_spec_schema.yaml +2 -0
- hpcflow/sdk/demo/cli.py +7 -0
- hpcflow/sdk/helper/cli.py +1 -0
- hpcflow/sdk/log.py +42 -15
- hpcflow/sdk/persistence/base.py +405 -53
- hpcflow/sdk/persistence/json.py +177 -52
- hpcflow/sdk/persistence/pending.py +237 -69
- hpcflow/sdk/persistence/store_resource.py +3 -2
- hpcflow/sdk/persistence/types.py +15 -4
- hpcflow/sdk/persistence/zarr.py +928 -81
- hpcflow/sdk/submission/jobscript.py +1408 -489
- hpcflow/sdk/submission/schedulers/__init__.py +40 -5
- hpcflow/sdk/submission/schedulers/direct.py +33 -19
- hpcflow/sdk/submission/schedulers/sge.py +51 -16
- hpcflow/sdk/submission/schedulers/slurm.py +44 -16
- hpcflow/sdk/submission/schedulers/utils.py +7 -2
- hpcflow/sdk/submission/shells/base.py +68 -20
- hpcflow/sdk/submission/shells/bash.py +222 -129
- hpcflow/sdk/submission/shells/powershell.py +200 -150
- hpcflow/sdk/submission/submission.py +852 -119
- hpcflow/sdk/submission/types.py +18 -21
- hpcflow/sdk/typing.py +24 -5
- hpcflow/sdk/utils/arrays.py +71 -0
- hpcflow/sdk/utils/deferred_file.py +55 -0
- hpcflow/sdk/utils/hashing.py +16 -0
- hpcflow/sdk/utils/patches.py +12 -0
- hpcflow/sdk/utils/strings.py +33 -0
- hpcflow/tests/api/test_api.py +32 -0
- hpcflow/tests/conftest.py +19 -0
- hpcflow/tests/data/multi_path_sequences.yaml +29 -0
- hpcflow/tests/data/workflow_test_run_abort.yaml +34 -35
- hpcflow/tests/schedulers/sge/test_sge_submission.py +36 -0
- hpcflow/tests/scripts/test_input_file_generators.py +282 -0
- hpcflow/tests/scripts/test_main_scripts.py +821 -70
- hpcflow/tests/scripts/test_non_snippet_script.py +46 -0
- hpcflow/tests/scripts/test_ouput_file_parsers.py +353 -0
- hpcflow/tests/shells/wsl/test_wsl_submission.py +6 -0
- hpcflow/tests/unit/test_action.py +176 -0
- hpcflow/tests/unit/test_app.py +20 -0
- hpcflow/tests/unit/test_cache.py +46 -0
- hpcflow/tests/unit/test_cli.py +133 -0
- hpcflow/tests/unit/test_config.py +122 -1
- hpcflow/tests/unit/test_element_iteration.py +47 -0
- hpcflow/tests/unit/test_jobscript_unit.py +757 -0
- hpcflow/tests/unit/test_loop.py +1332 -27
- hpcflow/tests/unit/test_meta_task.py +325 -0
- hpcflow/tests/unit/test_multi_path_sequences.py +229 -0
- hpcflow/tests/unit/test_parameter.py +13 -0
- hpcflow/tests/unit/test_persistence.py +190 -8
- hpcflow/tests/unit/test_run.py +109 -3
- hpcflow/tests/unit/test_run_directories.py +29 -0
- hpcflow/tests/unit/test_shell.py +20 -0
- hpcflow/tests/unit/test_submission.py +5 -76
- hpcflow/tests/unit/utils/test_arrays.py +40 -0
- hpcflow/tests/unit/utils/test_deferred_file_writer.py +34 -0
- hpcflow/tests/unit/utils/test_hashing.py +65 -0
- hpcflow/tests/unit/utils/test_patches.py +5 -0
- hpcflow/tests/unit/utils/test_redirect_std.py +50 -0
- hpcflow/tests/workflows/__init__.py +0 -0
- hpcflow/tests/workflows/test_directory_structure.py +31 -0
- hpcflow/tests/workflows/test_jobscript.py +332 -0
- hpcflow/tests/workflows/test_run_status.py +198 -0
- hpcflow/tests/workflows/test_skip_downstream.py +696 -0
- hpcflow/tests/workflows/test_submission.py +140 -0
- hpcflow/tests/workflows/test_workflows.py +142 -2
- hpcflow/tests/workflows/test_zip.py +18 -0
- hpcflow/viz_demo.ipynb +6587 -3
- {hpcflow_new2-0.2.0a190.dist-info → hpcflow_new2-0.2.0a199.dist-info}/METADATA +7 -4
- hpcflow_new2-0.2.0a199.dist-info/RECORD +221 -0
- hpcflow_new2-0.2.0a190.dist-info/RECORD +0 -165
- {hpcflow_new2-0.2.0a190.dist-info → hpcflow_new2-0.2.0a199.dist-info}/LICENSE +0 -0
- {hpcflow_new2-0.2.0a190.dist-info → hpcflow_new2-0.2.0a199.dist-info}/WHEEL +0 -0
- {hpcflow_new2-0.2.0a190.dist-info → hpcflow_new2-0.2.0a199.dist-info}/entry_points.txt +0 -0
hpcflow/tests/scripts/test_non_snippet_script.py
ADDED
@@ -0,0 +1,46 @@
+import os
+import time
+
+import pytest
+
+from hpcflow.app import app as hf
+
+
+@pytest.mark.integration
+def test_non_snippet_script_execution(null_config, tmp_path):
+    test_str = "non-snippet script!"
+    script_name = "my_script.py"
+    script_contents = f'print("{test_str}")'
+
+    if os.name == "nt":
+        cmd = f"Set-Content -Path {script_name} -Value '{script_contents}'"
+    else:
+        cmd = f"echo '{script_contents}' > {script_name}"
+
+    act_1 = hf.Action(
+        commands=[hf.Command(cmd)],
+    )
+    act_2 = hf.Action(
+        script="my_script.py",
+        script_exe="python_script",
+        script_data_in="direct",
+        environments=[hf.ActionEnvironment(environment="python_env")],
+    )
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        actions=[act_1, act_2],
+    )
+
+    t1 = hf.Task(schema=s1, inputs={"p1": 101})
+    wk = hf.Workflow.from_template_data(
+        tasks=[t1],
+        template_name="non_snippet_script_test",
+        workflow_name="non_snippet_script_test",
+        overwrite=True,
+        path=tmp_path,
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+
+    std_out = wk.submissions[0].jobscripts[0].direct_stdout_path.read_text().strip()
+    assert std_out.endswith(test_str)
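The test above exercises a "non-snippet" script: act_2 refers to my_script.py by plain filename (the file is written at runtime by act_1's shell command), whereas snippet scripts elsewhere in this release are referenced as <<script:...>> and shipped under hpcflow/data/scripts/. A minimal sketch of the two declaration styles, using only API seen in this diff; how the plain filename is resolved at run time is inferred from the test, not from documentation:

# Snippet script: the <<script:...>> reference names a script packaged with
# the app (see hpcflow/data/scripts/ in the file list above).
act_snippet = hf.Action(
    script="<<script:do_nothing.py>>",
    script_exe="python_script",
    environments=[hf.ActionEnvironment(environment="python_env")],
)

# Non-snippet script: a plain filename; in the test it is created in the run
# directory by a preceding command action before this action executes.
act_non_snippet = hf.Action(
    script="my_script.py",
    script_exe="python_script",
    script_data_in="direct",
    environments=[hf.ActionEnvironment(environment="python_env")],
)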
hpcflow/tests/scripts/test_ouput_file_parsers.py
ADDED
@@ -0,0 +1,353 @@
+import os
+import time
+
+import pytest
+from hpcflow.app import app as hf
+
+
+@pytest.mark.integration
+@pytest.mark.skipif("hf.run_time_info.is_frozen")
+def test_output_file_parser_parses_file(null_config, tmp_path):
+    out_file_name = "my_output_file.txt"
+    out_file = hf.FileSpec(label="my_output_file", name=out_file_name)
+
+    if os.name == "nt":
+        cmd = f"Set-Content -Path {out_file_name} -Value (<<parameter:p1>> + 100)"
+    else:
+        cmd = f"echo $(( <<parameter:p1>> + 100 )) > {out_file_name}"
+
+    act = hf.Action(
+        commands=[hf.Command(cmd)],
+        output_file_parsers=[
+            hf.OutputFileParser(
+                output_files=[out_file],
+                output=hf.Parameter("p2"),
+                script="<<script:output_file_parser_basic.py>>",
+            ),
+        ],
+        environments=[hf.ActionEnvironment(environment="python_env")],
+    )
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaInput(parameter=hf.Parameter("p2"))],
+        actions=[act],
+    )
+
+    p1_val = 101
+    p2_val_expected = p1_val + 100
+    t1 = hf.Task(schema=s1, inputs={"p1": p1_val})
+    wk = hf.Workflow.from_template_data(
+        tasks=[t1],
+        template_name="output_file_parser_test",
+        path=tmp_path,
+    )
+
+    wk.submit(wait=True, add_to_known=False)
+
+    # check the command successfully generated the output file:
+    run_0 = wk.get_all_EARs()[0]
+    exec_path = run_0.get_directory()
+    out_file_path = exec_path.joinpath(out_file.name.name)
+    out_file_contents = out_file_path.read_text()
+    assert out_file_contents.strip() == str(p2_val_expected)
+
+    # check the output is parsed correctly:
+    assert wk.tasks[0].elements[0].outputs.p2.value == p2_val_expected
+
+
+@pytest.mark.integration
+@pytest.mark.skipif("hf.run_time_info.is_frozen")
+def test_OFP_std_stream_redirect_on_exception(new_null_config, tmp_path):
+    """Test exceptions raised by the app during execution of an OFP script are printed to the
+    std-stream redirect file (and not the jobscript's standard error file)."""
+
+    # define a custom python environment which redefines the `WK_PATH` shell variable to
+    # a nonsense value so the app cannot load the workflow and thus raises an exception
+
+    app_caps = hf.package_name.upper()
+    if os.name == "nt":
+        env_cmd = f'$env:{app_caps}_WK_PATH = "nonsense_path"'
+    else:
+        env_cmd = f'export {app_caps}_WK_PATH="nonsense_path"'
+
+    env_cmd += "; python <<script_path>> <<args>>"
+    bad_env = hf.Environment(
+        name="bad_python_env",
+        executables=[
+            hf.Executable(
+                label="python_script",
+                instances=[
+                    hf.ExecutableInstance(
+                        command=env_cmd,
+                        num_cores=1,
+                        parallel_mode=None,
+                    )
+                ],
+            )
+        ],
+    )
+    hf.envs.add_object(bad_env, skip_duplicates=True)
+
+    out_file_name = "my_output_file.txt"
+    out_file = hf.FileSpec(label="my_output_file", name=out_file_name)
+
+    if os.name == "nt":
+        cmd = f"Set-Content -Path {out_file_name} -Value (<<parameter:p1>> + 100)"
+    else:
+        cmd = f"echo $(( <<parameter:p1>> + 100 )) > {out_file_name}"
+
+    act = hf.Action(
+        commands=[hf.Command(cmd)],
+        output_file_parsers=[
+            hf.OutputFileParser(
+                output_files=[out_file],
+                output=hf.Parameter("p2"),
+                script="<<script:output_file_parser_basic.py>>",
+            ),
+        ],
+        environments=[hf.ActionEnvironment(environment="bad_python_env")],
+    )
+
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaInput(parameter=hf.Parameter("p2"))],
+        actions=[act],
+    )
+
+    p1_val = 101
+    t1 = hf.Task(schema=s1, inputs={"p1": p1_val})
+    wk = hf.Workflow.from_template_data(
+        tasks=[t1],
+        template_name="output_file_parser_test",
+        path=tmp_path,
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+
+    # jobscript stderr should be empty
+    assert not wk.submissions[0].jobscripts[0].direct_stderr_path.read_text()
+
+    # std stream file has workflow not found traceback
+    run = wk.get_all_EARs()[1]
+    std_stream_path = run.get_app_std_path()
+    assert std_stream_path.is_file()
+    assert "WorkflowNotFoundError" in std_stream_path.read_text()
+
+    hf.reload_template_components()  # remove extra envs
+
+
+@pytest.mark.integration
+@pytest.mark.skipif("hf.run_time_info.is_frozen")
+def test_OFP_std_out_std_err_not_redirected(null_config, tmp_path):
+    """Test that standard error and output streams from an OFP script are written to the jobscript
+    standard error and output files."""
+    out_file_name = "my_output_file.txt"
+    out_file = hf.FileSpec(label="my_output_file", name=out_file_name)
+
+    if os.name == "nt":
+        cmd = f"Set-Content -Path {out_file_name} -Value (<<parameter:p1>> + 100)"
+    else:
+        cmd = f"echo $(( <<parameter:p1>> + 100 )) > {out_file_name}"
+
+    act = hf.Action(
+        commands=[hf.Command(cmd)],
+        output_file_parsers=[
+            hf.OutputFileParser(
+                output_files=[out_file],
+                output=hf.Parameter("p2"),
+                inputs=["p1"],
+                script="<<script:output_file_parser_test_stdout_stderr.py>>",
+            ),
+        ],
+        environments=[hf.ActionEnvironment(environment="python_env")],
+    )
+
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaInput(parameter=hf.Parameter("p2"))],
+        actions=[act],
+    )
+    p1_val = 101
+    stdout_msg = str(p1_val)
+    stderr_msg = str(p1_val)
+    t1 = hf.Task(schema=s1, inputs={"p1": p1_val})
+    wk = hf.Workflow.from_template_data(
+        tasks=[t1],
+        template_name="ouput_file_parser_test",
+        path=tmp_path,
+    )
+    wk.submit(wait=True, add_to_known=False)
+
+    if wk.submissions[0].jobscripts[0].resources.combine_jobscript_std:
+        std_out_err = wk.submissions[0].jobscripts[0].direct_std_out_err_path.read_text()
+        assert std_out_err.strip() == f"{stdout_msg}\n{stderr_msg}"
+    else:
+        std_out = wk.submissions[0].jobscripts[0].direct_stdout_path.read_text()
+        std_err = wk.submissions[0].jobscripts[0].direct_stderr_path.read_text()
+        assert std_out.strip() == stdout_msg
+        assert std_err.strip() == stderr_msg
+
+
+@pytest.mark.integration
+@pytest.mark.skipif("hf.run_time_info.is_frozen")
+def test_output_file_parser_pass_env_spec(null_config, tmp_path):
+    out_file_name = "my_output_file.txt"
+    out_file = hf.FileSpec(label="my_output_file", name=out_file_name)
+
+    if os.name == "nt":
+        cmd = f"Set-Content -Path {out_file_name} -Value (<<parameter:p1>> + 100)"
+    else:
+        cmd = f"echo $(( <<parameter:p1>> + 100 )) > {out_file_name}"
+
+    act = hf.Action(
+        commands=[hf.Command(cmd)],
+        output_file_parsers=[
+            hf.OutputFileParser(
+                output_files=[out_file],
+                output=hf.Parameter("p2"),
+                script="<<script:env_specifier_test/output_file_parser_pass_env_spec.py>>",
+                script_pass_env_spec=True,
+            ),
+        ],
+        environments=[hf.ActionEnvironment(environment="python_env")],
+    )
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaInput(parameter=hf.Parameter("p2"))],
+        actions=[act],
+    )
+
+    t1 = hf.Task(schema=s1, inputs={"p1": 101})
+    wk = hf.Workflow.from_template_data(
+        tasks=[t1],
+        template_name="output_file_parser_pass_env_spec",
+        path=tmp_path,
+    )
+
+    wk.submit(wait=True, add_to_known=False, status=False)
+
+    std_out = wk.submissions[0].jobscripts[0].direct_stdout_path.read_text().strip()
+    assert std_out == "{'name': 'python_env'}"
+
+
+@pytest.mark.integration
+@pytest.mark.skipif("hf.run_time_info.is_frozen")
+def test_env_specifier_in_output_file_parser_script_path(new_null_config, tmp_path):
+
+    py_env = hf.Environment(
+        name="python_env",
+        specifiers={"version": "v1"},
+        executables=[
+            hf.Executable(
+                label="python_script",
+                instances=[
+                    hf.ExecutableInstance(
+                        command="python <<script_path>> <<args>>",
+                        num_cores=1,
+                        parallel_mode=None,
+                    )
+                ],
+            )
+        ],
+    )
+    hf.envs.add_object(py_env, skip_duplicates=True)
+
+    out_file_name = "my_output_file.txt"
+    out_file = hf.FileSpec(label="my_output_file", name=out_file_name)
+
+    if os.name == "nt":
+        cmd = f"Set-Content -Path {out_file_name} -Value (<<parameter:p1>> + 100)"
+    else:
+        cmd = f"echo $(( <<parameter:p1>> + 100 )) > {out_file_name}"
+
+    act = hf.Action(
+        commands=[hf.Command(cmd)],
+        output_file_parsers=[
+            hf.OutputFileParser(
+                output_files=[out_file],
+                output=hf.Parameter("p2"),
+                script="<<script:env_specifier_test/<<env:version>>/output_file_parser_basic.py>>",
+            ),
+        ],
+        environments=[hf.ActionEnvironment(environment="python_env")],
+    )
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaInput(parameter=hf.Parameter("p2"))],
+        actions=[act],
+    )
+
+    p1_val = 101
+    p2_val_expected = p1_val + 100
+    t1 = hf.Task(
+        schema=s1,
+        inputs={"p1": p1_val},
+        environments={"python_env": {"version": "v1"}},
+    )
+    wk = hf.Workflow.from_template_data(
+        tasks=[t1],
+        template_name="output_file_parser_test_env_specifier",
+        path=tmp_path,
+    )
+
+    wk.submit(wait=True, add_to_known=False, status=False)
+
+    # check the command successfully generated the output file:
+    run_0 = wk.get_all_EARs()[0]
+    exec_path = run_0.get_directory()
+    out_file_path = exec_path.joinpath(out_file.name.name)
+    out_file_contents = out_file_path.read_text()
+    assert out_file_contents.strip() == str(p2_val_expected)
+
+    # check the output is parsed correctly:
+    assert wk.tasks[0].elements[0].outputs.p2.value == p2_val_expected
+
+    hf.reload_template_components()  # remove extra envs
+
+
+@pytest.mark.integration
+def test_no_script_no_output_saves_files(null_config, tmp_path):
+    """Check we can use an output file parser with no script or output to save files."""
+    out_file_name = "my_output_file.txt"
+    out_file = hf.FileSpec(label="my_output_file", name=out_file_name)
+
+    if os.name == "nt":
+        cmd = f"Set-Content -Path {out_file_name} -Value (<<parameter:p1>> + 100)"
+    else:
+        cmd = f"echo $(( <<parameter:p1>> + 100 )) > {out_file_name}"
+
+    act = hf.Action(
+        commands=[hf.Command(cmd)],
+        output_file_parsers=[hf.OutputFileParser(output_files=[out_file])],
+        environments=[hf.ActionEnvironment(environment="python_env")],
+    )
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        actions=[act],
+    )
+
+    p1_val = 101
+    p2_val_expected = p1_val + 100
+    t1 = hf.Task(schema=s1, inputs={"p1": p1_val})
+    wk = hf.Workflow.from_template_data(
+        tasks=[t1],
+        template_name="output_file_parser_test_no_output_no_script",
+        path=tmp_path,
+    )
+
+    wk.submit(wait=True, add_to_known=False, status=False)
+
+    # check the output file is saved to artifacts:
+    run_0 = wk.get_all_EARs()[0]
+    exec_path = run_0.get_directory()
+    out_file_path = exec_path.joinpath(out_file.name.name)
+    out_file_contents = out_file_path.read_text()
+    assert out_file_contents.strip() == str(p2_val_expected)
+
+    # check no scripts generated
+    assert not any(wk.submissions[0].scripts_path.iterdir())
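Distilled from the tests above, the minimal wiring for an output file parser is: a command writes a file, an hf.FileSpec declares it, and an hf.OutputFileParser attached to the same action names the parameter its script populates. A sketch using only names that appear in the tests:

out_file = hf.FileSpec(label="my_output_file", name="my_output_file.txt")

act = hf.Action(
    commands=[hf.Command("echo $(( <<parameter:p1>> + 100 )) > my_output_file.txt")],
    output_file_parsers=[
        hf.OutputFileParser(
            output_files=[out_file],    # file(s) the parser consumes
            output=hf.Parameter("p2"),  # parameter the parser script sets
            script="<<script:output_file_parser_basic.py>>",
        ),
    ],
    environments=[hf.ActionEnvironment(environment="python_env")],
)

As test_no_script_no_output_saves_files shows, both script and output may be omitted, in which case the parser simply saves the declared files as workflow artifacts; script_pass_env_spec=True additionally passes the resolved environment specifiers (e.g. {'name': 'python_env'}) to the parser script.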
hpcflow/tests/shells/wsl/test_wsl_submission.py
CHANGED
@@ -11,6 +11,12 @@ def test_workflow_1(tmp_path: Path, null_config):
     wk.submit(wait=True, add_to_known=False)
     time.sleep(20)  # TODO: bug! for some reason the new parameter isn't actually written
     # to disk when using WSL until several seconds after the workflow has finished!
+    # this is probably because the NTFS filesystem is "sync'd" via polling in this case?
+    # so changes made on the NTFS files by WSL are not immediate on the Windows side.
+    # perhaps when we re-wire the wait command, we could add an option to wait on a
+    # parameter being set, which could watch the relevant chunk file for changes?
+
+    # ACTUALLY: I think wait is not working here at all for WSL... it's returning early!
     p2 = wk.tasks[0].elements[0].outputs.p2
     assert isinstance(p2, hf.ElementParameter)
     assert p2.value == "201"
hpcflow/tests/unit/test_action.py
CHANGED
@@ -799,3 +799,179 @@ def test_command_rules_prevent_runs_initialised_with_valid_action_rules(
     assert len(wk.tasks[0].elements[0].action_runs[0].commands_idx) == 1

     assert not wk.tasks[1].elements[0].iterations[0].EARs_initialised
+
+
+def test_get_commands_file_hash_distinct_act_idx(null_config):
+    act = hf.Action(commands=[hf.Command("echo <<parameter:p1>>")])
+    data_idx = {"inputs.p1": 0}
+    h1 = act.get_commands_file_hash(data_idx=data_idx, action_idx=0)
+    h2 = act.get_commands_file_hash(data_idx=data_idx, action_idx=1)
+    assert h1 != h2
+
+
+def test_get_commands_file_hash_distinct_data_idx_vals(null_config):
+    act = hf.Action(commands=[hf.Command("echo <<parameter:p1>>")])
+    h1 = act.get_commands_file_hash(data_idx={"inputs.p1": 0}, action_idx=0)
+    h2 = act.get_commands_file_hash(data_idx={"inputs.p1": 1}, action_idx=0)
+    assert h1 != h2
+
+
+def test_get_commands_file_hash_distinct_data_idx_sub_vals(null_config):
+    act = hf.Action(commands=[hf.Command("echo <<parameter:p1>>")])
+    di_1 = {"inputs.p1": 0, "inputs.p1.a": 1}
+    di_2 = {"inputs.p1": 0, "inputs.p1.a": 2}
+    h1 = act.get_commands_file_hash(data_idx=di_1, action_idx=0)
+    h2 = act.get_commands_file_hash(data_idx=di_2, action_idx=0)
+    assert h1 != h2
+
+
+def test_get_commands_file_hash_equivalent_data_idx_outputs(null_config):
+    """Different output data indices should not generate distinct hashes."""
+    act = hf.Action(commands=[hf.Command("echo <<parameter:p1>>")])
+    di_1 = {"inputs.p1": 0, "outputs.p2": 1}
+    di_2 = {"inputs.p1": 0, "outputs.p2": 2}
+    h1 = act.get_commands_file_hash(data_idx=di_1, action_idx=0)
+    h2 = act.get_commands_file_hash(data_idx=di_2, action_idx=0)
+    assert h1 == h2
+
+
+def test_get_commands_file_hash_return_int(null_config):
+    act = hf.Action(commands=[hf.Command("echo <<parameter:p1>>")])
+    h1 = act.get_commands_file_hash(data_idx={"inputs.p1": 0}, action_idx=0)
+    assert type(h1) == int
+
+
+def test_get_commands_file_hash_distinct_schema(null_config):
+    act_1 = hf.Action(commands=[hf.Command("echo <<parameter:p1>>")])
+    act_2 = hf.Action(commands=[hf.Command("echo <<parameter:p1>>")])
+    hf.TaskSchema(objective="t1", inputs=[hf.SchemaInput("p1")], actions=[act_1])
+    hf.TaskSchema(objective="t2", inputs=[hf.SchemaInput("p1")], actions=[act_2])
+    assert act_1.task_schema
+    assert act_2.task_schema
+    h1 = act_1.get_commands_file_hash(data_idx={}, action_idx=0)
+    h2 = act_2.get_commands_file_hash(data_idx={}, action_idx=0)
+    assert h1 != h2
+
+
+def test_get_commands_file_hash_equivalent_cmd_rule_inputs_path(null_config):
+    """Input-path rule does not affect hash, given equivalent data indices."""
+    act = hf.Action(
+        commands=[
+            hf.Command(
+                command="echo <<parameter:p1>>",
+                rules=[hf.ActionRule(path="inputs.p1", condition={"value.equal_to": 1})],
+            )
+        ],
+    )
+    h1 = act.get_commands_file_hash(data_idx={"inputs.p1": 0}, action_idx=0)
+    h2 = act.get_commands_file_hash(data_idx={"inputs.p1": 0}, action_idx=0)
+    assert h1 == h2
+
+
+def test_get_commands_file_hash_distinct_cmd_rule_resources_path(null_config):
+    """Resource-path rule affects hash given distinct resource data indices."""
+    act = hf.Action(
+        commands=[
+            hf.Command(
+                command="echo <<parameter:p1>>",
+                rules=[
+                    hf.ActionRule(
+                        path="resources.num_cores", condition={"value.equal_to": 8}
+                    )
+                ],
+            )
+        ],
+    )
+    di_1 = {"inputs.p1": 0, "resources.any.num_cores": 2}
+    di_2 = {"inputs.p1": 0, "resources.any.num_cores": 3}
+    h1 = act.get_commands_file_hash(data_idx=di_1, action_idx=0)
+    h2 = act.get_commands_file_hash(data_idx=di_2, action_idx=0)
+    assert h1 != h2
+
+
+def test_get_script_input_output_file_paths_json_in_json_out(null_config):
+    act = hf.Action(
+        script="<<script:main_script_test_json_in_json_out.py>>",
+        script_data_in="json",
+        script_data_out="json",
+        script_exe="python_script",
+        environments=[hf.ActionEnvironment(environment="python_env")],
+        requires_dir=True,
+    )
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
+        actions=[act],
+    )
+    assert s1.actions[0].get_script_input_output_file_paths((0, 1, 2)) == {
+        "inputs": {"json": Path("js_0_block_1_act_2_inputs.json")},
+        "outputs": {"json": Path("js_0_block_1_act_2_outputs.json")},
+    }
+
+
+def test_get_script_input_output_file_paths_hdf5_in_direct_out(null_config):
+    act = hf.Action(
+        script="<<script:main_script_test_hdf5_in_obj_2.py>>",
+        script_data_in="hdf5",
+        script_data_out="direct",
+        script_exe="python_script",
+        environments=[hf.ActionEnvironment(environment="python_env")],
+        requires_dir=True,
+    )
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
+        actions=[act],
+    )
+    assert s1.actions[0].get_script_input_output_file_paths((0, 1, 2)) == {
+        "inputs": {"hdf5": Path("js_0_block_1_act_2_inputs.h5")},
+        "outputs": {},
+    }
+
+
+def test_get_script_input_output_file_command_args_json_in_json_out(null_config):
+    act = hf.Action(
+        script="<<script:main_script_test_json_in_json_out.py>>",
+        script_data_in="json",
+        script_data_out="json",
+        script_exe="python_script",
+        environments=[hf.ActionEnvironment(environment="python_env")],
+        requires_dir=True,
+    )
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
+        actions=[act],
+    )
+    js_idx, blk_idx, blk_act_idx = s1.actions[0].get_block_act_idx_shell_vars()
+    assert s1.actions[0].get_script_input_output_file_command_args() == [
+        "--inputs-json",
+        f"js_{js_idx}_block_{blk_idx}_act_{blk_act_idx}_inputs.json",
+        "--outputs-json",
+        f"js_{js_idx}_block_{blk_idx}_act_{blk_act_idx}_outputs.json",
+    ]
+
+
+def test_get_script_input_output_file_command_args_hdf5_in_direct_out(null_config):
+    act = hf.Action(
+        script="<<script:main_script_test_hdf5_in_obj_2.py>>",
+        script_data_in="hdf5",
+        script_data_out="direct",
+        script_exe="python_script",
+        environments=[hf.ActionEnvironment(environment="python_env")],
+        requires_dir=True,
+    )
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
+        actions=[act],
+    )
+    js_idx, blk_idx, blk_act_idx = s1.actions[0].get_block_act_idx_shell_vars()
+    assert s1.actions[0].get_script_input_output_file_command_args() == [
+        "--inputs-hdf5",
+        f"js_{js_idx}_block_{blk_idx}_act_{blk_act_idx}_inputs.h5",
+    ]
hpcflow/tests/unit/test_app.py
CHANGED
@@ -1,4 +1,5 @@
 from __future__ import annotations
+from pathlib import Path
 import sys
 from typing import TYPE_CHECKING
 import pytest
@@ -110,3 +111,22 @@ def test_get_demo_data_cache(null_config) -> None:
     with hf.demo_data_cache_dir.joinpath("text_file.txt").open("rt") as fh:
         contents = fh.read()
     assert contents == "\n".join(f"{i}" for i in range(1, 11)) + "\n"
+
+
+def test_list_demo_workflows():
+    # sanity checks
+    lst = hf.list_demo_workflows()
+    assert isinstance(lst, tuple)
+    assert all(isinstance(i, str) and "." not in i for i in lst)  # no extension included
+
+
+def test_get_demo_workflows():
+    # sanity checks
+    lst = hf.list_demo_workflows()
+    demo_paths = hf._get_demo_workflows()
+    # keys should be those in the list:
+    assert sorted(list(lst)) == sorted(list(demo_paths.keys()))
+
+    # values should be distinct, absolute paths:
+    assert all(isinstance(i, Path) and i.is_absolute() for i in demo_paths.values())
+    assert len(set(demo_paths.values())) == len(demo_paths)
hpcflow/tests/unit/test_cache.py
ADDED
@@ -0,0 +1,46 @@
+from pathlib import Path
+from hpcflow.sdk.core.cache import ObjectCache
+from hpcflow.sdk.core.test_utils import make_workflow
+
+
+def test_object_cache_dependencies_simple(tmp_path: Path):
+    wk = make_workflow(
+        schemas_spec=[
+            ({"p1": None}, ("p2",), "t1"),
+            ({"p2": None}, ("p3",), "t2"),
+            ({"p3": None}, ("p4",), "t3"),
+            ({"p4": None}, ("p5",), "t4"),
+        ],
+        path=tmp_path,
+        local_inputs={0: ("p1",)},
+        overwrite=True,
+    )
+    obj_cache = ObjectCache.build(wk, dependencies=True)
+    assert obj_cache.run_dependencies == {0: set(), 1: {0}, 2: {1}, 3: {2}}
+    assert obj_cache.run_dependents == {0: {1}, 1: {2}, 2: {3}, 3: set()}
+    assert obj_cache.iter_run_dependencies == {0: set(), 1: {0}, 2: {1}, 3: {2}}
+    assert obj_cache.iter_iter_dependencies == {
+        0: set(),
+        1: {0},
+        2: {1},
+        3: {2},
+    }
+    assert obj_cache.elem_iter_dependencies == {
+        0: set(),
+        1: {0},
+        2: {1},
+        3: {2},
+    }
+    assert obj_cache.elem_elem_dependencies == {
+        0: set(),
+        1: {0},
+        2: {1},
+        3: {2},
+    }
+    assert obj_cache.elem_elem_dependents == {0: {1}, 1: {2}, 2: {3}, 3: set()}
+    assert obj_cache.elem_elem_dependents_rec == {
+        0: {1, 2, 3},
+        1: {2, 3},
+        2: {3},
+        3: set(),
+    }