hpcflow-new2 0.2.0a189__py3-none-any.whl → 0.2.0a199__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their respective public registries.
- hpcflow/__pyinstaller/hook-hpcflow.py +9 -6
- hpcflow/_version.py +1 -1
- hpcflow/app.py +1 -0
- hpcflow/data/scripts/bad_script.py +2 -0
- hpcflow/data/scripts/do_nothing.py +2 -0
- hpcflow/data/scripts/env_specifier_test/input_file_generator_pass_env_spec.py +4 -0
- hpcflow/data/scripts/env_specifier_test/main_script_test_pass_env_spec.py +8 -0
- hpcflow/data/scripts/env_specifier_test/output_file_parser_pass_env_spec.py +4 -0
- hpcflow/data/scripts/env_specifier_test/v1/input_file_generator_basic.py +4 -0
- hpcflow/data/scripts/env_specifier_test/v1/main_script_test_direct_in_direct_out.py +7 -0
- hpcflow/data/scripts/env_specifier_test/v1/output_file_parser_basic.py +4 -0
- hpcflow/data/scripts/env_specifier_test/v2/main_script_test_direct_in_direct_out.py +7 -0
- hpcflow/data/scripts/input_file_generator_basic.py +3 -0
- hpcflow/data/scripts/input_file_generator_basic_FAIL.py +3 -0
- hpcflow/data/scripts/input_file_generator_test_stdout_stderr.py +8 -0
- hpcflow/data/scripts/main_script_test_direct_in.py +3 -0
- hpcflow/data/scripts/main_script_test_direct_in_direct_out_2.py +6 -0
- hpcflow/data/scripts/main_script_test_direct_in_direct_out_2_fail_allowed.py +6 -0
- hpcflow/data/scripts/main_script_test_direct_in_direct_out_2_fail_allowed_group.py +7 -0
- hpcflow/data/scripts/main_script_test_direct_in_direct_out_3.py +6 -0
- hpcflow/data/scripts/main_script_test_direct_in_group_direct_out_3.py +6 -0
- hpcflow/data/scripts/main_script_test_direct_in_group_one_fail_direct_out_3.py +6 -0
- hpcflow/data/scripts/main_script_test_hdf5_in_obj.py +1 -1
- hpcflow/data/scripts/main_script_test_hdf5_in_obj_2.py +12 -0
- hpcflow/data/scripts/main_script_test_hdf5_out_obj.py +1 -1
- hpcflow/data/scripts/main_script_test_json_out_FAIL.py +3 -0
- hpcflow/data/scripts/main_script_test_shell_env_vars.py +12 -0
- hpcflow/data/scripts/main_script_test_std_out_std_err.py +6 -0
- hpcflow/data/scripts/output_file_parser_basic.py +3 -0
- hpcflow/data/scripts/output_file_parser_basic_FAIL.py +7 -0
- hpcflow/data/scripts/output_file_parser_test_stdout_stderr.py +8 -0
- hpcflow/data/scripts/script_exit_test.py +5 -0
- hpcflow/data/template_components/environments.yaml +1 -1
- hpcflow/sdk/__init__.py +26 -15
- hpcflow/sdk/app.py +2192 -768
- hpcflow/sdk/cli.py +506 -296
- hpcflow/sdk/cli_common.py +105 -7
- hpcflow/sdk/config/__init__.py +1 -1
- hpcflow/sdk/config/callbacks.py +115 -43
- hpcflow/sdk/config/cli.py +126 -103
- hpcflow/sdk/config/config.py +674 -318
- hpcflow/sdk/config/config_file.py +131 -95
- hpcflow/sdk/config/errors.py +125 -84
- hpcflow/sdk/config/types.py +148 -0
- hpcflow/sdk/core/__init__.py +25 -1
- hpcflow/sdk/core/actions.py +1771 -1059
- hpcflow/sdk/core/app_aware.py +24 -0
- hpcflow/sdk/core/cache.py +139 -79
- hpcflow/sdk/core/command_files.py +263 -287
- hpcflow/sdk/core/commands.py +145 -112
- hpcflow/sdk/core/element.py +828 -535
- hpcflow/sdk/core/enums.py +192 -0
- hpcflow/sdk/core/environment.py +74 -93
- hpcflow/sdk/core/errors.py +455 -52
- hpcflow/sdk/core/execute.py +207 -0
- hpcflow/sdk/core/json_like.py +540 -272
- hpcflow/sdk/core/loop.py +751 -347
- hpcflow/sdk/core/loop_cache.py +164 -47
- hpcflow/sdk/core/object_list.py +370 -207
- hpcflow/sdk/core/parameters.py +1100 -627
- hpcflow/sdk/core/rule.py +59 -41
- hpcflow/sdk/core/run_dir_files.py +21 -37
- hpcflow/sdk/core/skip_reason.py +7 -0
- hpcflow/sdk/core/task.py +1649 -1339
- hpcflow/sdk/core/task_schema.py +308 -196
- hpcflow/sdk/core/test_utils.py +191 -114
- hpcflow/sdk/core/types.py +440 -0
- hpcflow/sdk/core/utils.py +485 -309
- hpcflow/sdk/core/validation.py +82 -9
- hpcflow/sdk/core/workflow.py +2544 -1178
- hpcflow/sdk/core/zarr_io.py +98 -137
- hpcflow/sdk/data/workflow_spec_schema.yaml +2 -0
- hpcflow/sdk/demo/cli.py +53 -33
- hpcflow/sdk/helper/cli.py +18 -15
- hpcflow/sdk/helper/helper.py +75 -63
- hpcflow/sdk/helper/watcher.py +61 -28
- hpcflow/sdk/log.py +122 -71
- hpcflow/sdk/persistence/__init__.py +8 -31
- hpcflow/sdk/persistence/base.py +1360 -606
- hpcflow/sdk/persistence/defaults.py +6 -0
- hpcflow/sdk/persistence/discovery.py +38 -0
- hpcflow/sdk/persistence/json.py +568 -188
- hpcflow/sdk/persistence/pending.py +382 -179
- hpcflow/sdk/persistence/store_resource.py +39 -23
- hpcflow/sdk/persistence/types.py +318 -0
- hpcflow/sdk/persistence/utils.py +14 -11
- hpcflow/sdk/persistence/zarr.py +1337 -433
- hpcflow/sdk/runtime.py +44 -41
- hpcflow/sdk/submission/{jobscript_info.py → enums.py} +39 -12
- hpcflow/sdk/submission/jobscript.py +1651 -692
- hpcflow/sdk/submission/schedulers/__init__.py +167 -39
- hpcflow/sdk/submission/schedulers/direct.py +121 -81
- hpcflow/sdk/submission/schedulers/sge.py +170 -129
- hpcflow/sdk/submission/schedulers/slurm.py +291 -268
- hpcflow/sdk/submission/schedulers/utils.py +12 -2
- hpcflow/sdk/submission/shells/__init__.py +14 -15
- hpcflow/sdk/submission/shells/base.py +150 -29
- hpcflow/sdk/submission/shells/bash.py +283 -173
- hpcflow/sdk/submission/shells/os_version.py +31 -30
- hpcflow/sdk/submission/shells/powershell.py +228 -170
- hpcflow/sdk/submission/submission.py +1014 -335
- hpcflow/sdk/submission/types.py +140 -0
- hpcflow/sdk/typing.py +182 -12
- hpcflow/sdk/utils/arrays.py +71 -0
- hpcflow/sdk/utils/deferred_file.py +55 -0
- hpcflow/sdk/utils/hashing.py +16 -0
- hpcflow/sdk/utils/patches.py +12 -0
- hpcflow/sdk/utils/strings.py +33 -0
- hpcflow/tests/api/test_api.py +32 -0
- hpcflow/tests/conftest.py +27 -6
- hpcflow/tests/data/multi_path_sequences.yaml +29 -0
- hpcflow/tests/data/workflow_test_run_abort.yaml +34 -35
- hpcflow/tests/schedulers/sge/test_sge_submission.py +36 -0
- hpcflow/tests/schedulers/slurm/test_slurm_submission.py +5 -2
- hpcflow/tests/scripts/test_input_file_generators.py +282 -0
- hpcflow/tests/scripts/test_main_scripts.py +866 -85
- hpcflow/tests/scripts/test_non_snippet_script.py +46 -0
- hpcflow/tests/scripts/test_ouput_file_parsers.py +353 -0
- hpcflow/tests/shells/wsl/test_wsl_submission.py +12 -4
- hpcflow/tests/unit/test_action.py +262 -75
- hpcflow/tests/unit/test_action_rule.py +9 -4
- hpcflow/tests/unit/test_app.py +33 -6
- hpcflow/tests/unit/test_cache.py +46 -0
- hpcflow/tests/unit/test_cli.py +134 -1
- hpcflow/tests/unit/test_command.py +71 -54
- hpcflow/tests/unit/test_config.py +142 -16
- hpcflow/tests/unit/test_config_file.py +21 -18
- hpcflow/tests/unit/test_element.py +58 -62
- hpcflow/tests/unit/test_element_iteration.py +50 -1
- hpcflow/tests/unit/test_element_set.py +29 -19
- hpcflow/tests/unit/test_group.py +4 -2
- hpcflow/tests/unit/test_input_source.py +116 -93
- hpcflow/tests/unit/test_input_value.py +29 -24
- hpcflow/tests/unit/test_jobscript_unit.py +757 -0
- hpcflow/tests/unit/test_json_like.py +44 -35
- hpcflow/tests/unit/test_loop.py +1396 -84
- hpcflow/tests/unit/test_meta_task.py +325 -0
- hpcflow/tests/unit/test_multi_path_sequences.py +229 -0
- hpcflow/tests/unit/test_object_list.py +17 -12
- hpcflow/tests/unit/test_parameter.py +29 -7
- hpcflow/tests/unit/test_persistence.py +237 -42
- hpcflow/tests/unit/test_resources.py +20 -18
- hpcflow/tests/unit/test_run.py +117 -6
- hpcflow/tests/unit/test_run_directories.py +29 -0
- hpcflow/tests/unit/test_runtime.py +2 -1
- hpcflow/tests/unit/test_schema_input.py +23 -15
- hpcflow/tests/unit/test_shell.py +23 -2
- hpcflow/tests/unit/test_slurm.py +8 -7
- hpcflow/tests/unit/test_submission.py +38 -89
- hpcflow/tests/unit/test_task.py +352 -247
- hpcflow/tests/unit/test_task_schema.py +33 -20
- hpcflow/tests/unit/test_utils.py +9 -11
- hpcflow/tests/unit/test_value_sequence.py +15 -12
- hpcflow/tests/unit/test_workflow.py +114 -83
- hpcflow/tests/unit/test_workflow_template.py +0 -1
- hpcflow/tests/unit/utils/test_arrays.py +40 -0
- hpcflow/tests/unit/utils/test_deferred_file_writer.py +34 -0
- hpcflow/tests/unit/utils/test_hashing.py +65 -0
- hpcflow/tests/unit/utils/test_patches.py +5 -0
- hpcflow/tests/unit/utils/test_redirect_std.py +50 -0
- hpcflow/tests/workflows/__init__.py +0 -0
- hpcflow/tests/workflows/test_directory_structure.py +31 -0
- hpcflow/tests/workflows/test_jobscript.py +334 -1
- hpcflow/tests/workflows/test_run_status.py +198 -0
- hpcflow/tests/workflows/test_skip_downstream.py +696 -0
- hpcflow/tests/workflows/test_submission.py +140 -0
- hpcflow/tests/workflows/test_workflows.py +160 -15
- hpcflow/tests/workflows/test_zip.py +18 -0
- hpcflow/viz_demo.ipynb +6587 -3
- {hpcflow_new2-0.2.0a189.dist-info → hpcflow_new2-0.2.0a199.dist-info}/METADATA +8 -4
- hpcflow_new2-0.2.0a199.dist-info/RECORD +221 -0
- hpcflow/sdk/core/parallel.py +0 -21
- hpcflow_new2-0.2.0a189.dist-info/RECORD +0 -158
- {hpcflow_new2-0.2.0a189.dist-info → hpcflow_new2-0.2.0a199.dist-info}/LICENSE +0 -0
- {hpcflow_new2-0.2.0a189.dist-info → hpcflow_new2-0.2.0a199.dist-info}/WHEEL +0 -0
- {hpcflow_new2-0.2.0a189.dist-info → hpcflow_new2-0.2.0a199.dist-info}/entry_points.txt +0 -0
hpcflow/tests/workflows/test_submission.py (new file)

@@ -0,0 +1,140 @@
+import os
+from pathlib import Path
+import pytest
+from hpcflow.app import app as hf
+
+
+@pytest.mark.integration
+def test_zarr_metadata_file_modification_times_many_jobscripts(null_config, tmp_path):
+    """Test that root group attributes are modified first, then individual jobscript
+    at-submit-metadata chunk files, then the submission at-submit-metadata group
+    attributes."""
+
+    num_js = 30
+    t1 = hf.Task(
+        schema=hf.task_schemas.test_t1_conditional_OS,
+        inputs={"p1": 100},
+        sequences=[
+            hf.ValueSequence(
+                path="resources.any.resources_id", values=list(range(num_js))
+            )
+        ],
+    )
+    wk = hf.Workflow.from_template_data(
+        template_name="test_zarr_metadata_attrs_modified_times",
+        path=tmp_path,
+        tasks=[t1],
+        store="zarr",
+    )
+    wk.submit(add_to_known=False, status=False, cancel=True)
+
+    mtime_meta_group = Path(wk.path).joinpath(".zattrs").stat().st_mtime
+    mtime_mid_jobscript_chunk = (
+        wk._store._get_jobscripts_at_submit_metadata_arr_path(0)
+        .joinpath(str(int(num_js / 2)))
+        .stat()
+        .st_mtime
+    )
+    mtime_submission_group = (
+        wk._store._get_submission_metadata_group_path(0)
+        .joinpath(".zattrs")
+        .stat()
+        .st_mtime
+    )
+    assert mtime_meta_group < mtime_mid_jobscript_chunk < mtime_submission_group
+
+
+@pytest.mark.integration
+def test_json_metadata_file_modification_times_many_jobscripts(null_config, tmp_path):
+    """Test that the metadata.json file is modified first, then the submissions.json
+    file."""
+
+    num_js = 30
+    t1 = hf.Task(
+        schema=hf.task_schemas.test_t1_conditional_OS,
+        inputs={"p1": 100},
+        sequences=[
+            hf.ValueSequence(
+                path="resources.any.resources_id", values=list(range(num_js))
+            )
+        ],
+    )
+    wk = hf.Workflow.from_template_data(
+        template_name="test_zarr_metadata_attrs_modified_times",
+        path=tmp_path,
+        tasks=[t1],
+        store="json",
+    )
+    wk.submit(add_to_known=False, status=False, cancel=True)
+
+    mtime_meta = Path(wk.path).joinpath("metadata.json").stat().st_mtime
+    mtime_subs = Path(wk.path).joinpath("submissions.json").stat().st_mtime
+    assert mtime_meta < mtime_subs
+
+
+@pytest.mark.integration
+def test_subission_start_end_times_equal_to_first_and_last_jobscript_start_end_times(
+    null_config, tmp_path
+):
+    num_js = 2
+    t1 = hf.Task(
+        schema=hf.task_schemas.test_t1_conditional_OS,
+        inputs={"p1": 100},
+        sequences=[
+            hf.ValueSequence(
+                path="resources.any.resources_id", values=list(range(num_js))
+            )
+        ],
+    )
+    wk = hf.Workflow.from_template_data(
+        template_name="test_subission_start_end_times",
+        path=tmp_path,
+        tasks=[t1],
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+
+    sub = wk.submissions[0]
+    jobscripts = sub.jobscripts
+
+    assert len(jobscripts) == num_js
+
+    # submission has two jobscripts, so start time should be start time of first jobscript:
+    assert sub.start_time == jobscripts[0].start_time
+
+    # ...and end time should be end time of second jobscript:
+    assert sub.end_time == jobscripts[1].end_time
+
+
+@pytest.mark.integration
+def test_multiple_jobscript_functions_files(null_config, tmp_path):
+    if os.name == "nt":
+        shell_exes = ["powershell.exe", "pwsh.exe", "pwsh.exe"]
+    else:
+        shell_exes = ["/bin/bash", "bash", "bash"]
+    t1 = hf.Task(
+        schema=hf.task_schemas.test_t1_conditional_OS,
+        inputs={"p1": 100},
+        sequences=[
+            hf.ValueSequence(
+                path="resources.any.shell_args.executable",
+                values=shell_exes,
+            )
+        ],
+    )
+    wk = hf.Workflow.from_template_data(
+        template_name="test_multi_js_funcs_files",
+        path=tmp_path,
+        tasks=[t1],
+        store="json",
+    )
+    wk.submit(add_to_known=True, status=False, cancel=True)
+
+    sub_js = wk.submissions[0].jobscripts
+    assert len(sub_js) == 2
+
+    funcs_0 = sub_js[0].jobscript_functions_path
+    funcs_1 = sub_js[1].jobscript_functions_path
+
+    assert funcs_0.is_file()
+    assert funcs_1.is_file()
+    assert funcs_0 != funcs_1
hpcflow/tests/workflows/test_workflows.py

@@ -1,8 +1,11 @@
 import os
+import sys
+from pathlib import Path
 import time
 import pytest
 from hpcflow.app import app as hf
-from hpcflow.sdk.core.
+from hpcflow.sdk.core.enums import EARStatus
+from hpcflow.sdk.core.skip_reason import SkipReason
 from hpcflow.sdk.core.test_utils import (
     P1_parameter_cls as P1,
     P1_sub_parameter_cls as P1_sub,
@@ -11,26 +14,30 @@ from hpcflow.sdk.core.test_utils import (
 
 
 @pytest.mark.integration
-def test_workflow_1(tmp_path, new_null_config):
+def test_workflow_1(tmp_path: Path, new_null_config):
     wk = make_test_data_YAML_workflow("workflow_1.yaml", path=tmp_path)
     wk.submit(wait=True, add_to_known=False)
-
+    p2 = wk.tasks[0].elements[0].outputs.p2
+    assert isinstance(p2, hf.ElementParameter)
+    assert p2.value == "201"
 
 
 @pytest.mark.integration
-def test_workflow_1_with_working_dir_with_spaces(tmp_path, new_null_config):
+def test_workflow_1_with_working_dir_with_spaces(tmp_path: Path, new_null_config):
     workflow_dir = tmp_path / "sub path with spaces"
     workflow_dir.mkdir()
     wk = make_test_data_YAML_workflow("workflow_1.yaml", path=workflow_dir)
     wk.submit(wait=True, add_to_known=False)
-
+    p2 = wk.tasks[0].elements[0].outputs.p2
+    assert isinstance(p2, hf.ElementParameter)
+    assert p2.value == "201"
 
 
 @pytest.mark.integration
-@pytest.mark.
-    reason="
+@pytest.mark.skipif(
+    sys.platform == "darwin", reason="fails/too slow; need to investigate"
 )
-def test_run_abort(tmp_path, new_null_config):
+def test_run_abort(tmp_path: Path, new_null_config):
     wk = make_test_data_YAML_workflow("workflow_test_run_abort.yaml", path=tmp_path)
     wk.submit(add_to_known=False)
 
@@ -56,7 +63,7 @@ def test_run_abort(tmp_path, new_null_config):
 
 @pytest.mark.integration
 @pytest.mark.parametrize("store", ["json", "zarr"])
-def test_multi_command_action_stdout_parsing(null_config, tmp_path, store):
+def test_multi_command_action_stdout_parsing(null_config, tmp_path: Path, store: str):
     if os.name == "nt":
         cmds = [
             "Write-Output (<<parameter:p1>> + 100)",
@@ -98,7 +105,7 @@ def test_multi_command_action_stdout_parsing(null_config, tmp_path, store):
 
 @pytest.mark.integration
 @pytest.mark.parametrize("store", ["json", "zarr"])
-def test_element_get_group(null_config, tmp_path, store):
+def test_element_get_group(null_config, tmp_path: Path, store: str):
     if os.name == "nt":
         cmd = "Write-Output (<<parameter:p1c>> + 100)"
     else:
@@ -146,7 +153,7 @@ def test_element_get_group(null_config, tmp_path, store):
 
 
 @pytest.mark.integration
-def test_element_get_sub_object_group(null_config, tmp_path):
+def test_element_get_sub_object_group(null_config, tmp_path: Path):
     if os.name == "nt":
         cmd = "Write-Output (<<parameter:p1c>> + 100)"
     else:
@@ -196,7 +203,7 @@ def test_element_get_sub_object_group(null_config, tmp_path):
 
 
 @pytest.mark.integration
-def test_element_get_sub_data_group(null_config, tmp_path):
+def test_element_get_sub_data_group(null_config, tmp_path: Path):
     if os.name == "nt":
         cmd = "Write-Output (<<parameter:p1c>> + 100)"
     else:
@@ -243,7 +250,7 @@ def test_element_get_sub_data_group(null_config, tmp_path):
 
 
 @pytest.mark.integration
-def test_input_source_labels_and_groups(null_config, tmp_path):
+def test_input_source_labels_and_groups(null_config, tmp_path: Path):
     """This is structurally the same as the `fit_yield_functions` MatFlow workflow."""
     if os.name == "nt":
         cmds = [
@@ -338,7 +345,7 @@ def test_input_source_labels_and_groups(null_config, tmp_path):
 
 
 @pytest.mark.integration
-def test_loop_simple(null_config, tmp_path):
+def test_loop_simple(null_config, tmp_path: Path):
     if os.name == "nt":
         cmd = "Write-Output (<<parameter:p1>> + 100)"
     else:
@@ -362,7 +369,8 @@ def test_loop_simple(null_config, tmp_path):
 
 
 @pytest.mark.integration
-
+@pytest.mark.skip(reason="need to fix loop termination for multiple elements")
+def test_loop_termination_multi_element(null_config, tmp_path: Path):
     if os.name == "nt":
         cmds = [
             "Write-Output (<<parameter:p1>> + 100)",
@@ -417,3 +425,140 @@ def test_loop_termination_multi_element(null_config, tmp_path):
     assert elem_1.iterations[0].action_runs[0].status is EARStatus.success
     assert elem_1.iterations[1].action_runs[0].status is EARStatus.success
     assert elem_1.iterations[2].action_runs[0].status is EARStatus.skipped
+
+
+@pytest.mark.integration
+def test_input_file_generator_no_errors_on_skip(null_config, tmp_path):
+    """i.e. we don't try to save a file that hasn't been created because the run was
+    skipped"""
+
+    inp_file = hf.FileSpec(label="my_input_file", name="my_input_file.txt")
+
+    if os.name == "nt":
+        cmds = (
+            "Write-Output ((<<parameter:p0>> + 1))",
+            "Get-Content <<file:my_input_file>>",
+        )
+    else:
+        cmds = ('echo "$((<<parameter:p0>> + 1))"', "cat <<file:my_input_file>>")
+
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p0"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p1"))],
+        actions=[
+            hf.Action(
+                commands=[hf.Command(command=cmds[0], stdout="<<parameter:p1>>")],
+            )
+        ],
+    )
+
+    s2 = hf.TaskSchema(
+        objective="t2",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p0"))],
+        actions=[
+            hf.Action(
+                commands=[hf.Command(cmds[1], stdout="<<int(parameter:p0)>>")],
+                input_file_generators=[
+                    hf.InputFileGenerator(
+                        input_file=inp_file,
+                        inputs=[hf.Parameter("p1")],
+                        script="<<script:input_file_generator_basic.py>>",
+                    ),
+                ],
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            )
+        ],
+    )
+    p0_val = 100
+    t1 = hf.Task(schema=s1, inputs={"p0": p0_val})
+    t2 = hf.Task(schema=s2)
+    wk = hf.Workflow.from_template_data(
+        tasks=[t1, t2],
+        loops=[
+            hf.Loop(
+                tasks=[0, 1],
+                num_iterations=2,
+                termination={"path": "outputs.p0", "condition": {"value.equal_to": 101}},
+            )
+        ],
+        template_name="input_file_generator_skip_test",
+        path=tmp_path,
+    )
+
+    wk.submit(wait=True, add_to_known=False)
+
+    # check correct runs are set to skip due to loop termination:
+    runs = wk.get_all_EARs()
+    assert runs[0].skip_reason is SkipReason.NOT_SKIPPED
+    assert runs[1].skip_reason is SkipReason.NOT_SKIPPED
+    assert runs[2].skip_reason is SkipReason.NOT_SKIPPED
+    assert runs[3].skip_reason is SkipReason.LOOP_TERMINATION
+    assert runs[4].skip_reason is SkipReason.LOOP_TERMINATION
+    assert runs[5].skip_reason is SkipReason.LOOP_TERMINATION
+
+    # run 4 is the input file generator of the second iteration, which should be skipped
+    # check no error from trying to save the input file:
+    std_stream_path = runs[4].get_app_std_path()
+    if std_stream_path.is_file():
+        assert "FileNotFoundError" not in std_stream_path.read_text()
+
+
+@pytest.mark.integration
+@pytest.mark.parametrize("store", ["zarr", "json"])
+def test_get_text_file(null_config, tmp_path, store):
+
+    s1 = hf.TaskSchema("t1", actions=[hf.Action(commands=[hf.Command("echo 'hi!'")])])
+    wk = hf.Workflow.from_template_data(
+        tasks=[hf.Task(s1)], template_name="print_stdout", path=tmp_path, store=store
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+
+    combine = wk.submissions[0].jobscripts[0].resources.combine_jobscript_std
+    filename = "js_0_std.log" if combine else "js_0_stdout.log"
+    rel_path = f"artifacts/submissions/0/js_std/0/{filename}"
+    abs_path = f"{wk.url}/{rel_path}"
+
+    assert wk.get_text_file(rel_path) == "hi!\n"
+    assert wk.get_text_file(abs_path) == "hi!\n"
+
+
+@pytest.mark.integration
+def test_get_text_file_zarr_zip(null_config, tmp_path):
+
+    s1 = hf.TaskSchema("t1", actions=[hf.Action(commands=[hf.Command("echo 'hi!'")])])
+    wk = hf.Workflow.from_template_data(
+        tasks=[hf.Task(s1)], template_name="print_stdout", path=tmp_path, store="zarr"
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+
+    wkz = hf.Workflow(wk.zip())
+
+    combine = wkz.submissions[0].jobscripts[0].resources.combine_jobscript_std
+    filename = "js_0_std.log" if combine else "js_0_stdout.log"
+    rel_path = f"artifacts/submissions/0/js_std/0/{filename}"
+    abs_path = f"{wkz.url}/{rel_path}"
+
+    assert wkz.get_text_file(rel_path) == "hi!\n"
+    assert wkz.get_text_file(abs_path) == "hi!\n"
+
+
+@pytest.mark.parametrize("store", ["zarr", "json"])
+def test_get_text_file_file_not_found(null_config, tmp_path, store):
+    s1 = hf.TaskSchema("t1", actions=[hf.Action(commands=[hf.Command("echo 'hi!'")])])
+    wk = hf.Workflow.from_template_data(
+        tasks=[hf.Task(s1)], template_name="print_stdout", path=tmp_path, store=store
+    )
+    with pytest.raises(FileNotFoundError):
+        wk.get_text_file("non_existent_file.txt")
+
+
+def test_get_text_file_file_not_found_zarr_zip(null_config, tmp_path):
+    s1 = hf.TaskSchema("t1", actions=[hf.Action(commands=[hf.Command("echo 'hi!'")])])
+    wk = hf.Workflow.from_template_data(
+        tasks=[hf.Task(s1)], template_name="print_stdout", path=tmp_path, store="zarr"
+    )
+    wkz = hf.Workflow(wk.zip())
+    with pytest.raises(FileNotFoundError):
+        wkz.get_text_file("non_existent_file.txt")
hpcflow/tests/workflows/test_zip.py (new file)

@@ -0,0 +1,18 @@
+import sys
+import pytest
+
+from hpcflow.app import app as hf
+from hpcflow.sdk.core.test_utils import make_test_data_YAML_workflow
+
+
+@pytest.mark.integration
+@pytest.mark.skipif(
+    sys.version_info < (3, 9), reason="Python 3.8 support is being removed anyway."
+)
+def test_workflow_1_zip(tmp_path, new_null_config):
+    wk = make_test_data_YAML_workflow("workflow_1.yaml", path=tmp_path)
+    wk.submit(wait=True, add_to_known=False, status=False)
+
+    zip_path = wk.zip(path=tmp_path)
+    wkz = hf.Workflow(zip_path)
+    assert wkz.tasks[0].elements[0].outputs.p2.value == "201"