hpcflow-new2 0.2.0a190__py3-none-any.whl → 0.2.0a199__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hpcflow/__pyinstaller/hook-hpcflow.py +1 -0
- hpcflow/_version.py +1 -1
- hpcflow/data/scripts/bad_script.py +2 -0
- hpcflow/data/scripts/do_nothing.py +2 -0
- hpcflow/data/scripts/env_specifier_test/input_file_generator_pass_env_spec.py +4 -0
- hpcflow/data/scripts/env_specifier_test/main_script_test_pass_env_spec.py +8 -0
- hpcflow/data/scripts/env_specifier_test/output_file_parser_pass_env_spec.py +4 -0
- hpcflow/data/scripts/env_specifier_test/v1/input_file_generator_basic.py +4 -0
- hpcflow/data/scripts/env_specifier_test/v1/main_script_test_direct_in_direct_out.py +7 -0
- hpcflow/data/scripts/env_specifier_test/v1/output_file_parser_basic.py +4 -0
- hpcflow/data/scripts/env_specifier_test/v2/main_script_test_direct_in_direct_out.py +7 -0
- hpcflow/data/scripts/input_file_generator_basic.py +3 -0
- hpcflow/data/scripts/input_file_generator_basic_FAIL.py +3 -0
- hpcflow/data/scripts/input_file_generator_test_stdout_stderr.py +8 -0
- hpcflow/data/scripts/main_script_test_direct_in.py +3 -0
- hpcflow/data/scripts/main_script_test_direct_in_direct_out_2.py +6 -0
- hpcflow/data/scripts/main_script_test_direct_in_direct_out_2_fail_allowed.py +6 -0
- hpcflow/data/scripts/main_script_test_direct_in_direct_out_2_fail_allowed_group.py +7 -0
- hpcflow/data/scripts/main_script_test_direct_in_direct_out_3.py +6 -0
- hpcflow/data/scripts/main_script_test_direct_in_group_direct_out_3.py +6 -0
- hpcflow/data/scripts/main_script_test_direct_in_group_one_fail_direct_out_3.py +6 -0
- hpcflow/data/scripts/main_script_test_hdf5_in_obj_2.py +12 -0
- hpcflow/data/scripts/main_script_test_json_out_FAIL.py +3 -0
- hpcflow/data/scripts/main_script_test_shell_env_vars.py +12 -0
- hpcflow/data/scripts/main_script_test_std_out_std_err.py +6 -0
- hpcflow/data/scripts/output_file_parser_basic.py +3 -0
- hpcflow/data/scripts/output_file_parser_basic_FAIL.py +7 -0
- hpcflow/data/scripts/output_file_parser_test_stdout_stderr.py +8 -0
- hpcflow/data/scripts/script_exit_test.py +5 -0
- hpcflow/data/template_components/environments.yaml +1 -1
- hpcflow/sdk/__init__.py +5 -0
- hpcflow/sdk/app.py +150 -89
- hpcflow/sdk/cli.py +263 -84
- hpcflow/sdk/cli_common.py +99 -5
- hpcflow/sdk/config/callbacks.py +38 -1
- hpcflow/sdk/config/config.py +102 -13
- hpcflow/sdk/config/errors.py +19 -5
- hpcflow/sdk/config/types.py +3 -0
- hpcflow/sdk/core/__init__.py +25 -1
- hpcflow/sdk/core/actions.py +914 -262
- hpcflow/sdk/core/cache.py +76 -34
- hpcflow/sdk/core/command_files.py +14 -128
- hpcflow/sdk/core/commands.py +35 -6
- hpcflow/sdk/core/element.py +122 -50
- hpcflow/sdk/core/errors.py +58 -2
- hpcflow/sdk/core/execute.py +207 -0
- hpcflow/sdk/core/loop.py +408 -50
- hpcflow/sdk/core/loop_cache.py +4 -4
- hpcflow/sdk/core/parameters.py +382 -37
- hpcflow/sdk/core/run_dir_files.py +13 -40
- hpcflow/sdk/core/skip_reason.py +7 -0
- hpcflow/sdk/core/task.py +119 -30
- hpcflow/sdk/core/task_schema.py +68 -0
- hpcflow/sdk/core/test_utils.py +66 -27
- hpcflow/sdk/core/types.py +54 -1
- hpcflow/sdk/core/utils.py +78 -7
- hpcflow/sdk/core/workflow.py +1538 -336
- hpcflow/sdk/data/workflow_spec_schema.yaml +2 -0
- hpcflow/sdk/demo/cli.py +7 -0
- hpcflow/sdk/helper/cli.py +1 -0
- hpcflow/sdk/log.py +42 -15
- hpcflow/sdk/persistence/base.py +405 -53
- hpcflow/sdk/persistence/json.py +177 -52
- hpcflow/sdk/persistence/pending.py +237 -69
- hpcflow/sdk/persistence/store_resource.py +3 -2
- hpcflow/sdk/persistence/types.py +15 -4
- hpcflow/sdk/persistence/zarr.py +928 -81
- hpcflow/sdk/submission/jobscript.py +1408 -489
- hpcflow/sdk/submission/schedulers/__init__.py +40 -5
- hpcflow/sdk/submission/schedulers/direct.py +33 -19
- hpcflow/sdk/submission/schedulers/sge.py +51 -16
- hpcflow/sdk/submission/schedulers/slurm.py +44 -16
- hpcflow/sdk/submission/schedulers/utils.py +7 -2
- hpcflow/sdk/submission/shells/base.py +68 -20
- hpcflow/sdk/submission/shells/bash.py +222 -129
- hpcflow/sdk/submission/shells/powershell.py +200 -150
- hpcflow/sdk/submission/submission.py +852 -119
- hpcflow/sdk/submission/types.py +18 -21
- hpcflow/sdk/typing.py +24 -5
- hpcflow/sdk/utils/arrays.py +71 -0
- hpcflow/sdk/utils/deferred_file.py +55 -0
- hpcflow/sdk/utils/hashing.py +16 -0
- hpcflow/sdk/utils/patches.py +12 -0
- hpcflow/sdk/utils/strings.py +33 -0
- hpcflow/tests/api/test_api.py +32 -0
- hpcflow/tests/conftest.py +19 -0
- hpcflow/tests/data/multi_path_sequences.yaml +29 -0
- hpcflow/tests/data/workflow_test_run_abort.yaml +34 -35
- hpcflow/tests/schedulers/sge/test_sge_submission.py +36 -0
- hpcflow/tests/scripts/test_input_file_generators.py +282 -0
- hpcflow/tests/scripts/test_main_scripts.py +821 -70
- hpcflow/tests/scripts/test_non_snippet_script.py +46 -0
- hpcflow/tests/scripts/test_ouput_file_parsers.py +353 -0
- hpcflow/tests/shells/wsl/test_wsl_submission.py +6 -0
- hpcflow/tests/unit/test_action.py +176 -0
- hpcflow/tests/unit/test_app.py +20 -0
- hpcflow/tests/unit/test_cache.py +46 -0
- hpcflow/tests/unit/test_cli.py +133 -0
- hpcflow/tests/unit/test_config.py +122 -1
- hpcflow/tests/unit/test_element_iteration.py +47 -0
- hpcflow/tests/unit/test_jobscript_unit.py +757 -0
- hpcflow/tests/unit/test_loop.py +1332 -27
- hpcflow/tests/unit/test_meta_task.py +325 -0
- hpcflow/tests/unit/test_multi_path_sequences.py +229 -0
- hpcflow/tests/unit/test_parameter.py +13 -0
- hpcflow/tests/unit/test_persistence.py +190 -8
- hpcflow/tests/unit/test_run.py +109 -3
- hpcflow/tests/unit/test_run_directories.py +29 -0
- hpcflow/tests/unit/test_shell.py +20 -0
- hpcflow/tests/unit/test_submission.py +5 -76
- hpcflow/tests/unit/utils/test_arrays.py +40 -0
- hpcflow/tests/unit/utils/test_deferred_file_writer.py +34 -0
- hpcflow/tests/unit/utils/test_hashing.py +65 -0
- hpcflow/tests/unit/utils/test_patches.py +5 -0
- hpcflow/tests/unit/utils/test_redirect_std.py +50 -0
- hpcflow/tests/workflows/__init__.py +0 -0
- hpcflow/tests/workflows/test_directory_structure.py +31 -0
- hpcflow/tests/workflows/test_jobscript.py +332 -0
- hpcflow/tests/workflows/test_run_status.py +198 -0
- hpcflow/tests/workflows/test_skip_downstream.py +696 -0
- hpcflow/tests/workflows/test_submission.py +140 -0
- hpcflow/tests/workflows/test_workflows.py +142 -2
- hpcflow/tests/workflows/test_zip.py +18 -0
- hpcflow/viz_demo.ipynb +6587 -3
- {hpcflow_new2-0.2.0a190.dist-info → hpcflow_new2-0.2.0a199.dist-info}/METADATA +7 -4
- hpcflow_new2-0.2.0a199.dist-info/RECORD +221 -0
- hpcflow_new2-0.2.0a190.dist-info/RECORD +0 -165
- {hpcflow_new2-0.2.0a190.dist-info → hpcflow_new2-0.2.0a199.dist-info}/LICENSE +0 -0
- {hpcflow_new2-0.2.0a190.dist-info → hpcflow_new2-0.2.0a199.dist-info}/WHEEL +0 -0
- {hpcflow_new2-0.2.0a190.dist-info → hpcflow_new2-0.2.0a199.dist-info}/entry_points.txt +0 -0
hpcflow/tests/workflows/test_skip_downstream.py
@@ -0,0 +1,696 @@
+import os
+from hpcflow.sdk.core.test_utils import make_schemas
+import pytest
+
+from hpcflow.app import app as hf
+from hpcflow.sdk.core.actions import EARStatus
+
+
+@pytest.mark.integration
+def test_skip_downstream_on_failure_true_combine_scripts(null_config, tmp_path):
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_direct_in_direct_out.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            )
+        ],
+    )
+
+    s2 = hf.TaskSchema(
+        objective="t2",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p2"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p3"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_direct_in_direct_out_2.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            )
+        ],
+    )
+
+    s3 = hf.TaskSchema(
+        objective="t3",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p3"), group="my_group")],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p4"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_direct_in_group_direct_out_3.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            )
+        ],
+    )
+
+    tasks = [
+        hf.Task(
+            s1,
+            sequences=[
+                hf.ValueSequence(path="inputs.p1", values=[101, "NONSENSE VALUE"])
+            ],
+        ),
+        hf.Task(s2, groups=[hf.ElementGroup(name="my_group")]),
+        hf.Task(s3),
+    ]
+
+    wk = hf.Workflow.from_template_data(
+        template_name="test_skip_downstream_on_failure",
+        path=tmp_path,
+        tasks=tasks,
+        resources={
+            "any": {
+                "write_app_logs": True,
+                "skip_downstream_on_failure": True,
+                "combine_scripts": True,
+            }
+        },
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+    runs = wk.get_all_EARs()
+
+    assert runs[0].status is EARStatus.success
+    assert runs[1].status is EARStatus.error  # original error
+    assert runs[2].status is EARStatus.success
+    assert runs[3].status is EARStatus.skipped  # skipped due to run 1 error
+    assert runs[4].status is EARStatus.skipped  # skipped due to run 3 skipped
+
+
+@pytest.mark.integration
+def test_skip_downstream_on_failure_false_combine_scripts(null_config, tmp_path):
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_direct_in_direct_out.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            )
+        ],
+    )
+
+    s2 = hf.TaskSchema(
+        objective="t2",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p2"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p3"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_direct_in_direct_out_2.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            )
+        ],
+    )
+
+    s3 = hf.TaskSchema(
+        objective="t3",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p3"), group="my_group")],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p4"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_direct_in_group_direct_out_3.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            )
+        ],
+    )
+
+    tasks = [
+        hf.Task(
+            s1,
+            sequences=[
+                hf.ValueSequence(path="inputs.p1", values=[101, "NONSENSE VALUE"])
+            ],
+        ),
+        hf.Task(s2, groups=[hf.ElementGroup(name="my_group")]),
+        hf.Task(s3),
+    ]
+
+    wk = hf.Workflow.from_template_data(
+        template_name="test_skip_downstream_on_failure",
+        path=tmp_path,
+        tasks=tasks,
+        resources={
+            "any": {
+                "write_app_logs": True,
+                "skip_downstream_on_failure": False,
+                "combine_scripts": True,
+            }
+        },
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+    runs = wk.get_all_EARs()
+
+    assert runs[0].status is EARStatus.success
+    assert runs[1].status is EARStatus.error  # original error
+    assert runs[2].status is EARStatus.success
+    assert runs[3].status is EARStatus.error  # relies on run 1 output so fails
+    assert runs[4].status is EARStatus.error  # relies on run 3 output so fails
+
+
+@pytest.mark.integration
+def test_skip_downstream_on_failure_true(null_config, tmp_path):
+    s1, s2 = make_schemas(
+        ({"p1": None}, ("p2",), "t1"),
+        ({"p2": None}, ("p3",), "t2"),
+    )
+    s3 = hf.TaskSchema(
+        "t3",
+        inputs=[hf.SchemaInput("p3", group="my_group")],
+        outputs=[hf.SchemaOutput("p4")],
+        actions=[
+            hf.Action(
+                commands=[
+                    hf.Command(
+                        "echo $(( <<sum(parameter:p3)>> ))",
+                        stdout="<<int(parameter:p4)>>",
+                    )
+                ]
+            )
+        ],
+    )
+
+    tasks = [
+        hf.Task(
+            s1,
+            sequences=[
+                hf.ValueSequence(path="inputs.p1", values=[101, "NONSENSE VALUE"])
+            ],
+        ),
+        hf.Task(s2, groups=[hf.ElementGroup(name="my_group")]),
+        hf.Task(s3),
+    ]
+
+    wk = hf.Workflow.from_template_data(
+        template_name="test_skip_downstream_on_failure",
+        path=tmp_path,
+        tasks=tasks,
+        resources={"any": {"write_app_logs": True, "skip_downstream_on_failure": True}},
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+    runs = wk.get_all_EARs()
+
+    assert runs[0].status is EARStatus.success
+    assert runs[1].status is EARStatus.error  # original error
+    assert runs[2].status is EARStatus.success
+    assert runs[3].status is EARStatus.skipped  # skipped due to run 1 error
+    assert runs[4].status is EARStatus.skipped  # skipped due to run 3 skipped
+
+
+@pytest.mark.integration
+def test_skip_downstream_on_failure_false(null_config, tmp_path):
+    s1, s2 = make_schemas(
+        ({"p1": None}, ("p2",), "t1"),
+        ({"p2": None}, ("p3",), "t2"),
+    )
+    s3 = hf.TaskSchema(
+        "t3",
+        inputs=[hf.SchemaInput("p3", group="my_group")],
+        outputs=[hf.SchemaOutput("p4")],
+        actions=[
+            hf.Action(
+                commands=[
+                    hf.Command(
+                        "echo $(( <<sum(parameter:p3)>> ))",
+                        stdout="<<int(parameter:p4)>>",
+                    )
+                ]
+            )
+        ],
+    )
+
+    tasks = [
+        hf.Task(
+            s1,
+            sequences=[
+                hf.ValueSequence(path="inputs.p1", values=[101, "NONSENSE VALUE"])
+            ],
+        ),
+        hf.Task(s2, groups=[hf.ElementGroup(name="my_group")]),
+        hf.Task(s3),
+    ]
+
+    wk = hf.Workflow.from_template_data(
+        template_name="test_skip_downstream_on_failure",
+        path=tmp_path,
+        tasks=tasks,
+        resources={"any": {"write_app_logs": True, "skip_downstream_on_failure": False}},
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+    runs = wk.get_all_EARs()
+
+    assert runs[0].status is EARStatus.success
+    assert runs[1].status is EARStatus.error  # original error
+    assert runs[2].status is EARStatus.success
+    assert runs[3].status is EARStatus.error  # relies on run 1 output so fails
+    assert runs[4].status is EARStatus.error  # relies on run 3 output so fails
+
+
+@pytest.mark.integration
+@pytest.mark.parametrize("allow_failed_dependencies", ["UNSET", None, False, 0.0, 0])
+@pytest.mark.parametrize("combine_scripts", [True, False])
+def test_skip_downstream_on_failure_false_expected_failure(
+    null_config, tmp_path, allow_failed_dependencies, combine_scripts
+):
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_direct_in_direct_out.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            )
+        ],
+    )
+
+    sch_inp_args = {"parameter": hf.Parameter("p2")}
+    if allow_failed_dependencies != "UNSET":
+        sch_inp_args["allow_failed_dependencies"] = allow_failed_dependencies
+
+    # schema with a script that handles missing data (p2):
+    s2 = hf.TaskSchema(
+        objective="t2",
+        inputs=[hf.SchemaInput(**sch_inp_args)],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p3"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_direct_in_direct_out_2_fail_allowed.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            )
+        ],
+    )
+    tasks = [
+        hf.Task(s1, inputs={"p1": "NONSENSE VALUE"}),  # will fail
+        hf.Task(s2),  # depends on t1, will fail
+    ]
+
+    wk = hf.Workflow.from_template_data(
+        template_name="test_allowed_failed_dependencies",
+        path=tmp_path,
+        tasks=tasks,
+        resources={
+            "any": {
+                "write_app_logs": True,
+                "skip_downstream_on_failure": False,
+                "combine_scripts": combine_scripts,
+            }
+        },
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+    runs = wk.get_all_EARs()
+
+    assert runs[0].status is EARStatus.error
+    assert runs[1].status is EARStatus.error
+
+
+@pytest.mark.integration
+@pytest.mark.parametrize("allow_failed_dependencies", [True, 1.0, 1])
+@pytest.mark.parametrize("combine_scripts", [True, False])
+def test_skip_downstream_on_failure_false_handled_failure_allow_failed_dependencies(
+    null_config, tmp_path, allow_failed_dependencies, combine_scripts
+):
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_direct_in_direct_out.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            )
+        ],
+    )
+
+    # schema with a script that handles missing data (p2):
+    s2 = hf.TaskSchema(
+        objective="t2",
+        inputs=[
+            hf.SchemaInput(
+                parameter=hf.Parameter("p2"),
+                allow_failed_dependencies=allow_failed_dependencies,
+            )
+        ],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p3"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_direct_in_direct_out_2_fail_allowed.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            )
+        ],
+    )
+    tasks = [
+        hf.Task(s1, inputs={"p1": "NONSENSE VALUE"}),  # will fail
+        hf.Task(s2),  # should succeed
+    ]
+
+    wk = hf.Workflow.from_template_data(
+        template_name="test_allowed_failed_dependencies",
+        path=tmp_path,
+        tasks=tasks,
+        resources={
+            "any": {
+                "write_app_logs": True,
+                "skip_downstream_on_failure": False,
+                "combine_scripts": combine_scripts,
+            }
+        },
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+    runs = wk.get_all_EARs()
+
+    assert runs[0].status is EARStatus.error
+    assert runs[1].status is EARStatus.success
+
+
+@pytest.mark.integration
+@pytest.mark.parametrize(
+    "allow_failed_dependencies",
+    [
+        "UNSET",
+        None,
+        False,
+        0.4,
+        1,
+    ],
+)
+@pytest.mark.parametrize("combine_scripts", [True, False])
+def test_skip_downstream_on_failure_false_expected_failure_group(
+    null_config, tmp_path, allow_failed_dependencies, combine_scripts
+):
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_direct_in_direct_out.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            )
+        ],
+    )
+
+    sch_inp_args = {"parameter": hf.Parameter("p2"), "group": "my_group"}
+    if allow_failed_dependencies != "UNSET":
+        sch_inp_args["allow_failed_dependencies"] = allow_failed_dependencies
+
+    # schema with a script that handles missing data (p2):
+    s2 = hf.TaskSchema(
+        objective="t2",
+        inputs=[hf.SchemaInput(**sch_inp_args)],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p3"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_direct_in_direct_out_2_fail_allowed_group.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            )
+        ],
+    )
+    tasks = [
+        hf.Task(
+            s1,
+            sequences=[
+                hf.ValueSequence(
+                    path="inputs.p1", values=[100, "NONSENSE VALUE", "NONSENSE VALUE"]
+                )
+            ],
+            groups=[hf.ElementGroup("my_group")],
+        ),  # two thirds will fail
+        hf.Task(s2),  # should succeed
+    ]
+
+    wk = hf.Workflow.from_template_data(
+        template_name="test_allowed_failed_dependencies",
+        path=tmp_path,
+        tasks=tasks,
+        resources={
+            "any": {
+                "write_app_logs": True,
+                "skip_downstream_on_failure": False,
+                "combine_scripts": combine_scripts,
+            }
+        },
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+    runs = wk.get_all_EARs()
+
+    assert runs[0].status is EARStatus.success
+    assert runs[1].status is EARStatus.error
+    assert runs[2].status is EARStatus.error
+    assert runs[3].status is EARStatus.error
+
+
+@pytest.mark.integration
+@pytest.mark.parametrize("allow_failed_dependencies", [True, 0.4, 1])
+@pytest.mark.parametrize("combine_scripts", [True, False])
+def test_skip_downstream_on_failure_false_handled_failure_allow_failed_dependencies_group(
+    null_config, tmp_path, allow_failed_dependencies, combine_scripts
+):
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_direct_in_direct_out.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            )
+        ],
+    )
+
+    # schema with a script that handles missing data (p2):
+    s2 = hf.TaskSchema(
+        objective="t2",
+        inputs=[
+            hf.SchemaInput(
+                parameter=hf.Parameter("p2"),
+                allow_failed_dependencies=allow_failed_dependencies,
+                group="my_group",
+            )
+        ],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p3"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_direct_in_direct_out_2_fail_allowed_group.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+            )
+        ],
+    )
+    tasks = [
+        hf.Task(
+            s1,
+            sequences=[
+                hf.ValueSequence(path="inputs.p1", values=[100, 200, "NONSENSE VALUE"])
+            ],
+            groups=[hf.ElementGroup("my_group")],
+        ),  # one third will fail
+        hf.Task(s2),  # should succeed
+    ]
+
+    wk = hf.Workflow.from_template_data(
+        template_name="test_allowed_failed_dependencies",
+        path=tmp_path,
+        tasks=tasks,
+        resources={
+            "any": {
+                "write_app_logs": True,
+                "skip_downstream_on_failure": False,
+                "combine_scripts": combine_scripts,
+            }
+        },
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+    runs = wk.get_all_EARs()
+
+    assert runs[0].status is EARStatus.success
+    assert runs[1].status is EARStatus.success
+    assert runs[2].status is EARStatus.error
+    assert runs[3].status is EARStatus.success
+
+
+@pytest.mark.integration
+def test_unset_parameters_found_when_writing_commands(null_config, tmp_path):
+    cmd_ps = "echo <<parameter:p1>>; exit 1"
+    cmd_bash = "exit; echo <<parameter:p1>>"
+    cmd = cmd_ps if os.name == "nt" else cmd_bash
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput("p1")],
+        outputs=[hf.SchemaInput("p2")],
+        actions=[
+            hf.Action(commands=[hf.Command(command=cmd, stdout="<<parameter:p2>>")])
+        ],  # will fail
+    )
+    s2 = make_schemas(
+        ({"p2": None}, ("p3",), "t2"),  # command-line based action
+    )
+    tasks = [
+        hf.Task(s1, inputs={"p1": 123}),  # will fail, and not set p2 for next task
+        hf.Task(s2),  # will fail when writing commands
+    ]
+
+    wk = hf.Workflow.from_template_data(
+        template_name="test_unset_parameters_in_cmdline",
+        path=tmp_path,
+        tasks=tasks,
+        resources={
+            "any": {
+                "write_app_logs": True,
+                "skip_downstream_on_failure": False,
+            }
+        },
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+    runs = wk.get_all_EARs()
+    assert runs[0].status is EARStatus.error
+    assert runs[1].status is EARStatus.error
+
+
+@pytest.mark.integration
+def test_unset_parameters_found_when_writing_script_input_file(null_config, tmp_path):
+    cmd_ps = "echo <<parameter:p0>>; exit 1"
+    cmd_bash = "exit; echo <<parameter:p0>>"
+    cmd = cmd_ps if os.name == "nt" else cmd_bash
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput("p0")],
+        outputs=[hf.SchemaInput("p1")],
+        actions=[
+            hf.Action(commands=[hf.Command(command=cmd, stdout="<<parameter:p1>>")])
+        ],  # will fail
+    )
+
+    s2 = hf.TaskSchema(
+        objective="t2",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_json_in_json_out.py>>",
+                script_data_in="json",
+                script_data_out="json",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+                requires_dir=True,
+            )
+        ],
+    )
+
+    tasks = [
+        hf.Task(s1, inputs={"p0": 123}),  # will fail, and not set p2 for next task
+        hf.Task(s2),  # will fail when writing input JSON file
+    ]
+
+    wk = hf.Workflow.from_template_data(
+        template_name="test_unset_parameters_in_script_input_file",
+        path=tmp_path,
+        tasks=tasks,
+        resources={
+            "any": {
+                "write_app_logs": True,
+                "skip_downstream_on_failure": False,
+            }
+        },
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+    runs = wk.get_all_EARs()
+    assert runs[0].status is EARStatus.error
+    assert runs[1].status is EARStatus.error
+
+
+@pytest.mark.integration
+@pytest.mark.parametrize("combine_scripts", [True, False])
+def test_unset_parameters_found_when_py_script_gets_direct_inputs(
+    null_config, tmp_path, combine_scripts
+):
+    cmd_ps = "echo <<parameter:p0>>; exit 1"
+    cmd_bash = "exit; echo <<parameter:p0>>"
+    cmd = cmd_ps if os.name == "nt" else cmd_bash
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput("p0")],
+        outputs=[hf.SchemaInput("p1")],
+        actions=[
+            hf.Action(commands=[hf.Command(command=cmd, stdout="<<parameter:p1>>")])
+        ],  # will fail
+    )
+
+    s2 = hf.TaskSchema(
+        objective="t2",
+        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
+        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
+        actions=[
+            hf.Action(
+                script="<<script:main_script_test_direct_in_direct_out.py>>",
+                script_data_in="direct",
+                script_data_out="direct",
+                script_exe="python_script",
+                environments=[hf.ActionEnvironment(environment="python_env")],
+                requires_dir=True,
+            )
+        ],
+    )
+
+    tasks = [
+        hf.Task(s1, inputs={"p0": 123}),  # will fail, and not set p2 for next task
+        hf.Task(s2),  # will fail when retrieving input p2 within generated script
+    ]
+
+    wk = hf.Workflow.from_template_data(
+        template_name="test_unset_parameters_in_py_script",
+        path=tmp_path,
+        tasks=tasks,
+        resources={
+            "any": {
+                "write_app_logs": True,
+                "skip_downstream_on_failure": False,
+                "combine_scripts": combine_scripts,
+            }
+        },
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+    runs = wk.get_all_EARs()
+    assert runs[0].status is EARStatus.error
+    assert runs[1].status is EARStatus.error