hpcflow-new2 0.2.0a189__py3-none-any.whl → 0.2.0a199__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (176)
  1. hpcflow/__pyinstaller/hook-hpcflow.py +9 -6
  2. hpcflow/_version.py +1 -1
  3. hpcflow/app.py +1 -0
  4. hpcflow/data/scripts/bad_script.py +2 -0
  5. hpcflow/data/scripts/do_nothing.py +2 -0
  6. hpcflow/data/scripts/env_specifier_test/input_file_generator_pass_env_spec.py +4 -0
  7. hpcflow/data/scripts/env_specifier_test/main_script_test_pass_env_spec.py +8 -0
  8. hpcflow/data/scripts/env_specifier_test/output_file_parser_pass_env_spec.py +4 -0
  9. hpcflow/data/scripts/env_specifier_test/v1/input_file_generator_basic.py +4 -0
  10. hpcflow/data/scripts/env_specifier_test/v1/main_script_test_direct_in_direct_out.py +7 -0
  11. hpcflow/data/scripts/env_specifier_test/v1/output_file_parser_basic.py +4 -0
  12. hpcflow/data/scripts/env_specifier_test/v2/main_script_test_direct_in_direct_out.py +7 -0
  13. hpcflow/data/scripts/input_file_generator_basic.py +3 -0
  14. hpcflow/data/scripts/input_file_generator_basic_FAIL.py +3 -0
  15. hpcflow/data/scripts/input_file_generator_test_stdout_stderr.py +8 -0
  16. hpcflow/data/scripts/main_script_test_direct_in.py +3 -0
  17. hpcflow/data/scripts/main_script_test_direct_in_direct_out_2.py +6 -0
  18. hpcflow/data/scripts/main_script_test_direct_in_direct_out_2_fail_allowed.py +6 -0
  19. hpcflow/data/scripts/main_script_test_direct_in_direct_out_2_fail_allowed_group.py +7 -0
  20. hpcflow/data/scripts/main_script_test_direct_in_direct_out_3.py +6 -0
  21. hpcflow/data/scripts/main_script_test_direct_in_group_direct_out_3.py +6 -0
  22. hpcflow/data/scripts/main_script_test_direct_in_group_one_fail_direct_out_3.py +6 -0
  23. hpcflow/data/scripts/main_script_test_hdf5_in_obj.py +1 -1
  24. hpcflow/data/scripts/main_script_test_hdf5_in_obj_2.py +12 -0
  25. hpcflow/data/scripts/main_script_test_hdf5_out_obj.py +1 -1
  26. hpcflow/data/scripts/main_script_test_json_out_FAIL.py +3 -0
  27. hpcflow/data/scripts/main_script_test_shell_env_vars.py +12 -0
  28. hpcflow/data/scripts/main_script_test_std_out_std_err.py +6 -0
  29. hpcflow/data/scripts/output_file_parser_basic.py +3 -0
  30. hpcflow/data/scripts/output_file_parser_basic_FAIL.py +7 -0
  31. hpcflow/data/scripts/output_file_parser_test_stdout_stderr.py +8 -0
  32. hpcflow/data/scripts/script_exit_test.py +5 -0
  33. hpcflow/data/template_components/environments.yaml +1 -1
  34. hpcflow/sdk/__init__.py +26 -15
  35. hpcflow/sdk/app.py +2192 -768
  36. hpcflow/sdk/cli.py +506 -296
  37. hpcflow/sdk/cli_common.py +105 -7
  38. hpcflow/sdk/config/__init__.py +1 -1
  39. hpcflow/sdk/config/callbacks.py +115 -43
  40. hpcflow/sdk/config/cli.py +126 -103
  41. hpcflow/sdk/config/config.py +674 -318
  42. hpcflow/sdk/config/config_file.py +131 -95
  43. hpcflow/sdk/config/errors.py +125 -84
  44. hpcflow/sdk/config/types.py +148 -0
  45. hpcflow/sdk/core/__init__.py +25 -1
  46. hpcflow/sdk/core/actions.py +1771 -1059
  47. hpcflow/sdk/core/app_aware.py +24 -0
  48. hpcflow/sdk/core/cache.py +139 -79
  49. hpcflow/sdk/core/command_files.py +263 -287
  50. hpcflow/sdk/core/commands.py +145 -112
  51. hpcflow/sdk/core/element.py +828 -535
  52. hpcflow/sdk/core/enums.py +192 -0
  53. hpcflow/sdk/core/environment.py +74 -93
  54. hpcflow/sdk/core/errors.py +455 -52
  55. hpcflow/sdk/core/execute.py +207 -0
  56. hpcflow/sdk/core/json_like.py +540 -272
  57. hpcflow/sdk/core/loop.py +751 -347
  58. hpcflow/sdk/core/loop_cache.py +164 -47
  59. hpcflow/sdk/core/object_list.py +370 -207
  60. hpcflow/sdk/core/parameters.py +1100 -627
  61. hpcflow/sdk/core/rule.py +59 -41
  62. hpcflow/sdk/core/run_dir_files.py +21 -37
  63. hpcflow/sdk/core/skip_reason.py +7 -0
  64. hpcflow/sdk/core/task.py +1649 -1339
  65. hpcflow/sdk/core/task_schema.py +308 -196
  66. hpcflow/sdk/core/test_utils.py +191 -114
  67. hpcflow/sdk/core/types.py +440 -0
  68. hpcflow/sdk/core/utils.py +485 -309
  69. hpcflow/sdk/core/validation.py +82 -9
  70. hpcflow/sdk/core/workflow.py +2544 -1178
  71. hpcflow/sdk/core/zarr_io.py +98 -137
  72. hpcflow/sdk/data/workflow_spec_schema.yaml +2 -0
  73. hpcflow/sdk/demo/cli.py +53 -33
  74. hpcflow/sdk/helper/cli.py +18 -15
  75. hpcflow/sdk/helper/helper.py +75 -63
  76. hpcflow/sdk/helper/watcher.py +61 -28
  77. hpcflow/sdk/log.py +122 -71
  78. hpcflow/sdk/persistence/__init__.py +8 -31
  79. hpcflow/sdk/persistence/base.py +1360 -606
  80. hpcflow/sdk/persistence/defaults.py +6 -0
  81. hpcflow/sdk/persistence/discovery.py +38 -0
  82. hpcflow/sdk/persistence/json.py +568 -188
  83. hpcflow/sdk/persistence/pending.py +382 -179
  84. hpcflow/sdk/persistence/store_resource.py +39 -23
  85. hpcflow/sdk/persistence/types.py +318 -0
  86. hpcflow/sdk/persistence/utils.py +14 -11
  87. hpcflow/sdk/persistence/zarr.py +1337 -433
  88. hpcflow/sdk/runtime.py +44 -41
  89. hpcflow/sdk/submission/{jobscript_info.py → enums.py} +39 -12
  90. hpcflow/sdk/submission/jobscript.py +1651 -692
  91. hpcflow/sdk/submission/schedulers/__init__.py +167 -39
  92. hpcflow/sdk/submission/schedulers/direct.py +121 -81
  93. hpcflow/sdk/submission/schedulers/sge.py +170 -129
  94. hpcflow/sdk/submission/schedulers/slurm.py +291 -268
  95. hpcflow/sdk/submission/schedulers/utils.py +12 -2
  96. hpcflow/sdk/submission/shells/__init__.py +14 -15
  97. hpcflow/sdk/submission/shells/base.py +150 -29
  98. hpcflow/sdk/submission/shells/bash.py +283 -173
  99. hpcflow/sdk/submission/shells/os_version.py +31 -30
  100. hpcflow/sdk/submission/shells/powershell.py +228 -170
  101. hpcflow/sdk/submission/submission.py +1014 -335
  102. hpcflow/sdk/submission/types.py +140 -0
  103. hpcflow/sdk/typing.py +182 -12
  104. hpcflow/sdk/utils/arrays.py +71 -0
  105. hpcflow/sdk/utils/deferred_file.py +55 -0
  106. hpcflow/sdk/utils/hashing.py +16 -0
  107. hpcflow/sdk/utils/patches.py +12 -0
  108. hpcflow/sdk/utils/strings.py +33 -0
  109. hpcflow/tests/api/test_api.py +32 -0
  110. hpcflow/tests/conftest.py +27 -6
  111. hpcflow/tests/data/multi_path_sequences.yaml +29 -0
  112. hpcflow/tests/data/workflow_test_run_abort.yaml +34 -35
  113. hpcflow/tests/schedulers/sge/test_sge_submission.py +36 -0
  114. hpcflow/tests/schedulers/slurm/test_slurm_submission.py +5 -2
  115. hpcflow/tests/scripts/test_input_file_generators.py +282 -0
  116. hpcflow/tests/scripts/test_main_scripts.py +866 -85
  117. hpcflow/tests/scripts/test_non_snippet_script.py +46 -0
  118. hpcflow/tests/scripts/test_ouput_file_parsers.py +353 -0
  119. hpcflow/tests/shells/wsl/test_wsl_submission.py +12 -4
  120. hpcflow/tests/unit/test_action.py +262 -75
  121. hpcflow/tests/unit/test_action_rule.py +9 -4
  122. hpcflow/tests/unit/test_app.py +33 -6
  123. hpcflow/tests/unit/test_cache.py +46 -0
  124. hpcflow/tests/unit/test_cli.py +134 -1
  125. hpcflow/tests/unit/test_command.py +71 -54
  126. hpcflow/tests/unit/test_config.py +142 -16
  127. hpcflow/tests/unit/test_config_file.py +21 -18
  128. hpcflow/tests/unit/test_element.py +58 -62
  129. hpcflow/tests/unit/test_element_iteration.py +50 -1
  130. hpcflow/tests/unit/test_element_set.py +29 -19
  131. hpcflow/tests/unit/test_group.py +4 -2
  132. hpcflow/tests/unit/test_input_source.py +116 -93
  133. hpcflow/tests/unit/test_input_value.py +29 -24
  134. hpcflow/tests/unit/test_jobscript_unit.py +757 -0
  135. hpcflow/tests/unit/test_json_like.py +44 -35
  136. hpcflow/tests/unit/test_loop.py +1396 -84
  137. hpcflow/tests/unit/test_meta_task.py +325 -0
  138. hpcflow/tests/unit/test_multi_path_sequences.py +229 -0
  139. hpcflow/tests/unit/test_object_list.py +17 -12
  140. hpcflow/tests/unit/test_parameter.py +29 -7
  141. hpcflow/tests/unit/test_persistence.py +237 -42
  142. hpcflow/tests/unit/test_resources.py +20 -18
  143. hpcflow/tests/unit/test_run.py +117 -6
  144. hpcflow/tests/unit/test_run_directories.py +29 -0
  145. hpcflow/tests/unit/test_runtime.py +2 -1
  146. hpcflow/tests/unit/test_schema_input.py +23 -15
  147. hpcflow/tests/unit/test_shell.py +23 -2
  148. hpcflow/tests/unit/test_slurm.py +8 -7
  149. hpcflow/tests/unit/test_submission.py +38 -89
  150. hpcflow/tests/unit/test_task.py +352 -247
  151. hpcflow/tests/unit/test_task_schema.py +33 -20
  152. hpcflow/tests/unit/test_utils.py +9 -11
  153. hpcflow/tests/unit/test_value_sequence.py +15 -12
  154. hpcflow/tests/unit/test_workflow.py +114 -83
  155. hpcflow/tests/unit/test_workflow_template.py +0 -1
  156. hpcflow/tests/unit/utils/test_arrays.py +40 -0
  157. hpcflow/tests/unit/utils/test_deferred_file_writer.py +34 -0
  158. hpcflow/tests/unit/utils/test_hashing.py +65 -0
  159. hpcflow/tests/unit/utils/test_patches.py +5 -0
  160. hpcflow/tests/unit/utils/test_redirect_std.py +50 -0
  161. hpcflow/tests/workflows/__init__.py +0 -0
  162. hpcflow/tests/workflows/test_directory_structure.py +31 -0
  163. hpcflow/tests/workflows/test_jobscript.py +334 -1
  164. hpcflow/tests/workflows/test_run_status.py +198 -0
  165. hpcflow/tests/workflows/test_skip_downstream.py +696 -0
  166. hpcflow/tests/workflows/test_submission.py +140 -0
  167. hpcflow/tests/workflows/test_workflows.py +160 -15
  168. hpcflow/tests/workflows/test_zip.py +18 -0
  169. hpcflow/viz_demo.ipynb +6587 -3
  170. {hpcflow_new2-0.2.0a189.dist-info → hpcflow_new2-0.2.0a199.dist-info}/METADATA +8 -4
  171. hpcflow_new2-0.2.0a199.dist-info/RECORD +221 -0
  172. hpcflow/sdk/core/parallel.py +0 -21
  173. hpcflow_new2-0.2.0a189.dist-info/RECORD +0 -158
  174. {hpcflow_new2-0.2.0a189.dist-info → hpcflow_new2-0.2.0a199.dist-info}/LICENSE +0 -0
  175. {hpcflow_new2-0.2.0a189.dist-info → hpcflow_new2-0.2.0a199.dist-info}/WHEEL +0 -0
  176. {hpcflow_new2-0.2.0a189.dist-info → hpcflow_new2-0.2.0a199.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,696 @@
1
+ import os
2
+ from hpcflow.sdk.core.test_utils import make_schemas
3
+ import pytest
4
+
5
+ from hpcflow.app import app as hf
6
+ from hpcflow.sdk.core.actions import EARStatus
7
+
8
+
9
@pytest.mark.integration
def test_skip_downstream_on_failure_true_combine_scripts(null_config, tmp_path):
    """With ``skip_downstream_on_failure=True`` and ``combine_scripts=True``, a failed
    run causes all downstream dependent runs (directly or transitively) to be skipped.
    """

    def _script_schema(objective, script, in_param, out_param, group=None):
        # Build a single-action, direct-data Python-script task schema.
        input_kwargs = {"parameter": hf.Parameter(in_param)}
        if group is not None:
            input_kwargs["group"] = group
        return hf.TaskSchema(
            objective=objective,
            inputs=[hf.SchemaInput(**input_kwargs)],
            outputs=[hf.SchemaOutput(parameter=hf.Parameter(out_param))],
            actions=[
                hf.Action(
                    script=script,
                    script_data_in="direct",
                    script_data_out="direct",
                    script_exe="python_script",
                    environments=[hf.ActionEnvironment(environment="python_env")],
                )
            ],
        )

    s1 = _script_schema(
        "t1", "<<script:main_script_test_direct_in_direct_out.py>>", "p1", "p2"
    )
    s2 = _script_schema(
        "t2", "<<script:main_script_test_direct_in_direct_out_2.py>>", "p2", "p3"
    )
    s3 = _script_schema(
        "t3",
        "<<script:main_script_test_direct_in_group_direct_out_3.py>>",
        "p3",
        "p4",
        group="my_group",
    )

    tasks = [
        # second element's input is nonsense, so its run fails:
        hf.Task(
            s1,
            sequences=[
                hf.ValueSequence(path="inputs.p1", values=[101, "NONSENSE VALUE"])
            ],
        ),
        hf.Task(s2, groups=[hf.ElementGroup(name="my_group")]),
        hf.Task(s3),
    ]

    wk = hf.Workflow.from_template_data(
        template_name="test_skip_downstream_on_failure",
        path=tmp_path,
        tasks=tasks,
        resources={
            "any": {
                "write_app_logs": True,
                "skip_downstream_on_failure": True,
                "combine_scripts": True,
            }
        },
    )
    wk.submit(wait=True, add_to_known=False, status=False)
    runs = wk.get_all_EARs()

    assert runs[0].status is EARStatus.success
    assert runs[1].status is EARStatus.error  # original error
    assert runs[2].status is EARStatus.success
    assert runs[3].status is EARStatus.skipped  # skipped due to run 1 error
    assert runs[4].status is EARStatus.skipped  # skipped due to run 3 skipped
87
+
88
+
89
@pytest.mark.integration
def test_skip_downstream_on_failure_false_combine_scripts(null_config, tmp_path):
    """With ``skip_downstream_on_failure=False`` and ``combine_scripts=True``, runs
    downstream of a failed run still execute (and then fail themselves, since their
    input data was never set).
    """

    def _script_schema(objective, script, in_param, out_param, group=None):
        # Build a single-action, direct-data Python-script task schema.
        input_kwargs = {"parameter": hf.Parameter(in_param)}
        if group is not None:
            input_kwargs["group"] = group
        return hf.TaskSchema(
            objective=objective,
            inputs=[hf.SchemaInput(**input_kwargs)],
            outputs=[hf.SchemaOutput(parameter=hf.Parameter(out_param))],
            actions=[
                hf.Action(
                    script=script,
                    script_data_in="direct",
                    script_data_out="direct",
                    script_exe="python_script",
                    environments=[hf.ActionEnvironment(environment="python_env")],
                )
            ],
        )

    s1 = _script_schema(
        "t1", "<<script:main_script_test_direct_in_direct_out.py>>", "p1", "p2"
    )
    s2 = _script_schema(
        "t2", "<<script:main_script_test_direct_in_direct_out_2.py>>", "p2", "p3"
    )
    s3 = _script_schema(
        "t3",
        "<<script:main_script_test_direct_in_group_direct_out_3.py>>",
        "p3",
        "p4",
        group="my_group",
    )

    tasks = [
        # second element's input is nonsense, so its run fails:
        hf.Task(
            s1,
            sequences=[
                hf.ValueSequence(path="inputs.p1", values=[101, "NONSENSE VALUE"])
            ],
        ),
        hf.Task(s2, groups=[hf.ElementGroup(name="my_group")]),
        hf.Task(s3),
    ]

    wk = hf.Workflow.from_template_data(
        template_name="test_skip_downstream_on_failure",
        path=tmp_path,
        tasks=tasks,
        resources={
            "any": {
                "write_app_logs": True,
                "skip_downstream_on_failure": False,
                "combine_scripts": True,
            }
        },
    )
    wk.submit(wait=True, add_to_known=False, status=False)
    runs = wk.get_all_EARs()

    assert runs[0].status is EARStatus.success
    assert runs[1].status is EARStatus.error  # original error
    assert runs[2].status is EARStatus.success
    assert runs[3].status is EARStatus.error  # relies on run 1 output so fails
    assert runs[4].status is EARStatus.error  # relies on run 3 output so fails
167
+
168
+
169
@pytest.mark.integration
def test_skip_downstream_on_failure_true(null_config, tmp_path):
    """Command-line (non-script) variant: with ``skip_downstream_on_failure=True``,
    runs that depend on a failed run are skipped, transitively.
    """
    s1, s2 = make_schemas(
        ({"p1": None}, ("p2",), "t1"),
        ({"p2": None}, ("p3",), "t2"),
    )
    # t3 sums the grouped p3 values on the command line into p4:
    sum_command = hf.Command(
        "echo $(( <<sum(parameter:p3)>> ))",
        stdout="<<int(parameter:p4)>>",
    )
    s3 = hf.TaskSchema(
        "t3",
        inputs=[hf.SchemaInput("p3", group="my_group")],
        outputs=[hf.SchemaOutput("p4")],
        actions=[hf.Action(commands=[sum_command])],
    )

    tasks = [
        # second element's input is nonsense, so its run fails:
        hf.Task(
            s1,
            sequences=[
                hf.ValueSequence(path="inputs.p1", values=[101, "NONSENSE VALUE"])
            ],
        ),
        hf.Task(s2, groups=[hf.ElementGroup(name="my_group")]),
        hf.Task(s3),
    ]

    wk = hf.Workflow.from_template_data(
        template_name="test_skip_downstream_on_failure",
        path=tmp_path,
        tasks=tasks,
        resources={"any": {"write_app_logs": True, "skip_downstream_on_failure": True}},
    )
    wk.submit(wait=True, add_to_known=False, status=False)
    runs = wk.get_all_EARs()

    assert runs[0].status is EARStatus.success
    assert runs[1].status is EARStatus.error  # original error
    assert runs[2].status is EARStatus.success
    assert runs[3].status is EARStatus.skipped  # skipped due to run 1 error
    assert runs[4].status is EARStatus.skipped  # skipped due to run 3 skipped
216
+
217
+
218
@pytest.mark.integration
def test_skip_downstream_on_failure_false(null_config, tmp_path):
    """Command-line (non-script) variant: with ``skip_downstream_on_failure=False``,
    downstream runs still execute after a failure and fail themselves because their
    input data was never set.
    """
    s1, s2 = make_schemas(
        ({"p1": None}, ("p2",), "t1"),
        ({"p2": None}, ("p3",), "t2"),
    )
    # t3 sums the grouped p3 values on the command line into p4:
    sum_command = hf.Command(
        "echo $(( <<sum(parameter:p3)>> ))",
        stdout="<<int(parameter:p4)>>",
    )
    s3 = hf.TaskSchema(
        "t3",
        inputs=[hf.SchemaInput("p3", group="my_group")],
        outputs=[hf.SchemaOutput("p4")],
        actions=[hf.Action(commands=[sum_command])],
    )

    tasks = [
        # second element's input is nonsense, so its run fails:
        hf.Task(
            s1,
            sequences=[
                hf.ValueSequence(path="inputs.p1", values=[101, "NONSENSE VALUE"])
            ],
        ),
        hf.Task(s2, groups=[hf.ElementGroup(name="my_group")]),
        hf.Task(s3),
    ]

    wk = hf.Workflow.from_template_data(
        template_name="test_skip_downstream_on_failure",
        path=tmp_path,
        tasks=tasks,
        resources={"any": {"write_app_logs": True, "skip_downstream_on_failure": False}},
    )
    wk.submit(wait=True, add_to_known=False, status=False)
    runs = wk.get_all_EARs()

    assert runs[0].status is EARStatus.success
    assert runs[1].status is EARStatus.error  # original error
    assert runs[2].status is EARStatus.success
    assert runs[3].status is EARStatus.error  # relies on run 1 output so fails
    assert runs[4].status is EARStatus.error  # relies on run 3 output so fails
265
+
266
+
267
@pytest.mark.integration
@pytest.mark.parametrize("allow_failed_dependencies", ["UNSET", None, False, 0.0, 0])
@pytest.mark.parametrize("combine_scripts", [True, False])
def test_skip_downstream_on_failure_false_expected_failure(
    null_config, tmp_path, allow_failed_dependencies, combine_scripts
):
    """When failed dependencies are NOT allowed (unset, None, False, or zero
    threshold), a dependent run fails even though its script could tolerate the
    missing input.
    """
    s1 = hf.TaskSchema(
        objective="t1",
        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
        actions=[
            hf.Action(
                script="<<script:main_script_test_direct_in_direct_out.py>>",
                script_data_in="direct",
                script_data_out="direct",
                script_exe="python_script",
                environments=[hf.ActionEnvironment(environment="python_env")],
            )
        ],
    )

    # "UNSET" means omit the keyword entirely (exercises the default):
    input_kwargs = {"parameter": hf.Parameter("p2")}
    if allow_failed_dependencies != "UNSET":
        input_kwargs["allow_failed_dependencies"] = allow_failed_dependencies

    # schema with a script that handles missing data (p2):
    s2 = hf.TaskSchema(
        objective="t2",
        inputs=[hf.SchemaInput(**input_kwargs)],
        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p3"))],
        actions=[
            hf.Action(
                script="<<script:main_script_test_direct_in_direct_out_2_fail_allowed.py>>",
                script_data_in="direct",
                script_data_out="direct",
                script_exe="python_script",
                environments=[hf.ActionEnvironment(environment="python_env")],
            )
        ],
    )
    tasks = [
        hf.Task(s1, inputs={"p1": "NONSENSE VALUE"}),  # will fail
        hf.Task(s2),  # depends on t1, will fail
    ]

    wk = hf.Workflow.from_template_data(
        template_name="test_allowed_failed_dependencies",
        path=tmp_path,
        tasks=tasks,
        resources={
            "any": {
                "write_app_logs": True,
                "skip_downstream_on_failure": False,
                "combine_scripts": combine_scripts,
            }
        },
    )
    wk.submit(wait=True, add_to_known=False, status=False)
    runs = wk.get_all_EARs()

    assert runs[0].status is EARStatus.error
    assert runs[1].status is EARStatus.error
329
+
330
+
331
@pytest.mark.integration
@pytest.mark.parametrize("allow_failed_dependencies", [True, 1.0, 1])
@pytest.mark.parametrize("combine_scripts", [True, False])
def test_skip_downstream_on_failure_false_handled_failure_allow_failed_dependencies(
    null_config, tmp_path, allow_failed_dependencies, combine_scripts
):
    """When failed dependencies ARE allowed (True, or a threshold covering the
    failure), a dependent run whose script tolerates the missing input succeeds.
    """
    s1 = hf.TaskSchema(
        objective="t1",
        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
        actions=[
            hf.Action(
                script="<<script:main_script_test_direct_in_direct_out.py>>",
                script_data_in="direct",
                script_data_out="direct",
                script_exe="python_script",
                environments=[hf.ActionEnvironment(environment="python_env")],
            )
        ],
    )

    # schema with a script that handles missing data (p2):
    s2 = hf.TaskSchema(
        objective="t2",
        inputs=[
            hf.SchemaInput(
                parameter=hf.Parameter("p2"),
                allow_failed_dependencies=allow_failed_dependencies,
            )
        ],
        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p3"))],
        actions=[
            hf.Action(
                script="<<script:main_script_test_direct_in_direct_out_2_fail_allowed.py>>",
                script_data_in="direct",
                script_data_out="direct",
                script_exe="python_script",
                environments=[hf.ActionEnvironment(environment="python_env")],
            )
        ],
    )
    tasks = [
        hf.Task(s1, inputs={"p1": "NONSENSE VALUE"}),  # will fail
        hf.Task(s2),  # should succeed
    ]

    wk = hf.Workflow.from_template_data(
        template_name="test_allowed_failed_dependencies",
        path=tmp_path,
        tasks=tasks,
        resources={
            "any": {
                "write_app_logs": True,
                "skip_downstream_on_failure": False,
                "combine_scripts": combine_scripts,
            }
        },
    )
    wk.submit(wait=True, add_to_known=False, status=False)
    runs = wk.get_all_EARs()

    assert runs[0].status is EARStatus.error
    assert runs[1].status is EARStatus.success
394
+
395
+
396
@pytest.mark.integration
@pytest.mark.parametrize(
    "allow_failed_dependencies",
    [
        "UNSET",
        None,
        False,
        0.4,
        1,
    ],
)
@pytest.mark.parametrize("combine_scripts", [True, False])
def test_skip_downstream_on_failure_false_expected_failure_group(
    null_config, tmp_path, allow_failed_dependencies, combine_scripts
):
    """Grouped variant: two of three upstream elements fail, which exceeds every
    allowed-failure threshold tested here, so the dependent run fails.
    """
    s1 = hf.TaskSchema(
        objective="t1",
        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
        actions=[
            hf.Action(
                script="<<script:main_script_test_direct_in_direct_out.py>>",
                script_data_in="direct",
                script_data_out="direct",
                script_exe="python_script",
                environments=[hf.ActionEnvironment(environment="python_env")],
            )
        ],
    )

    # "UNSET" means omit the keyword entirely (exercises the default):
    input_kwargs = {"parameter": hf.Parameter("p2"), "group": "my_group"}
    if allow_failed_dependencies != "UNSET":
        input_kwargs["allow_failed_dependencies"] = allow_failed_dependencies

    # schema with a script that handles missing data (p2):
    s2 = hf.TaskSchema(
        objective="t2",
        inputs=[hf.SchemaInput(**input_kwargs)],
        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p3"))],
        actions=[
            hf.Action(
                script="<<script:main_script_test_direct_in_direct_out_2_fail_allowed_group.py>>",
                script_data_in="direct",
                script_data_out="direct",
                script_exe="python_script",
                environments=[hf.ActionEnvironment(environment="python_env")],
            )
        ],
    )
    tasks = [
        hf.Task(
            s1,
            sequences=[
                hf.ValueSequence(
                    path="inputs.p1", values=[100, "NONSENSE VALUE", "NONSENSE VALUE"]
                )
            ],
            groups=[hf.ElementGroup("my_group")],
        ),  # two thirds will fail
        hf.Task(s2),  # should succeed
    ]

    wk = hf.Workflow.from_template_data(
        template_name="test_allowed_failed_dependencies",
        path=tmp_path,
        tasks=tasks,
        resources={
            "any": {
                "write_app_logs": True,
                "skip_downstream_on_failure": False,
                "combine_scripts": combine_scripts,
            }
        },
    )
    wk.submit(wait=True, add_to_known=False, status=False)
    runs = wk.get_all_EARs()

    assert runs[0].status is EARStatus.success
    assert runs[1].status is EARStatus.error
    assert runs[2].status is EARStatus.error
    assert runs[3].status is EARStatus.error
477
+
478
+
479
@pytest.mark.integration
@pytest.mark.parametrize("allow_failed_dependencies", [True, 0.4, 1])
@pytest.mark.parametrize("combine_scripts", [True, False])
def test_skip_downstream_on_failure_false_handled_failure_allow_failed_dependencies_group(
    null_config, tmp_path, allow_failed_dependencies, combine_scripts
):
    """Grouped variant: only one of three upstream elements fails, which is within
    every allowed-failure threshold tested here, so the dependent run succeeds.
    """
    s1 = hf.TaskSchema(
        objective="t1",
        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
        actions=[
            hf.Action(
                script="<<script:main_script_test_direct_in_direct_out.py>>",
                script_data_in="direct",
                script_data_out="direct",
                script_exe="python_script",
                environments=[hf.ActionEnvironment(environment="python_env")],
            )
        ],
    )

    # schema with a script that handles missing data (p2):
    s2 = hf.TaskSchema(
        objective="t2",
        inputs=[
            hf.SchemaInput(
                parameter=hf.Parameter("p2"),
                allow_failed_dependencies=allow_failed_dependencies,
                group="my_group",
            )
        ],
        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p3"))],
        actions=[
            hf.Action(
                script="<<script:main_script_test_direct_in_direct_out_2_fail_allowed_group.py>>",
                script_data_in="direct",
                script_data_out="direct",
                script_exe="python_script",
                environments=[hf.ActionEnvironment(environment="python_env")],
            )
        ],
    )
    tasks = [
        hf.Task(
            s1,
            sequences=[
                hf.ValueSequence(path="inputs.p1", values=[100, 200, "NONSENSE VALUE"])
            ],
            groups=[hf.ElementGroup("my_group")],
        ),  # one third will fail
        hf.Task(s2),  # should succeed
    ]

    wk = hf.Workflow.from_template_data(
        template_name="test_allowed_failed_dependencies",
        path=tmp_path,
        tasks=tasks,
        resources={
            "any": {
                "write_app_logs": True,
                "skip_downstream_on_failure": False,
                "combine_scripts": combine_scripts,
            }
        },
    )
    wk.submit(wait=True, add_to_known=False, status=False)
    runs = wk.get_all_EARs()

    assert runs[0].status is EARStatus.success
    assert runs[1].status is EARStatus.success
    assert runs[2].status is EARStatus.error
    assert runs[3].status is EARStatus.success
551
+
552
+
553
@pytest.mark.integration
def test_unset_parameters_found_when_writing_commands(null_config, tmp_path):
    """A task whose command references a parameter that an upstream failure left
    unset should itself fail at command-writing time.

    Fix: t1's ``outputs`` list used ``hf.SchemaInput`` for ``p2``; an output must be
    declared with ``hf.SchemaOutput``.
    """
    # Shell-specific commands that fail before producing p2 on stdout:
    cmd_ps = "echo <<parameter:p1>>; exit 1"
    cmd_bash = "exit; echo <<parameter:p1>>"
    cmd = cmd_ps if os.name == "nt" else cmd_bash
    s1 = hf.TaskSchema(
        objective="t1",
        inputs=[hf.SchemaInput("p1")],
        outputs=[hf.SchemaOutput("p2")],  # was hf.SchemaInput: outputs are SchemaOutput
        actions=[
            hf.Action(commands=[hf.Command(command=cmd, stdout="<<parameter:p2>>")])
        ],  # will fail
    )
    # NOTE(review): `make_schemas` is called with a single spec and the result is
    # passed straight to `hf.Task` (not unpacked) — presumably `hf.Task` accepts a
    # list of schemas; confirm against other call sites.
    s2 = make_schemas(
        ({"p2": None}, ("p3",), "t2"),  # command-line based action
    )
    tasks = [
        hf.Task(s1, inputs={"p1": 123}),  # will fail, and not set p2 for next task
        hf.Task(s2),  # will fail when writing commands
    ]

    wk = hf.Workflow.from_template_data(
        template_name="test_unset_parameters_in_cmdline",
        path=tmp_path,
        tasks=tasks,
        resources={
            "any": {
                "write_app_logs": True,
                "skip_downstream_on_failure": False,
            }
        },
    )
    wk.submit(wait=True, add_to_known=False, status=False)
    runs = wk.get_all_EARs()
    assert runs[0].status is EARStatus.error
    assert runs[1].status is EARStatus.error
589
+
590
+
591
@pytest.mark.integration
def test_unset_parameters_found_when_writing_script_input_file(null_config, tmp_path):
    """A script task whose JSON input file needs a parameter that an upstream
    failure left unset should fail when the input file is written.

    Fixes: t1's ``outputs`` list used ``hf.SchemaInput`` for ``p1`` (must be
    ``hf.SchemaOutput``), and the task comment named the wrong parameter (p2 for p1).
    """
    # Shell-specific commands that fail before producing p1 on stdout:
    cmd_ps = "echo <<parameter:p0>>; exit 1"
    cmd_bash = "exit; echo <<parameter:p0>>"
    cmd = cmd_ps if os.name == "nt" else cmd_bash
    s1 = hf.TaskSchema(
        objective="t1",
        inputs=[hf.SchemaInput("p0")],
        outputs=[hf.SchemaOutput("p1")],  # was hf.SchemaInput: outputs are SchemaOutput
        actions=[
            hf.Action(commands=[hf.Command(command=cmd, stdout="<<parameter:p1>>")])
        ],  # will fail
    )

    s2 = hf.TaskSchema(
        objective="t2",
        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
        actions=[
            hf.Action(
                script="<<script:main_script_test_json_in_json_out.py>>",
                script_data_in="json",
                script_data_out="json",
                script_exe="python_script",
                environments=[hf.ActionEnvironment(environment="python_env")],
                requires_dir=True,
            )
        ],
    )

    tasks = [
        hf.Task(s1, inputs={"p0": 123}),  # will fail, and not set p1 for next task
        hf.Task(s2),  # will fail when writing input JSON file
    ]

    wk = hf.Workflow.from_template_data(
        template_name="test_unset_parameters_in_script_input_file",
        path=tmp_path,
        tasks=tasks,
        resources={
            "any": {
                "write_app_logs": True,
                "skip_downstream_on_failure": False,
            }
        },
    )
    wk.submit(wait=True, add_to_known=False, status=False)
    runs = wk.get_all_EARs()
    assert runs[0].status is EARStatus.error
    assert runs[1].status is EARStatus.error
641
+
642
+
643
@pytest.mark.integration
@pytest.mark.parametrize("combine_scripts", [True, False])
def test_unset_parameters_found_when_py_script_gets_direct_inputs(
    null_config, tmp_path, combine_scripts
):
    """A direct-input Python-script task should fail inside the generated script
    when retrieving a parameter that an upstream failure left unset.

    Fixes: t1's ``outputs`` list used ``hf.SchemaInput`` for ``p1`` (must be
    ``hf.SchemaOutput``), and the task comment named the wrong parameter (p2 for p1).
    """
    # Shell-specific commands that fail before producing p1 on stdout:
    cmd_ps = "echo <<parameter:p0>>; exit 1"
    cmd_bash = "exit; echo <<parameter:p0>>"
    cmd = cmd_ps if os.name == "nt" else cmd_bash
    s1 = hf.TaskSchema(
        objective="t1",
        inputs=[hf.SchemaInput("p0")],
        outputs=[hf.SchemaOutput("p1")],  # was hf.SchemaInput: outputs are SchemaOutput
        actions=[
            hf.Action(commands=[hf.Command(command=cmd, stdout="<<parameter:p1>>")])
        ],  # will fail
    )

    s2 = hf.TaskSchema(
        objective="t2",
        inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
        outputs=[hf.SchemaOutput(parameter=hf.Parameter("p2"))],
        actions=[
            hf.Action(
                script="<<script:main_script_test_direct_in_direct_out.py>>",
                script_data_in="direct",
                script_data_out="direct",
                script_exe="python_script",
                environments=[hf.ActionEnvironment(environment="python_env")],
                requires_dir=True,
            )
        ],
    )

    tasks = [
        hf.Task(s1, inputs={"p0": 123}),  # will fail, and not set p1 for next task
        hf.Task(s2),  # will fail when retrieving input p1 within generated script
    ]

    wk = hf.Workflow.from_template_data(
        template_name="test_unset_parameters_in_py_script",
        path=tmp_path,
        tasks=tasks,
        resources={
            "any": {
                "write_app_logs": True,
                "skip_downstream_on_failure": False,
                "combine_scripts": combine_scripts,
            }
        },
    )
    wk.submit(wait=True, add_to_known=False, status=False)
    runs = wk.get_all_EARs()
    assert runs[0].status is EARStatus.error
    assert runs[1].status is EARStatus.error