hpcflow-new2 0.2.0a189__py3-none-any.whl → 0.2.0a199__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (176)
  1. hpcflow/__pyinstaller/hook-hpcflow.py +9 -6
  2. hpcflow/_version.py +1 -1
  3. hpcflow/app.py +1 -0
  4. hpcflow/data/scripts/bad_script.py +2 -0
  5. hpcflow/data/scripts/do_nothing.py +2 -0
  6. hpcflow/data/scripts/env_specifier_test/input_file_generator_pass_env_spec.py +4 -0
  7. hpcflow/data/scripts/env_specifier_test/main_script_test_pass_env_spec.py +8 -0
  8. hpcflow/data/scripts/env_specifier_test/output_file_parser_pass_env_spec.py +4 -0
  9. hpcflow/data/scripts/env_specifier_test/v1/input_file_generator_basic.py +4 -0
  10. hpcflow/data/scripts/env_specifier_test/v1/main_script_test_direct_in_direct_out.py +7 -0
  11. hpcflow/data/scripts/env_specifier_test/v1/output_file_parser_basic.py +4 -0
  12. hpcflow/data/scripts/env_specifier_test/v2/main_script_test_direct_in_direct_out.py +7 -0
  13. hpcflow/data/scripts/input_file_generator_basic.py +3 -0
  14. hpcflow/data/scripts/input_file_generator_basic_FAIL.py +3 -0
  15. hpcflow/data/scripts/input_file_generator_test_stdout_stderr.py +8 -0
  16. hpcflow/data/scripts/main_script_test_direct_in.py +3 -0
  17. hpcflow/data/scripts/main_script_test_direct_in_direct_out_2.py +6 -0
  18. hpcflow/data/scripts/main_script_test_direct_in_direct_out_2_fail_allowed.py +6 -0
  19. hpcflow/data/scripts/main_script_test_direct_in_direct_out_2_fail_allowed_group.py +7 -0
  20. hpcflow/data/scripts/main_script_test_direct_in_direct_out_3.py +6 -0
  21. hpcflow/data/scripts/main_script_test_direct_in_group_direct_out_3.py +6 -0
  22. hpcflow/data/scripts/main_script_test_direct_in_group_one_fail_direct_out_3.py +6 -0
  23. hpcflow/data/scripts/main_script_test_hdf5_in_obj.py +1 -1
  24. hpcflow/data/scripts/main_script_test_hdf5_in_obj_2.py +12 -0
  25. hpcflow/data/scripts/main_script_test_hdf5_out_obj.py +1 -1
  26. hpcflow/data/scripts/main_script_test_json_out_FAIL.py +3 -0
  27. hpcflow/data/scripts/main_script_test_shell_env_vars.py +12 -0
  28. hpcflow/data/scripts/main_script_test_std_out_std_err.py +6 -0
  29. hpcflow/data/scripts/output_file_parser_basic.py +3 -0
  30. hpcflow/data/scripts/output_file_parser_basic_FAIL.py +7 -0
  31. hpcflow/data/scripts/output_file_parser_test_stdout_stderr.py +8 -0
  32. hpcflow/data/scripts/script_exit_test.py +5 -0
  33. hpcflow/data/template_components/environments.yaml +1 -1
  34. hpcflow/sdk/__init__.py +26 -15
  35. hpcflow/sdk/app.py +2192 -768
  36. hpcflow/sdk/cli.py +506 -296
  37. hpcflow/sdk/cli_common.py +105 -7
  38. hpcflow/sdk/config/__init__.py +1 -1
  39. hpcflow/sdk/config/callbacks.py +115 -43
  40. hpcflow/sdk/config/cli.py +126 -103
  41. hpcflow/sdk/config/config.py +674 -318
  42. hpcflow/sdk/config/config_file.py +131 -95
  43. hpcflow/sdk/config/errors.py +125 -84
  44. hpcflow/sdk/config/types.py +148 -0
  45. hpcflow/sdk/core/__init__.py +25 -1
  46. hpcflow/sdk/core/actions.py +1771 -1059
  47. hpcflow/sdk/core/app_aware.py +24 -0
  48. hpcflow/sdk/core/cache.py +139 -79
  49. hpcflow/sdk/core/command_files.py +263 -287
  50. hpcflow/sdk/core/commands.py +145 -112
  51. hpcflow/sdk/core/element.py +828 -535
  52. hpcflow/sdk/core/enums.py +192 -0
  53. hpcflow/sdk/core/environment.py +74 -93
  54. hpcflow/sdk/core/errors.py +455 -52
  55. hpcflow/sdk/core/execute.py +207 -0
  56. hpcflow/sdk/core/json_like.py +540 -272
  57. hpcflow/sdk/core/loop.py +751 -347
  58. hpcflow/sdk/core/loop_cache.py +164 -47
  59. hpcflow/sdk/core/object_list.py +370 -207
  60. hpcflow/sdk/core/parameters.py +1100 -627
  61. hpcflow/sdk/core/rule.py +59 -41
  62. hpcflow/sdk/core/run_dir_files.py +21 -37
  63. hpcflow/sdk/core/skip_reason.py +7 -0
  64. hpcflow/sdk/core/task.py +1649 -1339
  65. hpcflow/sdk/core/task_schema.py +308 -196
  66. hpcflow/sdk/core/test_utils.py +191 -114
  67. hpcflow/sdk/core/types.py +440 -0
  68. hpcflow/sdk/core/utils.py +485 -309
  69. hpcflow/sdk/core/validation.py +82 -9
  70. hpcflow/sdk/core/workflow.py +2544 -1178
  71. hpcflow/sdk/core/zarr_io.py +98 -137
  72. hpcflow/sdk/data/workflow_spec_schema.yaml +2 -0
  73. hpcflow/sdk/demo/cli.py +53 -33
  74. hpcflow/sdk/helper/cli.py +18 -15
  75. hpcflow/sdk/helper/helper.py +75 -63
  76. hpcflow/sdk/helper/watcher.py +61 -28
  77. hpcflow/sdk/log.py +122 -71
  78. hpcflow/sdk/persistence/__init__.py +8 -31
  79. hpcflow/sdk/persistence/base.py +1360 -606
  80. hpcflow/sdk/persistence/defaults.py +6 -0
  81. hpcflow/sdk/persistence/discovery.py +38 -0
  82. hpcflow/sdk/persistence/json.py +568 -188
  83. hpcflow/sdk/persistence/pending.py +382 -179
  84. hpcflow/sdk/persistence/store_resource.py +39 -23
  85. hpcflow/sdk/persistence/types.py +318 -0
  86. hpcflow/sdk/persistence/utils.py +14 -11
  87. hpcflow/sdk/persistence/zarr.py +1337 -433
  88. hpcflow/sdk/runtime.py +44 -41
  89. hpcflow/sdk/submission/{jobscript_info.py → enums.py} +39 -12
  90. hpcflow/sdk/submission/jobscript.py +1651 -692
  91. hpcflow/sdk/submission/schedulers/__init__.py +167 -39
  92. hpcflow/sdk/submission/schedulers/direct.py +121 -81
  93. hpcflow/sdk/submission/schedulers/sge.py +170 -129
  94. hpcflow/sdk/submission/schedulers/slurm.py +291 -268
  95. hpcflow/sdk/submission/schedulers/utils.py +12 -2
  96. hpcflow/sdk/submission/shells/__init__.py +14 -15
  97. hpcflow/sdk/submission/shells/base.py +150 -29
  98. hpcflow/sdk/submission/shells/bash.py +283 -173
  99. hpcflow/sdk/submission/shells/os_version.py +31 -30
  100. hpcflow/sdk/submission/shells/powershell.py +228 -170
  101. hpcflow/sdk/submission/submission.py +1014 -335
  102. hpcflow/sdk/submission/types.py +140 -0
  103. hpcflow/sdk/typing.py +182 -12
  104. hpcflow/sdk/utils/arrays.py +71 -0
  105. hpcflow/sdk/utils/deferred_file.py +55 -0
  106. hpcflow/sdk/utils/hashing.py +16 -0
  107. hpcflow/sdk/utils/patches.py +12 -0
  108. hpcflow/sdk/utils/strings.py +33 -0
  109. hpcflow/tests/api/test_api.py +32 -0
  110. hpcflow/tests/conftest.py +27 -6
  111. hpcflow/tests/data/multi_path_sequences.yaml +29 -0
  112. hpcflow/tests/data/workflow_test_run_abort.yaml +34 -35
  113. hpcflow/tests/schedulers/sge/test_sge_submission.py +36 -0
  114. hpcflow/tests/schedulers/slurm/test_slurm_submission.py +5 -2
  115. hpcflow/tests/scripts/test_input_file_generators.py +282 -0
  116. hpcflow/tests/scripts/test_main_scripts.py +866 -85
  117. hpcflow/tests/scripts/test_non_snippet_script.py +46 -0
  118. hpcflow/tests/scripts/test_ouput_file_parsers.py +353 -0
  119. hpcflow/tests/shells/wsl/test_wsl_submission.py +12 -4
  120. hpcflow/tests/unit/test_action.py +262 -75
  121. hpcflow/tests/unit/test_action_rule.py +9 -4
  122. hpcflow/tests/unit/test_app.py +33 -6
  123. hpcflow/tests/unit/test_cache.py +46 -0
  124. hpcflow/tests/unit/test_cli.py +134 -1
  125. hpcflow/tests/unit/test_command.py +71 -54
  126. hpcflow/tests/unit/test_config.py +142 -16
  127. hpcflow/tests/unit/test_config_file.py +21 -18
  128. hpcflow/tests/unit/test_element.py +58 -62
  129. hpcflow/tests/unit/test_element_iteration.py +50 -1
  130. hpcflow/tests/unit/test_element_set.py +29 -19
  131. hpcflow/tests/unit/test_group.py +4 -2
  132. hpcflow/tests/unit/test_input_source.py +116 -93
  133. hpcflow/tests/unit/test_input_value.py +29 -24
  134. hpcflow/tests/unit/test_jobscript_unit.py +757 -0
  135. hpcflow/tests/unit/test_json_like.py +44 -35
  136. hpcflow/tests/unit/test_loop.py +1396 -84
  137. hpcflow/tests/unit/test_meta_task.py +325 -0
  138. hpcflow/tests/unit/test_multi_path_sequences.py +229 -0
  139. hpcflow/tests/unit/test_object_list.py +17 -12
  140. hpcflow/tests/unit/test_parameter.py +29 -7
  141. hpcflow/tests/unit/test_persistence.py +237 -42
  142. hpcflow/tests/unit/test_resources.py +20 -18
  143. hpcflow/tests/unit/test_run.py +117 -6
  144. hpcflow/tests/unit/test_run_directories.py +29 -0
  145. hpcflow/tests/unit/test_runtime.py +2 -1
  146. hpcflow/tests/unit/test_schema_input.py +23 -15
  147. hpcflow/tests/unit/test_shell.py +23 -2
  148. hpcflow/tests/unit/test_slurm.py +8 -7
  149. hpcflow/tests/unit/test_submission.py +38 -89
  150. hpcflow/tests/unit/test_task.py +352 -247
  151. hpcflow/tests/unit/test_task_schema.py +33 -20
  152. hpcflow/tests/unit/test_utils.py +9 -11
  153. hpcflow/tests/unit/test_value_sequence.py +15 -12
  154. hpcflow/tests/unit/test_workflow.py +114 -83
  155. hpcflow/tests/unit/test_workflow_template.py +0 -1
  156. hpcflow/tests/unit/utils/test_arrays.py +40 -0
  157. hpcflow/tests/unit/utils/test_deferred_file_writer.py +34 -0
  158. hpcflow/tests/unit/utils/test_hashing.py +65 -0
  159. hpcflow/tests/unit/utils/test_patches.py +5 -0
  160. hpcflow/tests/unit/utils/test_redirect_std.py +50 -0
  161. hpcflow/tests/workflows/__init__.py +0 -0
  162. hpcflow/tests/workflows/test_directory_structure.py +31 -0
  163. hpcflow/tests/workflows/test_jobscript.py +334 -1
  164. hpcflow/tests/workflows/test_run_status.py +198 -0
  165. hpcflow/tests/workflows/test_skip_downstream.py +696 -0
  166. hpcflow/tests/workflows/test_submission.py +140 -0
  167. hpcflow/tests/workflows/test_workflows.py +160 -15
  168. hpcflow/tests/workflows/test_zip.py +18 -0
  169. hpcflow/viz_demo.ipynb +6587 -3
  170. {hpcflow_new2-0.2.0a189.dist-info → hpcflow_new2-0.2.0a199.dist-info}/METADATA +8 -4
  171. hpcflow_new2-0.2.0a199.dist-info/RECORD +221 -0
  172. hpcflow/sdk/core/parallel.py +0 -21
  173. hpcflow_new2-0.2.0a189.dist-info/RECORD +0 -158
  174. {hpcflow_new2-0.2.0a189.dist-info → hpcflow_new2-0.2.0a199.dist-info}/LICENSE +0 -0
  175. {hpcflow_new2-0.2.0a189.dist-info → hpcflow_new2-0.2.0a199.dist-info}/WHEEL +0 -0
  176. {hpcflow_new2-0.2.0a189.dist-info → hpcflow_new2-0.2.0a199.dist-info}/entry_points.txt +0 -0
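The diff reproduced below is for hpcflow/tests/unit/test_loop.py (item 136 above, +1396 −84). Two mechanical changes recur throughout it: test signatures gain type annotations (tmp_path: Path, store: str), and schema specs passed to make_workflow/make_schemas change from lists to tuples. A minimal sketch of the tuple-based spec — not itself part of the diff, and assuming only the make_schemas behaviour the diff's own usage shows (each entry is a mapping of input names to defaults, a tuple of output names, and an objective name):

    # Sketch: tuple-based schema specs as used in the new tests below.
    from hpcflow.sdk.core.test_utils import make_schemas

    s1, s2 = make_schemas(
        ({"p1": None}, ("p2",), "t1"),  # t1 consumes p1, produces p2
        ({"p2": None}, ("p3",), "t2"),  # t2 consumes p2, produces p3
    )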
@@ -1,17 +1,19 @@
+ from __future__ import annotations
+ from pathlib import Path
  import pytest

- from valida.conditions import Value
-
+ from valida.conditions import Value  # type: ignore

  from hpcflow.app import app as hf
  from hpcflow.sdk.core.errors import LoopAlreadyExistsError, LoopTaskSubsetError
- from hpcflow.sdk.core.test_utils import P1_parameter_cls, make_workflow
+ from hpcflow.sdk.core.skip_reason import SkipReason
+ from hpcflow.sdk.core.test_utils import P1_parameter_cls, make_schemas, make_workflow


  @pytest.mark.parametrize("store", ["json", "zarr"])
- def test_loop_tasks_obj_insert_ID_equivalence(tmp_path, store):
+ def test_loop_tasks_obj_insert_ID_equivalence(tmp_path: Path, store: str):
      wk_1 = make_workflow(
-         schemas_spec=[[{"p1": None}, ("p1",), "t1"]],
+         schemas_spec=[({"p1": None}, ("p1",), "t1")],
          local_inputs={0: ("p1",)},
          path=tmp_path,
          store=store,
@@ -21,9 +23,9 @@ def test_loop_tasks_obj_insert_ID_equivalence(tmp_path, store):
      assert lp_0.task_insert_IDs == lp_1.task_insert_IDs


- def test_raise_on_add_loop_same_name(tmp_path):
+ def test_raise_on_add_loop_same_name(tmp_path: Path):
      wk = make_workflow(
-         schemas_spec=[[{"p1": None}, ("p1",), "t1"], [{"p2": None}, ("p2",), "t2"]],
+         schemas_spec=[({"p1": None}, ("p1",), "t1"), ({"p2": None}, ("p2",), "t2")],
          local_inputs={0: ("p1",), 1: ("p2",)},
          path=tmp_path,
          store="json",
@@ -38,10 +40,10 @@ def test_raise_on_add_loop_same_name(tmp_path):

  @pytest.mark.parametrize("store", ["json", "zarr"])
  def test_wk_loop_data_idx_single_task_single_element_single_parameter_three_iters(
-     tmp_path, store
+     tmp_path: Path, store: str
  ):
      wk = make_workflow(
-         schemas_spec=[[{"p1": None}, ("p1",), "t1"]],
+         schemas_spec=[({"p1": None}, ("p1",), "t1")],
          local_inputs={0: ("p1",)},
          path=tmp_path,
          store=store,
@@ -59,10 +61,10 @@ def test_wk_loop_data_idx_single_task_single_element_single_parameter_three_iter

  @pytest.mark.parametrize("store", ["json", "zarr"])
  def test_wk_loop_EARs_initialised_single_task_single_element_single_parameter_three_iters(
-     tmp_path, store
+     tmp_path: Path, store: str
  ):
      wk = make_workflow(
-         schemas_spec=[[{"p1": None}, ("p1",), "t1"]],
+         schemas_spec=[({"p1": None}, ("p1",), "t1")],
          local_inputs={0: ("p1",)},
          path=tmp_path,
          store=store,
@@ -74,10 +76,10 @@ def test_wk_loop_EARs_initialised_single_task_single_element_single_parameter_th

  @pytest.mark.parametrize("store", ["json", "zarr"])
  def test_wk_loop_data_idx_single_task_multi_element_single_parameter_three_iters(
-     tmp_path, store
+     tmp_path: Path, store: str
  ):
      wk = make_workflow(
-         schemas_spec=[[{"p1": None}, ("p1",), "t1"]],
+         schemas_spec=[({"p1": None}, ("p1",), "t1")],
          local_sequences={0: [("inputs.p1", 2, 0)]},
          path=tmp_path,
          store=store,
@@ -106,13 +108,13 @@ def test_wk_loop_data_idx_single_task_multi_element_single_parameter_three_iters

  @pytest.mark.parametrize("store", ["json", "zarr"])
  def test_wk_loop_data_idx_multi_task_single_element_single_parameter_two_iters(
-     tmp_path, store
+     tmp_path: Path, store: str
  ):
      wk = make_workflow(
          schemas_spec=[
-             [{"p1": None}, ("p1",), "t1"],
-             [{"p1": None}, ("p1",), "t2"],
-             [{"p1": None}, ("p1",), "t3"],
+             ({"p1": None}, ("p1",), "t1"),
+             ({"p1": None}, ("p1",), "t2"),
+             ({"p1": None}, ("p1",), "t3"),
          ],
          local_inputs={0: ("p1",)},
          path=tmp_path,
@@ -149,10 +151,10 @@ def test_wk_loop_data_idx_multi_task_single_element_single_parameter_two_iters(

  @pytest.mark.parametrize("store", ["json", "zarr"])
  def test_wk_loop_data_idx_single_task_single_element_single_parameter_three_iters_non_iterable_param(
-     tmp_path, store
+     tmp_path: Path, store: str
  ):
      wk = make_workflow(
-         schemas_spec=[[{"p1": None}, ("p1",), "t1"]],
+         schemas_spec=[({"p1": None}, ("p1",), "t1")],
          local_inputs={0: ("p1",)},
          path=tmp_path,
          store=store,
@@ -171,12 +173,12 @@ def test_wk_loop_data_idx_single_task_single_element_single_parameter_three_iter


  @pytest.mark.parametrize("store", ["json", "zarr"])
- def test_wk_loop_iterable_parameters(tmp_path, store):
+ def test_wk_loop_iterable_parameters(tmp_path: Path, store: str):
      wk = make_workflow(
          schemas_spec=[
-             [{"p1": None, "p2": None}, ("p1", "p2"), "t1"],
-             [{"p1": None}, ("p1",), "t2"],
-             [{"p1": None, "p2": None}, ("p1", "p2"), "t3"],
+             ({"p1": None, "p2": None}, ("p1", "p2"), "t1"),
+             ({"p1": None}, ("p1",), "t2"),
+             ({"p1": None, "p2": None}, ("p1", "p2"), "t3"),
          ],
          local_inputs={0: ("p1", "p2"), 1: ("p1",)},
          path=tmp_path,
@@ -190,12 +192,14 @@ def test_wk_loop_iterable_parameters(tmp_path, store):


  @pytest.mark.parametrize("store", ["json", "zarr"])
- def test_wk_loop_input_sources_including_local_single_element_two_iters(tmp_path, store):
+ def test_wk_loop_input_sources_including_local_single_element_two_iters(
+     tmp_path: Path, store: str
+ ):
      wk = make_workflow(
          schemas_spec=[
-             [{"p1": None, "p2": None}, ("p1", "p2"), "t1"],
-             [{"p1": None}, ("p1",), "t2"],
-             [{"p1": None, "p2": None}, ("p1", "p2"), "t3"],
+             ({"p1": None, "p2": None}, ("p1", "p2"), "t1"),
+             ({"p1": None}, ("p1",), "t2"),
+             ({"p1": None, "p2": None}, ("p1", "p2"), "t3"),
          ],
          local_inputs={0: ("p1", "p2"), 1: ("p1",)},
          path=tmp_path,
@@ -228,10 +232,10 @@ def test_wk_loop_input_sources_including_local_single_element_two_iters(tmp_path

  @pytest.mark.parametrize("store", ["json", "zarr"])
  def test_get_iteration_task_pathway_single_task_single_element_three_iters(
-     tmp_path, store
+     tmp_path: Path, store: str
  ):
      wk = make_workflow(
-         schemas_spec=[[{"p1": None}, ("p1",), "t1"]],
+         schemas_spec=[({"p1": None}, ("p1",), "t1")],
          local_inputs={0: ("p1",)},
          path=tmp_path,
          store=store,
@@ -245,7 +249,7 @@ def test_get_iteration_task_pathway_single_task_single_element_three_iters(
      ]


- def test_get_iteration_task_pathway_nested_loops_multi_iter(null_config, tmp_path):
+ def test_get_iteration_task_pathway_nested_loops_multi_iter(null_config, tmp_path: Path):
      ts1 = hf.TaskSchema(
          objective="t1",
          inputs=[hf.SchemaInput("p1")],
@@ -288,7 +292,9 @@ def test_get_iteration_task_pathway_nested_loops_multi_iter(null_config, tmp_pat
  @pytest.mark.skip(
      reason="second set of asserts fail; need to re-source inputs on adding iterations."
  )
- def test_get_iteration_task_pathway_nested_loops_multi_iter_jagged(null_config, tmp_path):
+ def test_get_iteration_task_pathway_nested_loops_multi_iter_jagged(
+     null_config, tmp_path: Path
+ ):
      ts1 = hf.TaskSchema(
          objective="t1",
          inputs=[hf.SchemaInput("p1")],
@@ -347,7 +353,7 @@ def test_get_iteration_task_pathway_nested_loops_multi_iter_jagged(null_config,


  def test_get_iteration_task_pathway_nested_loops_multi_iter_add_outer_iter(
-     null_config, tmp_path
+     null_config, tmp_path: Path
  ):
      ts1 = hf.TaskSchema(
          objective="t1",
@@ -392,10 +398,7 @@ def test_get_iteration_task_pathway_nested_loops_multi_iter_add_outer_iter(
      ]


- @pytest.mark.skip(
-     reason="second set of asserts fail; need to re-source inputs on adding iterations."
- )
- def test_get_iteration_task_pathway_unconnected_loops(null_config, tmp_path):
+ def test_get_iteration_task_pathway_unconnected_loops(null_config, tmp_path: Path):
      ts1 = hf.TaskSchema(
          objective="t1",
          inputs=[hf.SchemaInput("p1")],
@@ -443,12 +446,12 @@ def test_get_iteration_task_pathway_unconnected_loops(null_config, tmp_path):
      assert pathway[5][2][0]["inputs.p1"] == pathway[4][2][0]["outputs.p1"]
      assert pathway[6][2][0]["inputs.p1"] == pathway[5][2][0]["outputs.p1"]
      assert pathway[7][2][0]["inputs.p1"] == pathway[6][2][0]["outputs.p1"]
-
-     # FAILS currently:
      assert pathway[4][2][0]["inputs.p1"] == pathway[3][2][0]["outputs.p1"]


- def test_wk_loop_input_sources_including_non_iteration_task_source(null_config, tmp_path):
+ def test_wk_loop_input_sources_including_non_iteration_task_source(
+     null_config, tmp_path: Path
+ ):
      act_env = hf.ActionEnvironment("null_env")
      ts1 = hf.TaskSchema(
          objective="t1",
@@ -522,7 +525,7 @@ def test_wk_loop_input_sources_including_non_iteration_task_source(null_config,
      assert t2_iter_1["inputs.p3"] == t3_iter_0["outputs.p3"]


- def test_wk_loop_input_sources_default(null_config, tmp_path):
+ def test_wk_loop_input_sources_default(null_config, tmp_path: Path):
      act_env = hf.ActionEnvironment("null_env")
      ts1 = hf.TaskSchema(
          objective="t1",
@@ -552,7 +555,7 @@ def test_wk_loop_input_sources_default(null_config, tmp_path):
      assert t1_iter_0["inputs.p2"] == t1_iter_1["inputs.p2"]


- def test_wk_loop_input_sources_iterable_param_default(null_config, tmp_path):
+ def test_wk_loop_input_sources_iterable_param_default(null_config, tmp_path: Path):
      act_env = hf.ActionEnvironment("null_env")
      ts1 = hf.TaskSchema(
          objective="t1",
@@ -589,7 +592,7 @@ def test_wk_loop_input_sources_iterable_param_default(null_config, tmp_path):


  def test_wk_loop_input_sources_iterable_param_default_conditional_action(
-     null_config, tmp_path
+     null_config, tmp_path: Path
  ):
      act_env = hf.ActionEnvironment("null_env")
      ts1 = hf.TaskSchema(
@@ -637,7 +640,7 @@ def test_wk_loop_input_sources_iterable_param_default_conditional_action(


  def test_wk_loop_input_sources_including_non_iteration_task_source_with_groups(
-     null_config, tmp_path
+     null_config, tmp_path: Path
  ):
      act_env = hf.ActionEnvironment("null_env")
      ts1 = hf.TaskSchema(
@@ -733,7 +736,7 @@ def test_wk_loop_input_sources_including_non_iteration_task_source_with_groups(
      ]


- def test_loop_local_sub_parameters(null_config, tmp_path):
+ def test_loop_local_sub_parameters(null_config, tmp_path: Path):
      act_env = hf.ActionEnvironment("null_env")
      ts1 = hf.TaskSchema(
          objective="t1",
@@ -796,7 +799,7 @@ def test_loop_local_sub_parameters(null_config, tmp_path):
      assert t1_iter_0["inputs.p1c.d"] == t1_iter_1["inputs.p1c.d"]


- def test_nested_loop_iter_loop_idx(null_config, tmp_path):
+ def test_nested_loop_iter_loop_idx(null_config, tmp_path: Path):
      ts1 = hf.TaskSchema(
          objective="t1",
          inputs=[hf.SchemaInput("p1")],
@@ -828,7 +831,7 @@ def test_nested_loop_iter_loop_idx(null_config, tmp_path):
      }


- def test_schema_input_with_group_sourced_from_prev_iteration(null_config, tmp_path):
+ def test_schema_input_with_group_sourced_from_prev_iteration(null_config, tmp_path: Path):
      s1 = hf.TaskSchema(
          objective="t1",
          inputs=[hf.SchemaInput("p1")],
@@ -907,7 +910,7 @@ def test_schema_input_with_group_sourced_from_prev_iteration(null_config, tmp_pa
      ] == [wk.tasks.t2.elements[0].iterations[1].get_data_idx()["outputs.p3"]] * 3


- def test_loop_downstream_tasks(null_config, tmp_path):
+ def test_loop_downstream_tasks(null_config, tmp_path: Path):
      ts1 = hf.TaskSchema(
          objective="t1",
          inputs=[hf.SchemaInput("p1")],
@@ -951,11 +954,11 @@ def test_loop_downstream_tasks(null_config, tmp_path):
              hf.Loop(name="my_loop", tasks=[1, 2], num_iterations=2),
          ],
      )
-     assert wk.loops.my_loop.downstream_tasks == [wk.tasks[3]]
-     assert wk.loops.my_loop.upstream_tasks == [wk.tasks[0]]
+     assert list(wk.loops.my_loop.downstream_tasks) == [wk.tasks[3]]
+     assert list(wk.loops.my_loop.upstream_tasks) == [wk.tasks[0]]


- def test_raise_loop_task_subset_error(null_config, tmp_path):
+ def test_raise_loop_task_subset_error(null_config, tmp_path: Path):
      ts1 = hf.TaskSchema(
          objective="t1",
          inputs=[hf.SchemaInput("p1")],
@@ -986,38 +989,868 @@ def test_raise_loop_task_subset_error(null_config, tmp_path):
986
989
  )
987
990
 
988
991
 
989
- def test_raise_downstream_task_with_iterable_parameter(null_config, tmp_path):
990
- ts1 = hf.TaskSchema(
991
- objective="t1",
992
- inputs=[hf.SchemaInput("p1")],
993
- outputs=[hf.SchemaOutput("p1")],
992
+ def test_add_iteration_updates_downstream_data_idx_loop_output_param(
993
+ new_null_config, tmp_path: Path
994
+ ):
995
+ # loop output (but not iterable) parameter sourced in task downstream of loop:
996
+ s1, s2, s3 = make_schemas(
997
+ ({"p1": None}, ("p2",), "t1"),
998
+ (
999
+ {"p2": None},
1000
+ (
1001
+ "p2",
1002
+ "p3",
1003
+ ),
1004
+ "t2",
1005
+ ),
1006
+ ({"p3": None}, ("p4",), "t3"),
1007
+ )
1008
+ tasks = [
1009
+ hf.Task(s1, inputs={"p1": 100}),
1010
+ hf.Task(s2),
1011
+ hf.Task(s3),
1012
+ ]
1013
+ loops = [hf.Loop(tasks=[1], num_iterations=3)]
1014
+ wk = hf.Workflow.from_template_data(
1015
+ template_name="loop_param_update",
1016
+ tasks=tasks,
1017
+ loops=loops,
1018
+ path=tmp_path,
1019
+ )
1020
+
1021
+ t1_di = wk.tasks.t1.elements[0].get_data_idx()
1022
+ t2_i0_di = wk.tasks.t2.elements[0].iterations[0].get_data_idx()
1023
+ t2_i1_di = wk.tasks.t2.elements[0].iterations[1].get_data_idx()
1024
+ t2_i2_di = wk.tasks.t2.elements[0].iterations[2].get_data_idx()
1025
+ t3_di = wk.tasks.t3.elements[0].get_data_idx()
1026
+
1027
+ # final task should get its input from the final iteration of the second task
1028
+ assert t2_i0_di["inputs.p2"] == t1_di["outputs.p2"]
1029
+ assert t2_i1_di["inputs.p2"] == t2_i0_di["outputs.p2"]
1030
+ assert t2_i2_di["inputs.p2"] == t2_i1_di["outputs.p2"]
1031
+ assert t3_di["inputs.p3"] == t2_i2_di["outputs.p3"]
1032
+
1033
+
1034
+ def test_add_iteration_updates_downstream_data_idx_loop_output_param_multi_element(
1035
+ new_null_config, tmp_path: Path
1036
+ ):
1037
+ # loop output (but not iterable) parameter sourced in task downstream of loop - multi
1038
+ # element
1039
+ s1, s2, s3 = make_schemas(
1040
+ ({"p1": None}, ("p2",), "t1"),
1041
+ (
1042
+ {"p2": None},
1043
+ (
1044
+ "p2",
1045
+ "p3",
1046
+ ),
1047
+ "t2",
1048
+ ),
1049
+ ({"p3": None}, ("p4",), "t3"),
1050
+ )
1051
+ tasks = [
1052
+ hf.Task(s1, sequences=[hf.ValueSequence("inputs.p1", values=[100, 101])]),
1053
+ hf.Task(s2),
1054
+ hf.Task(s3),
1055
+ ]
1056
+ loops = [hf.Loop(tasks=[1], num_iterations=3)]
1057
+ wk = hf.Workflow.from_template_data(
1058
+ template_name="loop_param_update",
1059
+ tasks=tasks,
1060
+ loops=loops,
1061
+ path=tmp_path,
1062
+ )
1063
+
1064
+ assert wk.tasks.t1.num_elements == 2
1065
+ assert wk.tasks.t2.num_elements == 2
1066
+ assert wk.tasks.t3.num_elements == 2
1067
+
1068
+ t1_e0_di = wk.tasks.t1.elements[0].get_data_idx()
1069
+ t2_e0_i0_di = wk.tasks.t2.elements[0].iterations[0].get_data_idx()
1070
+ t2_e0_i1_di = wk.tasks.t2.elements[0].iterations[1].get_data_idx()
1071
+ t2_e0_i2_di = wk.tasks.t2.elements[0].iterations[2].get_data_idx()
1072
+ t3_e0_di = wk.tasks.t3.elements[0].get_data_idx()
1073
+
1074
+ t1_e1_di = wk.tasks.t1.elements[1].get_data_idx()
1075
+ t2_e1_i0_di = wk.tasks.t2.elements[1].iterations[0].get_data_idx()
1076
+ t2_e1_i1_di = wk.tasks.t2.elements[1].iterations[1].get_data_idx()
1077
+ t2_e1_i2_di = wk.tasks.t2.elements[1].iterations[2].get_data_idx()
1078
+ t3_e1_di = wk.tasks.t3.elements[1].get_data_idx()
1079
+
1080
+ assert t2_e0_i0_di["inputs.p2"] == t1_e0_di["outputs.p2"]
1081
+ assert t2_e0_i1_di["inputs.p2"] == t2_e0_i0_di["outputs.p2"]
1082
+ assert t2_e0_i2_di["inputs.p2"] == t2_e0_i1_di["outputs.p2"]
1083
+ assert t3_e0_di["inputs.p3"] == t2_e0_i2_di["outputs.p3"]
1084
+
1085
+ assert t2_e1_i0_di["inputs.p2"] == t1_e1_di["outputs.p2"]
1086
+ assert t2_e1_i1_di["inputs.p2"] == t2_e1_i0_di["outputs.p2"]
1087
+ assert t2_e1_i2_di["inputs.p2"] == t2_e1_i1_di["outputs.p2"]
1088
+ assert t3_e1_di["inputs.p3"] == t2_e1_i2_di["outputs.p3"]
1089
+
1090
+
1091
+ def test_add_iteration_updates_downstream_data_idx_loop_output_param_multi_element_to_group(
1092
+ new_null_config, tmp_path: Path
1093
+ ):
1094
+ # loop output (but not iterable) parameter sourced in task downstream of loop - multi
1095
+ # element group
1096
+ s1, s2 = make_schemas(
1097
+ ({"p1": None}, ("p2",), "t1"),
1098
+ (
1099
+ {"p2": None},
1100
+ (
1101
+ "p2",
1102
+ "p3",
1103
+ ),
1104
+ "t2",
1105
+ ),
1106
+ )
1107
+ s3 = hf.TaskSchema(
1108
+ objective="t3",
1109
+ inputs=[hf.SchemaInput("p3", group="all")],
1110
+ outputs=[hf.SchemaOutput("p4")],
994
1111
  actions=[
995
1112
  hf.Action(
996
1113
  commands=[
997
1114
  hf.Command(
998
- "Write-Output (<<parameter:p1>> + 100)",
999
- stdout="<<int(parameter:p1)>>",
1115
+ command="echo $((<<sum(parameter:p3)>>))",
1116
+ stdout="<<parameter:p4>>",
1000
1117
  )
1001
1118
  ],
1002
- ),
1119
+ )
1003
1120
  ],
1004
1121
  )
1005
- with pytest.raises(NotImplementedError):
1006
- hf.Workflow.from_template_data(
1007
- template_name="test_loop",
1008
- path=tmp_path,
1009
- tasks=[
1010
- hf.Task(schema=ts1, inputs={"p1": 101}),
1011
- hf.Task(schema=ts1),
1012
- hf.Task(schema=ts1),
1013
- ],
1014
- loops=[
1015
- hf.Loop(name="my_loop", tasks=[1], num_iterations=2),
1016
- ],
1017
- )
1122
+ tasks = [
1123
+ hf.Task(s1, sequences=[hf.ValueSequence("inputs.p1", values=[100, 101])]),
1124
+ hf.Task(s2, groups=[hf.ElementGroup(name="all")]),
1125
+ hf.Task(s3),
1126
+ ]
1127
+ loops = [hf.Loop(tasks=[1], num_iterations=3)]
1128
+ wk = hf.Workflow.from_template_data(
1129
+ template_name="loop_param_update",
1130
+ tasks=tasks,
1131
+ loops=loops,
1132
+ path=tmp_path,
1133
+ )
1134
+ assert wk.tasks.t1.num_elements == 2
1135
+ assert wk.tasks.t2.num_elements == 2
1136
+ assert wk.tasks.t3.num_elements == 1
1137
+
1138
+ t1_e0_di = wk.tasks.t1.elements[0].get_data_idx()
1139
+ t2_e0_i0_di = wk.tasks.t2.elements[0].iterations[0].get_data_idx()
1140
+ t2_e0_i1_di = wk.tasks.t2.elements[0].iterations[1].get_data_idx()
1141
+ t2_e0_i2_di = wk.tasks.t2.elements[0].iterations[2].get_data_idx()
1142
+
1143
+ t1_e1_di = wk.tasks.t1.elements[1].get_data_idx()
1144
+ t2_e1_i0_di = wk.tasks.t2.elements[1].iterations[0].get_data_idx()
1145
+ t2_e1_i1_di = wk.tasks.t2.elements[1].iterations[1].get_data_idx()
1146
+ t2_e1_i2_di = wk.tasks.t2.elements[1].iterations[2].get_data_idx()
1147
+
1148
+ t3_e0_di = wk.tasks.t3.elements[0].get_data_idx()
1149
+
1150
+ assert t2_e0_i0_di["inputs.p2"] == t1_e0_di["outputs.p2"]
1151
+ assert t2_e0_i1_di["inputs.p2"] == t2_e0_i0_di["outputs.p2"]
1152
+ assert t2_e0_i2_di["inputs.p2"] == t2_e0_i1_di["outputs.p2"]
1153
+
1154
+ assert t2_e1_i0_di["inputs.p2"] == t1_e1_di["outputs.p2"]
1155
+ assert t2_e1_i1_di["inputs.p2"] == t2_e1_i0_di["outputs.p2"]
1156
+ assert t2_e1_i2_di["inputs.p2"] == t2_e1_i1_di["outputs.p2"]
1157
+
1158
+ assert t3_e0_di["inputs.p3"] == [t2_e0_i2_di["outputs.p3"], t2_e1_i2_di["outputs.p3"]]
1159
+
1160
+
1161
+ def test_add_iteration_updates_downstream_data_idx_loop_iterable_param(
1162
+ new_null_config, tmp_path: Path
1163
+ ):
1164
+ # loop iterable parameter sourced in task downstream of loop:
1165
+ s1, s2, s3 = make_schemas(
1166
+ ({"p1": None}, ("p2",), "t1"),
1167
+ ({"p2": None}, ("p2",), "t2"),
1168
+ ({"p2": None}, ("p3",), "t3"),
1169
+ )
1170
+ tasks = [
1171
+ hf.Task(s1, inputs={"p1": 100}),
1172
+ hf.Task(s2),
1173
+ hf.Task(s3),
1174
+ ]
1175
+ loops = [hf.Loop(tasks=[1], num_iterations=3)]
1176
+ wk = hf.Workflow.from_template_data(
1177
+ template_name="loop_param_update",
1178
+ tasks=tasks,
1179
+ loops=loops,
1180
+ path=tmp_path,
1181
+ )
1182
+ t1_di = wk.tasks.t1.elements[0].get_data_idx()
1183
+ t2_i0_di = wk.tasks.t2.elements[0].iterations[0].get_data_idx()
1184
+ t2_i1_di = wk.tasks.t2.elements[0].iterations[1].get_data_idx()
1185
+ t2_i2_di = wk.tasks.t2.elements[0].iterations[2].get_data_idx()
1186
+ t3_di = wk.tasks.t3.elements[0].get_data_idx()
1187
+
1188
+ # final task should get its input from the final iteration of the second task
1189
+ assert t2_i0_di["inputs.p2"] == t1_di["outputs.p2"]
1190
+ assert t2_i1_di["inputs.p2"] == t2_i0_di["outputs.p2"]
1191
+ assert t2_i2_di["inputs.p2"] == t2_i1_di["outputs.p2"]
1192
+ assert t3_di["inputs.p2"] == t2_i2_di["outputs.p2"]
1193
+
1194
+
1195
+ def test_add_iteration_updates_downstream_data_idx_loop_iterable_param_multi_element(
1196
+ new_null_config, tmp_path: Path
1197
+ ):
1198
+ # loop iterable parameter sourced in task downstream of loop - multi element:
1199
+ s1, s2, s3 = make_schemas(
1200
+ ({"p1": None}, ("p2",), "t1"),
1201
+ ({"p2": None}, ("p2",), "t2"),
1202
+ ({"p2": None}, ("p3",), "t3"),
1203
+ )
1204
+ tasks = [
1205
+ hf.Task(s1, sequences=[hf.ValueSequence("inputs.p1", values=[100, 101])]),
1206
+ hf.Task(s2),
1207
+ hf.Task(s3),
1208
+ ]
1209
+ loops = [hf.Loop(tasks=[1], num_iterations=3)]
1210
+ wk = hf.Workflow.from_template_data(
1211
+ template_name="loop_param_update",
1212
+ tasks=tasks,
1213
+ loops=loops,
1214
+ path=tmp_path,
1215
+ )
1216
+ t1_e0_di = wk.tasks.t1.elements[0].get_data_idx()
1217
+ t2_e0_i0_di = wk.tasks.t2.elements[0].iterations[0].get_data_idx()
1218
+ t2_e0_i1_di = wk.tasks.t2.elements[0].iterations[1].get_data_idx()
1219
+ t2_e0_i2_di = wk.tasks.t2.elements[0].iterations[2].get_data_idx()
1220
+ t3_e0_di = wk.tasks.t3.elements[0].get_data_idx()
1221
+
1222
+ t1_e1_di = wk.tasks.t1.elements[1].get_data_idx()
1223
+ t2_e1_i0_di = wk.tasks.t2.elements[1].iterations[0].get_data_idx()
1224
+ t2_e1_i1_di = wk.tasks.t2.elements[1].iterations[1].get_data_idx()
1225
+ t2_e1_i2_di = wk.tasks.t2.elements[1].iterations[2].get_data_idx()
1226
+ t3_e1_di = wk.tasks.t3.elements[1].get_data_idx()
1227
+
1228
+ # final task should get its input from the final iteration of the second task
1229
+ assert t2_e0_i0_di["inputs.p2"] == t1_e0_di["outputs.p2"]
1230
+ assert t2_e0_i1_di["inputs.p2"] == t2_e0_i0_di["outputs.p2"]
1231
+ assert t2_e0_i2_di["inputs.p2"] == t2_e0_i1_di["outputs.p2"]
1232
+ assert t3_e0_di["inputs.p2"] == t2_e0_i2_di["outputs.p2"]
1233
+
1234
+ assert t2_e1_i0_di["inputs.p2"] == t1_e1_di["outputs.p2"]
1235
+ assert t2_e1_i1_di["inputs.p2"] == t2_e1_i0_di["outputs.p2"]
1236
+ assert t2_e1_i2_di["inputs.p2"] == t2_e1_i1_di["outputs.p2"]
1237
+ assert t3_e1_di["inputs.p2"] == t2_e1_i2_di["outputs.p2"]
1238
+
1239
+
1240
+ def test_add_iteration_updates_downstream_data_idx_loop_iterable_param_multi_element_to_group(
1241
+ new_null_config, tmp_path: Path
1242
+ ):
1243
+ # loop iterable parameter sourced in task downstream of loop - multi element:
1244
+ s1, s2 = make_schemas(
1245
+ ({"p1": None}, ("p2",), "t1"),
1246
+ ({"p2": None}, ("p2",), "t2"),
1247
+ )
1248
+
1249
+ s3 = hf.TaskSchema(
1250
+ objective="t3",
1251
+ inputs=[hf.SchemaInput("p2", group="all")],
1252
+ outputs=[hf.SchemaOutput("p3")],
1253
+ actions=[
1254
+ hf.Action(
1255
+ commands=[
1256
+ hf.Command(
1257
+ command="echo $((<<sum(parameter:p2)>>))",
1258
+ stdout="<<parameter:p3>>",
1259
+ )
1260
+ ],
1261
+ )
1262
+ ],
1263
+ )
1264
+ tasks = [
1265
+ hf.Task(s1, sequences=[hf.ValueSequence("inputs.p1", values=[100, 101])]),
1266
+ hf.Task(s2, groups=[hf.ElementGroup(name="all")]),
1267
+ hf.Task(s3),
1268
+ ]
1269
+ loops = [hf.Loop(tasks=[1], num_iterations=3)]
1270
+ wk = hf.Workflow.from_template_data(
1271
+ template_name="loop_param_update",
1272
+ tasks=tasks,
1273
+ loops=loops,
1274
+ path=tmp_path,
1275
+ )
1276
+ assert wk.tasks.t1.num_elements == 2
1277
+ assert wk.tasks.t2.num_elements == 2
1278
+ assert wk.tasks.t3.num_elements == 1
1279
+
1280
+ t1_e0_di = wk.tasks.t1.elements[0].get_data_idx()
1281
+ t2_e0_i0_di = wk.tasks.t2.elements[0].iterations[0].get_data_idx()
1282
+ t2_e0_i1_di = wk.tasks.t2.elements[0].iterations[1].get_data_idx()
1283
+ t2_e0_i2_di = wk.tasks.t2.elements[0].iterations[2].get_data_idx()
1284
+
1285
+ t1_e1_di = wk.tasks.t1.elements[1].get_data_idx()
1286
+ t2_e1_i0_di = wk.tasks.t2.elements[1].iterations[0].get_data_idx()
1287
+ t2_e1_i1_di = wk.tasks.t2.elements[1].iterations[1].get_data_idx()
1288
+ t2_e1_i2_di = wk.tasks.t2.elements[1].iterations[2].get_data_idx()
1289
+
1290
+ t3_e0_di = wk.tasks.t3.elements[0].get_data_idx()
1291
+
1292
+ assert t2_e0_i0_di["inputs.p2"] == t1_e0_di["outputs.p2"]
1293
+ assert t2_e0_i1_di["inputs.p2"] == t2_e0_i0_di["outputs.p2"]
1294
+ assert t2_e0_i2_di["inputs.p2"] == t2_e0_i1_di["outputs.p2"]
1018
1295
 
1296
+ assert t2_e1_i0_di["inputs.p2"] == t1_e1_di["outputs.p2"]
1297
+ assert t2_e1_i1_di["inputs.p2"] == t2_e1_i0_di["outputs.p2"]
1298
+ assert t2_e1_i2_di["inputs.p2"] == t2_e1_i1_di["outputs.p2"]
1299
+
1300
+ assert t3_e0_di["inputs.p2"] == [t2_e0_i2_di["outputs.p2"], t2_e1_i2_di["outputs.p2"]]
1301
+
1302
+
1303
+ def test_add_iteration_correct_downstream_data_idx_iterable_param_downstream_adjacent_loop(
1304
+ null_config, tmp_path: Path
1305
+ ):
1306
+
1307
+ s1, s2, s3 = make_schemas(
1308
+ ({"p1": None}, ("p2",), "t1"),
1309
+ ({"p2": None}, ("p2",), "t2"),
1310
+ ({"p2": None}, ("p2",), "t3"),
1311
+ )
1312
+ tasks = [
1313
+ hf.Task(s1, inputs={"p1": 100}),
1314
+ hf.Task(s2),
1315
+ hf.Task(s3),
1316
+ ]
1317
+
1318
+ # downstream loop added after upstream loop:
1319
+ loops = [
1320
+ hf.Loop(tasks=[1], num_iterations=2),
1321
+ hf.Loop(tasks=[2], num_iterations=2),
1322
+ ]
1323
+
1324
+ wk = hf.Workflow.from_template_data(
1325
+ template_name="loop_param_update",
1326
+ tasks=tasks,
1327
+ loops=loops,
1328
+ path=tmp_path,
1329
+ )
1330
+
1331
+ t1_di = wk.tasks.t1.elements[0].get_data_idx()
1332
+ t2_i0_di = wk.tasks.t2.elements[0].iterations[0].get_data_idx()
1333
+ t2_i1_di = wk.tasks.t2.elements[0].iterations[1].get_data_idx()
1334
+ t3_i0_di = wk.tasks.t3.elements[0].iterations[0].get_data_idx()
1335
+ t3_i1_di = wk.tasks.t3.elements[0].iterations[1].get_data_idx()
1336
+
1337
+ # final task should get its input from the final iteration of the second task
1338
+ assert t2_i0_di["inputs.p2"] == t1_di["outputs.p2"]
1339
+ assert t2_i1_di["inputs.p2"] == t2_i0_di["outputs.p2"]
1340
+ assert t3_i0_di["inputs.p2"] == t2_i1_di["outputs.p2"]
1341
+ assert t3_i1_di["inputs.p2"] == t3_i0_di["outputs.p2"]
1342
+
1343
+ t1_iter_di = wk.tasks.t1.elements[0].iterations[0].data_idx
1344
+ t2_i0_iter_di = wk.tasks.t2.elements[0].iterations[0].data_idx
1345
+ t2_i1_iter_di = wk.tasks.t2.elements[0].iterations[1].data_idx
1346
+ t3_i0_iter_di = wk.tasks.t3.elements[0].iterations[0].data_idx
1347
+ t3_i1_iter_di = wk.tasks.t3.elements[0].iterations[1].data_idx
1348
+
1349
+ assert t2_i0_iter_di["inputs.p2"] == t1_iter_di["outputs.p2"]
1350
+ assert t2_i1_iter_di["inputs.p2"] == t2_i0_iter_di["outputs.p2"]
1351
+ assert t3_i0_iter_di["inputs.p2"] == t2_i1_iter_di["outputs.p2"]
1352
+ assert t3_i1_iter_di["inputs.p2"] == t3_i0_iter_di["outputs.p2"]
1353
+
1354
+
1355
+ def test_add_iteration_correct_downstream_data_idx_iterable_param_downstream_adjacent_loop_added_before(
1356
+ null_config, tmp_path: Path
1357
+ ):
1358
+ s1, s2, s3 = make_schemas(
1359
+ ({"p1": None}, ("p2",), "t1"),
1360
+ ({"p2": None}, ("p2",), "t2"),
1361
+ ({"p2": None}, ("p2",), "t3"),
1362
+ )
1363
+ tasks = [
1364
+ hf.Task(s1, inputs={"p1": 100}),
1365
+ hf.Task(s2),
1366
+ hf.Task(s3),
1367
+ ]
1368
+
1369
+ # upstream loop added after downstream loop:
1370
+ loops = [
1371
+ hf.Loop(tasks=[2], num_iterations=2),
1372
+ hf.Loop(tasks=[1], num_iterations=2),
1373
+ ]
1374
+
1375
+ wk = hf.Workflow.from_template_data(
1376
+ template_name="loop_param_update",
1377
+ tasks=tasks,
1378
+ loops=loops,
1379
+ path=tmp_path,
1380
+ )
1381
+
1382
+ t1_di = wk.tasks.t1.elements[0].get_data_idx()
1383
+ t2_i0_di = wk.tasks.t2.elements[0].iterations[0].get_data_idx()
1384
+ t2_i1_di = wk.tasks.t2.elements[0].iterations[1].get_data_idx()
1385
+ t3_i0_di = wk.tasks.t3.elements[0].iterations[0].get_data_idx()
1386
+ t3_i1_di = wk.tasks.t3.elements[0].iterations[1].get_data_idx()
1387
+
1388
+ # final task should get its input from the final iteration of the second task
1389
+ assert t2_i0_di["inputs.p2"] == t1_di["outputs.p2"]
1390
+ assert t2_i1_di["inputs.p2"] == t2_i0_di["outputs.p2"]
1391
+ assert t3_i0_di["inputs.p2"] == t2_i1_di["outputs.p2"]
1392
+ assert t3_i1_di["inputs.p2"] == t3_i0_di["outputs.p2"]
1393
+
1394
+ t1_iter_di = wk.tasks.t1.elements[0].iterations[0].data_idx
1395
+ t2_i0_iter_di = wk.tasks.t2.elements[0].iterations[0].data_idx
1396
+ t2_i1_iter_di = wk.tasks.t2.elements[0].iterations[1].data_idx
1397
+ t3_i0_iter_di = wk.tasks.t3.elements[0].iterations[0].data_idx
1398
+ t3_i1_iter_di = wk.tasks.t3.elements[0].iterations[1].data_idx
1399
+
1400
+ assert t2_i0_iter_di["inputs.p2"] == t1_iter_di["outputs.p2"]
1401
+ assert t2_i1_iter_di["inputs.p2"] == t2_i0_iter_di["outputs.p2"]
1402
+ assert t3_i0_iter_di["inputs.p2"] == t2_i1_iter_di["outputs.p2"]
1403
+ assert t3_i1_iter_di["inputs.p2"] == t3_i0_iter_di["outputs.p2"]
1404
+
1405
+
1406
+ def test_add_iteration_correct_downstream_data_idx_iterable_param_downstream_multi_task_adjacent_loop_added_before(
1407
+ null_config, tmp_path: Path
1408
+ ):
1409
+ s1, s2, s3, s4 = make_schemas(
1410
+ ({"p1": None}, ("p2",), "t1"),
1411
+ ({"p2": None}, ("p2",), "t2"),
1412
+ ({"p2": None}, ("p2",), "t3"),
1413
+ ({"p2": None}, ("p2",), "t4"),
1414
+ )
1415
+ tasks = [
1416
+ hf.Task(s1, inputs={"p1": 100}),
1417
+ hf.Task(s2),
1418
+ hf.Task(s3),
1419
+ hf.Task(s4),
1420
+ ]
1421
+
1422
+ # upstream loop added after downstream loop:
1423
+ loops = [
1424
+ hf.Loop(tasks=[2, 3], num_iterations=2),
1425
+ hf.Loop(tasks=[1], num_iterations=2),
1426
+ ]
1427
+ wk = hf.Workflow.from_template_data(
1428
+ template_name="loop_param_update",
1429
+ tasks=tasks,
1430
+ loops=loops,
1431
+ path=tmp_path,
1432
+ )
1433
+
1434
+ t1_di = wk.tasks.t1.elements[0].get_data_idx()
1435
+ t2_i0_di = wk.tasks.t2.elements[0].iterations[0].get_data_idx()
1436
+ t2_i1_di = wk.tasks.t2.elements[0].iterations[1].get_data_idx()
1437
+ t3_i0_di = wk.tasks.t3.elements[0].iterations[0].get_data_idx()
1438
+ t3_i1_di = wk.tasks.t3.elements[0].iterations[1].get_data_idx()
1439
+ t4_i0_di = wk.tasks.t4.elements[0].iterations[0].get_data_idx()
1440
+ t4_i1_di = wk.tasks.t4.elements[0].iterations[1].get_data_idx()
1441
+
1442
+ assert t2_i0_di["inputs.p2"] == t1_di["outputs.p2"]
1443
+ assert t2_i1_di["inputs.p2"] == t2_i0_di["outputs.p2"]
1444
+
1445
+ assert t3_i0_di["inputs.p2"] == t2_i1_di["outputs.p2"]
1446
+ assert t3_i1_di["inputs.p2"] == t4_i0_di["outputs.p2"]
1447
+
1448
+ assert t4_i0_di["inputs.p2"] == t3_i0_di["outputs.p2"]
1449
+ assert t4_i1_di["inputs.p2"] == t3_i1_di["outputs.p2"]
1450
+
1451
+ t1_iter_di = wk.tasks.t1.elements[0].iterations[0].data_idx
1452
+ t2_i0_iter_di = wk.tasks.t2.elements[0].iterations[0].data_idx
1453
+ t2_i1_iter_di = wk.tasks.t2.elements[0].iterations[1].data_idx
1454
+ t3_i0_iter_di = wk.tasks.t3.elements[0].iterations[0].data_idx
1455
+ t3_i1_iter_di = wk.tasks.t3.elements[0].iterations[1].data_idx
1456
+ t4_i0_iter_di = wk.tasks.t4.elements[0].iterations[0].data_idx
1457
+ t4_i1_iter_di = wk.tasks.t4.elements[0].iterations[1].data_idx
1458
+
1459
+ assert t2_i0_iter_di["inputs.p2"] == t1_iter_di["outputs.p2"]
1460
+ assert t2_i1_iter_di["inputs.p2"] == t2_i0_iter_di["outputs.p2"]
1461
+ assert t3_i0_iter_di["inputs.p2"] == t2_i1_iter_di["outputs.p2"]
1462
+ assert t3_i1_iter_di["inputs.p2"] == t4_i0_iter_di["outputs.p2"]
1463
+ assert t4_i0_iter_di["inputs.p2"] == t3_i0_iter_di["outputs.p2"]
1464
+ assert t4_i1_iter_di["inputs.p2"] == t3_i1_iter_di["outputs.p2"]
1465
+
1466
+
1467
+ def test_nested_loops_with_downstream_updates_iteration_pathway(
1468
+ null_config, tmp_path: Path
1469
+ ):
1470
+ s1, s2, s3 = make_schemas(
1471
+ ({"p1": None}, ("p2",), "t1"),
1472
+ ({"p2": None}, ("p2",), "t2"),
1473
+ ({"p2": None}, ("p1",), "t3"),
1474
+ )
1475
+ tasks = [
1476
+ hf.Task(s1, inputs={"p1": 100}),
1477
+ hf.Task(s2),
1478
+ hf.Task(s3),
1479
+ ]
1480
+
1481
+ loops = [
1482
+ hf.Loop(name="inner", tasks=[1], num_iterations=2),
1483
+ hf.Loop(name="outer", tasks=[0, 1, 2], num_iterations=2),
1484
+ ]
1485
+
1486
+ # when adding the inner loop iterations, the data index of the downstream task t3
1487
+ # must be updated to use the newly-added output. This should happen once before the
1488
+ # outer loop is added, and once again when adding the inner loop iteration as part of
1489
+ # adding the outer loop's second iteration!
1490
+
1491
+ wk = hf.Workflow.from_template_data(
1492
+ template_name="loop_param_update_nested",
1493
+ tasks=tasks,
1494
+ loops=loops,
1495
+ path=tmp_path,
1496
+ )
1497
+
1498
+ pathway = wk.get_iteration_task_pathway(ret_data_idx=True)
1499
+
1500
+ # task insert IDs:
1501
+ assert [i[0] for i in pathway] == [0, 1, 1, 2, 0, 1, 1, 2]
1502
+
1503
+ # loop indices:
1504
+ assert [i[1] for i in pathway] == [
1505
+ {"outer": 0},
1506
+ {"outer": 0, "inner": 0},
1507
+ {"outer": 0, "inner": 1},
1508
+ {"outer": 0},
1509
+ {"outer": 1},
1510
+ {"outer": 1, "inner": 0},
1511
+ {"outer": 1, "inner": 1},
1512
+ {"outer": 1},
1513
+ ]
1514
+
1515
+ # flow of parameter p1/p2 (element zero):
1516
+ assert pathway[0][2][0]["outputs.p2"] == pathway[1][2][0]["inputs.p2"]
1517
+ assert pathway[1][2][0]["outputs.p2"] == pathway[2][2][0]["inputs.p2"]
1518
+ assert pathway[2][2][0]["outputs.p2"] == pathway[3][2][0]["inputs.p2"]
1519
+ assert pathway[3][2][0]["outputs.p1"] == pathway[4][2][0]["inputs.p1"]
1520
+ assert pathway[4][2][0]["outputs.p2"] == pathway[5][2][0]["inputs.p2"]
1521
+ assert pathway[5][2][0]["outputs.p2"] == pathway[6][2][0]["inputs.p2"]
1522
+ assert pathway[6][2][0]["outputs.p2"] == pathway[7][2][0]["inputs.p2"]
1523
+
1524
+
1525
+ def test_multi_task_loop_with_downstream_updates_iteration_pathway(
1526
+ null_config, tmp_path: Path
1527
+ ):
1528
+ s1, s2, s3, s4 = make_schemas(
1529
+ ({"p1": None}, ("p2",), "t1"),
1530
+ ({"p2": None}, ("p2",), "t2"),
1531
+ ({"p2": None}, ("p2",), "t3"),
1532
+ ({"p2": None}, ("p3",), "t4"),
1533
+ )
1534
+ tasks = [
1535
+ hf.Task(s1, inputs={"p1": 100}),
1536
+ hf.Task(s2),
1537
+ hf.Task(s3),
1538
+ hf.Task(s4),
1539
+ ]
1540
+
1541
+ loops = [
1542
+ hf.Loop(tasks=[1, 2], num_iterations=2),
1543
+ ]
1544
+
1545
+ wk = hf.Workflow.from_template_data(
1546
+ template_name="loop_param_update",
1547
+ tasks=tasks,
1548
+ loops=loops,
1549
+ path=tmp_path,
1550
+ )
1551
+
1552
+ pathway = wk.get_iteration_task_pathway(ret_data_idx=True)
1553
+
1554
+ # task insert IDs:
1555
+ assert [i[0] for i in pathway] == [0, 1, 2, 1, 2, 3]
1556
+
1557
+ # loop indices:
1558
+ assert [i[1] for i in pathway] == [
1559
+ {},
1560
+ {"loop_0": 0},
1561
+ {"loop_0": 0},
1562
+ {"loop_0": 1},
1563
+ {"loop_0": 1},
1564
+ {},
1565
+ ]
1566
+
1567
+ # flow of parameter p2 (element zero):
1568
+ assert pathway[0][2][0]["outputs.p2"] == pathway[1][2][0]["inputs.p2"]
1569
+ assert pathway[1][2][0]["outputs.p2"] == pathway[2][2][0]["inputs.p2"]
1570
+ assert pathway[2][2][0]["outputs.p2"] == pathway[3][2][0]["inputs.p2"]
1571
+ assert pathway[3][2][0]["outputs.p2"] == pathway[4][2][0]["inputs.p2"]
1572
+ assert pathway[4][2][0]["outputs.p2"] == pathway[5][2][0]["inputs.p2"]
1573
+
1574
+
1575
+ def test_multi_nested_loops_with_downstream_updates_iteration_pathway(
1576
+ null_config, tmp_path: Path
1577
+ ):
1578
+
1579
+ s1, s2, s3, s4, s5, s6 = make_schemas(
1580
+ ({"p1": None}, ("p2",), "t1"),
1581
+ ({"p2": None}, ("p2",), "t2"),
1582
+ ({"p2": None}, ("p2",), "t3"),
1583
+ ({"p2": None}, ("p2",), "t4"),
1584
+ ({"p2": None}, ("p1",), "t5"),
1585
+ ({"p1": None}, ("p3",), "t6"),
1586
+ )
1587
+ tasks = [
1588
+ hf.Task(s1, inputs={"p1": 100}),
1589
+ hf.Task(s2),
1590
+ hf.Task(s3),
1591
+ hf.Task(s4),
1592
+ hf.Task(s5),
1593
+ hf.Task(s6),
1594
+ ]
1595
+
1596
+ loops = [
1597
+ hf.Loop(name="inner", tasks=[1], num_iterations=2),
1598
+ hf.Loop(name="middle", tasks=[1, 2], num_iterations=2),
1599
+ hf.Loop(name="outer", tasks=[0, 1, 2, 3, 4], num_iterations=2),
1600
+ ]
1601
+
1602
+ wk = hf.Workflow.from_template_data(
1603
+ template_name="loop_param_update_nested",
1604
+ tasks=tasks,
1605
+ loops=loops,
1606
+ path=tmp_path,
1607
+ )
1608
+
1609
+ pathway = wk.get_iteration_task_pathway(ret_data_idx=True)
1610
+
1611
+ # task insert IDs:
1612
+ assert [i[0] for i in pathway] == [
1613
+ 0,
1614
+ 1,
1615
+ 1,
1616
+ 2,
1617
+ 1,
1618
+ 1,
1619
+ 2,
1620
+ 3,
1621
+ 4,
1622
+ 0,
1623
+ 1,
1624
+ 1,
1625
+ 2,
1626
+ 1,
1627
+ 1,
1628
+ 2,
1629
+ 3,
1630
+ 4,
1631
+ 5,
1632
+ ]
1633
+
1634
+ # loop indices:
1635
+ assert [i[1] for i in pathway] == [
1636
+ {"outer": 0},
1637
+ {"outer": 0, "middle": 0, "inner": 0},
1638
+ {"outer": 0, "middle": 0, "inner": 1},
1639
+ {"outer": 0, "middle": 0},
1640
+ {"outer": 0, "middle": 1, "inner": 0},
1641
+ {"outer": 0, "middle": 1, "inner": 1},
1642
+ {"outer": 0, "middle": 1},
1643
+ {"outer": 0},
1644
+ {"outer": 0},
1645
+ {"outer": 1},
1646
+ {"outer": 1, "middle": 0, "inner": 0},
1647
+ {"outer": 1, "middle": 0, "inner": 1},
1648
+ {"outer": 1, "middle": 0},
1649
+ {"outer": 1, "middle": 1, "inner": 0},
1650
+ {"outer": 1, "middle": 1, "inner": 1},
1651
+ {"outer": 1, "middle": 1},
1652
+ {"outer": 1},
1653
+ {"outer": 1},
1654
+ {},
1655
+ ]
1656
+
1657
+ # flow of parameter p1/p2 (element zero):
1658
+ assert pathway[0][2][0]["outputs.p2"] == pathway[1][2][0]["inputs.p2"]
1659
+ assert pathway[1][2][0]["outputs.p2"] == pathway[2][2][0]["inputs.p2"]
1660
+ assert pathway[2][2][0]["outputs.p2"] == pathway[3][2][0]["inputs.p2"]
1661
+ assert pathway[3][2][0]["outputs.p2"] == pathway[4][2][0]["inputs.p2"]
1662
+ assert pathway[4][2][0]["outputs.p2"] == pathway[5][2][0]["inputs.p2"]
1663
+ assert pathway[5][2][0]["outputs.p2"] == pathway[6][2][0]["inputs.p2"]
1664
+ assert pathway[6][2][0]["outputs.p2"] == pathway[7][2][0]["inputs.p2"]
1665
+ assert pathway[7][2][0]["outputs.p2"] == pathway[8][2][0]["inputs.p2"]
1666
+ assert pathway[8][2][0]["outputs.p1"] == pathway[9][2][0]["inputs.p1"]
1667
+ assert pathway[9][2][0]["outputs.p2"] == pathway[10][2][0]["inputs.p2"]
1668
+ assert pathway[10][2][0]["outputs.p2"] == pathway[11][2][0]["inputs.p2"]
1669
+ assert pathway[11][2][0]["outputs.p2"] == pathway[12][2][0]["inputs.p2"]
1670
+ assert pathway[12][2][0]["outputs.p2"] == pathway[13][2][0]["inputs.p2"]
1671
+ assert pathway[13][2][0]["outputs.p2"] == pathway[14][2][0]["inputs.p2"]
1672
+ assert pathway[14][2][0]["outputs.p2"] == pathway[15][2][0]["inputs.p2"]
1673
+ assert pathway[15][2][0]["outputs.p2"] == pathway[16][2][0]["inputs.p2"]
1674
+ assert pathway[16][2][0]["outputs.p2"] == pathway[17][2][0]["inputs.p2"]
1675
+ assert pathway[17][2][0]["outputs.p1"] == pathway[18][2][0]["inputs.p1"]
1676
+
1677
+
1678
+ def test_add_iteration_updates_downstream_data_idx_loop_output_param_including_task_input_sources(
1679
+ new_null_config, tmp_path: Path
1680
+ ):
1681
+ # task `t3` input `p1` has `InputSource.task(task_ref=1, task_source_type="input")`,
1682
+ # so `t3` elements needs to have data indices updated, since task `t2` (i.e.
1683
+ # `task_ref=1`) will have had its data indices updated:
1684
+ s1, s2, s3 = make_schemas(
1685
+ ({"p1": None}, ("p1",), "t1"),
1686
+ ({"p1": None}, ("p2",), "t2"),
1687
+ ({"p1": None, "p2": None}, ("p3",), "t3"),
1688
+ )
1689
+ tasks = [
1690
+ hf.Task(s1, inputs={"p1": 100}),
1691
+ hf.Task(s2),
1692
+ hf.Task(s3),
1693
+ ]
1694
+ loops = [hf.Loop(tasks=[0], num_iterations=2)]
1695
+
1696
+ wk = hf.Workflow.from_template_data(
1697
+ template_name="loop_param_update_task_input_source",
1698
+ tasks=tasks,
1699
+ loops=loops,
1700
+ path=tmp_path,
1701
+ )
1702
+
1703
+ t1_i0_di = wk.tasks.t1.elements[0].iterations[0].get_data_idx()
1704
+ t1_i1_di = wk.tasks.t1.elements[0].iterations[1].get_data_idx()
1705
+ t2_di = wk.tasks.t2.elements[0].get_data_idx()
1706
+ t3_di = wk.tasks.t3.elements[0].get_data_idx()
1707
+
1708
+ assert t1_i0_di["outputs.p1"] == t1_i1_di["inputs.p1"]
1709
+ assert t1_i1_di["outputs.p1"] == t2_di["inputs.p1"]
1710
+ assert t1_i1_di["outputs.p1"] == t3_di["inputs.p1"]
1711
+ assert t2_di["outputs.p2"] == t3_di["inputs.p2"]
1712
+
1713
+
1714
+ def test_add_iteration_updates_downstream_data_idx_loop_output_param_including_task_input_sources_twice(
1715
+ new_null_config, tmp_path: Path
1716
+ ):
1717
+ # tasks `t3/t4` inputs `p1` have `InputSource.task(task_ref=1/2, task_source_type="input")`,
1718
+ # so `t3/t4` elements needs to have data indices updated, since task `t2/t3` (i.e.
1719
+ # `task_ref=1/2`) will have had their data indices updated:
1720
+
1721
+ s1, s2, s3, s4 = make_schemas(
1722
+ ({"p1": None}, ("p1",), "t1"),
1723
+ ({"p1": None}, ("p2",), "t2"),
1724
+ ({"p1": None, "p2": None}, ("p3",), "t3"),
1725
+ ({"p1": None, "p3": None}, ("p4",), "t4"),
1726
+ )
1727
+ tasks = [
1728
+ hf.Task(s1, inputs={"p1": 100}),
1729
+ hf.Task(s2),
1730
+ hf.Task(s3),
1731
+ hf.Task(s4),
1732
+ ]
1733
+ loops = [hf.Loop(tasks=[0], num_iterations=2)]
1734
+ wk = hf.Workflow.from_template_data(
1735
+ template_name="loop_param_update_task_input_source",
1736
+ tasks=tasks,
1737
+ loops=loops,
1738
+ path=tmp_path,
1739
+ )
1740
+ t1_i0_di = wk.tasks.t1.elements[0].iterations[0].get_data_idx()
1741
+ t1_i1_di = wk.tasks.t1.elements[0].iterations[1].get_data_idx()
1742
+ t2_di = wk.tasks.t2.elements[0].get_data_idx()
1743
+ t3_di = wk.tasks.t3.elements[0].get_data_idx()
1744
+ t4_di = wk.tasks.t4.elements[0].get_data_idx()
1019
1745
 
1020
- def test_adjacent_loops_iteration_pathway(null_config, tmp_path):
1746
+ assert t1_i0_di["outputs.p1"] == t1_i1_di["inputs.p1"]
1747
+ assert t1_i1_di["outputs.p1"] == t2_di["inputs.p1"]
1748
+ assert t1_i1_di["outputs.p1"] == t3_di["inputs.p1"]
1749
+ assert t1_i1_di["outputs.p1"] == t4_di["inputs.p1"]
1750
+ assert t2_di["outputs.p2"] == t3_di["inputs.p2"]
1751
+
1752
+
1753
+ def test_add_iteration_updates_downstream_data_idx_loop_output_param_including_task_input_sources_thrice(
1754
+ new_null_config, tmp_path: Path
1755
+ ):
1756
+ # tasks `t3/t4/t5` inputs `p1` have `InputSource.task(task_ref=1/2/3, task_source_type="input")`,
1757
+ # so `t3/t4/t5` elements needs to have data indices updated, since task `t2/t3/t4` (i.e.
1758
+ # `task_ref=1/2/3`) will have had their data indices updated:
1759
+
1760
+ s1, s2, s3, s4, s5 = make_schemas(
1761
+ ({"p1": None}, ("p1",), "t1"),
1762
+ ({"p1": None}, ("p2",), "t2"),
1763
+ ({"p1": None, "p2": None}, ("p3",), "t3"),
1764
+ ({"p1": None, "p3": None}, ("p4",), "t4"),
1765
+ ({"p1": None, "p4": None}, ("p5",), "t5"),
1766
+ )
1767
+ tasks = [
1768
+ hf.Task(s1, inputs={"p1": 100}),
1769
+ hf.Task(s2),
1770
+ hf.Task(s3),
1771
+ hf.Task(s4),
1772
+ hf.Task(s5),
1773
+ ]
1774
+ loops = [hf.Loop(tasks=[0], num_iterations=2)]
1775
+ wk = hf.Workflow.from_template_data(
1776
+ template_name="loop_param_update_task_input_source",
1777
+ tasks=tasks,
1778
+ loops=loops,
1779
+ path=tmp_path,
1780
+ )
1781
+ t1_i0_di = wk.tasks.t1.elements[0].iterations[0].get_data_idx()
1782
+ t1_i1_di = wk.tasks.t1.elements[0].iterations[1].get_data_idx()
1783
+ t2_di = wk.tasks.t2.elements[0].get_data_idx()
1784
+ t3_di = wk.tasks.t3.elements[0].get_data_idx()
1785
+ t4_di = wk.tasks.t4.elements[0].get_data_idx()
1786
+ t5_di = wk.tasks.t5.elements[0].get_data_idx()
1787
+
1788
+ assert t1_i0_di["outputs.p1"] == t1_i1_di["inputs.p1"]
1789
+ assert t1_i1_di["outputs.p1"] == t2_di["inputs.p1"]
1790
+ assert t1_i1_di["outputs.p1"] == t3_di["inputs.p1"]
1791
+ assert t1_i1_di["outputs.p1"] == t4_di["inputs.p1"]
1792
+ assert t1_i1_di["outputs.p1"] == t5_di["inputs.p1"]
1793
+ assert t2_di["outputs.p2"] == t3_di["inputs.p2"]
1794
+
1795
+
+ def test_add_iteration_updates_downstream_data_idx_loop_output_param_including_task_input_sources_thrice_multi_element(
+     new_null_config, tmp_path: Path
+ ):
+     # the `p1` inputs of tasks `t3/t4/t5` have `InputSource.task(task_ref=1/2/3, task_source_type="input")`,
+     # so `t3/t4/t5` elements need to have data indices updated, since tasks `t2/t3/t4` (i.e.
+     # `task_ref=1/2/3`) will have had their data indices updated:
+
+     s1, s2, s3, s4, s5 = make_schemas(
+         ({"p1": None}, ("p1",), "t1"),
+         ({"p1": None}, ("p2",), "t2"),
+         ({"p1": None, "p2": None}, ("p3",), "t3"),
+         ({"p1": None, "p3": None}, ("p4",), "t4"),
+         ({"p1": None, "p4": None}, ("p5",), "t5"),
+     )
+     tasks = [
+         hf.Task(s1, inputs={"p1": 100}, repeats=2),
+         hf.Task(s2),
+         hf.Task(s3),
+         hf.Task(s4),
+         hf.Task(s5),
+     ]
+     loops = [hf.Loop(tasks=[0], num_iterations=2)]
+     wk = hf.Workflow.from_template_data(
+         template_name="loop_param_update_task_input_source",
+         tasks=tasks,
+         loops=loops,
+         path=tmp_path,
+     )
+     t1_e0_i0_di = wk.tasks.t1.elements[0].iterations[0].get_data_idx()
+     t1_e0_i1_di = wk.tasks.t1.elements[0].iterations[1].get_data_idx()
+     t2_e0_di = wk.tasks.t2.elements[0].get_data_idx()
+     t3_e0_di = wk.tasks.t3.elements[0].get_data_idx()
+     t4_e0_di = wk.tasks.t4.elements[0].get_data_idx()
+     t5_e0_di = wk.tasks.t5.elements[0].get_data_idx()
+
+     t1_e1_i0_di = wk.tasks.t1.elements[1].iterations[0].get_data_idx()
+     t1_e1_i1_di = wk.tasks.t1.elements[1].iterations[1].get_data_idx()
+     t2_e1_di = wk.tasks.t2.elements[1].get_data_idx()
+     t3_e1_di = wk.tasks.t3.elements[1].get_data_idx()
+     t4_e1_di = wk.tasks.t4.elements[1].get_data_idx()
+     t5_e1_di = wk.tasks.t5.elements[1].get_data_idx()
+
+     assert t1_e0_i0_di["outputs.p1"] == t1_e0_i1_di["inputs.p1"]
+     assert t1_e0_i1_di["outputs.p1"] == t2_e0_di["inputs.p1"]
+     assert t1_e0_i1_di["outputs.p1"] == t3_e0_di["inputs.p1"]
+     assert t1_e0_i1_di["outputs.p1"] == t4_e0_di["inputs.p1"]
+     assert t1_e0_i1_di["outputs.p1"] == t5_e0_di["inputs.p1"]
+     assert t2_e0_di["outputs.p2"] == t3_e0_di["inputs.p2"]
+
+     assert t1_e1_i0_di["outputs.p1"] == t1_e1_i1_di["inputs.p1"]
+     assert t1_e1_i1_di["outputs.p1"] == t2_e1_di["inputs.p1"]
+     assert t1_e1_i1_di["outputs.p1"] == t3_e1_di["inputs.p1"]
+     assert t1_e1_i1_di["outputs.p1"] == t4_e1_di["inputs.p1"]
+     assert t1_e1_i1_di["outputs.p1"] == t5_e1_di["inputs.p1"]
+     assert t2_e1_di["outputs.p2"] == t3_e1_di["inputs.p2"]
+
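For reference, each `make_schemas` spec above reads `({input: default}, (outputs,), objective)`. A sketch of roughly what the `({"p1": None}, ("p2",), "t2")` entry expands to (illustrative only; the real test helper also attaches a command action):

    # approximate equivalent of one make_schemas spec tuple:
    s2_equiv = hf.TaskSchema(
        objective="t2",
        inputs=[hf.SchemaInput("p1")],
        outputs=[hf.SchemaOutput("p2")],
    )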
+ def test_adjacent_loops_iteration_pathway(null_config, tmp_path: Path):
      ts1 = hf.TaskSchema(
          objective="t1",
          inputs=[hf.SchemaInput("p1")],
@@ -1071,7 +1904,7 @@ def test_adjacent_loops_iteration_pathway(null_config, tmp_path):
      ]


- def test_get_child_loops_ordered_by_depth(null_config, tmp_path):
+ def test_get_child_loops_ordered_by_depth(null_config, tmp_path: Path):
      ts1 = hf.TaskSchema(
          objective="t1",
          inputs=[hf.SchemaInput("p1")],
@@ -1104,7 +1937,7 @@ def test_get_child_loops_ordered_by_depth(null_config, tmp_path):
      assert wk.loops.outer.get_child_loops() == [wk.loops.middle, wk.loops.inner]


- def test_multi_nested_loops(null_config, tmp_path):
+ def test_multi_nested_loops(null_config, tmp_path: Path):
      ts1 = hf.TaskSchema(
          objective="t1",
          inputs=[hf.SchemaInput("p1")],
@@ -1161,14 +1994,14 @@ def test_multi_nested_loops(null_config, tmp_path):
      ]


- def test_nested_loop_input_from_parent_loop_task(null_config, tmp_path):
+ def test_nested_loop_input_from_parent_loop_task(null_config, tmp_path: Path):
      """Test that an input in a nested-loop task is correctly sourced from the latest
      iteration of the parent loop."""
      wk = make_workflow(
          schemas_spec=[
-             [{"p1": None}, ("p2", "p3")],
-             [{"p2": None}, ("p4",)],
-             [{"p4": None, "p3": None}, ("p2", "p1")],  # testing p3 source
+             ({"p1": None}, ("p2", "p3")),
+             ({"p2": None}, ("p4",)),
+             ({"p4": None, "p3": None}, ("p2", "p1")),  # testing p3 source
          ],
          path=tmp_path,
          local_inputs={0: {"p1": 101}},
@@ -1186,16 +2019,16 @@ def test_nested_loop_input_from_parent_loop_task(null_config, tmp_path):
      assert p3_inp_idx == [p3_out_idx[0]] * 3 + [p3_out_idx[1]] * 3


- def test_doubly_nested_loop_input_from_parent_loop_task(null_config, tmp_path):
+ def test_doubly_nested_loop_input_from_parent_loop_task(null_config, tmp_path: Path):
      """Test that an input in a doubly-nested-loop task is correctly sourced from the
      latest iteration of the parent loop."""
      # test source of p6 in final task:
      wk = make_workflow(
          schemas_spec=[
-             [{"p5": None}, ("p6", "p1")],
-             [{"p1": None}, ("p2", "p3")],
-             [{"p2": None}, ("p4",)],
-             [{"p4": None, "p3": None, "p6": None}, ("p2", "p1", "p5")],
+             ({"p5": None}, ("p6", "p1")),
+             ({"p1": None}, ("p2", "p3")),
+             ({"p2": None}, ("p4",)),
+             ({"p4": None, "p3": None, "p6": None}, ("p2", "p1", "p5")),
          ],
          path=tmp_path,
          local_inputs={0: {"p5": 101}},
@@ -1216,7 +2049,7 @@ def test_doubly_nested_loop_input_from_parent_loop_task(null_config, tmp_path):
      assert p6_inp_idx == [p6_out_idx[0]] * 9 + [p6_out_idx[1]] * 9 + [p6_out_idx[2]] * 9


- def test_loop_non_input_task_input_from_element_group(null_config, tmp_path):
+ def test_loop_non_input_task_input_from_element_group(null_config, tmp_path: Path):
      """Test correct sourcing of an element group input within a loop, for a task that is
      not that loop's "input task" with respect to that parameter."""
      s1 = hf.TaskSchema(
@@ -1286,3 +2119,482 @@ def test_loop_non_input_task_input_from_element_group(null_config, tmp_path):
      for i in pathway:
          if i[0] == 2:  # task 3
              assert i[2][0]["inputs.p3"] == expected
+
2123
+
2124
+ @pytest.mark.integration
2125
+ def test_multi_task_loop_termination(null_config, tmp_path: Path):
2126
+ s1 = hf.TaskSchema(
2127
+ objective="t1",
2128
+ inputs=[hf.SchemaInput("p1")],
2129
+ outputs=[hf.SchemaOutput("p2")],
2130
+ actions=[
2131
+ hf.Action(
2132
+ commands=[
2133
+ hf.Command(
2134
+ command="echo $((<<parameter:p1>> + 1))",
2135
+ stdout="<<int(parameter:p2)>>",
2136
+ )
2137
+ ]
2138
+ )
2139
+ ],
2140
+ )
2141
+ s2 = hf.TaskSchema(
2142
+ objective="t2",
2143
+ inputs=[hf.SchemaInput("p2")],
2144
+ outputs=[hf.SchemaOutput("p1")],
2145
+ actions=[
2146
+ hf.Action(
2147
+ commands=[
2148
+ hf.Command(
2149
+ command="echo $((<<parameter:p2>> + 1))",
2150
+ stdout="<<int(parameter:p1)>>",
2151
+ )
2152
+ ]
2153
+ )
2154
+ ],
2155
+ )
2156
+ tasks = [
2157
+ hf.Task(schema=s1, inputs={"p1": 0}),
2158
+ hf.Task(schema=s2),
2159
+ ]
2160
+ wk = hf.Workflow.from_template_data(
2161
+ tasks=tasks,
2162
+ loops=[
2163
+ hf.Loop(
2164
+ tasks=[0, 1],
2165
+ num_iterations=3,
2166
+ termination=hf.Rule(
2167
+ path="outputs.p1",
2168
+ condition={"value.greater_than": 3}, # should stop after 2nd iter
2169
+ ),
2170
+ )
2171
+ ],
2172
+ path=tmp_path,
2173
+ template_name="test_loops",
2174
+ )
2175
+ wk.submit(wait=True, add_to_known=False)
2176
+ for task in wk.tasks:
2177
+ for element in task.elements:
2178
+ for iter_i in element.iterations:
2179
+ skips = (i.skip for i in iter_i.action_runs)
2180
+ if iter_i.loop_idx[wk.loops[0].name] > 1:
2181
+ assert all(skips)
2182
+ assert iter_i.loop_skipped
2183
+ else:
2184
+ assert not any(skips)
2185
+
2186
+
2187
+ @pytest.mark.integration
2188
+ def test_multi_task_loop_termination_task(null_config, tmp_path: Path):
2189
+ """Specify non-default task at which to check for termination."""
2190
+ s1 = hf.TaskSchema(
2191
+ objective="t1",
2192
+ inputs=[hf.SchemaInput("p1")],
2193
+ outputs=[hf.SchemaOutput("p2")],
2194
+ actions=[
2195
+ hf.Action(
2196
+ commands=[
2197
+ hf.Command(
2198
+ command="echo $((<<parameter:p1>> + 1))",
2199
+ stdout="<<int(parameter:p2)>>",
2200
+ )
2201
+ ]
2202
+ )
2203
+ ],
2204
+ )
2205
+ s2 = hf.TaskSchema(
2206
+ objective="t2",
2207
+ inputs=[hf.SchemaInput("p2")],
2208
+ outputs=[hf.SchemaOutput("p1")],
2209
+ actions=[
2210
+ hf.Action(
2211
+ commands=[
2212
+ hf.Command(
2213
+ command="echo $((<<parameter:p2>> + 1))",
2214
+ stdout="<<int(parameter:p1)>>",
2215
+ )
2216
+ ]
2217
+ )
2218
+ ],
2219
+ )
2220
+ tasks = [
2221
+ hf.Task(schema=s1, inputs={"p1": 0}),
2222
+ hf.Task(schema=s2),
2223
+ ]
2224
+ wk = hf.Workflow.from_template_data(
2225
+ tasks=tasks,
2226
+ resources={"any": {"write_app_logs": True}},
2227
+ loops=[
2228
+ hf.Loop(
2229
+ tasks=[0, 1],
2230
+ num_iterations=3,
2231
+ termination_task=0, # default would be final task (1)
2232
+ termination=hf.Rule(
2233
+ path="inputs.p1",
2234
+ condition={
2235
+ "value.greater_than": 3
2236
+ }, # should stop after first task of final iteration
2237
+ ),
2238
+ )
2239
+ ],
2240
+ path=tmp_path,
2241
+ template_name="test_loops",
2242
+ )
2243
+ wk.submit(wait=True, add_to_known=False, status=False)
2244
+ runs_t0 = [j for i in wk.tasks[0].elements[0].iterations for j in i.action_runs]
2245
+ runs_t1 = [j for i in wk.tasks[1].elements[0].iterations for j in i.action_runs]
2246
+
2247
+ assert [i.skip for i in runs_t0] == [0, 0, 0]
2248
+ assert [i.skip for i in runs_t1] == [0, 0, SkipReason.LOOP_TERMINATION.value]
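The skip pattern follows from evaluating the rule at task 0 against `inputs.p1`: `t1` still runs in the final iteration, and only the trailing `t2` run is skipped. A plain-Python model (a sketch of the intended semantics, not the scheduler logic):

    p1, t2_skipped = 0, []
    for _ in range(3):
        terminate = p1 > 3  # rule on "inputs.p1", checked at termination_task=0
        p2 = p1 + 1  # t1 always runs (the check happens at t1 itself)
        t2_skipped.append(terminate)
        if not terminate:
            p1 = p2 + 1  # t2
    assert t2_skipped == [False, False, True]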
+
+
+ @pytest.mark.integration
+ @pytest.mark.skip(reason="need to fix loop termination for multiple elements")
+ def test_multi_task_loop_termination_multi_element(null_config, tmp_path: Path):
+     s1 = hf.TaskSchema(
+         objective="t1",
+         inputs=[hf.SchemaInput("p1")],
+         outputs=[hf.SchemaOutput("p2")],
+         actions=[
+             hf.Action(
+                 commands=[
+                     hf.Command(
+                         command="echo $((<<parameter:p1>> + 1))",
+                         stdout="<<int(parameter:p2)>>",
+                     )
+                 ]
+             )
+         ],
+     )
+     s2 = hf.TaskSchema(
+         objective="t2",
+         inputs=[hf.SchemaInput("p2")],
+         outputs=[hf.SchemaOutput("p1")],
+         actions=[
+             hf.Action(
+                 commands=[
+                     hf.Command(
+                         command="echo $((<<parameter:p2>> + 1))",
+                         stdout="<<int(parameter:p1)>>",
+                     )
+                 ]
+             )
+         ],
+     )
+     tasks = [
+         hf.Task(schema=s1, sequences=[hf.ValueSequence(path="inputs.p1", values=[0, 1])]),
+         hf.Task(schema=s2),
+     ]
+     wk = hf.Workflow.from_template_data(
+         tasks=tasks,
+         loops=[
+             hf.Loop(
+                 tasks=[0, 1],
+                 num_iterations=3,
+                 termination=hf.Rule(
+                     path="outputs.p1",
+                     condition={
+                         "value.greater_than": 3
+                     },  # should stop after 2nd iter (element 0), 1st iter (element 1)
+                 ),
+             )
+         ],
+         path=tmp_path,
+         template_name="test_loops",
+     )
+     wk.submit(wait=True, add_to_known=False)
+     expected_num_iters = [2, 1]
+     for task in wk.tasks:
+         for element in task.elements:
+             for iter_i in element.iterations:
+                 skips = (i.skip for i in iter_i.action_runs)
+                 if (
+                     iter_i.loop_idx[wk.loops[0].name]
+                     > expected_num_iters[element.index] - 1
+                 ):
+                     assert all(skips)
+                     assert iter_i.loop_skipped
+                 else:
+                     assert not any(skips)
+
+
+ def test_loop_termination_task_default(null_config):
+     loop = hf.Loop(
+         tasks=[0, 1],
+         num_iterations=3,
+     )
+     assert loop.termination_task_insert_ID == 1
+
+
+ def test_loop_termination_task_non_default_specified(null_config):
+     loop = hf.Loop(
+         tasks=[0, 1],
+         num_iterations=3,
+         termination_task=0,
+     )
+     assert loop.termination_task_insert_ID == 0
+
+
+ def test_loop_termination_task_default_specified(null_config):
+     loop = hf.Loop(
+         tasks=[0, 1],
+         num_iterations=3,
+         termination_task=1,
+     )
+     assert loop.termination_task_insert_ID == 1
+
+
+ def test_loop_termination_task_raise_on_bad_task(null_config):
+     with pytest.raises(ValueError):
+         hf.Loop(
+             tasks=[0, 1],
+             num_iterations=3,
+             termination_task=2,
+         )
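Taken together, the four tests above pin down the behaviour: `termination_task` defaults to the loop's final task, and a value outside the loop's tasks raises `ValueError`. A standalone sketch of that rule (not the library's implementation):

    def resolve_termination_task(loop_tasks, termination_task=None):
        # default: check for termination at the loop's final task
        if termination_task is None:
            return loop_tasks[-1]
        if termination_task not in loop_tasks:
            raise ValueError(f"task {termination_task} is not in the loop")
        return termination_task

    assert resolve_termination_task([0, 1]) == 1
    assert resolve_termination_task([0, 1], termination_task=0) == 0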
+
+
+ @pytest.mark.parametrize("num_iters", [1, 2])
+ def test_inner_loop_num_added_iterations_on_reload(null_config, tmp_path, num_iters):
+     # this tests that the pending num_added_iterations are saved correctly when adding
+     # loop iterations
+     s1, s2 = make_schemas(
+         ({"p2": None}, ("p2",), "t1"),
+         ({"p2": None}, ("p2",), "t2"),
+     )
+     tasks = [
+         hf.Task(s1, inputs={"p2": 100}),
+         hf.Task(s2),
+     ]
+
+     loops = [
+         hf.Loop(name="inner", tasks=[0], num_iterations=num_iters),
+         hf.Loop(name="outer", tasks=[0, 1], num_iterations=2),
+     ]
+
+     wk = hf.Workflow.from_template_data(
+         template_name="test_loop_num_added_iters_reload",
+         tasks=tasks,
+         loops=loops,
+         path=tmp_path,
+     )
+
+     wk = wk.reload()
+     assert wk.loops.inner.num_added_iterations == {
+         (0,): num_iters,
+         (1,): num_iters,
+     }
+
+
+ @pytest.mark.parametrize("num_outer_iters", [1, 2])
+ def test_outer_loop_num_added_iterations_on_reload(
+     null_config, tmp_path, num_outer_iters
+ ):
+     # this tests that the pending num_added_iterations are saved correctly when adding
+     # loop iterations
+
+     s1, s2 = make_schemas(
+         ({"p2": None}, ("p2",), "t1"),
+         ({"p2": None}, ("p2",), "t2"),
+     )
+     tasks = [
+         hf.Task(s1, inputs={"p2": 100}),
+         hf.Task(s2),
+     ]
+
+     loops = [
+         hf.Loop(name="inner", tasks=[0], num_iterations=2),
+         hf.Loop(name="outer", tasks=[0, 1], num_iterations=num_outer_iters),
+     ]
+
+     wk = hf.Workflow.from_template_data(
+         template_name="test_loop_num_added_iters_reload",
+         tasks=tasks,
+         loops=loops,
+         path=tmp_path,
+     )
+
+     wk = wk.reload()
+     if num_outer_iters == 1:
+         assert wk.loops.inner.num_added_iterations == {(0,): 2}
+     elif num_outer_iters == 2:
+         assert wk.loops.inner.num_added_iterations == {(0,): 2, (1,): 2}
+
+
+ def test_multi_nested_loop_num_added_iterations_on_reload(null_config, tmp_path: Path):
+     s1, s2, s3 = make_schemas(
+         ({"p2": None}, ("p2",), "t1"),
+         ({"p2": None}, ("p2",), "t2"),
+         ({"p2": None}, ("p2",), "t3"),
+     )
+     tasks = [
+         hf.Task(s1, inputs={"p2": 100}),
+         hf.Task(s2),
+         hf.Task(s3),
+     ]
+
+     loops = [
+         hf.Loop(name="inner", tasks=[0], num_iterations=2),
+         hf.Loop(name="middle", tasks=[0, 1], num_iterations=3),
+         hf.Loop(name="outer", tasks=[0, 1, 2], num_iterations=4),
+     ]
+
+     wk = hf.Workflow.from_template_data(
+         template_name="test_loop_num_added_iters_reload",
+         tasks=tasks,
+         loops=loops,
+         path=tmp_path,
+     )
+
+     wk = wk.reload()
+     for loop in wk.loops:
+         print(loop.num_added_iterations)
+
+     assert wk.loops.inner.num_added_iterations == {
+         (0, 0): 2,
+         (1, 0): 2,
+         (2, 0): 2,
+         (0, 1): 2,
+         (1, 1): 2,
+         (2, 1): 2,
+         (0, 2): 2,
+         (1, 2): 2,
+         (2, 2): 2,
+         (0, 3): 2,
+         (1, 3): 2,
+         (2, 3): 2,
+     }
+     assert wk.loops.middle.num_added_iterations == {(0,): 3, (1,): 3, (2,): 3, (3,): 3}
+     assert wk.loops.outer.num_added_iterations == {(): 4}
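Reading the assertions above, the key tuples in `num_added_iterations` index the parent loops' iterations, `(middle, outer)` for the inner loop here. A standalone sketch reproducing the expected inner-loop mapping:

    from itertools import product

    # one entry per (middle, outer) parent-iteration pair, each recording the
    # two added inner iterations:
    expected_inner = {(m, o): 2 for m, o in product(range(3), range(4))}
    assert len(expected_inner) == 12
    assert all(n == 2 for n in expected_inner.values())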
+
+
+ def test_multi_nested_loop_num_added_iterations_on_reload_single_iter_inner(
+     null_config, tmp_path: Path
+ ):
+     s1, s2, s3 = make_schemas(
+         ({"p2": None}, ("p2",), "t1"),
+         ({"p2": None}, ("p2",), "t2"),
+         ({"p2": None}, ("p2",), "t3"),
+     )
+     tasks = [
+         hf.Task(s1, inputs={"p2": 100}),
+         hf.Task(s2),
+         hf.Task(s3),
+     ]
+
+     loops = [
+         hf.Loop(name="inner", tasks=[0], num_iterations=1),
+         hf.Loop(name="middle", tasks=[0, 1], num_iterations=3),
+         hf.Loop(name="outer", tasks=[0, 1, 2], num_iterations=4),
+     ]
+
+     wk = hf.Workflow.from_template_data(
+         template_name="test_loop_num_added_iters_reload",
+         tasks=tasks,
+         loops=loops,
+         path=tmp_path,
+     )
+
+     wk = wk.reload()
+     for loop in wk.loops:
+         print(loop.num_added_iterations)
+
+     assert wk.loops.inner.num_added_iterations == {
+         (0, 0): 1,
+         (1, 0): 1,
+         (2, 0): 1,
+         (0, 1): 1,
+         (1, 1): 1,
+         (2, 1): 1,
+         (0, 2): 1,
+         (1, 2): 1,
+         (2, 2): 1,
+         (0, 3): 1,
+         (1, 3): 1,
+         (2, 3): 1,
+     }
+     assert wk.loops.middle.num_added_iterations == {(0,): 3, (1,): 3, (2,): 3, (3,): 3}
+     assert wk.loops.outer.num_added_iterations == {(): 4}
+
+
+ def test_multi_nested_loop_num_added_iterations_on_reload_single_iter_middle(
+     null_config, tmp_path: Path
+ ):
+     s1, s2, s3 = make_schemas(
+         ({"p2": None}, ("p2",), "t1"),
+         ({"p2": None}, ("p2",), "t2"),
+         ({"p2": None}, ("p2",), "t3"),
+     )
+     tasks = [
+         hf.Task(s1, inputs={"p2": 100}),
+         hf.Task(s2),
+         hf.Task(s3),
+     ]
+
+     loops = [
+         hf.Loop(name="inner", tasks=[0], num_iterations=2),
+         hf.Loop(name="middle", tasks=[0, 1], num_iterations=1),
+         hf.Loop(name="outer", tasks=[0, 1, 2], num_iterations=4),
+     ]
+
+     wk = hf.Workflow.from_template_data(
+         template_name="test_loop_num_added_iters_reload",
+         tasks=tasks,
+         loops=loops,
+         path=tmp_path,
+     )
+
+     wk = wk.reload()
+     for loop in wk.loops:
+         print(loop.num_added_iterations)
+
+     assert wk.loops.inner.num_added_iterations == {
+         (0, 0): 2,
+         (0, 1): 2,
+         (0, 2): 2,
+         (0, 3): 2,
+     }
+     assert wk.loops.middle.num_added_iterations == {(0,): 1, (1,): 1, (2,): 1, (3,): 1}
+     assert wk.loops.outer.num_added_iterations == {(): 4}
+
+
+ def test_multi_nested_loop_num_added_iterations_on_reload_single_iter_outer(
+     null_config, tmp_path: Path
+ ):
+     s1, s2, s3 = make_schemas(
+         ({"p2": None}, ("p2",), "t1"),
+         ({"p2": None}, ("p2",), "t2"),
+         ({"p2": None}, ("p2",), "t3"),
+     )
+     tasks = [
+         hf.Task(s1, inputs={"p2": 100}),
+         hf.Task(s2),
+         hf.Task(s3),
+     ]
+
+     loops = [
+         hf.Loop(name="inner", tasks=[0], num_iterations=2),
+         hf.Loop(name="middle", tasks=[0, 1], num_iterations=3),
+         hf.Loop(name="outer", tasks=[0, 1, 2], num_iterations=1),
+     ]
+
+     wk = hf.Workflow.from_template_data(
+         template_name="test_loop_num_added_iters_reload",
+         tasks=tasks,
+         loops=loops,
+         path=tmp_path,
+     )
+
+     wk = wk.reload()
+     for loop in wk.loops:
+         print(loop.num_added_iterations)
+
+     assert wk.loops.inner.num_added_iterations == {
+         (0, 0): 2,
+         (1, 0): 2,
+         (2, 0): 2,
+     }
+     assert wk.loops.middle.num_added_iterations == {(0,): 3}
+     assert wk.loops.outer.num_added_iterations == {(): 1}
+
+
+ # TODO: test loop termination across jobscripts