hpcflow 0.1.15__py3-none-any.whl → 0.2.0a271__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
Files changed (275)
  1. hpcflow/__init__.py +2 -11
  2. hpcflow/__pyinstaller/__init__.py +5 -0
  3. hpcflow/__pyinstaller/hook-hpcflow.py +40 -0
  4. hpcflow/_version.py +1 -1
  5. hpcflow/app.py +43 -0
  6. hpcflow/cli.py +2 -461
  7. hpcflow/data/demo_data_manifest/__init__.py +3 -0
  8. hpcflow/data/demo_data_manifest/demo_data_manifest.json +6 -0
  9. hpcflow/data/jinja_templates/test/test_template.txt +8 -0
  10. hpcflow/data/programs/hello_world/README.md +1 -0
  11. hpcflow/data/programs/hello_world/hello_world.c +87 -0
  12. hpcflow/data/programs/hello_world/linux/hello_world +0 -0
  13. hpcflow/data/programs/hello_world/macos/hello_world +0 -0
  14. hpcflow/data/programs/hello_world/win/hello_world.exe +0 -0
  15. hpcflow/data/scripts/__init__.py +1 -0
  16. hpcflow/data/scripts/bad_script.py +2 -0
  17. hpcflow/data/scripts/demo_task_1_generate_t1_infile_1.py +8 -0
  18. hpcflow/data/scripts/demo_task_1_generate_t1_infile_2.py +8 -0
  19. hpcflow/data/scripts/demo_task_1_parse_p3.py +7 -0
  20. hpcflow/data/scripts/do_nothing.py +2 -0
  21. hpcflow/data/scripts/env_specifier_test/input_file_generator_pass_env_spec.py +4 -0
  22. hpcflow/data/scripts/env_specifier_test/main_script_test_pass_env_spec.py +8 -0
  23. hpcflow/data/scripts/env_specifier_test/output_file_parser_pass_env_spec.py +4 -0
  24. hpcflow/data/scripts/env_specifier_test/v1/input_file_generator_basic.py +4 -0
  25. hpcflow/data/scripts/env_specifier_test/v1/main_script_test_direct_in_direct_out.py +7 -0
  26. hpcflow/data/scripts/env_specifier_test/v1/output_file_parser_basic.py +4 -0
  27. hpcflow/data/scripts/env_specifier_test/v2/main_script_test_direct_in_direct_out.py +7 -0
  28. hpcflow/data/scripts/generate_t1_file_01.py +7 -0
  29. hpcflow/data/scripts/import_future_script.py +7 -0
  30. hpcflow/data/scripts/input_file_generator_basic.py +3 -0
  31. hpcflow/data/scripts/input_file_generator_basic_FAIL.py +3 -0
  32. hpcflow/data/scripts/input_file_generator_test_stdout_stderr.py +8 -0
  33. hpcflow/data/scripts/main_script_test_direct_in.py +3 -0
  34. hpcflow/data/scripts/main_script_test_direct_in_direct_out.py +6 -0
  35. hpcflow/data/scripts/main_script_test_direct_in_direct_out_2.py +6 -0
  36. hpcflow/data/scripts/main_script_test_direct_in_direct_out_2_fail_allowed.py +6 -0
  37. hpcflow/data/scripts/main_script_test_direct_in_direct_out_2_fail_allowed_group.py +7 -0
  38. hpcflow/data/scripts/main_script_test_direct_in_direct_out_3.py +6 -0
  39. hpcflow/data/scripts/main_script_test_direct_in_direct_out_all_iters_test.py +15 -0
  40. hpcflow/data/scripts/main_script_test_direct_in_direct_out_env_spec.py +7 -0
  41. hpcflow/data/scripts/main_script_test_direct_in_direct_out_labels.py +8 -0
  42. hpcflow/data/scripts/main_script_test_direct_in_group_direct_out_3.py +6 -0
  43. hpcflow/data/scripts/main_script_test_direct_in_group_one_fail_direct_out_3.py +6 -0
  44. hpcflow/data/scripts/main_script_test_direct_sub_param_in_direct_out.py +6 -0
  45. hpcflow/data/scripts/main_script_test_hdf5_in_obj.py +12 -0
  46. hpcflow/data/scripts/main_script_test_hdf5_in_obj_2.py +12 -0
  47. hpcflow/data/scripts/main_script_test_hdf5_in_obj_group.py +12 -0
  48. hpcflow/data/scripts/main_script_test_hdf5_out_obj.py +11 -0
  49. hpcflow/data/scripts/main_script_test_json_and_direct_in_json_out.py +14 -0
  50. hpcflow/data/scripts/main_script_test_json_in_json_and_direct_out.py +17 -0
  51. hpcflow/data/scripts/main_script_test_json_in_json_out.py +14 -0
  52. hpcflow/data/scripts/main_script_test_json_in_json_out_labels.py +16 -0
  53. hpcflow/data/scripts/main_script_test_json_in_obj.py +12 -0
  54. hpcflow/data/scripts/main_script_test_json_out_FAIL.py +3 -0
  55. hpcflow/data/scripts/main_script_test_json_out_obj.py +10 -0
  56. hpcflow/data/scripts/main_script_test_json_sub_param_in_json_out_labels.py +16 -0
  57. hpcflow/data/scripts/main_script_test_shell_env_vars.py +12 -0
  58. hpcflow/data/scripts/main_script_test_std_out_std_err.py +6 -0
  59. hpcflow/data/scripts/output_file_parser_basic.py +3 -0
  60. hpcflow/data/scripts/output_file_parser_basic_FAIL.py +7 -0
  61. hpcflow/data/scripts/output_file_parser_test_stdout_stderr.py +8 -0
  62. hpcflow/data/scripts/parse_t1_file_01.py +4 -0
  63. hpcflow/data/scripts/script_exit_test.py +5 -0
  64. hpcflow/data/template_components/__init__.py +1 -0
  65. hpcflow/data/template_components/command_files.yaml +26 -0
  66. hpcflow/data/template_components/environments.yaml +13 -0
  67. hpcflow/data/template_components/parameters.yaml +14 -0
  68. hpcflow/data/template_components/task_schemas.yaml +139 -0
  69. hpcflow/data/workflows/workflow_1.yaml +5 -0
  70. hpcflow/examples.ipynb +1037 -0
  71. hpcflow/sdk/__init__.py +149 -0
  72. hpcflow/sdk/app.py +4266 -0
  73. hpcflow/sdk/cli.py +1479 -0
  74. hpcflow/sdk/cli_common.py +385 -0
  75. hpcflow/sdk/config/__init__.py +5 -0
  76. hpcflow/sdk/config/callbacks.py +246 -0
  77. hpcflow/sdk/config/cli.py +388 -0
  78. hpcflow/sdk/config/config.py +1410 -0
  79. hpcflow/sdk/config/config_file.py +501 -0
  80. hpcflow/sdk/config/errors.py +272 -0
  81. hpcflow/sdk/config/types.py +150 -0
  82. hpcflow/sdk/core/__init__.py +38 -0
  83. hpcflow/sdk/core/actions.py +3857 -0
  84. hpcflow/sdk/core/app_aware.py +25 -0
  85. hpcflow/sdk/core/cache.py +224 -0
  86. hpcflow/sdk/core/command_files.py +814 -0
  87. hpcflow/sdk/core/commands.py +424 -0
  88. hpcflow/sdk/core/element.py +2071 -0
  89. hpcflow/sdk/core/enums.py +221 -0
  90. hpcflow/sdk/core/environment.py +256 -0
  91. hpcflow/sdk/core/errors.py +1043 -0
  92. hpcflow/sdk/core/execute.py +207 -0
  93. hpcflow/sdk/core/json_like.py +809 -0
  94. hpcflow/sdk/core/loop.py +1320 -0
  95. hpcflow/sdk/core/loop_cache.py +282 -0
  96. hpcflow/sdk/core/object_list.py +933 -0
  97. hpcflow/sdk/core/parameters.py +3371 -0
  98. hpcflow/sdk/core/rule.py +196 -0
  99. hpcflow/sdk/core/run_dir_files.py +57 -0
  100. hpcflow/sdk/core/skip_reason.py +7 -0
  101. hpcflow/sdk/core/task.py +3792 -0
  102. hpcflow/sdk/core/task_schema.py +993 -0
  103. hpcflow/sdk/core/test_utils.py +538 -0
  104. hpcflow/sdk/core/types.py +447 -0
  105. hpcflow/sdk/core/utils.py +1207 -0
  106. hpcflow/sdk/core/validation.py +87 -0
  107. hpcflow/sdk/core/values.py +477 -0
  108. hpcflow/sdk/core/workflow.py +4820 -0
  109. hpcflow/sdk/core/zarr_io.py +206 -0
  110. hpcflow/sdk/data/__init__.py +13 -0
  111. hpcflow/sdk/data/config_file_schema.yaml +34 -0
  112. hpcflow/sdk/data/config_schema.yaml +260 -0
  113. hpcflow/sdk/data/environments_spec_schema.yaml +21 -0
  114. hpcflow/sdk/data/files_spec_schema.yaml +5 -0
  115. hpcflow/sdk/data/parameters_spec_schema.yaml +7 -0
  116. hpcflow/sdk/data/task_schema_spec_schema.yaml +3 -0
  117. hpcflow/sdk/data/workflow_spec_schema.yaml +22 -0
  118. hpcflow/sdk/demo/__init__.py +3 -0
  119. hpcflow/sdk/demo/cli.py +242 -0
  120. hpcflow/sdk/helper/__init__.py +3 -0
  121. hpcflow/sdk/helper/cli.py +137 -0
  122. hpcflow/sdk/helper/helper.py +300 -0
  123. hpcflow/sdk/helper/watcher.py +192 -0
  124. hpcflow/sdk/log.py +288 -0
  125. hpcflow/sdk/persistence/__init__.py +18 -0
  126. hpcflow/sdk/persistence/base.py +2817 -0
  127. hpcflow/sdk/persistence/defaults.py +6 -0
  128. hpcflow/sdk/persistence/discovery.py +39 -0
  129. hpcflow/sdk/persistence/json.py +954 -0
  130. hpcflow/sdk/persistence/pending.py +948 -0
  131. hpcflow/sdk/persistence/store_resource.py +203 -0
  132. hpcflow/sdk/persistence/types.py +309 -0
  133. hpcflow/sdk/persistence/utils.py +73 -0
  134. hpcflow/sdk/persistence/zarr.py +2388 -0
  135. hpcflow/sdk/runtime.py +320 -0
  136. hpcflow/sdk/submission/__init__.py +3 -0
  137. hpcflow/sdk/submission/enums.py +70 -0
  138. hpcflow/sdk/submission/jobscript.py +2379 -0
  139. hpcflow/sdk/submission/schedulers/__init__.py +281 -0
  140. hpcflow/sdk/submission/schedulers/direct.py +233 -0
  141. hpcflow/sdk/submission/schedulers/sge.py +376 -0
  142. hpcflow/sdk/submission/schedulers/slurm.py +598 -0
  143. hpcflow/sdk/submission/schedulers/utils.py +25 -0
  144. hpcflow/sdk/submission/shells/__init__.py +52 -0
  145. hpcflow/sdk/submission/shells/base.py +229 -0
  146. hpcflow/sdk/submission/shells/bash.py +504 -0
  147. hpcflow/sdk/submission/shells/os_version.py +115 -0
  148. hpcflow/sdk/submission/shells/powershell.py +352 -0
  149. hpcflow/sdk/submission/submission.py +1402 -0
  150. hpcflow/sdk/submission/types.py +140 -0
  151. hpcflow/sdk/typing.py +194 -0
  152. hpcflow/sdk/utils/arrays.py +69 -0
  153. hpcflow/sdk/utils/deferred_file.py +55 -0
  154. hpcflow/sdk/utils/hashing.py +16 -0
  155. hpcflow/sdk/utils/patches.py +31 -0
  156. hpcflow/sdk/utils/strings.py +69 -0
  157. hpcflow/tests/api/test_api.py +32 -0
  158. hpcflow/tests/conftest.py +123 -0
  159. hpcflow/tests/data/__init__.py +0 -0
  160. hpcflow/tests/data/benchmark_N_elements.yaml +6 -0
  161. hpcflow/tests/data/benchmark_script_runner.yaml +26 -0
  162. hpcflow/tests/data/multi_path_sequences.yaml +29 -0
  163. hpcflow/tests/data/workflow_1.json +10 -0
  164. hpcflow/tests/data/workflow_1.yaml +5 -0
  165. hpcflow/tests/data/workflow_1_slurm.yaml +8 -0
  166. hpcflow/tests/data/workflow_1_wsl.yaml +8 -0
  167. hpcflow/tests/data/workflow_test_run_abort.yaml +42 -0
  168. hpcflow/tests/jinja_templates/test_jinja_templates.py +161 -0
  169. hpcflow/tests/programs/test_programs.py +180 -0
  170. hpcflow/tests/schedulers/direct_linux/test_direct_linux_submission.py +12 -0
  171. hpcflow/tests/schedulers/sge/test_sge_submission.py +36 -0
  172. hpcflow/tests/schedulers/slurm/test_slurm_submission.py +14 -0
  173. hpcflow/tests/scripts/test_input_file_generators.py +282 -0
  174. hpcflow/tests/scripts/test_main_scripts.py +1361 -0
  175. hpcflow/tests/scripts/test_non_snippet_script.py +46 -0
  176. hpcflow/tests/scripts/test_ouput_file_parsers.py +353 -0
  177. hpcflow/tests/shells/wsl/test_wsl_submission.py +14 -0
  178. hpcflow/tests/unit/test_action.py +1066 -0
  179. hpcflow/tests/unit/test_action_rule.py +24 -0
  180. hpcflow/tests/unit/test_app.py +132 -0
  181. hpcflow/tests/unit/test_cache.py +46 -0
  182. hpcflow/tests/unit/test_cli.py +172 -0
  183. hpcflow/tests/unit/test_command.py +377 -0
  184. hpcflow/tests/unit/test_config.py +195 -0
  185. hpcflow/tests/unit/test_config_file.py +162 -0
  186. hpcflow/tests/unit/test_element.py +666 -0
  187. hpcflow/tests/unit/test_element_iteration.py +88 -0
  188. hpcflow/tests/unit/test_element_set.py +158 -0
  189. hpcflow/tests/unit/test_group.py +115 -0
  190. hpcflow/tests/unit/test_input_source.py +1479 -0
  191. hpcflow/tests/unit/test_input_value.py +398 -0
  192. hpcflow/tests/unit/test_jobscript_unit.py +757 -0
  193. hpcflow/tests/unit/test_json_like.py +1247 -0
  194. hpcflow/tests/unit/test_loop.py +2674 -0
  195. hpcflow/tests/unit/test_meta_task.py +325 -0
  196. hpcflow/tests/unit/test_multi_path_sequences.py +259 -0
  197. hpcflow/tests/unit/test_object_list.py +116 -0
  198. hpcflow/tests/unit/test_parameter.py +243 -0
  199. hpcflow/tests/unit/test_persistence.py +664 -0
  200. hpcflow/tests/unit/test_resources.py +243 -0
  201. hpcflow/tests/unit/test_run.py +286 -0
  202. hpcflow/tests/unit/test_run_directories.py +29 -0
  203. hpcflow/tests/unit/test_runtime.py +9 -0
  204. hpcflow/tests/unit/test_schema_input.py +372 -0
  205. hpcflow/tests/unit/test_shell.py +129 -0
  206. hpcflow/tests/unit/test_slurm.py +39 -0
  207. hpcflow/tests/unit/test_submission.py +502 -0
  208. hpcflow/tests/unit/test_task.py +2560 -0
  209. hpcflow/tests/unit/test_task_schema.py +182 -0
  210. hpcflow/tests/unit/test_utils.py +616 -0
  211. hpcflow/tests/unit/test_value_sequence.py +549 -0
  212. hpcflow/tests/unit/test_values.py +91 -0
  213. hpcflow/tests/unit/test_workflow.py +827 -0
  214. hpcflow/tests/unit/test_workflow_template.py +186 -0
  215. hpcflow/tests/unit/utils/test_arrays.py +40 -0
  216. hpcflow/tests/unit/utils/test_deferred_file_writer.py +34 -0
  217. hpcflow/tests/unit/utils/test_hashing.py +65 -0
  218. hpcflow/tests/unit/utils/test_patches.py +5 -0
  219. hpcflow/tests/unit/utils/test_redirect_std.py +50 -0
  220. hpcflow/tests/unit/utils/test_strings.py +97 -0
  221. hpcflow/tests/workflows/__init__.py +0 -0
  222. hpcflow/tests/workflows/test_directory_structure.py +31 -0
  223. hpcflow/tests/workflows/test_jobscript.py +355 -0
  224. hpcflow/tests/workflows/test_run_status.py +198 -0
  225. hpcflow/tests/workflows/test_skip_downstream.py +696 -0
  226. hpcflow/tests/workflows/test_submission.py +140 -0
  227. hpcflow/tests/workflows/test_workflows.py +564 -0
  228. hpcflow/tests/workflows/test_zip.py +18 -0
  229. hpcflow/viz_demo.ipynb +6794 -0
  230. hpcflow-0.2.0a271.dist-info/LICENSE +375 -0
  231. hpcflow-0.2.0a271.dist-info/METADATA +65 -0
  232. hpcflow-0.2.0a271.dist-info/RECORD +237 -0
  233. {hpcflow-0.1.15.dist-info → hpcflow-0.2.0a271.dist-info}/WHEEL +4 -5
  234. hpcflow-0.2.0a271.dist-info/entry_points.txt +6 -0
  235. hpcflow/api.py +0 -490
  236. hpcflow/archive/archive.py +0 -307
  237. hpcflow/archive/cloud/cloud.py +0 -45
  238. hpcflow/archive/cloud/errors.py +0 -9
  239. hpcflow/archive/cloud/providers/dropbox.py +0 -427
  240. hpcflow/archive/errors.py +0 -5
  241. hpcflow/base_db.py +0 -4
  242. hpcflow/config.py +0 -233
  243. hpcflow/copytree.py +0 -66
  244. hpcflow/data/examples/_config.yml +0 -14
  245. hpcflow/data/examples/damask/demo/1.run.yml +0 -4
  246. hpcflow/data/examples/damask/demo/2.process.yml +0 -29
  247. hpcflow/data/examples/damask/demo/geom.geom +0 -2052
  248. hpcflow/data/examples/damask/demo/load.load +0 -1
  249. hpcflow/data/examples/damask/demo/material.config +0 -185
  250. hpcflow/data/examples/damask/inputs/geom.geom +0 -2052
  251. hpcflow/data/examples/damask/inputs/load.load +0 -1
  252. hpcflow/data/examples/damask/inputs/material.config +0 -185
  253. hpcflow/data/examples/damask/profiles/_variable_lookup.yml +0 -21
  254. hpcflow/data/examples/damask/profiles/damask.yml +0 -4
  255. hpcflow/data/examples/damask/profiles/damask_process.yml +0 -8
  256. hpcflow/data/examples/damask/profiles/damask_run.yml +0 -5
  257. hpcflow/data/examples/damask/profiles/default.yml +0 -6
  258. hpcflow/data/examples/thinking.yml +0 -177
  259. hpcflow/errors.py +0 -2
  260. hpcflow/init_db.py +0 -37
  261. hpcflow/models.py +0 -2595
  262. hpcflow/nesting.py +0 -9
  263. hpcflow/profiles.py +0 -455
  264. hpcflow/project.py +0 -81
  265. hpcflow/scheduler.py +0 -322
  266. hpcflow/utils.py +0 -103
  267. hpcflow/validation.py +0 -166
  268. hpcflow/variables.py +0 -543
  269. hpcflow-0.1.15.dist-info/METADATA +0 -168
  270. hpcflow-0.1.15.dist-info/RECORD +0 -45
  271. hpcflow-0.1.15.dist-info/entry_points.txt +0 -8
  272. hpcflow-0.1.15.dist-info/top_level.txt +0 -1
  273. /hpcflow/{archive → data/jinja_templates}/__init__.py +0 -0
  274. /hpcflow/{archive/cloud → data/programs}/__init__.py +0 -0
  275. /hpcflow/{archive/cloud/providers → data/workflows}/__init__.py +0 -0
@@ -0,0 +1,2674 @@
1
+ from __future__ import annotations
2
+ from pathlib import Path
3
+ from textwrap import dedent
4
+ import pytest
5
+
6
+ from valida.conditions import Value # type: ignore
7
+
8
+ from hpcflow.app import app as hf
9
+ from hpcflow.sdk.core.errors import LoopAlreadyExistsError, LoopTaskSubsetError
10
+ from hpcflow.sdk.core.skip_reason import SkipReason
11
+ from hpcflow.sdk.core.test_utils import P1_parameter_cls, make_schemas, make_workflow
12
+
13
+
14
+ @pytest.mark.parametrize("store", ["json", "zarr"])
15
+ def test_loop_tasks_obj_insert_ID_equivalence(tmp_path: Path, store: str):
16
+ wk_1 = make_workflow(
17
+ schemas_spec=[({"p1": None}, ("p1",), "t1")],
18
+ local_inputs={0: ("p1",)},
19
+ path=tmp_path,
20
+ store=store,
21
+ )
22
+ lp_0 = hf.Loop(tasks=[wk_1.tasks.t1], num_iterations=2)
23
+ lp_1 = hf.Loop(tasks=[0], num_iterations=2)
24
+ assert lp_0.task_insert_IDs == lp_1.task_insert_IDs
25
+
26
+
27
+ @pytest.mark.parametrize("store", ["json", "zarr"])
28
+ def test_loop_tasks_names(tmp_path: Path, store: str):
29
+ wk_1 = make_workflow(
30
+ schemas_spec=[({"p1": None}, ("p1",), "t1")],
31
+ local_inputs={0: ("p1",)},
32
+ path=tmp_path,
33
+ store=store,
34
+ )
35
+ lp_0 = hf.Loop(tasks=["t1"], num_iterations=2)
36
+ wk_1.add_loop(lp_0)
37
+
38
+ assert wk_1.loops[0].template.task_insert_IDs == (0,)
39
+ assert wk_1.loops[0].template.task_refs == ("t1",)
40
+ assert wk_1.loops[0].template.termination_task_insert_ID == 0
41
+ assert wk_1.loops[0].template.termination_task_ref == "t1"
42
+
43
+ wk_1 = wk_1.reload()
44
+ assert wk_1.loops[0].template.task_insert_IDs == (0,)
45
+ assert wk_1.loops[0].template.task_refs == ("t1",)
46
+ assert wk_1.loops[0].template.termination_task_insert_ID == 0
47
+ assert wk_1.loops[0].template.termination_task_ref == "t1"
48
+
49
+
50
+ @pytest.mark.parametrize("store", ["json", "zarr"])
51
+ def test_loop_task_names_yaml_template(tmp_path: Path, store: str):
52
+ wk_yaml = dedent(
53
+ """\
54
+ name: test_loops
55
+ loops:
56
+ - tasks: [test_t1_conditional_OS]
57
+ num_iterations: 2
58
+
59
+ tasks:
60
+ - schema: test_t1_conditional_OS
61
+ inputs:
62
+ p1: 100
63
+ """
64
+ )
65
+ wf = hf.Workflow.from_YAML_string(wk_yaml, path=tmp_path, store=store)
66
+
67
+ assert wf.loops[0].template.task_insert_IDs == (0,)
68
+ assert wf.loops[0].template.task_refs == ("test_t1_conditional_OS",)
69
+ assert wf.loops[0].template.termination_task_insert_ID == 0
70
+ assert wf.loops[0].template.termination_task_ref == "test_t1_conditional_OS"
71
+
72
+
73
+ def test_raise_on_add_loop_same_name(tmp_path: Path):
74
+ wk = make_workflow(
75
+ schemas_spec=[({"p1": None}, ("p1",), "t1"), ({"p2": None}, ("p2",), "t2")],
76
+ local_inputs={0: ("p1",), 1: ("p2",)},
77
+ path=tmp_path,
78
+ store="json",
79
+ )
80
+ lp_0 = hf.Loop(name="my_loop", tasks=[0], num_iterations=2)
81
+ lp_1 = hf.Loop(name="my_loop", tasks=[1], num_iterations=2)
82
+
83
+ wk.add_loop(lp_0)
84
+ with pytest.raises(LoopAlreadyExistsError):
85
+ wk.add_loop(lp_1)
86
+
87
+
88
+ @pytest.mark.parametrize("store", ["json", "zarr"])
89
+ def test_wk_loop_data_idx_single_task_single_element_single_parameter_three_iters(
90
+ tmp_path: Path, store: str
91
+ ):
92
+ wk = make_workflow(
93
+ schemas_spec=[({"p1": None}, ("p1",), "t1")],
94
+ local_inputs={0: ("p1",)},
95
+ path=tmp_path,
96
+ store=store,
97
+ )
98
+ wk.add_loop(hf.Loop(tasks=[wk.tasks.t1], num_iterations=3))
99
+ iter_0, iter_1, iter_2 = wk.tasks.t1.elements[0].iterations
100
+
101
+ p1_idx_i0_out = iter_0.get_data_idx()["outputs.p1"]
102
+ p1_idx_i1_in = iter_1.get_data_idx()["inputs.p1"]
103
+ p1_idx_i1_out = iter_1.get_data_idx()["outputs.p1"]
104
+ p1_idx_i2_in = iter_2.get_data_idx()["inputs.p1"]
105
+
106
+ assert p1_idx_i0_out == p1_idx_i1_in and p1_idx_i1_out == p1_idx_i2_in
107
+
108
+
109
+ @pytest.mark.parametrize("store", ["json", "zarr"])
110
+ def test_wk_loop_EARs_initialised_single_task_single_element_single_parameter_three_iters(
111
+ tmp_path: Path, store: str
112
+ ):
113
+ wk = make_workflow(
114
+ schemas_spec=[({"p1": None}, ("p1",), "t1")],
115
+ local_inputs={0: ("p1",)},
116
+ path=tmp_path,
117
+ store=store,
118
+ )
119
+ wk.add_loop(hf.Loop(tasks=[wk.tasks.t1], num_iterations=3))
120
+ iter_0, iter_1, iter_2 = wk.tasks.t1.elements[0].iterations
121
+ assert iter_0.EARs_initialised and iter_1.EARs_initialised and iter_2.EARs_initialised
122
+
123
+
124
+ @pytest.mark.parametrize("store", ["json", "zarr"])
125
+ def test_wk_loop_data_idx_single_task_multi_element_single_parameter_three_iters(
126
+ tmp_path: Path, store: str
127
+ ):
128
+ wk = make_workflow(
129
+ schemas_spec=[({"p1": None}, ("p1",), "t1")],
130
+ local_sequences={0: [("inputs.p1", 2, 0)]},
131
+ path=tmp_path,
132
+ store=store,
133
+ )
134
+ wk.add_loop(hf.Loop(tasks=[wk.tasks.t1], num_iterations=3))
135
+ e0_iter_0, e0_iter_1, e0_iter_2 = wk.tasks.t1.elements[0].iterations
136
+ e1_iter_0, e1_iter_1, e1_iter_2 = wk.tasks.t1.elements[1].iterations
137
+
138
+ e0_p1_idx_i0_out = e0_iter_0.get_data_idx()["outputs.p1"]
139
+ e0_p1_idx_i1_in = e0_iter_1.get_data_idx()["inputs.p1"]
140
+ e0_p1_idx_i1_out = e0_iter_1.get_data_idx()["outputs.p1"]
141
+ e0_p1_idx_i2_in = e0_iter_2.get_data_idx()["inputs.p1"]
142
+
143
+ e1_p1_idx_i0_out = e1_iter_0.get_data_idx()["outputs.p1"]
144
+ e1_p1_idx_i1_in = e1_iter_1.get_data_idx()["inputs.p1"]
145
+ e1_p1_idx_i1_out = e1_iter_1.get_data_idx()["outputs.p1"]
146
+ e1_p1_idx_i2_in = e1_iter_2.get_data_idx()["inputs.p1"]
147
+
148
+ assert (
149
+ e0_p1_idx_i0_out == e0_p1_idx_i1_in
150
+ and e0_p1_idx_i1_out == e0_p1_idx_i2_in
151
+ and e1_p1_idx_i0_out == e1_p1_idx_i1_in
152
+ and e1_p1_idx_i1_out == e1_p1_idx_i2_in
153
+ )
154
+
155
+
156
+ @pytest.mark.parametrize("store", ["json", "zarr"])
157
+ def test_wk_loop_data_idx_multi_task_single_element_single_parameter_two_iters(
158
+ tmp_path: Path, store: str
159
+ ):
160
+ wk = make_workflow(
161
+ schemas_spec=[
162
+ ({"p1": None}, ("p1",), "t1"),
163
+ ({"p1": None}, ("p1",), "t2"),
164
+ ({"p1": None}, ("p1",), "t3"),
165
+ ],
166
+ local_inputs={0: ("p1",)},
167
+ path=tmp_path,
168
+ store=store,
169
+ )
170
+ wk.add_loop(hf.Loop(tasks=[0, 1, 2], num_iterations=2))
171
+ t1_iter_0, t1_iter_1 = wk.tasks.t1.elements[0].iterations
172
+ t2_iter_0, t2_iter_1 = wk.tasks.t2.elements[0].iterations
173
+ t3_iter_0, t3_iter_1 = wk.tasks.t3.elements[0].iterations
174
+
175
+ in_key = "inputs.p1"
176
+ out_key = "outputs.p1"
177
+
178
+ t1_i0_p1_idx_out = t1_iter_0.get_data_idx()[out_key]
179
+ t2_i0_p1_idx_in = t2_iter_0.get_data_idx()[in_key]
180
+ t2_i0_p1_idx_out = t2_iter_0.get_data_idx()[out_key]
181
+ t3_i0_p1_idx_in = t3_iter_0.get_data_idx()[in_key]
182
+ t3_i0_p1_idx_out = t3_iter_0.get_data_idx()[out_key]
183
+
184
+ t1_i1_p1_idx_in = t1_iter_1.get_data_idx()[in_key]
185
+ t1_i1_p1_idx_out = t1_iter_1.get_data_idx()[out_key]
186
+ t2_i1_p1_idx_in = t2_iter_1.get_data_idx()[in_key]
187
+ t2_i1_p1_idx_out = t2_iter_1.get_data_idx()[out_key]
188
+ t3_i1_p1_idx_in = t3_iter_1.get_data_idx()[in_key]
189
+
190
+ assert (
191
+ t1_i0_p1_idx_out == t2_i0_p1_idx_in
192
+ and t2_i0_p1_idx_out == t3_i0_p1_idx_in
193
+ and t3_i0_p1_idx_out == t1_i1_p1_idx_in
194
+ and t1_i1_p1_idx_out == t2_i1_p1_idx_in
195
+ and t2_i1_p1_idx_out == t3_i1_p1_idx_in
196
+ )
197
+
198
+
199
+ @pytest.mark.parametrize("store", ["json", "zarr"])
200
+ def test_wk_loop_data_idx_single_task_single_element_single_parameter_three_iters_non_iterable_param(
201
+ tmp_path: Path, store: str
202
+ ):
203
+ wk = make_workflow(
204
+ schemas_spec=[({"p1": None}, ("p1",), "t1")],
205
+ local_inputs={0: ("p1",)},
206
+ path=tmp_path,
207
+ store=store,
208
+ )
209
+ wk.add_loop(
210
+ hf.Loop(tasks=[wk.tasks.t1], num_iterations=3, non_iterable_parameters=["p1"])
211
+ )
212
+ iter_0, iter_1, iter_2 = wk.tasks.t1.elements[0].iterations
213
+
214
+ p1_idx_i0_out = iter_0.get_data_idx()["outputs.p1"]
215
+ p1_idx_i1_in = iter_1.get_data_idx()["inputs.p1"]
216
+ p1_idx_i1_out = iter_1.get_data_idx()["outputs.p1"]
217
+ p1_idx_i2_in = iter_2.get_data_idx()["inputs.p1"]
218
+
219
+ assert p1_idx_i0_out != p1_idx_i1_in and p1_idx_i1_out != p1_idx_i2_in
220
+
221
+
222
+ @pytest.mark.parametrize("store", ["json", "zarr"])
223
+ def test_wk_loop_iterable_parameters(tmp_path: Path, store: str):
224
+ wk = make_workflow(
225
+ schemas_spec=[
226
+ ({"p1": None, "p2": None}, ("p1", "p2"), "t1"),
227
+ ({"p1": None}, ("p1",), "t2"),
228
+ ({"p1": None, "p2": None}, ("p1", "p2"), "t3"),
229
+ ],
230
+ local_inputs={0: ("p1", "p2"), 1: ("p1",)},
231
+ path=tmp_path,
232
+ store=store,
233
+ )
234
+ wk.add_loop(hf.Loop(tasks=[0, 1, 2], num_iterations=2))
235
+ assert dict(sorted(wk.loops[0].iterable_parameters.items(), key=lambda x: x[0])) == {
236
+ "p1": {"input_task": 0, "output_tasks": [0, 1, 2]},
237
+ "p2": {"input_task": 0, "output_tasks": [0, 2]},
238
+ }
239
+
240
+
241
+ @pytest.mark.parametrize("store", ["json", "zarr"])
242
+ def test_wk_loop_input_sources_including_local_single_element_two_iters(
243
+ tmp_path: Path, store: str
244
+ ):
245
+ wk = make_workflow(
246
+ schemas_spec=[
247
+ ({"p1": None, "p2": None}, ("p1", "p2"), "t1"),
248
+ ({"p1": None}, ("p1",), "t2"),
249
+ ({"p1": None, "p2": None}, ("p1", "p2"), "t3"),
250
+ ],
251
+ local_inputs={0: ("p1", "p2"), 1: ("p1",)},
252
+ path=tmp_path,
253
+ store=store,
254
+ )
255
+ wk.add_loop(hf.Loop(tasks=[0, 1, 2], num_iterations=2))
256
+
257
+ t2_iter_0 = wk.tasks.t2.elements[0].iterations[0]
258
+ t3_iter_0 = wk.tasks.t3.elements[0].iterations[0]
259
+ t1_iter_1 = wk.tasks.t1.elements[0].iterations[1]
260
+ t2_iter_1 = wk.tasks.t2.elements[0].iterations[1]
261
+
262
+ t3_p1_i0_out = t3_iter_0.get_data_idx()["outputs.p1"]
263
+ t3_p2_i0_out = t3_iter_0.get_data_idx()["outputs.p2"]
264
+
265
+ t1_p1_i1_in = t1_iter_1.get_data_idx()["inputs.p1"]
266
+ t1_p2_i1_in = t1_iter_1.get_data_idx()["inputs.p2"]
267
+
268
+ # local input defined in task 2 is not an input task of the iterative parameter p1,
269
+ # so it is sourced in all iterations from the original local input:
270
+ t2_p1_i0_in = t2_iter_0.get_data_idx()["inputs.p1"]
271
+ t2_p1_i1_in = t2_iter_1.get_data_idx()["inputs.p1"]
272
+
273
+ assert (
274
+ t3_p1_i0_out == t1_p1_i1_in
275
+ and t3_p2_i0_out == t1_p2_i1_in
276
+ and t2_p1_i0_in == t2_p1_i1_in
277
+ )
278
+
279
+
280
+ @pytest.mark.parametrize("store", ["json", "zarr"])
281
+ def test_get_iteration_task_pathway_single_task_single_element_three_iters(
282
+ tmp_path: Path, store: str
283
+ ):
284
+ wk = make_workflow(
285
+ schemas_spec=[({"p1": None}, ("p1",), "t1")],
286
+ local_inputs={0: ("p1",)},
287
+ path=tmp_path,
288
+ store=store,
289
+ )
290
+ wk.add_loop(hf.Loop(name="loop_0", tasks=[wk.tasks.t1], num_iterations=3))
291
+
292
+ assert wk.get_iteration_task_pathway() == [
293
+ (0, {"loop_0": 0}),
294
+ (0, {"loop_0": 1}),
295
+ (0, {"loop_0": 2}),
296
+ ]
297
+
298
+
299
+ def test_get_iteration_task_pathway_nested_loops_multi_iter(null_config, tmp_path: Path):
300
+ ts1 = hf.TaskSchema(
301
+ objective="t1",
302
+ inputs=[hf.SchemaInput("p1")],
303
+ outputs=[hf.SchemaOutput("p1")],
304
+ actions=[
305
+ hf.Action(
306
+ commands=[
307
+ hf.Command(
308
+ "Write-Output (<<parameter:p1>> + 100)",
309
+ stdout="<<int(parameter:p1)>>",
310
+ )
311
+ ],
312
+ ),
313
+ ],
314
+ )
315
+ wk = hf.Workflow.from_template_data(
316
+ template_name="test_loop",
317
+ path=tmp_path,
318
+ tasks=[
319
+ hf.Task(schema=ts1, inputs={"p1": 101}),
320
+ hf.Task(schema=ts1),
321
+ hf.Task(schema=ts1),
322
+ ],
323
+ loops=[
324
+ hf.Loop(name="inner_loop", tasks=[2], num_iterations=2),
325
+ hf.Loop(name="outer_loop", tasks=[1, 2], num_iterations=2),
326
+ ],
327
+ )
328
+ assert wk.get_iteration_task_pathway() == [
329
+ (0, {}),
330
+ (1, {"outer_loop": 0}),
331
+ (2, {"outer_loop": 0, "inner_loop": 0}),
332
+ (2, {"outer_loop": 0, "inner_loop": 1}),
333
+ (1, {"outer_loop": 1}),
334
+ (2, {"outer_loop": 1, "inner_loop": 0}),
335
+ (2, {"outer_loop": 1, "inner_loop": 1}),
336
+ ]
337
+
338
+
339
+ @pytest.mark.skip(
340
+ reason="second set of asserts fail; need to re-source inputs on adding iterations."
341
+ )
342
+ def test_get_iteration_task_pathway_nested_loops_multi_iter_jagged(
343
+ null_config, tmp_path: Path
344
+ ):
345
+ ts1 = hf.TaskSchema(
346
+ objective="t1",
347
+ inputs=[hf.SchemaInput("p1")],
348
+ outputs=[hf.SchemaOutput("p1")],
349
+ actions=[
350
+ hf.Action(
351
+ commands=[
352
+ hf.Command(
353
+ "Write-Output (<<parameter:p1>> + 100)",
354
+ stdout="<<int(parameter:p1)>>",
355
+ )
356
+ ],
357
+ ),
358
+ ],
359
+ )
360
+ wk = hf.Workflow.from_template_data(
361
+ template_name="test_loop",
362
+ path=tmp_path,
363
+ tasks=[
364
+ hf.Task(schema=ts1, inputs={"p1": 101}),
365
+ hf.Task(schema=ts1),
366
+ hf.Task(schema=ts1),
367
+ hf.Task(schema=ts1),
368
+ ],
369
+ loops=[
370
+ hf.Loop(name="inner_loop", tasks=[2], num_iterations=2),
371
+ hf.Loop(name="outer_loop", tasks=[1, 2], num_iterations=2),
372
+ ],
373
+ )
374
+ wk.loops.inner_loop.add_iteration(parent_loop_indices={"outer_loop": 1})
375
+ wk.loops.inner_loop.add_iteration(parent_loop_indices={"outer_loop": 1})
376
+ assert wk.get_iteration_task_pathway() == [
377
+ (0, {}),
378
+ (1, {"outer_loop": 0}),
379
+ (2, {"outer_loop": 0, "inner_loop": 0}),
380
+ (2, {"outer_loop": 0, "inner_loop": 1}),
381
+ (1, {"outer_loop": 1}),
382
+ (2, {"outer_loop": 1, "inner_loop": 0}),
383
+ (2, {"outer_loop": 1, "inner_loop": 1}),
384
+ (2, {"outer_loop": 1, "inner_loop": 2}),
385
+ (2, {"outer_loop": 1, "inner_loop": 3}),
386
+ (3, {}),
387
+ ]
388
+ pathway = wk.get_iteration_task_pathway(ret_data_idx=True)
389
+ assert pathway[1][2][0]["inputs.p1"] == pathway[0][2][0]["outputs.p1"]
390
+ assert pathway[2][2][0]["inputs.p1"] == pathway[1][2][0]["outputs.p1"]
391
+ assert pathway[3][2][0]["inputs.p1"] == pathway[2][2][0]["outputs.p1"]
392
+ assert pathway[4][2][0]["inputs.p1"] == pathway[3][2][0]["outputs.p1"]
393
+ assert pathway[5][2][0]["inputs.p1"] == pathway[4][2][0]["outputs.p1"]
394
+ assert pathway[6][2][0]["inputs.p1"] == pathway[5][2][0]["outputs.p1"]
395
+ assert pathway[7][2][0]["inputs.p1"] == pathway[6][2][0]["outputs.p1"]
396
+ assert pathway[8][2][0]["inputs.p1"] == pathway[7][2][0]["outputs.p1"]
397
+
398
+ # FAILS currently:
399
+ assert pathway[9][2][0]["inputs.p1"] == pathway[8][2][0]["outputs.p1"]
400
+
401
+
402
+ def test_get_iteration_task_pathway_nested_loops_multi_iter_add_outer_iter(
403
+ null_config, tmp_path: Path
404
+ ):
405
+ ts1 = hf.TaskSchema(
406
+ objective="t1",
407
+ inputs=[hf.SchemaInput("p1")],
408
+ outputs=[hf.SchemaOutput("p1")],
409
+ actions=[
410
+ hf.Action(
411
+ commands=[
412
+ hf.Command(
413
+ "Write-Output (<<parameter:p1>> + 100)",
414
+ stdout="<<int(parameter:p1)>>",
415
+ )
416
+ ],
417
+ ),
418
+ ],
419
+ )
420
+ wk = hf.Workflow.from_template_data(
421
+ template_name="test_loop",
422
+ path=tmp_path,
423
+ tasks=[
424
+ hf.Task(schema=ts1, inputs={"p1": 101}),
425
+ hf.Task(schema=ts1),
426
+ hf.Task(schema=ts1),
427
+ ],
428
+ loops=[
429
+ hf.Loop(name="inner_loop", tasks=[2], num_iterations=2),
430
+ hf.Loop(name="outer_loop", tasks=[1, 2], num_iterations=2),
431
+ ],
432
+ )
433
+ wk.loops.outer_loop.add_iteration()
434
+ assert wk.get_iteration_task_pathway() == [
435
+ (0, {}),
436
+ (1, {"outer_loop": 0}),
437
+ (2, {"outer_loop": 0, "inner_loop": 0}),
438
+ (2, {"outer_loop": 0, "inner_loop": 1}),
439
+ (1, {"outer_loop": 1}),
440
+ (2, {"outer_loop": 1, "inner_loop": 0}),
441
+ (2, {"outer_loop": 1, "inner_loop": 1}),
442
+ (1, {"outer_loop": 2}),
443
+ (2, {"outer_loop": 2, "inner_loop": 0}),
444
+ (2, {"outer_loop": 2, "inner_loop": 1}),
445
+ ]
446
+
447
+
448
+ def test_get_iteration_task_pathway_unconnected_loops(null_config, tmp_path: Path):
449
+ ts1 = hf.TaskSchema(
450
+ objective="t1",
451
+ inputs=[hf.SchemaInput("p1")],
452
+ outputs=[hf.SchemaOutput("p1")],
453
+ actions=[
454
+ hf.Action(
455
+ commands=[
456
+ hf.Command(
457
+ "Write-Output (<<parameter:p1>> + 100)",
458
+ stdout="<<int(parameter:p1)>>",
459
+ )
460
+ ],
461
+ ),
462
+ ],
463
+ )
464
+ wk = hf.Workflow.from_template_data(
465
+ template_name="test_loop",
466
+ path=tmp_path,
467
+ tasks=[
468
+ hf.Task(schema=ts1, inputs={"p1": 101}),
469
+ hf.Task(schema=ts1),
470
+ hf.Task(schema=ts1),
471
+ hf.Task(schema=ts1),
472
+ ],
473
+ loops=[
474
+ hf.Loop(name="loop_A", tasks=[0, 1], num_iterations=2),
475
+ hf.Loop(name="loop_B", tasks=[2, 3], num_iterations=2),
476
+ ],
477
+ )
478
+ assert wk.get_iteration_task_pathway() == [
479
+ (0, {"loop_A": 0}),
480
+ (1, {"loop_A": 0}),
481
+ (0, {"loop_A": 1}),
482
+ (1, {"loop_A": 1}),
483
+ (2, {"loop_B": 0}),
484
+ (3, {"loop_B": 0}),
485
+ (2, {"loop_B": 1}),
486
+ (3, {"loop_B": 1}),
487
+ ]
488
+
489
+ pathway = wk.get_iteration_task_pathway(ret_data_idx=True)
490
+ assert pathway[1][2][0]["inputs.p1"] == pathway[0][2][0]["outputs.p1"]
491
+ assert pathway[2][2][0]["inputs.p1"] == pathway[1][2][0]["outputs.p1"]
492
+ assert pathway[3][2][0]["inputs.p1"] == pathway[2][2][0]["outputs.p1"]
493
+ assert pathway[5][2][0]["inputs.p1"] == pathway[4][2][0]["outputs.p1"]
494
+ assert pathway[6][2][0]["inputs.p1"] == pathway[5][2][0]["outputs.p1"]
495
+ assert pathway[7][2][0]["inputs.p1"] == pathway[6][2][0]["outputs.p1"]
496
+ assert pathway[4][2][0]["inputs.p1"] == pathway[3][2][0]["outputs.p1"]
497
+
498
+
499
+ def test_wk_loop_input_sources_including_non_iteration_task_source(
500
+ null_config, tmp_path: Path
501
+ ):
502
+ act_env = hf.ActionEnvironment("null_env")
503
+ ts1 = hf.TaskSchema(
504
+ objective="t1",
505
+ inputs=[hf.SchemaInput("p1")],
506
+ outputs=[hf.SchemaOutput("p2")],
507
+ actions=[
508
+ hf.Action(
509
+ commands=[
510
+ hf.Command(
511
+ "Write-Output ((<<parameter:p1>> + 100))",
512
+ stdout="<<int(parameter:p2)>>",
513
+ )
514
+ ],
515
+ environments=[act_env],
516
+ ),
517
+ ],
518
+ )
519
+ ts2 = hf.TaskSchema(
520
+ objective="t2",
521
+ inputs=[hf.SchemaInput("p2"), hf.SchemaInput("p3")],
522
+ outputs=[hf.SchemaOutput("p4")],
523
+ actions=[
524
+ hf.Action(
525
+ commands=[
526
+ hf.Command(
527
+ "Write-Output ((<<parameter:p2>> + <<parameter:p3>>))",
528
+ stdout="<<int(parameter:p4)>>",
529
+ )
530
+ ],
531
+ environments=[act_env],
532
+ ),
533
+ ],
534
+ )
535
+ ts3 = hf.TaskSchema(
536
+ objective="t3",
537
+ inputs=[hf.SchemaInput("p3"), hf.SchemaInput("p4")],
538
+ outputs=[hf.SchemaOutput("p3")],
539
+ actions=[
540
+ hf.Action(
541
+ commands=[
542
+ hf.Command(
543
+ "Write-Output ((<<parameter:p3>> + <<parameter:p4>>))",
544
+ stdout="<<int(parameter:p3)>>",
545
+ )
546
+ ],
547
+ environments=[act_env],
548
+ ),
549
+ ],
550
+ )
551
+ wk = hf.Workflow.from_template_data(
552
+ template_name="test_loop",
553
+ path=tmp_path,
554
+ tasks=[
555
+ hf.Task(schema=ts1, inputs={"p1": 101}),
556
+ hf.Task(schema=ts2, inputs={"p3": 301}),
557
+ hf.Task(schema=ts3),
558
+ ],
559
+ )
560
+ wk.add_loop(hf.Loop(tasks=[1, 2], num_iterations=2))
561
+ t1 = wk.tasks.t1.elements[0].iterations[0].get_data_idx()
562
+ t2_iter_0 = wk.tasks.t2.elements[0].iterations[0].get_data_idx()
563
+ t3_iter_0 = wk.tasks.t3.elements[0].iterations[0].get_data_idx()
564
+ t2_iter_1 = wk.tasks.t2.elements[0].iterations[1].get_data_idx()
565
+ t3_iter_1 = wk.tasks.t3.elements[0].iterations[1].get_data_idx()
566
+
567
+ assert t2_iter_0["inputs.p2"] == t2_iter_1["inputs.p2"] == t1["outputs.p2"]
568
+ assert t3_iter_0["inputs.p3"] == t2_iter_0["inputs.p3"]
569
+ assert t3_iter_0["inputs.p4"] == t2_iter_0["outputs.p4"]
570
+ assert t3_iter_1["inputs.p3"] == t2_iter_1["inputs.p3"]
571
+ assert t3_iter_1["inputs.p4"] == t2_iter_1["outputs.p4"]
572
+ assert t2_iter_1["inputs.p3"] == t3_iter_0["outputs.p3"]
573
+
574
+
575
+ def test_wk_loop_input_sources_default(null_config, tmp_path: Path):
576
+ act_env = hf.ActionEnvironment("null_env")
577
+ ts1 = hf.TaskSchema(
578
+ objective="t1",
579
+ inputs=[hf.SchemaInput("p1"), hf.SchemaInput("p2", default_value=2)],
580
+ outputs=[hf.SchemaOutput("p1")],
581
+ actions=[
582
+ hf.Action(
583
+ commands=[
584
+ hf.Command(
585
+ "Write-Output ((<<parameter:p1>> + <<parameter:p2>>))",
586
+ stdout="<<int(parameter:p1)>>",
587
+ )
588
+ ],
589
+ environments=[act_env],
590
+ ),
591
+ ],
592
+ )
593
+ wk = hf.Workflow.from_template_data(
594
+ template_name="test_loop",
595
+ path=tmp_path,
596
+ tasks=[hf.Task(schema=ts1, inputs={"p1": 101})],
597
+ )
598
+ wk.add_loop(hf.Loop(tasks=[0], num_iterations=2))
599
+ t1_iter_0 = wk.tasks.t1.elements[0].iterations[0].get_data_idx()
600
+ t1_iter_1 = wk.tasks.t1.elements[0].iterations[1].get_data_idx()
601
+
602
+ assert t1_iter_0["inputs.p2"] == t1_iter_1["inputs.p2"]
603
+
604
+
605
+ def test_wk_loop_input_sources_iterable_param_default(null_config, tmp_path: Path):
606
+ act_env = hf.ActionEnvironment("null_env")
607
+ ts1 = hf.TaskSchema(
608
+ objective="t1",
609
+ inputs=[hf.SchemaInput("p1", default_value=1)],
610
+ outputs=[hf.SchemaOutput("p1")],
611
+ actions=[
612
+ hf.Action(
613
+ commands=[
614
+ hf.Command(
615
+ "Write-Output ((<<parameter:p1>> + 10))",
616
+ stdout="<<int(parameter:p1)>>",
617
+ )
618
+ ],
619
+ environments=[act_env],
620
+ ),
621
+ ],
622
+ )
623
+ wk = hf.Workflow.from_template_data(
624
+ template_name="test_loop",
625
+ path=tmp_path,
626
+ tasks=[hf.Task(schema=ts1, inputs={"p1": 101})],
627
+ )
628
+ wk.add_loop(hf.Loop(tasks=[0], num_iterations=3))
629
+ # first iteration should be the default value, second and third iterations should
630
+ # be from previous iteration outputs:
631
+ t1_iter_0 = wk.tasks.t1.elements[0].iterations[0].get_data_idx()
632
+ t1_iter_1 = wk.tasks.t1.elements[0].iterations[1].get_data_idx()
633
+ t1_iter_2 = wk.tasks.t1.elements[0].iterations[2].get_data_idx()
634
+
635
+ assert t1_iter_0["inputs.p1"] != t1_iter_1["inputs.p1"]
636
+ assert t1_iter_1["inputs.p1"] != t1_iter_2["inputs.p1"]
637
+ assert t1_iter_1["inputs.p1"] == t1_iter_0["outputs.p1"]
638
+ assert t1_iter_2["inputs.p1"] == t1_iter_1["outputs.p1"]
639
+
640
+
641
+ def test_wk_loop_input_sources_iterable_param_default_conditional_action(
642
+ null_config, tmp_path: Path
643
+ ):
644
+ act_env = hf.ActionEnvironment("null_env")
645
+ ts1 = hf.TaskSchema(
646
+ objective="t1",
647
+ inputs=[
648
+ hf.SchemaInput("p1", default_value=1),
649
+ hf.SchemaInput("p2", default_value=None),
650
+ ],
651
+ outputs=[hf.SchemaOutput("p1")],
652
+ actions=[
653
+ hf.Action(
654
+ commands=[
655
+ hf.Command(
656
+ "Write-Output ((<<parameter:p1>> + 10))",
657
+ stdout="<<int(parameter:p1)>>",
658
+ )
659
+ ],
660
+ environments=[act_env],
661
+ ),
662
+ hf.Action(
663
+ commands=[hf.Command("Write-Output ((<<parameter:p2>> + 10))")],
664
+ environments=[act_env],
665
+ rules=[
666
+ hf.ActionRule(path="inputs.p2", condition=Value.not_equal_to(None))
667
+ ],
668
+ ),
669
+ ],
670
+ )
671
+ wk = hf.Workflow.from_template_data(
672
+ template_name="test_loop",
673
+ path=tmp_path,
674
+ tasks=[hf.Task(schema=ts1, inputs={"p1": 101})],
675
+ )
676
+ wk.add_loop(hf.Loop(tasks=[0], num_iterations=3))
677
+ # first iteration should be the default value, second and third iterations should
678
+ # be from previous iteration outputs:
679
+ t1_iter_0 = wk.tasks.t1.elements[0].iterations[0].get_data_idx()
680
+ t1_iter_1 = wk.tasks.t1.elements[0].iterations[1].get_data_idx()
681
+ t1_iter_2 = wk.tasks.t1.elements[0].iterations[2].get_data_idx()
682
+
683
+ assert t1_iter_0["inputs.p1"] != t1_iter_1["inputs.p1"]
684
+ assert t1_iter_1["inputs.p1"] != t1_iter_2["inputs.p1"]
685
+ assert t1_iter_1["inputs.p1"] == t1_iter_0["outputs.p1"]
686
+ assert t1_iter_2["inputs.p1"] == t1_iter_1["outputs.p1"]
687
+
688
+
689
+ def test_wk_loop_input_sources_including_non_iteration_task_source_with_groups(
690
+ null_config, tmp_path: Path
691
+ ):
692
+ act_env = hf.ActionEnvironment("null_env")
693
+ ts1 = hf.TaskSchema(
694
+ objective="t1",
695
+ inputs=[hf.SchemaInput("p1")],
696
+ outputs=[hf.SchemaOutput("p2")],
697
+ actions=[
698
+ hf.Action(
699
+ commands=[
700
+ hf.Command(
701
+ "Write-Output ((<<parameter:p1>> + 100))",
702
+ stdout="<<int(parameter:p2)>>",
703
+ )
704
+ ],
705
+ environments=[act_env],
706
+ ),
707
+ ],
708
+ )
709
+ ts2 = hf.TaskSchema(
710
+ objective="t2",
711
+ inputs=[hf.SchemaInput("p2"), hf.SchemaInput("p3")],
712
+ outputs=[hf.SchemaOutput("p4")],
713
+ actions=[
714
+ hf.Action(
715
+ commands=[
716
+ hf.Command(
717
+ "Write-Output ((<<parameter:p2>> + <<parameter:p3>>))",
718
+ stdout="<<int(parameter:p4)>>",
719
+ )
720
+ ],
721
+ environments=[act_env],
722
+ ),
723
+ ],
724
+ )
725
+ ts3 = hf.TaskSchema(
726
+ objective="t3",
727
+ inputs=[
728
+ hf.SchemaInput("p3", labels={"": {"group": "my_group"}}),
729
+ hf.SchemaInput("p4", labels={"": {"group": "my_group"}}),
730
+ ],
731
+ outputs=[hf.SchemaOutput("p3")],
732
+ actions=[
733
+ hf.Action(
734
+ commands=[
735
+ hf.Command(
736
+ "Write-Output ((<<sum(parameter:p3)>> + <<sum(parameter:p4)>>))",
737
+ stdout="<<int(parameter:p3)>>",
738
+ )
739
+ ],
740
+ environments=[act_env],
741
+ ),
742
+ ],
743
+ )
744
+ wk = hf.Workflow.from_template_data(
745
+ template_name="test_loop",
746
+ path=tmp_path,
747
+ tasks=[
748
+ hf.Task(schema=ts1, inputs={"p1": 101}),
749
+ hf.Task(
750
+ schema=ts2,
751
+ sequences=[hf.ValueSequence(path="inputs.p3", values=[301, 302])],
752
+ groups=[hf.ElementGroup(name="my_group")],
753
+ ),
754
+ hf.Task(schema=ts3),
755
+ ],
756
+ )
757
+ wk.add_loop(hf.Loop(tasks=[1, 2], num_iterations=2))
758
+
759
+ t2_elem_0_iter_0 = wk.tasks.t2.elements[0].iterations[0].get_data_idx()
760
+ t2_elem_1_iter_0 = wk.tasks.t2.elements[1].iterations[0].get_data_idx()
761
+ t2_elem_0_iter_1 = wk.tasks.t2.elements[0].iterations[1].get_data_idx()
762
+ t2_elem_1_iter_1 = wk.tasks.t2.elements[1].iterations[1].get_data_idx()
763
+
764
+ t3_iter_0 = wk.tasks.t3.elements[0].iterations[0].get_data_idx()
765
+ t3_iter_1 = wk.tasks.t3.elements[0].iterations[1].get_data_idx()
766
+ assert len(t3_iter_0["inputs.p3"]) == len(t3_iter_1["inputs.p3"]) == 2
767
+ assert len(t3_iter_0["inputs.p4"]) == len(t3_iter_1["inputs.p4"]) == 2
768
+ assert t3_iter_0["inputs.p3"] == [
769
+ t2_elem_0_iter_0["inputs.p3"],
770
+ t2_elem_1_iter_0["inputs.p3"],
771
+ ]
772
+ assert t3_iter_0["inputs.p4"] == [
773
+ t2_elem_0_iter_0["outputs.p4"],
774
+ t2_elem_1_iter_0["outputs.p4"],
775
+ ]
776
+ assert t3_iter_1["inputs.p3"] == [
777
+ t2_elem_0_iter_1["inputs.p3"],
778
+ t2_elem_1_iter_1["inputs.p3"],
779
+ ]
780
+ assert t3_iter_1["inputs.p4"] == [
781
+ t2_elem_0_iter_1["outputs.p4"],
782
+ t2_elem_1_iter_1["outputs.p4"],
783
+ ]
784
+
785
+
786
+ def test_loop_local_sub_parameters(null_config, tmp_path: Path):
787
+ act_env = hf.ActionEnvironment("null_env")
788
+ ts1 = hf.TaskSchema(
789
+ objective="t1",
790
+ inputs=[hf.SchemaInput("p1c")],
791
+ outputs=[hf.SchemaOutput("p2")],
792
+ actions=[
793
+ hf.Action(
794
+ commands=[
795
+ hf.Command(
796
+ "Write-Output ((<<parameter:p1c.a>> + 100))",
797
+ stdout="<<int(parameter:p2)>>",
798
+ )
799
+ ],
800
+ environments=[act_env],
801
+ ),
802
+ ],
803
+ parameter_class_modules=["hpcflow.sdk.core.test_utils"],
804
+ )
805
+ ts2 = hf.TaskSchema(
806
+ objective="t2",
807
+ inputs=[hf.SchemaInput("p2")],
808
+ outputs=[hf.SchemaOutput("p1c")],
809
+ actions=[
810
+ hf.Action(
811
+ commands=[
812
+ hf.Command(
813
+ "Write-Output ((<<parameter:p2>> + 100))",
814
+ stdout="<<parameter:p1c>>",
815
+ )
816
+ ],
817
+ environments=[act_env],
818
+ ),
819
+ ],
820
+ parameter_class_modules=["hpcflow.sdk.core.test_utils"],
821
+ )
822
+ wk = hf.Workflow.from_template_data(
823
+ template_name="test_loop",
824
+ path=tmp_path,
825
+ tasks=[
826
+ hf.Task(
827
+ schema=ts1,
828
+ inputs=[
829
+ hf.InputValue(parameter="p1c", value=P1_parameter_cls(a=101)),
830
+ hf.InputValue(parameter="p1c", path="d", value=9),
831
+ ],
832
+ ),
833
+ hf.Task(schema=ts2),
834
+ ],
835
+ )
836
+ wk.add_loop(hf.Loop(tasks=[0, 1], num_iterations=2))
837
+
838
+ t1_iter_0 = wk.tasks.t1.elements[0].iterations[0].get_data_idx()
839
+ t2_iter_0 = wk.tasks.t2.elements[0].iterations[0].get_data_idx()
840
+ t1_iter_1 = wk.tasks.t1.elements[0].iterations[1].get_data_idx()
841
+ t2_iter_1 = wk.tasks.t2.elements[0].iterations[1].get_data_idx()
842
+
843
+ assert t2_iter_0["inputs.p2"] == t1_iter_0["outputs.p2"]
844
+ assert t1_iter_1["inputs.p1c"] == t2_iter_0["outputs.p1c"]
845
+ assert t2_iter_1["inputs.p2"] == t1_iter_1["outputs.p2"]
846
+ assert t1_iter_0["inputs.p1c.d"] == t1_iter_1["inputs.p1c.d"]
847
+
848
+
849
+ def test_nested_loop_iter_loop_idx(null_config, tmp_path: Path):
850
+ ts1 = hf.TaskSchema(
851
+ objective="t1",
852
+ inputs=[hf.SchemaInput("p1")],
853
+ outputs=[hf.SchemaOutput("p1")],
854
+ actions=[
855
+ hf.Action(
856
+ commands=[
857
+ hf.Command(
858
+ "Write-Output (<<parameter:p1>> + 100)",
859
+ stdout="<<int(parameter:p1)>>",
860
+ )
861
+ ],
862
+ ),
863
+ ],
864
+ )
865
+
866
+ wk = hf.Workflow.from_template_data(
867
+ template_name="test_loop",
868
+ path=tmp_path,
869
+ tasks=[hf.Task(schema=ts1, inputs={"p1": 101})],
870
+ loops=[
871
+ hf.Loop(name="outer_loop", tasks=[0], num_iterations=1),
872
+ hf.Loop(name="inner_loop", tasks=[0], num_iterations=1),
873
+ ],
874
+ )
875
+ assert wk.tasks[0].elements[0].iterations[0].loop_idx == {
876
+ "inner_loop": 0,
877
+ "outer_loop": 0,
878
+ }
879
+
880
+
881
+ def test_schema_input_with_group_sourced_from_prev_iteration(null_config, tmp_path: Path):
882
+ s1 = hf.TaskSchema(
883
+ objective="t1",
884
+ inputs=[hf.SchemaInput("p1")],
885
+ outputs=[hf.SchemaOutput("p2")],
886
+ actions=[
887
+ hf.Action(
888
+ commands=[
889
+ hf.Command(
890
+ "echo $(( <<parameter:p1>> + 1 ))", stdout="<<parameter:p2>>"
891
+ )
892
+ ]
893
+ )
894
+ ],
895
+ )
896
+ s2 = hf.TaskSchema(
897
+ objective="t2",
898
+ inputs=[hf.SchemaInput("p2", group="my_group")],
899
+ outputs=[hf.SchemaOutput("p3")],
900
+ actions=[
901
+ hf.Action(
902
+ commands=[
903
+ hf.Command(
904
+ "echo $(( <<parameter:p2>> + 2 ))", stdout="<<parameter:p3>>"
905
+ )
906
+ ]
907
+ )
908
+ ],
909
+ )
910
+ s3 = hf.TaskSchema(
911
+ objective="t3",
912
+ inputs=[hf.SchemaInput("p3")],
913
+ outputs=[hf.SchemaOutput("p2")],
914
+ actions=[
915
+ hf.Action(
916
+ commands=[
917
+ hf.Command(
918
+ "echo $(( <<parameter:p3>> + 3 ))", stdout="<<parameter:p2>>"
919
+ )
920
+ ]
921
+ )
922
+ ],
923
+ )
924
+
925
+ t1 = hf.Task(
926
+ schema=s1,
927
+ sequences=[hf.ValueSequence("inputs.p1", values=[1, 2, 3])],
928
+ groups=[hf.ElementGroup(name="my_group")],
929
+ )
930
+ t2 = hf.Task(schema=s2)
931
+ t3 = hf.Task(
932
+ schema=s3,
933
+ repeats=3,
934
+ groups=[hf.ElementGroup(name="my_group")],
935
+ )
936
+
937
+ l1 = hf.Loop(name="my_loop", tasks=[1, 2], num_iterations=2)
938
+
939
+ wk = hf.Workflow.from_template_data(
940
+ template_name="test_loops",
941
+ path=tmp_path,
942
+ tasks=[t1, t2, t3],
943
+ loops=[l1],
944
+ )
945
+
946
+ assert wk.tasks.t2.elements[0].iterations[0].get_data_idx()["inputs.p2"] == [
947
+ i.get_data_idx()["outputs.p2"] for i in wk.tasks.t1.elements
948
+ ]
949
+ assert [
950
+ i.iterations[0].get_data_idx()["inputs.p3"] for i in wk.tasks.t3.elements
951
+ ] == [wk.tasks.t2.elements[0].iterations[0].get_data_idx()["outputs.p3"]] * 3
952
+ assert wk.tasks.t2.elements[0].iterations[1].get_data_idx()["inputs.p2"] == [
953
+ i.iterations[0].get_data_idx()["outputs.p2"] for i in wk.tasks.t3.elements
954
+ ]
955
+ assert [
956
+ i.iterations[1].get_data_idx()["inputs.p3"] for i in wk.tasks.t3.elements
957
+ ] == [wk.tasks.t2.elements[0].iterations[1].get_data_idx()["outputs.p3"]] * 3
958
+
959
+
960
+ def test_loop_downstream_tasks(null_config, tmp_path: Path):
961
+ ts1 = hf.TaskSchema(
962
+ objective="t1",
963
+ inputs=[hf.SchemaInput("p1")],
964
+ outputs=[hf.SchemaOutput("p1")],
965
+ actions=[
966
+ hf.Action(
967
+ commands=[
968
+ hf.Command(
969
+ "Write-Output (<<parameter:p1>> + 100)",
970
+ stdout="<<int(parameter:p1)>>",
971
+ )
972
+ ],
973
+ ),
974
+ ],
975
+ )
976
+ ts2 = hf.TaskSchema(
977
+ objective="t2",
978
+ inputs=[hf.SchemaInput("p2")],
979
+ outputs=[hf.SchemaOutput("p2")],
980
+ actions=[
981
+ hf.Action(
982
+ commands=[
983
+ hf.Command(
984
+ "Write-Output (<<parameter:p2>> + 100)",
985
+ stdout="<<int(parameter:p2)>>",
986
+ )
987
+ ],
988
+ ),
989
+ ],
990
+ )
991
+ wk = hf.Workflow.from_template_data(
992
+ template_name="test_loop",
993
+ path=tmp_path,
994
+ tasks=[
995
+ hf.Task(schema=ts1, inputs={"p1": 101}),
996
+ hf.Task(schema=ts1),
997
+ hf.Task(schema=ts1),
998
+ hf.Task(schema=ts2, inputs={"p2": 201}),
999
+ ],
1000
+ loops=[
1001
+ hf.Loop(name="my_loop", tasks=[1, 2], num_iterations=2),
1002
+ ],
1003
+ )
1004
+ assert list(wk.loops.my_loop.downstream_tasks) == [wk.tasks[3]]
1005
+ assert list(wk.loops.my_loop.upstream_tasks) == [wk.tasks[0]]
1006
+
1007
+
1008
+ def test_raise_loop_task_subset_error(null_config, tmp_path: Path):
1009
+ ts1 = hf.TaskSchema(
1010
+ objective="t1",
1011
+ inputs=[hf.SchemaInput("p1")],
1012
+ outputs=[hf.SchemaOutput("p1")],
1013
+ actions=[
1014
+ hf.Action(
1015
+ commands=[
1016
+ hf.Command(
1017
+ "Write-Output (<<parameter:p1>> + 100)",
1018
+ stdout="<<int(parameter:p1)>>",
1019
+ )
1020
+ ],
1021
+ ),
1022
+ ],
1023
+ )
1024
+ with pytest.raises(LoopTaskSubsetError):
1025
+ hf.Workflow.from_template_data(
1026
+ template_name="test_loop",
1027
+ path=tmp_path,
1028
+ tasks=[
1029
+ hf.Task(schema=ts1, inputs={"p1": 101}),
1030
+ hf.Task(schema=ts1),
1031
+ hf.Task(schema=ts1),
1032
+ ],
1033
+ loops=[
1034
+ hf.Loop(name="my_loop", tasks=[2, 1], num_iterations=2),
1035
+ ],
1036
+ )
1037
+
1038
+
1039
+ def test_add_iteration_updates_downstream_data_idx_loop_output_param(
1040
+ new_null_config, tmp_path: Path
1041
+ ):
1042
+ # loop output (but not iterable) parameter sourced in task downstream of loop:
1043
+ s1, s2, s3 = make_schemas(
1044
+ ({"p1": None}, ("p2",), "t1"),
1045
+ (
1046
+ {"p2": None},
1047
+ (
1048
+ "p2",
1049
+ "p3",
1050
+ ),
1051
+ "t2",
1052
+ ),
1053
+ ({"p3": None}, ("p4",), "t3"),
1054
+ )
1055
+ tasks = [
1056
+ hf.Task(s1, inputs={"p1": 100}),
1057
+ hf.Task(s2),
1058
+ hf.Task(s3),
1059
+ ]
1060
+ loops = [hf.Loop(tasks=[1], num_iterations=3)]
1061
+ wk = hf.Workflow.from_template_data(
1062
+ template_name="loop_param_update",
1063
+ tasks=tasks,
1064
+ loops=loops,
1065
+ path=tmp_path,
1066
+ )
1067
+
1068
+ t1_di = wk.tasks.t1.elements[0].get_data_idx()
1069
+ t2_i0_di = wk.tasks.t2.elements[0].iterations[0].get_data_idx()
1070
+ t2_i1_di = wk.tasks.t2.elements[0].iterations[1].get_data_idx()
1071
+ t2_i2_di = wk.tasks.t2.elements[0].iterations[2].get_data_idx()
1072
+ t3_di = wk.tasks.t3.elements[0].get_data_idx()
1073
+
1074
+ # final task should get its input from the final iteration of the second task
1075
+ assert t2_i0_di["inputs.p2"] == t1_di["outputs.p2"]
1076
+ assert t2_i1_di["inputs.p2"] == t2_i0_di["outputs.p2"]
1077
+ assert t2_i2_di["inputs.p2"] == t2_i1_di["outputs.p2"]
1078
+ assert t3_di["inputs.p3"] == t2_i2_di["outputs.p3"]
1079
+
1080
+
1081
+ def test_add_iteration_updates_downstream_data_idx_loop_output_param_multi_element(
1082
+ new_null_config, tmp_path: Path
1083
+ ):
1084
+ # loop output (but not iterable) parameter sourced in task downstream of loop - multi
1085
+ # element
1086
+ s1, s2, s3 = make_schemas(
1087
+ ({"p1": None}, ("p2",), "t1"),
1088
+ (
1089
+ {"p2": None},
1090
+ (
1091
+ "p2",
1092
+ "p3",
1093
+ ),
1094
+ "t2",
1095
+ ),
1096
+ ({"p3": None}, ("p4",), "t3"),
1097
+ )
1098
+ tasks = [
1099
+ hf.Task(s1, sequences=[hf.ValueSequence("inputs.p1", values=[100, 101])]),
1100
+ hf.Task(s2),
1101
+ hf.Task(s3),
1102
+ ]
1103
+ loops = [hf.Loop(tasks=[1], num_iterations=3)]
1104
+ wk = hf.Workflow.from_template_data(
1105
+ template_name="loop_param_update",
1106
+ tasks=tasks,
1107
+ loops=loops,
1108
+ path=tmp_path,
1109
+ )
1110
+
1111
+ assert wk.tasks.t1.num_elements == 2
1112
+ assert wk.tasks.t2.num_elements == 2
1113
+ assert wk.tasks.t3.num_elements == 2
1114
+
1115
+ t1_e0_di = wk.tasks.t1.elements[0].get_data_idx()
1116
+ t2_e0_i0_di = wk.tasks.t2.elements[0].iterations[0].get_data_idx()
1117
+ t2_e0_i1_di = wk.tasks.t2.elements[0].iterations[1].get_data_idx()
1118
+ t2_e0_i2_di = wk.tasks.t2.elements[0].iterations[2].get_data_idx()
1119
+ t3_e0_di = wk.tasks.t3.elements[0].get_data_idx()
1120
+
1121
+ t1_e1_di = wk.tasks.t1.elements[1].get_data_idx()
1122
+ t2_e1_i0_di = wk.tasks.t2.elements[1].iterations[0].get_data_idx()
1123
+ t2_e1_i1_di = wk.tasks.t2.elements[1].iterations[1].get_data_idx()
1124
+ t2_e1_i2_di = wk.tasks.t2.elements[1].iterations[2].get_data_idx()
1125
+ t3_e1_di = wk.tasks.t3.elements[1].get_data_idx()
1126
+
1127
+ assert t2_e0_i0_di["inputs.p2"] == t1_e0_di["outputs.p2"]
1128
+ assert t2_e0_i1_di["inputs.p2"] == t2_e0_i0_di["outputs.p2"]
1129
+ assert t2_e0_i2_di["inputs.p2"] == t2_e0_i1_di["outputs.p2"]
1130
+ assert t3_e0_di["inputs.p3"] == t2_e0_i2_di["outputs.p3"]
1131
+
1132
+ assert t2_e1_i0_di["inputs.p2"] == t1_e1_di["outputs.p2"]
1133
+ assert t2_e1_i1_di["inputs.p2"] == t2_e1_i0_di["outputs.p2"]
1134
+ assert t2_e1_i2_di["inputs.p2"] == t2_e1_i1_di["outputs.p2"]
1135
+ assert t3_e1_di["inputs.p3"] == t2_e1_i2_di["outputs.p3"]
1136
+
1137
+
1138
+ def test_add_iteration_updates_downstream_data_idx_loop_output_param_multi_element_to_group(
1139
+ new_null_config, tmp_path: Path
1140
+ ):
1141
+ # loop output (but not iterable) parameter sourced in task downstream of loop - multi
1142
+ # element group
1143
+ s1, s2 = make_schemas(
1144
+ ({"p1": None}, ("p2",), "t1"),
1145
+ (
1146
+ {"p2": None},
1147
+ (
1148
+ "p2",
1149
+ "p3",
1150
+ ),
1151
+ "t2",
1152
+ ),
1153
+ )
1154
+ s3 = hf.TaskSchema(
1155
+ objective="t3",
1156
+ inputs=[hf.SchemaInput("p3", group="all")],
1157
+ outputs=[hf.SchemaOutput("p4")],
1158
+ actions=[
1159
+ hf.Action(
1160
+ commands=[
1161
+ hf.Command(
1162
+ command="echo $((<<sum(parameter:p3)>>))",
1163
+ stdout="<<parameter:p4>>",
1164
+ )
1165
+ ],
1166
+ )
1167
+ ],
1168
+ )
1169
+ tasks = [
1170
+ hf.Task(s1, sequences=[hf.ValueSequence("inputs.p1", values=[100, 101])]),
1171
+ hf.Task(s2, groups=[hf.ElementGroup(name="all")]),
1172
+ hf.Task(s3),
1173
+ ]
1174
+ loops = [hf.Loop(tasks=[1], num_iterations=3)]
1175
+ wk = hf.Workflow.from_template_data(
1176
+ template_name="loop_param_update",
1177
+ tasks=tasks,
1178
+ loops=loops,
1179
+ path=tmp_path,
1180
+ )
1181
+ assert wk.tasks.t1.num_elements == 2
1182
+ assert wk.tasks.t2.num_elements == 2
1183
+ assert wk.tasks.t3.num_elements == 1
1184
+
1185
+ t1_e0_di = wk.tasks.t1.elements[0].get_data_idx()
1186
+ t2_e0_i0_di = wk.tasks.t2.elements[0].iterations[0].get_data_idx()
1187
+ t2_e0_i1_di = wk.tasks.t2.elements[0].iterations[1].get_data_idx()
1188
+ t2_e0_i2_di = wk.tasks.t2.elements[0].iterations[2].get_data_idx()
1189
+
1190
+ t1_e1_di = wk.tasks.t1.elements[1].get_data_idx()
1191
+ t2_e1_i0_di = wk.tasks.t2.elements[1].iterations[0].get_data_idx()
1192
+ t2_e1_i1_di = wk.tasks.t2.elements[1].iterations[1].get_data_idx()
1193
+ t2_e1_i2_di = wk.tasks.t2.elements[1].iterations[2].get_data_idx()
1194
+
1195
+ t3_e0_di = wk.tasks.t3.elements[0].get_data_idx()
1196
+
1197
+ assert t2_e0_i0_di["inputs.p2"] == t1_e0_di["outputs.p2"]
1198
+ assert t2_e0_i1_di["inputs.p2"] == t2_e0_i0_di["outputs.p2"]
1199
+ assert t2_e0_i2_di["inputs.p2"] == t2_e0_i1_di["outputs.p2"]
1200
+
1201
+ assert t2_e1_i0_di["inputs.p2"] == t1_e1_di["outputs.p2"]
1202
+ assert t2_e1_i1_di["inputs.p2"] == t2_e1_i0_di["outputs.p2"]
1203
+ assert t2_e1_i2_di["inputs.p2"] == t2_e1_i1_di["outputs.p2"]
1204
+
1205
+ assert t3_e0_di["inputs.p3"] == [t2_e0_i2_di["outputs.p3"], t2_e1_i2_di["outputs.p3"]]
1206
+
1207
+
1208
+ def test_add_iteration_updates_downstream_data_idx_loop_iterable_param(
1209
+ new_null_config, tmp_path: Path
1210
+ ):
1211
+ # loop iterable parameter sourced in task downstream of loop:
1212
+ s1, s2, s3 = make_schemas(
1213
+ ({"p1": None}, ("p2",), "t1"),
1214
+ ({"p2": None}, ("p2",), "t2"),
1215
+ ({"p2": None}, ("p3",), "t3"),
1216
+ )
1217
+ tasks = [
1218
+ hf.Task(s1, inputs={"p1": 100}),
1219
+ hf.Task(s2),
1220
+ hf.Task(s3),
1221
+ ]
1222
+ loops = [hf.Loop(tasks=[1], num_iterations=3)]
1223
+ wk = hf.Workflow.from_template_data(
1224
+ template_name="loop_param_update",
1225
+ tasks=tasks,
1226
+ loops=loops,
1227
+ path=tmp_path,
1228
+ )
1229
+ t1_di = wk.tasks.t1.elements[0].get_data_idx()
1230
+ t2_i0_di = wk.tasks.t2.elements[0].iterations[0].get_data_idx()
1231
+ t2_i1_di = wk.tasks.t2.elements[0].iterations[1].get_data_idx()
1232
+ t2_i2_di = wk.tasks.t2.elements[0].iterations[2].get_data_idx()
1233
+ t3_di = wk.tasks.t3.elements[0].get_data_idx()
1234
+
1235
+ # final task should get its input from the final iteration of the second task
1236
+ assert t2_i0_di["inputs.p2"] == t1_di["outputs.p2"]
1237
+ assert t2_i1_di["inputs.p2"] == t2_i0_di["outputs.p2"]
1238
+ assert t2_i2_di["inputs.p2"] == t2_i1_di["outputs.p2"]
1239
+ assert t3_di["inputs.p2"] == t2_i2_di["outputs.p2"]
1240
+
1241
+
+def test_add_iteration_updates_downstream_data_idx_loop_iterable_param_multi_element(
+    new_null_config, tmp_path: Path
+):
+    # loop iterable parameter sourced in task downstream of loop - multi element:
+    s1, s2, s3 = make_schemas(
+        ({"p1": None}, ("p2",), "t1"),
+        ({"p2": None}, ("p2",), "t2"),
+        ({"p2": None}, ("p3",), "t3"),
+    )
+    tasks = [
+        hf.Task(s1, sequences=[hf.ValueSequence("inputs.p1", values=[100, 101])]),
+        hf.Task(s2),
+        hf.Task(s3),
+    ]
+    loops = [hf.Loop(tasks=[1], num_iterations=3)]
+    wk = hf.Workflow.from_template_data(
+        template_name="loop_param_update",
+        tasks=tasks,
+        loops=loops,
+        path=tmp_path,
+    )
+    t1_e0_di = wk.tasks.t1.elements[0].get_data_idx()
+    t2_e0_i0_di = wk.tasks.t2.elements[0].iterations[0].get_data_idx()
+    t2_e0_i1_di = wk.tasks.t2.elements[0].iterations[1].get_data_idx()
+    t2_e0_i2_di = wk.tasks.t2.elements[0].iterations[2].get_data_idx()
+    t3_e0_di = wk.tasks.t3.elements[0].get_data_idx()
+
+    t1_e1_di = wk.tasks.t1.elements[1].get_data_idx()
+    t2_e1_i0_di = wk.tasks.t2.elements[1].iterations[0].get_data_idx()
+    t2_e1_i1_di = wk.tasks.t2.elements[1].iterations[1].get_data_idx()
+    t2_e1_i2_di = wk.tasks.t2.elements[1].iterations[2].get_data_idx()
+    t3_e1_di = wk.tasks.t3.elements[1].get_data_idx()
+
+    # final task should get its input from the final iteration of the second task
+    assert t2_e0_i0_di["inputs.p2"] == t1_e0_di["outputs.p2"]
+    assert t2_e0_i1_di["inputs.p2"] == t2_e0_i0_di["outputs.p2"]
+    assert t2_e0_i2_di["inputs.p2"] == t2_e0_i1_di["outputs.p2"]
+    assert t3_e0_di["inputs.p2"] == t2_e0_i2_di["outputs.p2"]
+
+    assert t2_e1_i0_di["inputs.p2"] == t1_e1_di["outputs.p2"]
+    assert t2_e1_i1_di["inputs.p2"] == t2_e1_i0_di["outputs.p2"]
+    assert t2_e1_i2_di["inputs.p2"] == t2_e1_i1_di["outputs.p2"]
+    assert t3_e1_di["inputs.p2"] == t2_e1_i2_di["outputs.p2"]
+
+
+def test_add_iteration_updates_downstream_data_idx_loop_iterable_param_multi_element_to_group(
+    new_null_config, tmp_path: Path
+):
+    # loop iterable parameter sourced in task downstream of loop - multi element:
+    s1, s2 = make_schemas(
+        ({"p1": None}, ("p2",), "t1"),
+        ({"p2": None}, ("p2",), "t2"),
+    )
+
+    s3 = hf.TaskSchema(
+        objective="t3",
+        inputs=[hf.SchemaInput("p2", group="all")],
+        outputs=[hf.SchemaOutput("p3")],
+        actions=[
+            hf.Action(
+                commands=[
+                    hf.Command(
+                        command="echo $((<<sum(parameter:p2)>>))",
+                        stdout="<<parameter:p3>>",
+                    )
+                ],
+            )
+        ],
+    )
+    tasks = [
+        hf.Task(s1, sequences=[hf.ValueSequence("inputs.p1", values=[100, 101])]),
+        hf.Task(s2, groups=[hf.ElementGroup(name="all")]),
+        hf.Task(s3),
+    ]
+    loops = [hf.Loop(tasks=[1], num_iterations=3)]
+    wk = hf.Workflow.from_template_data(
+        template_name="loop_param_update",
+        tasks=tasks,
+        loops=loops,
+        path=tmp_path,
+    )
+    assert wk.tasks.t1.num_elements == 2
+    assert wk.tasks.t2.num_elements == 2
+    assert wk.tasks.t3.num_elements == 1
+
+    t1_e0_di = wk.tasks.t1.elements[0].get_data_idx()
+    t2_e0_i0_di = wk.tasks.t2.elements[0].iterations[0].get_data_idx()
+    t2_e0_i1_di = wk.tasks.t2.elements[0].iterations[1].get_data_idx()
+    t2_e0_i2_di = wk.tasks.t2.elements[0].iterations[2].get_data_idx()
+
+    t1_e1_di = wk.tasks.t1.elements[1].get_data_idx()
+    t2_e1_i0_di = wk.tasks.t2.elements[1].iterations[0].get_data_idx()
+    t2_e1_i1_di = wk.tasks.t2.elements[1].iterations[1].get_data_idx()
+    t2_e1_i2_di = wk.tasks.t2.elements[1].iterations[2].get_data_idx()
+
+    t3_e0_di = wk.tasks.t3.elements[0].get_data_idx()
+
+    assert t2_e0_i0_di["inputs.p2"] == t1_e0_di["outputs.p2"]
+    assert t2_e0_i1_di["inputs.p2"] == t2_e0_i0_di["outputs.p2"]
+    assert t2_e0_i2_di["inputs.p2"] == t2_e0_i1_di["outputs.p2"]
+
+    assert t2_e1_i0_di["inputs.p2"] == t1_e1_di["outputs.p2"]
+    assert t2_e1_i1_di["inputs.p2"] == t2_e1_i0_di["outputs.p2"]
+    assert t2_e1_i2_di["inputs.p2"] == t2_e1_i1_di["outputs.p2"]
+
+    assert t3_e0_di["inputs.p2"] == [t2_e0_i2_di["outputs.p2"], t2_e1_i2_di["outputs.p2"]]
+
+
+def test_add_iteration_correct_downstream_data_idx_iterable_param_downstream_adjacent_loop(
+    null_config, tmp_path: Path
+):
+
+    s1, s2, s3 = make_schemas(
+        ({"p1": None}, ("p2",), "t1"),
+        ({"p2": None}, ("p2",), "t2"),
+        ({"p2": None}, ("p2",), "t3"),
+    )
+    tasks = [
+        hf.Task(s1, inputs={"p1": 100}),
+        hf.Task(s2),
+        hf.Task(s3),
+    ]
+
+    # downstream loop added after upstream loop:
+    loops = [
+        hf.Loop(tasks=[1], num_iterations=2),
+        hf.Loop(tasks=[2], num_iterations=2),
+    ]
+
+    wk = hf.Workflow.from_template_data(
+        template_name="loop_param_update",
+        tasks=tasks,
+        loops=loops,
+        path=tmp_path,
+    )
+
+    t1_di = wk.tasks.t1.elements[0].get_data_idx()
+    t2_i0_di = wk.tasks.t2.elements[0].iterations[0].get_data_idx()
+    t2_i1_di = wk.tasks.t2.elements[0].iterations[1].get_data_idx()
+    t3_i0_di = wk.tasks.t3.elements[0].iterations[0].get_data_idx()
+    t3_i1_di = wk.tasks.t3.elements[0].iterations[1].get_data_idx()
+
+    # final task should get its input from the final iteration of the second task
+    assert t2_i0_di["inputs.p2"] == t1_di["outputs.p2"]
+    assert t2_i1_di["inputs.p2"] == t2_i0_di["outputs.p2"]
+    assert t3_i0_di["inputs.p2"] == t2_i1_di["outputs.p2"]
+    assert t3_i1_di["inputs.p2"] == t3_i0_di["outputs.p2"]
+
+    t1_iter_di = wk.tasks.t1.elements[0].iterations[0].data_idx
+    t2_i0_iter_di = wk.tasks.t2.elements[0].iterations[0].data_idx
+    t2_i1_iter_di = wk.tasks.t2.elements[0].iterations[1].data_idx
+    t3_i0_iter_di = wk.tasks.t3.elements[0].iterations[0].data_idx
+    t3_i1_iter_di = wk.tasks.t3.elements[0].iterations[1].data_idx
+
+    assert t2_i0_iter_di["inputs.p2"] == t1_iter_di["outputs.p2"]
+    assert t2_i1_iter_di["inputs.p2"] == t2_i0_iter_di["outputs.p2"]
+    assert t3_i0_iter_di["inputs.p2"] == t2_i1_iter_di["outputs.p2"]
+    assert t3_i1_iter_di["inputs.p2"] == t3_i0_iter_di["outputs.p2"]
+
+
+def test_add_iteration_correct_downstream_data_idx_iterable_param_downstream_adjacent_loop_added_before(
+    null_config, tmp_path: Path
+):
+    s1, s2, s3 = make_schemas(
+        ({"p1": None}, ("p2",), "t1"),
+        ({"p2": None}, ("p2",), "t2"),
+        ({"p2": None}, ("p2",), "t3"),
+    )
+    tasks = [
+        hf.Task(s1, inputs={"p1": 100}),
+        hf.Task(s2),
+        hf.Task(s3),
+    ]
+
+    # upstream loop added after downstream loop:
+    loops = [
+        hf.Loop(tasks=[2], num_iterations=2),
+        hf.Loop(tasks=[1], num_iterations=2),
+    ]
+
+    wk = hf.Workflow.from_template_data(
+        template_name="loop_param_update",
+        tasks=tasks,
+        loops=loops,
+        path=tmp_path,
+    )
+
+    t1_di = wk.tasks.t1.elements[0].get_data_idx()
+    t2_i0_di = wk.tasks.t2.elements[0].iterations[0].get_data_idx()
+    t2_i1_di = wk.tasks.t2.elements[0].iterations[1].get_data_idx()
+    t3_i0_di = wk.tasks.t3.elements[0].iterations[0].get_data_idx()
+    t3_i1_di = wk.tasks.t3.elements[0].iterations[1].get_data_idx()
+
+    # final task should get its input from the final iteration of the second task
+    assert t2_i0_di["inputs.p2"] == t1_di["outputs.p2"]
+    assert t2_i1_di["inputs.p2"] == t2_i0_di["outputs.p2"]
+    assert t3_i0_di["inputs.p2"] == t2_i1_di["outputs.p2"]
+    assert t3_i1_di["inputs.p2"] == t3_i0_di["outputs.p2"]
+
+    t1_iter_di = wk.tasks.t1.elements[0].iterations[0].data_idx
+    t2_i0_iter_di = wk.tasks.t2.elements[0].iterations[0].data_idx
+    t2_i1_iter_di = wk.tasks.t2.elements[0].iterations[1].data_idx
+    t3_i0_iter_di = wk.tasks.t3.elements[0].iterations[0].data_idx
+    t3_i1_iter_di = wk.tasks.t3.elements[0].iterations[1].data_idx
+
+    assert t2_i0_iter_di["inputs.p2"] == t1_iter_di["outputs.p2"]
+    assert t2_i1_iter_di["inputs.p2"] == t2_i0_iter_di["outputs.p2"]
+    assert t3_i0_iter_di["inputs.p2"] == t2_i1_iter_di["outputs.p2"]
+    assert t3_i1_iter_di["inputs.p2"] == t3_i0_iter_di["outputs.p2"]
+
+
+def test_add_iteration_correct_downstream_data_idx_iterable_param_downstream_multi_task_adjacent_loop_added_before(
+    null_config, tmp_path: Path
+):
+    s1, s2, s3, s4 = make_schemas(
+        ({"p1": None}, ("p2",), "t1"),
+        ({"p2": None}, ("p2",), "t2"),
+        ({"p2": None}, ("p2",), "t3"),
+        ({"p2": None}, ("p2",), "t4"),
+    )
+    tasks = [
+        hf.Task(s1, inputs={"p1": 100}),
+        hf.Task(s2),
+        hf.Task(s3),
+        hf.Task(s4),
+    ]
+
+    # upstream loop added after downstream loop:
+    loops = [
+        hf.Loop(tasks=[2, 3], num_iterations=2),
+        hf.Loop(tasks=[1], num_iterations=2),
+    ]
+    wk = hf.Workflow.from_template_data(
+        template_name="loop_param_update",
+        tasks=tasks,
+        loops=loops,
+        path=tmp_path,
+    )
+
+    t1_di = wk.tasks.t1.elements[0].get_data_idx()
+    t2_i0_di = wk.tasks.t2.elements[0].iterations[0].get_data_idx()
+    t2_i1_di = wk.tasks.t2.elements[0].iterations[1].get_data_idx()
+    t3_i0_di = wk.tasks.t3.elements[0].iterations[0].get_data_idx()
+    t3_i1_di = wk.tasks.t3.elements[0].iterations[1].get_data_idx()
+    t4_i0_di = wk.tasks.t4.elements[0].iterations[0].get_data_idx()
+    t4_i1_di = wk.tasks.t4.elements[0].iterations[1].get_data_idx()
+
+    assert t2_i0_di["inputs.p2"] == t1_di["outputs.p2"]
+    assert t2_i1_di["inputs.p2"] == t2_i0_di["outputs.p2"]
+
+    assert t3_i0_di["inputs.p2"] == t2_i1_di["outputs.p2"]
+    assert t3_i1_di["inputs.p2"] == t4_i0_di["outputs.p2"]
+
+    assert t4_i0_di["inputs.p2"] == t3_i0_di["outputs.p2"]
+    assert t4_i1_di["inputs.p2"] == t3_i1_di["outputs.p2"]
+
+    t1_iter_di = wk.tasks.t1.elements[0].iterations[0].data_idx
+    t2_i0_iter_di = wk.tasks.t2.elements[0].iterations[0].data_idx
+    t2_i1_iter_di = wk.tasks.t2.elements[0].iterations[1].data_idx
+    t3_i0_iter_di = wk.tasks.t3.elements[0].iterations[0].data_idx
+    t3_i1_iter_di = wk.tasks.t3.elements[0].iterations[1].data_idx
+    t4_i0_iter_di = wk.tasks.t4.elements[0].iterations[0].data_idx
+    t4_i1_iter_di = wk.tasks.t4.elements[0].iterations[1].data_idx
+
+    assert t2_i0_iter_di["inputs.p2"] == t1_iter_di["outputs.p2"]
+    assert t2_i1_iter_di["inputs.p2"] == t2_i0_iter_di["outputs.p2"]
+    assert t3_i0_iter_di["inputs.p2"] == t2_i1_iter_di["outputs.p2"]
+    assert t3_i1_iter_di["inputs.p2"] == t4_i0_iter_di["outputs.p2"]
+    assert t4_i0_iter_di["inputs.p2"] == t3_i0_iter_di["outputs.p2"]
+    assert t4_i1_iter_di["inputs.p2"] == t3_i1_iter_di["outputs.p2"]
+
+
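Note the wrap-around asserted above: with `t3` and `t4` in the same loop, `p2` threads `t3` to `t4` within an iteration and `t4` back to `t3` across iterations. A pure-Python sketch of that threading (string labels stand in for data indices; illustrative, not hpcflow code):

    src = "t2.i1.outputs.p2"          # final output of the upstream loop
    pairs = []
    for it in range(2):               # two iterations of the [t3, t4] loop
        t3_in, t3_out = src, f"t3.i{it}.outputs.p2"
        t4_in, t4_out = t3_out, f"t4.i{it}.outputs.p2"
        pairs.append(((t3_in, t3_out), (t4_in, t4_out)))
        src = t4_out                  # the next t3 iteration reads t4's output

    # t3's second-iteration input is t4's first-iteration output:
    assert pairs[1][0][0] == pairs[0][1][1]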
+def test_nested_loops_with_downstream_updates_iteration_pathway(
+    null_config, tmp_path: Path
+):
+    s1, s2, s3 = make_schemas(
+        ({"p1": None}, ("p2",), "t1"),
+        ({"p2": None}, ("p2",), "t2"),
+        ({"p2": None}, ("p1",), "t3"),
+    )
+    tasks = [
+        hf.Task(s1, inputs={"p1": 100}),
+        hf.Task(s2),
+        hf.Task(s3),
+    ]
+
+    loops = [
+        hf.Loop(name="inner", tasks=[1], num_iterations=2),
+        hf.Loop(name="outer", tasks=[0, 1, 2], num_iterations=2),
+    ]
+
+    # when adding the inner loop iterations, the data index of the downstream task t3
+    # must be updated to use the newly-added output. This should happen once before the
+    # outer loop is added, and once again when adding the inner loop iteration as part
+    # of adding the outer loop's second iteration!
+
+    wk = hf.Workflow.from_template_data(
+        template_name="loop_param_update_nested",
+        tasks=tasks,
+        loops=loops,
+        path=tmp_path,
+    )
+
+    pathway = wk.get_iteration_task_pathway(ret_data_idx=True)
+
+    # task insert IDs:
+    assert [i[0] for i in pathway] == [0, 1, 1, 2, 0, 1, 1, 2]
+
+    # loop indices:
+    assert [i[1] for i in pathway] == [
+        {"outer": 0},
+        {"outer": 0, "inner": 0},
+        {"outer": 0, "inner": 1},
+        {"outer": 0},
+        {"outer": 1},
+        {"outer": 1, "inner": 0},
+        {"outer": 1, "inner": 1},
+        {"outer": 1},
+    ]
+
+    # flow of parameter p1/p2 (element zero):
+    assert pathway[0][2][0]["outputs.p2"] == pathway[1][2][0]["inputs.p2"]
+    assert pathway[1][2][0]["outputs.p2"] == pathway[2][2][0]["inputs.p2"]
+    assert pathway[2][2][0]["outputs.p2"] == pathway[3][2][0]["inputs.p2"]
+    assert pathway[3][2][0]["outputs.p1"] == pathway[4][2][0]["inputs.p1"]
+    assert pathway[4][2][0]["outputs.p2"] == pathway[5][2][0]["inputs.p2"]
+    assert pathway[5][2][0]["outputs.p2"] == pathway[6][2][0]["inputs.p2"]
+    assert pathway[6][2][0]["outputs.p2"] == pathway[7][2][0]["inputs.p2"]
+
+
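The expected pathway above is the nested-loop expansion written out longhand; a pure-Python sketch that regenerates the same (insert ID, loop indices) sequence:

    path = []
    for outer in range(2):
        path.append((0, {"outer": outer}))
        for inner in range(2):
            path.append((1, {"outer": outer, "inner": inner}))
        path.append((2, {"outer": outer}))

    assert [t for t, _ in path] == [0, 1, 1, 2, 0, 1, 1, 2]
    assert path[1][1] == {"outer": 0, "inner": 0}
    assert path[-1][1] == {"outer": 1}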
+def test_multi_task_loop_with_downstream_updates_iteration_pathway(
+    null_config, tmp_path: Path
+):
+    s1, s2, s3, s4 = make_schemas(
+        ({"p1": None}, ("p2",), "t1"),
+        ({"p2": None}, ("p2",), "t2"),
+        ({"p2": None}, ("p2",), "t3"),
+        ({"p2": None}, ("p3",), "t4"),
+    )
+    tasks = [
+        hf.Task(s1, inputs={"p1": 100}),
+        hf.Task(s2),
+        hf.Task(s3),
+        hf.Task(s4),
+    ]
+
+    loops = [
+        hf.Loop(tasks=[1, 2], num_iterations=2),
+    ]
+
+    wk = hf.Workflow.from_template_data(
+        template_name="loop_param_update",
+        tasks=tasks,
+        loops=loops,
+        path=tmp_path,
+    )
+
+    pathway = wk.get_iteration_task_pathway(ret_data_idx=True)
+
+    # task insert IDs:
+    assert [i[0] for i in pathway] == [0, 1, 2, 1, 2, 3]
+
+    # loop indices:
+    assert [i[1] for i in pathway] == [
+        {},
+        {"loop_0": 0},
+        {"loop_0": 0},
+        {"loop_0": 1},
+        {"loop_0": 1},
+        {},
+    ]
+
+    # flow of parameter p2 (element zero):
+    assert pathway[0][2][0]["outputs.p2"] == pathway[1][2][0]["inputs.p2"]
+    assert pathway[1][2][0]["outputs.p2"] == pathway[2][2][0]["inputs.p2"]
+    assert pathway[2][2][0]["outputs.p2"] == pathway[3][2][0]["inputs.p2"]
+    assert pathway[3][2][0]["outputs.p2"] == pathway[4][2][0]["inputs.p2"]
+    assert pathway[4][2][0]["outputs.p2"] == pathway[5][2][0]["inputs.p2"]
+
+
+def test_multi_nested_loops_with_downstream_updates_iteration_pathway(
+    null_config, tmp_path: Path
+):
+
+    s1, s2, s3, s4, s5, s6 = make_schemas(
+        ({"p1": None}, ("p2",), "t1"),
+        ({"p2": None}, ("p2",), "t2"),
+        ({"p2": None}, ("p2",), "t3"),
+        ({"p2": None}, ("p2",), "t4"),
+        ({"p2": None}, ("p1",), "t5"),
+        ({"p1": None}, ("p3",), "t6"),
+    )
+    tasks = [
+        hf.Task(s1, inputs={"p1": 100}),
+        hf.Task(s2),
+        hf.Task(s3),
+        hf.Task(s4),
+        hf.Task(s5),
+        hf.Task(s6),
+    ]
+
+    loops = [
+        hf.Loop(name="inner", tasks=[1], num_iterations=2),
+        hf.Loop(name="middle", tasks=[1, 2], num_iterations=2),
+        hf.Loop(name="outer", tasks=[0, 1, 2, 3, 4], num_iterations=2),
+    ]
+
+    wk = hf.Workflow.from_template_data(
+        template_name="loop_param_update_nested",
+        tasks=tasks,
+        loops=loops,
+        path=tmp_path,
+    )
+
+    pathway = wk.get_iteration_task_pathway(ret_data_idx=True)
+
+    # task insert IDs (one row per outer iteration, then the final task):
+    assert [i[0] for i in pathway] == [
+        0, 1, 1, 2, 1, 1, 2, 3, 4,
+        0, 1, 1, 2, 1, 1, 2, 3, 4,
+        5,
+    ]
+
+    # loop indices:
+    assert [i[1] for i in pathway] == [
+        {"outer": 0},
+        {"outer": 0, "middle": 0, "inner": 0},
+        {"outer": 0, "middle": 0, "inner": 1},
+        {"outer": 0, "middle": 0},
+        {"outer": 0, "middle": 1, "inner": 0},
+        {"outer": 0, "middle": 1, "inner": 1},
+        {"outer": 0, "middle": 1},
+        {"outer": 0},
+        {"outer": 0},
+        {"outer": 1},
+        {"outer": 1, "middle": 0, "inner": 0},
+        {"outer": 1, "middle": 0, "inner": 1},
+        {"outer": 1, "middle": 0},
+        {"outer": 1, "middle": 1, "inner": 0},
+        {"outer": 1, "middle": 1, "inner": 1},
+        {"outer": 1, "middle": 1},
+        {"outer": 1},
+        {"outer": 1},
+        {},
+    ]
+
+    # flow of parameter p1/p2 (element zero):
+    assert pathway[0][2][0]["outputs.p2"] == pathway[1][2][0]["inputs.p2"]
+    assert pathway[1][2][0]["outputs.p2"] == pathway[2][2][0]["inputs.p2"]
+    assert pathway[2][2][0]["outputs.p2"] == pathway[3][2][0]["inputs.p2"]
+    assert pathway[3][2][0]["outputs.p2"] == pathway[4][2][0]["inputs.p2"]
+    assert pathway[4][2][0]["outputs.p2"] == pathway[5][2][0]["inputs.p2"]
+    assert pathway[5][2][0]["outputs.p2"] == pathway[6][2][0]["inputs.p2"]
+    assert pathway[6][2][0]["outputs.p2"] == pathway[7][2][0]["inputs.p2"]
+    assert pathway[7][2][0]["outputs.p2"] == pathway[8][2][0]["inputs.p2"]
+    assert pathway[8][2][0]["outputs.p1"] == pathway[9][2][0]["inputs.p1"]
+    assert pathway[9][2][0]["outputs.p2"] == pathway[10][2][0]["inputs.p2"]
+    assert pathway[10][2][0]["outputs.p2"] == pathway[11][2][0]["inputs.p2"]
+    assert pathway[11][2][0]["outputs.p2"] == pathway[12][2][0]["inputs.p2"]
+    assert pathway[12][2][0]["outputs.p2"] == pathway[13][2][0]["inputs.p2"]
+    assert pathway[13][2][0]["outputs.p2"] == pathway[14][2][0]["inputs.p2"]
+    assert pathway[14][2][0]["outputs.p2"] == pathway[15][2][0]["inputs.p2"]
+    assert pathway[15][2][0]["outputs.p2"] == pathway[16][2][0]["inputs.p2"]
+    assert pathway[16][2][0]["outputs.p2"] == pathway[17][2][0]["inputs.p2"]
+    assert pathway[17][2][0]["outputs.p1"] == pathway[18][2][0]["inputs.p1"]
+
+
+def test_add_iteration_updates_downstream_data_idx_loop_output_param_including_task_input_sources(
+    new_null_config, tmp_path: Path
+):
+    # task `t3` input `p1` has `InputSource.task(task_ref=1, task_source_type="input")`,
+    # so `t3` elements need to have their data indices updated, since task `t2` (i.e.
+    # `task_ref=1`) will have had its data indices updated:
+    s1, s2, s3 = make_schemas(
+        ({"p1": None}, ("p1",), "t1"),
+        ({"p1": None}, ("p2",), "t2"),
+        ({"p1": None, "p2": None}, ("p3",), "t3"),
+    )
+    tasks = [
+        hf.Task(s1, inputs={"p1": 100}),
+        hf.Task(s2),
+        hf.Task(s3),
+    ]
+    loops = [hf.Loop(tasks=[0], num_iterations=2)]
+
+    wk = hf.Workflow.from_template_data(
+        template_name="loop_param_update_task_input_source",
+        tasks=tasks,
+        loops=loops,
+        path=tmp_path,
+    )
+
+    t1_i0_di = wk.tasks.t1.elements[0].iterations[0].get_data_idx()
+    t1_i1_di = wk.tasks.t1.elements[0].iterations[1].get_data_idx()
+    t2_di = wk.tasks.t2.elements[0].get_data_idx()
+    t3_di = wk.tasks.t3.elements[0].get_data_idx()
+
+    assert t1_i0_di["outputs.p1"] == t1_i1_di["inputs.p1"]
+    assert t1_i1_di["outputs.p1"] == t2_di["inputs.p1"]
+    assert t1_i1_di["outputs.p1"] == t3_di["inputs.p1"]
+    assert t2_di["outputs.p2"] == t3_di["inputs.p2"]
+
+
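The update exercised here is transitive: once `t1` gains a new final iteration, every data index that pointed at the old final `p1` output must be rebound, including indices that were sourced from another task's input rather than from `t1` directly. A pure-Python sketch of such a rebinding pass (plain dict with hypothetical keys; not the hpcflow implementation):

    idx = {"t1.outputs.p1": 0, "t2.inputs.p1": 0, "t3.inputs.p1": 0}

    idx["t1.outputs.p1"] = 1              # a new final iteration is added
    for key in ("t2.inputs.p1", "t3.inputs.p1"):
        idx[key] = idx["t1.outputs.p1"]   # downstream update pass

    assert idx["t2.inputs.p1"] == idx["t3.inputs.p1"] == 1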
+def test_add_iteration_updates_downstream_data_idx_loop_output_param_including_task_input_sources_twice(
+    new_null_config, tmp_path: Path
+):
+    # tasks `t3/t4` inputs `p1` have `InputSource.task(task_ref=1/2, task_source_type="input")`,
+    # so `t3/t4` elements need to have their data indices updated, since tasks `t2/t3`
+    # (i.e. `task_ref=1/2`) will have had their data indices updated:
+
+    s1, s2, s3, s4 = make_schemas(
+        ({"p1": None}, ("p1",), "t1"),
+        ({"p1": None}, ("p2",), "t2"),
+        ({"p1": None, "p2": None}, ("p3",), "t3"),
+        ({"p1": None, "p3": None}, ("p4",), "t4"),
+    )
+    tasks = [
+        hf.Task(s1, inputs={"p1": 100}),
+        hf.Task(s2),
+        hf.Task(s3),
+        hf.Task(s4),
+    ]
+    loops = [hf.Loop(tasks=[0], num_iterations=2)]
+    wk = hf.Workflow.from_template_data(
+        template_name="loop_param_update_task_input_source",
+        tasks=tasks,
+        loops=loops,
+        path=tmp_path,
+    )
+    t1_i0_di = wk.tasks.t1.elements[0].iterations[0].get_data_idx()
+    t1_i1_di = wk.tasks.t1.elements[0].iterations[1].get_data_idx()
+    t2_di = wk.tasks.t2.elements[0].get_data_idx()
+    t3_di = wk.tasks.t3.elements[0].get_data_idx()
+    t4_di = wk.tasks.t4.elements[0].get_data_idx()
+
+    assert t1_i0_di["outputs.p1"] == t1_i1_di["inputs.p1"]
+    assert t1_i1_di["outputs.p1"] == t2_di["inputs.p1"]
+    assert t1_i1_di["outputs.p1"] == t3_di["inputs.p1"]
+    assert t1_i1_di["outputs.p1"] == t4_di["inputs.p1"]
+    assert t2_di["outputs.p2"] == t3_di["inputs.p2"]
+
+
+def test_add_iteration_updates_downstream_data_idx_loop_output_param_including_task_input_sources_thrice(
+    new_null_config, tmp_path: Path
+):
+    # tasks `t3/t4/t5` inputs `p1` have `InputSource.task(task_ref=1/2/3, task_source_type="input")`,
+    # so `t3/t4/t5` elements need to have their data indices updated, since tasks
+    # `t2/t3/t4` (i.e. `task_ref=1/2/3`) will have had their data indices updated:
+
+    s1, s2, s3, s4, s5 = make_schemas(
+        ({"p1": None}, ("p1",), "t1"),
+        ({"p1": None}, ("p2",), "t2"),
+        ({"p1": None, "p2": None}, ("p3",), "t3"),
+        ({"p1": None, "p3": None}, ("p4",), "t4"),
+        ({"p1": None, "p4": None}, ("p5",), "t5"),
+    )
+    tasks = [
+        hf.Task(s1, inputs={"p1": 100}),
+        hf.Task(s2),
+        hf.Task(s3),
+        hf.Task(s4),
+        hf.Task(s5),
+    ]
+    loops = [hf.Loop(tasks=[0], num_iterations=2)]
+    wk = hf.Workflow.from_template_data(
+        template_name="loop_param_update_task_input_source",
+        tasks=tasks,
+        loops=loops,
+        path=tmp_path,
+    )
+    t1_i0_di = wk.tasks.t1.elements[0].iterations[0].get_data_idx()
+    t1_i1_di = wk.tasks.t1.elements[0].iterations[1].get_data_idx()
+    t2_di = wk.tasks.t2.elements[0].get_data_idx()
+    t3_di = wk.tasks.t3.elements[0].get_data_idx()
+    t4_di = wk.tasks.t4.elements[0].get_data_idx()
+    t5_di = wk.tasks.t5.elements[0].get_data_idx()
+
+    assert t1_i0_di["outputs.p1"] == t1_i1_di["inputs.p1"]
+    assert t1_i1_di["outputs.p1"] == t2_di["inputs.p1"]
+    assert t1_i1_di["outputs.p1"] == t3_di["inputs.p1"]
+    assert t1_i1_di["outputs.p1"] == t4_di["inputs.p1"]
+    assert t1_i1_di["outputs.p1"] == t5_di["inputs.p1"]
+    assert t2_di["outputs.p2"] == t3_di["inputs.p2"]
+
+
+def test_add_iteration_updates_downstream_data_idx_loop_output_param_including_task_input_sources_thrice_multi_element(
+    new_null_config, tmp_path: Path
+):
+    # tasks `t3/t4/t5` inputs `p1` have `InputSource.task(task_ref=1/2/3, task_source_type="input")`,
+    # so `t3/t4/t5` elements need to have their data indices updated, since tasks
+    # `t2/t3/t4` (i.e. `task_ref=1/2/3`) will have had their data indices updated:
+
+    s1, s2, s3, s4, s5 = make_schemas(
+        ({"p1": None}, ("p1",), "t1"),
+        ({"p1": None}, ("p2",), "t2"),
+        ({"p1": None, "p2": None}, ("p3",), "t3"),
+        ({"p1": None, "p3": None}, ("p4",), "t4"),
+        ({"p1": None, "p4": None}, ("p5",), "t5"),
+    )
+    tasks = [
+        hf.Task(s1, inputs={"p1": 100}, repeats=2),
+        hf.Task(s2),
+        hf.Task(s3),
+        hf.Task(s4),
+        hf.Task(s5),
+    ]
+    loops = [hf.Loop(tasks=[0], num_iterations=2)]
+    wk = hf.Workflow.from_template_data(
+        template_name="loop_param_update_task_input_source",
+        tasks=tasks,
+        loops=loops,
+        path=tmp_path,
+    )
+    t1_e0_i0_di = wk.tasks.t1.elements[0].iterations[0].get_data_idx()
+    t1_e0_i1_di = wk.tasks.t1.elements[0].iterations[1].get_data_idx()
+    t2_e0_di = wk.tasks.t2.elements[0].get_data_idx()
+    t3_e0_di = wk.tasks.t3.elements[0].get_data_idx()
+    t4_e0_di = wk.tasks.t4.elements[0].get_data_idx()
+    t5_e0_di = wk.tasks.t5.elements[0].get_data_idx()
+
+    t1_e1_i0_di = wk.tasks.t1.elements[1].iterations[0].get_data_idx()
+    t1_e1_i1_di = wk.tasks.t1.elements[1].iterations[1].get_data_idx()
+    t2_e1_di = wk.tasks.t2.elements[1].get_data_idx()
+    t3_e1_di = wk.tasks.t3.elements[1].get_data_idx()
+    t4_e1_di = wk.tasks.t4.elements[1].get_data_idx()
+    t5_e1_di = wk.tasks.t5.elements[1].get_data_idx()
+
+    assert t1_e0_i0_di["outputs.p1"] == t1_e0_i1_di["inputs.p1"]
+    assert t1_e0_i1_di["outputs.p1"] == t2_e0_di["inputs.p1"]
+    assert t1_e0_i1_di["outputs.p1"] == t3_e0_di["inputs.p1"]
+    assert t1_e0_i1_di["outputs.p1"] == t4_e0_di["inputs.p1"]
+    assert t1_e0_i1_di["outputs.p1"] == t5_e0_di["inputs.p1"]
+    assert t2_e0_di["outputs.p2"] == t3_e0_di["inputs.p2"]
+
+    assert t1_e1_i0_di["outputs.p1"] == t1_e1_i1_di["inputs.p1"]
+    assert t1_e1_i1_di["outputs.p1"] == t2_e1_di["inputs.p1"]
+    assert t1_e1_i1_di["outputs.p1"] == t3_e1_di["inputs.p1"]
+    assert t1_e1_i1_di["outputs.p1"] == t4_e1_di["inputs.p1"]
+    assert t1_e1_i1_di["outputs.p1"] == t5_e1_di["inputs.p1"]
+    assert t2_e1_di["outputs.p2"] == t3_e1_di["inputs.p2"]
+
+
+def test_adjacent_loops_iteration_pathway(null_config, tmp_path: Path):
+    ts1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput("p1")],
+        outputs=[hf.SchemaOutput("p1")],
+        actions=[
+            hf.Action(
+                commands=[
+                    hf.Command(
+                        "Write-Output (<<parameter:p1>> + 100)",
+                        stdout="<<int(parameter:p1)>>",
+                    )
+                ],
+            ),
+        ],
+    )
+    ts2 = hf.TaskSchema(
+        objective="t2",
+        inputs=[hf.SchemaInput("p2")],
+        outputs=[hf.SchemaOutput("p2")],
+        actions=[
+            hf.Action(
+                commands=[
+                    hf.Command(
+                        "Write-Output (<<parameter:p2>> + 100)",
+                        stdout="<<int(parameter:p2)>>",
+                    )
+                ],
+            ),
+        ],
+    )
+    wk = hf.Workflow.from_template_data(
+        template_name="test_loop",
+        path=tmp_path,
+        tasks=[
+            hf.Task(schema=ts1, inputs={"p1": 101}),
+            hf.Task(schema=ts1),
+            hf.Task(schema=ts2, inputs={"p2": 201}),
+        ],
+        loops=[
+            hf.Loop(name="loop_A", tasks=[0, 1], num_iterations=2),
+            hf.Loop(name="loop_B", tasks=[2], num_iterations=2),
+        ],
+    )
+    assert wk.get_iteration_task_pathway() == [
+        (0, {"loop_A": 0}),
+        (1, {"loop_A": 0}),
+        (0, {"loop_A": 1}),
+        (1, {"loop_A": 1}),
+        (2, {"loop_B": 0}),
+        (2, {"loop_B": 1}),
+    ]
+
+
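Adjacent (non-overlapping) loops expand independently and in task order, as the expected pathway shows; a pure-Python sketch regenerating it:

    path = [(t, {"loop_A": i}) for i in range(2) for t in (0, 1)]
    path += [(2, {"loop_B": i}) for i in range(2)]

    assert path == [
        (0, {"loop_A": 0}),
        (1, {"loop_A": 0}),
        (0, {"loop_A": 1}),
        (1, {"loop_A": 1}),
        (2, {"loop_B": 0}),
        (2, {"loop_B": 1}),
    ]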
+def test_get_child_loops_ordered_by_depth(null_config, tmp_path: Path):
+    ts1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput("p1")],
+        outputs=[hf.SchemaOutput("p1")],
+        actions=[
+            hf.Action(
+                commands=[
+                    hf.Command(
+                        "Write-Output (<<parameter:p1>> + 100)",
+                        stdout="<<int(parameter:p1)>>",
+                    )
+                ],
+            ),
+        ],
+    )
+    wk = hf.Workflow.from_template_data(
+        template_name="test_loop",
+        path=tmp_path,
+        tasks=[
+            hf.Task(schema=ts1, inputs={"p1": 101}),
+        ],
+        loops=[
+            hf.Loop(name="inner", tasks=[0], num_iterations=1),
+            hf.Loop(name="middle", tasks=[0], num_iterations=1),
+            hf.Loop(name="outer", tasks=[0], num_iterations=1),
+        ],
+    )
+    assert wk.loops.inner.get_child_loops() == []
+    assert wk.loops.middle.get_child_loops() == [wk.loops.inner]
+    assert wk.loops.outer.get_child_loops() == [wk.loops.middle, wk.loops.inner]
+
+
+def test_multi_nested_loops(null_config, tmp_path: Path):
+    ts1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput("p1")],
+        outputs=[hf.SchemaOutput("p1")],
+        actions=[
+            hf.Action(
+                commands=[
+                    hf.Command(
+                        "Write-Output (<<parameter:p1>> + 100)",
+                        stdout="<<int(parameter:p1)>>",
+                    )
+                ],
+            ),
+        ],
+    )
+    wk = hf.Workflow.from_template_data(
+        template_name="test_loop",
+        path=tmp_path,
+        tasks=[hf.Task(schema=ts1, inputs={"p1": 101})],
+        loops=[
+            hf.Loop(name="inner", tasks=[0], num_iterations=2),
+            hf.Loop(name="middle_1", tasks=[0], num_iterations=3),
+            hf.Loop(name="middle_2", tasks=[0], num_iterations=2),
+            hf.Loop(name="outer", tasks=[0], num_iterations=2),
+        ],
+    )
+    pathway = wk.get_iteration_task_pathway(ret_iter_IDs=True)
+    assert len(pathway) == 2 * 3 * 2 * 2
+    assert wk.get_iteration_task_pathway(ret_iter_IDs=True) == [
+        (0, {"inner": 0, "middle_1": 0, "middle_2": 0, "outer": 0}, (0,)),
+        (0, {"inner": 1, "middle_1": 0, "middle_2": 0, "outer": 0}, (1,)),
+        (0, {"inner": 0, "middle_1": 1, "middle_2": 0, "outer": 0}, (2,)),
+        (0, {"inner": 1, "middle_1": 1, "middle_2": 0, "outer": 0}, (3,)),
+        (0, {"inner": 0, "middle_1": 2, "middle_2": 0, "outer": 0}, (4,)),
+        (0, {"inner": 1, "middle_1": 2, "middle_2": 0, "outer": 0}, (5,)),
+        (0, {"inner": 0, "middle_1": 0, "middle_2": 1, "outer": 0}, (6,)),
+        (0, {"inner": 1, "middle_1": 0, "middle_2": 1, "outer": 0}, (7,)),
+        (0, {"inner": 0, "middle_1": 1, "middle_2": 1, "outer": 0}, (8,)),
+        (0, {"inner": 1, "middle_1": 1, "middle_2": 1, "outer": 0}, (9,)),
+        (0, {"inner": 0, "middle_1": 2, "middle_2": 1, "outer": 0}, (10,)),
+        (0, {"inner": 1, "middle_1": 2, "middle_2": 1, "outer": 0}, (11,)),
+        (0, {"inner": 0, "middle_1": 0, "middle_2": 0, "outer": 1}, (12,)),
+        (0, {"inner": 1, "middle_1": 0, "middle_2": 0, "outer": 1}, (13,)),
+        (0, {"inner": 0, "middle_1": 1, "middle_2": 0, "outer": 1}, (14,)),
+        (0, {"inner": 1, "middle_1": 1, "middle_2": 0, "outer": 1}, (15,)),
+        (0, {"inner": 0, "middle_1": 2, "middle_2": 0, "outer": 1}, (16,)),
+        (0, {"inner": 1, "middle_1": 2, "middle_2": 0, "outer": 1}, (17,)),
+        (0, {"inner": 0, "middle_1": 0, "middle_2": 1, "outer": 1}, (18,)),
+        (0, {"inner": 1, "middle_1": 0, "middle_2": 1, "outer": 1}, (19,)),
+        (0, {"inner": 0, "middle_1": 1, "middle_2": 1, "outer": 1}, (20,)),
+        (0, {"inner": 1, "middle_1": 1, "middle_2": 1, "outer": 1}, (21,)),
+        (0, {"inner": 0, "middle_1": 2, "middle_2": 1, "outer": 1}, (22,)),
+        (0, {"inner": 1, "middle_1": 2, "middle_2": 1, "outer": 1}, (23,)),
+    ]
+
+
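The 24-entry pathway above is a cross product of the four loop iteration counts, with the innermost loop index varying fastest. A pure-Python sketch via itertools.product:

    from itertools import product

    path = [
        {"inner": i, "middle_1": m1, "middle_2": m2, "outer": o}
        for o, m2, m1, i in product(range(2), range(2), range(3), range(2))
    ]
    assert len(path) == 2 * 3 * 2 * 2
    assert path[0] == {"inner": 0, "middle_1": 0, "middle_2": 0, "outer": 0}
    assert path[1] == {"inner": 1, "middle_1": 0, "middle_2": 0, "outer": 0}
    assert path[2] == {"inner": 0, "middle_1": 1, "middle_2": 0, "outer": 0}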
+def test_nested_loop_input_from_parent_loop_task(null_config, tmp_path: Path):
+    """Test that an input in a nested-loop task is correctly sourced from the latest
+    iteration of the parent loop."""
+    wk = make_workflow(
+        schemas_spec=[
+            ({"p1": None}, ("p2", "p3")),
+            ({"p2": None}, ("p4",)),
+            ({"p4": None, "p3": None}, ("p2", "p1")),  # testing p3 source
+        ],
+        path=tmp_path,
+        local_inputs={0: {"p1": 101}},
+        loops=[
+            hf.Loop(name="inner", tasks=[1, 2], num_iterations=3),
+            hf.Loop(name="outer", tasks=[0, 1, 2], num_iterations=2),
+        ],
+    )
+    pathway = wk.get_iteration_task_pathway(ret_data_idx=True)
+    assert len(pathway) == 14
+    p3_out_idx = [i[2][0]["outputs.p3"] for i in pathway if i[0] == 0]
+    p3_inp_idx = [i[2][0]["inputs.p3"] for i in pathway if i[0] == 2]
+    assert len(p3_out_idx) == 2  # 2 outer iterations
+    assert len(p3_inp_idx) == 6  # 2 * 3 iterations
+    assert p3_inp_idx == [p3_out_idx[0]] * 3 + [p3_out_idx[1]] * 3
+
+
+def test_doubly_nested_loop_input_from_parent_loop_task(null_config, tmp_path: Path):
+    """Test that an input in a doubly-nested-loop task is correctly sourced from the
+    latest iteration of the parent loop."""
+    # test source of p6 in final task:
+    wk = make_workflow(
+        schemas_spec=[
+            ({"p5": None}, ("p6", "p1")),
+            ({"p1": None}, ("p2", "p3")),
+            ({"p2": None}, ("p4",)),
+            ({"p4": None, "p3": None, "p6": None}, ("p2", "p1", "p5")),
+        ],
+        path=tmp_path,
+        local_inputs={0: {"p5": 101}},
+        loops=[
+            hf.Loop(name="inner", tasks=[2, 3], num_iterations=3),
+            hf.Loop(name="middle", tasks=[1, 2, 3], num_iterations=3),
+            hf.Loop(name="outer", tasks=[0, 1, 2, 3], num_iterations=3),
+        ],
+        overwrite=True,
+    )
+    pathway = wk.get_iteration_task_pathway(ret_data_idx=True)
+    assert len(pathway) == 66
+
+    p6_out_idx = [i[2][0]["outputs.p6"] for i in pathway if i[0] == 0]
+    p6_inp_idx = [i[2][0]["inputs.p6"] for i in pathway if i[0] == 3]
+    assert len(p6_out_idx) == 3  # 3 outer iterations
+    assert len(p6_inp_idx) == 27  # 3 * 3 * 3 iterations
+    assert p6_inp_idx == [p6_out_idx[0]] * 9 + [p6_out_idx[1]] * 9 + [p6_out_idx[2]] * 9
+
+
+def test_loop_non_input_task_input_from_element_group(null_config, tmp_path: Path):
+    """Test correct sourcing of an element group input within a loop, for a task that is
+    not that loop's "input task" with respect to that parameter."""
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput("p1")],
+        outputs=[hf.SchemaOutput("p2"), hf.SchemaOutput("p3")],
+        actions=[
+            hf.Action(
+                commands=[
+                    hf.Command(
+                        command="echo $((<<parameter:p1>> + 1))",
+                        stdout="<<parameter:p2>>",
+                        stderr="<<parameter:p3>>",
+                    )
+                ]
+            )
+        ],
+    )
+    s2 = hf.TaskSchema(
+        objective="t2",
+        inputs=[hf.SchemaInput("p2", group="my_group")],
+        outputs=[hf.SchemaOutput("p4")],
+        actions=[
+            hf.Action(
+                commands=[
+                    hf.Command(
+                        command="echo $((<<sum(parameter:p2)>> + 1))",
+                        stdout="<<parameter:p4>>",
+                    )
+                ]
+            )
+        ],
+    )
+    s3 = hf.TaskSchema(
+        objective="t3",
+        inputs=[hf.SchemaInput("p3", group="my_group"), hf.SchemaInput("p4")],
+        outputs=[hf.SchemaOutput("p2")],
+        actions=[
+            hf.Action(
+                commands=[
+                    hf.Command(
+                        command="echo $((<<sum(parameter:p3)>> + <<parameter:p4>>))",
+                        stdout="<<parameter:p2>>",
+                    )
+                ]
+            )
+        ],
+    )
+    wk = hf.Workflow.from_template_data(
+        template_name="test_loop",
+        path=tmp_path,
+        tasks=[
+            hf.Task(
+                schema=s1,
+                sequences=[hf.ValueSequence("inputs.p1", values=[1, 2, 3])],
+                groups=[hf.ElementGroup("my_group")],
+            ),
+            hf.Task(schema=s2),
+            hf.Task(schema=s3),  # test source of p3 (should be the group from t1)
+        ],
+        loops=[hf.Loop(name="inner", tasks=[1, 2], num_iterations=2)],
+    )
+    pathway = wk.get_iteration_task_pathway(ret_data_idx=True)
+    assert len(pathway) == 5
+
+    expected = [i["outputs.p3"] for i in pathway[0][2]]
+    for i in pathway:
+        if i[0] == 2:  # task 3
+            assert i[2][0]["inputs.p3"] == expected
+
+
+@pytest.mark.integration
+def test_multi_task_loop_termination(null_config, tmp_path: Path):
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput("p1")],
+        outputs=[hf.SchemaOutput("p2")],
+        actions=[
+            hf.Action(
+                commands=[
+                    hf.Command(
+                        command="echo $((<<parameter:p1>> + 1))",
+                        stdout="<<int(parameter:p2)>>",
+                    )
+                ]
+            )
+        ],
+    )
+    s2 = hf.TaskSchema(
+        objective="t2",
+        inputs=[hf.SchemaInput("p2")],
+        outputs=[hf.SchemaOutput("p1")],
+        actions=[
+            hf.Action(
+                commands=[
+                    hf.Command(
+                        command="echo $((<<parameter:p2>> + 1))",
+                        stdout="<<int(parameter:p1)>>",
+                    )
+                ]
+            )
+        ],
+    )
+    tasks = [
+        hf.Task(schema=s1, inputs={"p1": 0}),
+        hf.Task(schema=s2),
+    ]
+    wk = hf.Workflow.from_template_data(
+        tasks=tasks,
+        loops=[
+            hf.Loop(
+                tasks=[0, 1],
+                num_iterations=3,
+                termination=hf.Rule(
+                    path="outputs.p1",
+                    condition={"value.greater_than": 3},  # should stop after 2nd iter
+                ),
+            )
+        ],
+        path=tmp_path,
+        template_name="test_loops",
+    )
+    wk.submit(wait=True, add_to_known=False)
+    for task in wk.tasks:
+        for element in task.elements:
+            for iter_i in element.iterations:
+                skips = (i.skip for i in iter_i.action_runs)
+                if iter_i.loop_idx[wk.loops[0].name] > 1:
+                    assert all(skips)
+                    assert iter_i.loop_skipped
+                else:
+                    assert not any(skips)
+
+
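The termination semantics this test relies on: each pass through the two tasks adds 2 to `p1`, and once `outputs.p1 > 3` at the loop's final task, the remaining iterations still exist but run as loop-skipped. A pure-Python sketch of the expected schedule (illustrative, not hpcflow's scheduler):

    p1, status = 0, []
    stopped = False
    for it in range(3):
        if stopped:
            status.append("loop_skipped")
            continue
        p1 += 2                       # two "+1" commands per iteration
        status.append("ran")
        stopped = p1 > 3              # rule checked at the loop's final task

    assert status == ["ran", "ran", "loop_skipped"]
    assert p1 == 4                    # 0 -> 2 -> 4, then the rule fires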
+@pytest.mark.integration
+def test_multi_task_loop_termination_task(null_config, tmp_path: Path):
+    """Specify a non-default task at which to check for termination."""
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput("p1")],
+        outputs=[hf.SchemaOutput("p2")],
+        actions=[
+            hf.Action(
+                commands=[
+                    hf.Command(
+                        command="echo $((<<parameter:p1>> + 1))",
+                        stdout="<<int(parameter:p2)>>",
+                    )
+                ]
+            )
+        ],
+    )
+    s2 = hf.TaskSchema(
+        objective="t2",
+        inputs=[hf.SchemaInput("p2")],
+        outputs=[hf.SchemaOutput("p1")],
+        actions=[
+            hf.Action(
+                commands=[
+                    hf.Command(
+                        command="echo $((<<parameter:p2>> + 1))",
+                        stdout="<<int(parameter:p1)>>",
+                    )
+                ]
+            )
+        ],
+    )
+    tasks = [
+        hf.Task(schema=s1, inputs={"p1": 0}),
+        hf.Task(schema=s2),
+    ]
+    wk = hf.Workflow.from_template_data(
+        tasks=tasks,
+        resources={"any": {"write_app_logs": True}},
+        loops=[
+            hf.Loop(
+                tasks=[0, 1],
+                num_iterations=3,
+                termination_task=0,  # default would be the final task (1)
+                termination=hf.Rule(
+                    path="inputs.p1",
+                    condition={
+                        "value.greater_than": 3
+                    },  # should stop after the first task of the final iteration
+                ),
+            )
+        ],
+        path=tmp_path,
+        template_name="test_loops",
+    )
+    wk.submit(wait=True, add_to_known=False, status=False)
+    runs_t0 = [j for i in wk.tasks[0].elements[0].iterations for j in i.action_runs]
+    runs_t1 = [j for i in wk.tasks[1].elements[0].iterations for j in i.action_runs]
+
+    assert [i.skip for i in runs_t0] == [0, 0, 0]
+    assert [i.skip for i in runs_t1] == [0, 0, SkipReason.LOOP_TERMINATION.value]
+
+
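With `termination_task=0` the rule (`inputs.p1 > 3`) is evaluated at the first task of each iteration rather than at the loop's final task, so `t1` runs in all three iterations and only `t2`'s last run is skipped. A pure-Python sketch of that schedule (illustrative values; not hpcflow code):

    p1 = 0
    skips_t1, skips_t2 = [], []
    for it in range(3):
        skips_t1.append(False)        # t1 always runs at the check point
        p2 = p1 + 1                   # t1: p2 = p1 + 1
        stop = p1 > 3                 # rule on inputs.p1, checked at task 0
        skips_t2.append(stop)         # t2 is skipped once the rule fires
        if not stop:
            p1 = p2 + 1               # t2: p1 = p2 + 1

    assert skips_t1 == [False, False, False]
    assert skips_t2 == [False, False, True]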
+@pytest.mark.integration
+@pytest.mark.skip(reason="need to fix loop termination for multiple elements")
+def test_multi_task_loop_termination_multi_element(null_config, tmp_path: Path):
+    s1 = hf.TaskSchema(
+        objective="t1",
+        inputs=[hf.SchemaInput("p1")],
+        outputs=[hf.SchemaOutput("p2")],
+        actions=[
+            hf.Action(
+                commands=[
+                    hf.Command(
+                        command="echo $((<<parameter:p1>> + 1))",
+                        stdout="<<int(parameter:p2)>>",
+                    )
+                ]
+            )
+        ],
+    )
+    s2 = hf.TaskSchema(
+        objective="t2",
+        inputs=[hf.SchemaInput("p2")],
+        outputs=[hf.SchemaOutput("p1")],
+        actions=[
+            hf.Action(
+                commands=[
+                    hf.Command(
+                        command="echo $((<<parameter:p2>> + 1))",
+                        stdout="<<int(parameter:p1)>>",
+                    )
+                ]
+            )
+        ],
+    )
+    tasks = [
+        hf.Task(schema=s1, sequences=[hf.ValueSequence(path="inputs.p1", values=[0, 1])]),
+        hf.Task(schema=s2),
+    ]
+    wk = hf.Workflow.from_template_data(
+        tasks=tasks,
+        loops=[
+            hf.Loop(
+                tasks=[0, 1],
+                num_iterations=3,
+                termination=hf.Rule(
+                    path="outputs.p1",
+                    condition={
+                        "value.greater_than": 3
+                    },  # should stop after 2nd iter (element 0), 1st iter (element 1)
+                ),
+            )
+        ],
+        path=tmp_path,
+        template_name="test_loops",
+    )
+    wk.submit(wait=True, add_to_known=False)
+    expected_num_iters = [2, 1]
+    for task in wk.tasks:
+        for element in task.elements:
+            for iter_i in element.iterations:
+                skips = (i.skip for i in iter_i.action_runs)
+                if (
+                    iter_i.loop_idx[wk.loops[0].name]
+                    > expected_num_iters[element.index] - 1
+                ):
+                    assert all(skips)
+                    assert iter_i.loop_skipped
+                else:
+                    assert not any(skips)
+
+
+def test_loop_termination_task_default(null_config):
+    loop = hf.Loop(
+        tasks=[0, 1],
+        num_iterations=3,
+    )
+    assert loop.termination_task_insert_ID == 1
+
+
+def test_loop_termination_task_non_default_specified(null_config):
+    loop = hf.Loop(
+        tasks=[0, 1],
+        num_iterations=3,
+        termination_task=0,
+    )
+    assert loop.termination_task_insert_ID == 0
+
+
+def test_loop_termination_task_default_specified(null_config):
+    loop = hf.Loop(
+        tasks=[0, 1],
+        num_iterations=3,
+        termination_task=1,
+    )
+    assert loop.termination_task_insert_ID == 1
+
+
+def test_loop_termination_task_raise_on_bad_task(null_config):
+    with pytest.raises(ValueError):
+        hf.Loop(
+            tasks=[0, 1],
+            num_iterations=3,
+            termination_task=2,
+        )
+
+
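Taken together, the four tests above pin down the `termination_task` resolution rule: default to the loop's final task, accept any task in the loop, and reject anything else. A minimal sketch of that logic (`resolve_termination_task` is a hypothetical helper, not the hpcflow implementation):

    def resolve_termination_task(task_insert_ids, termination_task=None):
        """Return the insert ID of the task at which termination is checked."""
        if termination_task is None:
            return task_insert_ids[-1]  # default: the loop's final task
        if termination_task not in task_insert_ids:
            raise ValueError(f"termination_task {termination_task!r} not in loop")
        return termination_task

    assert resolve_termination_task([0, 1]) == 1
    assert resolve_termination_task([0, 1], termination_task=0) == 0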
+@pytest.mark.parametrize("num_iters", [1, 2])
+def test_inner_loop_num_added_iterations_on_reload(null_config, tmp_path, num_iters):
+    # this tests that the pending num_added_iterations are saved correctly when adding
+    # loop iterations
+    s1, s2 = make_schemas(
+        ({"p2": None}, ("p2",), "t1"),
+        ({"p2": None}, ("p2",), "t2"),
+    )
+    tasks = [
+        hf.Task(s1, inputs={"p2": 100}),
+        hf.Task(s2),
+    ]
+
+    loops = [
+        hf.Loop(name="inner", tasks=[0], num_iterations=num_iters),
+        hf.Loop(name="outer", tasks=[0, 1], num_iterations=2),
+    ]
+
+    wk = hf.Workflow.from_template_data(
+        template_name="test_loop_num_added_iters_reload",
+        tasks=tasks,
+        loops=loops,
+        path=tmp_path,
+    )
+
+    wk = wk.reload()
+    assert wk.loops.inner.num_added_iterations == {
+        (0,): num_iters,
+        (1,): num_iters,
+    }
+
+
+@pytest.mark.parametrize("num_outer_iters", [1, 2])
+def test_outer_loop_num_added_iterations_on_reload(
+    null_config, tmp_path, num_outer_iters
+):
+    # this tests that the pending num_added_iterations are saved correctly when adding
+    # loop iterations
+
+    s1, s2 = make_schemas(
+        ({"p2": None}, ("p2",), "t1"),
+        ({"p2": None}, ("p2",), "t2"),
+    )
+    tasks = [
+        hf.Task(s1, inputs={"p2": 100}),
+        hf.Task(s2),
+    ]
+
+    loops = [
+        hf.Loop(name="inner", tasks=[0], num_iterations=2),
+        hf.Loop(name="outer", tasks=[0, 1], num_iterations=num_outer_iters),
+    ]
+
+    wk = hf.Workflow.from_template_data(
+        template_name="test_loop_num_added_iters_reload",
+        tasks=tasks,
+        loops=loops,
+        path=tmp_path,
+    )
+
+    wk = wk.reload()
+    if num_outer_iters == 1:
+        assert wk.loops.inner.num_added_iterations == {(0,): 2}
+    elif num_outer_iters == 2:
+        assert wk.loops.inner.num_added_iterations == {(0,): 2, (1,): 2}
+
+
+def test_multi_nested_loop_num_added_iterations_on_reload(null_config, tmp_path: Path):
+    s1, s2, s3 = make_schemas(
+        ({"p2": None}, ("p2",), "t1"),
+        ({"p2": None}, ("p2",), "t2"),
+        ({"p2": None}, ("p2",), "t3"),
+    )
+    tasks = [
+        hf.Task(s1, inputs={"p2": 100}),
+        hf.Task(s2),
+        hf.Task(s3),
+    ]
+
+    loops = [
+        hf.Loop(name="inner", tasks=[0], num_iterations=2),
+        hf.Loop(name="middle", tasks=[0, 1], num_iterations=3),
+        hf.Loop(name="outer", tasks=[0, 1, 2], num_iterations=4),
+    ]
+
+    wk = hf.Workflow.from_template_data(
+        template_name="test_loop_num_added_iters_reload",
+        tasks=tasks,
+        loops=loops,
+        path=tmp_path,
+    )
+
+    wk = wk.reload()
+    for loop in wk.loops:
+        print(loop.num_added_iterations)
+
+    assert wk.loops.inner.num_added_iterations == {
+        (0, 0): 2,
+        (1, 0): 2,
+        (2, 0): 2,
+        (0, 1): 2,
+        (1, 1): 2,
+        (2, 1): 2,
+        (0, 2): 2,
+        (1, 2): 2,
+        (2, 2): 2,
+        (0, 3): 2,
+        (1, 3): 2,
+        (2, 3): 2,
+    }
+    assert wk.loops.middle.num_added_iterations == {(0,): 3, (1,): 3, (2,): 3, (3,): 3}
+    assert wk.loops.outer.num_added_iterations == {(): 4}
+
+
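The key structure asserted above: `num_added_iterations` maps a tuple of parent-loop iteration indices (innermost parent first) to an iteration count, so `inner` (parents: `middle`, `outer`) has 3 x 4 keys, `middle` (parent: `outer`) has four single-index keys, and the parentless `outer` has only the empty tuple. A pure-Python sketch of those shapes:

    inner = {(m, o): 2 for o in range(4) for m in range(3)}  # (middle, outer) keys
    middle = {(o,): 3 for o in range(4)}                     # (outer,) keys
    outer = {(): 4}                                          # no parent loops

    assert len(inner) == 12 and set(inner.values()) == {2}
    assert middle == {(0,): 3, (1,): 3, (2,): 3, (3,): 3}
    assert outer == {(): 4}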
+def test_multi_nested_loop_num_added_iterations_on_reload_single_iter_inner(
+    null_config, tmp_path: Path
+):
+    s1, s2, s3 = make_schemas(
+        ({"p2": None}, ("p2",), "t1"),
+        ({"p2": None}, ("p2",), "t2"),
+        ({"p2": None}, ("p2",), "t3"),
+    )
+    tasks = [
+        hf.Task(s1, inputs={"p2": 100}),
+        hf.Task(s2),
+        hf.Task(s3),
+    ]
+
+    loops = [
+        hf.Loop(name="inner", tasks=[0], num_iterations=1),
+        hf.Loop(name="middle", tasks=[0, 1], num_iterations=3),
+        hf.Loop(name="outer", tasks=[0, 1, 2], num_iterations=4),
+    ]
+
+    wk = hf.Workflow.from_template_data(
+        template_name="test_loop_num_added_iters_reload",
+        tasks=tasks,
+        loops=loops,
+        path=tmp_path,
+    )
+
+    wk = wk.reload()
+    for loop in wk.loops:
+        print(loop.num_added_iterations)
+
+    assert wk.loops.inner.num_added_iterations == {
+        (0, 0): 1,
+        (1, 0): 1,
+        (2, 0): 1,
+        (0, 1): 1,
+        (1, 1): 1,
+        (2, 1): 1,
+        (0, 2): 1,
+        (1, 2): 1,
+        (2, 2): 1,
+        (0, 3): 1,
+        (1, 3): 1,
+        (2, 3): 1,
+    }
+    assert wk.loops.middle.num_added_iterations == {(0,): 3, (1,): 3, (2,): 3, (3,): 3}
+    assert wk.loops.outer.num_added_iterations == {(): 4}
+
+
+def test_multi_nested_loop_num_added_iterations_on_reload_single_iter_middle(
+    null_config, tmp_path: Path
+):
+    s1, s2, s3 = make_schemas(
+        ({"p2": None}, ("p2",), "t1"),
+        ({"p2": None}, ("p2",), "t2"),
+        ({"p2": None}, ("p2",), "t3"),
+    )
+    tasks = [
+        hf.Task(s1, inputs={"p2": 100}),
+        hf.Task(s2),
+        hf.Task(s3),
+    ]
+
+    loops = [
+        hf.Loop(name="inner", tasks=[0], num_iterations=2),
+        hf.Loop(name="middle", tasks=[0, 1], num_iterations=1),
+        hf.Loop(name="outer", tasks=[0, 1, 2], num_iterations=4),
+    ]
+
+    wk = hf.Workflow.from_template_data(
+        template_name="test_loop_num_added_iters_reload",
+        tasks=tasks,
+        loops=loops,
+        path=tmp_path,
+    )
+
+    wk = wk.reload()
+    for loop in wk.loops:
+        print(loop.num_added_iterations)
+
+    assert wk.loops.inner.num_added_iterations == {
+        (0, 0): 2,
+        (0, 1): 2,
+        (0, 2): 2,
+        (0, 3): 2,
+    }
+    assert wk.loops.middle.num_added_iterations == {(0,): 1, (1,): 1, (2,): 1, (3,): 1}
+    assert wk.loops.outer.num_added_iterations == {(): 4}
+
+
+def test_multi_nested_loop_num_added_iterations_on_reload_single_iter_outer(
+    null_config, tmp_path: Path
+):
+    s1, s2, s3 = make_schemas(
+        ({"p2": None}, ("p2",), "t1"),
+        ({"p2": None}, ("p2",), "t2"),
+        ({"p2": None}, ("p2",), "t3"),
+    )
+    tasks = [
+        hf.Task(s1, inputs={"p2": 100}),
+        hf.Task(s2),
+        hf.Task(s3),
+    ]
+
+    loops = [
+        hf.Loop(name="inner", tasks=[0], num_iterations=2),
+        hf.Loop(name="middle", tasks=[0, 1], num_iterations=3),
+        hf.Loop(name="outer", tasks=[0, 1, 2], num_iterations=1),
+    ]
+
+    wk = hf.Workflow.from_template_data(
+        template_name="test_loop_num_added_iters_reload",
+        tasks=tasks,
+        loops=loops,
+        path=tmp_path,
+    )
+
+    wk = wk.reload()
+    for loop in wk.loops:
+        print(loop.num_added_iterations)
+
+    assert wk.loops.inner.num_added_iterations == {
+        (0, 0): 2,
+        (1, 0): 2,
+        (2, 0): 2,
+    }
+    assert wk.loops.middle.num_added_iterations == {(0,): 3}
+    assert wk.loops.outer.num_added_iterations == {(): 1}
+
+
+@pytest.mark.parametrize("store", ["json", "zarr"])
+def test_updated_data_idx(null_config, tmp_path: Path, store):
+    s1, s2 = make_schemas(
+        ({"p0": None, "p1": None}, ("p0", "p2")),
+        ({"p2": None}, ("p3",)),
+    )
+    wk = hf.Workflow.from_template_data(
+        template_name="loop_update_test",
+        tasks=[
+            hf.Task(s1, inputs={"p0": 1}),
+            hf.Task(s2),
+        ],
+        path=tmp_path,
+        loops=[hf.Loop(tasks=[0], num_iterations=2)],
+        store=store,
+    )
+
+    runs = wk.get_all_EARs()
+    assert runs[1].get_data_idx()["inputs.p2"] == runs[2].get_data_idx()["outputs.p2"]
+
+
+# TODO: test loop termination across jobscripts