hpcflow-new2 0.2.0a189__py3-none-any.whl → 0.2.0a199__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (176)
  1. hpcflow/__pyinstaller/hook-hpcflow.py +9 -6
  2. hpcflow/_version.py +1 -1
  3. hpcflow/app.py +1 -0
  4. hpcflow/data/scripts/bad_script.py +2 -0
  5. hpcflow/data/scripts/do_nothing.py +2 -0
  6. hpcflow/data/scripts/env_specifier_test/input_file_generator_pass_env_spec.py +4 -0
  7. hpcflow/data/scripts/env_specifier_test/main_script_test_pass_env_spec.py +8 -0
  8. hpcflow/data/scripts/env_specifier_test/output_file_parser_pass_env_spec.py +4 -0
  9. hpcflow/data/scripts/env_specifier_test/v1/input_file_generator_basic.py +4 -0
  10. hpcflow/data/scripts/env_specifier_test/v1/main_script_test_direct_in_direct_out.py +7 -0
  11. hpcflow/data/scripts/env_specifier_test/v1/output_file_parser_basic.py +4 -0
  12. hpcflow/data/scripts/env_specifier_test/v2/main_script_test_direct_in_direct_out.py +7 -0
  13. hpcflow/data/scripts/input_file_generator_basic.py +3 -0
  14. hpcflow/data/scripts/input_file_generator_basic_FAIL.py +3 -0
  15. hpcflow/data/scripts/input_file_generator_test_stdout_stderr.py +8 -0
  16. hpcflow/data/scripts/main_script_test_direct_in.py +3 -0
  17. hpcflow/data/scripts/main_script_test_direct_in_direct_out_2.py +6 -0
  18. hpcflow/data/scripts/main_script_test_direct_in_direct_out_2_fail_allowed.py +6 -0
  19. hpcflow/data/scripts/main_script_test_direct_in_direct_out_2_fail_allowed_group.py +7 -0
  20. hpcflow/data/scripts/main_script_test_direct_in_direct_out_3.py +6 -0
  21. hpcflow/data/scripts/main_script_test_direct_in_group_direct_out_3.py +6 -0
  22. hpcflow/data/scripts/main_script_test_direct_in_group_one_fail_direct_out_3.py +6 -0
  23. hpcflow/data/scripts/main_script_test_hdf5_in_obj.py +1 -1
  24. hpcflow/data/scripts/main_script_test_hdf5_in_obj_2.py +12 -0
  25. hpcflow/data/scripts/main_script_test_hdf5_out_obj.py +1 -1
  26. hpcflow/data/scripts/main_script_test_json_out_FAIL.py +3 -0
  27. hpcflow/data/scripts/main_script_test_shell_env_vars.py +12 -0
  28. hpcflow/data/scripts/main_script_test_std_out_std_err.py +6 -0
  29. hpcflow/data/scripts/output_file_parser_basic.py +3 -0
  30. hpcflow/data/scripts/output_file_parser_basic_FAIL.py +7 -0
  31. hpcflow/data/scripts/output_file_parser_test_stdout_stderr.py +8 -0
  32. hpcflow/data/scripts/script_exit_test.py +5 -0
  33. hpcflow/data/template_components/environments.yaml +1 -1
  34. hpcflow/sdk/__init__.py +26 -15
  35. hpcflow/sdk/app.py +2192 -768
  36. hpcflow/sdk/cli.py +506 -296
  37. hpcflow/sdk/cli_common.py +105 -7
  38. hpcflow/sdk/config/__init__.py +1 -1
  39. hpcflow/sdk/config/callbacks.py +115 -43
  40. hpcflow/sdk/config/cli.py +126 -103
  41. hpcflow/sdk/config/config.py +674 -318
  42. hpcflow/sdk/config/config_file.py +131 -95
  43. hpcflow/sdk/config/errors.py +125 -84
  44. hpcflow/sdk/config/types.py +148 -0
  45. hpcflow/sdk/core/__init__.py +25 -1
  46. hpcflow/sdk/core/actions.py +1771 -1059
  47. hpcflow/sdk/core/app_aware.py +24 -0
  48. hpcflow/sdk/core/cache.py +139 -79
  49. hpcflow/sdk/core/command_files.py +263 -287
  50. hpcflow/sdk/core/commands.py +145 -112
  51. hpcflow/sdk/core/element.py +828 -535
  52. hpcflow/sdk/core/enums.py +192 -0
  53. hpcflow/sdk/core/environment.py +74 -93
  54. hpcflow/sdk/core/errors.py +455 -52
  55. hpcflow/sdk/core/execute.py +207 -0
  56. hpcflow/sdk/core/json_like.py +540 -272
  57. hpcflow/sdk/core/loop.py +751 -347
  58. hpcflow/sdk/core/loop_cache.py +164 -47
  59. hpcflow/sdk/core/object_list.py +370 -207
  60. hpcflow/sdk/core/parameters.py +1100 -627
  61. hpcflow/sdk/core/rule.py +59 -41
  62. hpcflow/sdk/core/run_dir_files.py +21 -37
  63. hpcflow/sdk/core/skip_reason.py +7 -0
  64. hpcflow/sdk/core/task.py +1649 -1339
  65. hpcflow/sdk/core/task_schema.py +308 -196
  66. hpcflow/sdk/core/test_utils.py +191 -114
  67. hpcflow/sdk/core/types.py +440 -0
  68. hpcflow/sdk/core/utils.py +485 -309
  69. hpcflow/sdk/core/validation.py +82 -9
  70. hpcflow/sdk/core/workflow.py +2544 -1178
  71. hpcflow/sdk/core/zarr_io.py +98 -137
  72. hpcflow/sdk/data/workflow_spec_schema.yaml +2 -0
  73. hpcflow/sdk/demo/cli.py +53 -33
  74. hpcflow/sdk/helper/cli.py +18 -15
  75. hpcflow/sdk/helper/helper.py +75 -63
  76. hpcflow/sdk/helper/watcher.py +61 -28
  77. hpcflow/sdk/log.py +122 -71
  78. hpcflow/sdk/persistence/__init__.py +8 -31
  79. hpcflow/sdk/persistence/base.py +1360 -606
  80. hpcflow/sdk/persistence/defaults.py +6 -0
  81. hpcflow/sdk/persistence/discovery.py +38 -0
  82. hpcflow/sdk/persistence/json.py +568 -188
  83. hpcflow/sdk/persistence/pending.py +382 -179
  84. hpcflow/sdk/persistence/store_resource.py +39 -23
  85. hpcflow/sdk/persistence/types.py +318 -0
  86. hpcflow/sdk/persistence/utils.py +14 -11
  87. hpcflow/sdk/persistence/zarr.py +1337 -433
  88. hpcflow/sdk/runtime.py +44 -41
  89. hpcflow/sdk/submission/{jobscript_info.py → enums.py} +39 -12
  90. hpcflow/sdk/submission/jobscript.py +1651 -692
  91. hpcflow/sdk/submission/schedulers/__init__.py +167 -39
  92. hpcflow/sdk/submission/schedulers/direct.py +121 -81
  93. hpcflow/sdk/submission/schedulers/sge.py +170 -129
  94. hpcflow/sdk/submission/schedulers/slurm.py +291 -268
  95. hpcflow/sdk/submission/schedulers/utils.py +12 -2
  96. hpcflow/sdk/submission/shells/__init__.py +14 -15
  97. hpcflow/sdk/submission/shells/base.py +150 -29
  98. hpcflow/sdk/submission/shells/bash.py +283 -173
  99. hpcflow/sdk/submission/shells/os_version.py +31 -30
  100. hpcflow/sdk/submission/shells/powershell.py +228 -170
  101. hpcflow/sdk/submission/submission.py +1014 -335
  102. hpcflow/sdk/submission/types.py +140 -0
  103. hpcflow/sdk/typing.py +182 -12
  104. hpcflow/sdk/utils/arrays.py +71 -0
  105. hpcflow/sdk/utils/deferred_file.py +55 -0
  106. hpcflow/sdk/utils/hashing.py +16 -0
  107. hpcflow/sdk/utils/patches.py +12 -0
  108. hpcflow/sdk/utils/strings.py +33 -0
  109. hpcflow/tests/api/test_api.py +32 -0
  110. hpcflow/tests/conftest.py +27 -6
  111. hpcflow/tests/data/multi_path_sequences.yaml +29 -0
  112. hpcflow/tests/data/workflow_test_run_abort.yaml +34 -35
  113. hpcflow/tests/schedulers/sge/test_sge_submission.py +36 -0
  114. hpcflow/tests/schedulers/slurm/test_slurm_submission.py +5 -2
  115. hpcflow/tests/scripts/test_input_file_generators.py +282 -0
  116. hpcflow/tests/scripts/test_main_scripts.py +866 -85
  117. hpcflow/tests/scripts/test_non_snippet_script.py +46 -0
  118. hpcflow/tests/scripts/test_ouput_file_parsers.py +353 -0
  119. hpcflow/tests/shells/wsl/test_wsl_submission.py +12 -4
  120. hpcflow/tests/unit/test_action.py +262 -75
  121. hpcflow/tests/unit/test_action_rule.py +9 -4
  122. hpcflow/tests/unit/test_app.py +33 -6
  123. hpcflow/tests/unit/test_cache.py +46 -0
  124. hpcflow/tests/unit/test_cli.py +134 -1
  125. hpcflow/tests/unit/test_command.py +71 -54
  126. hpcflow/tests/unit/test_config.py +142 -16
  127. hpcflow/tests/unit/test_config_file.py +21 -18
  128. hpcflow/tests/unit/test_element.py +58 -62
  129. hpcflow/tests/unit/test_element_iteration.py +50 -1
  130. hpcflow/tests/unit/test_element_set.py +29 -19
  131. hpcflow/tests/unit/test_group.py +4 -2
  132. hpcflow/tests/unit/test_input_source.py +116 -93
  133. hpcflow/tests/unit/test_input_value.py +29 -24
  134. hpcflow/tests/unit/test_jobscript_unit.py +757 -0
  135. hpcflow/tests/unit/test_json_like.py +44 -35
  136. hpcflow/tests/unit/test_loop.py +1396 -84
  137. hpcflow/tests/unit/test_meta_task.py +325 -0
  138. hpcflow/tests/unit/test_multi_path_sequences.py +229 -0
  139. hpcflow/tests/unit/test_object_list.py +17 -12
  140. hpcflow/tests/unit/test_parameter.py +29 -7
  141. hpcflow/tests/unit/test_persistence.py +237 -42
  142. hpcflow/tests/unit/test_resources.py +20 -18
  143. hpcflow/tests/unit/test_run.py +117 -6
  144. hpcflow/tests/unit/test_run_directories.py +29 -0
  145. hpcflow/tests/unit/test_runtime.py +2 -1
  146. hpcflow/tests/unit/test_schema_input.py +23 -15
  147. hpcflow/tests/unit/test_shell.py +23 -2
  148. hpcflow/tests/unit/test_slurm.py +8 -7
  149. hpcflow/tests/unit/test_submission.py +38 -89
  150. hpcflow/tests/unit/test_task.py +352 -247
  151. hpcflow/tests/unit/test_task_schema.py +33 -20
  152. hpcflow/tests/unit/test_utils.py +9 -11
  153. hpcflow/tests/unit/test_value_sequence.py +15 -12
  154. hpcflow/tests/unit/test_workflow.py +114 -83
  155. hpcflow/tests/unit/test_workflow_template.py +0 -1
  156. hpcflow/tests/unit/utils/test_arrays.py +40 -0
  157. hpcflow/tests/unit/utils/test_deferred_file_writer.py +34 -0
  158. hpcflow/tests/unit/utils/test_hashing.py +65 -0
  159. hpcflow/tests/unit/utils/test_patches.py +5 -0
  160. hpcflow/tests/unit/utils/test_redirect_std.py +50 -0
  161. hpcflow/tests/workflows/__init__.py +0 -0
  162. hpcflow/tests/workflows/test_directory_structure.py +31 -0
  163. hpcflow/tests/workflows/test_jobscript.py +334 -1
  164. hpcflow/tests/workflows/test_run_status.py +198 -0
  165. hpcflow/tests/workflows/test_skip_downstream.py +696 -0
  166. hpcflow/tests/workflows/test_submission.py +140 -0
  167. hpcflow/tests/workflows/test_workflows.py +160 -15
  168. hpcflow/tests/workflows/test_zip.py +18 -0
  169. hpcflow/viz_demo.ipynb +6587 -3
  170. {hpcflow_new2-0.2.0a189.dist-info → hpcflow_new2-0.2.0a199.dist-info}/METADATA +8 -4
  171. hpcflow_new2-0.2.0a199.dist-info/RECORD +221 -0
  172. hpcflow/sdk/core/parallel.py +0 -21
  173. hpcflow_new2-0.2.0a189.dist-info/RECORD +0 -158
  174. {hpcflow_new2-0.2.0a189.dist-info → hpcflow_new2-0.2.0a199.dist-info}/LICENSE +0 -0
  175. {hpcflow_new2-0.2.0a189.dist-info → hpcflow_new2-0.2.0a199.dist-info}/WHEEL +0 -0
  176. {hpcflow_new2-0.2.0a189.dist-info → hpcflow_new2-0.2.0a199.dist-info}/entry_points.txt +0 -0
hpcflow/tests/unit/test_resources.py

@@ -1,16 +1,18 @@
+ from __future__ import annotations
  import os
+ from pathlib import Path
  import pytest
  from hpcflow.app import app as hf
  from hpcflow.sdk.core.errors import UnsupportedSchedulerError


- def test_init_scope_equivalence_simple():
+ def test_init_scope_equivalence_simple() -> None:
      rs1 = hf.ResourceSpec(scope=hf.ActionScope.any(), num_cores=1)
      rs2 = hf.ResourceSpec(scope="any", num_cores=1)
      assert rs1 == rs2


- def test_init_scope_equivalence_with_kwargs():
+ def test_init_scope_equivalence_with_kwargs() -> None:
      rs1 = hf.ResourceSpec(
          scope=hf.ActionScope.input_file_generator(file="my_file"), num_cores=1
      )
@@ -18,32 +20,32 @@ def test_init_scope_equivalence_with_kwargs():
      assert rs1 == rs2


- def test_init_no_args():
+ def test_init_no_args() -> None:
      rs1 = hf.ResourceSpec()
      rs2 = hf.ResourceSpec(scope="any")
      assert rs1 == rs2


- def test_resource_list_raise_on_identical_scopes():
+ def test_resource_list_raise_on_identical_scopes() -> None:
      with pytest.raises(ValueError):
          hf.ResourceList.normalise([{"scope": "any"}, {"scope": "any"}])


- def test_merge_other_same_scope():
+ def test_merge_other_same_scope() -> None:
      res_lst_1 = hf.ResourceList.from_json_like({"any": {"num_cores": 1}})
      res_lst_2 = hf.ResourceList.from_json_like({"any": {}})
      res_lst_2.merge_other(res_lst_1)
      assert res_lst_2 == hf.ResourceList.from_json_like({"any": {"num_cores": 1}})


- def test_merge_other_same_scope_no_overwrite():
+ def test_merge_other_same_scope_no_overwrite() -> None:
      res_lst_1 = hf.ResourceList.from_json_like({"any": {"num_cores": 1}})
      res_lst_2 = hf.ResourceList.from_json_like({"any": {"num_cores": 2}})
      res_lst_2.merge_other(res_lst_1)
      assert res_lst_2 == hf.ResourceList.from_json_like({"any": {"num_cores": 2}})


- def test_merge_other_multi_scope():
+ def test_merge_other_multi_scope() -> None:
      res_lst_1 = hf.ResourceList.from_json_like({"any": {"num_cores": 1}})
      res_lst_2 = hf.ResourceList.from_json_like({"any": {}, "main": {"num_cores": 3}})
      res_lst_2.merge_other(res_lst_1)
@@ -53,7 +55,7 @@ def test_merge_other_multi_scope():


  @pytest.mark.parametrize("store", ["json", "zarr"])
- def test_merge_other_persistent_workflow_reload(null_config, tmp_path, store):
+ def test_merge_other_persistent_workflow_reload(null_config, tmp_path: Path, store: str):
      wkt = hf.WorkflowTemplate(
          name="test_load",
          resources={"any": {"num_cores": 2}},
@@ -70,7 +72,7 @@ def test_merge_other_persistent_workflow_reload(null_config, tmp_path, store):


  @pytest.mark.parametrize("store", ["json", "zarr"])
- def test_use_persistent_resource_spec(null_config, tmp_path, store):
+ def test_use_persistent_resource_spec(null_config, tmp_path: Path, store: str):
      # create a workflow from which we can use a resource spec in a new workflow:
      num_cores_check = 2
      wk_base = hf.Workflow.from_template_data(
@@ -104,7 +106,7 @@ def test_use_persistent_resource_spec(null_config, tmp_path, store):


  @pytest.mark.parametrize("store", ["json", "zarr"])
- def test_use_persistent_resource_list(null_config, tmp_path, store):
+ def test_use_persistent_resource_list(null_config, tmp_path: Path, store: str):
      # create a workflow from which we can use the resource list in a new workflow:
      num_cores_check = 2
      wk_base = hf.Workflow.from_template_data(
@@ -138,7 +140,7 @@ def test_use_persistent_resource_list(null_config, tmp_path, store):


  @pytest.mark.parametrize("store", ["json", "zarr"])
- def test_default_scheduler_set(new_null_config, tmp_path, store):
+ def test_default_scheduler_set(new_null_config, tmp_path: Path, store: str):
      wk = hf.Workflow.from_template_data(
          template_name="wk",
          path=tmp_path,
@@ -154,21 +156,21 @@ def test_default_scheduler_set(new_null_config, tmp_path, store):
      assert wk.submissions[0].jobscripts[0].scheduler_name == hf.config.default_scheduler


- def test_scheduler_case_insensitive(null_config):
+ def test_scheduler_case_insensitive(null_config) -> None:
      rs1 = hf.ResourceSpec(scheduler="direct")
      rs2 = hf.ResourceSpec(scheduler="dIrEcT")
      assert rs1 == rs2
      assert rs1.scheduler == rs2.scheduler == "direct"


- def test_scheduler_strip(null_config):
+ def test_scheduler_strip(null_config) -> None:
      rs1 = hf.ResourceSpec(scheduler=" direct ")
      rs2 = hf.ResourceSpec(scheduler="direct")
      assert rs1 == rs2
      assert rs1.scheduler == rs2.scheduler == "direct"


- def test_shell_case_insensitive(null_config):
+ def test_shell_case_insensitive(null_config) -> None:
      shell_name = "bash" if os.name == "posix" else "powershell"
      shell_name_title = shell_name
      n = shell_name_title[0]
@@ -180,7 +182,7 @@ def test_shell_case_insensitive(null_config):
      assert rs1.shell == rs2.shell == shell_name


- def test_shell_strip(null_config):
+ def test_shell_strip(null_config) -> None:
      shell_name = "bash" if os.name == "posix" else "powershell"
      rs1 = hf.ResourceSpec(shell=f" {shell_name} ")
      rs2 = hf.ResourceSpec(shell=shell_name)
@@ -195,14 +197,14 @@ def test_os_name_case_insensitive(null_config):
      assert rs1.os_name == rs2.os_name == "nt"


- def test_os_name_strip(null_config):
+ def test_os_name_strip(null_config) -> None:
      rs1 = hf.ResourceSpec(os_name=" nt ")
      rs2 = hf.ResourceSpec(os_name="nt")
      assert rs1 == rs2
      assert rs1.os_name == rs2.os_name == "nt"


- def test_raise_on_unsupported_scheduler(new_null_config, tmp_path):
+ def test_raise_on_unsupported_scheduler(new_null_config, tmp_path: Path):
      # slurm not supported by default config file:
      wk = hf.Workflow.from_template_data(
          template_name="wk1",
@@ -219,7 +221,7 @@ def test_raise_on_unsupported_scheduler(new_null_config, tmp_path):
          wk.add_submission()


- def test_can_use_non_default_scheduler(new_null_config, tmp_path):
+ def test_can_use_non_default_scheduler(new_null_config, tmp_path: Path):
      # for either OS choose a compatible scheduler not set by default:
      if os.name == "nt":
          opt_scheduler = "direct_posix" # i.e for WSL
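The `merge_other` tests above pin down the merge rule for resource lists: values from the merged-in list fill gaps in a matching scope, but values already set are never overwritten. A minimal dict-based sketch of that rule (a hypothetical helper, not the hpcflow implementation):

    # Hedged sketch of the non-overwriting merge the tests assert.
    def merge_scope(ours: dict, theirs: dict) -> dict:
        merged = dict(theirs)  # start from the other scope's values...
        merged.update(ours)    # ...then let our own values take precedence
        return merged

    assert merge_scope({}, {"num_cores": 1}) == {"num_cores": 1}              # gap filled
    assert merge_scope({"num_cores": 2}, {"num_cores": 1})["num_cores"] == 2  # not overwritten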
hpcflow/tests/unit/test_run.py

@@ -1,7 +1,16 @@
+ from __future__ import annotations
+
+ import os
+
+ import pytest
+
+ from pathlib import Path
  from hpcflow.app import app as hf
+ from hpcflow.sdk.core.actions import SkipReason
+ from hpcflow.sdk.core.test_utils import make_workflow_to_run_command


- def test_compose_commands_no_shell_var(null_config, tmp_path):
+ def test_compose_commands_no_shell_var(null_config, tmp_path: Path):
      ts = hf.TaskSchema(
          objective="test_compose_commands",
          actions=[hf.Action(commands=[hf.Command(command="Start-Sleep 10")])],
@@ -12,13 +21,14 @@ def test_compose_commands_no_shell_var(null_config, tmp_path):
          tasks=[hf.Task(schema=ts)],
      )
      sub = wk.add_submission()
+     assert sub is not None
      js = sub.jobscripts[0]
      run = wk.tasks[0].elements[0].iterations[0].action_runs[0]
-     _, shell_vars = run.compose_commands(jobscript=js, JS_action_idx=0)
+     _, shell_vars = run.compose_commands(environments=sub.environments, shell=js.shell)
      assert shell_vars == {0: []}


- def test_compose_commands_single_shell_var(null_config, tmp_path):
+ def test_compose_commands_single_shell_var(null_config, tmp_path: Path):
      ts = hf.TaskSchema(
          objective="test_compose_commands",
          inputs=[hf.SchemaInput("p1")],
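Note the pattern repeated across these hunks: `compose_commands` no longer takes the jobscript and the action's index within it, but instead the submission's environments and the shell. A before/after excerpt of the call site (names exactly as they appear in the tests):

    # 0.2.0a189: command composition resolved via the jobscript and action index
    _, shell_vars = run.compose_commands(jobscript=js, JS_action_idx=0)

    # 0.2.0a199: resolved via the submission's environments and the shell
    _, shell_vars = run.compose_commands(environments=sub.environments, shell=js.shell)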
@@ -40,13 +50,14 @@ def test_compose_commands_single_shell_var(null_config, tmp_path):
          tasks=[hf.Task(schema=ts, inputs={"p1": 101})],
      )
      sub = wk.add_submission()
+     assert sub is not None
      js = sub.jobscripts[0]
      run = wk.tasks[0].elements[0].iterations[0].action_runs[0]
-     _, shell_vars = run.compose_commands(jobscript=js, JS_action_idx=0)
+     _, shell_vars = run.compose_commands(environments=sub.environments, shell=js.shell)
      assert shell_vars == {0: [("outputs.p1", "parameter_p1", "stdout")]}


- def test_compose_commands_multi_single_shell_var(null_config, tmp_path):
+ def test_compose_commands_multi_single_shell_var(null_config, tmp_path: Path):
      ts = hf.TaskSchema(
          objective="test_compose_commands",
          inputs=[hf.SchemaInput("p1")],
@@ -69,7 +80,107 @@ def test_compose_commands_multi_single_shell_var(null_config, tmp_path):
          tasks=[hf.Task(schema=ts, inputs={"p1": 101})],
      )
      sub = wk.add_submission()
+     assert sub is not None
      js = sub.jobscripts[0]
      run = wk.tasks[0].elements[0].iterations[0].action_runs[0]
-     _, shell_vars = run.compose_commands(jobscript=js, JS_action_idx=0)
+     _, shell_vars = run.compose_commands(environments=sub.environments, shell=js.shell)
      assert shell_vars == {0: [], 1: [("outputs.p1", "parameter_p1", "stdout")]}
+
+
+ @pytest.mark.integration
+ def test_run_dir_diff_new_file(null_config, tmp_path):
+     if os.name == "nt":
+         command = "New-Item -Path 'new_file.txt' -ItemType File"
+     else:
+         command = "touch new_file.txt"
+     wk = make_workflow_to_run_command(
+         command=command,
+         requires_dir=True,
+         path=tmp_path,
+         name="w2",
+         overwrite=True,
+     )
+     wk.submit(wait=True, add_to_known=False, status=False)
+     assert wk.get_all_EARs()[0].dir_diff.files_created == ["new_file.txt"]
+
+
+ @pytest.mark.integration
+ def test_run_skip_reason_upstream_failure(null_config, tmp_path):
+     ts = hf.TaskSchema(
+         objective="t1",
+         inputs=[hf.SchemaInput("p1")],
+         outputs=[hf.SchemaInput("p2")],
+         actions=[
+             hf.Action(
+                 commands=[
+                     hf.Command(
+                         command="echo $(( <<parameter:p1>> + 100 ))",
+                         stdout="<<parameter:p2>>",
+                     ),
+                     hf.Command(command="exit 1"),
+                 ]
+             ),
+             hf.Action(
+                 commands=[
+                     hf.Command(
+                         command="echo $(( <<parameter:p2>> + 100 ))",
+                         stdout="<<parameter:p2>>",
+                     ),
+                 ]
+             ),  # should be skipped due to failure of action 0
+         ],
+     )
+     wk = hf.Workflow.from_template_data(
+         template_name="test_skip_reason",
+         path=tmp_path,
+         tasks=[hf.Task(schema=ts, inputs={"p1": 100})],
+     )
+     wk.submit(wait=True, add_to_known=False, status=False)
+     runs = wk.get_all_EARs()
+     assert not runs[0].success
+     assert not runs[1].success
+     assert runs[0].skip_reason is SkipReason.NOT_SKIPPED
+     assert runs[1].skip_reason is SkipReason.UPSTREAM_FAILURE
+
+
+ @pytest.mark.integration
+ def test_run_skip_reason_loop_termination(null_config, tmp_path):
+     ts = hf.TaskSchema(
+         objective="t1",
+         inputs=[hf.SchemaInput("p1")],
+         outputs=[hf.SchemaInput("p1")],
+         actions=[
+             hf.Action(
+                 commands=[
+                     hf.Command(
+                         command="echo $(( <<parameter:p1>> + 100 ))",
+                         stdout="<<int(parameter:p1)>>",
+                     ),
+                 ]
+             ),
+         ],
+     )
+     loop_term = hf.Rule(path="outputs.p1", condition={"value.equal_to": 300})
+     wk = hf.Workflow.from_template_data(
+         template_name="test_skip_reason",
+         path=tmp_path,
+         tasks=[hf.Task(schema=ts, inputs={"p1": 100})],
+         loops=[
+             hf.Loop(name="my_loop", tasks=[0], termination=loop_term, num_iterations=3)
+         ],
+     )
+     # loop should terminate after the second iteration
+     wk.submit(wait=True, add_to_known=False, status=False)
+     runs = wk.get_all_EARs()
+
+     assert runs[0].get("outputs.p1") == 200
+     assert runs[1].get("outputs.p1") == 300
+     assert not runs[2].get("outputs.p1")
+
+     assert runs[0].success
+     assert runs[1].success
+     assert not runs[2].success
+
+     assert runs[0].skip_reason is SkipReason.NOT_SKIPPED
+     assert runs[1].skip_reason is SkipReason.NOT_SKIPPED
+     assert runs[2].skip_reason is SkipReason.LOOP_TERMINATION
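The new integration tests above rely on the `SkipReason` enum introduced in this release (see `hpcflow/sdk/core/skip_reason.py` in the file list). A minimal sketch of the states the assertions distinguish; only the member names appear in the tests, so the values here are illustrative:

    from enum import Enum

    class SkipReason(Enum):
        # illustrative values; member names taken from the assertions above
        NOT_SKIPPED = 0        # the run actually executed (it may still have failed)
        UPSTREAM_FAILURE = 1   # skipped because an upstream action or run failed
        LOOP_TERMINATION = 2   # skipped because a loop termination condition was met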
hpcflow/tests/unit/test_run_directories.py (new file)

@@ -0,0 +1,29 @@
+ from pathlib import Path
+ import pytest
+ from hpcflow.app import app as hf
+ from hpcflow.sdk.core.test_utils import make_workflow
+
+
+ @pytest.mark.parametrize("store", ["json", "zarr"])
+ def test_run_directories(null_config, tmp_path, store):
+     wk = make_workflow(
+         schemas_spec=[
+             [{"p1": None}, ("p1",), "t1"],
+             [{"p2": None}, ("p2",), "t2", {"requires_dir": True}],
+         ],
+         local_inputs={0: ("p1",)},
+         local_sequences={1: [("inputs.p2", 2, 0)]},
+         path=tmp_path,
+         store=store,
+     )
+     lp_0 = hf.Loop(name="my_loop", tasks=[1], num_iterations=2)
+     wk.add_loop(lp_0)
+     sub = wk.add_submission()  # populates run directories
+
+     run_dirs = wk.get_run_directories()
+
+     assert run_dirs[0] is None
+     assert str(run_dirs[1]) == str(Path(wk.path).joinpath("execute/t_1/e_0/i_0"))
+     assert str(run_dirs[2]) == str(Path(wk.path).joinpath("execute/t_1/e_1/i_0"))
+     assert str(run_dirs[3]) == str(Path(wk.path).joinpath("execute/t_1/e_0/i_1"))
+     assert str(run_dirs[4]) == str(Path(wk.path).joinpath("execute/t_1/e_1/i_1"))
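The assertions make the new run-directory layout explicit: runs whose action sets `requires_dir` execute in `execute/t_<task>/e_<element>/i_<iteration>` under the workflow path, while runs that need no directory map to `None`. A hypothetical helper mirroring the path construction used in the assertions:

    from pathlib import Path

    def run_dir(wk_path: str, task: int, element: int, iteration: int) -> Path:
        # mirrors the execute/t_*/e_*/i_* layout asserted above (illustrative only)
        return Path(wk_path).joinpath(f"execute/t_{task}/e_{element}/i_{iteration}")

    assert str(run_dir("/tmp/wk", 1, 0, 0)).endswith("execute/t_1/e_0/i_0")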
hpcflow/tests/unit/test_runtime.py

@@ -1,7 +1,8 @@
+ from __future__ import annotations
  from hpcflow.app import app as hf


- def test_in_pytest_if_not_frozen():
+ def test_in_pytest_if_not_frozen() -> None:
      """This is to check we can get the correct invocation command when running non-frozen
      tests (when frozen the invocation command is just the executable file)."""
      if not hf.run_time_info.is_frozen:
hpcflow/tests/unit/test_schema_input.py

@@ -1,3 +1,5 @@
+ from __future__ import annotations
+ from pathlib import Path
  import pytest

  from hpcflow.app import app as hf
@@ -11,19 +13,19 @@ def null_config(tmp_path):
      hf.load_config(config_dir=tmp_path)


- def test_null_default_value(null_config):
+ def test_null_default_value(null_config) -> None:
      p1 = hf.Parameter("p1")
      p1_inp = hf.SchemaInput(parameter=p1)
      assert "default_value" not in p1_inp.labels[""]


- def test_null_default_value_property(null_config):
+ def test_null_default_value_property(null_config) -> None:
      p1 = hf.Parameter("p1")
      p1_inp = hf.SchemaInput(parameter=p1)
      assert p1_inp.default_value is NullDefault.NULL


- def test_none_default_value(null_config):
+ def test_none_default_value(null_config) -> None:
      """A `None` default value is set with a value of `None`"""
      p1 = hf.Parameter("p1")
      p1_inp = hf.SchemaInput(parameter=p1, default_value=None)
@@ -32,7 +34,7 @@ def test_none_default_value(null_config):
      assert p1_inp.labels[""]["default_value"].value == def_val_exp.value


- def test_from_json_like_labels_and_default(null_config):
+ def test_from_json_like_labels_and_default(null_config) -> None:
      json_like = {
          "parameter": "p1",
          "labels": {"0": {}},
@@ -45,7 +47,7 @@ def test_from_json_like_labels_and_default(null_config):
      assert inp.labels["0"]["default_value"].value == None


- def test_element_get_removes_schema_param_trivial_label(null_config, tmp_path):
+ def test_element_get_removes_schema_param_trivial_label(null_config, tmp_path: Path):
      p1_val = 101
      label = "my_label"
      s1 = hf.TaskSchema(
@@ -61,7 +63,7 @@ def test_element_get_removes_schema_param_trivial_label(null_config, tmp_path):
      assert wk.tasks[0].elements[0].get("inputs") == {"p1": p1_val}


- def test_element_inputs_removes_schema_param_trivial_label(null_config, tmp_path):
+ def test_element_inputs_removes_schema_param_trivial_label(null_config, tmp_path: Path):
      p1_val = 101
      label = "my_label"
      s1 = hf.TaskSchema(
@@ -91,7 +93,9 @@ def test_element_inputs_removes_schema_param_trivial_label(null_config, tmp_path):
      assert element.iterations[0].action_runs[0].inputs._get_prefixed_names() == ["p1"]


- def test_element_get_does_not_removes_multiple_schema_param_label(null_config, tmp_path):
+ def test_element_get_does_not_removes_multiple_schema_param_label(
+     null_config, tmp_path: Path
+ ):
      p1_val = 101
      label = "my_label"
      s1 = hf.TaskSchema(
@@ -109,7 +113,7 @@ def test_element_get_does_not_removes_multiple_schema_param_label(null_config, tmp_path):


  def test_element_inputs_does_not_remove_multiple_schema_param_label(
-     null_config, tmp_path
+     null_config, tmp_path: Path
  ):
      p1_val = 101
      label = "my_label"
@@ -142,7 +146,9 @@ def test_element_inputs_does_not_remove_multiple_schema_param_label(
      ]


- def test_get_input_values_for_multiple_schema_input_single_label(null_config, tmp_path):
+ def test_get_input_values_for_multiple_schema_input_single_label(
+     null_config, tmp_path: Path
+ ):
      p1_val = 101
      label = "my_label"
      s1 = hf.TaskSchema(
@@ -170,7 +176,7 @@ def test_get_input_values_for_multiple_schema_input_single_label(null_config, tmp_path):
      assert run.get_input_values() == {"p2": 201, "p1": 101}


- def test_get_input_values_subset(null_config, tmp_path):
+ def test_get_input_values_subset(null_config, tmp_path: Path):
      p1_val = 101
      s1 = hf.TaskSchema(
          objective="t1",
@@ -195,7 +201,7 @@ def test_get_input_values_subset(null_config, tmp_path):
      assert run.get_input_values(inputs=("p1",)) == {"p1": 101}


- def test_get_input_values_subset_labelled_label_dict_False(null_config, tmp_path):
+ def test_get_input_values_subset_labelled_label_dict_False(null_config, tmp_path: Path):
      p1_val = 101
      s1 = hf.TaskSchema(
          objective="t1",
@@ -229,7 +235,7 @@ def test_get_input_values_subset_labelled_label_dict_False(null_config, tmp_path):
      assert run.get_input_values(inputs=("p1[one]",), label_dict=False) == {"p1[one]": 101}


- def test_get_input_values_subset_labelled_label_dict_True(null_config, tmp_path):
+ def test_get_input_values_subset_labelled_label_dict_True(null_config, tmp_path: Path):
      p1_val = 101
      s1 = hf.TaskSchema(
          objective="t1",
@@ -265,7 +271,7 @@ def test_get_input_values_subset_labelled_label_dict_True(null_config, tmp_path):
      }


- def test_get_input_values_for_multiple_schema_input(null_config, tmp_path):
+ def test_get_input_values_for_multiple_schema_input(null_config, tmp_path: Path):
      p1_val = 101
      label = "my_label"
      s1 = hf.TaskSchema(
@@ -293,7 +299,9 @@ def test_get_input_values_for_multiple_schema_input(null_config, tmp_path):
      assert run.get_input_values() == {"p2": 201, "p1": {label: 101}}


- def test_get_input_values_for_multiple_schema_input_with_object(null_config, tmp_path):
+ def test_get_input_values_for_multiple_schema_input_with_object(
+     null_config, tmp_path: Path
+ ):
      p1_val = P1(a=101)
      label = "my_label"
      s1 = hf.TaskSchema(
@@ -324,7 +332,7 @@ def test_get_input_values_for_multiple_schema_input_with_object(null_config, tmp_path):


  @pytest.mark.integration
- def test_get_input_values_all_iterations(null_config, tmp_path):
+ def test_get_input_values_all_iterations(null_config, tmp_path: Path):
      s1 = hf.TaskSchema(
          objective="t1",
          inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
hpcflow/tests/unit/test_shell.py

@@ -1,7 +1,8 @@
+ from __future__ import annotations
  from hpcflow.sdk.submission.shells import ALL_SHELLS


- def test_process_JS_header_args_app_invoc_windows_powershell():
+ def test_process_JS_header_args_app_invoc_windows_powershell() -> None:
      """
      Three types of invocation commands exist:
      1. the frozen app executable
@@ -40,7 +41,7 @@ def test_process_JS_header_args_app_invoc_windows_powershell():
          assert processed["app_invoc"] == j


- def test_process_JS_header_args_app_invoc_bash():
+ def test_process_JS_header_args_app_invoc_bash() -> None:
      """
      Three types of invocation commands exist:
      1. the frozen app executable
@@ -76,3 +77,23 @@ def test_process_JS_header_args_app_invoc_bash():
      for i, j in zip(app_invocs, expected):
          processed = shell.process_JS_header_args({"app_invoc": i})
          assert processed["app_invoc"] == j
+
+
+ def test_format_array_powershell():
+     shell = ALL_SHELLS["powershell"]["nt"]()
+     assert shell.format_array([1, 2, 3]) == "@(1, 2, 3)"
+
+
+ def test_format_array_get_item_powershell():
+     shell = ALL_SHELLS["powershell"]["nt"]()
+     assert shell.format_array_get_item("my_arr", 3) == "$my_arr[3]"
+
+
+ def test_format_array_bash():
+     shell = ALL_SHELLS["bash"]["posix"]()
+     assert shell.format_array([1, 2, 3]) == "(1 2 3)"
+
+
+ def test_format_array_get_item_bash():
+     shell = ALL_SHELLS["bash"]["posix"]()
+     assert shell.format_array_get_item("my_arr", 3) == r"${my_arr[3]}"
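The four new tests document per-shell array formatting: PowerShell uses `@(1, 2, 3)` and `$my_arr[3]`, bash uses `(1 2 3)` and `${my_arr[3]}`. A sketch of how the two helpers might combine into a single shell line (hypothetical usage, not hpcflow's jobscript writer):

    from hpcflow.sdk.submission.shells import ALL_SHELLS

    def array_lookup_line(shell_name: str, os_name: str, values: list, idx: int) -> str:
        # declare an array literal, then read one element back, in the target shell's syntax
        shell = ALL_SHELLS[shell_name][os_name]()
        arr = shell.format_array(values)
        item = shell.format_array_get_item("my_arr", idx)
        return f"my_arr={arr}; echo {item}"

    # bash: my_arr=(1 2 3); echo ${my_arr[3]}
    print(array_lookup_line("bash", "posix", [1, 2, 3], 3))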
hpcflow/tests/unit/test_slurm.py

@@ -1,37 +1,38 @@
+ from __future__ import annotations
  from hpcflow.sdk.submission.schedulers.slurm import SlurmPosix


- def test_parse_job_ID_simple():
+ def test_parse_job_ID_simple() -> None:
      assert SlurmPosix._parse_job_IDs("123") == ("123", None)


- def test_parse_job_ID_simple_array_item():
+ def test_parse_job_ID_simple_array_item() -> None:
      assert SlurmPosix._parse_job_IDs("123_10") == ("123", [9])


- def test_parse_job_ID_array_simple_range():
+ def test_parse_job_ID_array_simple_range() -> None:
      assert SlurmPosix._parse_job_IDs("3397752_[9-11]") == ("3397752", [8, 9, 10])


- def test_parse_job_ID_array_simple_multiple_range():
+ def test_parse_job_ID_array_simple_multiple_range() -> None:
      assert SlurmPosix._parse_job_IDs("49203_[3-5,9-11]") == (
          "49203",
          [2, 3, 4, 8, 9, 10],
      )


- def test_parse_job_ID_array_simple_mixed_range():
+ def test_parse_job_ID_array_simple_mixed_range() -> None:
      assert SlurmPosix._parse_job_IDs("30627658_[5,8-10]") == (
          "30627658",
          [4, 7, 8, 9],
      )


- def test_parse_job_ID_array_simple_range_with_max_concurrent():
+ def test_parse_job_ID_array_simple_range_with_max_concurrent() -> None:
      assert SlurmPosix._parse_job_IDs("3397752_[9-11%2]") == ("3397752", [8, 9, 10])


- def test_parse_job_ID_array_simple_multiple_range_max_concurrent():
+ def test_parse_job_ID_array_simple_multiple_range_max_concurrent() -> None:
      assert SlurmPosix._parse_job_IDs("49203_[3-5%1,9-11%2]") == (
          "49203",
          [2, 3, 4, 8, 9, 10],
      )
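Together these tests pin down the `_parse_job_IDs` contract: a Slurm job ID string maps to the base job ID plus zero-based array-task indices, with bracketed ranges such as `[3-5,9-11]` expanded and `%N` max-concurrent suffixes discarded. A standalone re-implementation sketch of that contract (not the `SlurmPosix` source):

    from __future__ import annotations

    def parse_job_ids(job_id: str) -> tuple[str, list[int] | None]:
        # "123" -> ("123", None); "123_10" -> ("123", [9])
        base, sep, arr = job_id.partition("_")
        if not sep:
            return base, None
        indices: list[int] = []
        for part in arr.strip("[]").split(","):
            part = part.split("%")[0]  # drop max-concurrent suffix: "9-11%2" -> "9-11"
            if "-" in part:
                start, stop = map(int, part.split("-"))
                indices.extend(range(start - 1, stop))  # one-based range -> zero-based
            else:
                indices.append(int(part) - 1)
        return base, indices

    assert parse_job_ids("3397752_[9-11%2]") == ("3397752", [8, 9, 10])
    assert parse_job_ids("49203_[3-5%1,9-11%2]") == ("49203", [2, 3, 4, 8, 9, 10])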