hpcflow-new2 0.2.0a189__py3-none-any.whl → 0.2.0a190__py3-none-any.whl

This diff shows the changes between two publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only.
Files changed (115)
  1. hpcflow/__pyinstaller/hook-hpcflow.py +8 -6
  2. hpcflow/_version.py +1 -1
  3. hpcflow/app.py +1 -0
  4. hpcflow/data/scripts/main_script_test_hdf5_in_obj.py +1 -1
  5. hpcflow/data/scripts/main_script_test_hdf5_out_obj.py +1 -1
  6. hpcflow/sdk/__init__.py +21 -15
  7. hpcflow/sdk/app.py +2133 -770
  8. hpcflow/sdk/cli.py +281 -250
  9. hpcflow/sdk/cli_common.py +6 -2
  10. hpcflow/sdk/config/__init__.py +1 -1
  11. hpcflow/sdk/config/callbacks.py +77 -42
  12. hpcflow/sdk/config/cli.py +126 -103
  13. hpcflow/sdk/config/config.py +578 -311
  14. hpcflow/sdk/config/config_file.py +131 -95
  15. hpcflow/sdk/config/errors.py +112 -85
  16. hpcflow/sdk/config/types.py +145 -0
  17. hpcflow/sdk/core/actions.py +1054 -994
  18. hpcflow/sdk/core/app_aware.py +24 -0
  19. hpcflow/sdk/core/cache.py +81 -63
  20. hpcflow/sdk/core/command_files.py +275 -185
  21. hpcflow/sdk/core/commands.py +111 -107
  22. hpcflow/sdk/core/element.py +724 -503
  23. hpcflow/sdk/core/enums.py +192 -0
  24. hpcflow/sdk/core/environment.py +74 -93
  25. hpcflow/sdk/core/errors.py +398 -51
  26. hpcflow/sdk/core/json_like.py +540 -272
  27. hpcflow/sdk/core/loop.py +380 -334
  28. hpcflow/sdk/core/loop_cache.py +160 -43
  29. hpcflow/sdk/core/object_list.py +370 -207
  30. hpcflow/sdk/core/parameters.py +728 -600
  31. hpcflow/sdk/core/rule.py +59 -41
  32. hpcflow/sdk/core/run_dir_files.py +33 -22
  33. hpcflow/sdk/core/task.py +1546 -1325
  34. hpcflow/sdk/core/task_schema.py +240 -196
  35. hpcflow/sdk/core/test_utils.py +126 -88
  36. hpcflow/sdk/core/types.py +387 -0
  37. hpcflow/sdk/core/utils.py +410 -305
  38. hpcflow/sdk/core/validation.py +82 -9
  39. hpcflow/sdk/core/workflow.py +1192 -1028
  40. hpcflow/sdk/core/zarr_io.py +98 -137
  41. hpcflow/sdk/demo/cli.py +46 -33
  42. hpcflow/sdk/helper/cli.py +18 -16
  43. hpcflow/sdk/helper/helper.py +75 -63
  44. hpcflow/sdk/helper/watcher.py +61 -28
  45. hpcflow/sdk/log.py +83 -59
  46. hpcflow/sdk/persistence/__init__.py +8 -31
  47. hpcflow/sdk/persistence/base.py +988 -586
  48. hpcflow/sdk/persistence/defaults.py +6 -0
  49. hpcflow/sdk/persistence/discovery.py +38 -0
  50. hpcflow/sdk/persistence/json.py +408 -153
  51. hpcflow/sdk/persistence/pending.py +158 -123
  52. hpcflow/sdk/persistence/store_resource.py +37 -22
  53. hpcflow/sdk/persistence/types.py +307 -0
  54. hpcflow/sdk/persistence/utils.py +14 -11
  55. hpcflow/sdk/persistence/zarr.py +477 -420
  56. hpcflow/sdk/runtime.py +44 -41
  57. hpcflow/sdk/submission/{jobscript_info.py → enums.py} +39 -12
  58. hpcflow/sdk/submission/jobscript.py +444 -404
  59. hpcflow/sdk/submission/schedulers/__init__.py +133 -40
  60. hpcflow/sdk/submission/schedulers/direct.py +97 -71
  61. hpcflow/sdk/submission/schedulers/sge.py +132 -126
  62. hpcflow/sdk/submission/schedulers/slurm.py +263 -268
  63. hpcflow/sdk/submission/schedulers/utils.py +7 -2
  64. hpcflow/sdk/submission/shells/__init__.py +14 -15
  65. hpcflow/sdk/submission/shells/base.py +102 -29
  66. hpcflow/sdk/submission/shells/bash.py +72 -55
  67. hpcflow/sdk/submission/shells/os_version.py +31 -30
  68. hpcflow/sdk/submission/shells/powershell.py +37 -29
  69. hpcflow/sdk/submission/submission.py +203 -257
  70. hpcflow/sdk/submission/types.py +143 -0
  71. hpcflow/sdk/typing.py +163 -12
  72. hpcflow/tests/conftest.py +8 -6
  73. hpcflow/tests/schedulers/slurm/test_slurm_submission.py +5 -2
  74. hpcflow/tests/scripts/test_main_scripts.py +60 -30
  75. hpcflow/tests/shells/wsl/test_wsl_submission.py +6 -4
  76. hpcflow/tests/unit/test_action.py +86 -75
  77. hpcflow/tests/unit/test_action_rule.py +9 -4
  78. hpcflow/tests/unit/test_app.py +13 -6
  79. hpcflow/tests/unit/test_cli.py +1 -1
  80. hpcflow/tests/unit/test_command.py +71 -54
  81. hpcflow/tests/unit/test_config.py +20 -15
  82. hpcflow/tests/unit/test_config_file.py +21 -18
  83. hpcflow/tests/unit/test_element.py +58 -62
  84. hpcflow/tests/unit/test_element_iteration.py +3 -1
  85. hpcflow/tests/unit/test_element_set.py +29 -19
  86. hpcflow/tests/unit/test_group.py +4 -2
  87. hpcflow/tests/unit/test_input_source.py +116 -93
  88. hpcflow/tests/unit/test_input_value.py +29 -24
  89. hpcflow/tests/unit/test_json_like.py +44 -35
  90. hpcflow/tests/unit/test_loop.py +65 -58
  91. hpcflow/tests/unit/test_object_list.py +17 -12
  92. hpcflow/tests/unit/test_parameter.py +16 -7
  93. hpcflow/tests/unit/test_persistence.py +48 -35
  94. hpcflow/tests/unit/test_resources.py +20 -18
  95. hpcflow/tests/unit/test_run.py +8 -3
  96. hpcflow/tests/unit/test_runtime.py +2 -1
  97. hpcflow/tests/unit/test_schema_input.py +23 -15
  98. hpcflow/tests/unit/test_shell.py +3 -2
  99. hpcflow/tests/unit/test_slurm.py +8 -7
  100. hpcflow/tests/unit/test_submission.py +39 -19
  101. hpcflow/tests/unit/test_task.py +352 -247
  102. hpcflow/tests/unit/test_task_schema.py +33 -20
  103. hpcflow/tests/unit/test_utils.py +9 -11
  104. hpcflow/tests/unit/test_value_sequence.py +15 -12
  105. hpcflow/tests/unit/test_workflow.py +114 -83
  106. hpcflow/tests/unit/test_workflow_template.py +0 -1
  107. hpcflow/tests/workflows/test_jobscript.py +2 -1
  108. hpcflow/tests/workflows/test_workflows.py +18 -13
  109. {hpcflow_new2-0.2.0a189.dist-info → hpcflow_new2-0.2.0a190.dist-info}/METADATA +2 -1
  110. hpcflow_new2-0.2.0a190.dist-info/RECORD +165 -0
  111. hpcflow/sdk/core/parallel.py +0 -21
  112. hpcflow_new2-0.2.0a189.dist-info/RECORD +0 -158
  113. {hpcflow_new2-0.2.0a189.dist-info → hpcflow_new2-0.2.0a190.dist-info}/LICENSE +0 -0
  114. {hpcflow_new2-0.2.0a189.dist-info → hpcflow_new2-0.2.0a190.dist-info}/WHEEL +0 -0
  115. {hpcflow_new2-0.2.0a189.dist-info → hpcflow_new2-0.2.0a190.dist-info}/entry_points.txt +0 -0
@@ -1,49 +1,61 @@
+from __future__ import annotations
 from pathlib import Path
+from typing import cast, TYPE_CHECKING
 import numpy as np
-import zarr
+import zarr  # type: ignore
 import pytest
 from hpcflow.sdk.core.test_utils import make_test_data_YAML_workflow
-from hpcflow.sdk.persistence.base import StoreEAR, StoreElement, StoreElementIter
-from hpcflow.sdk.persistence.json import JSONPersistentStore
-
+from hpcflow.sdk.persistence.json import (
+    JSONPersistentStore,
+    JsonStoreElement,
+    JsonStoreElementIter,
+    JsonStoreEAR,
+)
 from hpcflow.app import app as hf
 
+if TYPE_CHECKING:
+    from hpcflow.sdk.persistence.zarr import ZarrPersistentStore
+
 
 @pytest.mark.skip("need to refactor `make_test_store_from_spec`")
-def test_store_pending_add_task(tmp_path):
+def test_store_pending_add_task(tmp_path: Path):
     """Check expected pending state after adding a task."""
 
     # make store: 0 tasks:
-    store = JSONPersistentStore.make_test_store_from_spec([], dir=tmp_path)
+    store = JSONPersistentStore.make_test_store_from_spec(hf, [], dir=tmp_path)
     task_ID = store.add_task()
     assert store._pending.add_tasks == {task_ID: []}
 
 
 @pytest.mark.skip("need to refactor `make_test_store_from_spec`")
-def test_store_pending_add_element(tmp_path):
+def test_store_pending_add_element(tmp_path: Path):
     """Check expected pending state after adding an element."""
 
     # make store: 1 task with 0 elements:
     store = JSONPersistentStore.make_test_store_from_spec(app=hf, spec=[{}], dir=tmp_path)
     elem_ID = store.add_element(task_ID=0)
     assert store._pending.add_elements == {
-        elem_ID: StoreElement(
+        elem_ID: JsonStoreElement(
             id_=elem_ID,
             is_pending=True,
-            element_idx=0,
+            es_idx=0,
             task_ID=0,
             iteration_IDs=[],
+            index=0,
+            seq_idx={},
+            src_idx={},
         )
     } and store._pending.add_task_element_IDs == {0: [0]}
 
 
 @pytest.mark.skip("need to refactor `make_test_store_from_spec`")
 @pytest.mark.parametrize("elem_ID", [0, 1])
-def test_store_pending_add_element_iter(tmp_path, elem_ID):
+def test_store_pending_add_element_iter(tmp_path: Path, elem_ID: int):
     """Check expected pending state after adding an element iteration."""
 
     # make store: 1 task with 2 elements and 0 iterations:
     store = JSONPersistentStore.make_test_store_from_spec(
+        hf,
         [{"elements": [{}, {}]}],
         dir=tmp_path,
     )
@@ -53,23 +65,25 @@ def test_store_pending_add_element_iter(tmp_path, elem_ID):
         schema_parameters=[],
     )
     assert store._pending.add_elem_iters == {
-        iter_ID: StoreElementIter(
+        iter_ID: JsonStoreElementIter(
             id_=iter_ID,
             is_pending=True,
             element_ID=elem_ID,
             EAR_IDs={},
             data_idx={},
             schema_parameters=[],
+            EARs_initialised=False,
         )
     } and store._pending.add_elem_iter_IDs == {elem_ID: [iter_ID]}
 
 
 @pytest.mark.skip("need to refactor `make_test_store_from_spec`")
-def test_store_pending_add_EAR(tmp_path):
+def test_store_pending_add_EAR(tmp_path: Path):
     """Check expected pending state after adding an EAR."""
 
     # make store: 1 task with 1 element and 1 iteration:
     store = JSONPersistentStore.make_test_store_from_spec(
+        hf,
         [{"elements": [{"iterations": [{}]}]}],
         dir=tmp_path,
     )
@@ -81,7 +95,7 @@ def test_store_pending_add_EAR(tmp_path):
         metadata={},
     )
     assert store._pending.add_EARs == {
-        EAR_ID: StoreEAR(
+        EAR_ID: JsonStoreEAR(
             id_=EAR_ID,
             is_pending=True,
             elem_iter_ID=0,
@@ -94,21 +108,21 @@ def test_store_pending_add_EAR(tmp_path):
 
 
 @pytest.mark.skip("need to refactor `make_test_store_from_spec`")
-def test_get_task_elements_task_is_pending(tmp_path):
+def test_get_task_elements_task_is_pending(tmp_path: Path):
     """Check we get an empty list when getting all task elements of a pending task to
     which no elements have been added."""
     # make store: 0 tasks:
-    store = JSONPersistentStore.make_test_store_from_spec([], dir=tmp_path)
+    store = JSONPersistentStore.make_test_store_from_spec(hf, [], dir=tmp_path)
     task_ID = store.add_task()
     assert store.get_task_elements(task_ID, slice(0, None)) == []
 
 
 @pytest.mark.skip("need to refactor `make_test_store_from_spec`")
-def test_get_task_elements_single_element_is_pending(tmp_path):
+def test_get_task_elements_single_element_is_pending(tmp_path: Path):
     """Check expected return when getting all task elements of a persistent task that has
     a single pending element."""
     # make store: 1 task
-    store = JSONPersistentStore.make_test_store_from_spec([{}], dir=tmp_path)
+    store = JSONPersistentStore.make_test_store_from_spec(hf, [{}], dir=tmp_path)
     store.add_element(task_ID=0)
     assert store.get_task_elements(0, slice(0, None)) == [
         {
@@ -123,12 +137,12 @@ def test_get_task_elements_single_element_is_pending(tmp_path):
 
 
 @pytest.mark.skip("need to refactor `make_test_store_from_spec`")
-def test_get_task_elements_multi_element_one_pending(tmp_path):
+def test_get_task_elements_multi_element_one_pending(tmp_path: Path):
     """Check expected return when getting all task elements of a persistent task that has
     a persistent element and a pending element."""
     # make store: 1 task with 1 element:
     store = JSONPersistentStore.make_test_store_from_spec(
-        [{"elements": [{}]}], dir=tmp_path
+        hf, [{"elements": [{}]}], dir=tmp_path
     )
     store.add_element(task_ID=0)
     assert store.get_task_elements(0, slice(0, None)) == [
@@ -152,12 +166,12 @@ def test_get_task_elements_multi_element_one_pending(tmp_path):
 
 
 @pytest.mark.skip("need to refactor `make_test_store_from_spec`")
-def test_get_task_elements_single_element_iter_pending(tmp_path):
+def test_get_task_elements_single_element_iter_pending(tmp_path: Path):
     """Check expected return when getting all task elements of a persistent task that has
     a persistent element with a pending iteration."""
     # make store: 1 task with 1 element:
     store = JSONPersistentStore.make_test_store_from_spec(
-        [{"elements": [{}]}], dir=tmp_path
+        hf, [{"elements": [{}]}], dir=tmp_path
     )
     store.add_element_iteration(element_ID=0, data_idx={}, schema_parameters=[])
     assert store.get_task_elements(0, slice(0, None)) == [
@@ -183,12 +197,12 @@ def test_get_task_elements_single_element_iter_pending(tmp_path):
 
 
 @pytest.mark.skip("need to refactor `make_test_store_from_spec`")
-def test_get_task_elements_single_element_iter_EAR_pending(tmp_path):
+def test_get_task_elements_single_element_iter_EAR_pending(tmp_path: Path):
     """Check expected return when getting all task elements of a persistent task that has
     a persistent element with a persistent iteration and a pending EAR"""
     # make store: 1 task with 1 element with 1 iteration:
     store = JSONPersistentStore.make_test_store_from_spec(
-        [{"elements": [{"iterations": [{}]}]}], dir=tmp_path
+        hf, [{"elements": [{"iterations": [{}]}]}], dir=tmp_path
     )
     store.add_EAR(elem_iter_ID=0, action_idx=0, commands_idx=[], data_idx={}, metadata={})
     assert store.get_task_elements(0, slice(0, None)) == [
@@ -225,7 +239,7 @@ def test_get_task_elements_single_element_iter_EAR_pending(tmp_path):
     ]
 
 
-def test_make_zarr_store_zstd_compressor(null_config, tmp_path):
+def test_make_zarr_store_zstd_compressor(null_config, tmp_path: Path):
     wk = make_test_data_YAML_workflow(
         workflow_name="workflow_1.yaml",
         path=tmp_path,
@@ -234,7 +248,7 @@ def test_make_zarr_store_zstd_compressor(null_config, tmp_path):
     )
 
 
-def test_make_zarr_store_no_compressor(null_config, tmp_path):
+def test_make_zarr_store_no_compressor(null_config, tmp_path: Path):
     wk = make_test_data_YAML_workflow(
         workflow_name="workflow_1.yaml",
         path=tmp_path,
@@ -244,7 +258,7 @@ def test_make_zarr_store_no_compressor(null_config, tmp_path):
     )
 
 @pytest.mark.integration
-def test_zarr_rechunk_data_equivalent(null_config, tmp_path):
+def test_zarr_rechunk_data_equivalent(null_config, tmp_path: Path):
     t1 = hf.Task(
         schema=hf.task_schemas.test_t1_conditional_OS,
         inputs={"p1": 101},
@@ -259,7 +273,7 @@ def test_zarr_rechunk_data_equivalent(null_config, tmp_path):
     wk.submit(wait=True, status=False, add_to_known=False)
     wk.rechunk_runs(backup=True, status=False, chunk_size=None)  # None -> one chunk
 
-    arr = wk._store._get_EARs_arr()
+    arr = cast("ZarrPersistentStore", wk._store)._get_EARs_arr()
     assert arr.chunks == arr.shape
 
     bak_path = (Path(wk.path) / arr.path).with_suffix(".bak")
@@ -275,7 +289,7 @@ def test_zarr_rechunk_data_equivalent(null_config, tmp_path):
 
 
 @pytest.mark.integration
-def test_zarr_rechunk_data_equivalent_custom_chunk_size(null_config, tmp_path):
+def test_zarr_rechunk_data_equivalent_custom_chunk_size(null_config, tmp_path: Path):
     t1 = hf.Task(
         schema=hf.task_schemas.test_t1_conditional_OS,
         inputs={"p1": 101},
@@ -290,7 +304,7 @@ def test_zarr_rechunk_data_equivalent_custom_chunk_size(null_config, tmp_path):
     wk.submit(wait=True, status=False, add_to_known=False)
     wk.rechunk_runs(backup=True, status=False, chunk_size=2)
 
-    arr = wk._store._get_EARs_arr()
+    arr = cast("ZarrPersistentStore", wk._store)._get_EARs_arr()
     assert arr.chunks == (2,)
 
     bak_path = (Path(wk.path) / arr.path).with_suffix(".bak")
@@ -303,7 +317,7 @@ def test_zarr_rechunk_data_equivalent_custom_chunk_size(null_config, tmp_path):
 
 
 @pytest.mark.integration
-def test_zarr_rechunk_data_no_backup_load_runs(null_config, tmp_path):
+def test_zarr_rechunk_data_no_backup_load_runs(null_config, tmp_path: Path):
     t1 = hf.Task(
         schema=hf.task_schemas.test_t1_conditional_OS,
         inputs={"p1": 101},
@@ -318,7 +332,7 @@ def test_zarr_rechunk_data_no_backup_load_runs(null_config, tmp_path):
     wk.submit(wait=True, status=False, add_to_known=False)
     wk.rechunk_runs(backup=False, status=False)
 
-    arr = wk._store._get_EARs_arr()
+    arr = cast("ZarrPersistentStore", wk._store)._get_EARs_arr()
 
     bak_path = (Path(wk.path) / arr.path).with_suffix(".bak")
     assert not bak_path.is_file()
@@ -331,7 +345,7 @@ def test_zarr_rechunk_data_no_backup_load_runs(null_config, tmp_path):
 
 
 @pytest.mark.integration
-def test_zarr_rechunk_data_no_backup_load_parameter_base(null_config, tmp_path):
+def test_zarr_rechunk_data_no_backup_load_parameter_base(null_config, tmp_path: Path):
     t1 = hf.Task(
         schema=hf.task_schemas.test_t1_conditional_OS,
         inputs={"p1": 101},
@@ -346,13 +360,12 @@ def test_zarr_rechunk_data_no_backup_load_parameter_base(null_config, tmp_path):
     wk.submit(wait=True, status=False, add_to_known=False)
     wk.rechunk_parameter_base(backup=False, status=False)
 
-    arr = wk._store._get_parameter_base_array()
+    arr = cast("ZarrPersistentStore", wk._store)._get_parameter_base_array()
 
     bak_path = (Path(wk.path) / arr.path).with_suffix(".bak")
     assert not bak_path.is_file()
 
     # check we can load parameters:
-    params = wk.get_all_parameters()
     param_IDs = []
-    for i in params:
+    for i in wk.get_all_parameters():
         param_IDs.append(i.id_)
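
Note on the typing pattern in the hunks above: `ZarrPersistentStore` is imported only under `typing.TYPE_CHECKING`, and `wk._store` is narrowed at each use site with `typing.cast`, which is a runtime no-op. A minimal sketch of the same pattern, assuming an hpcflow `Workflow` whose store is zarr-backed (`ears_chunks` is a hypothetical helper, not part of this diff):

from __future__ import annotations
from typing import TYPE_CHECKING, cast

if TYPE_CHECKING:
    # Evaluated by type checkers only; skipped at runtime, so this module
    # gains no hard runtime dependency on the zarr backend.
    from hpcflow.sdk.persistence.zarr import ZarrPersistentStore


def ears_chunks(wk) -> tuple:
    # cast() performs no check at runtime; it only tells the type checker
    # to treat wk._store as the zarr-backed store from here on.
    store = cast("ZarrPersistentStore", wk._store)
    return store._get_EARs_arr().chunks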
@@ -1,16 +1,18 @@
+from __future__ import annotations
 import os
+from pathlib import Path
 import pytest
 from hpcflow.app import app as hf
 from hpcflow.sdk.core.errors import UnsupportedSchedulerError
 
 
-def test_init_scope_equivalence_simple():
+def test_init_scope_equivalence_simple() -> None:
     rs1 = hf.ResourceSpec(scope=hf.ActionScope.any(), num_cores=1)
     rs2 = hf.ResourceSpec(scope="any", num_cores=1)
     assert rs1 == rs2
 
 
-def test_init_scope_equivalence_with_kwargs():
+def test_init_scope_equivalence_with_kwargs() -> None:
     rs1 = hf.ResourceSpec(
         scope=hf.ActionScope.input_file_generator(file="my_file"), num_cores=1
     )
@@ -18,32 +20,32 @@ def test_init_scope_equivalence_with_kwargs():
     assert rs1 == rs2
 
 
-def test_init_no_args():
+def test_init_no_args() -> None:
     rs1 = hf.ResourceSpec()
     rs2 = hf.ResourceSpec(scope="any")
     assert rs1 == rs2
 
 
-def test_resource_list_raise_on_identical_scopes():
+def test_resource_list_raise_on_identical_scopes() -> None:
     with pytest.raises(ValueError):
         hf.ResourceList.normalise([{"scope": "any"}, {"scope": "any"}])
 
 
-def test_merge_other_same_scope():
+def test_merge_other_same_scope() -> None:
     res_lst_1 = hf.ResourceList.from_json_like({"any": {"num_cores": 1}})
     res_lst_2 = hf.ResourceList.from_json_like({"any": {}})
     res_lst_2.merge_other(res_lst_1)
     assert res_lst_2 == hf.ResourceList.from_json_like({"any": {"num_cores": 1}})
 
 
-def test_merge_other_same_scope_no_overwrite():
+def test_merge_other_same_scope_no_overwrite() -> None:
     res_lst_1 = hf.ResourceList.from_json_like({"any": {"num_cores": 1}})
     res_lst_2 = hf.ResourceList.from_json_like({"any": {"num_cores": 2}})
     res_lst_2.merge_other(res_lst_1)
     assert res_lst_2 == hf.ResourceList.from_json_like({"any": {"num_cores": 2}})
 
 
-def test_merge_other_multi_scope():
+def test_merge_other_multi_scope() -> None:
     res_lst_1 = hf.ResourceList.from_json_like({"any": {"num_cores": 1}})
     res_lst_2 = hf.ResourceList.from_json_like({"any": {}, "main": {"num_cores": 3}})
     res_lst_2.merge_other(res_lst_1)
@@ -53,7 +55,7 @@ def test_merge_other_multi_scope():
 
 
 @pytest.mark.parametrize("store", ["json", "zarr"])
-def test_merge_other_persistent_workflow_reload(null_config, tmp_path, store):
+def test_merge_other_persistent_workflow_reload(null_config, tmp_path: Path, store: str):
     wkt = hf.WorkflowTemplate(
         name="test_load",
         resources={"any": {"num_cores": 2}},
@@ -70,7 +72,7 @@ def test_merge_other_persistent_workflow_reload(null_config, tmp_path, store):
 
 
 @pytest.mark.parametrize("store", ["json", "zarr"])
-def test_use_persistent_resource_spec(null_config, tmp_path, store):
+def test_use_persistent_resource_spec(null_config, tmp_path: Path, store: str):
     # create a workflow from which we can use a resource spec in a new workflow:
     num_cores_check = 2
     wk_base = hf.Workflow.from_template_data(
@@ -104,7 +106,7 @@ def test_use_persistent_resource_spec(null_config, tmp_path, store):
 
 
 @pytest.mark.parametrize("store", ["json", "zarr"])
-def test_use_persistent_resource_list(null_config, tmp_path, store):
+def test_use_persistent_resource_list(null_config, tmp_path: Path, store: str):
     # create a workflow from which we can use the resource list in a new workflow:
     num_cores_check = 2
     wk_base = hf.Workflow.from_template_data(
@@ -138,7 +140,7 @@ def test_use_persistent_resource_list(null_config, tmp_path, store):
 
 
 @pytest.mark.parametrize("store", ["json", "zarr"])
-def test_default_scheduler_set(new_null_config, tmp_path, store):
+def test_default_scheduler_set(new_null_config, tmp_path: Path, store: str):
     wk = hf.Workflow.from_template_data(
         template_name="wk",
         path=tmp_path,
@@ -154,21 +156,21 @@ def test_default_scheduler_set(new_null_config, tmp_path, store):
     assert wk.submissions[0].jobscripts[0].scheduler_name == hf.config.default_scheduler
 
 
-def test_scheduler_case_insensitive(null_config):
+def test_scheduler_case_insensitive(null_config) -> None:
     rs1 = hf.ResourceSpec(scheduler="direct")
     rs2 = hf.ResourceSpec(scheduler="dIrEcT")
     assert rs1 == rs2
     assert rs1.scheduler == rs2.scheduler == "direct"
 
 
-def test_scheduler_strip(null_config):
+def test_scheduler_strip(null_config) -> None:
     rs1 = hf.ResourceSpec(scheduler=" direct ")
     rs2 = hf.ResourceSpec(scheduler="direct")
     assert rs1 == rs2
     assert rs1.scheduler == rs2.scheduler == "direct"
 
 
-def test_shell_case_insensitive(null_config):
+def test_shell_case_insensitive(null_config) -> None:
     shell_name = "bash" if os.name == "posix" else "powershell"
     shell_name_title = shell_name
     n = shell_name_title[0]
@@ -180,7 +182,7 @@ def test_shell_case_insensitive(null_config):
     assert rs1.shell == rs2.shell == shell_name
 
 
-def test_shell_strip(null_config):
+def test_shell_strip(null_config) -> None:
     shell_name = "bash" if os.name == "posix" else "powershell"
     rs1 = hf.ResourceSpec(shell=f" {shell_name} ")
     rs2 = hf.ResourceSpec(shell=shell_name)
@@ -195,14 +197,14 @@ def test_os_name_case_insensitive(null_config):
     assert rs1.os_name == rs2.os_name == "nt"
 
 
-def test_os_name_strip(null_config):
+def test_os_name_strip(null_config) -> None:
     rs1 = hf.ResourceSpec(os_name=" nt ")
     rs2 = hf.ResourceSpec(os_name="nt")
     assert rs1 == rs2
     assert rs1.os_name == rs2.os_name == "nt"
 
 
-def test_raise_on_unsupported_scheduler(new_null_config, tmp_path):
+def test_raise_on_unsupported_scheduler(new_null_config, tmp_path: Path):
     # slurm not supported by default config file:
     wk = hf.Workflow.from_template_data(
         template_name="wk1",
@@ -219,7 +221,7 @@ def test_raise_on_unsupported_scheduler(new_null_config, tmp_path):
         wk.add_submission()
 
 
-def test_can_use_non_default_scheduler(new_null_config, tmp_path):
+def test_can_use_non_default_scheduler(new_null_config, tmp_path: Path):
     # for either OS choose a compatible scheduler not set by default:
     if os.name == "nt":
         opt_scheduler = "direct_posix"  # i.e for WSL
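
The bare `-> None` annotations added throughout this file are not cosmetic: with default settings, mypy skips the bodies of functions that carry no annotations at all, so annotating the return type opts each test into static checking. A minimal sketch of the difference, assuming default mypy settings (no --check-untyped-defs):

def test_unchecked():
    x: int = "oops"  # body of an unannotated function: mypy does not report this


def test_checked() -> None:
    x: int = "oops"  # error: incompatible types in assignment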
@@ -1,7 +1,9 @@
+from __future__ import annotations
+from pathlib import Path
 from hpcflow.app import app as hf
 
 
-def test_compose_commands_no_shell_var(null_config, tmp_path):
+def test_compose_commands_no_shell_var(null_config, tmp_path: Path):
     ts = hf.TaskSchema(
         objective="test_compose_commands",
         actions=[hf.Action(commands=[hf.Command(command="Start-Sleep 10")])],
@@ -12,13 +14,14 @@ def test_compose_commands_no_shell_var(null_config, tmp_path):
         tasks=[hf.Task(schema=ts)],
     )
     sub = wk.add_submission()
+    assert sub is not None
     js = sub.jobscripts[0]
     run = wk.tasks[0].elements[0].iterations[0].action_runs[0]
     _, shell_vars = run.compose_commands(jobscript=js, JS_action_idx=0)
     assert shell_vars == {0: []}
 
 
-def test_compose_commands_single_shell_var(null_config, tmp_path):
+def test_compose_commands_single_shell_var(null_config, tmp_path: Path):
     ts = hf.TaskSchema(
         objective="test_compose_commands",
         inputs=[hf.SchemaInput("p1")],
@@ -40,13 +43,14 @@ def test_compose_commands_single_shell_var(null_config, tmp_path):
         tasks=[hf.Task(schema=ts, inputs={"p1": 101})],
     )
     sub = wk.add_submission()
+    assert sub is not None
     js = sub.jobscripts[0]
     run = wk.tasks[0].elements[0].iterations[0].action_runs[0]
     _, shell_vars = run.compose_commands(jobscript=js, JS_action_idx=0)
     assert shell_vars == {0: [("outputs.p1", "parameter_p1", "stdout")]}
 
 
-def test_compose_commands_multi_single_shell_var(null_config, tmp_path):
+def test_compose_commands_multi_single_shell_var(null_config, tmp_path: Path):
     ts = hf.TaskSchema(
         objective="test_compose_commands",
         inputs=[hf.SchemaInput("p1")],
@@ -69,6 +73,7 @@ def test_compose_commands_multi_single_shell_var(null_config, tmp_path):
         tasks=[hf.Task(schema=ts, inputs={"p1": 101})],
     )
     sub = wk.add_submission()
+    assert sub is not None
     js = sub.jobscripts[0]
     run = wk.tasks[0].elements[0].iterations[0].action_runs[0]
     _, shell_vars = run.compose_commands(jobscript=js, JS_action_idx=0)
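
The `assert sub is not None` lines added after each `wk.add_submission()` call suggest the method is now typed as returning an optional submission; the assertion both fails fast at runtime and narrows the type for static checkers, so the subsequent `sub.jobscripts[0]` passes. A minimal standalone sketch of this narrowing idiom (`maybe_submission` is a hypothetical stand-in, not an hpcflow API):

from typing import Optional


def maybe_submission() -> Optional[str]:
    # Stand-in for an API that can return None (e.g. nothing to submit).
    return "submission-0"


sub = maybe_submission()
assert sub is not None  # narrows Optional[str] to str for the type checker
print(sub.upper())  # safe: the checker now knows sub is a str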
@@ -1,7 +1,8 @@
+from __future__ import annotations
 from hpcflow.app import app as hf
 
 
-def test_in_pytest_if_not_frozen():
+def test_in_pytest_if_not_frozen() -> None:
     """This is to check we can get the correct invocation command when running non-frozen
     tests (when frozen the invocation command is just the executable file)."""
     if not hf.run_time_info.is_frozen:
@@ -1,3 +1,5 @@
+from __future__ import annotations
+from pathlib import Path
 import pytest
 
 from hpcflow.app import app as hf
@@ -11,19 +13,19 @@ def null_config(tmp_path):
     hf.load_config(config_dir=tmp_path)
 
 
-def test_null_default_value(null_config):
+def test_null_default_value(null_config) -> None:
     p1 = hf.Parameter("p1")
     p1_inp = hf.SchemaInput(parameter=p1)
     assert "default_value" not in p1_inp.labels[""]
 
 
-def test_null_default_value_property(null_config):
+def test_null_default_value_property(null_config) -> None:
     p1 = hf.Parameter("p1")
     p1_inp = hf.SchemaInput(parameter=p1)
     assert p1_inp.default_value is NullDefault.NULL
 
 
-def test_none_default_value(null_config):
+def test_none_default_value(null_config) -> None:
     """A `None` default value is set with a value of `None`"""
     p1 = hf.Parameter("p1")
     p1_inp = hf.SchemaInput(parameter=p1, default_value=None)
@@ -32,7 +34,7 @@ def test_none_default_value(null_config):
     assert p1_inp.labels[""]["default_value"].value == def_val_exp.value
 
 
-def test_from_json_like_labels_and_default(null_config):
+def test_from_json_like_labels_and_default(null_config) -> None:
     json_like = {
         "parameter": "p1",
         "labels": {"0": {}},
@@ -45,7 +47,7 @@ def test_from_json_like_labels_and_default(null_config):
     assert inp.labels["0"]["default_value"].value == None
 
 
-def test_element_get_removes_schema_param_trivial_label(null_config, tmp_path):
+def test_element_get_removes_schema_param_trivial_label(null_config, tmp_path: Path):
     p1_val = 101
     label = "my_label"
     s1 = hf.TaskSchema(
@@ -61,7 +63,7 @@ def test_element_get_removes_schema_param_trivial_label(null_config, tmp_path):
     assert wk.tasks[0].elements[0].get("inputs") == {"p1": p1_val}
 
 
-def test_element_inputs_removes_schema_param_trivial_label(null_config, tmp_path):
+def test_element_inputs_removes_schema_param_trivial_label(null_config, tmp_path: Path):
     p1_val = 101
     label = "my_label"
     s1 = hf.TaskSchema(
@@ -91,7 +93,9 @@ def test_element_inputs_removes_schema_param_trivial_label(null_config, tmp_path
     assert element.iterations[0].action_runs[0].inputs._get_prefixed_names() == ["p1"]
 
 
-def test_element_get_does_not_removes_multiple_schema_param_label(null_config, tmp_path):
+def test_element_get_does_not_removes_multiple_schema_param_label(
+    null_config, tmp_path: Path
+):
     p1_val = 101
     label = "my_label"
     s1 = hf.TaskSchema(
@@ -109,7 +113,7 @@ def test_element_get_does_not_removes_multiple_schema_param_label(null_config, t
 
 
 def test_element_inputs_does_not_remove_multiple_schema_param_label(
-    null_config, tmp_path
+    null_config, tmp_path: Path
 ):
     p1_val = 101
     label = "my_label"
@@ -142,7 +146,9 @@ def test_element_inputs_does_not_remove_multiple_schema_param_label(
     ]
 
 
-def test_get_input_values_for_multiple_schema_input_single_label(null_config, tmp_path):
+def test_get_input_values_for_multiple_schema_input_single_label(
+    null_config, tmp_path: Path
+):
     p1_val = 101
     label = "my_label"
     s1 = hf.TaskSchema(
@@ -170,7 +176,7 @@ def test_get_input_values_for_multiple_schema_input_single_label(null_config, tm
     assert run.get_input_values() == {"p2": 201, "p1": 101}
 
 
-def test_get_input_values_subset(null_config, tmp_path):
+def test_get_input_values_subset(null_config, tmp_path: Path):
     p1_val = 101
     s1 = hf.TaskSchema(
         objective="t1",
@@ -195,7 +201,7 @@ def test_get_input_values_subset(null_config, tmp_path):
     assert run.get_input_values(inputs=("p1",)) == {"p1": 101}
 
 
-def test_get_input_values_subset_labelled_label_dict_False(null_config, tmp_path):
+def test_get_input_values_subset_labelled_label_dict_False(null_config, tmp_path: Path):
     p1_val = 101
     s1 = hf.TaskSchema(
         objective="t1",
@@ -229,7 +235,7 @@ def test_get_input_values_subset_labelled_label_dict_False(null_config, tmp_path
     assert run.get_input_values(inputs=("p1[one]",), label_dict=False) == {"p1[one]": 101}
 
 
-def test_get_input_values_subset_labelled_label_dict_True(null_config, tmp_path):
+def test_get_input_values_subset_labelled_label_dict_True(null_config, tmp_path: Path):
     p1_val = 101
     s1 = hf.TaskSchema(
         objective="t1",
@@ -265,7 +271,7 @@ def test_get_input_values_subset_labelled_label_dict_True(null_config, tmp_path)
     }
 
 
-def test_get_input_values_for_multiple_schema_input(null_config, tmp_path):
+def test_get_input_values_for_multiple_schema_input(null_config, tmp_path: Path):
     p1_val = 101
     label = "my_label"
     s1 = hf.TaskSchema(
@@ -293,7 +299,7 @@ def test_get_input_values_for_multiple_schema_input(null_config, tmp_path):
     assert run.get_input_values() == {"p2": 201, "p1": {label: 101}}
 
 
-def test_get_input_values_for_multiple_schema_input_with_object(null_config, tmp_path):
+def test_get_input_values_for_multiple_schema_input_with_object(
+    null_config, tmp_path: Path
+):
     p1_val = P1(a=101)
     label = "my_label"
     s1 = hf.TaskSchema(
@@ -324,7 +332,7 @@ def test_get_input_values_for_multiple_schema_input_with_object(null_config, tmp
 
 
 @pytest.mark.integration
-def test_get_input_values_all_iterations(null_config, tmp_path):
+def test_get_input_values_all_iterations(null_config, tmp_path: Path):
     s1 = hf.TaskSchema(
         objective="t1",
         inputs=[hf.SchemaInput(parameter=hf.Parameter("p1"))],
@@ -1,7 +1,8 @@
+from __future__ import annotations
 from hpcflow.sdk.submission.shells import ALL_SHELLS
 
 
-def test_process_JS_header_args_app_invoc_windows_powershell():
+def test_process_JS_header_args_app_invoc_windows_powershell() -> None:
     """
     Three types of invocation commands exist:
    1. the frozen app executable
@@ -40,7 +41,7 @@ def test_process_JS_header_args_app_invoc_windows_powershell():
     assert processed["app_invoc"] == j
 
 
-def test_process_JS_header_args_app_invoc_bash():
+def test_process_JS_header_args_app_invoc_bash() -> None:
     """
     Three types of invocation commands exist:
     1. the frozen app executable