hpcflow-new2 0.2.0a189__py3-none-any.whl → 0.2.0a190__py3-none-any.whl
This diff compares the contents of two publicly released versions of this package, as published to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their public registry.
- hpcflow/__pyinstaller/hook-hpcflow.py +8 -6
- hpcflow/_version.py +1 -1
- hpcflow/app.py +1 -0
- hpcflow/data/scripts/main_script_test_hdf5_in_obj.py +1 -1
- hpcflow/data/scripts/main_script_test_hdf5_out_obj.py +1 -1
- hpcflow/sdk/__init__.py +21 -15
- hpcflow/sdk/app.py +2133 -770
- hpcflow/sdk/cli.py +281 -250
- hpcflow/sdk/cli_common.py +6 -2
- hpcflow/sdk/config/__init__.py +1 -1
- hpcflow/sdk/config/callbacks.py +77 -42
- hpcflow/sdk/config/cli.py +126 -103
- hpcflow/sdk/config/config.py +578 -311
- hpcflow/sdk/config/config_file.py +131 -95
- hpcflow/sdk/config/errors.py +112 -85
- hpcflow/sdk/config/types.py +145 -0
- hpcflow/sdk/core/actions.py +1054 -994
- hpcflow/sdk/core/app_aware.py +24 -0
- hpcflow/sdk/core/cache.py +81 -63
- hpcflow/sdk/core/command_files.py +275 -185
- hpcflow/sdk/core/commands.py +111 -107
- hpcflow/sdk/core/element.py +724 -503
- hpcflow/sdk/core/enums.py +192 -0
- hpcflow/sdk/core/environment.py +74 -93
- hpcflow/sdk/core/errors.py +398 -51
- hpcflow/sdk/core/json_like.py +540 -272
- hpcflow/sdk/core/loop.py +380 -334
- hpcflow/sdk/core/loop_cache.py +160 -43
- hpcflow/sdk/core/object_list.py +370 -207
- hpcflow/sdk/core/parameters.py +728 -600
- hpcflow/sdk/core/rule.py +59 -41
- hpcflow/sdk/core/run_dir_files.py +33 -22
- hpcflow/sdk/core/task.py +1546 -1325
- hpcflow/sdk/core/task_schema.py +240 -196
- hpcflow/sdk/core/test_utils.py +126 -88
- hpcflow/sdk/core/types.py +387 -0
- hpcflow/sdk/core/utils.py +410 -305
- hpcflow/sdk/core/validation.py +82 -9
- hpcflow/sdk/core/workflow.py +1192 -1028
- hpcflow/sdk/core/zarr_io.py +98 -137
- hpcflow/sdk/demo/cli.py +46 -33
- hpcflow/sdk/helper/cli.py +18 -16
- hpcflow/sdk/helper/helper.py +75 -63
- hpcflow/sdk/helper/watcher.py +61 -28
- hpcflow/sdk/log.py +83 -59
- hpcflow/sdk/persistence/__init__.py +8 -31
- hpcflow/sdk/persistence/base.py +988 -586
- hpcflow/sdk/persistence/defaults.py +6 -0
- hpcflow/sdk/persistence/discovery.py +38 -0
- hpcflow/sdk/persistence/json.py +408 -153
- hpcflow/sdk/persistence/pending.py +158 -123
- hpcflow/sdk/persistence/store_resource.py +37 -22
- hpcflow/sdk/persistence/types.py +307 -0
- hpcflow/sdk/persistence/utils.py +14 -11
- hpcflow/sdk/persistence/zarr.py +477 -420
- hpcflow/sdk/runtime.py +44 -41
- hpcflow/sdk/submission/{jobscript_info.py → enums.py} +39 -12
- hpcflow/sdk/submission/jobscript.py +444 -404
- hpcflow/sdk/submission/schedulers/__init__.py +133 -40
- hpcflow/sdk/submission/schedulers/direct.py +97 -71
- hpcflow/sdk/submission/schedulers/sge.py +132 -126
- hpcflow/sdk/submission/schedulers/slurm.py +263 -268
- hpcflow/sdk/submission/schedulers/utils.py +7 -2
- hpcflow/sdk/submission/shells/__init__.py +14 -15
- hpcflow/sdk/submission/shells/base.py +102 -29
- hpcflow/sdk/submission/shells/bash.py +72 -55
- hpcflow/sdk/submission/shells/os_version.py +31 -30
- hpcflow/sdk/submission/shells/powershell.py +37 -29
- hpcflow/sdk/submission/submission.py +203 -257
- hpcflow/sdk/submission/types.py +143 -0
- hpcflow/sdk/typing.py +163 -12
- hpcflow/tests/conftest.py +8 -6
- hpcflow/tests/schedulers/slurm/test_slurm_submission.py +5 -2
- hpcflow/tests/scripts/test_main_scripts.py +60 -30
- hpcflow/tests/shells/wsl/test_wsl_submission.py +6 -4
- hpcflow/tests/unit/test_action.py +86 -75
- hpcflow/tests/unit/test_action_rule.py +9 -4
- hpcflow/tests/unit/test_app.py +13 -6
- hpcflow/tests/unit/test_cli.py +1 -1
- hpcflow/tests/unit/test_command.py +71 -54
- hpcflow/tests/unit/test_config.py +20 -15
- hpcflow/tests/unit/test_config_file.py +21 -18
- hpcflow/tests/unit/test_element.py +58 -62
- hpcflow/tests/unit/test_element_iteration.py +3 -1
- hpcflow/tests/unit/test_element_set.py +29 -19
- hpcflow/tests/unit/test_group.py +4 -2
- hpcflow/tests/unit/test_input_source.py +116 -93
- hpcflow/tests/unit/test_input_value.py +29 -24
- hpcflow/tests/unit/test_json_like.py +44 -35
- hpcflow/tests/unit/test_loop.py +65 -58
- hpcflow/tests/unit/test_object_list.py +17 -12
- hpcflow/tests/unit/test_parameter.py +16 -7
- hpcflow/tests/unit/test_persistence.py +48 -35
- hpcflow/tests/unit/test_resources.py +20 -18
- hpcflow/tests/unit/test_run.py +8 -3
- hpcflow/tests/unit/test_runtime.py +2 -1
- hpcflow/tests/unit/test_schema_input.py +23 -15
- hpcflow/tests/unit/test_shell.py +3 -2
- hpcflow/tests/unit/test_slurm.py +8 -7
- hpcflow/tests/unit/test_submission.py +39 -19
- hpcflow/tests/unit/test_task.py +352 -247
- hpcflow/tests/unit/test_task_schema.py +33 -20
- hpcflow/tests/unit/test_utils.py +9 -11
- hpcflow/tests/unit/test_value_sequence.py +15 -12
- hpcflow/tests/unit/test_workflow.py +114 -83
- hpcflow/tests/unit/test_workflow_template.py +0 -1
- hpcflow/tests/workflows/test_jobscript.py +2 -1
- hpcflow/tests/workflows/test_workflows.py +18 -13
- {hpcflow_new2-0.2.0a189.dist-info → hpcflow_new2-0.2.0a190.dist-info}/METADATA +2 -1
- hpcflow_new2-0.2.0a190.dist-info/RECORD +165 -0
- hpcflow/sdk/core/parallel.py +0 -21
- hpcflow_new2-0.2.0a189.dist-info/RECORD +0 -158
- {hpcflow_new2-0.2.0a189.dist-info → hpcflow_new2-0.2.0a190.dist-info}/LICENSE +0 -0
- {hpcflow_new2-0.2.0a189.dist-info → hpcflow_new2-0.2.0a190.dist-info}/WHEEL +0 -0
- {hpcflow_new2-0.2.0a189.dist-info → hpcflow_new2-0.2.0a190.dist-info}/entry_points.txt +0 -0
hpcflow/tests/unit/test_workflow.py
@@ -1,7 +1,9 @@
+from __future__ import annotations
 import copy
 from dataclasses import dataclass
 from pathlib import Path
 from textwrap import dedent
+from typing import TYPE_CHECKING
 
 import pytest
 
@@ -11,16 +13,23 @@ from hpcflow.sdk.core.errors import (
     WorkflowBatchUpdateFailedError,
     WorkflowNotFoundError,
 )
-from hpcflow.sdk.core.parameters import ParameterValue
 from hpcflow.sdk.core.test_utils import (
     make_workflow,
     P1_parameter_cls as P1,
     make_test_data_YAML_workflow,
 )
 
+if TYPE_CHECKING:
+    from collections.abc import Iterator
+    from hpcflow.sdk.core.actions import Action, ActionEnvironment
+    from hpcflow.sdk.core.command_files import FileSpec
+    from hpcflow.sdk.core.parameters import Parameter
+    from hpcflow.sdk.core.task_schema import TaskSchema
+    from hpcflow.sdk.core.workflow import Workflow
+
 
 @pytest.fixture
-def persistent_workflow(null_config):
+def persistent_workflow(null_config) -> Iterator[Workflow]:
     tmp_dir = hf._ensure_user_runtime_dir().joinpath("test_data")
     tmp_dir.mkdir(exist_ok=True)
     wk = make_test_data_YAML_workflow("workflow_1.yaml", path=tmp_dir, overwrite=True)
@@ -44,16 +53,18 @@ def test_workflow_zip(persistent_workflow):
     Path(zip_path).unlink()
 
 
-def modify_workflow_metadata_on_disk(workflow):
+def modify_workflow_metadata_on_disk(workflow: Workflow):
     """Make a non-sense change to the on-disk metadata."""
     assert workflow.store_format == "zarr"
-    wk_md = workflow._store.load_metadata()
+    wk_md = workflow._store.load_metadata()  # type: ignore
     changed_md = copy.deepcopy(wk_md)
     changed_md["new_key"] = "new_value"
-    workflow._store._get_root_group(mode="r+").attrs.put(changed_md)
+    workflow._store._get_root_group(mode="r+").attrs.put(changed_md)  # type: ignore
 
 
-def make_workflow_w1_with_config_kwargs(
+def make_workflow_w1_with_config_kwargs(
+    config_kwargs, path, param_p1: Parameter, param_p2: Parameter
+) -> Workflow:
     hf.load_config(**config_kwargs)
     s1 = hf.TaskSchema("ts1", actions=[], inputs=[param_p1], outputs=[param_p2])
     t1 = hf.Task(schema=s1, inputs=[hf.InputValue(param_p1, 101)])
@@ -68,37 +79,37 @@ def null_config(tmp_path):
 
 
 @pytest.fixture
-def empty_workflow(null_config, tmp_path):
+def empty_workflow(null_config, tmp_path) -> Workflow:
     return hf.Workflow.from_template(hf.WorkflowTemplate(name="w1"), path=tmp_path)
 
 
 @pytest.fixture
-def param_p1(null_config):
+def param_p1(null_config) -> Parameter:
     return hf.Parameter("p1")
 
 
 @pytest.fixture
-def param_p1c(null_config):
+def param_p1c(null_config) -> Parameter:
     return hf.Parameter("p1c")
 
 
 @pytest.fixture
-def param_p2():
+def param_p2() -> Parameter:
     return hf.Parameter("p2")
 
 
 @pytest.fixture
-def param_p3(null_config):
+def param_p3(null_config) -> Parameter:
     return hf.Parameter("p3")
 
 
 @pytest.fixture
-def act_env_1(null_config):
+def act_env_1(null_config) -> ActionEnvironment:
     return hf.ActionEnvironment("env_1")
 
 
 @pytest.fixture
-def act_1(null_config, act_env_1):
+def act_1(null_config, act_env_1: ActionEnvironment) -> Action:
     return hf.Action(
         commands=[hf.Command("<<parameter:p1>>")],
         environments=[act_env_1],
@@ -106,7 +117,7 @@ def act_1(null_config, act_env_1):
 
 
 @pytest.fixture
-def act_2(null_config, act_env_1):
+def act_2(null_config, act_env_1: ActionEnvironment) -> Action:
     return hf.Action(
         commands=[hf.Command("<<parameter:p2>> <<parameter:p3>>")],
         environments=[act_env_1],
@@ -116,12 +127,17 @@ def act_2(null_config, act_env_1):
 @pytest.fixture
 def file_spec_fs1(
     null_config,
-):
+) -> FileSpec:
     return hf.FileSpec(label="file1", name="file1.txt")
 
 
 @pytest.fixture
-def act_3(
+def act_3(
+    null_config,
+    act_env_1: ActionEnvironment,
+    param_p2: Parameter,
+    file_spec_fs1: FileSpec,
+) -> Action:
     return hf.Action(
         commands=[hf.Command("<<parameter:p1>>")],
         output_file_parsers=[
@@ -132,25 +148,29 @@ def act_3(null_config, act_env_1, param_p2, file_spec_fs1):
 
 
 @pytest.fixture
-def schema_s1(null_config, param_p1, act_1):
+def schema_s1(null_config, param_p1: Parameter, act_1: Action) -> TaskSchema:
     return hf.TaskSchema("ts1", actions=[act_1], inputs=[param_p1])
 
 
 @pytest.fixture
-def schema_s2(
+def schema_s2(
+    null_config, param_p2: Parameter, param_p3: Parameter, act_2: Action
+) -> TaskSchema:
     return hf.TaskSchema("ts2", actions=[act_2], inputs=[param_p2, param_p3])
 
 
 @pytest.fixture
-def schema_s3(
+def schema_s3(
+    null_config, param_p1: Parameter, param_p2: Parameter, act_3: Action
+) -> TaskSchema:
     return hf.TaskSchema("ts1", actions=[act_3], inputs=[param_p1], outputs=[param_p2])
 
 
 @pytest.fixture
 def schema_s4(
     null_config,
-    param_p1,
-):
+    param_p1: Parameter,
+) -> TaskSchema:
     return hf.TaskSchema(
         objective="t1",
         inputs=[hf.SchemaInput(parameter=param_p1)],
@@ -166,8 +186,8 @@ def schema_s4(
 @pytest.fixture
 def schema_s4c(
     null_config,
-    param_p1c,
-):
+    param_p1c: Parameter,
+) -> TaskSchema:
     return hf.TaskSchema(
         objective="t1",
         inputs=[hf.SchemaInput(parameter=param_p1c)],
@@ -181,41 +201,47 @@ def schema_s4c(
 
 
 @pytest.fixture
-def workflow_w1(
+def workflow_w1(
+    null_config, tmp_path: Path, schema_s3: TaskSchema, param_p1: Parameter
+) -> Workflow:
     t1 = hf.Task(schema=schema_s3, inputs=[hf.InputValue(param_p1, 101)])
     wkt = hf.WorkflowTemplate(name="w1", tasks=[t1])
     return hf.Workflow.from_template(wkt, path=tmp_path)
 
 
-def test_make_empty_workflow(null_config, empty_workflow):
+def test_make_empty_workflow(null_config, empty_workflow: Workflow):
     assert empty_workflow.path is not None
 
 
-def test_raise_on_missing_workflow(null_config, tmp_path):
+def test_raise_on_missing_workflow(null_config, tmp_path: Path):
     with pytest.raises(WorkflowNotFoundError):
         hf.Workflow(tmp_path)
 
 
-def test_add_empty_task(empty_workflow, schema_s1):
+def test_add_empty_task(empty_workflow: Workflow, schema_s1: TaskSchema):
     t1 = hf.Task(schema=schema_s1)
     wk_t1 = empty_workflow._add_empty_task(t1)
     assert len(empty_workflow.tasks) == 1 and wk_t1.index == 0 and wk_t1.name == "ts1"
 
 
-def test_raise_on_missing_inputs_add_first_task(
+def test_raise_on_missing_inputs_add_first_task(
+    empty_workflow: Workflow, schema_s1: TaskSchema, param_p1: Parameter
+):
     t1 = hf.Task(schema=schema_s1)
     with pytest.raises(MissingInputs) as exc_info:
         empty_workflow.add_task(t1)
 
-    assert exc_info.value.missing_inputs ==
+    assert exc_info.value.missing_inputs == (param_p1.typ,)
 
 
-def test_raise_on_missing_inputs_add_second_task(
+def test_raise_on_missing_inputs_add_second_task(
+    workflow_w1: Workflow, schema_s2: TaskSchema, param_p3: Parameter
+):
     t2 = hf.Task(schema=schema_s2)
     with pytest.raises(MissingInputs) as exc_info:
         workflow_w1.add_task(t2)
 
-    assert exc_info.value.missing_inputs ==
+    assert exc_info.value.missing_inputs == (param_p3.typ,)  # p2 comes from existing task
 
 
 @pytest.mark.skip(reason="TODO: Not implemented.")
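The fixture annotations above follow a simple rule: a fixture that returns annotates its return type, while a generator fixture that yields (like `persistent_workflow`) is annotated as an `Iterator` of the yielded type, and tests annotate their parameters to pick those types up. A self-contained sketch of the pattern, with illustrative names:

    from collections.abc import Iterator

    import pytest

    @pytest.fixture
    def number() -> int:
        # return-style fixture: annotate the value handed to tests
        return 42

    @pytest.fixture
    def resource() -> Iterator[str]:
        # yield-style fixture: Iterator[<yielded type>]
        handle = "opened"  # set-up
        yield handle       # value injected into the test
        # tear-down runs here once the test finishes

    def test_example(number: int, resource: str) -> None:
        # pytest matches fixtures by name, not type; the annotations are
        # purely for the type checker and the reader
        assert number == 42 and resource == "opened"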
@@ -301,42 +327,45 @@ def test_WorkflowTemplate_from_YAML_string_with_and_without_element_sets_equival
     assert wkt_1 == wkt_2
 
 
-def test_store_has_pending_during_add_task(
+def test_store_has_pending_during_add_task(
+    workflow_w1: Workflow, schema_s2: TaskSchema, param_p3: Parameter
+):
     t2 = hf.Task(schema=schema_s2, inputs=[hf.InputValue(param_p3, 301)])
     with workflow_w1.batch_update():
         workflow_w1.add_task(t2)
         assert workflow_w1._store.has_pending
 
 
-def test_empty_batch_update_does_nothing(workflow_w1):
+def test_empty_batch_update_does_nothing(workflow_w1: Workflow):
     with workflow_w1.batch_update():
         assert not workflow_w1._store.has_pending
 
 
 @pytest.mark.skip("need to re-implement `is_modified_on_disk`")
-def test_is_modified_on_disk_when_metadata_changed(workflow_w1):
+def test_is_modified_on_disk_when_metadata_changed(workflow_w1: Workflow):
     # this is ZarrPersistentStore-specific; might want to consider a refactor later
     with workflow_w1._store.cached_load():
         modify_workflow_metadata_on_disk(workflow_w1)
-        assert workflow_w1._store.is_modified_on_disk()
+        assert workflow_w1._store.is_modified_on_disk()  # type: ignore
 
 
 @pytest.mark.skip("need to re-implement `is_modified_on_disk`")
-def test_batch_update_abort_if_modified_on_disk(
+def test_batch_update_abort_if_modified_on_disk(
+    workflow_w1: Workflow, schema_s2: TaskSchema, param_p3: Parameter
+):
     t2 = hf.Task(schema=schema_s2, inputs=[hf.InputValue(param_p3, 301)])
     with pytest.raises(WorkflowBatchUpdateFailedError):
-        with workflow_w1._store.cached_load():
-            with workflow_w1.batch_update():
-                workflow_w1.add_task(t2)
-                modify_workflow_metadata_on_disk(workflow_w1)
+        with workflow_w1._store.cached_load(), workflow_w1.batch_update():
+            workflow_w1.add_task(t2)
+            modify_workflow_metadata_on_disk(workflow_w1)
 
 
-def test_closest_task_input_source_chosen(null_config, tmp_path):
+def test_closest_task_input_source_chosen(null_config, tmp_path: Path):
     wk = make_workflow(
         schemas_spec=[
-
-
-
+            ({"p1": None}, ("p1",), "t1"),
+            ({"p1": None}, ("p1",), "t2"),
+            ({"p1": None}, ("p1",), "t3"),
         ],
         local_inputs={0: ("p1",)},
         path=tmp_path,
@@ -366,10 +395,10 @@ def test_WorkflowTemplate_from_JSON_string_without_element_sets(null_config):
 @pytest.mark.parametrize("store", ["json", "zarr"])
 def test_equivalent_element_input_parameter_value_class_and_kwargs(
     null_config,
-    tmp_path,
-    store,
-    schema_s4c,
-    param_p1c,
+    tmp_path: Path,
+    store: str,
+    schema_s4c: TaskSchema,
+    param_p1c: Parameter,
 ):
     a_value = 101
     t1_1 = hf.Task(
@@ -395,10 +424,10 @@ def test_equivalent_element_input_parameter_value_class_and_kwargs(
 @pytest.mark.parametrize("store", ["json", "zarr"])
 def test_equivalent_element_input_parameter_value_class_method_and_kwargs(
     null_config,
-    tmp_path,
-    store,
-    schema_s4c,
-    param_p1c,
+    tmp_path: Path,
+    store: str,
+    schema_s4c: TaskSchema,
+    param_p1c: Parameter,
 ):
     b_val = 50
     c_val = 51
@@ -432,7 +461,7 @@ def test_equivalent_element_input_parameter_value_class_method_and_kwargs(
 
 @pytest.mark.parametrize("store", ["json", "zarr"])
 def test_input_value_class_expected_value(
-    null_config, tmp_path, store, schema_s4c, param_p1c
+    null_config, tmp_path: Path, store: str, schema_s4c: TaskSchema, param_p1c: Parameter
 ):
     a_value = 101
     t1_value_exp = P1(a=a_value)
@@ -459,7 +488,7 @@ def test_input_value_class_expected_value(
 
 @pytest.mark.parametrize("store", ["json", "zarr"])
 def test_input_value_class_method_expected_value(
-    null_config, tmp_path, store, schema_s4c, param_p1c
+    null_config, tmp_path: Path, store: str, schema_s4c: TaskSchema, param_p1c: Parameter
 ):
     b_val = 50
     c_val = 51
@@ -493,10 +522,10 @@ def test_input_value_class_method_expected_value(
 
 @pytest.mark.parametrize("store", ["json", "zarr"])
 def test_equivalent_element_input_sequence_parameter_value_class_and_kwargs(
-    null_config, tmp_path, store, schema_s4c
+    null_config, tmp_path: Path, store: str, schema_s4c: TaskSchema
 ):
     data = {"a": 101}
-    obj = P1(**data)
+    obj = P1(**data)  # type: ignore[arg-type] # python/mypy#15317
     t1_1 = hf.Task(
         schema=[schema_s4c],
         sequences=[hf.ValueSequence(path="inputs.p1c", values=[obj], nesting_order=0)],
@@ -518,7 +547,7 @@ def test_equivalent_element_input_sequence_parameter_value_class_and_kwargs(
 
 @pytest.mark.parametrize("store", ["json", "zarr"])
 def test_equivalent_element_input_sequence_parameter_value_class_method_and_kwargs(
-    null_config, tmp_path, store, schema_s4c
+    null_config, tmp_path: Path, store: str, schema_s4c: TaskSchema
 ):
     data = {"b": 50, "c": 51}
     obj = P1.from_data(**data)
@@ -549,9 +578,11 @@ def test_equivalent_element_input_sequence_parameter_value_class_method_and_kwar
 
 
 @pytest.mark.parametrize("store", ["json", "zarr"])
-def test_sequence_value_class_expected_value(
+def test_sequence_value_class_expected_value(
+    null_config, tmp_path: Path, store: str, schema_s4c: TaskSchema
+):
     data = {"a": 101}
-    obj = P1(**data)
+    obj = P1(**data)  # type: ignore[arg-type] # python/mypy#15317
     t1_1 = hf.Task(
         schema=[schema_s4c],
         sequences=[hf.ValueSequence(path="inputs.p1c", values=[obj], nesting_order=0)],
@@ -574,7 +605,7 @@ def test_sequence_value_class_expected_value(null_config, tmp_path, store, schem
 
 @pytest.mark.parametrize("store", ["json", "zarr"])
 def test_sequence_value_class_method_expected_value(
-    null_config, tmp_path, store, schema_s4c
+    null_config, tmp_path: Path, store: str, schema_s4c: TaskSchema
 ):
     data = {"b": 50, "c": 51}
     obj = P1.from_data(**data)
@@ -607,7 +638,7 @@ def test_sequence_value_class_method_expected_value(
 
 @pytest.mark.parametrize("store", ["json", "zarr"])
 def test_expected_element_input_parameter_value_class_merge_sequence(
-    null_config, tmp_path, store, schema_s4c, param_p1c
+    null_config, tmp_path: Path, store: str, schema_s4c: TaskSchema, param_p1c: Parameter
 ):
     a_val = 101
     d_val = 201
@@ -632,7 +663,7 @@ def test_expected_element_input_parameter_value_class_merge_sequence(
 
 @pytest.mark.parametrize("store", ["json", "zarr"])
 def test_expected_element_input_parameter_value_class_method_merge_sequence(
-    null_config, tmp_path, store, schema_s4c, param_p1c
+    null_config, tmp_path: Path, store: str, schema_s4c: TaskSchema, param_p1c: Parameter
 ):
     b_val = 50
     c_val = 51
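The `# type: ignore[arg-type]` comments above scope the suppression to a single mypy error code rather than silencing the whole line, and the trailing python/mypy#15317 reference records the upstream issue that makes `P1(**data)` a false positive. A small generic illustration of the scoping (names made up; exact diagnostics depend on the mypy version):

    def greet(name: str, times: int) -> str:
        return name * times

    opts = {"name": "hi", "times": 3}  # mypy infers dict[str, object]
    # **opts triggers arg-type errors (object is not str/int); suppressing just
    # that code means any other error on this line would still be reported:
    print(greet(**opts))  # type: ignore[arg-type]

Keeping the issue number alongside the ignore makes it easy to drop the suppression once the upstream fix lands.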
@@ -661,14 +692,10 @@ def test_expected_element_input_parameter_value_class_method_merge_sequence(
 
 @pytest.mark.parametrize("store", ["json", "zarr"])
 def test_upstream_input_source_merge_with_current_input_modification(
-    null_config, tmp_path, store
+    null_config, tmp_path: Path, store: str, param_p2: Parameter
 ):
-    s1 = hf.TaskSchema(
-        objective="t1", inputs=[hf.SchemaInput(parameter=hf.Parameter("p2"))]
-    )
-    s2 = hf.TaskSchema(
-        objective="t2", inputs=[hf.SchemaInput(parameter=hf.Parameter("p2"))]
-    )
+    s1 = hf.TaskSchema(objective="t1", inputs=[hf.SchemaInput(parameter=param_p2)])
+    s2 = hf.TaskSchema(objective="t2", inputs=[hf.SchemaInput(parameter=param_p2)])
     tasks = [
         hf.Task(schema=s1, inputs=[hf.InputValue("p2", {"a": 101})]),
         hf.Task(schema=s2, inputs=[hf.InputValue("p2", value=102, path="b")]),
@@ -679,17 +706,17 @@ def test_upstream_input_source_merge_with_current_input_modification(
         template_name="temp",
         store=store,
     )
-    assert wk.tasks[1].elements[0].inputs.p2.value == {"a": 101, "b": 102}
+    p2 = wk.tasks[1].elements[0].inputs.p2
+    assert isinstance(p2, hf.ElementParameter)
+    assert p2.value == {"a": 101, "b": 102}
 
 
 @pytest.mark.parametrize("store", ["json", "zarr"])
-def test_upstream_input_source_with_sub_parameter(null_config, tmp_path, store):
-    s1 = hf.TaskSchema(
-        objective="t1", inputs=[hf.SchemaInput(parameter=hf.Parameter("p2"))]
-    )
-    s2 = hf.TaskSchema(
-        objective="t2", inputs=[hf.SchemaInput(parameter=hf.Parameter("p2"))]
-    )
+def test_upstream_input_source_with_sub_parameter(
+    null_config, tmp_path: Path, store: str, param_p2: Parameter
+):
+    s1 = hf.TaskSchema(objective="t1", inputs=[hf.SchemaInput(parameter=param_p2)])
+    s2 = hf.TaskSchema(objective="t2", inputs=[hf.SchemaInput(parameter=param_p2)])
     tasks = [
         hf.Task(
             schema=s1,
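Splitting the old one-line assert into a named variable plus `assert isinstance(...)` is a standard narrowing idiom: `inputs.p2` is resolved dynamically, so its static type is broad, and the isinstance assert both documents the expected class and narrows the type for the checker (it also fails with a clearer message than an `AttributeError` deeper in the test would). A generic sketch using only standard-library types:

    def check(payload: object) -> None:
        value = payload  # static type here is just `object`
        assert isinstance(value, dict)  # runtime check that also narrows the type
        # from here on the checker treats `value` as a dict:
        assert "a" in value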
@@ -706,11 +733,13 @@ def test_upstream_input_source_with_sub_parameter(null_config, tmp_path, store):
         template_name="temp",
         store=store,
     )
-    assert wk.tasks[1].elements[0].inputs.p2.value == {"a": 101, "b": 102}
+    p2 = wk.tasks[1].elements[0].inputs.p2
+    assert isinstance(p2, hf.ElementParameter)
+    assert p2.value == {"a": 101, "b": 102}
 
 
 @pytest.mark.parametrize("store", ["json", "zarr"])
-def test_from_template_data_workflow_reload(null_config, tmp_path, store):
+def test_from_template_data_workflow_reload(null_config, tmp_path: Path, store: str):
     wk_name = "temp"
     t1 = hf.Task(schema=hf.task_schemas.test_t1_ps, inputs=[hf.InputValue("p1", 101)])
     wk = hf.Workflow.from_template_data(
@@ -727,7 +756,7 @@ def test_from_template_data_workflow_reload(null_config, tmp_path, store):
 
 
 @pytest.mark.parametrize("store", ["json", "zarr"])
-def test_from_template_workflow_reload(null_config, tmp_path, store):
+def test_from_template_workflow_reload(null_config, tmp_path: Path, store: str):
     wk_name = "temp"
     t1 = hf.Task(schema=hf.task_schemas.test_t1_ps, inputs=[hf.InputValue("p1", 101)])
     wkt = hf.WorkflowTemplate(name=wk_name, tasks=[t1])
@@ -744,7 +773,7 @@ def test_from_template_workflow_reload(null_config, tmp_path, store):
 
 
 @pytest.mark.parametrize("store", ["json", "zarr"])
-def test_from_YAML_str_template_workflow_reload(null_config, tmp_path, store):
+def test_from_YAML_str_template_workflow_reload(null_config, tmp_path: Path, store: str):
     yaml_str = dedent(
         """
         name: temp
@@ -767,7 +796,7 @@ def test_from_YAML_str_template_workflow_reload(null_config, tmp_path, store):
 
 
 @pytest.mark.parametrize("store", ["json", "zarr"])
-def test_from_template_workflow_add_task_reload(null_config, tmp_path, store):
+def test_from_template_workflow_add_task_reload(null_config, tmp_path: Path, store: str):
     wk_name = "temp"
     t1 = hf.Task(schema=hf.task_schemas.test_t1_ps, inputs=[hf.InputValue("p1", 101)])
     wkt = hf.WorkflowTemplate(name=wk_name)
@@ -785,7 +814,9 @@ def test_from_template_workflow_add_task_reload(null_config, tmp_path, store):
 
 
 @pytest.mark.parametrize("store", ["json", "zarr"])
-def test_batch_update_mode_false_after_empty_workflow_init(
+def test_batch_update_mode_false_after_empty_workflow_init(
+    null_config, tmp_path: Path, store: str
+):
     wk_name = "temp"
     wk = hf.Workflow.from_template_data(
         tasks=[],
hpcflow/tests/workflows/test_jobscript.py
@@ -1,4 +1,5 @@
 import os
+from pathlib import Path
 import pytest
 
 from hpcflow.app import app as hf
@@ -6,7 +7,7 @@ from hpcflow.app import app as hf
 
 @pytest.mark.integration
 @pytest.mark.parametrize("exit_code", [0, 1, 98, -1, -123124])
-def test_action_exit_code_parsing(null_config, tmp_path, exit_code):
+def test_action_exit_code_parsing(null_config, tmp_path: Path, exit_code: int):
     act = hf.Action(commands=[hf.Command(command=f"exit {exit_code}")])
     s1 = hf.TaskSchema(
         objective="t1",
hpcflow/tests/workflows/test_workflows.py
@@ -1,8 +1,9 @@
 import os
+from pathlib import Path
 import time
 import pytest
 from hpcflow.app import app as hf
-from hpcflow.sdk.core.
+from hpcflow.sdk.core.enums import EARStatus
 from hpcflow.sdk.core.test_utils import (
     P1_parameter_cls as P1,
     P1_sub_parameter_cls as P1_sub,
@@ -11,26 +12,30 @@ from hpcflow.sdk.core.test_utils import (
 
 
 @pytest.mark.integration
-def test_workflow_1(tmp_path, new_null_config):
+def test_workflow_1(tmp_path: Path, new_null_config):
     wk = make_test_data_YAML_workflow("workflow_1.yaml", path=tmp_path)
     wk.submit(wait=True, add_to_known=False)
-    assert wk.tasks[0].elements[0].outputs.p2.value == "201"
+    p2 = wk.tasks[0].elements[0].outputs.p2
+    assert isinstance(p2, hf.ElementParameter)
+    assert p2.value == "201"
 
 
 @pytest.mark.integration
-def test_workflow_1_with_working_dir_with_spaces(tmp_path, new_null_config):
+def test_workflow_1_with_working_dir_with_spaces(tmp_path: Path, new_null_config):
     workflow_dir = tmp_path / "sub path with spaces"
     workflow_dir.mkdir()
     wk = make_test_data_YAML_workflow("workflow_1.yaml", path=workflow_dir)
     wk.submit(wait=True, add_to_known=False)
-    assert wk.tasks[0].elements[0].outputs.p2.value == "201"
+    p2 = wk.tasks[0].elements[0].outputs.p2
+    assert isinstance(p2, hf.ElementParameter)
+    assert p2.value == "201"
 
 
 @pytest.mark.integration
 @pytest.mark.skip(
     reason="Sometimes fails on MacOS GHAs runner; too slow on Windows + Linux"
 )
-def test_run_abort(tmp_path, new_null_config):
+def test_run_abort(tmp_path: Path, new_null_config):
     wk = make_test_data_YAML_workflow("workflow_test_run_abort.yaml", path=tmp_path)
     wk.submit(add_to_known=False)
 
@@ -56,7 +61,7 @@ def test_run_abort(tmp_path, new_null_config):
 
 @pytest.mark.integration
 @pytest.mark.parametrize("store", ["json", "zarr"])
-def test_multi_command_action_stdout_parsing(null_config, tmp_path, store):
+def test_multi_command_action_stdout_parsing(null_config, tmp_path: Path, store: str):
     if os.name == "nt":
         cmds = [
             "Write-Output (<<parameter:p1>> + 100)",
@@ -98,7 +103,7 @@ def test_multi_command_action_stdout_parsing(null_config, tmp_path, store):
 
 @pytest.mark.integration
 @pytest.mark.parametrize("store", ["json", "zarr"])
-def test_element_get_group(null_config, tmp_path, store):
+def test_element_get_group(null_config, tmp_path: Path, store: str):
     if os.name == "nt":
         cmd = "Write-Output (<<parameter:p1c>> + 100)"
     else:
@@ -146,7 +151,7 @@ def test_element_get_group(null_config, tmp_path, store):
 
 
 @pytest.mark.integration
-def test_element_get_sub_object_group(null_config, tmp_path):
+def test_element_get_sub_object_group(null_config, tmp_path: Path):
     if os.name == "nt":
         cmd = "Write-Output (<<parameter:p1c>> + 100)"
     else:
@@ -196,7 +201,7 @@ def test_element_get_sub_object_group(null_config, tmp_path):
 
 
 @pytest.mark.integration
-def test_element_get_sub_data_group(null_config, tmp_path):
+def test_element_get_sub_data_group(null_config, tmp_path: Path):
     if os.name == "nt":
         cmd = "Write-Output (<<parameter:p1c>> + 100)"
     else:
@@ -243,7 +248,7 @@ def test_element_get_sub_data_group(null_config, tmp_path):
 
 
 @pytest.mark.integration
-def test_input_source_labels_and_groups(null_config, tmp_path):
+def test_input_source_labels_and_groups(null_config, tmp_path: Path):
     """This is structurally the same as the `fit_yield_functions` MatFlow workflow."""
     if os.name == "nt":
         cmds = [
@@ -338,7 +343,7 @@ def test_input_source_labels_and_groups(null_config, tmp_path):
 
 
 @pytest.mark.integration
-def test_loop_simple(null_config, tmp_path):
+def test_loop_simple(null_config, tmp_path: Path):
     if os.name == "nt":
         cmd = "Write-Output (<<parameter:p1>> + 100)"
     else:
@@ -362,7 +367,7 @@ def test_loop_simple(null_config, tmp_path):
 
 
 @pytest.mark.integration
-def test_loop_termination_multi_element(null_config, tmp_path):
+def test_loop_termination_multi_element(null_config, tmp_path: Path):
     if os.name == "nt":
         cmds = [
             "Write-Output (<<parameter:p1>> + 100)",
{hpcflow_new2-0.2.0a189.dist-info → hpcflow_new2-0.2.0a190.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: hpcflow-new2
-Version: 0.2.0a189
+Version: 0.2.0a190
 Summary: Computational workflow management
 License: MIT
 Author: aplowman
@@ -28,6 +28,7 @@ Requires-Dist: reretry (>=0.11.8,<0.12.0)
 Requires-Dist: rich (>=13.4.2,<14.0.0)
 Requires-Dist: ruamel-yaml (>=0.18.6,<0.19.0)
 Requires-Dist: termcolor (>=1.1.0,<2.0.0)
+Requires-Dist: typing-extensions (>=4.12.2,<5.0.0)
 Requires-Dist: valida (>=0.7.5,<0.8.0)
 Requires-Dist: watchdog (>=2.1.9,<3.0.0)
 Requires-Dist: zarr (>=2.17.2,<3.0.0)