hpcflow-new2 0.2.0a50__py3-none-any.whl → 0.2.0a52__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (38)
  1. hpcflow/_version.py +1 -1
  2. hpcflow/sdk/__init__.py +1 -1
  3. hpcflow/sdk/api.py +1 -1
  4. hpcflow/sdk/app.py +20 -11
  5. hpcflow/sdk/cli.py +34 -59
  6. hpcflow/sdk/core/__init__.py +13 -1
  7. hpcflow/sdk/core/actions.py +235 -126
  8. hpcflow/sdk/core/command_files.py +32 -24
  9. hpcflow/sdk/core/element.py +110 -114
  10. hpcflow/sdk/core/errors.py +57 -0
  11. hpcflow/sdk/core/loop.py +18 -34
  12. hpcflow/sdk/core/parameters.py +5 -3
  13. hpcflow/sdk/core/task.py +135 -131
  14. hpcflow/sdk/core/task_schema.py +11 -4
  15. hpcflow/sdk/core/utils.py +110 -2
  16. hpcflow/sdk/core/workflow.py +964 -676
  17. hpcflow/sdk/data/template_components/environments.yaml +0 -44
  18. hpcflow/sdk/data/template_components/task_schemas.yaml +52 -10
  19. hpcflow/sdk/persistence/__init__.py +21 -33
  20. hpcflow/sdk/persistence/base.py +1340 -458
  21. hpcflow/sdk/persistence/json.py +424 -546
  22. hpcflow/sdk/persistence/pending.py +563 -0
  23. hpcflow/sdk/persistence/store_resource.py +131 -0
  24. hpcflow/sdk/persistence/utils.py +57 -0
  25. hpcflow/sdk/persistence/zarr.py +852 -841
  26. hpcflow/sdk/submission/jobscript.py +133 -112
  27. hpcflow/sdk/submission/shells/bash.py +62 -16
  28. hpcflow/sdk/submission/shells/powershell.py +87 -16
  29. hpcflow/sdk/submission/submission.py +59 -35
  30. hpcflow/tests/unit/test_element.py +4 -9
  31. hpcflow/tests/unit/test_persistence.py +218 -0
  32. hpcflow/tests/unit/test_task.py +11 -12
  33. hpcflow/tests/unit/test_utils.py +82 -0
  34. hpcflow/tests/unit/test_workflow.py +3 -1
  35. {hpcflow_new2-0.2.0a50.dist-info → hpcflow_new2-0.2.0a52.dist-info}/METADATA +3 -1
  36. {hpcflow_new2-0.2.0a50.dist-info → hpcflow_new2-0.2.0a52.dist-info}/RECORD +38 -34
  37. {hpcflow_new2-0.2.0a50.dist-info → hpcflow_new2-0.2.0a52.dist-info}/WHEEL +0 -0
  38. {hpcflow_new2-0.2.0a50.dist-info → hpcflow_new2-0.2.0a52.dist-info}/entry_points.txt +0 -0
hpcflow/sdk/submission/shells/powershell.py

@@ -1,6 +1,7 @@
  import subprocess
  from textwrap import dedent, indent
  from typing import Dict, Optional
+ from hpcflow.sdk.core import ABORT_EXIT_CODE
  from hpcflow.sdk.submission.shells import Shell
  from hpcflow.sdk.submission.shells.os_version import get_OS_info_windows

@@ -21,6 +22,7 @@ class WindowsPowerShell(Shell):
  function {workflow_app_alias} {{
  & {{
  {env_setup}{app_invoc} `
+ --with-config log_file_path "$pwd/{app_package_name}.log" `
  --config-dir "{config_dir}" `
  --config-invocation-key "{config_invoc_key}" `
  $args
@@ -40,6 +42,16 @@ class WindowsPowerShell(Shell):
  return $path
  }}

+ function StartJobHere($block) {{
+ $jobInitBlock = [scriptblock]::Create(@"
+ Function wkflow_app {{ $function:wkflow_app }}
+ Function get_nth_line {{ $function:get_nth_line }}
+ Function JoinMultiPath {{ $function:JoinMultiPath }}
+ Set-Location '$pwd'
+ "@)
+ Start-Job -InitializationScript $jobInitBlock -Script $block
+ }}
+
  $WK_PATH = $(Get-Location)
  $SUB_IDX = {sub_idx}
  $JS_IDX = {js_idx}
@@ -56,13 +68,13 @@ class WindowsPowerShell(Shell):
  )
  JS_MAIN = dedent(
  """\
- $elem_need_EARs = get_nth_line $EAR_ID_FILE $JS_elem_idx
+ $elem_EAR_IDs = get_nth_line $EAR_ID_FILE $JS_elem_idx
  $elem_run_dirs = get_nth_line $ELEM_RUN_DIR_FILE $JS_elem_idx

  for ($JS_act_idx = 0; $JS_act_idx -lt {num_actions}; $JS_act_idx += 1) {{

- $need_EAR = ($elem_need_EARs -split "{EAR_files_delimiter}")[$JS_act_idx]
- if ($need_EAR -eq 0) {{
+ $EAR_ID = ($elem_EAR_IDs -split "{EAR_files_delimiter}")[$JS_act_idx]
+ if ($EAR_ID -eq -1) {{
  continue
  }}

@@ -70,11 +82,20 @@ class WindowsPowerShell(Shell):
  $run_dir_abs = "$WK_PATH\\$run_dir"
  Set-Location $run_dir_abs

- {workflow_app_alias} internal workflow $WK_PATH write-commands $SUB_IDX $JS_IDX $JS_elem_idx $JS_act_idx
- {workflow_app_alias} internal workflow $WK_PATH set-ear-start $SUB_IDX $JS_IDX $JS_elem_idx $JS_act_idx
+ $skip = {workflow_app_alias} internal workflow $WK_PATH get-ear-skipped $EAR_ID
+ if ($skip -eq "1") {{
+ continue
+ }}
+
+ {workflow_app_alias} internal workflow $WK_PATH write-commands $SUB_IDX $JS_IDX $JS_act_idx $EAR_ID
+ {workflow_app_alias} internal workflow $WK_PATH set-ear-start $EAR_ID

  . (Join-Path $run_dir_abs "{commands_file_name}")
- {workflow_app_alias} internal workflow $WK_PATH set-ear-end $SUB_IDX $JS_IDX $JS_elem_idx $JS_act_idx
+
+ $exit_code = $LASTEXITCODE
+ $global:LASTEXITCODE = $null
+
+ {workflow_app_alias} internal workflow $WK_PATH set-ear-end $EAR_ID $exit_code

  }}
  """
@@ -125,24 +146,74 @@ class WindowsPowerShell(Shell):
  def format_stream_assignment(self, shell_var_name, command):
  return f"${shell_var_name} = {command}"

- def format_save_parameter(self, workflow_app_alias, param_name, shell_var_name):
+ def format_save_parameter(
+ self, workflow_app_alias, param_name, shell_var_name, EAR_ID
+ ):
  return (
  f"{workflow_app_alias}"
  f" internal workflow $WK_PATH save-parameter {param_name} ${shell_var_name}"
- f" $SUB_IDX $JS_IDX $JS_elem_idx $JS_act_idx"
+ f" {EAR_ID}"
  f"\n"
  )

- def wrap_in_subshell(self, commands: str) -> str:
+ def wrap_in_subshell(self, commands: str, abortable: bool) -> str:
  """Format commands to run within a child scope.

- This assumes commands ends in a newline.
+ This assumes `commands` ends in a newline.

  """
  commands = indent(commands, self.JS_INDENT)
- return dedent(
- """\
- & {{
- {commands}}}
- """
- ).format(commands=commands)
+ if abortable:
+ # run commands as a background job, and poll a file to check for abort
+ # requests:
+ return dedent(
+ """\
+ $job = StartJobHere {{
+ $WK_PATH = $using:WK_PATH
+ $SUB_IDX = $using:SUB_IDX
+ $JS_IDX = $using:JS_IDX
+ $EAR_ID = $using:EAR_ID
+
+ {commands}
+ if ($LASTEXITCODE -ne 0) {{
+ throw
+ }}
+ }}
+
+ $is_abort = $null
+ while ($true) {{
+ $abort_file = JoinMultiPath $WK_PATH artifacts submissions $SUB_IDX abort_EARs.txt
+ $is_abort = get_nth_line $abort_file $EAR_ID
+ if ($job.State -ne "Running") {{
+ break
+ }}
+ elseif ($is_abort -eq "1") {{
+ Stop-Job -Job $job
+ Wait-Job -Job $job
+ break
+ }}
+ else {{
+ Start-Sleep 1 # TODO: TEMP: increase for production
+ }}
+ }}
+ $result = Receive-Job -job $job
+ Write-Host $result
+ if ($job.state -eq "Completed") {{
+ exit 0
+ }}
+ elseif ($is_abort -eq "1") {{
+ exit {abort_exit_code}
+ }}
+ else {{
+ exit 1
+ }}
+ """
+ ).format(commands=commands, abort_exit_code=ABORT_EXIT_CODE)
+ else:
+ # run commands in "foreground":
+ return dedent(
+ """\
+ & {{
+ {commands}}}
+ """
+ ).format(commands=commands)
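
When `abortable` is true, the wrapper above runs the commands as a PowerShell background job (via the new StartJobHere helper) and polls this EAR's line of the submission's abort_EARs.txt roughly once per second, stopping the job and exiting with ABORT_EXIT_CODE when the flag reads "1". A rough Python analogue of the same poll-and-terminate pattern; the ABORT_EXIT_CODE value here is an assumption standing in for the constant imported from hpcflow.sdk.core:

    import subprocess
    import time
    from pathlib import Path
    from typing import List

    ABORT_EXIT_CODE = 64  # assumed value, for illustration only

    def run_abortable(cmd: List[str], abort_file: Path, EAR_ID: int) -> int:
        """Run `cmd`, polling `abort_file` for this EAR's abort flag."""
        proc = subprocess.Popen(cmd)
        while proc.poll() is None:
            # one "0"/"1" line per EAR, indexed by EAR ID:
            if abort_file.read_text().splitlines()[EAR_ID] == "1":
                proc.terminate()
                proc.wait()
                return ABORT_EXIT_CODE
            time.sleep(1)  # matches the template's Start-Sleep 1
        return proc.returncode
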
hpcflow/sdk/submission/submission.py

@@ -47,32 +47,23 @@ class Submission(JSONLike):
  self,
  index: int,
  jobscripts: List[app.Jobscript],
- workflow: app.Workflow,
+ workflow: Optional[app.Workflow] = None,
  submission_attempts: Optional[List] = None,
  JS_parallelism: Optional[bool] = None,
  ):
  self._index = index
  self._jobscripts = jobscripts
- self._workflow = workflow
  self._submission_attempts = submission_attempts or []
  self._JS_parallelism = JS_parallelism

+ if workflow:
+ self.workflow = workflow
+
  self._set_parent_refs()

  for js_idx, js in enumerate(self.jobscripts):
  js._index = js_idx

- # if JS_parallelism explicitly requested but store doesn't support, raise:
- supports_JS_para = self.workflow._store.features.jobscript_parallelism
- if self.JS_parallelism:
- if not supports_JS_para:
- raise ValueError(
- f"Store type {self.workflow._store!r} does not support jobscript "
- f"parallelism."
- )
- elif self.JS_parallelism is None:
- self._JS_parallelism = supports_JS_para
-
  def to_dict(self):
  dct = super().to_dict()
  del dct["_workflow"]
@@ -100,6 +91,20 @@ class Submission(JSONLike):
  def workflow(self) -> List:
  return self._workflow

+ @workflow.setter
+ def workflow(self, wk):
+ self._workflow = wk
+ # if JS_parallelism explicitly requested but store doesn't support, raise:
+ supports_JS_para = self.workflow._store._features.jobscript_parallelism
+ if self.JS_parallelism:
+ if not supports_JS_para:
+ raise ValueError(
+ f"Store type {self.workflow._store!r} does not support jobscript "
+ f"parallelism."
+ )
+ elif self.JS_parallelism is None:
+ self._JS_parallelism = supports_JS_para
+
  @property
  def jobscript_indices(self) -> Tuple[int]:
  """All associated jobscript indices."""
@@ -136,6 +141,23 @@
  def path(self):
  return self.workflow.submissions_path / str(self.index)

+ @property
+ def all_EAR_IDs(self):
+ return [i for js in self.jobscripts for i in js.EAR_ID.flatten()]
+
+ @property
+ def abort_EARs_file_name(self):
+ return f"abort_EARs.txt"
+
+ @property
+ def abort_EARs_file_path(self):
+ return self.path / self.abort_EARs_file_name
+
+ def _write_abort_EARs_file(self):
+ with self.abort_EARs_file_path.open(mode="wt", newline="\n") as fp:
+ # write a single line for each EAR currently in the workflow:
+ fp.write("\n".join("0" for _ in range(self.workflow.num_EARs)) + "\n")
+
  def get_unique_schedulers(self) -> Dict[Tuple[int], Scheduler]:
  """Get a unique schedulers and which jobscripts they correspond to."""
  js_idx = []
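
`_write_abort_EARs_file` initialises one "0" line per EAR, in EAR-ID order, which is what lets the jobscript read its own flag with get_nth_line. Requesting an abort then reduces to rewriting one line; a sketch of that flip, where `request_abort` is a hypothetical helper rather than hpcflow API:

    from pathlib import Path

    def request_abort(abort_file: Path, EAR_ID: int) -> None:
        # flip this EAR's flag from "0" to "1"; the polling jobscript
        # wrapper will observe the change and stop the background job
        lines = abort_file.read_text().splitlines()
        lines[EAR_ID] = "1"
        abort_file.write_text("\n".join(lines) + "\n")
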
@@ -168,23 +190,23 @@

  return shell_js_idx

- def prepare_EAR_submission_idx_update(self) -> List[Tuple[int, int, int, int]]:
- """For all EARs in this submission (across all jobscripts), return a tuple of indices
- that can be passed to `Workflow.set_EAR_submission_index`."""
- indices = []
- for js in self.jobscripts:
- for ear_idx_i, ear_idx_j in js.EARs.items():
- # task insert ID, iteration idx, action idx, run idx:
- indices.append((ear_idx_i[0], ear_idx_j[0], ear_idx_j[1], ear_idx_j[2]))
- return indices
-
- def get_EAR_run_dirs(self) -> Dict[Tuple(int, int, int), Path]:
- indices = []
- for js in self.jobscripts:
- for ear_idx_i, ear_idx_j in js.EARs.items():
- # task insert ID, iteration idx, action idx, run idx:
- indices.append((ear_idx_i[0], ear_idx_j[0], ear_idx_j[1], ear_idx_j[2]))
- return indices
+ # def prepare_EAR_submission_idx_update(self) -> List[Tuple[int, int, int, int]]:
+ # """For all EARs in this submission (across all jobscripts), return a tuple of indices
+ # that can be passed to `Workflow.set_EAR_submission_index`."""
+ # indices = []
+ # for js in self.jobscripts:
+ # for ear_idx_i, ear_idx_j in js.EARs.items():
+ # # task insert ID, iteration idx, action idx, run idx:
+ # indices.append((ear_idx_i[0], ear_idx_j[0], ear_idx_j[1], ear_idx_j[2]))
+ # return indices
+
+ # def get_EAR_run_dirs(self) -> Dict[Tuple(int, int, int), Path]:
+ # indices = []
+ # for js in self.jobscripts:
+ # for ear_idx_i, ear_idx_j in js.EARs.items():
+ # # task insert ID, iteration idx, action idx, run idx:
+ # indices.append((ear_idx_i[0], ear_idx_j[0], ear_idx_j[1], ear_idx_j[2]))
+ # return indices

  def _raise_failure(self, submitted_js_idx, exceptions):
  msg = f"Some jobscripts in submission index {self.index} could not be submitted"
@@ -217,13 +239,12 @@

  def _append_submission_attempt(self, submitted_js_idx: List[int]):
  self._submission_attempts.append(submitted_js_idx)
- self.workflow._store.append_submission_attempt(
+ self.workflow._store.add_submission_attempt(
  sub_idx=self.index, submitted_js_idx=submitted_js_idx
  )

  def submit(
  self,
- task_artifacts_path,
  ignore_errors=False,
  print_stdout=False,
  ) -> List[int]:
@@ -248,8 +269,6 @@
  js_vers_info[js_idx] = {}
  js_vers_info[js_idx].update(vers_info)

- # self.jobscripts[js_idx]._set_version_info(vers_info)
-
  for js_indices, shell in self.get_unique_shells().items():
  try:
  vers_info = shell.get_version_info()
@@ -267,7 +286,11 @@
  for js_idx, vers_info_i in js_vers_info.items():
  self.jobscripts[js_idx]._set_version_info(vers_info_i)

+ # TODO: a submission should only be "submitted" once shouldn't it?
  self.path.mkdir(exist_ok=True)
+ if not self.abort_EARs_file_path.is_file():
+ self._write_abort_EARs_file()
+
  scheduler_refs = {} # map jobscript `index` to scheduler job IDs
  submitted_js_idx = []
  errs = []
@@ -285,10 +308,11 @@

  try:
  scheduler_refs[js.index] = js.submit(
- task_artifacts_path,
  scheduler_refs,
  print_stdout=print_stdout,
  )
+ # note: currently for direct exec, this is not reached, so submission_status
+ # stays as pending.
  submitted_js_idx.append(js.index)

  except JobscriptSubmissionFailure as err:
hpcflow/tests/unit/test_element.py

@@ -1,6 +1,5 @@
  import pytest
  from hpcflow.app import app as hf
- from hpcflow.sdk.core.actions import ElementID
  from hpcflow.sdk.core.test_utils import make_schemas


@@ -44,10 +43,8 @@ def test_element_dependent_tasks(workflow_w1):
  def test_element_element_dependencies(workflow_w1):
  assert all(
  (
- workflow_w1.tasks.t2.elements[0].get_element_dependencies()
- == [ElementID(0, 0)],
- workflow_w1.tasks.t2.elements[1].get_element_dependencies()
- == [ElementID(0, 1)],
+ workflow_w1.tasks.t2.elements[0].get_element_dependencies() == [0],
+ workflow_w1.tasks.t2.elements[1].get_element_dependencies() == [1],
  )
  )

@@ -55,9 +52,7 @@ def test_element_element_dependencies(workflow_w1):
  def test_element_dependent_elements(workflow_w1):
  assert all(
  (
- workflow_w1.tasks.t1.elements[0].get_dependent_elements()
- == [ElementID(1, 0)],
- workflow_w1.tasks.t1.elements[1].get_dependent_elements()
- == [ElementID(1, 1)],
+ workflow_w1.tasks.t1.elements[0].get_dependent_elements() == [2],
+ workflow_w1.tasks.t1.elements[1].get_dependent_elements() == [3],
  )
  )
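
These expectations change because elements are now identified by a single integer ID that is unique across the whole workflow, replacing the former ElementID(task_insert_ID, element_idx) tuples. With two elements in t1 followed by two in t2, IDs are allocated in creation order, so t2's elements are 2 and 3. A toy illustration of that allocation (not a reproduction of hpcflow's ID bookkeeping):

    # elements in creation order: (task, local element index)
    elements = [("t1", 0), ("t1", 1), ("t2", 0), ("t2", 1)]
    flat_ID = {elem: ID for ID, elem in enumerate(elements)}
    assert flat_ID[("t2", 0)] == 2  # matches the new [2, 3] expectations above
    assert flat_ID[("t2", 1)] == 3
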
hpcflow/tests/unit/test_persistence.py (new file)

@@ -0,0 +1,218 @@
+ import pytest
+ from hpcflow.sdk.persistence.base import StoreEAR, StoreElement, StoreElementIter
+ from hpcflow.sdk.persistence.json import JSONPersistentStore
+
+ from hpcflow.app import app as hf
+
+
+ @pytest.mark.skip("need to refactor `make_test_store_from_spec`")
+ def test_store_pending_add_task(tmp_path):
+ """Check expected pending state after adding a task."""
+
+ # make store: 0 tasks:
+ store = JSONPersistentStore.make_test_store_from_spec([], dir=tmp_path)
+ task_ID = store.add_task()
+ assert store._pending.add_tasks == {task_ID: []}
+
+
+ @pytest.mark.skip("need to refactor `make_test_store_from_spec`")
+ def test_store_pending_add_element(tmp_path):
+ """Check expected pending state after adding an element."""
+
+ # make store: 1 task with 0 elements:
+ store = JSONPersistentStore.make_test_store_from_spec(app=hf, spec=[{}], dir=tmp_path)
+ elem_ID = store.add_element(task_ID=0)
+ assert store._pending.add_elements == {
+ elem_ID: StoreElement(
+ id_=elem_ID,
+ is_pending=True,
+ element_idx=0,
+ task_ID=0,
+ iteration_IDs=[],
+ )
+ } and store._pending.add_task_element_IDs == {0: [0]}
+
+
+ @pytest.mark.skip("need to refactor `make_test_store_from_spec`")
+ @pytest.mark.parametrize("elem_ID", [0, 1])
+ def test_store_pending_add_element_iter(tmp_path, elem_ID):
+ """Check expected pending state after adding an element iteration."""
+
+ # make store: 1 task with 2 elements and 0 iterations:
+ store = JSONPersistentStore.make_test_store_from_spec(
+ [{"elements": [{}, {}]}],
+ dir=tmp_path,
+ )
+ iter_ID = store.add_element_iteration(
+ element_ID=elem_ID,
+ data_idx={},
+ schema_parameters=[],
+ )
+ assert store._pending.add_elem_iters == {
+ iter_ID: StoreElementIter(
+ id_=iter_ID,
+ is_pending=True,
+ element_ID=elem_ID,
+ EAR_IDs={},
+ data_idx={},
+ schema_parameters=[],
+ )
+ } and store._pending.add_elem_iter_IDs == {elem_ID: [iter_ID]}
+
+
+ @pytest.mark.skip("need to refactor `make_test_store_from_spec`")
+ def test_store_pending_add_EAR(tmp_path):
+ """Check expected pending state after adding an EAR."""
+
+ # make store: 1 task with 1 element and 1 iteration:
+ store = JSONPersistentStore.make_test_store_from_spec(
+ [{"elements": [{"iterations": [{}]}]}],
+ dir=tmp_path,
+ )
+ EAR_ID = store.add_EAR(
+ elem_iter_ID=0,
+ action_idx=0,
+ data_idx={},
+ metadata={},
+ )
+ assert store._pending.add_EARs == {
+ EAR_ID: StoreEAR(
+ id_=EAR_ID,
+ is_pending=True,
+ elem_iter_ID=0,
+ action_idx=0,
+ data_idx={},
+ metadata={},
+ )
+ }
+
+
+ @pytest.mark.skip("need to refactor `make_test_store_from_spec`")
+ def test_get_task_elements_task_is_pending(tmp_path):
+ """Check we get an empty list when getting all task elements of a pending task to
+ which no elements have been added."""
+ # make store: 0 tasks:
+ store = JSONPersistentStore.make_test_store_from_spec([], dir=tmp_path)
+ task_ID = store.add_task()
+ assert store.get_task_elements(task_ID, slice(0, None)) == []
+
+
+ @pytest.mark.skip("need to refactor `make_test_store_from_spec`")
+ def test_get_task_elements_single_element_is_pending(tmp_path):
+ """Check expected return when getting all task elements of a persistent task that has
+ a single pending element."""
+ # make store: 1 task
+ store = JSONPersistentStore.make_test_store_from_spec([{}], dir=tmp_path)
+ store.add_element(task_ID=0)
+ assert store.get_task_elements(0, slice(0, None)) == [
+ {
+ "id": 0,
+ "is_pending": True,
+ "element_idx": 0,
+ "iteration_IDs": [],
+ "task_ID": 0,
+ "iterations": [],
+ }
+ ]
+
+
+ @pytest.mark.skip("need to refactor `make_test_store_from_spec`")
+ def test_get_task_elements_multi_element_one_pending(tmp_path):
+ """Check expected return when getting all task elements of a persistent task that has
+ a persistent element and a pending element."""
+ # make store: 1 task with 1 element:
+ store = JSONPersistentStore.make_test_store_from_spec(
+ [{"elements": [{}]}], dir=tmp_path
+ )
+ store.add_element(task_ID=0)
+ assert store.get_task_elements(0, slice(0, None)) == [
+ {
+ "id": 0,
+ "is_pending": False,
+ "element_idx": 0,
+ "iteration_IDs": [],
+ "task_ID": 0,
+ "iterations": [],
+ },
+ {
+ "id": 1,
+ "is_pending": True,
+ "element_idx": 1,
+ "iteration_IDs": [],
+ "task_ID": 0,
+ "iterations": [],
+ },
+ ]
+
+
+ @pytest.mark.skip("need to refactor `make_test_store_from_spec`")
+ def test_get_task_elements_single_element_iter_pending(tmp_path):
+ """Check expected return when getting all task elements of a persistent task that has
+ a persistent element with a pending iteration."""
+ # make store: 1 task with 1 element:
+ store = JSONPersistentStore.make_test_store_from_spec(
+ [{"elements": [{}]}], dir=tmp_path
+ )
+ store.add_element_iteration(element_ID=0, data_idx={}, schema_parameters=[])
+ assert store.get_task_elements(0, slice(0, None)) == [
+ {
+ "id": 0,
+ "is_pending": False,
+ "element_idx": 0,
+ "iteration_IDs": [0],
+ "task_ID": 0,
+ "iterations": [
+ {
+ "id": 0,
+ "is_pending": True,
+ "element_ID": 0,
+ "EAR_IDs": {},
+ "data_idx": {},
+ "schema_parameters": [],
+ "EARs": {},
+ }
+ ],
+ },
+ ]
+
+
+ @pytest.mark.skip("need to refactor `make_test_store_from_spec`")
+ def test_get_task_elements_single_element_iter_EAR_pending(tmp_path):
+ """Check expected return when getting all task elements of a persistent task that has
+ a persistent element with a persistent iteration and a pending EAR"""
+ # make store: 1 task with 1 element with 1 iteration:
+ store = JSONPersistentStore.make_test_store_from_spec(
+ [{"elements": [{"iterations": [{}]}]}], dir=tmp_path
+ )
+ store.add_EAR(elem_iter_ID=0, action_idx=0, data_idx={}, metadata={})
+ assert store.get_task_elements(0, slice(0, None)) == [
+ {
+ "id": 0,
+ "is_pending": False,
+ "element_idx": 0,
+ "iteration_IDs": [0],
+ "task_ID": 0,
+ "iterations": [
+ {
+ "id": 0,
+ "is_pending": False,
+ "element_ID": 0,
+ "EAR_IDs": {0: [0]},
+ "data_idx": {},
+ "schema_parameters": [],
+ "EARs": {
+ 0: [
+ {
+ "id_": 0,
+ "is_pending": True,
+ "elem_iter_ID": 0,
+ "action_idx": 0,
+ "data_idx": {},
+ "metadata": {},
+ }
+ ]
+ },
+ },
+ ],
+ },
+ ]
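
This new test module (currently skipped pending a refactor of `make_test_store_from_spec`) pins down the stage-then-commit model of the reworked persistence layer: `add_*` calls assign fresh IDs and stage Store* dataclasses on `store._pending`, while reads such as `get_task_elements` must merge pending items over persistent ones. The pattern in miniature, as an illustration rather than hpcflow's implementation:

    from dataclasses import dataclass, field

    @dataclass
    class MiniStore:
        persistent: dict = field(default_factory=dict)
        pending: dict = field(default_factory=dict)

        def add(self, item) -> int:
            new_ID = len(self.persistent) + len(self.pending)
            self.pending[new_ID] = item  # staged only; nothing written yet
            return new_ID

        def get(self, ID):
            # reads see staged items merged over persistent ones:
            return self.pending.get(ID, self.persistent.get(ID))

        def commit(self) -> None:
            self.persistent.update(self.pending)  # flush staged items
            self.pending.clear()
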
hpcflow/tests/unit/test_task.py

@@ -1,7 +1,6 @@
  import copy
  import pytest
  from hpcflow.app import app as hf
- from hpcflow.sdk.core.actions import ElementID
  from hpcflow.sdk.core.errors import (
  MissingInputs,
  TaskTemplateInvalidNesting,
@@ -644,7 +643,7 @@ def test_task_element_dependencies(tmp_path):
  nesting_orders={1: {"inputs.p2": 0}},
  path=tmp_path,
  )
- assert wk.tasks.t2.get_element_dependencies() == [ElementID(0, 0), ElementID(0, 1)]
+ assert wk.tasks.t2.get_element_dependencies() == [0, 1]


  def test_task_dependent_elements(tmp_path):
@@ -657,7 +656,7 @@ def test_task_dependent_elements(tmp_path):
  nesting_orders={1: {"inputs.p2": 0}},
  path=tmp_path,
  )
- assert wk.tasks.t1.get_dependent_elements() == [ElementID(1, 0), ElementID(1, 1)]
+ assert wk.tasks.t1.get_dependent_elements() == [2, 3]


  def test_task_add_elements_without_propagation_expected_workflow_num_elements(
@@ -708,9 +707,9 @@ def test_task_add_elements_without_propagation_expected_new_data_index(
  nesting_orders={1: {"inputs.p2": 0}},
  path=tmp_path,
  )
- data_index = [sorted(i.get_data_idx().keys()) for i in wk.tasks.t1.elements]
+ data_index = [sorted(i.get_data_idx().keys()) for i in wk.tasks.t1.elements[:]]
  wk.tasks.t1.add_elements(inputs=[hf.InputValue(param_p1, 103)])
- data_index_new = [sorted(i.get_data_idx().keys()) for i in wk.tasks.t1.elements]
+ data_index_new = [sorted(i.get_data_idx().keys()) for i in wk.tasks.t1.elements[:]]
  new_elems = data_index_new[len(data_index) :]
  assert new_elems == [["inputs.p1", "outputs.p2", "resources.any"]]

@@ -1281,14 +1280,14 @@ def test_no_change_to_tasks_metadata_on_add_task_failure(tmp_path):
  local_inputs={0: ("p1",)},
  path=tmp_path,
  )
- tasks_meta = copy.deepcopy(wk._store.get_all_tasks_metadata())
+ tasks_meta = copy.deepcopy(wk._store.get_tasks())

  s2 = make_schemas([[{"p1": None, "p3": None}, ()]])
  t2 = hf.Task(schemas=s2)
  with pytest.raises(MissingInputs) as exc_info:
  wk.add_task(t2)

- assert wk._store.get_all_tasks_metadata() == tasks_meta
+ assert wk._store.get_tasks() == tasks_meta


  def test_no_change_to_parameter_data_on_add_task_failure(tmp_path, param_p2, param_p3):
@@ -1297,13 +1296,13 @@ def test_no_change_to_parameter_data_on_add_task_failure(tmp_path, param_p2, par
  local_inputs={0: ("p1",)},
  path=tmp_path,
  )
- param_data = copy.deepcopy(wk.get_all_parameter_data())
+ param_data = copy.deepcopy(wk.get_all_parameters())
  s2 = make_schemas([[{"p1": None, "p2": None, "p3": None}, ()]])
  t2 = hf.Task(schemas=s2, inputs=[hf.InputValue(param_p2, 201)])
  with pytest.raises(MissingInputs) as exc_info:
  wk.add_task(t2)

- assert wk.get_all_parameter_data() == param_data
+ assert wk.get_all_parameters() == param_data


  def test_expected_additional_parameter_data_on_add_task(tmp_path, param_p3):
@@ -1321,7 +1320,7 @@ def test_expected_additional_parameter_data_on_add_task(tmp_path, param_p3):
  param_data_new = wk.get_all_parameter_data()

  new_keys = set(param_data_new.keys()) - set(param_data.keys())
- new_data = [param_data_new[k][1] for k in new_keys]
+ new_data = [param_data_new[k] for k in new_keys]

  # one new key for resources, one for param_p3 value
  res = {k: None for k in hf.ResourceSpec.ALLOWED_PARAMETERS}
@@ -1337,7 +1336,7 @@ def test_parameters_accepted_on_add_task(tmp_path, param_p3):
  s2 = make_schemas([[{"p1": None, "p3": None}, ()]])
  t2 = hf.Task(schemas=s2, inputs=[hf.InputValue(param_p3, 301)])
  wk.add_task(t2)
- assert not wk._store._pending["parameter_data"]
+ assert not wk._store._pending.add_parameters


  def test_parameters_pending_during_add_task(tmp_path, param_p3):
@@ -1350,7 +1349,7 @@ def test_parameters_pending_during_add_task(tmp_path, param_p3):
  t2 = hf.Task(schemas=s2, inputs=[hf.InputValue(param_p3, 301)])
  with wk.batch_update():
  wk.add_task(t2)
- assert wk._store._pending["parameter_data"]
+ assert wk._store._pending.add_parameters


  def test_add_task_after(workflow_w0):
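
The last two updated tests exercise the same staging behaviour through the public API: inside a `batch_update` context, additions accumulate on `wk._store._pending` (now an object with typed attributes such as `add_parameters`, rather than a plain dict), and the pending set is empty again once the additions have been committed. Continuing the illustrative MiniStore sketch from above:

    store = MiniStore()
    ID = store.add("p3 = 301")
    assert store.pending              # staged while the "batch" is open
    store.commit()                    # analogous to what closing `batch_update` does
    assert not store.pending and store.get(ID) == "p3 = 301"
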