prefect-client 3.1.6__py3-none-any.whl → 3.1.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. prefect/_experimental/__init__.py +0 -0
  2. prefect/_experimental/lineage.py +181 -0
  3. prefect/_internal/compatibility/async_dispatch.py +38 -9
  4. prefect/_internal/pydantic/v2_validated_func.py +15 -10
  5. prefect/_internal/retries.py +15 -6
  6. prefect/_internal/schemas/bases.py +2 -1
  7. prefect/_internal/schemas/validators.py +5 -4
  8. prefect/_version.py +3 -3
  9. prefect/blocks/core.py +144 -17
  10. prefect/blocks/system.py +2 -1
  11. prefect/client/orchestration.py +106 -0
  12. prefect/client/schemas/actions.py +5 -5
  13. prefect/client/schemas/filters.py +1 -1
  14. prefect/client/schemas/objects.py +5 -5
  15. prefect/client/schemas/responses.py +1 -2
  16. prefect/client/schemas/schedules.py +1 -1
  17. prefect/client/subscriptions.py +2 -1
  18. prefect/client/utilities.py +15 -1
  19. prefect/context.py +1 -1
  20. prefect/deployments/flow_runs.py +3 -3
  21. prefect/deployments/runner.py +14 -14
  22. prefect/deployments/steps/core.py +3 -1
  23. prefect/deployments/steps/pull.py +60 -12
  24. prefect/events/clients.py +55 -4
  25. prefect/events/filters.py +1 -1
  26. prefect/events/related.py +2 -1
  27. prefect/events/schemas/events.py +1 -1
  28. prefect/events/utilities.py +2 -0
  29. prefect/events/worker.py +8 -0
  30. prefect/flow_engine.py +41 -81
  31. prefect/flow_runs.py +4 -2
  32. prefect/flows.py +4 -6
  33. prefect/results.py +43 -22
  34. prefect/runner/runner.py +129 -18
  35. prefect/runner/storage.py +3 -3
  36. prefect/serializers.py +28 -24
  37. prefect/settings/__init__.py +1 -0
  38. prefect/settings/base.py +3 -2
  39. prefect/settings/models/api.py +4 -0
  40. prefect/settings/models/experiments.py +5 -0
  41. prefect/settings/models/runner.py +8 -0
  42. prefect/settings/models/server/api.py +7 -1
  43. prefect/task_engine.py +34 -26
  44. prefect/task_worker.py +43 -25
  45. prefect/tasks.py +118 -125
  46. prefect/telemetry/instrumentation.py +1 -1
  47. prefect/telemetry/processors.py +10 -7
  48. prefect/telemetry/run_telemetry.py +157 -33
  49. prefect/types/__init__.py +4 -1
  50. prefect/variables.py +127 -19
  51. {prefect_client-3.1.6.dist-info → prefect_client-3.1.8.dist-info}/METADATA +2 -1
  52. {prefect_client-3.1.6.dist-info → prefect_client-3.1.8.dist-info}/RECORD +55 -53
  53. {prefect_client-3.1.6.dist-info → prefect_client-3.1.8.dist-info}/LICENSE +0 -0
  54. {prefect_client-3.1.6.dist-info → prefect_client-3.1.8.dist-info}/WHEEL +0 -0
  55. {prefect_client-3.1.6.dist-info → prefect_client-3.1.8.dist-info}/top_level.txt +0 -0
prefect/flow_engine.py CHANGED
@@ -24,10 +24,8 @@ from uuid import UUID
 
 from anyio import CancelScope
 from opentelemetry import propagate, trace
-from opentelemetry.trace import Tracer, get_tracer
 from typing_extensions import ParamSpec
 
-import prefect
 from prefect import Task
 from prefect.client.orchestration import PrefectClient, SyncPrefectClient, get_client
 from prefect.client.schemas import FlowRun, TaskRun
@@ -72,7 +70,12 @@ from prefect.states import (
     exception_to_failed_state,
     return_value_to_state,
 )
-from prefect.telemetry.run_telemetry import OTELSetter
+from prefect.telemetry.run_telemetry import (
+    LABELS_TRACEPARENT_KEY,
+    TRACEPARENT_KEY,
+    OTELSetter,
+    RunTelemetry,
+)
 from prefect.types import KeyValueLabels
 from prefect.utilities._engine import get_hook_name, resolve_custom_flow_run_name
 from prefect.utilities.annotations import NotSet
@@ -95,8 +98,6 @@ from prefect.utilities.urls import url_for
 
 P = ParamSpec("P")
 R = TypeVar("R")
-LABELS_TRACEPARENT_KEY = "__OTEL_TRACEPARENT"
-TRACEPARENT_KEY = "traceparent"
 
 
 class FlowRunTimeoutError(TimeoutError):
@@ -136,10 +137,7 @@ class BaseFlowRunEngine(Generic[P, R]):
     _is_started: bool = False
     short_circuit: bool = False
     _flow_run_name_set: bool = False
-    _tracer: Tracer = field(
-        default_factory=lambda: get_tracer("prefect", prefect.__version__)
-    )
-    _span: Optional[trace.Span] = None
+    _telemetry: RunTelemetry = field(default_factory=RunTelemetry)
 
     def __post_init__(self):
         if self.flow is None and self.flow_run_id is None:
@@ -152,21 +150,6 @@ class BaseFlowRunEngine(Generic[P, R]):
     def state(self) -> State:
         return self.flow_run.state  # type: ignore
 
-    def _end_span_on_success(self):
-        if not self._span:
-            return
-        self._span.set_status(trace.Status(trace.StatusCode.OK))
-        self._span.end(time.time_ns())
-        self._span = None
-
-    def _end_span_on_error(self, exc: BaseException, description: Optional[str]):
-        if not self._span:
-            return
-        self._span.record_exception(exc)
-        self._span.set_status(trace.Status(trace.StatusCode.ERROR, description))
-        self._span.end(time.time_ns())
-        self._span = None
-
     def is_running(self) -> bool:
         if getattr(self, "flow_run", None) is None:
             return False
@@ -185,6 +168,7 @@ class BaseFlowRunEngine(Generic[P, R]):
         self, span: trace.Span, client: Union[SyncPrefectClient, PrefectClient]
     ):
         parent_flow_run_ctx = FlowRunContext.get()
+
         if parent_flow_run_ctx and parent_flow_run_ctx.flow_run:
             if traceparent := parent_flow_run_ctx.flow_run.labels.get(
                 LABELS_TRACEPARENT_KEY
@@ -194,6 +178,7 @@ class BaseFlowRunEngine(Generic[P, R]):
                     carrier={TRACEPARENT_KEY: traceparent},
                     setter=OTELSetter(),
                 )
+
         else:
             carrier: KeyValueLabels = {}
             propagate.get_global_textmap().inject(
@@ -315,16 +300,7 @@ class FlowRunEngine(BaseFlowRunEngine[P, R]):
         self.flow_run.state_name = state.name  # type: ignore
         self.flow_run.state_type = state.type  # type: ignore
 
-        if self._span:
-            self._span.add_event(
-                state.name or state.type,
-                {
-                    "prefect.state.message": state.message or "",
-                    "prefect.state.type": state.type,
-                    "prefect.state.name": state.name or state.type,
-                    "prefect.state.id": str(state.id),
-                },
-            )
+        self._telemetry.update_state(state)
         return state
 
     def result(self, raise_on_failure: bool = True) -> "Union[R, State, None]":
@@ -374,7 +350,7 @@ class FlowRunEngine(BaseFlowRunEngine[P, R]):
         self.set_state(terminal_state)
         self._return_value = resolved_result
 
-        self._end_span_on_success()
+        self._telemetry.end_span_on_success()
 
         return result
 
@@ -406,8 +382,8 @@ class FlowRunEngine(BaseFlowRunEngine[P, R]):
             )
             state = self.set_state(Running())
         self._raised = exc
-
-        self._end_span_on_error(exc, state.message)
+        self._telemetry.record_exception(exc)
+        self._telemetry.end_span_on_failure(state.message)
 
         return state
 
@@ -426,8 +402,8 @@ class FlowRunEngine(BaseFlowRunEngine[P, R]):
         )
         self.set_state(state)
         self._raised = exc
-
-        self._end_span_on_error(exc, message)
+        self._telemetry.record_exception(exc)
+        self._telemetry.end_span_on_failure(message)
 
     def handle_crash(self, exc: BaseException) -> None:
         state = run_coro_as_sync(exception_to_crashed_state(exc))
@@ -435,8 +411,8 @@ class FlowRunEngine(BaseFlowRunEngine[P, R]):
         self.logger.debug("Crash details:", exc_info=exc)
         self.set_state(state, force=True)
         self._raised = exc
-
-        self._end_span_on_error(exc, state.message if state else "")
+        self._telemetry.record_exception(exc)
+        self._telemetry.end_span_on_failure(state.message if state else None)
 
     def load_subflow_run(
         self,
@@ -681,19 +657,12 @@ class FlowRunEngine(BaseFlowRunEngine[P, R]):
                     empirical_policy=self.flow_run.empirical_policy,
                 )
 
-            span = self._tracer.start_span(
-                name=self.flow_run.name,
-                attributes={
-                    **self.flow_run.labels,
-                    "prefect.run.type": "flow",
-                    "prefect.run.id": str(self.flow_run.id),
-                    "prefect.tags": self.flow_run.tags,
-                    "prefect.flow.name": self.flow.name,
-                },
+            self._telemetry.start_span(
+                name=self.flow.name,
+                run=self.flow_run,
+                client=self.client,
+                parameters=self.parameters,
             )
-            self._update_otel_labels(span, self.client)
-
-            self._span = span
 
             try:
                 yield self
@@ -736,7 +705,9 @@ class FlowRunEngine(BaseFlowRunEngine[P, R]):
     @contextmanager
     def start(self) -> Generator[None, None, None]:
         with self.initialize_run():
-            with trace.use_span(self._span) if self._span else nullcontext():
+            with trace.use_span(
+                self._telemetry.span
+            ) if self._telemetry.span else nullcontext():
                 self.begin_run()
 
                 if self.state.is_running():
@@ -892,16 +863,7 @@ class AsyncFlowRunEngine(BaseFlowRunEngine[P, R]):
         self.flow_run.state_name = state.name  # type: ignore
         self.flow_run.state_type = state.type  # type: ignore
 
-        if self._span:
-            self._span.add_event(
-                state.name or state.type,
-                {
-                    "prefect.state.message": state.message or "",
-                    "prefect.state.type": state.type,
-                    "prefect.state.name": state.name or state.type,
-                    "prefect.state.id": str(state.id),
-                },
-            )
+        self._telemetry.update_state(state)
         return state
 
     async def result(self, raise_on_failure: bool = True) -> "Union[R, State, None]":
@@ -949,7 +911,7 @@ class AsyncFlowRunEngine(BaseFlowRunEngine[P, R]):
         await self.set_state(terminal_state)
         self._return_value = resolved_result
 
-        self._end_span_on_success()
+        self._telemetry.end_span_on_success()
 
         return result
 
@@ -979,8 +941,8 @@ class AsyncFlowRunEngine(BaseFlowRunEngine[P, R]):
             )
             state = await self.set_state(Running())
         self._raised = exc
-
-        self._end_span_on_error(exc, state.message)
+        self._telemetry.record_exception(exc)
+        self._telemetry.end_span_on_failure(state.message)
 
         return state
 
@@ -1000,7 +962,8 @@ class AsyncFlowRunEngine(BaseFlowRunEngine[P, R]):
         await self.set_state(state)
         self._raised = exc
 
-        self._end_span_on_error(exc, message)
+        self._telemetry.record_exception(exc)
+        self._telemetry.end_span_on_failure(message)
 
     async def handle_crash(self, exc: BaseException) -> None:
         # need to shield from asyncio cancellation to ensure we update the state
@@ -1012,7 +975,8 @@ class AsyncFlowRunEngine(BaseFlowRunEngine[P, R]):
         await self.set_state(state, force=True)
         self._raised = exc
 
-        self._end_span_on_error(exc, state.message)
+        self._telemetry.record_exception(exc)
+        self._telemetry.end_span_on_failure(state.message)
 
     async def load_subflow_run(
         self,
@@ -1255,18 +1219,12 @@ class AsyncFlowRunEngine(BaseFlowRunEngine[P, R]):
                     empirical_policy=self.flow_run.empirical_policy,
                 )
 
-            span = self._tracer.start_span(
-                name=self.flow_run.name,
-                attributes={
-                    **self.flow_run.labels,
-                    "prefect.run.type": "flow",
-                    "prefect.run.id": str(self.flow_run.id),
-                    "prefect.tags": self.flow_run.tags,
-                    "prefect.flow.name": self.flow.name,
-                },
+            await self._telemetry.async_start_span(
+                name=self.flow.name,
+                run=self.flow_run,
+                client=self.client,
+                parameters=self.parameters,
             )
-            self._update_otel_labels(span, self.client)
-            self._span = span
 
             try:
                 yield self
@@ -1309,7 +1267,9 @@ class AsyncFlowRunEngine(BaseFlowRunEngine[P, R]):
    @asynccontextmanager
    async def start(self) -> AsyncGenerator[None, None]:
        async with self.initialize_run():
-            with trace.use_span(self._span) if self._span else nullcontext():
+            with trace.use_span(
+                self._telemetry.span
+            ) if self._telemetry.span else nullcontext():
                await self.begin_run()
 
                if self.state.is_running():
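The inline OpenTelemetry span handling removed above now sits behind a RunTelemetry helper (prefect/telemetry/run_telemetry.py, +157 -33 in this release). Below is a hypothetical, minimal sketch of that helper's surface, reconstructed only from the call sites visible in this diff (start_span, update_state, record_exception, end_span_on_success, end_span_on_failure, and the span attribute); the real class also exposes async_start_span for the async engine and handles traceparent label propagation and run attributes.

```python
# Hypothetical sketch, not the shipped implementation.
from dataclasses import dataclass, field
from time import time_ns
from typing import Any, Optional

from opentelemetry import trace
from opentelemetry.trace import Tracer, get_tracer

import prefect


@dataclass
class RunTelemetrySketch:
    _tracer: Tracer = field(
        default_factory=lambda: get_tracer("prefect", prefect.__version__)
    )
    span: Optional[trace.Span] = None

    def start_span(
        self,
        name: str,
        run: Any,
        client: Any = None,
        parameters: Optional[dict[str, Any]] = None,
    ) -> trace.Span:
        # Replaces the inline self._tracer.start_span(...) blocks removed above;
        # the client is presumably used to persist the traceparent label.
        self.span = self._tracer.start_span(
            name=name,
            attributes={
                "prefect.run.type": "flow",
                "prefect.run.id": str(run.id),
            },
        )
        return self.span

    def update_state(self, state: Any) -> None:
        # Replaces the inline span.add_event(...) blocks removed above.
        if self.span:
            self.span.add_event(
                state.name or str(state.type),
                {"prefect.state.message": state.message or ""},
            )

    def record_exception(self, exc: BaseException) -> None:
        if self.span:
            self.span.record_exception(exc)

    def end_span_on_success(self) -> None:
        if self.span:
            self.span.set_status(trace.Status(trace.StatusCode.OK))
            self.span.end(time_ns())
            self.span = None

    def end_span_on_failure(self, description: Optional[str] = None) -> None:
        if self.span:
            self.span.set_status(trace.Status(trace.StatusCode.ERROR, description))
            self.span.end(time_ns())
            self.span = None
```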
prefect/flow_runs.py CHANGED
@@ -1,6 +1,6 @@
 from typing import (
     TYPE_CHECKING,
-    Dict,
+    Any,
     Optional,
     Type,
     TypeVar,
@@ -430,7 +430,9 @@ async def suspend_flow_run(
 
 
 @sync_compatible
-async def resume_flow_run(flow_run_id, run_input: Optional[Dict] = None):
+async def resume_flow_run(
+    flow_run_id: UUID, run_input: Optional[dict[str, Any]] = None
+) -> None:
     """
     Resumes a paused flow.
 
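For context on the tightened signature: resume_flow_run is decorated with @sync_compatible, so it can be awaited or called synchronously. A minimal usage sketch (the flow run id and the run_input payload are placeholders):

```python
import asyncio
from uuid import UUID

from prefect.flow_runs import resume_flow_run


async def main() -> None:
    # Placeholder id; run_input is only consumed by flows paused while
    # waiting for input.
    await resume_flow_run(
        UUID("00000000-0000-0000-0000-000000000000"),
        run_input={"approved": True},
    )


asyncio.run(main())
```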
prefect/flows.py CHANGED
@@ -564,14 +564,12 @@ class Flow(Generic[P, R]):
                 "Cannot mix Pydantic v1 and v2 types as arguments to a flow."
             )
 
+        validated_fn_kwargs = dict(arbitrary_types_allowed=True)
+
         if has_v1_models:
-            validated_fn = V1ValidatedFunction(
-                self.fn, config={"arbitrary_types_allowed": True}
-            )
+            validated_fn = V1ValidatedFunction(self.fn, config=validated_fn_kwargs)
         else:
-            validated_fn = V2ValidatedFunction(
-                self.fn, config=pydantic.ConfigDict(arbitrary_types_allowed=True)
-            )
+            validated_fn = V2ValidatedFunction(self.fn, config=validated_fn_kwargs)
 
         try:
             with warnings.catch_warnings():
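The flows.py change collapses the two config arguments into one shared mapping. This works because pydantic.ConfigDict is a TypedDict, so constructing it yields an ordinary dict at runtime; a quick check of that equivalence:

```python
import pydantic

# ConfigDict is a TypedDict, so both spellings produce the same plain dict.
assert pydantic.ConfigDict(arbitrary_types_allowed=True) == dict(
    arbitrary_types_allowed=True
)
```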
prefect/results.py CHANGED
@@ -35,10 +35,13 @@ from pydantic import (
     model_validator,
 )
 from pydantic_core import PydanticUndefinedType
-from pydantic_extra_types.pendulum_dt import DateTime
 from typing_extensions import ParamSpec, Self
 
 import prefect
+from prefect._experimental.lineage import (
+    emit_result_read_event,
+    emit_result_write_event,
+)
 from prefect._internal.compatibility import deprecated
 from prefect._internal.compatibility.deprecated import deprecated_field
 from prefect.blocks.core import Block
@@ -57,6 +60,7 @@ from prefect.locking.protocol import LockManager
 from prefect.logging import get_logger
 from prefect.serializers import PickleSerializer, Serializer
 from prefect.settings.context import get_current_settings
+from prefect.types import DateTime
 from prefect.utilities.annotations import NotSet
 from prefect.utilities.asyncutils import sync_compatible
 from prefect.utilities.pydantic import get_dispatch_key, lookup_type, register_base_type
@@ -129,7 +133,7 @@ async def resolve_result_storage(
     elif isinstance(result_storage, Path):
         storage_block = LocalFileSystem(basepath=str(result_storage))
     elif isinstance(result_storage, str):
-        storage_block = await Block.load(result_storage, client=client)
+        storage_block = await Block.aload(result_storage, client=client)
         storage_block_id = storage_block._block_document_id
         assert storage_block_id is not None, "Loaded storage blocks must have ids"
     elif isinstance(result_storage, UUID):
@@ -168,7 +172,7 @@ async def get_or_create_default_task_scheduling_storage() -> ResultStorage:
     default_block = settings.tasks.scheduling.default_storage_block
 
     if default_block is not None:
-        return await Block.load(default_block)
+        return await Block.aload(default_block)
 
     # otherwise, use the local file system
     basepath = settings.results.local_storage_path
@@ -232,6 +236,10 @@ def _format_user_supplied_storage_key(key: str) -> str:
 T = TypeVar("T")
 
 
+def default_cache() -> LRUCache[str, "ResultRecord[Any]"]:
+    return LRUCache(maxsize=1000)
+
+
 def result_storage_discriminator(x: Any) -> str:
     if isinstance(x, dict):
         if "block_type_slug" in x:
@@ -284,7 +292,7 @@ class ResultStore(BaseModel):
     cache_result_in_memory: bool = Field(default=True)
     serializer: Serializer = Field(default_factory=get_default_result_serializer)
     storage_key_fn: Callable[[], str] = Field(default=DEFAULT_STORAGE_KEY_FN)
-    cache: LRUCache = Field(default_factory=lambda: LRUCache(maxsize=1000))
+    cache: LRUCache[str, "ResultRecord[Any]"] = Field(default_factory=default_cache)
 
     # Deprecated fields
     persist_result: Optional[bool] = Field(default=None)
@@ -319,7 +327,7 @@ class ResultStore(BaseModel):
         return self.model_copy(update=update)
 
     @sync_compatible
-    async def update_for_task(self: Self, task: "Task") -> Self:
+    async def update_for_task(self: Self, task: "Task[P, R]") -> Self:
         """
         Create a new result store for a task.
 
@@ -446,8 +454,15 @@ class ResultStore(BaseModel):
         """
         return await self._exists(key=key, _sync=False)
 
+    def _resolved_key_path(self, key: str) -> str:
+        if self.result_storage_block_id is None and hasattr(
+            self.result_storage, "_resolve_path"
+        ):
+            return str(self.result_storage._resolve_path(key))
+        return key
+
     @sync_compatible
-    async def _read(self, key: str, holder: str) -> "ResultRecord":
+    async def _read(self, key: str, holder: str) -> "ResultRecord[Any]":
         """
         Read a result record from storage.
 
@@ -465,8 +480,12 @@ class ResultStore(BaseModel):
         if self.lock_manager is not None and not self.is_lock_holder(key, holder):
             await self.await_for_lock(key)
 
-        if key in self.cache:
-            return self.cache[key]
+        resolved_key_path = self._resolved_key_path(key)
+
+        if resolved_key_path in self.cache:
+            cached_result = self.cache[resolved_key_path]
+            await emit_result_read_event(self, resolved_key_path, cached=True)
+            return cached_result
 
         if self.result_storage is None:
             self.result_storage = await get_default_result_storage()
@@ -478,31 +497,28 @@ class ResultStore(BaseModel):
                 metadata.storage_key is not None
             ), "Did not find storage key in metadata"
             result_content = await self.result_storage.read_path(metadata.storage_key)
-            result_record = ResultRecord.deserialize_from_result_and_metadata(
+            result_record: ResultRecord[
+                Any
+            ] = ResultRecord.deserialize_from_result_and_metadata(
                 result=result_content, metadata=metadata_content
             )
+            await emit_result_read_event(self, resolved_key_path)
         else:
             content = await self.result_storage.read_path(key)
-            result_record = ResultRecord.deserialize(
+            result_record: ResultRecord[Any] = ResultRecord.deserialize(
                 content, backup_serializer=self.serializer
             )
+            await emit_result_read_event(self, resolved_key_path)
 
         if self.cache_result_in_memory:
-            if self.result_storage_block_id is None and hasattr(
-                self.result_storage, "_resolve_path"
-            ):
-                cache_key = str(self.result_storage._resolve_path(key))
-            else:
-                cache_key = key
-
-            self.cache[cache_key] = result_record
+            self.cache[resolved_key_path] = result_record
         return result_record
 
     def read(
         self,
         key: str,
         holder: Optional[str] = None,
-    ) -> "ResultRecord":
+    ) -> "ResultRecord[Any]":
         """
         Read a result record from storage.
 
@@ -520,7 +536,7 @@ class ResultStore(BaseModel):
         self,
         key: str,
         holder: Optional[str] = None,
-    ) -> "ResultRecord":
+    ) -> "ResultRecord[Any]":
         """
         Read a result record from storage.
 
@@ -663,12 +679,13 @@ class ResultStore(BaseModel):
                 base_key,
                 content=result_record.serialize_metadata(),
             )
+            await emit_result_write_event(self, result_record.metadata.storage_key)
         # Otherwise, write the result metadata and result together
         else:
             await self.result_storage.write_path(
                 result_record.metadata.storage_key, content=result_record.serialize()
             )
-
+            await emit_result_write_event(self, result_record.metadata.storage_key)
         if self.cache_result_in_memory:
             self.cache[key] = result_record
 
@@ -898,7 +915,11 @@ class ResultStore(BaseModel):
         )
 
     @sync_compatible
-    async def read_parameters(self, identifier: UUID) -> Dict[str, Any]:
+    async def read_parameters(self, identifier: UUID) -> dict[str, Any]:
+        if self.result_storage is None:
+            raise ValueError(
+                "Result store is not configured - must have a result storage block to read parameters"
+            )
         record = ResultRecord.deserialize(
             await self.result_storage.read_path(f"parameters/{identifier}")
        )
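Two themes run through the results.py hunks: result reads and writes now emit experimental lineage events, and the in-memory cache is keyed by the resolved storage path rather than the raw key, so the same underlying file shares one cache entry however it was addressed. An illustrative, standalone sketch of the cache-keying idea only (not Prefect code; the base path is a placeholder):

```python
from pathlib import Path

from cachetools import LRUCache

# Stand-in for a filesystem result store rooted at a placeholder base path.
basepath = Path("/tmp/prefect-results")
cache: LRUCache = LRUCache(maxsize=1000)


def resolved_key_path(key: str) -> str:
    # Mirrors the spirit of ResultStore._resolved_key_path: normalize a raw
    # key to the absolute path it would be stored under.
    return str((basepath / key).resolve())


cache[resolved_key_path("my-result")] = {"value": 42}

# A later read that resolves to the same path hits the same cache entry.
assert resolved_key_path("./my-result") in cache
```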