prefect-client 3.1.7__py3-none-any.whl → 3.1.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. prefect/__init__.py +53 -59
  2. prefect/_internal/concurrency/services.py +6 -4
  3. prefect/_version.py +3 -3
  4. prefect/agent.py +3 -1
  5. prefect/client/cloud.py +0 -21
  6. prefect/client/orchestration.py +18 -0
  7. prefect/client/schemas/objects.py +11 -0
  8. prefect/client/utilities.py +1 -15
  9. prefect/deployments/deployments.py +4 -2
  10. prefect/deployments/runner.py +3 -1
  11. prefect/engine.py +2 -1
  12. prefect/events/filters.py +2 -8
  13. prefect/exceptions.py +31 -41
  14. prefect/filesystems.py +2 -2
  15. prefect/flow_engine.py +2 -2
  16. prefect/flows.py +228 -185
  17. prefect/infrastructure/__init__.py +3 -1
  18. prefect/infrastructure/base.py +3 -1
  19. prefect/results.py +76 -19
  20. prefect/runner/runner.py +131 -21
  21. prefect/settings/__init__.py +1 -0
  22. prefect/settings/base.py +3 -2
  23. prefect/settings/models/api.py +4 -0
  24. prefect/settings/models/runner.py +8 -0
  25. prefect/settings/models/server/api.py +7 -1
  26. prefect/states.py +22 -10
  27. prefect/task_engine.py +1 -1
  28. prefect/telemetry/instrumentation.py +9 -10
  29. prefect/telemetry/services.py +67 -0
  30. prefect/utilities/engine.py +15 -1
  31. prefect/utilities/importtools.py +28 -21
  32. prefect/variables.py +2 -2
  33. prefect/workers/base.py +6 -12
  34. prefect/workers/block.py +3 -1
  35. prefect/workers/cloud.py +3 -1
  36. {prefect_client-3.1.7.dist-info → prefect_client-3.1.9.dist-info}/METADATA +1 -1
  37. {prefect_client-3.1.7.dist-info → prefect_client-3.1.9.dist-info}/RECORD +40 -39
  38. {prefect_client-3.1.7.dist-info → prefect_client-3.1.9.dist-info}/LICENSE +0 -0
  39. {prefect_client-3.1.7.dist-info → prefect_client-3.1.9.dist-info}/WHEEL +0 -0
  40. {prefect_client-3.1.7.dist-info → prefect_client-3.1.9.dist-info}/top_level.txt +0 -0
prefect/results.py CHANGED
@@ -233,6 +233,29 @@ def _format_user_supplied_storage_key(key: str) -> str:
     return key.format(**runtime_vars, parameters=prefect.runtime.task_run.parameters)
 
 
+async def _call_explicitly_async_block_method(
+    block: Union[WritableFileSystem, NullFileSystem],
+    method: str,
+    args: tuple[Any, ...],
+    kwargs: dict[str, Any],
+) -> Any:
+    """
+    TODO: remove this once we have explicit async methods on all storage blocks
+
+    see https://github.com/PrefectHQ/prefect/issues/15008
+    """
+    if hasattr(block, f"a{method}"):  # explicit async method
+        return await getattr(block, f"a{method}")(*args, **kwargs)
+    elif hasattr(getattr(block, method, None), "aio"):  # sync_compatible
+        return await getattr(block, method).aio(block, *args, **kwargs)
+    else:  # should not happen in prefect, but users can override impls
+        maybe_coro = getattr(block, method)(*args, **kwargs)
+        if inspect.isawaitable(maybe_coro):
+            return await maybe_coro
+        else:
+            return maybe_coro
+
+
 T = TypeVar("T")
 
 
@@ -405,7 +428,9 @@ class ResultStore(BaseModel):
             # TODO: Add an `exists` method to commonly used storage blocks
             # so the entire payload doesn't need to be read
             try:
-                metadata_content = await self.metadata_storage.read_path(key)
+                metadata_content = await _call_explicitly_async_block_method(
+                    self.metadata_storage, "read_path", (key,), {}
+                )
                 if metadata_content is None:
                     return False
                 metadata = ResultRecordMetadata.load_bytes(metadata_content)
@@ -414,7 +439,9 @@ class ResultStore(BaseModel):
                 return False
         else:
             try:
-                content = await self.result_storage.read_path(key)
+                content = await _call_explicitly_async_block_method(
+                    self.result_storage, "read_path", (key,), {}
+                )
                 if content is None:
                     return False
                 record = ResultRecord.deserialize(content)
@@ -491,12 +518,22 @@ class ResultStore(BaseModel):
             self.result_storage = await get_default_result_storage()
 
         if self.metadata_storage is not None:
-            metadata_content = await self.metadata_storage.read_path(key)
+            metadata_content = await _call_explicitly_async_block_method(
+                self.metadata_storage,
+                "read_path",
+                (key,),
+                {},
+            )
             metadata = ResultRecordMetadata.load_bytes(metadata_content)
             assert (
                 metadata.storage_key is not None
             ), "Did not find storage key in metadata"
-            result_content = await self.result_storage.read_path(metadata.storage_key)
+            result_content = await _call_explicitly_async_block_method(
+                self.result_storage,
+                "read_path",
+                (metadata.storage_key,),
+                {},
+            )
             result_record: ResultRecord[
                 Any
             ] = ResultRecord.deserialize_from_result_and_metadata(
@@ -504,7 +541,12 @@ class ResultStore(BaseModel):
             )
             await emit_result_read_event(self, resolved_key_path)
         else:
-            content = await self.result_storage.read_path(key)
+            content = await _call_explicitly_async_block_method(
+                self.result_storage,
+                "read_path",
+                (key,),
+                {},
+            )
             result_record: ResultRecord[Any] = ResultRecord.deserialize(
                 content, backup_serializer=self.serializer
             )
@@ -555,7 +597,7 @@ class ResultStore(BaseModel):
         obj: Any,
         key: Optional[str] = None,
         expiration: Optional[DateTime] = None,
-    ) -> "ResultRecord":
+    ) -> "ResultRecord[Any]":
         """
         Create a result record.
 
@@ -671,19 +713,26 @@ class ResultStore(BaseModel):
 
         # If metadata storage is configured, write result and metadata separately
         if self.metadata_storage is not None:
-            await self.result_storage.write_path(
-                result_record.metadata.storage_key,
-                content=result_record.serialize_result(),
+            await _call_explicitly_async_block_method(
+                self.result_storage,
+                "write_path",
+                (result_record.metadata.storage_key,),
+                {"content": result_record.serialize_result()},
             )
-            await self.metadata_storage.write_path(
-                base_key,
-                content=result_record.serialize_metadata(),
+            await _call_explicitly_async_block_method(
+                self.metadata_storage,
+                "write_path",
+                (base_key,),
+                {"content": result_record.serialize_metadata()},
             )
             await emit_result_write_event(self, result_record.metadata.storage_key)
         # Otherwise, write the result metadata and result together
         else:
-            await self.result_storage.write_path(
-                result_record.metadata.storage_key, content=result_record.serialize()
+            await _call_explicitly_async_block_method(
+                self.result_storage,
+                "write_path",
+                (result_record.metadata.storage_key,),
+                {"content": result_record.serialize()},
             )
             await emit_result_write_event(self, result_record.metadata.storage_key)
         if self.cache_result_in_memory:
@@ -910,8 +959,11 @@ class ResultStore(BaseModel):
                 serializer=self.serializer, storage_key=str(identifier)
             ),
         )
-        await self.result_storage.write_path(
-            f"parameters/{identifier}", content=record.serialize()
+        await _call_explicitly_async_block_method(
+            self.result_storage,
+            "write_path",
+            (f"parameters/{identifier}",),
+            {"content": record.serialize()},
         )
 
     @sync_compatible
@@ -921,7 +973,12 @@ class ResultStore(BaseModel):
             "Result store is not configured - must have a result storage block to read parameters"
         )
         record = ResultRecord.deserialize(
-            await self.result_storage.read_path(f"parameters/{identifier}")
+            await _call_explicitly_async_block_method(
+                self.result_storage,
+                "read_path",
+                (f"parameters/{identifier}",),
+                {},
+            )
         )
         return record.result
 
@@ -976,7 +1033,7 @@ class ResultRecordMetadata(BaseModel):
         """
         return cls.model_validate_json(data)
 
-    def __eq__(self, other):
+    def __eq__(self, other: Any) -> bool:
         if not isinstance(other, ResultRecordMetadata):
             return False
         return (
@@ -1050,7 +1107,7 @@ class ResultRecord(BaseModel, Generic[R]):
 
     @model_validator(mode="before")
     @classmethod
-    def coerce_old_format(cls, value: Any):
+    def coerce_old_format(cls, value: Any) -> Any:
         if isinstance(value, dict):
             if "data" in value:
                 value["result"] = value.pop("data")
prefect/runner/runner.py CHANGED
@@ -47,11 +47,12 @@ from typing import (
     TYPE_CHECKING,
     Any,
     Callable,
+    Coroutine,
     Dict,
     Iterable,
     List,
     Optional,
-    Set,
+    TypedDict,
     Union,
 )
 from uuid import UUID, uuid4
@@ -59,6 +60,7 @@ from uuid import UUID, uuid4
 import anyio
 import anyio.abc
 import pendulum
+from cachetools import LRUCache
 
 from prefect._internal.concurrency.api import (
     create_call,
@@ -94,8 +96,6 @@ from prefect.logging.loggers import PrefectLogAdapter, flow_run_logger, get_logg
 from prefect.runner.storage import RunnerStorage
 from prefect.settings import (
     PREFECT_API_URL,
-    PREFECT_RUNNER_POLL_FREQUENCY,
-    PREFECT_RUNNER_PROCESS_LIMIT,
     PREFECT_RUNNER_SERVER_ENABLE,
     get_current_settings,
 )
@@ -123,19 +123,25 @@ from prefect.utilities.services import (
 from prefect.utilities.slugify import slugify
 
 if TYPE_CHECKING:
-    from prefect.client.schemas.objects import Deployment
+    from prefect.client.schemas.responses import DeploymentResponse
     from prefect.client.types.flexible_schedule_list import FlexibleScheduleList
     from prefect.deployments.runner import RunnerDeployment
 
 __all__ = ["Runner"]
 
 
+class ProcessMapEntry(TypedDict):
+    flow_run: FlowRun
+    pid: int
+
+
 class Runner:
     def __init__(
         self,
         name: Optional[str] = None,
         query_seconds: Optional[float] = None,
         prefetch_seconds: float = 10,
+        heartbeat_seconds: Optional[float] = None,
         limit: Optional[int] = None,
         pause_on_shutdown: bool = True,
         webserver: bool = False,
@@ -149,6 +155,9 @@ class Runner:
             query_seconds: The number of seconds to wait between querying for
                 scheduled flow runs; defaults to `PREFECT_RUNNER_POLL_FREQUENCY`
             prefetch_seconds: The number of seconds to prefetch flow runs for.
+            heartbeat_seconds: The number of seconds to wait between emitting
+                flow run heartbeats. The runner will not emit heartbeats if the value is None.
+                Defaults to `PREFECT_RUNNER_HEARTBEAT_FREQUENCY`.
             limit: The maximum number of flow runs this runner should be running at
             pause_on_shutdown: A boolean for whether or not to automatically pause
                 deployment schedules on shutdown; defaults to `True`
@@ -180,6 +189,8 @@ class Runner:
             asyncio.run(runner.start())
             ```
         """
+        settings = get_current_settings()
+
         if name and ("/" in name or "%" in name):
             raise ValueError("Runner name cannot contain '/' or '%'")
         self.name = Path(name).stem if name is not None else f"runner-{uuid4()}"
@@ -188,19 +199,24 @@ class Runner:
         self.started = False
         self.stopping = False
         self.pause_on_shutdown = pause_on_shutdown
-        self.limit = limit or PREFECT_RUNNER_PROCESS_LIMIT.value()
+        self.limit = limit or settings.runner.process_limit
         self.webserver = webserver
 
-        self.query_seconds = query_seconds or PREFECT_RUNNER_POLL_FREQUENCY.value()
+        self.query_seconds = query_seconds or settings.runner.poll_frequency
         self._prefetch_seconds = prefetch_seconds
+        self.heartbeat_seconds = (
+            heartbeat_seconds or settings.runner.heartbeat_frequency
+        )
+        if self.heartbeat_seconds is not None and self.heartbeat_seconds < 30:
+            raise ValueError("Heartbeat must be 30 seconds or greater.")
 
         self._limiter: Optional[anyio.CapacityLimiter] = None
         self._client = get_client()
-        self._submitting_flow_run_ids = set()
-        self._cancelling_flow_run_ids = set()
-        self._scheduled_task_scopes = set()
-        self._deployment_ids: Set[UUID] = set()
-        self._flow_run_process_map: dict[UUID, dict[str, Any]] = dict()
+        self._submitting_flow_run_ids: set[UUID] = set()
+        self._cancelling_flow_run_ids: set[UUID] = set()
+        self._scheduled_task_scopes: set[UUID] = set()
+        self._deployment_ids: set[UUID] = set()
+        self._flow_run_process_map: dict[UUID, ProcessMapEntry] = dict()
 
         self._tmp_dir: Path = (
             Path(tempfile.gettempdir()) / "runner_storage" / str(uuid4())
@@ -210,6 +226,12 @@ class Runner:
 
         self._loop: Optional[asyncio.AbstractEventLoop] = None
 
+        # Caching
+        self._deployment_cache: LRUCache[UUID, "DeploymentResponse"] = LRUCache(
+            maxsize=100
+        )
+        self._flow_cache: LRUCache[UUID, "APIFlow"] = LRUCache(maxsize=100)
+
     @sync_compatible
     async def add_deployment(
         self,
@@ -234,7 +256,7 @@ class Runner:
     @sync_compatible
     async def add_flow(
         self,
-        flow: Flow,
+        flow: Flow[Any, Any],
         name: Optional[str] = None,
         interval: Optional[
             Union[
@@ -249,7 +271,7 @@ class Runner:
         paused: Optional[bool] = None,
         schedules: Optional["FlexibleScheduleList"] = None,
         concurrency_limit: Optional[Union[int, ConcurrencyLimitConfig, None]] = None,
-        parameters: Optional[dict] = None,
+        parameters: Optional[dict[str, Any]] = None,
         triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None,
         description: Optional[str] = None,
         tags: Optional[List[str]] = None,
@@ -336,7 +358,7 @@ class Runner:
         else:
             return next(s for s in self._storage_objs if s == storage)
 
-    def handle_sigterm(self, signum, frame):
+    def handle_sigterm(self, *args: Any, **kwargs: Any) -> None:
         """
         Gracefully shuts down the runner when a SIGTERM is received.
         """
@@ -441,6 +463,16 @@ class Runner:
                         jitter_range=0.3,
                     )
                 )
+                if self.heartbeat_seconds is not None:
+                    loops_task_group.start_soon(
+                        partial(
+                            critical_service_loop,
+                            workload=runner._emit_flow_run_heartbeats,
+                            interval=self.heartbeat_seconds,
+                            run_once=run_once,
+                            jitter_range=0.3,
+                        )
+                    )
 
     def execute_in_background(
         self, func: Callable[..., Any], *args: Any, **kwargs: Any
@@ -538,6 +570,15 @@ class Runner:
                         jitter_range=0.3,
                     )
                 )
+                if self.heartbeat_seconds is not None:
+                    tg.start_soon(
+                        partial(
+                            critical_service_loop,
+                            workload=self._emit_flow_run_heartbeats,
+                            interval=self.heartbeat_seconds,
+                            jitter_range=0.3,
+                        )
+                    )
 
     def _get_flow_run_logger(self, flow_run: "FlowRun") -> PrefectLogAdapter:
         return flow_run_logger(flow_run=flow_run).getChild(
@@ -850,18 +891,84 @@ class Runner:
                 "message": state_msg or "Flow run was cancelled successfully."
             },
         )
+
+        flow, deployment = await self._get_flow_and_deployment(flow_run)
+        self._emit_flow_run_cancelled_event(
+            flow_run=flow_run, flow=flow, deployment=deployment
+        )
+        run_logger.info(f"Cancelled flow run '{flow_run.name}'!")
+
+    async def _get_flow_and_deployment(
+        self, flow_run: "FlowRun"
+    ) -> tuple[Optional["APIFlow"], Optional["DeploymentResponse"]]:
+        deployment: Optional["DeploymentResponse"] = (
+            self._deployment_cache.get(flow_run.deployment_id)
+            if flow_run.deployment_id
+            else None
+        )
+        flow: Optional["APIFlow"] = self._flow_cache.get(flow_run.flow_id)
+        if not deployment and flow_run.deployment_id is not None:
             try:
                 deployment = await self._client.read_deployment(flow_run.deployment_id)
+                self._deployment_cache[flow_run.deployment_id] = deployment
             except ObjectNotFound:
                 deployment = None
+        if not flow:
             try:
                 flow = await self._client.read_flow(flow_run.flow_id)
+                self._flow_cache[flow_run.flow_id] = flow
             except ObjectNotFound:
                 flow = None
-        self._emit_flow_run_cancelled_event(
-            flow_run=flow_run, flow=flow, deployment=deployment
+        return flow, deployment
+
+    async def _emit_flow_run_heartbeats(self):
+        coros: list[Coroutine[Any, Any, Any]] = []
+        for entry in self._flow_run_process_map.values():
+            coros.append(self._emit_flow_run_heartbeat(entry["flow_run"]))
+        await asyncio.gather(*coros)
+
+    async def _emit_flow_run_heartbeat(self, flow_run: "FlowRun"):
+        from prefect import __version__
+
+        related: list[RelatedResource] = []
+        tags: list[str] = []
+
+        flow, deployment = await self._get_flow_and_deployment(flow_run)
+        if deployment:
+            related.append(
+                RelatedResource(
+                    {
+                        "prefect.resource.id": f"prefect.deployment.{deployment.id}",
+                        "prefect.resource.role": "deployment",
+                        "prefect.resource.name": deployment.name,
+                    }
+                )
             )
-        run_logger.info(f"Cancelled flow run '{flow_run.name}'!")
+            tags.extend(deployment.tags)
+        if flow:
+            related.append(
+                RelatedResource(
+                    {
+                        "prefect.resource.id": f"prefect.flow.{flow.id}",
+                        "prefect.resource.role": "flow",
+                        "prefect.resource.name": flow.name,
+                    }
+                )
+            )
+        tags.extend(flow_run.tags)
+
+        related = [RelatedResource.model_validate(r) for r in related]
+        related += tags_as_related_resources(set(tags))
+
+        emit_event(
+            event="prefect.flow-run.heartbeat",
+            resource={
+                "prefect.resource.id": f"prefect.flow-run.{flow_run.id}",
+                "prefect.resource.name": flow_run.name,
+                "prefect.version": __version__,
+            },
+            related=related,
+        )
 
     def _event_resource(self):
         from prefect import __version__
@@ -876,7 +983,7 @@ class Runner:
         self,
         flow_run: "FlowRun",
         flow: "Optional[APIFlow]",
-        deployment: "Optional[Deployment]",
+        deployment: "Optional[DeploymentResponse]",
     ):
         related: list[RelatedResource] = []
         tags: list[str] = []
@@ -920,6 +1027,7 @@ class Runner:
             resource=self._event_resource(),
             related=related,
         )
+        self._logger.debug(f"Emitted flow run heartbeat event for {flow_run.id}")
 
     async def _get_scheduled_flow_runs(
         self,
@@ -1052,6 +1160,9 @@ class Runner:
             self._flow_run_process_map[flow_run.id] = dict(
                 pid=readiness_result, flow_run=flow_run
             )
+            # Heartbeats are opt-in and only emitted if a heartbeat frequency is set
+            if self.heartbeat_seconds is not None:
+                await self._emit_flow_run_heartbeat(flow_run)
 
             run_logger.info(f"Completed submission of flow run '{flow_run.id}'")
         else:
@@ -1098,9 +1209,8 @@ class Runner:
                 )
                 # Mark the task as started to prevent agent crash
                 task_status.started(exc)
-                await self._propose_crashed_state(
-                    flow_run, "Flow run process could not be started"
-                )
+                message = f"Flow run process could not be started:\n{exc!r}"
+                await self._propose_crashed_state(flow_run, message)
             else:
                 run_logger.exception(
                     f"An error occurred while monitoring flow run '{flow_run.id}'. "
prefect/settings/__init__.py CHANGED
@@ -53,6 +53,7 @@ __all__ = [ # noqa: F822
     "temporary_settings",
     "DEFAULT_PROFILES_PATH",
     # add public settings here for auto-completion
+    "PREFECT_API_AUTH_STRING",  # type: ignore
     "PREFECT_API_KEY",  # type: ignore
     "PREFECT_API_URL",  # type: ignore
     "PREFECT_UI_URL",  # type: ignore
prefect/settings/base.py CHANGED
@@ -192,7 +192,7 @@ def _add_environment_variables(
 
 
 def _build_settings_config(
-    path: Tuple[str, ...] = tuple(),
+    path: Tuple[str, ...] = tuple(), frozen: bool = False
 ) -> PrefectSettingsConfigDict:
     env_prefix = f"PREFECT_{'_'.join(path).upper()}_" if path else "PREFECT_"
     return PrefectSettingsConfigDict(
@@ -202,7 +202,8 @@ def _build_settings_config(
         toml_file="prefect.toml",
         prefect_toml_table_header=path,
         pyproject_toml_table_header=("tool", "prefect", *path),
-        json_schema_extra=_add_environment_variables,
+        json_schema_extra=_add_environment_variables,  # type: ignore
+        frozen=frozen,
     )
 
 
prefect/settings/models/api.py CHANGED
@@ -19,6 +19,10 @@ class APISettings(PrefectBaseSettings):
         default=None,
         description="The URL of the Prefect API. If not set, the client will attempt to infer it.",
     )
+    auth_string: Optional[SecretStr] = Field(
+        default=None,
+        description="The auth string used for basic authentication with a self-hosted Prefect API. Should be kept secret.",
+    )
     key: Optional[SecretStr] = Field(
         default=None,
         description="The API key used for authentication with the Prefect API. Should be kept secret.",
prefect/settings/models/runner.py CHANGED
@@ -1,3 +1,5 @@
+from typing import Optional
+
 from pydantic import Field
 
 from prefect.settings.base import PrefectBaseSettings, _build_settings_config
@@ -54,6 +56,12 @@ class RunnerSettings(PrefectBaseSettings):
         description="Number of seconds a runner should wait between queries for scheduled work.",
     )
 
+    heartbeat_frequency: Optional[int] = Field(
+        default=None,
+        description="Number of seconds a runner should wait between heartbeats for flow runs.",
+        ge=30,
+    )
+
     server: RunnerServerSettings = Field(
         default_factory=RunnerServerSettings,
         description="Settings for controlling runner server behavior",
prefect/settings/models/server/api.py CHANGED
@@ -1,6 +1,7 @@
 from datetime import timedelta
+from typing import Optional
 
-from pydantic import AliasChoices, AliasPath, Field
+from pydantic import AliasChoices, AliasPath, Field, SecretStr
 
 from prefect.settings.base import PrefectBaseSettings, _build_settings_config
 
@@ -12,6 +13,11 @@ class ServerAPISettings(PrefectBaseSettings):
 
     model_config = _build_settings_config(("server", "api"))
 
+    auth_string: Optional[SecretStr] = Field(
+        default=None,
+        description="A string to use for basic authentication with the API; typically in the form 'user:password' but can be any string.",
+    )
+
     host: str = Field(
         default="127.0.0.1",
         description="The API's host address (defaults to `127.0.0.1`).",
    )
prefect/states.py CHANGED
@@ -11,6 +11,7 @@ from typing import Any, Dict, Iterable, Optional, Type
 import anyio
 import httpx
 import pendulum
+from opentelemetry import propagate
 from typing_extensions import TypeGuard
 
 from prefect._internal.compatibility import deprecated
@@ -588,6 +589,16 @@ class StateGroup:
         return f"StateGroup<{self.counts_message()}>"
 
 
+def _traced(cls: Type[State[R]], **kwargs: Any) -> State[R]:
+    state_details = StateDetails.model_validate(kwargs.pop("state_details", {}))
+
+    carrier = {}
+    propagate.inject(carrier)
+    state_details.traceparent = carrier.get("traceparent")
+
+    return cls(**kwargs, state_details=state_details)
+
+
 def Scheduled(
     cls: Type[State[R]] = State,
     scheduled_time: Optional[datetime.datetime] = None,
@@ -605,7 +616,7 @@ def Scheduled(
         raise ValueError("An extra scheduled_time was provided in state_details")
     state_details.scheduled_time = scheduled_time
 
-    return cls(type=StateType.SCHEDULED, state_details=state_details, **kwargs)
+    return _traced(cls, type=StateType.SCHEDULED, state_details=state_details, **kwargs)
 
 
 def Completed(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
@@ -614,7 +625,8 @@ def Completed(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
     Returns:
         State: a Completed state
     """
-    return cls(type=StateType.COMPLETED, **kwargs)
+
+    return _traced(cls, type=StateType.COMPLETED, **kwargs)
 
 
 def Running(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
@@ -623,7 +635,7 @@ def Running(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
     Returns:
         State: a Running state
     """
-    return cls(type=StateType.RUNNING, **kwargs)
+    return _traced(cls, type=StateType.RUNNING, **kwargs)
 
 
 def Failed(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
@@ -632,7 +644,7 @@ def Failed(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
     Returns:
         State: a Failed state
     """
-    return cls(type=StateType.FAILED, **kwargs)
+    return _traced(cls, type=StateType.FAILED, **kwargs)
 
 
 def Crashed(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
@@ -641,7 +653,7 @@ def Crashed(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
     Returns:
         State: a Crashed state
     """
-    return cls(type=StateType.CRASHED, **kwargs)
+    return _traced(cls, type=StateType.CRASHED, **kwargs)
 
 
 def Cancelling(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
@@ -650,7 +662,7 @@ def Cancelling(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
     Returns:
         State: a Cancelling state
     """
-    return cls(type=StateType.CANCELLING, **kwargs)
+    return _traced(cls, type=StateType.CANCELLING, **kwargs)
 
 
 def Cancelled(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
@@ -659,7 +671,7 @@ def Cancelled(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
     Returns:
         State: a Cancelled state
     """
-    return cls(type=StateType.CANCELLED, **kwargs)
+    return _traced(cls, type=StateType.CANCELLED, **kwargs)
 
 
 def Pending(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
@@ -668,7 +680,7 @@ def Pending(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
     Returns:
         State: a Pending state
     """
-    return cls(type=StateType.PENDING, **kwargs)
+    return _traced(cls, type=StateType.PENDING, **kwargs)
 
 
 def Paused(
@@ -704,7 +716,7 @@ def Paused(
     state_details.pause_reschedule = reschedule
     state_details.pause_key = pause_key
 
-    return cls(type=StateType.PAUSED, state_details=state_details, **kwargs)
+    return _traced(cls, type=StateType.PAUSED, state_details=state_details, **kwargs)
 
 
 def Suspended(
@@ -766,7 +778,7 @@ def Retrying(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
     Returns:
         State: a Retrying state
     """
-    return cls(type=StateType.RUNNING, name="Retrying", **kwargs)
+    return _traced(cls, type=StateType.RUNNING, name="Retrying", **kwargs)
 
 
 def Late(
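
The new `_traced` wrapper injects the current OpenTelemetry context into every state constructor, so a state created while a span is active carries a `traceparent` in its details. A rough sketch of the effect (not from the diff; assumes the OpenTelemetry SDK is installed and a tracer provider is configured, otherwise `traceparent` stays `None`):

from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider

from prefect.states import Completed

trace.set_tracer_provider(TracerProvider())
tracer = trace.get_tracer("example")

with tracer.start_as_current_span("create-state"):
    state = Completed()

# The W3C trace context captured by propagate.inject() during state creation
print(state.state_details.traceparent)  # e.g. "00-<trace_id>-<span_id>-01"
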
prefect/task_engine.py CHANGED
@@ -182,7 +182,7 @@ class BaseTaskRunEngine(Generic[P, R]):
                     return_data=True,
                     max_depth=-1,
                     remove_annotations=True,
-                    context={},
+                    context={"parameter_name": parameter},
                 )
             except UpstreamTaskError:
                 raise