prefect-client 3.2.9__py3-none-any.whl → 3.2.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
prefect/_build_info.py CHANGED
@@ -1,5 +1,5 @@
 # Generated by versioningit
-__version__ = "3.2.9"
-__build_date__ = "2025-02-28 20:11:13.828993+00:00"
-__git_commit__ = "27eb408c48e16b1f16718b884ce7753d5b32ae35"
+__version__ = "3.2.10"
+__build_date__ = "2025-03-05 18:07:05.465362+00:00"
+__git_commit__ = "3a61a72641ef7ba00d874f725351b1017927a247"
 __dirty__ = False
prefect/deployments/flow_runs.py CHANGED
@@ -3,6 +3,8 @@ from typing import TYPE_CHECKING, Any, Iterable, Optional, Union
 from uuid import UUID
 
 import anyio
+from opentelemetry import trace
+from opentelemetry.instrumentation.utils import is_instrumentation_enabled
 
 import prefect
 from prefect._result_records import ResultRecordMetadata
@@ -12,9 +14,7 @@ from prefect.context import FlowRunContext, TaskRunContext
 from prefect.logging import get_logger
 from prefect.states import Pending, Scheduled
 from prefect.tasks import Task
-from prefect.telemetry.run_telemetry import (
-    LABELS_TRACEPARENT_KEY,
-)
+from prefect.telemetry.run_telemetry import LABELS_TRACEPARENT_KEY, RunTelemetry
 from prefect.types._datetime import now
 from prefect.utilities.asyncutils import sync_compatible
 from prefect.utilities.slugify import slugify
@@ -164,6 +164,8 @@ async def run_deployment(
 
     if flow_run_ctx and flow_run_ctx.flow_run:
         traceparent = flow_run_ctx.flow_run.labels.get(LABELS_TRACEPARENT_KEY)
+    elif is_instrumentation_enabled():
+        traceparent = RunTelemetry.traceparent_from_span(span=trace.get_current_span())
     else:
         traceparent = None
 
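Note on the run_deployment change above: when there is no active Prefect flow run context but OpenTelemetry instrumentation is enabled, the traceparent is now derived from the current OTel span via the new RunTelemetry.traceparent_from_span staticmethod (see the prefect/telemetry/run_telemetry.py section below). A minimal sketch of that fallback, assuming the OpenTelemetry instrumentation packages are installed:

from typing import Optional

from opentelemetry import trace
from opentelemetry.instrumentation.utils import is_instrumentation_enabled

from prefect.telemetry.run_telemetry import RunTelemetry

traceparent: Optional[str] = None
if is_instrumentation_enabled():
    # traceparent_from_span is a staticmethod as of 3.2.10, so no RunTelemetry
    # instance is needed; with no valid current span the carrier stays empty
    # and this returns None.
    traceparent = RunTelemetry.traceparent_from_span(span=trace.get_current_span())
print(traceparent)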
prefect/events/clients.py CHANGED
@@ -21,6 +21,7 @@ from urllib.parse import urlparse
 from urllib.request import proxy_bypass
 from uuid import UUID
 
+import certifi
 import orjson
 from cachetools import TTLCache
 from prometheus_client import Counter
@@ -38,6 +39,7 @@ from prefect.events import Event
 from prefect.logging import get_logger
 from prefect.settings import (
     PREFECT_API_KEY,
+    PREFECT_API_SSL_CERT_FILE,
     PREFECT_API_TLS_INSECURE_SKIP_VERIFY,
     PREFECT_API_URL,
     PREFECT_CLOUD_API_URL,
@@ -123,12 +125,19 @@ class WebsocketProxyConnect(connect):
         self._host = host
         self._port = port
 
-        if PREFECT_API_TLS_INSECURE_SKIP_VERIFY:
+        if PREFECT_API_TLS_INSECURE_SKIP_VERIFY and u.scheme == "wss":
             # Create an unverified context for insecure connections
             ctx = ssl.create_default_context()
             ctx.check_hostname = False
             ctx.verify_mode = ssl.CERT_NONE
             self._kwargs.setdefault("ssl", ctx)
+        elif u.scheme == "wss":
+            cert_file = PREFECT_API_SSL_CERT_FILE.value()
+            if not cert_file:
+                cert_file = certifi.where()
+            # Create a verified context with the certificate file
+            ctx = ssl.create_default_context(cafile=cert_file)
+            self._kwargs.setdefault("ssl", ctx)
 
     async def _proxy_connect(self: Self) -> ClientConnection:
         if self._proxy:
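The WebsocketProxyConnect change above only attaches an SSL context for wss:// URLs and, when verification is not skipped, trusts the CA bundle from PREFECT_API_SSL_CERT_FILE with a fallback to certifi. A standalone sketch of the same selection logic, with plain cert_file and insecure values standing in for the Prefect settings and a placeholder URL:

import ssl
from typing import Optional
from urllib.parse import urlparse

import certifi


def build_ws_ssl_context(
    uri: str, cert_file: Optional[str], insecure: bool
) -> Optional[ssl.SSLContext]:
    u = urlparse(uri)
    if u.scheme != "wss":
        # Plain ws:// connections get no SSL context at all.
        return None
    if insecure:
        # Unverified context, mirroring PREFECT_API_TLS_INSECURE_SKIP_VERIFY.
        ctx = ssl.create_default_context()
        ctx.check_hostname = False
        ctx.verify_mode = ssl.CERT_NONE
        return ctx
    # Verified context using the configured CA bundle, else certifi's bundle.
    return ssl.create_default_context(cafile=cert_file or certifi.where())


print(build_ws_ssl_context("wss://example.invalid/api/events/in", None, False))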
prefect/flows.py CHANGED
@@ -2162,6 +2162,14 @@ def serve(
 
     runner = Runner(pause_on_shutdown=pause_on_shutdown, limit=limit, **kwargs)
     for deployment in args:
+        if deployment.work_pool_name:
+            warnings.warn(
+                "Work pools are not necessary for served deployments - "
+                "the `work_pool_name` argument will be ignored. Omit the "
+                f"`work_pool_name` argument from `to_deployment` for {deployment.name!r}.",
+                UserWarning,
+            )
+            deployment.work_pool_name = None
         runner.add_deployment(deployment)
 
     if print_starting_message:
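Usage sketch for the serve() change above: a deployment created with a work_pool_name and then passed to serve() now emits a UserWarning and has the value cleared before the runner starts. The flow name and pool name here are illustrative placeholders:

from prefect import flow, serve


@flow
def my_flow() -> None:
    print("hello")


if __name__ == "__main__":
    deployment = my_flow.to_deployment(name="served", work_pool_name="my-pool")
    # As of 3.2.10 this warns that work pools are unnecessary for served
    # deployments and serves with work_pool_name set back to None.
    serve(deployment)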
prefect/runner/runner.py CHANGED
@@ -108,6 +108,7 @@ from prefect.settings import (
     get_current_settings,
 )
 from prefect.states import (
+    AwaitingRetry,
     Crashed,
     Pending,
     exception_to_failed_state,
@@ -120,7 +121,7 @@ from prefect.utilities.asyncutils import (
     is_async_fn,
     sync_compatible,
 )
-from prefect.utilities.engine import propose_state
+from prefect.utilities.engine import propose_state, propose_state_sync
 from prefect.utilities.processutils import (
     get_sys_executable,
     run_process,
@@ -226,6 +227,7 @@ class Runner:
         )
         if self.heartbeat_seconds is not None and self.heartbeat_seconds < 30:
             raise ValueError("Heartbeat must be 30 seconds or greater.")
+        self._heartbeat_task: asyncio.Task[None] | None = None
 
         self._limiter: anyio.CapacityLimiter | None = None
         self._client: PrefectClient = get_client()
@@ -236,6 +238,8 @@ class Runner:
         self._flow_run_process_map: dict[UUID, ProcessMapEntry] = dict()
         self.__flow_run_process_map_lock: asyncio.Lock | None = None
         self._flow_run_bundle_map: dict[UUID, SerializedBundle] = dict()
+        # Flip to True when we are rescheduling flow runs to avoid marking flow runs as crashed
+        self._rescheduling: bool = False
 
         self._tmp_dir: Path = (
             Path(tempfile.gettempdir()) / "runner_storage" / str(uuid4())
@@ -506,16 +510,6 @@ class Runner:
                         jitter_range=0.3,
                     )
                 )
-                if self.heartbeat_seconds is not None:
-                    loops_task_group.start_soon(
-                        partial(
-                            critical_service_loop,
-                            workload=runner._emit_flow_run_heartbeats,
-                            interval=self.heartbeat_seconds,
-                            run_once=run_once,
-                            jitter_range=0.3,
-                        )
-                    )
 
     def execute_in_background(
         self, func: Callable[..., Any], *args: Any, **kwargs: Any
@@ -568,9 +562,9 @@ class Runner:
         flow_run_id: UUID,
         entrypoint: str | None = None,
         command: str | None = None,
-        cwd: Path | None = None,
+        cwd: Path | str | None = None,
         env: dict[str, str | None] | None = None,
-        task_status: anyio.abc.TaskStatus[int] | None = None,
+        task_status: anyio.abc.TaskStatus[int] = anyio.TASK_STATUS_IGNORED,
         stream_output: bool = True,
     ) -> anyio.abc.Process | None:
         """
@@ -594,7 +588,9 @@ class Runner:
         self._submitting_flow_run_ids.add(flow_run_id)
         flow_run = await self._client.read_flow_run(flow_run_id)
 
-        process: anyio.abc.Process = await self._runs_task_group.start(
+        process: (
+            anyio.abc.Process | Exception
+        ) = await self._runs_task_group.start(
             partial(
                 self._submit_run_and_capture_errors,
                 flow_run=flow_run,
@@ -605,8 +601,13 @@ class Runner:
                 stream_output=stream_output,
             ),
         )
-        if task_status:
-            task_status.started(process.pid)
+        if isinstance(process, Exception):
+            return
+
+        task_status.started(process.pid)
+
+        if self.heartbeat_seconds is not None:
+            await self._emit_flow_run_heartbeat(flow_run)
 
         async with self._flow_run_process_map_lock:
             # Only add the process to the map if it is still running
@@ -632,15 +633,6 @@ class Runner:
                         jitter_range=0.3,
                     )
                 )
-                if self.heartbeat_seconds is not None:
-                    tg.start_soon(
-                        partial(
-                            critical_service_loop,
-                            workload=self._emit_flow_run_heartbeats,
-                            interval=self.heartbeat_seconds,
-                            jitter_range=0.3,
-                        )
-                    )
 
         return process
 
@@ -668,6 +660,9 @@ class Runner:
             await self._propose_crashed_state(flow_run, msg)
             raise RuntimeError(msg)
 
+        if self.heartbeat_seconds is not None:
+            await self._emit_flow_run_heartbeat(flow_run)
+
         self._flow_run_process_map[flow_run.id] = ProcessMapEntry(
             pid=process.pid, flow_run=flow_run
         )
@@ -683,16 +678,6 @@ class Runner:
                 )
             )
         )
-        if self.heartbeat_seconds is not None:
-            tasks.append(
-                asyncio.create_task(
-                    critical_service_loop(
-                        workload=self._emit_flow_run_heartbeats,
-                        interval=self.heartbeat_seconds,
-                        jitter_range=0.1,
-                    )
-                )
-            )
 
         await anyio.to_thread.run_sync(process.join)
 
@@ -768,10 +753,12 @@ class Runner:
     async def _run_process(
         self,
         flow_run: "FlowRun",
-        task_status: anyio.abc.TaskStatus[anyio.abc.Process] | None = None,
+        task_status: anyio.abc.TaskStatus[
+            anyio.abc.Process
+        ] = anyio.TASK_STATUS_IGNORED,
         entrypoint: str | None = None,
         command: str | None = None,
-        cwd: Path | None = None,
+        cwd: Path | str | None = None,
         env: dict[str, str | None] | None = None,
         stream_output: bool = True,
     ) -> anyio.abc.Process:
@@ -951,6 +938,45 @@ class Runner:
             # process ended right after the check above.
             return
 
+    def reschedule_current_flow_runs(
+        self,
+    ) -> None:
+        """
+        Reschedules all flow runs that are currently running.
+
+        This should only be called when the runner is shutting down because it kill all
+        child processes and short-circuit the crash detection logic.
+        """
+        self._rescheduling = True
+        # Create a new sync client because this will often run in a separate thread
+        # as part of a signal handler.
+        with get_client(sync_client=True) as client:
+            self._logger.info("Rescheduling flow runs...")
+            for process_info in self._flow_run_process_map.values():
+                flow_run = process_info["flow_run"]
+                run_logger = self._get_flow_run_logger(flow_run)
+                run_logger.info(
+                    "Rescheduling flow run for resubmission in response to SIGTERM"
+                )
+                try:
+                    propose_state_sync(client, AwaitingRetry(), flow_run_id=flow_run.id)
+                    os.kill(process_info["pid"], signal.SIGTERM)
+                    run_logger.info("Rescheduled flow run for resubmission")
+                except ProcessLookupError:
+                    # Process may have already exited
+                    pass
+                except Abort as exc:
+                    run_logger.info(
+                        (
+                            "Aborted submission of flow run. "
+                            f"Server sent an abort signal: {exc}"
+                        ),
+                    )
+                except Exception:
+                    run_logger.exception(
+                        "Failed to reschedule flow run",
+                    )
+
     async def _pause_schedules(self):
         """
         Pauses all deployment schedules.
@@ -1381,7 +1407,7 @@ class Runner:
         task_status: anyio.abc.TaskStatus[anyio.abc.Process | Exception],
         entrypoint: str | None = None,
         command: str | None = None,
-        cwd: Path | None = None,
+        cwd: Path | str | None = None,
         env: dict[str, str | None] | None = None,
         stream_output: bool = True,
     ) -> Union[Optional[int], Exception]:
@@ -1399,12 +1425,12 @@ class Runner:
             )
             status_code = process.returncode
         except Exception as exc:
-            if task_status:
+            if not task_status._future.done():
                 # This flow run was being submitted and did not start successfully
                 run_logger.exception(
                     f"Failed to start process for flow run '{flow_run.id}'."
                 )
-                # Mark the task as started to prevent agent crash
+                # Mark the task as started to prevent runner crash
                 task_status.started(exc)
             message = f"Flow run process could not be started:\n{exc!r}"
             await self._propose_crashed_state(flow_run, message)
@@ -1421,7 +1447,7 @@ class Runner:
         async with self._flow_run_process_map_lock:
             self._flow_run_process_map.pop(flow_run.id, None)
 
-        if status_code != 0:
+        if status_code != 0 and not self._rescheduling:
             await self._propose_crashed_state(
                 flow_run,
                 f"Flow run process exited with non-zero status code {status_code}.",
@@ -1637,6 +1663,15 @@ class Runner:
         if not hasattr(self, "_loops_task_group") or not self._loops_task_group:
             self._loops_task_group: anyio.abc.TaskGroup = anyio.create_task_group()
 
+        if self.heartbeat_seconds is not None:
+            self._heartbeat_task = asyncio.create_task(
+                critical_service_loop(
+                    workload=self._emit_flow_run_heartbeats,
+                    interval=self.heartbeat_seconds,
+                    jitter_range=0.3,
+                )
+            )
+
         self.started = True
         return self
 
@@ -1658,6 +1693,13 @@ class Runner:
             shutil.rmtree(str(self._tmp_dir))
         del self._runs_task_group, self._loops_task_group
 
+        if self._heartbeat_task:
+            self._heartbeat_task.cancel()
+            try:
+                await self._heartbeat_task
+            except asyncio.CancelledError:
+                pass
+
     def __repr__(self) -> str:
         return f"Runner(name={self.name!r})"
 
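The new Runner.reschedule_current_flow_runs, together with the _rescheduling flag checked before proposing a Crashed state, lets a shutting-down runner hand its in-flight runs back as AwaitingRetry instead of Crashed. The call site is not part of this diff; the SIGTERM wiring below is only an illustrative assumption, and the runner name is a placeholder:

import signal

from prefect.runner import Runner

runner = Runner(name="example-runner")


def handle_sigterm(signum, frame) -> None:
    # Proposes AwaitingRetry for every tracked flow run, SIGTERMs the child
    # processes, and flips runner._rescheduling so non-zero exit codes are not
    # reported as Crashed.
    runner.reschedule_current_flow_runs()


signal.signal(signal.SIGTERM, handle_sigterm)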
prefect/runner/storage.py CHANGED
@@ -141,7 +141,8 @@ class GitRepository:
         self._credentials = credentials
         self._include_submodules = include_submodules
         repo_name = urlparse(url).path.split("/")[-1].replace(".git", "")
-        default_name = f"{repo_name}-{branch}" if branch else repo_name
+        safe_branch = branch.replace("/", "-") if branch else None
+        default_name = f"{repo_name}-{safe_branch}" if safe_branch else repo_name
         self._name = name or default_name
         self._logger = get_logger(f"runner.storage.git-repository.{self._name}")
         self._storage_base_path = Path.cwd()
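Effect of the GitRepository change above: branch names containing "/" no longer leak a path separator into the default storage name. A small sketch, with a placeholder repository URL and branch:

from prefect.runner.storage import GitRepository

storage = GitRepository(
    url="https://github.com/org/repo.git",
    branch="feature/new-thing",
)
# The final path component was "repo-feature/new-thing" before 3.2.10 and is
# now "repo-feature-new-thing".
print(storage.destination)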
prefect/server/api/deployments.py CHANGED
@@ -376,12 +376,12 @@ async def read_deployment(
 async def read_deployments(
     limit: int = dependencies.LimitBody(),
     offset: int = Body(0, ge=0),
-    flows: schemas.filters.FlowFilter = None,
-    flow_runs: schemas.filters.FlowRunFilter = None,
-    task_runs: schemas.filters.TaskRunFilter = None,
-    deployments: schemas.filters.DeploymentFilter = None,
-    work_pools: schemas.filters.WorkPoolFilter = None,
-    work_pool_queues: schemas.filters.WorkQueueFilter = None,
+    flows: Optional[schemas.filters.FlowFilter] = None,
+    flow_runs: Optional[schemas.filters.FlowRunFilter] = None,
+    task_runs: Optional[schemas.filters.TaskRunFilter] = None,
+    deployments: Optional[schemas.filters.DeploymentFilter] = None,
+    work_pools: Optional[schemas.filters.WorkPoolFilter] = None,
+    work_pool_queues: Optional[schemas.filters.WorkQueueFilter] = None,
     sort: schemas.sorting.DeploymentSort = Body(
         schemas.sorting.DeploymentSort.NAME_ASC
     ),
@@ -415,12 +415,12 @@ async def read_deployments(
 async def paginate_deployments(
     limit: int = dependencies.LimitBody(),
     page: int = Body(1, ge=1),
-    flows: schemas.filters.FlowFilter = None,
-    flow_runs: schemas.filters.FlowRunFilter = None,
-    task_runs: schemas.filters.TaskRunFilter = None,
-    deployments: schemas.filters.DeploymentFilter = None,
-    work_pools: schemas.filters.WorkPoolFilter = None,
-    work_pool_queues: schemas.filters.WorkQueueFilter = None,
+    flows: Optional[schemas.filters.FlowFilter] = None,
+    flow_runs: Optional[schemas.filters.FlowRunFilter] = None,
+    task_runs: Optional[schemas.filters.TaskRunFilter] = None,
+    deployments: Optional[schemas.filters.DeploymentFilter] = None,
+    work_pools: Optional[schemas.filters.WorkPoolFilter] = None,
+    work_pool_queues: Optional[schemas.filters.WorkQueueFilter] = None,
     sort: schemas.sorting.DeploymentSort = Body(
         schemas.sorting.DeploymentSort.NAME_ASC
     ),
@@ -474,7 +474,7 @@ async def paginate_deployments(
 @router.post("/get_scheduled_flow_runs")
 async def get_scheduled_flow_runs_for_deployments(
     background_tasks: BackgroundTasks,
-    deployment_ids: List[UUID] = Body(
+    deployment_ids: list[UUID] = Body(
         default=..., description="The deployment IDs to get scheduled runs for"
     ),
     scheduled_before: DateTime = Body(
@@ -482,7 +482,7 @@ async def get_scheduled_flow_runs_for_deployments(
     ),
     limit: int = dependencies.LimitBody(),
     db: PrefectDBInterface = Depends(provide_database_interface),
-) -> List[schemas.responses.FlowRunResponse]:
+) -> list[schemas.responses.FlowRunResponse]:
     """
     Get scheduled runs for a set of deployments. Used by a runner to poll for work.
     """
@@ -515,6 +515,7 @@ async def get_scheduled_flow_runs_for_deployments(
 
     background_tasks.add_task(
         mark_deployments_ready,
+        db=db,
         deployment_ids=deployment_ids,
     )
 
@@ -523,12 +524,12 @@ async def get_scheduled_flow_runs_for_deployments(
 
 @router.post("/count")
 async def count_deployments(
-    flows: schemas.filters.FlowFilter = None,
-    flow_runs: schemas.filters.FlowRunFilter = None,
-    task_runs: schemas.filters.TaskRunFilter = None,
-    deployments: schemas.filters.DeploymentFilter = None,
-    work_pools: schemas.filters.WorkPoolFilter = None,
-    work_pool_queues: schemas.filters.WorkQueueFilter = None,
+    flows: Optional[schemas.filters.FlowFilter] = None,
+    flow_runs: Optional[schemas.filters.FlowRunFilter] = None,
+    task_runs: Optional[schemas.filters.TaskRunFilter] = None,
+    deployments: Optional[schemas.filters.DeploymentFilter] = None,
+    work_pools: Optional[schemas.filters.WorkPoolFilter] = None,
+    work_pool_queues: Optional[schemas.filters.WorkQueueFilter] = None,
     db: PrefectDBInterface = Depends(provide_database_interface),
 ) -> int:
     """
prefect/server/api/server.py CHANGED
@@ -250,8 +250,14 @@ def copy_directory(directory: str, path: str) -> None:
             if os.path.exists(destination):
                 shutil.rmtree(destination)
             shutil.copytree(source, destination, symlinks=True)
+            # ensure copied files are writeable
+            for root, dirs, files in os.walk(destination):
+                for f in files:
+                    os.chmod(os.path.join(root, f), 0o600)
         else:
             shutil.copy2(source, destination)
+            # Ensure copied file is writeable
+            os.chmod(destination, 0o600)
 
 
 async def custom_internal_exception_handler(
prefect/server/api/work_queues.py CHANGED
@@ -164,6 +164,7 @@ async def read_work_queue_runs(
 
     background_tasks.add_task(
         mark_work_queues_ready,
+        db=db,
         polled_work_queue_ids=[work_queue_id],
         ready_work_queue_ids=(
             [work_queue_id] if work_queue.status == WorkQueueStatus.NOT_READY else []
@@ -172,6 +173,7 @@ async def read_work_queue_runs(
 
     background_tasks.add_task(
         mark_deployments_ready,
+        db=db,
         work_queue_ids=[work_queue_id],
     )
 
prefect/server/api/workers.py CHANGED
@@ -375,6 +375,7 @@ async def get_scheduled_flow_runs(
 
     background_tasks.add_task(
         mark_work_queues_ready,
+        db=db,
         polled_work_queue_ids=[
             wq.id for wq in work_queues if wq.status != WorkQueueStatus.NOT_READY
         ],
@@ -385,6 +386,7 @@ async def get_scheduled_flow_runs(
 
     background_tasks.add_task(
         mark_deployments_ready,
+        db=db,
         work_queue_ids=[wq.id for wq in work_queues],
     )
 
prefect/telemetry/run_telemetry.py CHANGED
@@ -132,7 +132,7 @@ class RunTelemetry:
             },
         )
 
-        if traceparent := self._traceparent_from_span(self.span):
+        if traceparent := RunTelemetry.traceparent_from_span(self.span):
             run.labels[LABELS_TRACEPARENT_KEY] = traceparent
 
         return traceparent, self.span
@@ -150,7 +150,8 @@ class RunTelemetry:
         carrier = {TRACEPARENT_KEY: traceparent}
         return propagate.extract(carrier)
 
-    def _traceparent_from_span(self, span: Span) -> str | None:
+    @staticmethod
+    def traceparent_from_span(span: Span) -> str | None:
         carrier: dict[str, Any] = {}
         propagate.inject(carrier, context=trace.set_span_in_context(span))
         return carrier.get(TRACEPARENT_KEY)
prefect/workers/base.py CHANGED
@@ -933,40 +933,32 @@ class BaseWorker(abc.ABC, Generic[C, V, R]):
             )
         )
 
-    async def _check_flow_run(self, flow_run: "FlowRun") -> None:
+    async def _submit_run(self, flow_run: "FlowRun") -> None:
         """
-        Performs a check on a submitted flow run to warn the user if the flow run
-        was created from a deployment with a storage block.
+        Submits a given flow run for execution by the worker.
         """
+        run_logger = self.get_flow_run_logger(flow_run)
+
         if flow_run.deployment_id:
             assert self._client and self._client._started, (
                 "Client must be started to check flow run deployment."
             )
-            deployment = await self._client.read_deployment(flow_run.deployment_id)
-            if deployment.storage_document_id:
-                raise ValueError(
-                    f"Flow run {flow_run.id!r} was created from deployment"
-                    f" {deployment.name!r} which is configured with a storage block."
-                    " Please use an agent to execute this flow run."
-                )
-
-    async def _submit_run(self, flow_run: "FlowRun") -> None:
-        """
-        Submits a given flow run for execution by the worker.
-        """
-        run_logger = self.get_flow_run_logger(flow_run)
 
         try:
-            await self._check_flow_run(flow_run)
-        except (ValueError, ObjectNotFound):
+            await self._client.read_deployment(flow_run.deployment_id)
+        except ObjectNotFound:
             self._logger.exception(
-                (
-                    "Flow run %s did not pass checks and will not be submitted for"
-                    " execution"
-                ),
-                flow_run.id,
+                f"Deployment {flow_run.deployment_id} no longer exists. "
+                f"Flow run {flow_run.id} will not be submitted for"
+                " execution"
             )
             self._submitting_flow_run_ids.remove(flow_run.id)
+            await self._mark_flow_run_as_cancelled(
+                flow_run,
+                state_updates=dict(
+                    message=f"Deployment {flow_run.deployment_id} no longer exists, cancelled run."
+                ),
+            )
             return
 
         ready_to_submit = await self._propose_pending_state(flow_run)
prefect/workers/process.py CHANGED
@@ -16,7 +16,9 @@ checkout out the [Prefect docs](/concepts/work-pools/).
 
 from __future__ import annotations
 
+import contextlib
 import os
+import tempfile
 import threading
 from functools import partial
 from pathlib import Path
@@ -203,14 +205,23 @@ class ProcessWorker(
         configuration: ProcessJobConfiguration,
         task_status: Optional[anyio.abc.TaskStatus[int]] = None,
     ) -> ProcessWorkerResult:
-        process = await self._runner.execute_flow_run(
-            flow_run_id=flow_run.id,
-            command=configuration.command,
-            cwd=configuration.working_dir,
-            env=configuration.env,
-            stream_output=configuration.stream_output,
-            task_status=task_status,
+        if task_status is None:
+            task_status = anyio.TASK_STATUS_IGNORED
+
+        working_dir_ctx = (
+            tempfile.TemporaryDirectory(suffix="prefect")
+            if not configuration.working_dir
+            else contextlib.nullcontext(configuration.working_dir)
         )
+        with working_dir_ctx as working_dir:
+            process = await self._runner.execute_flow_run(
+                flow_run_id=flow_run.id,
+                command=configuration.command,
+                cwd=working_dir,
+                env=configuration.env,
+                stream_output=configuration.stream_output,
+                task_status=task_status,
+            )
 
         if process is None or process.returncode is None:
             raise RuntimeError("Failed to start flow run process.")
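The ProcessWorker change above falls back to a throwaway temporary directory when no working_dir is configured, rather than passing the unset value through. A standalone sketch of the context-manager selection, with configured_working_dir standing in for ProcessJobConfiguration.working_dir:

import contextlib
import tempfile
from typing import Optional

configured_working_dir: Optional[str] = None  # stand-in for ProcessJobConfiguration.working_dir

working_dir_ctx = (
    tempfile.TemporaryDirectory(suffix="prefect")
    if not configured_working_dir
    else contextlib.nullcontext(configured_working_dir)
)
with working_dir_ctx as working_dir:
    # The flow run process would be launched with cwd=working_dir; the temporary
    # directory is cleaned up automatically when the block exits.
    print(working_dir)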
prefect_client-{3.2.9 → 3.2.10}.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: prefect-client
-Version: 3.2.9
+Version: 3.2.10
 Summary: Workflow orchestration and management.
 Project-URL: Changelog, https://github.com/PrefectHQ/prefect/releases
 Project-URL: Documentation, https://docs.prefect.io
prefect_client-{3.2.9 → 3.2.10}.dist-info/RECORD RENAMED
@@ -1,7 +1,7 @@
 prefect/.prefectignore,sha256=awSprvKT0vI8a64mEOLrMxhxqcO-b0ERQeYpA2rNKVQ,390
 prefect/__init__.py,sha256=iCdcC5ZmeewikCdnPEP6YBAjPNV5dvfxpYCTpw30Hkw,3685
 prefect/__main__.py,sha256=WFjw3kaYJY6pOTA7WDOgqjsz8zUEUZHCcj3P5wyVa-g,66
-prefect/_build_info.py,sha256=YVNMCSqdfJZSZWqetAM6lnrtSoA_cqDrXyw6RGNF-3s,180
+prefect/_build_info.py,sha256=WbfStAdc-mwmaeLFXnTicNLoAChNnOcmuBnhT1CZ4cU,181
 prefect/_result_records.py,sha256=S6QmsODkehGVSzbMm6ig022PYbI6gNKz671p_8kBYx4,7789
 prefect/_waiters.py,sha256=Ia2ITaXdHzevtyWIgJoOg95lrEXQqNEOquHvw3T33UQ,9026
 prefect/agent.py,sha256=dPvG1jDGD5HSH7aM2utwtk6RaJ9qg13XjkA0lAIgQmY,287
@@ -14,7 +14,7 @@ prefect/exceptions.py,sha256=-nih8qqdxRm6CX-4yrqwePVh8Mcpvla_V6N_KbdJsIU,11593
 prefect/filesystems.py,sha256=v5YqGB4uXf9Ew2VuB9VCSkawvYMMVvEtZf7w1VmAmr8,18036
 prefect/flow_engine.py,sha256=gR44YU7aCAbHEqoMDdxL1SDrtS5Xx1Kzg3M7FWjHcvY,58967
 prefect/flow_runs.py,sha256=ocbV3ioSBIFoLqExpI2YLteyHdWpHB3t0lrbfl-Ni1E,17256
-prefect/flows.py,sha256=u21wlvkE3X0NAoN9UlqEC37WHFFM4ziMxAB9XZABDjs,108636
+prefect/flows.py,sha256=g7OVVer6m8nPtrR0MllWpa8_Iezh2G1maxvz8ZeNgbo,109029
 prefect/futures.py,sha256=ADd8ceFqX7A8Kw8aXaqvbYRG03uU82OEY30xrP5vrwY,23599
 prefect/main.py,sha256=hFeTTrr01qWKcRwZVEHVipyHEybS0VLTscFV6zG6GtY,2306
 prefect/plugins.py,sha256=FPRLR2mWVBMuOnlzeiTD9krlHONZH2rtYLD753JQDNQ,2516
@@ -137,7 +137,7 @@ prefect/concurrency/v1/sync.py,sha256=N_CHNkbV_eNQvDsJoJaehQo8H68MFlX6B1ObDZuYlT
 prefect/deployments/__init__.py,sha256=_wb7NxDKhq11z9MjYsPckmT3o6MRhGLRgCV9TmvYtew,1002
 prefect/deployments/base.py,sha256=KEc07W35yyzGJcV6GIZry8bKcNfvQk6JjJ99KKB6XpQ,11729
 prefect/deployments/deployments.py,sha256=K3Rgnpjxo_T8I8LMwlq24OKqZiZBTE8-YnPg-YGUStM,171
-prefect/deployments/flow_runs.py,sha256=VunxRsw4DyqVJHNjooDAPGJaGvSGucLX83SaxHO8ugU,7227
+prefect/deployments/flow_runs.py,sha256=sHHV642rPjncXoMYAxIldW6wz-dipXvZqhrLAEH8vsQ,7466
 prefect/deployments/runner.py,sha256=FUmBuuF5X8o2wGf8aIdqjijDJy3FWuNYPpwaKhrVaHs,54052
 prefect/deployments/schedules.py,sha256=2eL1-w8qXtwKVkgfUK7cuamwpKK3X6tN1QYTDa_gWxU,2190
 prefect/deployments/steps/__init__.py,sha256=Dlz9VqMRyG1Gal8dj8vfGpPr0LyQhZdvcciozkK8WoY,206
@@ -148,7 +148,7 @@ prefect/docker/__init__.py,sha256=z6wdc6UFfiBG2jb9Jk64uCWVM04JKVWeVyDWwuuon8M,52
 prefect/docker/docker_image.py,sha256=cdvUEokGJXZDugfCekfmrhhpzrxTEW-FvWa2kDs5tVM,3092
 prefect/events/__init__.py,sha256=GtKl2bE--pJduTxelH2xy7SadlLJmmis8WR1EYixhuA,2094
 prefect/events/actions.py,sha256=A7jS8bo4zWGnrt3QfSoQs0uYC1xfKXio3IfU0XtTb5s,9129
-prefect/events/clients.py,sha256=IZhSLkHPslDwtKtOBCSuePVV2ifHghADmnFxmkahSO0,26845
+prefect/events/clients.py,sha256=PiPWtqX_OSZCWD7ekCuS_toi7DQMVC7FbPmI5q8cf2I,27254
 prefect/events/filters.py,sha256=h9L6pukS9tD7Y8rGC3dt04KJINu0oJoti-flGLQTQQQ,8086
 prefect/events/related.py,sha256=Uh6-MoVENvP9dYYhjstb7eHesQUoDp-2PTMSP0nhPDI,6545
 prefect/events/utilities.py,sha256=4Bz-xiTOzi_EeDyIL9BzI7eMbRbBIIyayNvfO_BFyTw,2632
@@ -184,9 +184,9 @@ prefect/logging/highlighters.py,sha256=BCf_LNhFInIfGPqwuu8YVrGa4wVxNc4YXo2pYgftp
 prefect/logging/loggers.py,sha256=xkmHXsiuoPZZXcrrEgMA-ZQu0E-gW3tNVd4BIxWjnpM,12704
 prefect/logging/logging.yml,sha256=tT7gTyC4NmngFSqFkCdHaw7R0GPNPDDsTCGZQByiJAQ,3169
 prefect/runner/__init__.py,sha256=7U-vAOXFkzMfRz1q8Uv6Otsvc0OrPYLLP44srwkJ_8s,89
-prefect/runner/runner.py,sha256=_jGRxJlq8A-bqLdyYonRuTLHzDOKZ-ZMaOLV6vcTByE,63801
+prefect/runner/runner.py,sha256=8JixM-hU4Oo4AMaXhWGEISUdZCg0AZdPGkP4lXxbMu0,65436
 prefect/runner/server.py,sha256=WDDjCbnd2F_3LZBpVX2Y398xpmHvxjyBLKVHWkh5QxI,11240
-prefect/runner/storage.py,sha256=Uxx_7SPm-F0LR1LUq64cT-xHL2ofd37hHqLHtRYjGW0,27527
+prefect/runner/storage.py,sha256=14ABbc6xJfPLLichyBVebmoxTT5qPghrOm8cDEujalc,27604
 prefect/runner/submit.py,sha256=3Ey6H4XrhYhCII4AobpvzZf21vAunWlMu40zAjMC0gc,8353
 prefect/runner/utils.py,sha256=MLtoouDD6bh-JAIz0W3fMofKXEt0VfGsg6d8jf45OA0,3280
 prefect/runtime/__init__.py,sha256=JswiTlYRup2zXOYu8AqJ7czKtgcw9Kxo0tTbS6aWCqY,407
@@ -207,7 +207,7 @@ prefect/server/api/concurrency_limits.py,sha256=E5TB2cJPIZjnxnm1pGxUJnwMDz5CS58g
 prefect/server/api/concurrency_limits_v2.py,sha256=9JRAqc6E-MqpescewOU8aflPecsXCowCTxPvpYs2Plk,10097
 prefect/server/api/csrf_token.py,sha256=BwysSjQAhre7O0OY_LF3ZcIiO53FdMQroNT11Q6OcOM,1344
 prefect/server/api/dependencies.py,sha256=VujfcIGn41TGJxUunFHVabY5hE-6nY6uSHyhNFj8PdI,6634
-prefect/server/api/deployments.py,sha256=DWeZ7OIw7XGc8d2ldrI90kLV5LGqfUoMqdqXFYwkvNU,37706
+prefect/server/api/deployments.py,sha256=2C7pCY2renhyPDfi_IuzWoEhH620ERWLmQSTAnxyywM,37901
 prefect/server/api/events.py,sha256=3-Qdt6ORxFv3nLoogQqvd72zEulJSoAmcqZto2OULuk,9907
 prefect/server/api/flow_run_notification_policies.py,sha256=F8xNm6bgZTC3nFe9xCUJS4NlU9tLXZ8fShtJqmhT2m4,4828
 prefect/server/api/flow_run_states.py,sha256=lIdxVE9CqLgtDCuH9bTaKkzHNL81FPrr11liPzvONrw,1661
@@ -218,15 +218,15 @@ prefect/server/api/middleware.py,sha256=WkyuyeJIfo9Q0GAIVU5gO6yIGNVwoHwuBah5AB5o
 prefect/server/api/root.py,sha256=CeumFYIM_BDvPicJH9ry5PO_02PZTLeMqbLMGGTh90o,942
 prefect/server/api/run_history.py,sha256=FHepAgo1AYFeuh7rrAVzo_o3hu8Uc8-4DeH5aD5VGgw,5995
 prefect/server/api/saved_searches.py,sha256=UjoqLLe245QVIs6q5Vk4vdODCOoYzciEEjhi7B8sYCE,3233
-prefect/server/api/server.py,sha256=W85DhOyIyq5_Cf5ep-YrNd8vF2IB-JzKzqMppIox9Fs,32082
+prefect/server/api/server.py,sha256=iJX47L_ae4lskDqpOqFvYe3CwKykgw77HkHtqh0UODQ,32367
 prefect/server/api/task_run_states.py,sha256=e63OPpxPudv_CIB5oKr8Z8rfQ-Osjm9Zq0iHe8obnMo,1647
 prefect/server/api/task_runs.py,sha256=VY6MrolTi_vmiaE5my3WyRl5r256WKC7sfxxBE4Wnpw,12239
 prefect/server/api/task_workers.py,sha256=cFP9M8tsApDL_JpySn-x6fOYy9RnOeOgKiqOl_UVVQM,1042
 prefect/server/api/templates.py,sha256=92bLFfcahZUp5PVNTZPjl8uJSDj4ZYRTVdmTzZXkERg,1027
 prefect/server/api/validation.py,sha256=HxSNyH8yb_tI-kOfjXESRjJp6WQK6hYWBJsaBxUvY34,14490
 prefect/server/api/variables.py,sha256=SJaKuqInfQIEdMlJOemptBDN43KLFhlf_u9QwupDu7A,6185
-prefect/server/api/work_queues.py,sha256=iOl5CcZWHKPAD66ZK2NZTktSKimtRdHkh1deGH7SG-8,7558
-prefect/server/api/workers.py,sha256=VrtEyt-vIx77b_OvsR0CdtD5G0hEUF0XSZtJj1Sotl8,22572
+prefect/server/api/work_queues.py,sha256=wBcbmkZDaQ5Ddi9wc8tNs6kYG_FdNtYwTCR0VkhPj2o,7588
+prefect/server/api/workers.py,sha256=sGQzJED7E3uMP1jMdWAyB3d44xWBRtoHcTGY0oiEbm4,22602
 prefect/server/api/collections_data/views/aggregate-worker-metadata.json,sha256=gqrwGyylzBEzlFSPOJcMuUwdoK_zojpU0SZaBDgK5FE,79748
 prefect/server/api/static/prefect-logo-mark-gradient.png,sha256=ylRjJkI_JHCw8VbQasNnXQHwZW-sH-IQiUGSD3aWP1E,73430
 prefect/server/api/ui/__init__.py,sha256=TCXO4ZUZCqCbm2QoNvWNTErkzWiX2nSACuO-0Tiomvg,93
@@ -274,7 +274,7 @@ prefect/telemetry/bootstrap.py,sha256=tD6CXgMU3skgpKNscrRYZudwCWEEt6wjwMsulYpHeA
 prefect/telemetry/instrumentation.py,sha256=9JElKwEqKhErohjHNS4oAIXJRYCWVY0rfSnk4DUBna0,4454
 prefect/telemetry/logging.py,sha256=yn5D4D2GGRrAv0y8wlHPN7PZDmQucGjQT_YauK9M9Yo,727
 prefect/telemetry/processors.py,sha256=jw6j6LviOVxw3IBJe7cSjsxFk0zzY43jUmy6C9pcfCE,2272
-prefect/telemetry/run_telemetry.py,sha256=NcMVqOc_wQVGPlGpE8cfrz-lyCbkG1EOKpcbjsqMnGA,8264
+prefect/telemetry/run_telemetry.py,sha256=xOrZEFf-1cbSdKe3ZlB4spbyJ3sxYbnb2u2yZiUSSqM,8282
 prefect/telemetry/services.py,sha256=DxgNNDTeWNtHBtioX8cjua4IrCbTiJJdYecx-gugg-w,2358
 prefect/types/__init__.py,sha256=yBjKxiQmSC7jXoo0UNmM3KZil1NBFS-BWGPfwSEaoJo,4621
 prefect/types/_datetime.py,sha256=eOsg5gkm4bATLWvK4lmLqHByxQdER6gfTFyafzj-DLk,3343
@@ -312,13 +312,13 @@ prefect/utilities/schema_tools/__init__.py,sha256=At3rMHd2g_Em2P3_dFQlFgqR_EpBwr
 prefect/utilities/schema_tools/hydration.py,sha256=NkRhWkNfxxFmVGhNDfmxdK_xeKaEhs3a42q83Sg9cT4,9436
 prefect/utilities/schema_tools/validation.py,sha256=Wix26IVR-ZJ32-6MX2pHhrwm3reB-Q4iB6_phn85OKE,10743
 prefect/workers/__init__.py,sha256=EaM1F0RZ-XIJaGeTKLsXDnfOPHzVWk5bk0_c4BVS44M,64
-prefect/workers/base.py,sha256=1lPZRj2hBDP1-lS2oqdfUXg0iZJ4Y1qw5C6THDG9iB0,49842
+prefect/workers/base.py,sha256=_7_XRi-5v8jGqKelH12Lkf8CBUnXrq04JPlIMc6WhhM,49439
 prefect/workers/block.py,sha256=dPvG1jDGD5HSH7aM2utwtk6RaJ9qg13XjkA0lAIgQmY,287
 prefect/workers/cloud.py,sha256=dPvG1jDGD5HSH7aM2utwtk6RaJ9qg13XjkA0lAIgQmY,287
-prefect/workers/process.py,sha256=p3BJ3YQoiffqIeKXTPocD9dTFniMj24QnlFPiDM41So,8502
+prefect/workers/process.py,sha256=ozC-PGp1rjEG7CO1D7wZYM3oO_DeefccXy3rBLJK3kA,8892
 prefect/workers/server.py,sha256=SEuyScZ5nGm2OotdtbHjpvqJlTRVWCh29ND7FeL_fZA,1974
 prefect/workers/utilities.py,sha256=VfPfAlGtTuDj0-Kb8WlMgAuOfgXCdrGAnKMapPSBrwc,2483
-prefect_client-3.2.9.dist-info/METADATA,sha256=cmjLtGd6PRjFL-54S8bfSaDotSAqrn2tB3DPpgk5Bho,7192
-prefect_client-3.2.9.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-prefect_client-3.2.9.dist-info/licenses/LICENSE,sha256=MCxsn8osAkzfxKC4CC_dLcUkU8DZLkyihZ8mGs3Ah3Q,11357
-prefect_client-3.2.9.dist-info/RECORD,,
+prefect_client-3.2.10.dist-info/METADATA,sha256=NSATgsj4dT5OmRVYVVaSTKIcE3qPgwtbxKkrmLVG3dg,7193
+prefect_client-3.2.10.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+prefect_client-3.2.10.dist-info/licenses/LICENSE,sha256=MCxsn8osAkzfxKC4CC_dLcUkU8DZLkyihZ8mGs3Ah3Q,11357
+prefect_client-3.2.10.dist-info/RECORD,,