prefect-client 3.0.0rc20__py3-none-any.whl → 3.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. prefect/_internal/compatibility/deprecated.py +1 -1
  2. prefect/_internal/compatibility/migration.py +1 -1
  3. prefect/artifacts.py +1 -1
  4. prefect/blocks/core.py +3 -4
  5. prefect/blocks/notifications.py +31 -10
  6. prefect/blocks/system.py +4 -4
  7. prefect/blocks/webhook.py +11 -1
  8. prefect/client/cloud.py +2 -1
  9. prefect/client/orchestration.py +93 -21
  10. prefect/client/schemas/actions.py +2 -2
  11. prefect/client/schemas/objects.py +24 -6
  12. prefect/client/types/flexible_schedule_list.py +1 -1
  13. prefect/concurrency/asyncio.py +45 -6
  14. prefect/concurrency/services.py +1 -1
  15. prefect/concurrency/sync.py +21 -27
  16. prefect/concurrency/v1/asyncio.py +3 -0
  17. prefect/concurrency/v1/sync.py +4 -5
  18. prefect/context.py +11 -9
  19. prefect/deployments/runner.py +4 -3
  20. prefect/events/actions.py +6 -0
  21. prefect/exceptions.py +6 -0
  22. prefect/filesystems.py +5 -3
  23. prefect/flow_engine.py +22 -11
  24. prefect/flows.py +0 -2
  25. prefect/futures.py +2 -1
  26. prefect/locking/__init__.py +0 -0
  27. prefect/locking/filesystem.py +243 -0
  28. prefect/locking/memory.py +213 -0
  29. prefect/locking/protocol.py +122 -0
  30. prefect/logging/handlers.py +0 -2
  31. prefect/logging/loggers.py +0 -18
  32. prefect/logging/logging.yml +1 -0
  33. prefect/main.py +19 -5
  34. prefect/records/base.py +12 -0
  35. prefect/records/filesystem.py +10 -4
  36. prefect/records/memory.py +6 -0
  37. prefect/records/result_store.py +18 -6
  38. prefect/results.py +702 -205
  39. prefect/runner/runner.py +74 -5
  40. prefect/settings.py +11 -4
  41. prefect/states.py +40 -23
  42. prefect/task_engine.py +39 -37
  43. prefect/task_worker.py +6 -4
  44. prefect/tasks.py +24 -6
  45. prefect/transactions.py +116 -54
  46. prefect/utilities/callables.py +1 -3
  47. prefect/utilities/engine.py +16 -8
  48. prefect/utilities/importtools.py +1 -0
  49. prefect/utilities/urls.py +70 -12
  50. prefect/variables.py +34 -24
  51. prefect/workers/base.py +14 -6
  52. prefect/workers/process.py +1 -3
  53. {prefect_client-3.0.0rc20.dist-info → prefect_client-3.0.2.dist-info}/METADATA +2 -2
  54. {prefect_client-3.0.0rc20.dist-info → prefect_client-3.0.2.dist-info}/RECORD +57 -53
  55. {prefect_client-3.0.0rc20.dist-info → prefect_client-3.0.2.dist-info}/LICENSE +0 -0
  56. {prefect_client-3.0.0rc20.dist-info → prefect_client-3.0.2.dist-info}/WHEEL +0 -0
  57. {prefect_client-3.0.0rc20.dist-info → prefect_client-3.0.2.dist-info}/top_level.txt +0 -0
prefect/concurrency/asyncio.py CHANGED
@@ -6,6 +6,8 @@ import anyio
 import httpx
 import pendulum
 
+from prefect._internal.compatibility.deprecated import deprecated_parameter
+
 try:
     from pendulum import Interval
 except ImportError:
@@ -14,6 +16,8 @@ except ImportError:
 
 from prefect.client.orchestration import get_client
 from prefect.client.schemas.responses import MinimalConcurrencyLimitResponse
+from prefect.logging.loggers import get_run_logger
+from prefect.utilities.asyncutils import sync_compatible
 
 from .context import ConcurrencyContext
 from .events import (
@@ -36,8 +40,9 @@ async def concurrency(
     names: Union[str, List[str]],
     occupy: int = 1,
     timeout_seconds: Optional[float] = None,
-    create_if_missing: bool = True,
     max_retries: Optional[int] = None,
+    create_if_missing: Optional[bool] = None,
+    strict: bool = False,
 ) -> AsyncGenerator[None, None]:
     """A context manager that acquires and releases concurrency slots from the
     given concurrency limits.
@@ -47,11 +52,13 @@ async def concurrency(
         occupy: The number of slots to acquire and hold from each limit.
         timeout_seconds: The number of seconds to wait for the slots to be acquired before
             raising a `TimeoutError`. A timeout of `None` will wait indefinitely.
-        create_if_missing: Whether to create the concurrency limits if they do not exist.
         max_retries: The maximum number of retries to acquire the concurrency slots.
+        strict: A boolean specifying whether to raise an error if the concurrency limit does not exist.
+            Defaults to `False`.
 
     Raises:
         TimeoutError: If the slots are not acquired within the given timeout.
+        ConcurrencySlotAcquisitionError: If the concurrency limit does not exist and `strict` is `True`.
 
     Example:
     A simple example of using the async `concurrency` context manager:
@@ -78,6 +85,7 @@ async def concurrency(
         timeout_seconds=timeout_seconds,
         create_if_missing=create_if_missing,
         max_retries=max_retries,
+        strict=strict,
     )
     acquisition_time = pendulum.now("UTC")
     emitted_events = _emit_concurrency_acquisition_events(limits, occupy)
@@ -106,7 +114,8 @@ async def rate_limit(
     names: Union[str, List[str]],
     occupy: int = 1,
     timeout_seconds: Optional[float] = None,
-    create_if_missing: Optional[bool] = True,
+    create_if_missing: Optional[bool] = None,
+    strict: bool = False,
 ) -> None:
     """Block execution until an `occupy` number of slots of the concurrency
     limits given in `names` are acquired. Requires that all given concurrency
@@ -117,7 +126,12 @@ async def rate_limit(
         occupy: The number of slots to acquire and hold from each limit.
         timeout_seconds: The number of seconds to wait for the slots to be acquired before
             raising a `TimeoutError`. A timeout of `None` will wait indefinitely.
-        create_if_missing: Whether to create the concurrency limits if they do not exist.
+        strict: A boolean specifying whether to raise an error if the concurrency limit does not exist.
+            Defaults to `False`.
+
+    Raises:
+        TimeoutError: If the slots are not acquired within the given timeout.
+        ConcurrencySlotAcquisitionError: If the concurrency limit does not exist and `strict` is `True`.
     """
     if not names:
         return
@@ -130,17 +144,27 @@ async def rate_limit(
         mode="rate_limit",
         timeout_seconds=timeout_seconds,
         create_if_missing=create_if_missing,
+        strict=strict,
     )
     _emit_concurrency_acquisition_events(limits, occupy)
 
 
+@sync_compatible
+@deprecated_parameter(
+    name="create_if_missing",
+    start_date="Sep 2024",
+    end_date="Oct 2024",
+    when=lambda x: x is not None,
+    help="Limits must be explicitly created before acquiring concurrency slots; see `strict` if you want to enforce this behavior.",
+)
 async def _acquire_concurrency_slots(
     names: List[str],
     slots: int,
     mode: Union[Literal["concurrency"], Literal["rate_limit"]] = "concurrency",
     timeout_seconds: Optional[float] = None,
-    create_if_missing: Optional[bool] = True,
+    create_if_missing: Optional[bool] = None,
     max_retries: Optional[int] = None,
+    strict: bool = False,
 ) -> List[MinimalConcurrencyLimitResponse]:
     service = ConcurrencySlotAcquisitionService.instance(frozenset(names))
     future = service.send(
@@ -158,9 +182,24 @@ async def _acquire_concurrency_slots(
             f"Unable to acquire concurrency slots on {names!r}"
         ) from response_or_exception
 
-    return _response_to_minimal_concurrency_limit_response(response_or_exception)
+    retval = _response_to_minimal_concurrency_limit_response(response_or_exception)
+
+    if strict and not retval:
+        raise ConcurrencySlotAcquisitionError(
+            f"Concurrency limits {names!r} must be created before acquiring slots"
+        )
+    elif not retval:
+        try:
+            logger = get_run_logger()
+            logger.warning(
+                f"Concurrency limits {names!r} do not exist - skipping acquisition."
+            )
+        except Exception:
+            pass
+    return retval
 
 
+@sync_compatible
 async def _release_concurrency_slots(
     names: List[str], slots: int, occupancy_seconds: float
 ) -> List[MinimalConcurrencyLimitResponse]:
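
Taken together, these hunks drop implicit limit creation from the async concurrency API and add an opt-in `strict` mode. A minimal usage sketch of the new surface (the limit names and the flow are illustrative, and the limits are assumed to already exist):

    from prefect import flow
    from prefect.concurrency.asyncio import concurrency, rate_limit


    @flow
    async def process_batch(items: list[str]) -> None:
        # strict=True raises ConcurrencySlotAcquisitionError if the "database"
        # limit was never created; the default now logs a warning and skips
        # acquisition instead of creating the limit on the fly.
        async with concurrency("database", occupy=1, strict=True):
            ...

        # rate_limit blocks until a slot is free; passing create_if_missing
        # now triggers the deprecation warning added above.
        await rate_limit("api-calls")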
prefect/concurrency/services.py CHANGED
@@ -63,7 +63,7 @@ class ConcurrencySlotAcquisitionService(QueueService):
         slots: int,
         mode: str,
         timeout_seconds: Optional[float] = None,
-        create_if_missing: Optional[bool] = False,
+        create_if_missing: Optional[bool] = None,
         max_retries: Optional[int] = None,
     ) -> httpx.Response:
         with timeout_async(seconds=timeout_seconds):
prefect/concurrency/sync.py CHANGED
@@ -1,8 +1,5 @@
 from contextlib import contextmanager
 from typing import (
-    Any,
-    Awaitable,
-    Callable,
     Generator,
     List,
     Optional,
@@ -19,8 +16,6 @@ except ImportError:
     # pendulum < 3
     from pendulum.period import Period as Interval  # type: ignore
 
-from prefect._internal.concurrency.api import create_call, from_sync
-from prefect._internal.concurrency.event_loop import get_running_loop
 from prefect.client.schemas.responses import MinimalConcurrencyLimitResponse
 
 from .asyncio import (
@@ -40,8 +35,9 @@ def concurrency(
     names: Union[str, List[str]],
     occupy: int = 1,
     timeout_seconds: Optional[float] = None,
-    create_if_missing: bool = True,
     max_retries: Optional[int] = None,
+    strict: bool = False,
+    create_if_missing: Optional[bool] = None,
 ) -> Generator[None, None, None]:
     """A context manager that acquires and releases concurrency slots from the
     given concurrency limits.
@@ -51,11 +47,13 @@ def concurrency(
         occupy: The number of slots to acquire and hold from each limit.
         timeout_seconds: The number of seconds to wait for the slots to be acquired before
             raising a `TimeoutError`. A timeout of `None` will wait indefinitely.
-        create_if_missing: Whether to create the concurrency limits if they do not exist.
         max_retries: The maximum number of retries to acquire the concurrency slots.
+        strict: A boolean specifying whether to raise an error if the concurrency limit does not exist.
+            Defaults to `False`.
 
     Raises:
         TimeoutError: If the slots are not acquired within the given timeout.
+        ConcurrencySlotAcquisitionError: If the concurrency limit does not exist and `strict` is `True`.
 
     Example:
     A simple example of using the sync `concurrency` context manager:
@@ -76,13 +74,14 @@ def concurrency(
 
     names = names if isinstance(names, list) else [names]
 
-    limits: List[MinimalConcurrencyLimitResponse] = _call_async_function_from_sync(
-        _acquire_concurrency_slots,
+    limits: List[MinimalConcurrencyLimitResponse] = _acquire_concurrency_slots(
         names,
         occupy,
         timeout_seconds=timeout_seconds,
         create_if_missing=create_if_missing,
+        strict=strict,
         max_retries=max_retries,
+        _sync=True,
     )
     acquisition_time = pendulum.now("UTC")
     emitted_events = _emit_concurrency_acquisition_events(limits, occupy)
@@ -91,11 +90,11 @@ def concurrency(
         yield
     finally:
         occupancy_period = cast(Interval, pendulum.now("UTC") - acquisition_time)
-        _call_async_function_from_sync(
-            _release_concurrency_slots,
+        _release_concurrency_slots(
             names,
            occupy,
            occupancy_period.total_seconds(),
+            _sync=True,
        )
        _emit_concurrency_release_events(limits, occupy, emitted_events)
 
@@ -104,7 +103,8 @@ def rate_limit(
     names: Union[str, List[str]],
     occupy: int = 1,
     timeout_seconds: Optional[float] = None,
-    create_if_missing: Optional[bool] = True,
+    create_if_missing: Optional[bool] = None,
+    strict: bool = False,
 ) -> None:
     """Block execution until an `occupy` number of slots of the concurrency
     limits given in `names` are acquired. Requires that all given concurrency
@@ -115,31 +115,25 @@ def rate_limit(
         occupy: The number of slots to acquire and hold from each limit.
         timeout_seconds: The number of seconds to wait for the slots to be acquired before
             raising a `TimeoutError`. A timeout of `None` will wait indefinitely.
-        create_if_missing: Whether to create the concurrency limits if they do not exist.
+        strict: A boolean specifying whether to raise an error if the concurrency limit does not exist.
+            Defaults to `False`.
+
+    Raises:
+        TimeoutError: If the slots are not acquired within the given timeout.
+        ConcurrencySlotAcquisitionError: If the concurrency limit does not exist and `strict` is `True`.
     """
     if not names:
         return
 
     names = names if isinstance(names, list) else [names]
 
-    limits = _call_async_function_from_sync(
-        _acquire_concurrency_slots,
+    limits = _acquire_concurrency_slots(
         names,
         occupy,
         mode="rate_limit",
         timeout_seconds=timeout_seconds,
         create_if_missing=create_if_missing,
+        strict=strict,
+        _sync=True,
     )
     _emit_concurrency_acquisition_events(limits, occupy)
-
-
-def _call_async_function_from_sync(
-    fn: Callable[..., Awaitable[T]], *args: Any, **kwargs: Any
-) -> T:
-    loop = get_running_loop()
-    call = create_call(fn, *args, **kwargs)
-
-    if loop is not None:
-        return from_sync.call_soon_in_loop_thread(call).result()
-    else:
-        return call()  # type: ignore [return-value]
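
The sync module now reuses the `@sync_compatible` coroutines (invoked with `_sync=True`) instead of its own event-loop bridge. A sketch of the synchronous API after the change, under the same assumption that the limits were created beforehand:

    from prefect import task
    from prefect.concurrency.sync import concurrency, rate_limit


    @task
    def load_rows(rows: list[dict]) -> None:
        # Same strict semantics as the async version, from synchronous code.
        with concurrency("database", occupy=1, strict=True):
            ...


    def poll_api() -> None:
        # Blocks until a slot on the "api-calls" limit is available.
        rate_limit("api-calls")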
prefect/concurrency/v1/asyncio.py CHANGED
@@ -16,6 +16,7 @@ except ImportError:
     from pendulum.period import Period as Interval  # type: ignore
 
 from prefect.client.orchestration import get_client
+from prefect.utilities.asyncutils import sync_compatible
 
 from .context import ConcurrencyContext
 from .events import (
@@ -98,6 +99,7 @@ async def concurrency(
         _emit_concurrency_release_events(limits, emitted_events, task_run_id)
 
 
+@sync_compatible
 async def _acquire_concurrency_slots(
     names: List[str],
     task_run_id: UUID,
@@ -120,6 +122,7 @@ async def _acquire_concurrency_slots(
     return _response_to_concurrency_limit_response(response_or_exception)
 
 
+@sync_compatible
 async def _release_concurrency_slots(
     names: List[str],
     task_run_id: UUID,
prefect/concurrency/v1/sync.py CHANGED
@@ -12,7 +12,6 @@ from uuid import UUID
 import pendulum
 
 from ...client.schemas.responses import MinimalConcurrencyLimitResponse
-from ..sync import _call_async_function_from_sync
 
 try:
     from pendulum import Interval
@@ -70,11 +69,11 @@ def concurrency(
 
     names = names if isinstance(names, list) else [names]
 
-    limits: List[MinimalConcurrencyLimitResponse] = _call_async_function_from_sync(
-        _acquire_concurrency_slots,
+    limits: List[MinimalConcurrencyLimitResponse] = _acquire_concurrency_slots(
         names,
         timeout_seconds=timeout_seconds,
         task_run_id=task_run_id,
+        _sync=True,
     )
     acquisition_time = pendulum.now("UTC")
     emitted_events = _emit_concurrency_acquisition_events(limits, task_run_id)
@@ -83,10 +82,10 @@ def concurrency(
         yield
     finally:
         occupancy_period = cast(Interval, pendulum.now("UTC") - acquisition_time)
-        _call_async_function_from_sync(
-            _release_concurrency_slots,
+        _release_concurrency_slots(
             names,
             task_run_id,
             occupancy_period.total_seconds(),
+            _sync=True,
         )
         _emit_concurrency_release_events(limits, emitted_events, task_run_id)
prefect/context.py CHANGED
@@ -40,11 +40,10 @@ from prefect.client.orchestration import PrefectClient, SyncPrefectClient, get_c
 from prefect.client.schemas import FlowRun, TaskRun
 from prefect.events.worker import EventsWorker
 from prefect.exceptions import MissingContextError
-from prefect.results import ResultFactory
+from prefect.results import ResultStore, get_default_persist_setting
 from prefect.settings import PREFECT_HOME, Profile, Settings
 from prefect.states import State
 from prefect.task_runners import TaskRunner
-from prefect.utilities.asyncutils import run_coro_as_sync
 from prefect.utilities.services import start_client_metrics_server
 
 T = TypeVar("T")
@@ -95,20 +94,15 @@ def hydrated_context(
                 flow_run_context = FlowRunContext(
                     **flow_run_context,
                     client=client,
-                    result_factory=run_coro_as_sync(ResultFactory.from_flow(flow)),
                     task_runner=task_runner,
                     detached=True,
                 )
                 stack.enter_context(flow_run_context)
             # Set up parent task run context
             if parent_task_run_context := serialized_context.get("task_run_context"):
-                parent_task = parent_task_run_context["task"]
                 task_run_context = TaskRunContext(
                     **parent_task_run_context,
                     client=client,
-                    result_factory=run_coro_as_sync(
-                        ResultFactory.from_autonomous_task(parent_task)
-                    ),
                 )
                 stack.enter_context(task_run_context)
             # Set up tags context
@@ -216,6 +210,7 @@ class SyncClientContext(ContextModel):
         self._context_stack += 1
         if self._context_stack == 1:
             self.client.__enter__()
+            self.client.raise_for_api_version_mismatch()
             return super().__enter__()
         else:
             return self
@@ -273,6 +268,7 @@ class AsyncClientContext(ContextModel):
         self._context_stack += 1
         if self._context_stack == 1:
             await self.client.__aenter__()
+            await self.client.raise_for_api_version_mismatch()
             return super().__enter__()
         else:
             return self
@@ -346,7 +342,8 @@ class EngineContext(RunContext):
     detached: bool = False
 
     # Result handling
-    result_factory: ResultFactory
+    result_store: ResultStore
+    persist_result: bool = Field(default_factory=get_default_persist_setting)
 
     # Counter for task calls allowing unique
     task_run_dynamic_keys: Dict[str, int] = Field(default_factory=dict)
@@ -375,6 +372,8 @@ class EngineContext(RunContext):
                 "log_prints",
                 "start_time",
                 "input_keyset",
+                "result_store",
+                "persist_result",
             },
             exclude_unset=True,
         )
@@ -399,7 +398,8 @@ class TaskRunContext(RunContext):
     parameters: Dict[str, Any]
 
     # Result handling
-    result_factory: ResultFactory
+    result_store: ResultStore
+    persist_result: bool = Field(default_factory=get_default_persist_setting)
 
     __var__ = ContextVar("task_run")
 
@@ -412,6 +412,8 @@
                 "log_prints",
                 "start_time",
                 "input_keyset",
+                "result_store",
+                "persist_result",
             },
             exclude_unset=True,
         )
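
For code that introspects the run context, the renamed fields look roughly like this; a hedged sketch, assuming `FlowRunContext` still aliases `EngineContext` as in earlier 3.0 releases and that the flow body is purely illustrative:

    from prefect import flow
    from prefect.context import FlowRunContext


    @flow(persist_result=True)
    def report() -> str:
        ctx = FlowRunContext.get()
        # The context now carries a ResultStore and a persist_result flag in
        # place of the old ResultFactory.
        print(type(ctx.result_store).__name__, ctx.persist_result)
        return "done"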
prefect/deployments/runner.py CHANGED
@@ -462,6 +462,7 @@ class RunnerDeployment(BaseModel):
         paused: Whether or not to set this deployment as paused.
         schedules: A list of schedule objects defining when to execute runs of this deployment.
             Used to define multiple schedules or additional scheduling options like `timezone`.
+        concurrency_limit: The maximum number of concurrent runs this deployment will allow.
         triggers: A list of triggers that should kick of a run of this flow.
         parameters: A dictionary of default parameter values to pass to runs of this flow.
         description: A description for the created deployment. Defaults to the flow's
@@ -508,7 +509,7 @@ class RunnerDeployment(BaseModel):
         no_file_location_error = (
             "Flows defined interactively cannot be deployed. Check out the"
             " quickstart guide for help getting started:"
-            " https://docs.prefect.io/latest/getting-started/quickstart"
+            " https://docs.prefect.io/latest/get-started/quickstart"
         )
         ## first see if an entrypoint can be determined
         flow_file = getattr(flow, "__globals__", {}).get("__file__")
@@ -851,14 +852,14 @@ async def deploy(
             " or specify a remote storage location for the flow with `.from_source`."
             " If you are attempting to deploy a flow to a local process work pool,"
             " consider using `flow.serve` instead. See the documentation for more"
-            " information: https://docs.prefect.io/latest/concepts/flows/#serving-a-flow"
+            " information: https://docs.prefect.io/latest/deploy/run-flows-in-local-processes"
         )
     elif work_pool.type == "process" and not ignore_warnings:
         console.print(
             "Looks like you're deploying to a process work pool. If you're creating a"
             " deployment for local development, calling `.serve` on your flow is a great"
             " way to get started. See the documentation for more information:"
-            " https://docs.prefect.io/latest/concepts/flows/#serving-a-flow. "
+            " https://docs.prefect.io/latest/deploy/run-flows-in-local-processes "
             " Set `ignore_warnings=True` to suppress this message.",
            style="yellow",
        )
prefect/events/actions.py CHANGED
@@ -113,6 +113,12 @@ class CancelFlowRun(Action):
     type: Literal["cancel-flow-run"] = "cancel-flow-run"
 
 
+class ResumeFlowRun(Action):
+    """Resumes a flow run associated with the trigger"""
+
+    type: Literal["resume-flow-run"] = "resume-flow-run"
+
+
 class SuspendFlowRun(Action):
     """Suspends a flow run associated with the trigger"""
 
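
A small sketch of the new action model; wiring it into an automation is out of scope for this diff:

    from prefect.events.actions import ResumeFlowRun

    # The new action sits alongside CancelFlowRun and SuspendFlowRun and
    # serializes with its own literal type tag.
    action = ResumeFlowRun()
    assert action.type == "resume-flow-run"
    print(action.model_dump())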
prefect/exceptions.py CHANGED
@@ -418,3 +418,9 @@ class SerializationError(PrefectException):
     """
     Raised when an object cannot be serialized.
     """
+
+
+class ConfigurationError(PrefectException):
+    """
+    Raised when a configuration is invalid.
+    """
prefect/filesystems.py CHANGED
@@ -84,7 +84,7 @@ class LocalFileSystem(WritableFileSystem, WritableDeploymentStorage):
     _block_type_name = "Local File System"
     _logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/ad39089fa66d273b943394a68f003f7a19aa850e-48x48.png"
     _documentation_url = (
-        "https://docs.prefect.io/concepts/filesystems/#local-filesystem"
+        "https://docs.prefect.io/latest/develop/results#specifying-a-default-filesystem"
     )
 
     basepath: Optional[str] = Field(
@@ -260,7 +260,7 @@ class RemoteFileSystem(WritableFileSystem, WritableDeploymentStorage):
     _block_type_name = "Remote File System"
     _logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/e86b41bc0f9c99ba9489abeee83433b43d5c9365-48x48.png"
     _documentation_url = (
-        "https://docs.prefect.io/concepts/filesystems/#remote-file-system"
+        "https://docs.prefect.io/latest/develop/results#specifying-a-default-filesystem"
     )
 
     basepath: str = Field(
@@ -433,7 +433,9 @@ class SMB(WritableFileSystem, WritableDeploymentStorage):
 
     _block_type_name = "SMB"
     _logo_url = "https://cdn.sanity.io/images/3ugk85nk/production/3f624663f7beb97d011d011bffd51ecf6c499efc-195x195.png"
-    _documentation_url = "https://docs.prefect.io/concepts/filesystems/#smb"
+    _documentation_url = (
+        "https://docs.prefect.io/latest/develop/results#specifying-a-default-filesystem"
+    )
 
     share_path: str = Field(
         default=...,
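
These blocks now point their documentation at the results docs. A hedged sketch of the pattern that page covers, assuming a saved `LocalFileSystem` block can be referenced by its `local-file-system/<name>` slug as in earlier 3.0 releases (block name and path are illustrative):

    from prefect import flow
    from prefect.filesystems import LocalFileSystem

    # Save a filesystem block once, then reference it as result storage.
    LocalFileSystem(basepath="/tmp/prefect-results").save("demo-results", overwrite=True)


    @flow(result_storage="local-file-system/demo-results", persist_result=True)
    def add(x: int, y: int) -> int:
        return x + y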
prefect/flow_engine.py CHANGED
@@ -47,7 +47,12 @@ from prefect.logging.loggers import (
     get_run_logger,
     patch_print,
 )
-from prefect.results import BaseResult, ResultFactory
+from prefect.results import (
+    BaseResult,
+    ResultStore,
+    get_result_store,
+    should_persist_result,
+)
 from prefect.settings import PREFECT_DEBUG_MODE
 from prefect.states import (
     Failed,
@@ -202,7 +207,9 @@ class FlowRunEngine(Generic[P, R]):
             self.handle_exception(
                 exc,
                 msg=message,
-                result_factory=run_coro_as_sync(ResultFactory.from_flow(self.flow)),
+                result_store=get_result_store().update_for_flow(
+                    self.flow, _sync=True
+                ),
             )
             self.short_circuit = True
             self.call_hooks()
@@ -261,15 +268,15 @@ class FlowRunEngine(Generic[P, R]):
         return _result
 
     def handle_success(self, result: R) -> R:
-        result_factory = getattr(FlowRunContext.get(), "result_factory", None)
-        if result_factory is None:
-            raise ValueError("Result factory is not set")
+        result_store = getattr(FlowRunContext.get(), "result_store", None)
+        if result_store is None:
+            raise ValueError("Result store is not set")
         resolved_result = resolve_futures_to_states(result)
         terminal_state = run_coro_as_sync(
             return_value_to_state(
                 resolved_result,
-                result_factory=result_factory,
-                write_result=True,
+                result_store=result_store,
+                write_result=should_persist_result(),
             )
         )
         self.set_state(terminal_state)
@@ -280,15 +287,14 @@ class FlowRunEngine(Generic[P, R]):
         self,
         exc: Exception,
         msg: Optional[str] = None,
-        result_factory: Optional[ResultFactory] = None,
+        result_store: Optional[ResultStore] = None,
     ) -> State:
         context = FlowRunContext.get()
         terminal_state = run_coro_as_sync(
             exception_to_failed_state(
                 exc,
                 message=msg or "Flow run encountered an exception:",
-                result_factory=result_factory
-                or getattr(context, "result_factory", None),
+                result_store=result_store or getattr(context, "result_store", None),
                 write_result=True,
             )
         )
@@ -506,8 +512,13 @@ class FlowRunEngine(Generic[P, R]):
                     flow_run=self.flow_run,
                     parameters=self.parameters,
                     client=client,
-                    result_factory=run_coro_as_sync(ResultFactory.from_flow(self.flow)),
+                    result_store=get_result_store().update_for_flow(
+                        self.flow, _sync=True
+                    ),
                     task_runner=task_runner,
+                    persist_result=self.flow.persist_result
+                    if self.flow.persist_result is not None
+                    else should_persist_result(),
                 )
             )
             stack.enter_context(ConcurrencyContextV1())
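
The engine now resolves its result configuration through `prefect.results` helpers rather than building a `ResultFactory`. A rough sketch of what those helpers report from inside a run (behavior outside a run context is not shown in this diff):

    from prefect import flow
    from prefect.results import get_result_store, should_persist_result


    @flow
    def inspect_results() -> None:
        # Inside a run, get_result_store() returns the store the engine placed
        # on the context; should_persist_result() reflects the effective default.
        store = get_result_store()
        print(type(store).__name__, should_persist_result())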
prefect/flows.py CHANGED
@@ -1032,8 +1032,6 @@ class Flow(Generic[P, R]):
1032
1032
  if not isinstance(storage, LocalStorage):
1033
1033
  storage.set_base_path(Path(tmpdir))
1034
1034
  await storage.pull_code()
1035
- storage.set_base_path(Path(tmpdir))
1036
- await storage.pull_code()
1037
1035
 
1038
1036
  full_entrypoint = str(storage.destination / entrypoint)
1039
1037
  flow: Flow = await from_async.wait_for_call_in_new_thread(
prefect/futures.py CHANGED
@@ -179,7 +179,8 @@ class PrefectConcurrentFuture(PrefectWrappedFuture[R, concurrent.futures.Future]
             local_logger = logger
             local_logger.warning(
                 "A future was garbage collected before it resolved."
-                " Please call `.wait()` or `.result()` on futures to ensure they resolve.",
+                " Please call `.wait()` or `.result()` on futures to ensure they resolve."
+                "\nSee https://docs.prefect.io/latest/develop/task-runners for more details.",
             )
 
 
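
The expanded warning points at the task runners docs; resolving futures explicitly avoids it. A brief sketch:

    from prefect import flow, task


    @task
    def double(x: int) -> int:
        return x * 2


    @flow
    def pipeline() -> list[int]:
        futures = [double.submit(n) for n in range(3)]
        # Calling .result() (or .wait()) keeps futures from being garbage
        # collected before they resolve.
        return [f.result() for f in futures]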