prefect-client 3.0.0rc20__py3-none-any.whl → 3.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (57)
  1. prefect/_internal/compatibility/deprecated.py +1 -1
  2. prefect/_internal/compatibility/migration.py +1 -1
  3. prefect/artifacts.py +1 -1
  4. prefect/blocks/core.py +3 -4
  5. prefect/blocks/notifications.py +31 -10
  6. prefect/blocks/system.py +4 -4
  7. prefect/blocks/webhook.py +11 -1
  8. prefect/client/cloud.py +2 -1
  9. prefect/client/orchestration.py +93 -21
  10. prefect/client/schemas/actions.py +2 -2
  11. prefect/client/schemas/objects.py +24 -6
  12. prefect/client/types/flexible_schedule_list.py +1 -1
  13. prefect/concurrency/asyncio.py +45 -6
  14. prefect/concurrency/services.py +1 -1
  15. prefect/concurrency/sync.py +21 -27
  16. prefect/concurrency/v1/asyncio.py +3 -0
  17. prefect/concurrency/v1/sync.py +4 -5
  18. prefect/context.py +11 -9
  19. prefect/deployments/runner.py +4 -3
  20. prefect/events/actions.py +6 -0
  21. prefect/exceptions.py +6 -0
  22. prefect/filesystems.py +5 -3
  23. prefect/flow_engine.py +22 -11
  24. prefect/flows.py +0 -2
  25. prefect/futures.py +2 -1
  26. prefect/locking/__init__.py +0 -0
  27. prefect/locking/filesystem.py +243 -0
  28. prefect/locking/memory.py +213 -0
  29. prefect/locking/protocol.py +122 -0
  30. prefect/logging/handlers.py +0 -2
  31. prefect/logging/loggers.py +0 -18
  32. prefect/logging/logging.yml +1 -0
  33. prefect/main.py +19 -5
  34. prefect/records/base.py +12 -0
  35. prefect/records/filesystem.py +10 -4
  36. prefect/records/memory.py +6 -0
  37. prefect/records/result_store.py +18 -6
  38. prefect/results.py +702 -205
  39. prefect/runner/runner.py +74 -5
  40. prefect/settings.py +11 -4
  41. prefect/states.py +40 -23
  42. prefect/task_engine.py +39 -37
  43. prefect/task_worker.py +6 -4
  44. prefect/tasks.py +24 -6
  45. prefect/transactions.py +116 -54
  46. prefect/utilities/callables.py +1 -3
  47. prefect/utilities/engine.py +16 -8
  48. prefect/utilities/importtools.py +1 -0
  49. prefect/utilities/urls.py +70 -12
  50. prefect/variables.py +34 -24
  51. prefect/workers/base.py +14 -6
  52. prefect/workers/process.py +1 -3
  53. {prefect_client-3.0.0rc20.dist-info → prefect_client-3.0.2.dist-info}/METADATA +2 -2
  54. {prefect_client-3.0.0rc20.dist-info → prefect_client-3.0.2.dist-info}/RECORD +57 -53
  55. {prefect_client-3.0.0rc20.dist-info → prefect_client-3.0.2.dist-info}/LICENSE +0 -0
  56. {prefect_client-3.0.0rc20.dist-info → prefect_client-3.0.2.dist-info}/WHEEL +0 -0
  57. {prefect_client-3.0.0rc20.dist-info → prefect_client-3.0.2.dist-info}/top_level.txt +0 -0
prefect/runner/runner.py CHANGED
@@ -66,6 +66,11 @@ from prefect.client.schemas.filters import (
 )
 from prefect.client.schemas.objects import Flow as APIFlow
 from prefect.client.schemas.objects import FlowRun, State, StateType
+from prefect.concurrency.asyncio import (
+    AcquireConcurrencySlotTimeoutError,
+    ConcurrencySlotAcquisitionError,
+    concurrency,
+)
 from prefect.events import DeploymentTriggerTypes, TriggerTypes
 from prefect.events.related import tags_as_related_resources
 from prefect.events.schemas.events import RelatedResource
@@ -81,7 +86,12 @@ from prefect.settings import (
     PREFECT_RUNNER_SERVER_ENABLE,
     get_current_settings,
 )
-from prefect.states import Crashed, Pending, exception_to_failed_state
+from prefect.states import (
+    AwaitingConcurrencySlot,
+    Crashed,
+    Pending,
+    exception_to_failed_state,
+)
 from prefect.types.entrypoint import EntrypointType
 from prefect.utilities.asyncutils import (
     asyncnullcontext,
@@ -226,6 +236,7 @@ class Runner:
         rrule: Optional[Union[Iterable[str], str]] = None,
         paused: Optional[bool] = None,
         schedules: Optional["FlexibleScheduleList"] = None,
+        concurrency_limit: Optional[int] = None,
         parameters: Optional[dict] = None,
         triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None,
         description: Optional[str] = None,
@@ -248,6 +259,10 @@ class Runner:
                 or a timedelta object. If a number is given, it will be interpreted as seconds.
             cron: A cron schedule of when to execute runs of this flow.
             rrule: An rrule schedule of when to execute runs of this flow.
+            paused: Whether or not to set the created deployment as paused.
+            schedules: A list of schedule objects defining when to execute runs of this flow.
+                Used to define multiple schedules or additional scheduling options like `timezone`.
+            concurrency_limit: The maximum number of concurrent runs of this flow to allow.
             triggers: A list of triggers that should kick of a run of this flow.
             parameters: A dictionary of default parameter values to pass to runs of this flow.
             description: A description for the created deployment. Defaults to the flow's
@@ -280,6 +295,7 @@ class Runner:
             version=version,
             enforce_parameter_schema=enforce_parameter_schema,
             entrypoint_type=entrypoint_type,
+            concurrency_limit=concurrency_limit,
         )
         return await self.add_deployment(deployment)

@@ -959,6 +975,7 @@ class Runner:
         """
         submittable_flow_runs = flow_run_response
         submittable_flow_runs.sort(key=lambda run: run.next_scheduled_start_time)
+
         for i, flow_run in enumerate(submittable_flow_runs):
             if flow_run.id in self._submitting_flow_run_ids:
                 continue
@@ -1025,12 +1042,44 @@ class Runner:
     ) -> Union[Optional[int], Exception]:
         run_logger = self._get_flow_run_logger(flow_run)

+        if flow_run.deployment_id:
+            deployment = await self._client.read_deployment(flow_run.deployment_id)
+            if deployment and deployment.concurrency_limit:
+                limit_name = f"deployment:{deployment.id}"
+                concurrency_ctx = concurrency
+
+                # ensure that the global concurrency limit is available
+                # and up-to-date before attempting to acquire a slot
+                await self._client.upsert_global_concurrency_limit_by_name(
+                    limit_name, deployment.concurrency_limit
+                )
+            else:
+                limit_name = ""
+                concurrency_ctx = asyncnullcontext
+
         try:
-            status_code = await self._run_process(
-                flow_run=flow_run,
-                task_status=task_status,
-                entrypoint=entrypoint,
+            async with concurrency_ctx(limit_name, max_retries=0, strict=True):
+                status_code = await self._run_process(
+                    flow_run=flow_run,
+                    task_status=task_status,
+                    entrypoint=entrypoint,
+                )
+        except (
+            AcquireConcurrencySlotTimeoutError,
+            ConcurrencySlotAcquisitionError,
+        ) as exc:
+            self._logger.info(
+                (
+                    "Deployment %s reached its concurrency limit when attempting to execute flow run %s. Will attempt to execute later."
+                ),
+                flow_run.deployment_id,
+                flow_run.name,
             )
+            await self._propose_scheduled_state(flow_run)
+
+            if not task_status._future.done():
+                task_status.started(exc)
+            return exc
         except Exception as exc:
             if not task_status._future.done():
                 # This flow run was being submitted and did not start successfully
@@ -1116,6 +1165,26 @@ class Runner:
                 exc_info=True,
             )

+    async def _propose_scheduled_state(self, flow_run: "FlowRun") -> None:
+        run_logger = self._get_flow_run_logger(flow_run)
+        try:
+            state = await propose_state(
+                self._client,
+                AwaitingConcurrencySlot(),
+                flow_run_id=flow_run.id,
+            )
+            self._logger.info(f"Flow run {flow_run.id} now has state {state.name}")
+        except Abort as exc:
+            run_logger.info(
+                (
+                    f"Aborted rescheduling of flow run '{flow_run.id}'. "
+                    f"Server sent an abort signal: {exc}"
+                ),
+            )
+            pass
+        except Exception:
+            run_logger.exception(f"Failed to update state of flow run '{flow_run.id}'")
+
     async def _propose_crashed_state(self, flow_run: "FlowRun", message: str) -> None:
         run_logger = self._get_flow_run_logger(flow_run)
         try:
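Note: for context, a minimal usage sketch of the new deployment-level concurrency limit. This is not part of the diff; it assumes the method above is `Runner.add_flow` (the docstring shown matches it) and that `Runner` accepts a `name`:

    import asyncio

    from prefect import flow
    from prefect.runner.runner import Runner


    @flow
    def etl():
        print("running")


    async def main():
        runner = Runner(name="demo-runner")
        # concurrency_limit is the new argument; per the diff above, a global
        # limit named "deployment:<deployment id>" is upserted and a slot is
        # acquired before the flow run process is spawned.
        await runner.add_flow(etl, name="etl-deployment", concurrency_limit=2)
        await runner.start()


    asyncio.run(main())

Runs that cannot acquire a slot are moved to an `AwaitingConcurrencySlot` state via the `_propose_scheduled_state` helper above and retried later.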
prefect/settings.py CHANGED
@@ -637,7 +637,7 @@ PREFECT_API_KEY = Setting(
 )
 """API key used to authenticate with a the Prefect API. Defaults to `None`."""

-PREFECT_API_ENABLE_HTTP2 = Setting(bool, default=True)
+PREFECT_API_ENABLE_HTTP2 = Setting(bool, default=False)
 """
 If true, enable support for HTTP/2 for communicating with an API.

@@ -2191,13 +2191,20 @@ def _write_profiles_to(path: Path, profiles: ProfilesCollection) -> None:
     return path.write_text(toml.dumps(profiles.to_dict()))


-def load_profiles() -> ProfilesCollection:
+def load_profiles(include_defaults: bool = True) -> ProfilesCollection:
     """
-    Load all profiles from the default and current profile paths.
+    Load profiles from the current profile path. Optionally include profiles from the
+    default profile path.
     """
-    profiles = _read_profiles_from(DEFAULT_PROFILES_PATH)
+    default_profiles = _read_profiles_from(DEFAULT_PROFILES_PATH)
+
+    if not include_defaults:
+        if not PREFECT_PROFILES_PATH.value().exists():
+            return ProfilesCollection([])
+        return _read_profiles_from(PREFECT_PROFILES_PATH.value())

     user_profiles_path = PREFECT_PROFILES_PATH.value()
+    profiles = default_profiles
     if user_profiles_path.exists():
         user_profiles = _read_profiles_from(user_profiles_path)

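Note: two behavior changes here. HTTP/2 is now opt-in (set `PREFECT_API_ENABLE_HTTP2=true` to restore the previous default), and `load_profiles` can skip the bundled default profiles. An illustrative sketch of the new flag (not part of the diff):

    from prefect.settings import load_profiles

    # Old behavior, still the default: bundled defaults merged with
    # user-defined profiles.
    all_profiles = load_profiles()

    # New: only profiles written to PREFECT_PROFILES_PATH; an empty
    # ProfilesCollection is returned if that file does not exist.
    user_profiles = load_profiles(include_defaults=False)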
prefect/states.py CHANGED
@@ -25,7 +25,13 @@ from prefect.exceptions import (
     UnfinishedRun,
 )
 from prefect.logging.loggers import get_logger, get_run_logger
-from prefect.results import BaseResult, R, ResultFactory
+from prefect.results import (
+    BaseResult,
+    R,
+    ResultRecord,
+    ResultRecordMetadata,
+    ResultStore,
+)
 from prefect.settings import PREFECT_ASYNC_FETCH_STATE_RESULT
 from prefect.utilities.annotations import BaseAnnotation
 from prefect.utilities.asyncutils import in_async_main_thread, sync_compatible
@@ -92,7 +98,11 @@ async def _get_state_result_data_with_retries(

     for i in range(1, max_attempts + 1):
         try:
-            return await state.data.get()
+            if isinstance(state.data, ResultRecordMetadata):
+                record = await ResultRecord._from_metadata(state.data)
+                return record.result
+            else:
+                return await state.data.get()
         except Exception as e:
             if i == max_attempts:
                 raise
@@ -127,10 +137,12 @@ async def _get_state_result(
     ):
         raise await get_state_exception(state)

-    if isinstance(state.data, BaseResult):
+    if isinstance(state.data, (BaseResult, ResultRecordMetadata)):
         result = await _get_state_result_data_with_retries(
             state, retry_result_failure=retry_result_failure
         )
+    elif isinstance(state.data, ResultRecord):
+        result = state.data.result

     elif state.data is None:
         if state.is_failed() or state.is_crashed() or state.is_cancelled():
@@ -167,7 +179,7 @@ def format_exception(exc: BaseException, tb: TracebackType = None) -> str:

 async def exception_to_crashed_state(
     exc: BaseException,
-    result_factory: Optional[ResultFactory] = None,
+    result_store: Optional[ResultStore] = None,
 ) -> State:
     """
     Takes an exception that occurs _outside_ of user code and converts it to a
@@ -206,8 +218,8 @@ async def exception_to_crashed_state(
             f" {format_exception(exc)}"
         )

-    if result_factory:
-        data = await result_factory.create_result(exc)
+    if result_store:
+        data = result_store.create_result_record(exc)
     else:
         # Attach the exception for local usage, will not be available when retrieved
         # from the API
@@ -218,7 +230,7 @@

 async def exception_to_failed_state(
     exc: Optional[BaseException] = None,
-    result_factory: Optional[ResultFactory] = None,
+    result_store: Optional[ResultStore] = None,
     write_result: bool = False,
     **kwargs,
 ) -> State:
@@ -239,11 +251,11 @@
     else:
         pass

-    if result_factory:
-        data = await result_factory.create_result(exc)
+    if result_store:
+        data = result_store.create_result_record(exc)
         if write_result:
             try:
-                await data.write()
+                await result_store.apersist_result_record(data)
             except Exception as exc:
                 local_logger.warning(
                     "Failed to write result: %s Execution will continue, but the result has not been written",
@@ -270,7 +282,7 @@

 async def return_value_to_state(
     retval: R,
-    result_factory: ResultFactory,
+    result_store: ResultStore,
     key: Optional[str] = None,
     expiration: Optional[datetime.datetime] = None,
     write_result: bool = False,
@@ -307,23 +319,23 @@
         and not retval.state_details.task_run_id
     ):
         state = retval
-        # Unless the user has already constructed a result explicitly, use the factory
+        # Unless the user has already constructed a result explicitly, use the store
         # to update the data to the correct type
-        if not isinstance(state.data, BaseResult):
-            result = await result_factory.create_result(
+        if not isinstance(state.data, (BaseResult, ResultRecord, ResultRecordMetadata)):
+            result_record = result_store.create_result_record(
                 state.data,
                 key=key,
                 expiration=expiration,
             )
             if write_result:
                 try:
-                    await result.write()
+                    await result_store.apersist_result_record(result_record)
                 except Exception as exc:
                     local_logger.warning(
                         "Encountered an error while persisting result: %s Execution will continue, but the result has not been persisted",
                         exc,
                     )
-            state.data = result
+            state.data = result_record
         return state

     # Determine a new state from the aggregate of contained states
@@ -359,14 +371,14 @@
     # TODO: We may actually want to set the data to a `StateGroup` object and just
     # allow it to be unpacked into a tuple and such so users can interact with
     # it
-    result = await result_factory.create_result(
+    result_record = result_store.create_result_record(
         retval,
         key=key,
         expiration=expiration,
     )
     if write_result:
         try:
-            await result.write()
+            await result_store.apersist_result_record(result_record)
         except Exception as exc:
             local_logger.warning(
                 "Encountered an error while persisting result: %s Execution will continue, but the result has not been persisted",
@@ -375,7 +387,7 @@
     return State(
         type=new_state_type,
         message=message,
-        data=result,
+        data=result_record,
     )

     # Generators aren't portable, implicitly convert them to a list.
@@ -385,23 +397,23 @@
     data = retval

     # Otherwise, they just gave data and this is a completed retval
-    if isinstance(data, BaseResult):
+    if isinstance(data, (BaseResult, ResultRecord)):
         return Completed(data=data)
     else:
-        result = await result_factory.create_result(
+        result_record = result_store.create_result_record(
             data,
             key=key,
             expiration=expiration,
         )
         if write_result:
             try:
-                await result.write()
+                await result_store.apersist_result_record(result_record)
             except Exception as exc:
                 local_logger.warning(
                     "Encountered an error while persisting result: %s Execution will continue, but the result has not been persisted",
                     exc,
                 )
-        return Completed(data=result)
+        return Completed(data=result_record)


 @sync_compatible
@@ -442,6 +454,11 @@ async def get_state_exception(state: State) -> BaseException:

     if isinstance(state.data, BaseResult):
         result = await _get_state_result_data_with_retries(state)
+    elif isinstance(state.data, ResultRecord):
+        result = state.data.result
+    elif isinstance(state.data, ResultRecordMetadata):
+        record = await ResultRecord._from_metadata(state.data)
+        result = record.result
     elif state.data is None:
         result = None
     else:
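Note: the `ResultFactory` → `ResultStore` migration changes the call pattern throughout this file. A minimal sketch assembled from the calls visible above (names are taken from the diff; the literal value is illustrative):

    from prefect.results import get_result_store


    async def demo():
        store = get_result_store()

        # create_result_record is synchronous, replacing
        # `await result_factory.create_result(...)`
        record = store.create_result_record({"rows": 10}, key="my-key")

        # persistence is now an explicit store call,
        # replacing `await record.write()`
        await store.apersist_result_record(record)

        # the value is carried on the record itself
        assert record.result == {"rows": 10}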
prefect/task_engine.py CHANGED
@@ -55,8 +55,13 @@ from prefect.exceptions import (
 )
 from prefect.futures import PrefectFuture
 from prefect.logging.loggers import get_logger, patch_print, task_run_logger
-from prefect.records.result_store import ResultFactoryStore
-from prefect.results import BaseResult, ResultFactory, _format_user_supplied_storage_key
+from prefect.results import (
+    BaseResult,
+    ResultRecord,
+    _format_user_supplied_storage_key,
+    get_result_store,
+    should_persist_result,
+)
 from prefect.settings import (
     PREFECT_DEBUG_MODE,
     PREFECT_TASKS_REFRESH_CACHE,
@@ -414,6 +419,8 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
             result = state.result(raise_on_failure=False, fetch=True)
             if inspect.isawaitable(result):
                 result = run_coro_as_sync(result)
+        elif isinstance(state.data, ResultRecord):
+            result = state.data.result
         else:
             result = state.data

@@ -437,7 +444,8 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
             if inspect.isawaitable(_result):
                 _result = run_coro_as_sync(_result)
             return _result
-
+        elif isinstance(self._return_value, ResultRecord):
+            return self._return_value.result
         # otherwise, return the value as is
         return self._return_value

@@ -450,10 +458,6 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
         return self._raised

     def handle_success(self, result: R, transaction: Transaction) -> R:
-        result_factory = getattr(TaskRunContext.get(), "result_factory", None)
-        if result_factory is None:
-            raise ValueError("Result factory is not set")
-
         if self.task.cache_expiration is not None:
             expiration = pendulum.now("utc") + self.task.cache_expiration
         else:
@@ -462,7 +466,7 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
         terminal_state = run_coro_as_sync(
             return_value_to_state(
                 result,
-                result_factory=result_factory,
+                result_store=get_result_store(),
                 key=transaction.key,
                 expiration=expiration,
             )
@@ -534,12 +538,11 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
         # If the task fails, and we have retries left, set the task to retrying.
         if not self.handle_retry(exc):
             # If the task has no retries left, or the retry condition is not met, set the task to failed.
-            context = TaskRunContext.get()
             state = run_coro_as_sync(
                 exception_to_failed_state(
                     exc,
                     message="Task run encountered an exception",
-                    result_factory=getattr(context, "result_factory", None),
+                    result_store=get_result_store(),
                     write_result=True,
                 )
             )
@@ -591,8 +594,13 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
                     log_prints=log_prints,
                     task_run=self.task_run,
                     parameters=self.parameters,
-                    result_factory=run_coro_as_sync(ResultFactory.from_task(self.task)),  # type: ignore
+                    result_store=get_result_store().update_for_task(
+                        self.task, _sync=True
+                    ),
                     client=client,
+                    persist_result=self.task.persist_result
+                    if self.task.persist_result is not None
+                    else should_persist_result(),
                 )
             )
             stack.enter_context(ConcurrencyContextV1())
@@ -717,17 +725,12 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
             else PREFECT_TASKS_REFRESH_CACHE.value()
         )

-        result_factory = getattr(TaskRunContext.get(), "result_factory", None)
-        if result_factory and result_factory.persist_result:
-            store = ResultFactoryStore(result_factory=result_factory)
-        else:
-            store = None
-
         with transaction(
             key=self.compute_transaction_key(),
-            store=store,
+            store=get_result_store(),
             overwrite=overwrite,
             logger=self.logger,
+            write_on_commit=should_persist_result(),
         ) as txn:
             yield txn

@@ -763,10 +766,10 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
         if transaction.is_committed():
             result = transaction.read()
         else:
-            if self.task.tags:
+            if self.task_run.tags:
                 # Acquire a concurrency slot for each tag, but only if a limit
                 # matching the tag already exists.
-                with concurrency(list(self.task.tags), self.task_run.id):
+                with concurrency(list(self.task_run.tags), self.task_run.id):
                     result = call_with_parameters(self.task.fn, parameters)
             else:
                 result = call_with_parameters(self.task.fn, parameters)
@@ -927,6 +930,8 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):
             # Avoid fetching the result unless it is cached, otherwise we defeat
             # the purpose of disabling `cache_result_in_memory`
             result = await new_state.result(raise_on_failure=False, fetch=True)
+        elif isinstance(new_state.data, ResultRecord):
+            result = new_state.data.result
         else:
             result = new_state.data

@@ -947,7 +952,8 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):
         # if the return value is a BaseResult, we need to fetch it
         if isinstance(self._return_value, BaseResult):
             return await self._return_value.get()
-
+        elif isinstance(self._return_value, ResultRecord):
+            return self._return_value.result
         # otherwise, return the value as is
         return self._return_value

@@ -960,10 +966,6 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):
         return self._raised

     async def handle_success(self, result: R, transaction: Transaction) -> R:
-        result_factory = getattr(TaskRunContext.get(), "result_factory", None)
-        if result_factory is None:
-            raise ValueError("Result factory is not set")
-
         if self.task.cache_expiration is not None:
             expiration = pendulum.now("utc") + self.task.cache_expiration
         else:
@@ -971,7 +973,7 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):

         terminal_state = await return_value_to_state(
             result,
-            result_factory=result_factory,
+            result_store=get_result_store(),
             key=transaction.key,
             expiration=expiration,
         )
@@ -1042,11 +1044,10 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):
         # If the task fails, and we have retries left, set the task to retrying.
         if not await self.handle_retry(exc):
             # If the task has no retries left, or the retry condition is not met, set the task to failed.
-            context = TaskRunContext.get()
             state = await exception_to_failed_state(
                 exc,
                 message="Task run encountered an exception",
-                result_factory=getattr(context, "result_factory", None),
+                result_store=get_result_store(),
             )
             self.record_terminal_state_timing(state)
             await self.set_state(state)
@@ -1096,8 +1097,13 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):
                     log_prints=log_prints,
                     task_run=self.task_run,
                     parameters=self.parameters,
-                    result_factory=await ResultFactory.from_task(self.task),  # type: ignore
+                    result_store=await get_result_store().update_for_task(
+                        self.task, _sync=False
+                    ),
                     client=client,
+                    persist_result=self.task.persist_result
+                    if self.task.persist_result is not None
+                    else should_persist_result(),
                 )
             )
             stack.enter_context(ConcurrencyContext())
@@ -1218,17 +1224,13 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):
             if self.task.refresh_cache is not None
             else PREFECT_TASKS_REFRESH_CACHE.value()
         )
-        result_factory = getattr(TaskRunContext.get(), "result_factory", None)
-        if result_factory and result_factory.persist_result:
-            store = ResultFactoryStore(result_factory=result_factory)
-        else:
-            store = None

         with transaction(
             key=self.compute_transaction_key(),
-            store=store,
+            store=get_result_store(),
             overwrite=overwrite,
             logger=self.logger,
+            write_on_commit=should_persist_result(),
         ) as txn:
             yield txn

@@ -1264,10 +1266,10 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):
         if transaction.is_committed():
             result = transaction.read()
         else:
-            if self.task.tags:
+            if self.task_run.tags:
                 # Acquire a concurrency slot for each tag, but only if a limit
                 # matching the tag already exists.
-                async with aconcurrency(list(self.task.tags), self.task_run.id):
+                async with aconcurrency(list(self.task_run.tags), self.task_run.id):
                     result = await call_with_parameters(self.task.fn, parameters)
             else:
                 result = await call_with_parameters(self.task.fn, parameters)
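Note: the `self.task.tags` → `self.task_run.tags` change means tag-based concurrency slots are acquired for the run's effective tags, including tags applied at call time. An illustrative sketch (it assumes a tag concurrency limit named "database" already exists on the server, since slots are only acquired for tags with a matching limit):

    from prefect import flow, task, tags


    @task
    def query():
        ...


    @flow
    def pipeline():
        # Previously only the task's statically declared tags were consulted;
        # now contextual tags on the task run itself also count toward
        # concurrency slot acquisition.
        with tags("database"):
            query()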
prefect/task_worker.py CHANGED
@@ -25,7 +25,7 @@ from prefect.client.orchestration import get_client
 from prefect.client.schemas.objects import TaskRun
 from prefect.client.subscriptions import Subscription
 from prefect.logging.loggers import get_logger
-from prefect.results import ResultFactory
+from prefect.results import ResultStore, get_or_create_default_task_scheduling_storage
 from prefect.settings import (
     PREFECT_API_URL,
     PREFECT_TASK_SCHEDULING_DELETE_FAILED_SUBMISSIONS,
@@ -49,7 +49,7 @@ class StopTaskWorker(Exception):


 def should_try_to_read_parameters(task: Task, task_run: TaskRun) -> bool:
-    """Determines whether a task run should read parameters from the result factory."""
+    """Determines whether a task run should read parameters from the result store."""
     new_enough_state_details = hasattr(
         task_run.state.state_details, "task_parameters_id"
     )
@@ -273,9 +273,11 @@ class TaskWorker:
         if should_try_to_read_parameters(task, task_run):
             parameters_id = task_run.state.state_details.task_parameters_id
             task.persist_result = True
-            factory = await ResultFactory.from_autonomous_task(task)
+            store = await ResultStore(
+                result_storage=await get_or_create_default_task_scheduling_storage()
+            ).update_for_task(task)
             try:
-                run_data = await factory.read_parameters(parameters_id)
+                run_data = await store.read_parameters(parameters_id)
                 parameters = run_data.get("parameters", {})
                 wait_for = run_data.get("wait_for", [])
                 run_context = run_data.get("context", None)
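Note: a sketch of the replacement pattern for `ResultFactory.from_autonomous_task`, using only the calls shown in this hunk (the wrapper function and its name are illustrative):

    from prefect.results import (
        ResultStore,
        get_or_create_default_task_scheduling_storage,
    )


    async def read_run_data(task, parameters_id):
        # Build a store against the default task-scheduling storage, then
        # specialize it for the task, mirroring the worker code above.
        store = await ResultStore(
            result_storage=await get_or_create_default_task_scheduling_storage()
        ).update_for_task(task)
        return await store.read_parameters(parameters_id)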
prefect/tasks.py CHANGED
@@ -50,7 +50,12 @@ from prefect.context import (
 )
 from prefect.futures import PrefectDistributedFuture, PrefectFuture, PrefectFutureList
 from prefect.logging.loggers import get_logger
-from prefect.results import ResultFactory, ResultSerializer, ResultStorage
+from prefect.results import (
+    ResultSerializer,
+    ResultStorage,
+    ResultStore,
+    get_or_create_default_task_scheduling_storage,
+)
 from prefect.settings import (
     PREFECT_TASK_DEFAULT_RETRIES,
     PREFECT_TASK_DEFAULT_RETRY_DELAY_SECONDS,
@@ -201,8 +206,17 @@ def _generate_task_key(fn: Callable[..., Any]) -> str:

     qualname = fn.__qualname__.split(".")[-1]

+    try:
+        code_obj = getattr(fn, "__code__", None)
+        if code_obj is None:
+            code_obj = fn.__call__.__code__
+    except AttributeError:
+        raise AttributeError(
+            f"{fn} is not a standard Python function object and could not be converted to a task."
+        ) from None
+
     code_hash = (
-        h[:NUM_CHARS_DYNAMIC_KEY] if (h := hash_objects(fn.__code__)) else "unknown"
+        h[:NUM_CHARS_DYNAMIC_KEY] if (h := hash_objects(code_obj)) else "unknown"
     )

     return f"{qualname}-{code_hash}"
@@ -752,14 +766,16 @@ class Task(Generic[P, R]):
         # TODO: Improve use of result storage for parameter storage / reference
         self.persist_result = True

-        factory = await ResultFactory.from_autonomous_task(self, client=client)
+        store = await ResultStore(
+            result_storage=await get_or_create_default_task_scheduling_storage()
+        ).update_for_task(self)
         context = serialize_context()
         data: Dict[str, Any] = {"context": context}
         if parameters:
             data["parameters"] = parameters
         if wait_for:
             data["wait_for"] = wait_for
-        await factory.store_parameters(parameters_id, data)
+        await store.store_parameters(parameters_id, data)

         # collect task inputs
         task_inputs = {
@@ -853,14 +869,16 @@ class Task(Generic[P, R]):
         # TODO: Improve use of result storage for parameter storage / reference
         self.persist_result = True

-        factory = await ResultFactory.from_autonomous_task(self, client=client)
+        store = await ResultStore(
+            result_storage=await get_or_create_default_task_scheduling_storage()
+        ).update_for_task(task)
         context = serialize_context()
         data: Dict[str, Any] = {"context": context}
         if parameters:
             data["parameters"] = parameters
         if wait_for:
             data["wait_for"] = wait_for
-        await factory.store_parameters(parameters_id, data)
+        await store.store_parameters(parameters_id, data)

         # collect task inputs
         task_inputs = {
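Note: the `_generate_task_key` change adds a fallback for callables without a `__code__` attribute and a clearer error when no code object can be found at all. A small illustration (`_generate_task_key` is a private helper; the printed hash is indicative only):

    from prefect.tasks import _generate_task_key


    def plain(x):
        return x


    # Standard functions hash their own code object, as before,
    # e.g. "plain-1a2b".
    print(_generate_task_key(plain))

    # Builtins have a __qualname__ but no __code__, and their __call__ is a
    # C method-wrapper without one either, so the new guard raises the
    # descriptive AttributeError instead of an opaque one.
    try:
        _generate_task_key(len)
    except AttributeError as exc:
        print(exc)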