prefect-client 3.0.1__py3-none-any.whl → 3.0.3__py3-none-any.whl

This diff compares the contents of two publicly released package versions as they appear in their public registry. It is provided for informational purposes only.
Files changed (48)
  1. prefect/_internal/compatibility/deprecated.py +1 -1
  2. prefect/blocks/core.py +5 -4
  3. prefect/blocks/notifications.py +21 -0
  4. prefect/blocks/webhook.py +17 -1
  5. prefect/cache_policies.py +98 -28
  6. prefect/client/orchestration.py +42 -20
  7. prefect/client/schemas/actions.py +10 -2
  8. prefect/client/schemas/filters.py +4 -2
  9. prefect/client/schemas/objects.py +48 -6
  10. prefect/client/schemas/responses.py +15 -1
  11. prefect/client/types/flexible_schedule_list.py +1 -1
  12. prefect/concurrency/asyncio.py +45 -6
  13. prefect/concurrency/services.py +1 -1
  14. prefect/concurrency/sync.py +21 -27
  15. prefect/concurrency/v1/asyncio.py +3 -0
  16. prefect/concurrency/v1/sync.py +4 -5
  17. prefect/context.py +6 -6
  18. prefect/deployments/runner.py +43 -5
  19. prefect/events/actions.py +6 -0
  20. prefect/flow_engine.py +12 -4
  21. prefect/flows.py +15 -11
  22. prefect/locking/filesystem.py +243 -0
  23. prefect/logging/handlers.py +0 -2
  24. prefect/logging/loggers.py +0 -18
  25. prefect/logging/logging.yml +1 -0
  26. prefect/main.py +19 -5
  27. prefect/plugins.py +9 -1
  28. prefect/records/base.py +12 -0
  29. prefect/records/filesystem.py +6 -2
  30. prefect/records/memory.py +6 -0
  31. prefect/records/result_store.py +6 -0
  32. prefect/results.py +192 -29
  33. prefect/runner/runner.py +74 -6
  34. prefect/settings.py +31 -1
  35. prefect/states.py +34 -17
  36. prefect/task_engine.py +58 -43
  37. prefect/transactions.py +113 -52
  38. prefect/utilities/asyncutils.py +7 -0
  39. prefect/utilities/collections.py +3 -2
  40. prefect/utilities/engine.py +20 -9
  41. prefect/utilities/importtools.py +1 -0
  42. prefect/utilities/urls.py +70 -12
  43. prefect/workers/base.py +10 -8
  44. {prefect_client-3.0.1.dist-info → prefect_client-3.0.3.dist-info}/METADATA +1 -1
  45. {prefect_client-3.0.1.dist-info → prefect_client-3.0.3.dist-info}/RECORD +48 -47
  46. {prefect_client-3.0.1.dist-info → prefect_client-3.0.3.dist-info}/LICENSE +0 -0
  47. {prefect_client-3.0.1.dist-info → prefect_client-3.0.3.dist-info}/WHEEL +0 -0
  48. {prefect_client-3.0.1.dist-info → prefect_client-3.0.3.dist-info}/top_level.txt +0 -0
prefect/states.py CHANGED
@@ -25,7 +25,13 @@ from prefect.exceptions import (
     UnfinishedRun,
 )
 from prefect.logging.loggers import get_logger, get_run_logger
-from prefect.results import BaseResult, R, ResultStore
+from prefect.results import (
+    BaseResult,
+    R,
+    ResultRecord,
+    ResultRecordMetadata,
+    ResultStore,
+)
 from prefect.settings import PREFECT_ASYNC_FETCH_STATE_RESULT
 from prefect.utilities.annotations import BaseAnnotation
 from prefect.utilities.asyncutils import in_async_main_thread, sync_compatible
@@ -92,7 +98,11 @@ async def _get_state_result_data_with_retries(

     for i in range(1, max_attempts + 1):
         try:
-            return await state.data.get()
+            if isinstance(state.data, ResultRecordMetadata):
+                record = await ResultRecord._from_metadata(state.data)
+                return record.result
+            else:
+                return await state.data.get()
         except Exception as e:
             if i == max_attempts:
                 raise
@@ -127,10 +137,12 @@ async def _get_state_result(
     ):
         raise await get_state_exception(state)

-    if isinstance(state.data, BaseResult):
+    if isinstance(state.data, (BaseResult, ResultRecordMetadata)):
         result = await _get_state_result_data_with_retries(
             state, retry_result_failure=retry_result_failure
         )
+    elif isinstance(state.data, ResultRecord):
+        result = state.data.result

     elif state.data is None:
         if state.is_failed() or state.is_crashed() or state.is_cancelled():
@@ -207,7 +219,7 @@ async def exception_to_crashed_state(
     )

     if result_store:
-        data = await result_store.create_result(exc)
+        data = result_store.create_result_record(exc)
     else:
         # Attach the exception for local usage, will not be available when retrieved
         # from the API
@@ -240,10 +252,10 @@ async def exception_to_failed_state(
         pass

     if result_store:
-        data = await result_store.create_result(exc)
+        data = result_store.create_result_record(exc)
         if write_result:
             try:
-                await data.write()
+                await result_store.apersist_result_record(data)
             except Exception as exc:
                 local_logger.warning(
                     "Failed to write result: %s Execution will continue, but the result has not been written",
@@ -309,21 +321,21 @@ async def return_value_to_state(
         state = retval
         # Unless the user has already constructed a result explicitly, use the store
         # to update the data to the correct type
-        if not isinstance(state.data, BaseResult):
-            result = await result_store.create_result(
+        if not isinstance(state.data, (BaseResult, ResultRecord, ResultRecordMetadata)):
+            result_record = result_store.create_result_record(
                 state.data,
                 key=key,
                 expiration=expiration,
             )
             if write_result:
                 try:
-                    await result.write()
+                    await result_store.apersist_result_record(result_record)
                 except Exception as exc:
                     local_logger.warning(
                         "Encountered an error while persisting result: %s Execution will continue, but the result has not been persisted",
                         exc,
                     )
-            state.data = result
+            state.data = result_record
         return state

     # Determine a new state from the aggregate of contained states
@@ -359,14 +371,14 @@ async def return_value_to_state(
         # TODO: We may actually want to set the data to a `StateGroup` object and just
         # allow it to be unpacked into a tuple and such so users can interact with
         # it
-        result = await result_store.create_result(
+        result_record = result_store.create_result_record(
             retval,
             key=key,
             expiration=expiration,
         )
         if write_result:
             try:
-                await result.write()
+                await result_store.apersist_result_record(result_record)
             except Exception as exc:
                 local_logger.warning(
                     "Encountered an error while persisting result: %s Execution will continue, but the result has not been persisted",
@@ -375,7 +387,7 @@ async def return_value_to_state(
         return State(
             type=new_state_type,
             message=message,
-            data=result,
+            data=result_record,
         )

     # Generators aren't portable, implicitly convert them to a list.
@@ -385,23 +397,23 @@ async def return_value_to_state(
         data = retval

     # Otherwise, they just gave data and this is a completed retval
-    if isinstance(data, BaseResult):
+    if isinstance(data, (BaseResult, ResultRecord)):
         return Completed(data=data)
     else:
-        result = await result_store.create_result(
+        result_record = result_store.create_result_record(
             data,
             key=key,
             expiration=expiration,
         )
         if write_result:
             try:
-                await result.write()
+                await result_store.apersist_result_record(result_record)
             except Exception as exc:
                 local_logger.warning(
                     "Encountered an error while persisting result: %s Execution will continue, but the result has not been persisted",
                     exc,
                 )
-        return Completed(data=result)
+        return Completed(data=result_record)


 @sync_compatible
@@ -442,6 +454,11 @@ async def get_state_exception(state: State) -> BaseException:

     if isinstance(state.data, BaseResult):
         result = await _get_state_result_data_with_retries(state)
+    elif isinstance(state.data, ResultRecord):
+        result = state.data.result
+    elif isinstance(state.data, ResultRecordMetadata):
+        record = await ResultRecord._from_metadata(state.data)
+        result = record.result
     elif state.data is None:
         result = None
     else:
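
Taken together, the states.py changes mean state.data may now hold a ResultRecord or bare ResultRecordMetadata in addition to the older BaseResult. Below is a minimal sketch of the resolution logic these branches implement, using only names that appear in the diff; the standalone helper itself is hypothetical and not part of the prefect API.

from prefect.results import BaseResult, ResultRecord, ResultRecordMetadata

async def resolve_state_data(data):
    # Hypothetical helper mirroring the branches added to _get_state_result
    # and get_state_exception above.
    if isinstance(data, ResultRecordMetadata):
        # Only metadata was attached: rehydrate the full record first.
        record = await ResultRecord._from_metadata(data)
        return record.result
    elif isinstance(data, ResultRecord):
        # A full record already carries its deserialized value.
        return data.result
    elif isinstance(data, BaseResult):
        # Legacy result objects still fetch lazily.
        return await data.get()
    return data  # plain values (or None) pass through unchanged
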
prefect/task_engine.py CHANGED
@@ -55,11 +55,12 @@ from prefect.exceptions import (
 )
 from prefect.futures import PrefectFuture
 from prefect.logging.loggers import get_logger, patch_print, task_run_logger
-from prefect.records.result_store import ResultRecordStore
 from prefect.results import (
     BaseResult,
+    ResultRecord,
     _format_user_supplied_storage_key,
-    get_current_result_store,
+    get_result_store,
+    should_persist_result,
 )
 from prefect.settings import (
     PREFECT_DEBUG_MODE,
@@ -76,7 +77,7 @@ from prefect.states import (
     exception_to_failed_state,
     return_value_to_state,
 )
-from prefect.transactions import Transaction, transaction
+from prefect.transactions import IsolationLevel, Transaction, transaction
 from prefect.utilities.annotations import NotSet
 from prefect.utilities.asyncutils import run_coro_as_sync
 from prefect.utilities.callables import call_with_parameters, parameters_to_args_kwargs
@@ -363,7 +364,6 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
             new_state = Running()

             self.task_run.start_time = new_state.timestamp
-            self.task_run.run_count += 1

         flow_run_context = FlowRunContext.get()
         if flow_run_context and flow_run_context.flow_run:
@@ -411,6 +411,9 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
         self.task_run.state_type = new_state.type
         self.task_run.state_name = new_state.name

+        if new_state.is_running():
+            self.task_run.run_count += 1
+
         if new_state.is_final():
             if isinstance(state.data, BaseResult) and state.data.has_cached_object():
                 # Avoid fetching the result unless it is cached, otherwise we defeat
@@ -418,6 +421,8 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
                 result = state.result(raise_on_failure=False, fetch=True)
                 if inspect.isawaitable(result):
                     result = run_coro_as_sync(result)
+            elif isinstance(state.data, ResultRecord):
+                result = state.data.result
             else:
                 result = state.data

@@ -441,7 +446,8 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
             if inspect.isawaitable(_result):
                 _result = run_coro_as_sync(_result)
             return _result
-
+        elif isinstance(self._return_value, ResultRecord):
+            return self._return_value.result
         # otherwise, return the value as is
         return self._return_value

@@ -454,10 +460,6 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
         return self._raised

     def handle_success(self, result: R, transaction: Transaction) -> R:
-        result_store = getattr(TaskRunContext.get(), "result_store", None)
-        if result_store is None:
-            raise ValueError("Result store is not set")
-
         if self.task.cache_expiration is not None:
             expiration = pendulum.now("utc") + self.task.cache_expiration
         else:
@@ -466,7 +468,7 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
         terminal_state = run_coro_as_sync(
             return_value_to_state(
                 result,
-                result_store=result_store,
+                result_store=get_result_store(),
                 key=transaction.key,
                 expiration=expiration,
             )
@@ -511,7 +513,6 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
             else:
                 delay = None
                 new_state = Retrying()
-            self.task_run.run_count += 1

             self.logger.info(
                 "Task run failed with exception: %r - " "Retry %s/%s will start %s",
@@ -538,12 +539,11 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
         # If the task fails, and we have retries left, set the task to retrying.
         if not self.handle_retry(exc):
             # If the task has no retries left, or the retry condition is not met, set the task to failed.
-            context = TaskRunContext.get()
             state = run_coro_as_sync(
                 exception_to_failed_state(
                     exc,
                     message="Task run encountered an exception",
-                    result_store=getattr(context, "result_store", None),
+                    result_store=get_result_store(),
                     write_result=True,
                 )
             )
@@ -595,10 +595,13 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
                     log_prints=log_prints,
                     task_run=self.task_run,
                     parameters=self.parameters,
-                    result_store=get_current_result_store().update_for_task(
+                    result_store=get_result_store().update_for_task(
                         self.task, _sync=True
                     ),
                     client=client,
+                    persist_result=self.task.persist_result
+                    if self.task.persist_result is not None
+                    else should_persist_result(),
                 )
             )
             stack.enter_context(ConcurrencyContextV1())
@@ -690,8 +693,9 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
         if scheduled_time := self.state.state_details.scheduled_time:
             sleep_time = (scheduled_time - pendulum.now("utc")).total_seconds()
             await anyio.sleep(sleep_time if sleep_time > 0 else 0)
+        new_state = Retrying() if self.state.name == "AwaitingRetry" else Running()
         self.set_state(
-            Retrying() if self.state.name == "AwaitingRetry" else Running(),
+            new_state,
             force=True,
         )

@@ -723,17 +727,21 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
             else PREFECT_TASKS_REFRESH_CACHE.value()
         )

-        result_store = getattr(TaskRunContext.get(), "result_store", None)
-        if result_store and result_store.persist_result:
-            store = ResultRecordStore(result_store=result_store)
-        else:
-            store = None
+        isolation_level = (
+            IsolationLevel(self.task.cache_policy.isolation_level)
+            if self.task.cache_policy
+            and self.task.cache_policy is not NotSet
+            and self.task.cache_policy.isolation_level is not None
+            else None
+        )

         with transaction(
             key=self.compute_transaction_key(),
-            store=store,
+            store=get_result_store(),
             overwrite=overwrite,
             logger=self.logger,
+            write_on_commit=should_persist_result(),
+            isolation_level=isolation_level,
         ) as txn:
             yield txn

@@ -769,10 +777,10 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
         if transaction.is_committed():
             result = transaction.read()
         else:
-            if self.task.tags:
+            if self.task_run.tags:
                 # Acquire a concurrency slot for each tag, but only if a limit
                 # matching the tag already exists.
-                with concurrency(list(self.task.tags), self.task_run.id):
+                with concurrency(list(self.task_run.tags), self.task_run.id):
                     result = call_with_parameters(self.task.fn, parameters)
             else:
                 result = call_with_parameters(self.task.fn, parameters)
@@ -877,7 +885,6 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):
             new_state = Running()

             self.task_run.start_time = new_state.timestamp
-            self.task_run.run_count += 1

         flow_run_context = FlowRunContext.get()
         if flow_run_context:
@@ -925,6 +932,9 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):
         self.task_run.state_type = new_state.type
         self.task_run.state_name = new_state.name

+        if new_state.is_running():
+            self.task_run.run_count += 1
+
         if new_state.is_final():
             if (
                 isinstance(new_state.data, BaseResult)
@@ -933,6 +943,8 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):
                 # Avoid fetching the result unless it is cached, otherwise we defeat
                 # the purpose of disabling `cache_result_in_memory`
                 result = await new_state.result(raise_on_failure=False, fetch=True)
+            elif isinstance(new_state.data, ResultRecord):
+                result = new_state.data.result
             else:
                 result = new_state.data

@@ -953,7 +965,8 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):
         # if the return value is a BaseResult, we need to fetch it
         if isinstance(self._return_value, BaseResult):
             return await self._return_value.get()
-
+        elif isinstance(self._return_value, ResultRecord):
+            return self._return_value.result
         # otherwise, return the value as is
         return self._return_value

@@ -966,10 +979,6 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):
         return self._raised

     async def handle_success(self, result: R, transaction: Transaction) -> R:
-        result_store = getattr(TaskRunContext.get(), "result_store", None)
-        if result_store is None:
-            raise ValueError("Result store is not set")
-
         if self.task.cache_expiration is not None:
             expiration = pendulum.now("utc") + self.task.cache_expiration
         else:
@@ -977,7 +986,7 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):

         terminal_state = await return_value_to_state(
             result,
-            result_store=result_store,
+            result_store=get_result_store(),
             key=transaction.key,
             expiration=expiration,
         )
@@ -1021,7 +1030,6 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):
             else:
                 delay = None
                 new_state = Retrying()
-            self.task_run.run_count += 1

             self.logger.info(
                 "Task run failed with exception: %r - " "Retry %s/%s will start %s",
@@ -1048,11 +1056,10 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):
         # If the task fails, and we have retries left, set the task to retrying.
         if not await self.handle_retry(exc):
             # If the task has no retries left, or the retry condition is not met, set the task to failed.
-            context = TaskRunContext.get()
             state = await exception_to_failed_state(
                 exc,
                 message="Task run encountered an exception",
-                result_store=getattr(context, "result_store", None),
+                result_store=get_result_store(),
             )
             self.record_terminal_state_timing(state)
             await self.set_state(state)
@@ -1102,10 +1109,13 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):
                     log_prints=log_prints,
                     task_run=self.task_run,
                     parameters=self.parameters,
-                    result_store=await get_current_result_store().update_for_task(
+                    result_store=await get_result_store().update_for_task(
                         self.task, _sync=False
                     ),
                     client=client,
+                    persist_result=self.task.persist_result
+                    if self.task.persist_result is not None
+                    else should_persist_result(),
                 )
             )
             stack.enter_context(ConcurrencyContext())
@@ -1192,8 +1202,9 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):
         if scheduled_time := self.state.state_details.scheduled_time:
             sleep_time = (scheduled_time - pendulum.now("utc")).total_seconds()
             await anyio.sleep(sleep_time if sleep_time > 0 else 0)
+        new_state = Retrying() if self.state.name == "AwaitingRetry" else Running()
         await self.set_state(
-            Retrying() if self.state.name == "AwaitingRetry" else Running(),
+            new_state,
             force=True,
         )

@@ -1226,17 +1237,21 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):
             if self.task.refresh_cache is not None
             else PREFECT_TASKS_REFRESH_CACHE.value()
         )
-        result_store = getattr(TaskRunContext.get(), "result_store", None)
-        if result_store and result_store.persist_result:
-            store = ResultRecordStore(result_store=result_store)
-        else:
-            store = None
+        isolation_level = (
+            IsolationLevel(self.task.cache_policy.isolation_level)
+            if self.task.cache_policy
+            and self.task.cache_policy is not NotSet
+            and self.task.cache_policy.isolation_level is not None
+            else None
+        )

         with transaction(
             key=self.compute_transaction_key(),
-            store=store,
+            store=get_result_store(),
             overwrite=overwrite,
             logger=self.logger,
+            write_on_commit=should_persist_result(),
+            isolation_level=isolation_level,
         ) as txn:
             yield txn

@@ -1272,10 +1287,10 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):
         if transaction.is_committed():
             result = transaction.read()
         else:
-            if self.task.tags:
+            if self.task_run.tags:
                 # Acquire a concurrency slot for each tag, but only if a limit
                 # matching the tag already exists.
-                async with aconcurrency(list(self.task.tags), self.task_run.id):
+                async with aconcurrency(list(self.task_run.tags), self.task_run.id):
                     result = await call_with_parameters(self.task.fn, parameters)
             else:
                 result = await call_with_parameters(self.task.fn, parameters)
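
The net effect in both engines: the transaction is now backed directly by the result store, write-on-commit follows should_persist_result(), and the isolation level is derived from the task's cache policy. Below is a rough condensation of that wiring; the run_in_transaction helper and its parameters are illustrative only (not prefect API), while the imported names and keyword arguments are the ones used in the hunks above.

from prefect.results import get_result_store, should_persist_result
from prefect.transactions import IsolationLevel, transaction
from prefect.utilities.annotations import NotSet

def run_in_transaction(task, key, overwrite=False, logger=None):
    # Illustrative helper condensing the setup both engines now perform.
    # Derive the isolation level from the task's cache policy, if one is set.
    isolation_level = (
        IsolationLevel(task.cache_policy.isolation_level)
        if task.cache_policy
        and task.cache_policy is not NotSet
        and task.cache_policy.isolation_level is not None
        else None
    )
    with transaction(
        key=key,
        store=get_result_store(),                 # the result store doubles as the transaction store
        overwrite=overwrite,
        logger=logger,
        write_on_commit=should_persist_result(),  # only persist on commit when persistence is enabled
        isolation_level=isolation_level,
    ) as txn:
        # Committed transactions short-circuit to the stored value, as in both engines.
        return txn.read() if txn.is_committed() else None
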