prefect-client 3.0.0rc18__py3-none-any.whl → 3.0.0rc20__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- prefect/__init__.py +0 -3
- prefect/_internal/concurrency/services.py +14 -0
- prefect/_internal/schemas/bases.py +1 -0
- prefect/blocks/core.py +41 -30
- prefect/blocks/system.py +48 -12
- prefect/client/cloud.py +56 -7
- prefect/client/collections.py +1 -1
- prefect/client/orchestration.py +111 -8
- prefect/client/schemas/objects.py +40 -2
- prefect/concurrency/asyncio.py +8 -2
- prefect/concurrency/services.py +16 -6
- prefect/concurrency/sync.py +4 -1
- prefect/concurrency/v1/__init__.py +0 -0
- prefect/concurrency/v1/asyncio.py +143 -0
- prefect/concurrency/v1/context.py +27 -0
- prefect/concurrency/v1/events.py +61 -0
- prefect/concurrency/v1/services.py +116 -0
- prefect/concurrency/v1/sync.py +92 -0
- prefect/context.py +2 -2
- prefect/deployments/flow_runs.py +0 -7
- prefect/deployments/runner.py +11 -0
- prefect/events/clients.py +41 -0
- prefect/events/related.py +72 -73
- prefect/events/utilities.py +2 -0
- prefect/events/worker.py +12 -3
- prefect/exceptions.py +6 -0
- prefect/flow_engine.py +5 -0
- prefect/flows.py +9 -2
- prefect/logging/handlers.py +4 -1
- prefect/main.py +8 -6
- prefect/records/base.py +74 -18
- prefect/records/filesystem.py +207 -0
- prefect/records/memory.py +16 -3
- prefect/records/result_store.py +19 -14
- prefect/results.py +232 -169
- prefect/runner/runner.py +7 -4
- prefect/settings.py +14 -15
- prefect/states.py +73 -18
- prefect/task_engine.py +127 -221
- prefect/task_worker.py +7 -39
- prefect/tasks.py +0 -7
- prefect/transactions.py +89 -27
- prefect/utilities/annotations.py +4 -3
- prefect/utilities/asyncutils.py +4 -4
- prefect/utilities/callables.py +1 -3
- prefect/utilities/dispatch.py +16 -11
- prefect/utilities/engine.py +1 -4
- prefect/utilities/schema_tools/hydration.py +13 -0
- prefect/workers/base.py +78 -18
- {prefect_client-3.0.0rc18.dist-info → prefect_client-3.0.0rc20.dist-info}/METADATA +3 -4
- {prefect_client-3.0.0rc18.dist-info → prefect_client-3.0.0rc20.dist-info}/RECORD +54 -48
- prefect/manifests.py +0 -21
- {prefect_client-3.0.0rc18.dist-info → prefect_client-3.0.0rc20.dist-info}/LICENSE +0 -0
- {prefect_client-3.0.0rc18.dist-info → prefect_client-3.0.0rc20.dist-info}/WHEEL +0 -0
- {prefect_client-3.0.0rc18.dist-info → prefect_client-3.0.0rc20.dist-info}/top_level.txt +0 -0
prefect/task_engine.py
CHANGED
@@ -5,6 +5,7 @@ import time
 from asyncio import CancelledError
 from contextlib import ExitStack, asynccontextmanager, contextmanager
 from dataclasses import dataclass, field
+from functools import partial
 from textwrap import dedent
 from typing import (
     Any,
@@ -33,9 +34,10 @@ from prefect import Task
 from prefect.client.orchestration import PrefectClient, SyncPrefectClient, get_client
 from prefect.client.schemas import TaskRun
 from prefect.client.schemas.objects import State, TaskRunInput
-from prefect.concurrency.asyncio import concurrency as aconcurrency
 from prefect.concurrency.context import ConcurrencyContext
-from prefect.concurrency.sync import concurrency
+from prefect.concurrency.v1.asyncio import concurrency as aconcurrency
+from prefect.concurrency.v1.context import ConcurrencyContext as ConcurrencyContextV1
+from prefect.concurrency.v1.sync import concurrency
 from prefect.context import (
     AsyncClientContext,
     FlowRunContext,
@@ -57,14 +59,12 @@ from prefect.records.result_store import ResultFactoryStore
 from prefect.results import BaseResult, ResultFactory, _format_user_supplied_storage_key
 from prefect.settings import (
     PREFECT_DEBUG_MODE,
-    PREFECT_EXPERIMENTAL_ENABLE_CLIENT_SIDE_TASK_ORCHESTRATION,
     PREFECT_TASKS_REFRESH_CACHE,
 )
 from prefect.states import (
     AwaitingRetry,
     Completed,
     Failed,
-    Paused,
     Pending,
     Retrying,
     Running,
@@ -81,8 +81,6 @@ from prefect.utilities.engine import (
     _get_hook_name,
     emit_task_run_state_change_event,
     link_state_to_result,
-    propose_state,
-    propose_state_sync,
     resolve_to_final_result,
 )
 from prefect.utilities.math import clamped_poisson_interval
@@ -195,14 +193,13 @@ class BaseTaskRunEngine(Generic[P, R]):
         )

     def record_terminal_state_timing(self, state: State) -> None:
-        if
-
-            self.task_run.end_time = state.timestamp
+        if self.task_run and self.task_run.start_time and not self.task_run.end_time:
+            self.task_run.end_time = state.timestamp

-
-
-
-
+            if self.task_run.state.is_running():
+                self.task_run.total_run_time += (
+                    state.timestamp - self.task_run.state.timestamp
+                )

     def is_running(self) -> bool:
         """Whether or not the engine is currently running a task."""
@@ -361,15 +358,14 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):

         new_state = Running()

-
-
-            self.task_run.run_count += 1
+        self.task_run.start_time = new_state.timestamp
+        self.task_run.run_count += 1

-
-
-
-
-
+        flow_run_context = FlowRunContext.get()
+        if flow_run_context and flow_run_context.flow_run:
+            # Carry forward any task run information from the flow run
+            flow_run = flow_run_context.flow_run
+            self.task_run.flow_run_run_count = flow_run.run_count

         state = self.set_state(new_state)

@@ -400,51 +396,28 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
         if not self.task_run:
             raise ValueError("Task run is not set")

-
-            self.task_run.state = new_state = state
-
-            # Ensure that the state_details are populated with the current run IDs
-            new_state.state_details.task_run_id = self.task_run.id
-            new_state.state_details.flow_run_id = self.task_run.flow_run_id
-
-            # Predictively update the de-normalized task_run.state_* attributes
-            self.task_run.state_id = new_state.id
-            self.task_run.state_type = new_state.type
-            self.task_run.state_name = new_state.name
+        self.task_run.state = new_state = state

-
-
-
-                    and state.data.has_cached_object()
-                ):
-                    # Avoid fetching the result unless it is cached, otherwise we defeat
-                    # the purpose of disabling `cache_result_in_memory`
-                    result = state.result(raise_on_failure=False, fetch=True)
-                    if inspect.isawaitable(result):
-                        result = run_coro_as_sync(result)
-                else:
-                    result = state.data
+        # Ensure that the state_details are populated with the current run IDs
+        new_state.state_details.task_run_id = self.task_run.id
+        new_state.state_details.flow_run_id = self.task_run.flow_run_id

-
+        # Predictively update the de-normalized task_run.state_* attributes
+        self.task_run.state_id = new_state.id
+        self.task_run.state_type = new_state.type
+        self.task_run.state_name = new_state.name

-
-
-
-
-            )
-
-
-
-
-            if new_state.state_details.pause_reschedule:
-                # If we're being asked to pause and reschedule, we should exit the
-                # task and expect to be resumed later.
-                raise
+        if new_state.is_final():
+            if isinstance(state.data, BaseResult) and state.data.has_cached_object():
+                # Avoid fetching the result unless it is cached, otherwise we defeat
+                # the purpose of disabling `cache_result_in_memory`
+                result = state.result(raise_on_failure=False, fetch=True)
+                if inspect.isawaitable(result):
+                    result = run_coro_as_sync(result)
+            else:
+                result = state.data

-
-            # that has an in-memory result attached to it; using the API state
-            # could result in losing that reference
-            self.task_run.state = new_state
+            link_state_to_result(state, result)

         # emit a state change event
         self._last_event = emit_task_run_state_change_event(
@@ -492,13 +465,16 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
                 result_factory=result_factory,
                 key=transaction.key,
                 expiration=expiration,
-                # defer persistence to transaction commit
-                defer_persistence=True,
            )
        )
+
+        # Avoid logging when running this rollback hook since it is not user-defined
+        handle_rollback = partial(self.handle_rollback)
+        handle_rollback.log_on_run = False
+
         transaction.stage(
             terminal_state.data,
-            on_rollback_hooks=[
+            on_rollback_hooks=[handle_rollback] + self.task.on_rollback_hooks,
             on_commit_hooks=self.task.on_commit_hooks,
         )
         if transaction.is_committed():
@@ -531,8 +507,7 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
             else:
                 delay = None
                 new_state = Retrying()
-
-            self.task_run.run_count += 1
+            self.task_run.run_count += 1

             self.logger.info(
                 "Task run failed with exception: %r - " "Retry %s/%s will start %s",
@@ -565,6 +540,7 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
                 exc,
                 message="Task run encountered an exception",
                 result_factory=getattr(context, "result_factory", None),
+                write_result=True,
            )
        )
        self.record_terminal_state_timing(state)
@@ -606,8 +582,6 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
         if not self.task_run:
             raise ValueError("Task run is not set")

-        if not PREFECT_EXPERIMENTAL_ENABLE_CLIENT_SIDE_TASK_ORCHESTRATION:
-            self.task_run = client.read_task_run(self.task_run.id)
         with ExitStack() as stack:
             if log_prints := should_log_prints(self.task):
                 stack.enter_context(patch_print())
@@ -621,6 +595,7 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
                         client=client,
                     )
                 )
+            stack.enter_context(ConcurrencyContextV1())
             stack.enter_context(ConcurrencyContext())

             self.logger = task_run_logger(task_run=self.task_run, task=self.task)  # type: ignore
@@ -631,12 +606,6 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
                     task=self.task, parameters=self.parameters
                 )

-                if not PREFECT_EXPERIMENTAL_ENABLE_CLIENT_SIDE_TASK_ORCHESTRATION:
-                    # update the task run name if necessary
-                    self.client.set_task_run_name(
-                        task_run_id=self.task_run.id, name=task_run_name
-                    )
-
                 self.logger.extra["task_run_name"] = task_run_name
                 self.logger.debug(
                     f"Renamed task run {self.task_run.name!r} to {task_run_name!r}"
@@ -660,42 +629,23 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
             self._client = client_ctx.client
             self._is_started = True
             try:
-                if
-
-                    self.
-
-
-
-
-
-
-                            extra_task_inputs=dependencies,
-                        )
-                    )
-                    # Emit an event to capture that the task run was in the `PENDING` state.
-                    self._last_event = emit_task_run_state_change_event(
-                        task_run=self.task_run,
-                        initial_state=None,
-                        validated_state=self.task_run.state,
-                    )
-                else:
-                    if not self.task_run:
-                        self.task_run = run_coro_as_sync(
-                            self.task.create_run(
-                                id=task_run_id,
-                                parameters=self.parameters,
-                                flow_run_context=FlowRunContext.get(),
-                                parent_task_run_context=TaskRunContext.get(),
-                                wait_for=self.wait_for,
-                                extra_task_inputs=dependencies,
-                            )
-                        )
-                    # Emit an event to capture that the task run was in the `PENDING` state.
-                    self._last_event = emit_task_run_state_change_event(
-                        task_run=self.task_run,
-                        initial_state=None,
-                        validated_state=self.task_run.state,
+                if not self.task_run:
+                    self.task_run = run_coro_as_sync(
+                        self.task.create_local_run(
+                            id=task_run_id,
+                            parameters=self.parameters,
+                            flow_run_context=FlowRunContext.get(),
+                            parent_task_run_context=TaskRunContext.get(),
+                            wait_for=self.wait_for,
+                            extra_task_inputs=dependencies,
                         )
+                    )
+                # Emit an event to capture that the task run was in the `PENDING` state.
+                self._last_event = emit_task_run_state_change_event(
+                    task_run=self.task_run,
+                    initial_state=None,
+                    validated_state=self.task_run.state,
+                )

                 with self.setup_run_context():
                     # setup_run_context might update the task run name, so log creation here
@@ -760,17 +710,22 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):

     @contextmanager
     def transaction_context(self) -> Generator[Transaction, None, None]:
-        result_factory = getattr(TaskRunContext.get(), "result_factory", None)
-
         # refresh cache setting is now repurposes as overwrite transaction record
         overwrite = (
             self.task.refresh_cache
             if self.task.refresh_cache is not None
             else PREFECT_TASKS_REFRESH_CACHE.value()
         )
+
+        result_factory = getattr(TaskRunContext.get(), "result_factory", None)
+        if result_factory and result_factory.persist_result:
+            store = ResultFactoryStore(result_factory=result_factory)
+        else:
+            store = None
+
         with transaction(
             key=self.compute_transaction_key(),
-            store=
+            store=store,
             overwrite=overwrite,
             logger=self.logger,
         ) as txn:
@@ -808,15 +763,10 @@ class SyncTaskRunEngine(BaseTaskRunEngine[P, R]):
         if transaction.is_committed():
             result = transaction.read()
         else:
-            if
-                PREFECT_EXPERIMENTAL_ENABLE_CLIENT_SIDE_TASK_ORCHESTRATION.value()
-                and self.task.tags
-            ):
+            if self.task.tags:
                 # Acquire a concurrency slot for each tag, but only if a limit
                 # matching the tag already exists.
-                with concurrency(
-                    list(self.task.tags), occupy=1, create_if_missing=False
-                ):
+                with concurrency(list(self.task.tags), self.task_run.id):
                     result = call_with_parameters(self.task.fn, parameters)
             else:
                 result = call_with_parameters(self.task.fn, parameters)
@@ -920,15 +870,14 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):

         new_state = Running()

-
-
-            self.task_run.run_count += 1
+        self.task_run.start_time = new_state.timestamp
+        self.task_run.run_count += 1

-
-
-
-
-
+        flow_run_context = FlowRunContext.get()
+        if flow_run_context:
+            # Carry forward any task run information from the flow run
+            flow_run = flow_run_context.flow_run
+            self.task_run.flow_run_run_count = flow_run.run_count

         state = await self.set_state(new_state)

@@ -959,49 +908,29 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):
         if not self.task_run:
             raise ValueError("Task run is not set")

-
-            self.task_run.state = new_state = state
+        self.task_run.state = new_state = state

-
-
-
+        # Ensure that the state_details are populated with the current run IDs
+        new_state.state_details.task_run_id = self.task_run.id
+        new_state.state_details.flow_run_id = self.task_run.flow_run_id

-
-
-
-
+        # Predictively update the de-normalized task_run.state_* attributes
+        self.task_run.state_id = new_state.id
+        self.task_run.state_type = new_state.type
+        self.task_run.state_name = new_state.name

-
-
-
-
-
-
-
-
-
-
-
-                link_state_to_result(new_state, result)
-
-        else:
-            try:
-                new_state = await propose_state(
-                    self.client, state, task_run_id=self.task_run.id, force=force
-                )
-            except Pause as exc:
-                # We shouldn't get a pause signal without a state, but if this happens,
-                # just use a Paused state to assume an in-process pause.
-                new_state = exc.state if exc.state else Paused()
-                if new_state.state_details.pause_reschedule:
-                    # If we're being asked to pause and reschedule, we should exit the
-                    # task and expect to be resumed later.
-                    raise
+        if new_state.is_final():
+            if (
+                isinstance(new_state.data, BaseResult)
+                and new_state.data.has_cached_object()
+            ):
+                # Avoid fetching the result unless it is cached, otherwise we defeat
+                # the purpose of disabling `cache_result_in_memory`
+                result = await new_state.result(raise_on_failure=False, fetch=True)
+            else:
+                result = new_state.data

-
-            # that has an in-memory result attached to it; using the API state
-            # could result in losing that reference
-            self.task_run.state = new_state
+            link_state_to_result(new_state, result)

         # emit a state change event
         self._last_event = emit_task_run_state_change_event(
@@ -1045,12 +974,15 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):
             result_factory=result_factory,
             key=transaction.key,
             expiration=expiration,
-            # defer persistence to transaction commit
-            defer_persistence=True,
        )
+
+        # Avoid logging when running this rollback hook since it is not user-defined
+        handle_rollback = partial(self.handle_rollback)
+        handle_rollback.log_on_run = False
+
         transaction.stage(
             terminal_state.data,
-            on_rollback_hooks=[
+            on_rollback_hooks=[handle_rollback] + self.task.on_rollback_hooks,
             on_commit_hooks=self.task.on_commit_hooks,
         )
         if transaction.is_committed():
@@ -1083,8 +1015,7 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):
             else:
                 delay = None
                 new_state = Retrying()
-
-            self.task_run.run_count += 1
+            self.task_run.run_count += 1

             self.logger.info(
                 "Task run failed with exception: %r - " "Retry %s/%s will start %s",
@@ -1156,8 +1087,6 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):
         if not self.task_run:
             raise ValueError("Task run is not set")

-        if not PREFECT_EXPERIMENTAL_ENABLE_CLIENT_SIDE_TASK_ORCHESTRATION:
-            self.task_run = await client.read_task_run(self.task_run.id)
         with ExitStack() as stack:
             if log_prints := should_log_prints(self.task):
                 stack.enter_context(patch_print())
@@ -1179,11 +1108,6 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):
                 task_run_name = _resolve_custom_task_run_name(
                     task=self.task, parameters=self.parameters
                 )
-                if not PREFECT_EXPERIMENTAL_ENABLE_CLIENT_SIDE_TASK_ORCHESTRATION:
-                    # update the task run name if necessary
-                    await self.client.set_task_run_name(
-                        task_run_id=self.task_run.id, name=task_run_name
-                    )
                 self.logger.extra["task_run_name"] = task_run_name
                 self.logger.debug(
                     f"Renamed task run {self.task_run.name!r} to {task_run_name!r}"
@@ -1207,38 +1131,21 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):
             self._client = get_client()
             self._is_started = True
             try:
-                if
-
-
-
-
-
-
-
-
-
-
-                    self.
-
-
-
-                    )
-                else:
-                    if not self.task_run:
-                        self.task_run = await self.task.create_run(
-                            id=task_run_id,
-                            parameters=self.parameters,
-                            flow_run_context=FlowRunContext.get(),
-                            parent_task_run_context=TaskRunContext.get(),
-                            wait_for=self.wait_for,
-                            extra_task_inputs=dependencies,
-                        )
-                    # Emit an event to capture that the task run was in the `PENDING` state.
-                    self._last_event = emit_task_run_state_change_event(
-                        task_run=self.task_run,
-                        initial_state=None,
-                        validated_state=self.task_run.state,
-                    )
+                if not self.task_run:
+                    self.task_run = await self.task.create_local_run(
+                        id=task_run_id,
+                        parameters=self.parameters,
+                        flow_run_context=FlowRunContext.get(),
+                        parent_task_run_context=TaskRunContext.get(),
+                        wait_for=self.wait_for,
+                        extra_task_inputs=dependencies,
+                    )
+                # Emit an event to capture that the task run was in the `PENDING` state.
+                self._last_event = emit_task_run_state_change_event(
+                    task_run=self.task_run,
+                    initial_state=None,
+                    validated_state=self.task_run.state,
+                )

                 async with self.setup_run_context():
                     # setup_run_context might update the task run name, so log creation here
@@ -1305,17 +1212,21 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):

     @asynccontextmanager
     async def transaction_context(self) -> AsyncGenerator[Transaction, None]:
-        result_factory = getattr(TaskRunContext.get(), "result_factory", None)
-
         # refresh cache setting is now repurposes as overwrite transaction record
         overwrite = (
             self.task.refresh_cache
             if self.task.refresh_cache is not None
             else PREFECT_TASKS_REFRESH_CACHE.value()
         )
+        result_factory = getattr(TaskRunContext.get(), "result_factory", None)
+        if result_factory and result_factory.persist_result:
+            store = ResultFactoryStore(result_factory=result_factory)
+        else:
+            store = None
+
         with transaction(
             key=self.compute_transaction_key(),
-            store=
+            store=store,
             overwrite=overwrite,
             logger=self.logger,
         ) as txn:
@@ -1353,15 +1264,10 @@ class AsyncTaskRunEngine(BaseTaskRunEngine[P, R]):
         if transaction.is_committed():
             result = transaction.read()
         else:
-            if
-                PREFECT_EXPERIMENTAL_ENABLE_CLIENT_SIDE_TASK_ORCHESTRATION.value()
-                and self.task.tags
-            ):
+            if self.task.tags:
                 # Acquire a concurrency slot for each tag, but only if a limit
                 # matching the tag already exists.
-                async with aconcurrency(
-                    list(self.task.tags), occupy=1, create_if_missing=False
-                ):
+                async with aconcurrency(list(self.task.tags), self.task_run.id):
                     result = await call_with_parameters(self.task.fn, parameters)
             else:
                 result = await call_with_parameters(self.task.fn, parameters)
prefect/task_worker.py
CHANGED
@@ -24,19 +24,17 @@ from prefect.cache_policies import DEFAULT, NONE
 from prefect.client.orchestration import get_client
 from prefect.client.schemas.objects import TaskRun
 from prefect.client.subscriptions import Subscription
-from prefect.exceptions import Abort, PrefectHTTPStatusError
 from prefect.logging.loggers import get_logger
 from prefect.results import ResultFactory
 from prefect.settings import (
     PREFECT_API_URL,
-    PREFECT_EXPERIMENTAL_ENABLE_CLIENT_SIDE_TASK_ORCHESTRATION,
     PREFECT_TASK_SCHEDULING_DELETE_FAILED_SUBMISSIONS,
 )
 from prefect.states import Pending
 from prefect.task_engine import run_task_async, run_task_sync
 from prefect.utilities.annotations import NotSet
 from prefect.utilities.asyncutils import asyncnullcontext, sync_compatible
-from prefect.utilities.engine import emit_task_run_state_change_event
+from prefect.utilities.engine import emit_task_run_state_change_event
 from prefect.utilities.processutils import _register_signal
 from prefect.utilities.services import start_client_metrics_server
 from prefect.utilities.urls import url_for
@@ -294,42 +292,12 @@ class TaskWorker:
             return

         initial_state = task_run.state
-
-
-
-
-
-
-            state = new_state
-            task_run.state = state
-        else:
-            try:
-                new_state = Pending()
-                new_state.state_details.deferred = True
-                state = await propose_state(
-                    client=get_client(),  # TODO prove that we cannot use self._client here
-                    state=new_state,
-                    task_run_id=task_run.id,
-                )
-            except Abort as exc:
-                logger.exception(
-                    f"Failed to submit task run {task_run.id!r} to engine", exc_info=exc
-                )
-                return
-            except PrefectHTTPStatusError as exc:
-                if exc.response.status_code == 404:
-                    logger.warning(
-                        f"Task run {task_run.id!r} not found. It may have been deleted."
-                    )
-                    return
-                raise
-
-        if not state.is_pending():
-            logger.warning(
-                f"Cancelling submission of task run {task_run.id!r} -"
-                f" server returned a non-pending state {state.type.value!r}."
-            )
-            return
+        new_state = Pending()
+        new_state.state_details.deferred = True
+        new_state.state_details.task_run_id = task_run.id
+        new_state.state_details.flow_run_id = task_run.flow_run_id
+        state = new_state
+        task_run.state = state

         emit_task_run_state_change_event(
             task_run=task_run,
prefect/tasks.py
CHANGED
@@ -52,7 +52,6 @@ from prefect.futures import PrefectDistributedFuture, PrefectFuture, PrefectFutu
 from prefect.logging.loggers import get_logger
 from prefect.results import ResultFactory, ResultSerializer, ResultStorage
 from prefect.settings import (
-    PREFECT_EXPERIMENTAL_ENABLE_CLIENT_SIDE_TASK_ORCHESTRATION,
     PREFECT_TASK_DEFAULT_RETRIES,
     PREFECT_TASK_DEFAULT_RETRY_DELAY_SECONDS,
 )
@@ -815,12 +814,6 @@ class Task(Generic[P, R]):
         extra_task_inputs: Optional[Dict[str, Set[TaskRunInput]]] = None,
         deferred: bool = False,
     ) -> TaskRun:
-        if not PREFECT_EXPERIMENTAL_ENABLE_CLIENT_SIDE_TASK_ORCHESTRATION:
-            raise RuntimeError(
-                "Cannot call `Task.create_local_run` unless "
-                "PREFECT_EXPERIMENTAL_ENABLE_CLIENT_SIDE_TASK_ORCHESTRATION is True"
-            )
-
         from prefect.utilities.engine import (
             _dynamic_key_for_task_run,
             collect_task_run_inputs_sync,