prefect-client 3.0.0rc2__py3-none-any.whl → 3.0.0rc4__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
- prefect/__init__.py +0 -1
- prefect/_internal/compatibility/migration.py +124 -0
- prefect/_internal/concurrency/__init__.py +2 -2
- prefect/_internal/concurrency/primitives.py +1 -0
- prefect/_internal/pydantic/annotations/pendulum.py +2 -2
- prefect/_internal/pytz.py +1 -1
- prefect/blocks/core.py +1 -1
- prefect/client/orchestration.py +96 -22
- prefect/client/schemas/actions.py +1 -1
- prefect/client/schemas/filters.py +6 -0
- prefect/client/schemas/objects.py +10 -3
- prefect/client/subscriptions.py +6 -5
- prefect/context.py +1 -27
- prefect/deployments/__init__.py +3 -0
- prefect/deployments/base.py +4 -2
- prefect/deployments/deployments.py +3 -0
- prefect/deployments/steps/pull.py +1 -0
- prefect/deployments/steps/utility.py +2 -1
- prefect/engine.py +3 -0
- prefect/events/cli/automations.py +1 -1
- prefect/events/clients.py +7 -1
- prefect/exceptions.py +9 -0
- prefect/filesystems.py +22 -11
- prefect/flow_engine.py +195 -153
- prefect/flows.py +95 -36
- prefect/futures.py +9 -1
- prefect/infrastructure/provisioners/container_instance.py +1 -0
- prefect/infrastructure/provisioners/ecs.py +2 -2
- prefect/input/__init__.py +4 -0
- prefect/logging/formatters.py +2 -2
- prefect/logging/handlers.py +2 -2
- prefect/logging/loggers.py +1 -1
- prefect/plugins.py +1 -0
- prefect/records/cache_policies.py +3 -3
- prefect/records/result_store.py +10 -3
- prefect/results.py +47 -73
- prefect/runner/runner.py +1 -1
- prefect/runner/server.py +1 -1
- prefect/runtime/__init__.py +1 -0
- prefect/runtime/deployment.py +1 -0
- prefect/runtime/flow_run.py +1 -0
- prefect/runtime/task_run.py +1 -0
- prefect/settings.py +16 -3
- prefect/states.py +15 -4
- prefect/task_engine.py +195 -39
- prefect/task_runners.py +9 -3
- prefect/task_runs.py +26 -12
- prefect/task_worker.py +149 -20
- prefect/tasks.py +153 -71
- prefect/transactions.py +85 -15
- prefect/types/__init__.py +10 -3
- prefect/utilities/asyncutils.py +3 -3
- prefect/utilities/callables.py +16 -4
- prefect/utilities/collections.py +120 -57
- prefect/utilities/dockerutils.py +5 -3
- prefect/utilities/engine.py +11 -0
- prefect/utilities/filesystem.py +4 -5
- prefect/utilities/importtools.py +29 -0
- prefect/utilities/services.py +2 -2
- prefect/utilities/urls.py +195 -0
- prefect/utilities/visualization.py +1 -0
- prefect/variables.py +4 -0
- prefect/workers/base.py +35 -0
- {prefect_client-3.0.0rc2.dist-info → prefect_client-3.0.0rc4.dist-info}/METADATA +2 -2
- {prefect_client-3.0.0rc2.dist-info → prefect_client-3.0.0rc4.dist-info}/RECORD +68 -66
- prefect/blocks/kubernetes.py +0 -115
- {prefect_client-3.0.0rc2.dist-info → prefect_client-3.0.0rc4.dist-info}/LICENSE +0 -0
- {prefect_client-3.0.0rc2.dist-info → prefect_client-3.0.0rc4.dist-info}/WHEEL +0 -0
- {prefect_client-3.0.0rc2.dist-info → prefect_client-3.0.0rc4.dist-info}/top_level.txt +0 -0
prefect/task_engine.py
CHANGED
```diff
@@ -3,8 +3,10 @@ import logging
 import time
 from contextlib import ExitStack, contextmanager
 from dataclasses import dataclass, field
+from textwrap import dedent
 from typing import (
     Any,
+    AsyncGenerator,
     Callable,
     Coroutine,
     Dict,
@@ -25,7 +27,6 @@ import pendulum
 from typing_extensions import ParamSpec
 
 from prefect import Task
-from prefect._internal.concurrency.api import create_call, from_sync
 from prefect.client.orchestration import SyncPrefectClient
 from prefect.client.schemas import TaskRun
 from prefect.client.schemas.objects import State, TaskRunInput
@@ -43,7 +44,6 @@ from prefect.exceptions import (
     UpstreamTaskError,
 )
 from prefect.futures import PrefectFuture
-from prefect.logging.handlers import APILogHandler
 from prefect.logging.loggers import get_logger, patch_print, task_run_logger
 from prefect.records.result_store import ResultFactoryStore
 from prefect.results import ResultFactory, _format_user_supplied_storage_key
@@ -64,11 +64,12 @@ from prefect.states import (
 )
 from prefect.transactions import Transaction, transaction
 from prefect.utilities.asyncutils import run_coro_as_sync
-from prefect.utilities.callables import call_with_parameters
+from prefect.utilities.callables import call_with_parameters, parameters_to_args_kwargs
 from prefect.utilities.collections import visit_collection
 from prefect.utilities.engine import (
     _get_hook_name,
     emit_task_run_state_change_event,
+    link_state_to_result,
     propose_state_sync,
     resolve_to_final_result,
 )
@@ -219,7 +220,7 @@ class TaskRunEngine(Generic[P, R]):
             return_data=False,
             max_depth=-1,
             remove_annotations=True,
-            context={},
+            context={"current_task_run": self.task_run, "current_task": self.task},
         )
 
     def begin_run(self):
@@ -298,9 +299,17 @@ class TaskRunEngine(Generic[P, R]):
         if result_factory is None:
             raise ValueError("Result factory is not set")
 
+        if self.task.cache_expiration is not None:
+            expiration = pendulum.now("utc") + self.task.cache_expiration
+        else:
+            expiration = None
+
         terminal_state = run_coro_as_sync(
             return_value_to_state(
-                result,
+                result,
+                result_factory=result_factory,
+                key=transaction.key,
+                expiration=expiration,
             )
         )
         transaction.stage(
@@ -333,10 +342,24 @@ class TaskRunEngine(Generic[P, R]):
                     scheduled_time=pendulum.now("utc").add(seconds=delay)
                 )
             else:
+                delay = None
                 new_state = Retrying()
+
+            self.logger.info(
+                f"Task run failed with exception {exc!r} - "
+                f"Retry {self.retries + 1}/{self.task.retries} will start "
+                f"{str(delay) + ' second(s) from now' if delay else 'immediately'}"
+            )
+
             self.set_state(new_state, force=True)
             self.retries = self.retries + 1
             return True
+        elif self.retries >= self.task.retries:
+            self.logger.error(
+                f"Task run failed with exception {exc!r} - Retries are exhausted"
+            )
+            return False
+
         return False
 
     def handle_exception(self, exc: Exception) -> None:
@@ -373,7 +396,7 @@ class TaskRunEngine(Generic[P, R]):
         self.set_state(state, force=True)
 
     @contextmanager
-    def 
+    def setup_run_context(self, client: Optional[SyncPrefectClient] = None):
         from prefect.utilities.engine import (
             _resolve_custom_task_run_name,
             should_log_prints,
@@ -394,9 +417,7 @@ class TaskRunEngine(Generic[P, R]):
                     log_prints=log_prints,
                     task_run=self.task_run,
                     parameters=self.parameters,
-                    result_factory=run_coro_as_sync(
-                        ResultFactory.from_autonomous_task(self.task)
-                    ), # type: ignore
+                    result_factory=run_coro_as_sync(ResultFactory.from_task(self.task)), # type: ignore
                     client=client,
                 )
             )
@@ -444,9 +465,6 @@ class TaskRunEngine(Generic[P, R]):
                     extra_task_inputs=dependencies,
                 )
             )
-            self.logger.info(
-                f"Created task run {self.task_run.name!r} for task {self.task.name!r}"
-            )
             # Emit an event to capture that the task run was in the `PENDING` state.
             self._last_event = emit_task_run_state_change_event(
                 task_run=self.task_run,
@@ -454,13 +472,20 @@ class TaskRunEngine(Generic[P, R]):
                 validated_state=self.task_run.state,
             )
 
-
+            with self.setup_run_context():
+                # setup_run_context might update the task run name, so log creation here
+                self.logger.info(
+                    f"Created task run {self.task_run.name!r} for task {self.task.name!r}"
+                )
+                yield self
 
         except Exception:
             # regular exceptions are caught and re-raised to the user
             raise
-        except (Pause, Abort):
+        except (Pause, Abort) as exc:
             # Do not capture internal signals as crashes
+            if isinstance(exc, Abort):
+                self.logger.error("Task run was aborted: %s", exc)
             raise
         except GeneratorExit:
             # Do not capture generator exits as crashes
@@ -474,18 +499,37 @@ class TaskRunEngine(Generic[P, R]):
             display_state = (
                 repr(self.state) if PREFECT_DEBUG_MODE else str(self.state)
             )
-            self.
-
-
-
-
-
+            level = logging.INFO if self.state.is_completed() else logging.ERROR
+            msg = f"Finished in state {display_state}"
+            if self.state.is_pending():
+                msg += (
+                    "\nPlease wait for all submitted tasks to complete"
+                    " before exiting your flow by calling `.wait()` on the "
+                    "`PrefectFuture` returned from your `.submit()` calls."
+                )
+                msg += dedent(
+                    """
+
+                    Example:
 
-
-
-
-
+                    from prefect import flow, task
+
+                    @task
+                    def say_hello(name):
+                        print(f"Hello, {name}!")
+
+                    @flow
+                    def example_flow():
+                        say_hello.submit(name="Marvin")
+                        say_hello.wait()
+
+                    example_flow()
+                    """
                 )
+            self.logger.log(
+                level=level,
+                msg=msg,
+            )
 
             self._is_started = False
             self._client = None
@@ -499,10 +543,8 @@ class TaskRunEngine(Generic[P, R]):
     async def wait_until_ready(self):
         """Waits until the scheduled time (if its the future), then enters Running."""
         if scheduled_time := self.state.state_details.scheduled_time:
-
-
-            )
-            await anyio.sleep((scheduled_time - pendulum.now("utc")).total_seconds())
+            sleep_time = (scheduled_time - pendulum.now("utc")).total_seconds()
+            await anyio.sleep(sleep_time if sleep_time > 0 else 0)
             self.set_state(
                 Retrying() if self.state.name == "AwaitingRetry" else Running(),
                 force=True,
@@ -521,15 +563,11 @@ class TaskRunEngine(Generic[P, R]):
         dependencies: Optional[Dict[str, Set[TaskRunInput]]] = None,
     ) -> Generator[None, None, None]:
         with self.initialize_run(task_run_id=task_run_id, dependencies=dependencies):
-
-
-
-
-            self.
-            try:
-                yield
-            finally:
-                self.call_hooks()
+            self.begin_run()
+            try:
+                yield
+            finally:
+                self.call_hooks()
 
     @contextmanager
     def transaction_context(self) -> Generator[Transaction, None, None]:
@@ -552,9 +590,12 @@ class TaskRunEngine(Generic[P, R]):
     def run_context(self):
         timeout_context = timeout_async if self.task.isasync else timeout
         # reenter the run context to ensure it is up to date for every run
-        with self.
+        with self.setup_run_context():
             try:
                 with timeout_context(seconds=self.task.timeout_seconds):
+                    self.logger.debug(
+                        f"Executing task {self.task.name!r} for task run {self.task_run.name!r}..."
+                    )
                     yield self
             except TimeoutError as exc:
                 self.handle_timeout(exc)
```
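The retry path in `handle_retry` above now logs each upcoming retry and the exhaustion of retries. As a rough, illustrative sketch (the task and flow below are not part of the package; `retries` and `retry_delay_seconds` are standard `@task` options), a configuration like this would exercise those code paths and produce a "Retry 1/2 will start 1 second(s) from now" style message on the first two failures:

```python
from prefect import flow, task

attempts = {"count": 0}

@task(retries=2, retry_delay_seconds=1)
def flaky() -> str:
    # Fails twice, then succeeds; each failure goes through handle_retry,
    # which emits the new retry log message shown in the hunk above.
    attempts["count"] += 1
    if attempts["count"] < 3:
        raise RuntimeError("transient failure")
    return "ok"

@flow
def retry_demo() -> str:
    return flaky()

if __name__ == "__main__":
    assert retry_demo() == "ok"
```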
```diff
@@ -641,6 +682,117 @@ async def run_task_async(
     return engine.state if return_type == "state" else engine.result()
 
 
+def run_generator_task_sync(
+    task: Task[P, R],
+    task_run_id: Optional[UUID] = None,
+    task_run: Optional[TaskRun] = None,
+    parameters: Optional[Dict[str, Any]] = None,
+    wait_for: Optional[Iterable[PrefectFuture]] = None,
+    return_type: Literal["state", "result"] = "result",
+    dependencies: Optional[Dict[str, Set[TaskRunInput]]] = None,
+    context: Optional[Dict[str, Any]] = None,
+) -> Generator[R, None, None]:
+    if return_type != "result":
+        raise ValueError("The return_type for a generator task must be 'result'")
+
+    engine = TaskRunEngine[P, R](
+        task=task,
+        parameters=parameters,
+        task_run=task_run,
+        wait_for=wait_for,
+        context=context,
+    )
+
+    with engine.start(task_run_id=task_run_id, dependencies=dependencies):
+        while engine.is_running():
+            run_coro_as_sync(engine.wait_until_ready())
+            with engine.run_context(), engine.transaction_context() as txn:
+                # TODO: generators should default to commit_mode=OFF
+                # because they are dynamic by definition
+                # for now we just prevent this branch explicitly
+                if False and txn.is_committed():
+                    txn.read()
+                else:
+                    call_args, call_kwargs = parameters_to_args_kwargs(
+                        task.fn, engine.parameters or {}
+                    )
+                    gen = task.fn(*call_args, **call_kwargs)
+                    try:
+                        while True:
+                            gen_result = next(gen)
+                            # link the current state to the result for dependency tracking
+                            #
+                            # TODO: this could grow the task_run_result
+                            # dictionary in an unbounded way, so finding a
+                            # way to periodically clean it up (using
+                            # weakrefs or similar) would be good
+                            link_state_to_result(engine.state, gen_result)
+                            yield gen_result
+                    except StopIteration as exc:
+                        engine.handle_success(exc.value, transaction=txn)
+                    except GeneratorExit as exc:
+                        engine.handle_success(None, transaction=txn)
+                        gen.throw(exc)
+
+    return engine.result()
+
+
+async def run_generator_task_async(
+    task: Task[P, R],
+    task_run_id: Optional[UUID] = None,
+    task_run: Optional[TaskRun] = None,
+    parameters: Optional[Dict[str, Any]] = None,
+    wait_for: Optional[Iterable[PrefectFuture]] = None,
+    return_type: Literal["state", "result"] = "result",
+    dependencies: Optional[Dict[str, Set[TaskRunInput]]] = None,
+    context: Optional[Dict[str, Any]] = None,
+) -> AsyncGenerator[R, None]:
+    if return_type != "result":
+        raise ValueError("The return_type for a generator task must be 'result'")
+    engine = TaskRunEngine[P, R](
+        task=task,
+        parameters=parameters,
+        task_run=task_run,
+        wait_for=wait_for,
+        context=context,
+    )
+
+    with engine.start(task_run_id=task_run_id, dependencies=dependencies):
+        while engine.is_running():
+            await engine.wait_until_ready()
+            with engine.run_context(), engine.transaction_context() as txn:
+                # TODO: generators should default to commit_mode=OFF
+                # because they are dynamic by definition
+                # for now we just prevent this branch explicitly
+                if False and txn.is_committed():
+                    txn.read()
+                else:
+                    call_args, call_kwargs = parameters_to_args_kwargs(
+                        task.fn, engine.parameters or {}
+                    )
+                    gen = task.fn(*call_args, **call_kwargs)
+                    try:
+                        while True:
+                            # can't use anext in Python < 3.10
+                            gen_result = await gen.__anext__()
+                            # link the current state to the result for dependency tracking
+                            #
+                            # TODO: this could grow the task_run_result
+                            # dictionary in an unbounded way, so finding a
+                            # way to periodically clean it up (using
+                            # weakrefs or similar) would be good
+                            link_state_to_result(engine.state, gen_result)
+                            yield gen_result
+                    except (StopAsyncIteration, GeneratorExit) as exc:
+                        engine.handle_success(None, transaction=txn)
+                        if isinstance(exc, GeneratorExit):
+                            gen.throw(exc)
+
+    # async generators can't return, but we can raise failures here
+    if engine.state.is_failed():
+        engine.result()
+
+
 def run_task(
     task: Task[P, Union[R, Coroutine[Any, Any, R]]],
     task_run_id: Optional[UUID] = None,
@@ -680,7 +832,11 @@ def run_task(
         dependencies=dependencies,
         context=context,
     )
-    if task.isasync:
+    if task.isasync and task.isgenerator:
+        return run_generator_task_async(**kwargs)
+    elif task.isgenerator:
+        return run_generator_task_sync(**kwargs)
+    elif task.isasync:
        return run_task_async(**kwargs)
    else:
        return run_task_sync(**kwargs)
```
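The hunks above add `run_generator_task_sync` / `run_generator_task_async` and route generator tasks to them from `run_task`. A hedged sketch of what this enables at the user level (illustrative code, assuming a Prefect 3.x environment; only the standard `@task` and `@flow` decorators are used):

```python
from prefect import flow, task

@task
def stream_numbers(n: int):
    # A synchronous generator task: each yielded value is passed through
    # link_state_to_result by run_generator_task_sync for dependency tracking.
    for i in range(n):
        yield i

@flow
def consume(n: int = 3) -> list:
    # Iterating the task call is what drives run_generator_task_sync.
    return [value for value in stream_numbers(n)]

if __name__ == "__main__":
    assert consume() == [0, 1, 2]
```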
prefect/task_runners.py
CHANGED
```diff
@@ -202,12 +202,13 @@ class TaskRunner(abc.ABC, Generic[F]):
 
 
 class ThreadPoolTaskRunner(TaskRunner[PrefectConcurrentFuture]):
-    def __init__(self):
+    def __init__(self, max_workers: Optional[int] = None):
         super().__init__()
         self._executor: Optional[ThreadPoolExecutor] = None
+        self._max_workers = max_workers
 
     def duplicate(self) -> "ThreadPoolTaskRunner":
-        return type(self)()
+        return type(self)(max_workers=self._max_workers)
 
     def submit(
         self,
@@ -278,7 +279,7 @@ class ThreadPoolTaskRunner(TaskRunner[PrefectConcurrentFuture]):
 
     def __enter__(self):
         super().__enter__()
-        self._executor = ThreadPoolExecutor()
+        self._executor = ThreadPoolExecutor(max_workers=self._max_workers)
         return self
 
     def __exit__(self, exc_type, exc_value, traceback):
@@ -287,6 +288,11 @@ class ThreadPoolTaskRunner(TaskRunner[PrefectConcurrentFuture]):
             self._executor = None
         super().__exit__(exc_type, exc_value, traceback)
 
+    def __eq__(self, value: object) -> bool:
+        if not isinstance(value, ThreadPoolTaskRunner):
+            return False
+        return self._max_workers == value._max_workers
+
 
 # Here, we alias ConcurrentTaskRunner to ThreadPoolTaskRunner for backwards compatibility
 ConcurrentTaskRunner = ThreadPoolTaskRunner
```
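`ThreadPoolTaskRunner` now accepts `max_workers`, forwards it to the underlying `ThreadPoolExecutor`, preserves it in `duplicate()`, and compares runners by it in the new `__eq__`. A sketch of how a flow might use the option (the flow and task here are illustrative, not part of the package):

```python
from prefect import flow, task
from prefect.task_runners import ThreadPoolTaskRunner

@task
def double(x: int) -> int:
    return x * 2

# Cap the runner at four worker threads; ConcurrentTaskRunner is an alias
# for ThreadPoolTaskRunner, so it accepts the same argument.
@flow(task_runner=ThreadPoolTaskRunner(max_workers=4))
def parallel_flow() -> list:
    futures = [double.submit(i) for i in range(10)]
    return [future.result() for future in futures]

# Runners with the same max_workers now compare equal via the added __eq__.
assert ThreadPoolTaskRunner(max_workers=4) == ThreadPoolTaskRunner(max_workers=4)
```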
prefect/task_runs.py
CHANGED
```diff
@@ -71,7 +71,7 @@ class TaskRunWaiter:
         self.logger = get_logger("TaskRunWaiter")
         self._consumer_task: Optional[asyncio.Task] = None
         self._observed_completed_task_runs: TTLCache[uuid.UUID, bool] = TTLCache(
-            maxsize=
+            maxsize=10000, ttl=600
         )
         self._completion_events: Dict[uuid.UUID, asyncio.Event] = {}
         self._loop: Optional[asyncio.AbstractEventLoop] = None
@@ -85,20 +85,25 @@
         """
         if self._started:
             return
-        self.logger.
+        self.logger.debug("Starting TaskRunWaiter")
         loop_thread = get_global_loop()
 
         if not asyncio.get_running_loop() == loop_thread._loop:
             raise RuntimeError("TaskRunWaiter must run on the global loop thread.")
 
         self._loop = loop_thread._loop
-
+
+        consumer_started = asyncio.Event()
+        self._consumer_task = self._loop.create_task(
+            self._consume_events(consumer_started)
+        )
+        asyncio.run_coroutine_threadsafe(consumer_started.wait(), self._loop)
 
         loop_thread.add_shutdown_call(create_call(self.stop))
         atexit.register(self.stop)
         self._started = True
 
-    async def _consume_events(self):
+    async def _consume_events(self, consumer_started: asyncio.Event):
         async with get_events_subscriber(
             filter=EventFilter(
                 event=EventNameFilter(
@@ -109,9 +114,10 @@
                 )
             )
         ) as subscriber:
+            consumer_started.set()
             async for event in subscriber:
                 try:
-                    self.logger.
+                    self.logger.debug(
                         f"Received event: {event.resource['prefect.resource.id']}"
                     )
                     task_run_id = uuid.UUID(
@@ -119,6 +125,7 @@
                             "prefect.task-run.", ""
                         )
                    )
+
                    with self._observed_completed_task_runs_lock:
                        # Cache the task run ID for a short period of time to avoid
                        # unnecessary waits
@@ -172,14 +179,21 @@
        # when the event is received
        instance._completion_events[task_run_id] = finished_event
 
-
-
-
-
+        try:
+            # Now check one more time whether the task run arrived before we start to
+            # wait on it, in case it came in while we were setting up the event above.
+            with instance._observed_completed_task_runs_lock:
+                if task_run_id in instance._observed_completed_task_runs:
+                    return
 
-
-
-
+            with anyio.move_on_after(delay=timeout):
+                await from_async.wait_for_call_in_loop_thread(
+                    create_call(finished_event.wait)
+                )
+        finally:
+            with instance._completion_events_lock:
+                # Remove the event from the cache after it has been waited on
+                instance._completion_events.pop(task_run_id, None)
 
     @classmethod
     def instance(cls):
```
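The `TaskRunWaiter` changes start the event consumer with an `asyncio.Event` handshake so that `start()` does not complete before the events subscription is live, and `wait_for_task_run` re-checks the observed-completions cache before blocking. The handshake pattern in isolation looks roughly like the self-contained sketch below (names here are illustrative, not Prefect APIs):

```python
import asyncio

async def consume_events(consumer_started: asyncio.Event) -> None:
    # Stand-in for "async with get_events_subscriber(...) as subscriber:".
    await asyncio.sleep(0)  # pretend the subscription is now established
    consumer_started.set()  # signal readiness before consuming events
    # "async for event in subscriber: ..." would follow here.

async def start() -> None:
    consumer_started = asyncio.Event()
    consumer_task = asyncio.create_task(consume_events(consumer_started))
    # Only report the waiter as started once the consumer can receive events.
    await consumer_started.wait()
    await consumer_task

if __name__ == "__main__":
    asyncio.run(start())
```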