prefect-client 3.1.8__py3-none-any.whl → 3.1.9__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- prefect/__init__.py +53 -59
- prefect/_internal/concurrency/services.py +6 -4
- prefect/_version.py +3 -3
- prefect/agent.py +3 -1
- prefect/client/cloud.py +0 -21
- prefect/client/schemas/objects.py +11 -0
- prefect/client/utilities.py +1 -15
- prefect/deployments/deployments.py +4 -2
- prefect/deployments/runner.py +3 -1
- prefect/engine.py +2 -1
- prefect/events/filters.py +2 -8
- prefect/exceptions.py +31 -41
- prefect/filesystems.py +2 -2
- prefect/flow_engine.py +2 -2
- prefect/flows.py +228 -185
- prefect/infrastructure/__init__.py +3 -1
- prefect/infrastructure/base.py +3 -1
- prefect/results.py +76 -19
- prefect/runner/runner.py +2 -3
- prefect/states.py +22 -10
- prefect/task_engine.py +1 -1
- prefect/telemetry/instrumentation.py +9 -10
- prefect/telemetry/services.py +67 -0
- prefect/utilities/engine.py +15 -1
- prefect/utilities/importtools.py +28 -21
- prefect/variables.py +2 -2
- prefect/workers/base.py +6 -12
- prefect/workers/block.py +3 -1
- prefect/workers/cloud.py +3 -1
- {prefect_client-3.1.8.dist-info → prefect_client-3.1.9.dist-info}/METADATA +1 -1
- {prefect_client-3.1.8.dist-info → prefect_client-3.1.9.dist-info}/RECORD +34 -33
- {prefect_client-3.1.8.dist-info → prefect_client-3.1.9.dist-info}/LICENSE +0 -0
- {prefect_client-3.1.8.dist-info → prefect_client-3.1.9.dist-info}/WHEEL +0 -0
- {prefect_client-3.1.8.dist-info → prefect_client-3.1.9.dist-info}/top_level.txt +0 -0
prefect/results.py
CHANGED
```diff
@@ -233,6 +233,29 @@ def _format_user_supplied_storage_key(key: str) -> str:
     return key.format(**runtime_vars, parameters=prefect.runtime.task_run.parameters)
 
 
+async def _call_explicitly_async_block_method(
+    block: Union[WritableFileSystem, NullFileSystem],
+    method: str,
+    args: tuple[Any, ...],
+    kwargs: dict[str, Any],
+) -> Any:
+    """
+    TODO: remove this once we have explicit async methods on all storage blocks
+
+    see https://github.com/PrefectHQ/prefect/issues/15008
+    """
+    if hasattr(block, f"a{method}"):  # explicit async method
+        return await getattr(block, f"a{method}")(*args, **kwargs)
+    elif hasattr(getattr(block, method, None), "aio"):  # sync_compatible
+        return await getattr(block, method).aio(block, *args, **kwargs)
+    else:  # should not happen in prefect, but users can override impls
+        maybe_coro = getattr(block, method)(*args, **kwargs)
+        if inspect.isawaitable(maybe_coro):
+            return await maybe_coro
+        else:
+            return maybe_coro
+
+
 T = TypeVar("T")
 
 
@@ -405,7 +428,9 @@ class ResultStore(BaseModel):
         # TODO: Add an `exists` method to commonly used storage blocks
         # so the entire payload doesn't need to be read
         try:
-            metadata_content = await self.metadata_storage.read_path(key)
+            metadata_content = await _call_explicitly_async_block_method(
+                self.metadata_storage, "read_path", (key,), {}
+            )
             if metadata_content is None:
                 return False
             metadata = ResultRecordMetadata.load_bytes(metadata_content)
@@ -414,7 +439,9 @@ class ResultStore(BaseModel):
                 return False
         else:
             try:
-                content = await self.result_storage.read_path(key)
+                content = await _call_explicitly_async_block_method(
+                    self.result_storage, "read_path", (key,), {}
+                )
                 if content is None:
                     return False
                 record = ResultRecord.deserialize(content)
@@ -491,12 +518,22 @@ class ResultStore(BaseModel):
             self.result_storage = await get_default_result_storage()
 
         if self.metadata_storage is not None:
-            metadata_content = await self.metadata_storage.read_path(key)
+            metadata_content = await _call_explicitly_async_block_method(
+                self.metadata_storage,
+                "read_path",
+                (key,),
+                {},
+            )
             metadata = ResultRecordMetadata.load_bytes(metadata_content)
             assert (
                 metadata.storage_key is not None
             ), "Did not find storage key in metadata"
-            result_content = await self.result_storage.read_path(metadata.storage_key)
+            result_content = await _call_explicitly_async_block_method(
+                self.result_storage,
+                "read_path",
+                (metadata.storage_key,),
+                {},
+            )
             result_record: ResultRecord[
                 Any
             ] = ResultRecord.deserialize_from_result_and_metadata(
@@ -504,7 +541,12 @@ class ResultStore(BaseModel):
             )
             await emit_result_read_event(self, resolved_key_path)
         else:
-            content = await self.result_storage.read_path(key)
+            content = await _call_explicitly_async_block_method(
+                self.result_storage,
+                "read_path",
+                (key,),
+                {},
+            )
             result_record: ResultRecord[Any] = ResultRecord.deserialize(
                 content, backup_serializer=self.serializer
             )
@@ -555,7 +597,7 @@ class ResultStore(BaseModel):
         obj: Any,
         key: Optional[str] = None,
         expiration: Optional[DateTime] = None,
-    ) -> "ResultRecord":
+    ) -> "ResultRecord[Any]":
         """
         Create a result record.
 
@@ -671,19 +713,26 @@ class ResultStore(BaseModel):
 
         # If metadata storage is configured, write result and metadata separately
        if self.metadata_storage is not None:
-            await self.result_storage.write_path(
-                result_record.metadata.storage_key,
-                content=result_record.serialize_result(),
+            await _call_explicitly_async_block_method(
+                self.result_storage,
+                "write_path",
+                (result_record.metadata.storage_key,),
+                {"content": result_record.serialize_result()},
             )
-            await self.metadata_storage.write_path(
-                base_key,
-                content=result_record.serialize_metadata(),
+            await _call_explicitly_async_block_method(
+                self.metadata_storage,
+                "write_path",
+                (base_key,),
+                {"content": result_record.serialize_metadata()},
             )
             await emit_result_write_event(self, result_record.metadata.storage_key)
         # Otherwise, write the result metadata and result together
         else:
-            await self.result_storage.write_path(
-                result_record.metadata.storage_key, content=result_record.serialize()
+            await _call_explicitly_async_block_method(
+                self.result_storage,
+                "write_path",
+                (result_record.metadata.storage_key,),
+                {"content": result_record.serialize()},
             )
             await emit_result_write_event(self, result_record.metadata.storage_key)
         if self.cache_result_in_memory:
@@ -910,8 +959,11 @@ class ResultStore(BaseModel):
                 serializer=self.serializer, storage_key=str(identifier)
             ),
         )
-        await self.result_storage.write_path(
-            f"parameters/{identifier}", content=record.serialize()
+        await _call_explicitly_async_block_method(
+            self.result_storage,
+            "write_path",
+            (f"parameters/{identifier}",),
+            {"content": record.serialize()},
         )
 
     @sync_compatible
@@ -921,7 +973,12 @@ class ResultStore(BaseModel):
                 "Result store is not configured - must have a result storage block to read parameters"
             )
         record = ResultRecord.deserialize(
-            await self.result_storage.read_path(f"parameters/{identifier}")
+            await _call_explicitly_async_block_method(
+                self.result_storage,
+                "read_path",
+                (f"parameters/{identifier}",),
+                {},
+            )
         )
         return record.result
 
@@ -976,7 +1033,7 @@ class ResultRecordMetadata(BaseModel):
         """
         return cls.model_validate_json(data)
 
-    def __eq__(self, other):
+    def __eq__(self, other: Any) -> bool:
         if not isinstance(other, ResultRecordMetadata):
             return False
         return (
@@ -1050,7 +1107,7 @@ class ResultRecord(BaseModel, Generic[R]):
 
     @model_validator(mode="before")
     @classmethod
-    def coerce_old_format(cls, value: Any):
+    def coerce_old_format(cls, value: Any) -> Any:
        if isinstance(value, dict):
            if "data" in value:
                value["result"] = value.pop("data")
```
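The interesting part of the new helper is its dispatch order: it prefers an explicit `a`-prefixed async method, then a `@sync_compatible` method's `.aio` attribute, and finally falls back to calling the method and awaiting the result if it happens to be a coroutine. Below is a standalone sketch of the first and last branches using hypothetical toy blocks; the `.aio` branch is omitted because it depends on Prefect's `sync_compatible` internals.

```python
import asyncio
import inspect


class ExplicitAsyncBlock:
    async def aread_path(self, key: str) -> bytes:  # preferred: explicit async method
        return b"from aread_path"


class PlainSyncBlock:
    def read_path(self, key: str) -> bytes:  # fallback: plain sync method
        return b"from read_path"


async def call_block_method(block, method: str, *args, **kwargs):
    # Mirrors the dispatch in _call_explicitly_async_block_method (minus `.aio`)
    if hasattr(block, f"a{method}"):
        return await getattr(block, f"a{method}")(*args, **kwargs)
    maybe_coro = getattr(block, method)(*args, **kwargs)
    if inspect.isawaitable(maybe_coro):
        return await maybe_coro
    return maybe_coro


async def main() -> None:
    print(await call_block_method(ExplicitAsyncBlock(), "read_path", "key"))
    print(await call_block_method(PlainSyncBlock(), "read_path", "key"))


asyncio.run(main())
```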
prefect/runner/runner.py
CHANGED
```diff
@@ -1209,9 +1209,8 @@ class Runner:
                 )
                 # Mark the task as started to prevent agent crash
                 task_status.started(exc)
-                await self._propose_crashed_state(
-                    flow_run, "Flow run process could not be started"
-                )
+                message = f"Flow run process could not be started:\n{exc!r}"
+                await self._propose_crashed_state(flow_run, message)
             else:
                 run_logger.exception(
                     f"An error occurred while monitoring flow run '{flow_run.id}'. "
```
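The only behavioral change here is that the crash message now carries the `repr` of the exception, so the failure type survives into the proposed CRASHED state. A quick illustration of what the `!r` conversion adds (example error):

```python
# `{exc!r}` keeps the exception class visible, not just its message.
exc = FileNotFoundError("no such executable: /usr/bin/python3.12")
print(f"Flow run process could not be started:\n{exc!r}")
# Flow run process could not be started:
# FileNotFoundError('no such executable: /usr/bin/python3.12')
```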
prefect/states.py
CHANGED
```diff
@@ -11,6 +11,7 @@ from typing import Any, Dict, Iterable, Optional, Type
 import anyio
 import httpx
 import pendulum
+from opentelemetry import propagate
 from typing_extensions import TypeGuard
 
 from prefect._internal.compatibility import deprecated
@@ -588,6 +589,16 @@ class StateGroup:
         return f"StateGroup<{self.counts_message()}>"
 
 
+def _traced(cls: Type[State[R]], **kwargs: Any) -> State[R]:
+    state_details = StateDetails.model_validate(kwargs.pop("state_details", {}))
+
+    carrier = {}
+    propagate.inject(carrier)
+    state_details.traceparent = carrier.get("traceparent")
+
+    return cls(**kwargs, state_details=state_details)
+
+
 def Scheduled(
     cls: Type[State[R]] = State,
     scheduled_time: Optional[datetime.datetime] = None,
@@ -605,7 +616,7 @@ def Scheduled(
         raise ValueError("An extra scheduled_time was provided in state_details")
     state_details.scheduled_time = scheduled_time
 
-    return cls(type=StateType.SCHEDULED, state_details=state_details, **kwargs)
+    return _traced(cls, type=StateType.SCHEDULED, state_details=state_details, **kwargs)
 
 
 def Completed(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
@@ -614,7 +625,8 @@ def Completed(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
     Returns:
         State: a Completed state
     """
-    return cls(type=StateType.COMPLETED, **kwargs)
+
+    return _traced(cls, type=StateType.COMPLETED, **kwargs)
 
 
 def Running(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
@@ -623,7 +635,7 @@ def Running(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
     Returns:
         State: a Running state
     """
-    return cls(type=StateType.RUNNING, **kwargs)
+    return _traced(cls, type=StateType.RUNNING, **kwargs)
 
 
 def Failed(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
@@ -632,7 +644,7 @@ def Failed(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
     Returns:
         State: a Failed state
     """
-    return cls(type=StateType.FAILED, **kwargs)
+    return _traced(cls, type=StateType.FAILED, **kwargs)
 
 
 def Crashed(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
@@ -641,7 +653,7 @@ def Crashed(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
     Returns:
         State: a Crashed state
     """
-    return cls(type=StateType.CRASHED, **kwargs)
+    return _traced(cls, type=StateType.CRASHED, **kwargs)
 
 
 def Cancelling(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
@@ -650,7 +662,7 @@ def Cancelling(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
     Returns:
         State: a Cancelling state
     """
-    return cls(type=StateType.CANCELLING, **kwargs)
+    return _traced(cls, type=StateType.CANCELLING, **kwargs)
 
 
 def Cancelled(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
@@ -659,7 +671,7 @@ def Cancelled(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
     Returns:
         State: a Cancelled state
     """
-    return cls(type=StateType.CANCELLED, **kwargs)
+    return _traced(cls, type=StateType.CANCELLED, **kwargs)
 
 
 def Pending(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
@@ -668,7 +680,7 @@ def Pending(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
     Returns:
         State: a Pending state
     """
-    return cls(type=StateType.PENDING, **kwargs)
+    return _traced(cls, type=StateType.PENDING, **kwargs)
 
 
 def Paused(
@@ -704,7 +716,7 @@ def Paused(
     state_details.pause_reschedule = reschedule
     state_details.pause_key = pause_key
 
-    return cls(type=StateType.PAUSED, state_details=state_details, **kwargs)
+    return _traced(cls, type=StateType.PAUSED, state_details=state_details, **kwargs)
 
 
 def Suspended(
@@ -766,7 +778,7 @@ def Retrying(cls: Type[State[R]] = State, **kwargs: Any) -> State[R]:
     Returns:
         State: a Retrying state
     """
-    return cls(type=StateType.RUNNING, name="Retrying", **kwargs)
+    return _traced(cls, type=StateType.RUNNING, name="Retrying", **kwargs)
 
 
 def Late(
```
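Every state constructor now funnels through `_traced`, which captures the ambient OpenTelemetry context as a W3C `traceparent` string on the state details. A minimal sketch of that capture step, assuming `opentelemetry-sdk` is installed and no real exporter is configured:

```python
from opentelemetry import propagate, trace
from opentelemetry.sdk.trace import TracerProvider

trace.set_tracer_provider(TracerProvider())
tracer = trace.get_tracer("sketch")

with tracer.start_as_current_span("create-state"):
    carrier: dict[str, str] = {}
    propagate.inject(carrier)  # writes the current span's W3C trace context
    # e.g. "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01"
    print(carrier.get("traceparent"))
```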
prefect/telemetry/instrumentation.py
CHANGED
```diff
@@ -7,9 +7,7 @@ from uuid import UUID
 
 from opentelemetry import metrics, trace
 from opentelemetry._logs import set_logger_provider
-from opentelemetry.exporter.otlp.proto.http._log_exporter import OTLPLogExporter
 from opentelemetry.exporter.otlp.proto.http.metric_exporter import OTLPMetricExporter
-from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
 from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
 from opentelemetry.sdk._logs.export import SimpleLogRecordProcessor
 from opentelemetry.sdk.metrics import MeterProvider
@@ -19,6 +17,7 @@ from opentelemetry.sdk.trace import TracerProvider
 
 from .logging import set_log_handler
 from .processors import InFlightSpanProcessor
+from .services import QueueingLogExporter, QueueingSpanExporter
 
 if TYPE_CHECKING:
     from opentelemetry.sdk._logs import LoggerProvider
@@ -83,11 +82,10 @@ def _setup_trace_provider(
     resource: Resource, headers: dict[str, str], telemetry_url: str
 ) -> TracerProvider:
     trace_provider = TracerProvider(resource=resource)
-    otlp_span_exporter = OTLPSpanExporter(
-        endpoint=_url_join(telemetry_url, "v1/traces"),
-        headers=headers,
+    queueing_span_exporter = QueueingSpanExporter.instance(
+        _url_join(telemetry_url, "v1/traces"), tuple(headers.items())
     )
-    trace_provider.add_span_processor(InFlightSpanProcessor(otlp_span_exporter))
+    trace_provider.add_span_processor(InFlightSpanProcessor(queueing_span_exporter))
     trace.set_tracer_provider(trace_provider)
 
     return trace_provider
@@ -112,11 +110,12 @@ def _setup_logger_provider(
     resource: Resource, headers: dict[str, str], telemetry_url: str
 ) -> LoggerProvider:
     logger_provider = LoggerProvider(resource=resource)
-    otlp_exporter = OTLPLogExporter(
-        endpoint=_url_join(telemetry_url, "v1/logs"),
-        headers=headers,
+    queueing_log_exporter = QueueingLogExporter.instance(
+        _url_join(telemetry_url, "v1/logs"), tuple(headers.items())
+    )
+    logger_provider.add_log_record_processor(
+        SimpleLogRecordProcessor(queueing_log_exporter)
     )
-    logger_provider.add_log_record_processor(SimpleLogRecordProcessor(otlp_exporter))
     set_logger_provider(logger_provider)
     log_handler = LoggingHandler(level=logging.NOTSET, logger_provider=logger_provider)
 
```
prefect/telemetry/services.py
ADDED
```diff
@@ -0,0 +1,67 @@
+from abc import abstractmethod
+from typing import Union
+
+from opentelemetry.exporter.otlp.proto.http._log_exporter import OTLPLogExporter
+from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
+from opentelemetry.sdk._logs import LogData
+from opentelemetry.sdk._logs.export import LogExporter
+from opentelemetry.sdk.trace import ReadableSpan
+from opentelemetry.sdk.trace.export import SpanExporter
+
+from prefect._internal.concurrency.services import BatchedQueueService
+
+
+class BaseQueueingExporter(BatchedQueueService):
+    _max_batch_size = 512
+    _min_interval = 2.0
+    _otlp_exporter: Union[SpanExporter, LogExporter]
+
+    def export(self, batch: list[Union[ReadableSpan, LogData]]) -> None:
+        for item in batch:
+            self.send(item)
+
+    @abstractmethod
+    def _export_batch(self, items: list[Union[ReadableSpan, LogData]]) -> None:
+        pass
+
+    async def _handle_batch(self, items: list[Union[ReadableSpan, LogData]]) -> None:
+        try:
+            self._export_batch(items)
+        except Exception as e:
+            self._logger.exception(f"Failed to export batch: {e}")
+            raise
+
+    def shutdown(self) -> None:
+        if self._stopped:
+            return
+
+        self.drain()
+        self._otlp_exporter.shutdown()
+
+
+class QueueingSpanExporter(BaseQueueingExporter, SpanExporter):
+    _otlp_exporter: OTLPSpanExporter
+
+    def __init__(self, endpoint: str, headers: tuple[tuple[str, str]]):
+        super().__init__()
+        self._otlp_exporter = OTLPSpanExporter(
+            endpoint=endpoint,
+            headers=dict(headers),
+        )
+
+    def _export_batch(self, items: list[ReadableSpan]) -> None:
+        self._otlp_exporter.export(items)
+
+
+class QueueingLogExporter(BaseQueueingExporter, LogExporter):
+    _otlp_exporter: OTLPLogExporter
+
+    def __init__(self, endpoint: str, headers: tuple[tuple[str, str]]):
+        super().__init__()
+        self._otlp_exporter = OTLPLogExporter(
+            endpoint=endpoint,
+            headers=dict(headers),
+        )
+
+    def _export_batch(self, items: list[LogData]) -> None:
+        self._otlp_exporter.export(items)
```
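Wiring the new exporter into a provider mirrors `_setup_trace_provider` above; `.instance()` comes from the underlying queue service and returns a shared exporter for a given argument tuple. A hypothetical setup with placeholder endpoint and auth header:

```python
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider

from prefect.telemetry.processors import InFlightSpanProcessor
from prefect.telemetry.services import QueueingSpanExporter

provider = TracerProvider(resource=Resource.create({"service.name": "sketch"}))
exporter = QueueingSpanExporter.instance(
    "https://telemetry.example.com/v1/traces",  # placeholder endpoint
    (("authorization", "bearer <token>"),),  # placeholder header
)
provider.add_span_processor(InFlightSpanProcessor(exporter))
```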
prefect/utilities/engine.py
CHANGED
```diff
@@ -18,6 +18,7 @@ from typing import (
 from uuid import UUID
 
 import anyio
+from opentelemetry import propagate, trace
 from typing_extensions import TypeIs
 
 import prefect
@@ -767,6 +768,19 @@ def resolve_to_final_result(expr: Any, context: dict[str, Any]) -> Any:
         result = state.result(raise_on_failure=False, fetch=True)
         if asyncio.iscoroutine(result):
             result = run_coro_as_sync(result)
+
+        if state.state_details.traceparent:
+            parameter_context = propagate.extract(
+                {"traceparent": state.state_details.traceparent}
+            )
+            trace.get_current_span().add_link(
+                context=trace.get_current_span(parameter_context).get_span_context(),
+                attributes={
+                    "prefect.input.name": context["parameter_name"],
+                    "prefect.input.type": type(result).__name__,
+                },
+            )
+
         return result
 
 
@@ -796,7 +810,7 @@ def resolve_inputs_sync(
             return_data=return_data,
             max_depth=max_depth,
             remove_annotations=True,
-            context={},
+            context={"parameter_name": parameter},
         )
     except UpstreamTaskError:
         raise
```
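The effect of the new block is a span link from the consuming task's span back to the span that produced the parameter. A sketch of the same extract-and-link pattern, assuming a recent `opentelemetry-sdk` (adding links after span start requires `Span.add_link`, available in recent releases):

```python
from opentelemetry import propagate, trace
from opentelemetry.sdk.trace import TracerProvider

trace.set_tracer_provider(TracerProvider())
tracer = trace.get_tracer("sketch")

# A traceparent as it would be stored on upstream state details (example value)
stored = "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01"
upstream_ctx = propagate.extract({"traceparent": stored})

with tracer.start_as_current_span("downstream-task"):
    trace.get_current_span().add_link(
        context=trace.get_current_span(upstream_ctx).get_span_context(),
        attributes={"prefect.input.name": "x", "prefect.input.type": "int"},
    )
```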
prefect/utilities/importtools.py
CHANGED
```diff
@@ -4,6 +4,7 @@ import importlib.util
 import os
 import runpy
 import sys
+import threading
 import warnings
 from collections.abc import Iterable, Sequence
 from importlib.abc import Loader, MetaPathFinder
@@ -23,6 +24,16 @@ from prefect.utilities.filesystem import filename, is_local_path, tmpchdir
 
 logger: Logger = get_logger(__name__)
 
+_sys_path_lock: Optional[threading.Lock] = None
+
+
+def _get_sys_path_lock() -> threading.Lock:
+    """Get the global sys.path lock, initializing it if necessary."""
+    global _sys_path_lock
+    if _sys_path_lock is None:
+        _sys_path_lock = threading.Lock()
+    return _sys_path_lock
+
 
 def to_qualified_name(obj: Any) -> str:
     """
@@ -135,32 +146,26 @@ def objects_from_script(
 
 
 def load_script_as_module(path: str) -> ModuleType:
-    """
-    Execute a script at the given path.
+    """Execute a script at the given path.
 
-    Sets the module name to `__prefect_loader__`.
+    Sets the module name to a unique identifier to ensure thread safety.
+    Uses a lock to safely modify sys.path for relative imports.
 
     If an exception occurs during execution of the script, a
     `prefect.exceptions.ScriptError` is created to wrap the exception and raised.
-
-    During the duration of this function call, `sys` is modified to support loading.
-    These changes are reverted after completion, but this function is not thread safe
-    and use of it in threaded contexts may result in undesirable behavior.
-
-    See https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly
     """
-    # We will add the parent directory to search locations to support relative imports
-    # during execution of the script
     if not path.endswith(".py"):
         raise ValueError(f"The provided path does not point to a python file: {path!r}")
 
     parent_path = str(Path(path).resolve().parent)
     working_directory = os.getcwd()
 
+    # Generate unique module name for thread safety
+    module_name = f"__prefect_loader_{id(path)}__"
+
     spec = importlib.util.spec_from_file_location(
-        "__prefect_loader__",
+        module_name,
         path,
-        # Support explicit relative imports i.e. `from .foo import bar`
         submodule_search_locations=[parent_path, working_directory],
     )
     if TYPE_CHECKING:
@@ -168,19 +173,21 @@ def load_script_as_module(path: str) -> ModuleType:
     assert spec.loader is not None
 
     module = importlib.util.module_from_spec(spec)
-    sys.modules["__prefect_loader__"] = module
+    sys.modules[module_name] = module
 
-    # Support implicit relative imports i.e. `from foo import bar`
-    sys.path.insert(0, working_directory)
-    sys.path.insert(0, parent_path)
     try:
-        spec.loader.exec_module(module)
+        with _get_sys_path_lock():
+            sys.path.insert(0, working_directory)
+            sys.path.insert(0, parent_path)
+            try:
+                spec.loader.exec_module(module)
+            finally:
+                sys.path.remove(parent_path)
+                sys.path.remove(working_directory)
     except Exception as exc:
         raise ScriptError(user_exc=exc, path=path) from exc
     finally:
-        sys.modules.pop("__prefect_loader__")
-        sys.path.remove(parent_path)
-        sys.path.remove(working_directory)
+        sys.modules.pop(module_name)
 
     return module
 
```
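The fix is twofold: a unique module name per path, so concurrent loads don't clobber each other's `sys.modules` entry, and a lock around the `sys.path` mutation. A standalone sketch of the locking pattern, with a hypothetical helper name:

```python
import sys
import threading
from typing import Callable, TypeVar

T = TypeVar("T")
_lock = threading.Lock()


def with_import_paths(paths: list[str], fn: Callable[[], T]) -> T:
    """Run fn() with extra sys.path entries, serialized across threads."""
    with _lock:
        for p in reversed(paths):
            sys.path.insert(0, p)
        try:
            return fn()
        finally:
            for p in paths:
                sys.path.remove(p)
```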
prefect/variables.py
CHANGED
```diff
@@ -1,4 +1,4 @@
-from typing import Optional
+from typing import Any, Callable, Optional
 
 from pydantic import BaseModel, Field
 
@@ -256,4 +256,4 @@ class Variable(BaseModel):
         return False
 
 
-__getattr__ = getattr_migration(__name__)
+__getattr__: Callable[[str], Any] = getattr_migration(__name__)
```
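The new annotation documents what `getattr_migration` returns: a PEP 562 module-level `__getattr__`. A minimal sketch of the mechanism it hooks into, with hypothetical names:

```python
from typing import Any


def __getattr__(name: str) -> Any:
    # Module-level __getattr__ (PEP 562): called only when normal lookup fails,
    # which lets a module raise a helpful error for moved or removed names.
    if name == "OldHelper":  # hypothetical moved name
        raise ImportError(f"{name!r} moved in Prefect 3.0; import it from its new home.")
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```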
prefect/workers/base.py
CHANGED
```diff
@@ -18,7 +18,6 @@ from typing_extensions import Literal
 import prefect
 from prefect._internal.schemas.validators import return_v_or_none
 from prefect.client.base import ServerType
-from prefect.client.cloud import CloudClient, get_cloud_client
 from prefect.client.orchestration import PrefectClient, get_client
 from prefect.client.schemas.actions import WorkPoolCreate, WorkPoolUpdate
 from prefect.client.schemas.objects import (
@@ -441,7 +440,6 @@ class BaseWorker(abc.ABC):
         self._exit_stack: AsyncExitStack = AsyncExitStack()
         self._runs_task_group: Optional[anyio.abc.TaskGroup] = None
         self._client: Optional[PrefectClient] = None
-        self._cloud_client: Optional[CloudClient] = None
         self._last_polled_time: pendulum.DateTime = pendulum.now("utc")
         self._limit = limit
         self._limiter: Optional[anyio.CapacityLimiter] = None
@@ -637,11 +635,6 @@ class BaseWorker(abc.ABC):
         await self._exit_stack.enter_async_context(self._client)
         await self._exit_stack.enter_async_context(self._runs_task_group)
 
-        if self._client.server_type == ServerType.CLOUD:
-            self._cloud_client = await self._exit_stack.enter_async_context(
-                get_cloud_client()
-            )
-
         self.is_setup = True
 
     async def teardown(self, *exc_info):
@@ -989,6 +982,8 @@ class BaseWorker(abc.ABC):
         try:
             configuration = await self._get_configuration(flow_run)
             submitted_event = self._emit_flow_run_submitted_event(configuration)
+            await self._give_worker_labels_to_flow_run(flow_run.id)
+
             result = await self.run(
                 flow_run=flow_run,
                 task_status=task_status,
@@ -1002,9 +997,8 @@ class BaseWorker(abc.ABC):
             )
             # Mark the task as started to prevent agent crash
             task_status.started(exc)
-            await self._propose_crashed_state(
-                flow_run, "Flow run could not be submitted to infrastructure"
-            )
+            message = f"Flow run could not be submitted to infrastructure:\n{exc!r}"
+            await self._propose_crashed_state(flow_run, message)
         else:
             run_logger.exception(
                 f"An error occurred while monitoring flow run '{flow_run.id}'. "
@@ -1221,7 +1215,7 @@ class BaseWorker(abc.ABC):
         """
         Give this worker's identifying labels to the specified flow run.
         """
-        if self._cloud_client:
+        if self._client:
             labels: KeyValueLabels = {
                 "prefect.worker.name": self.name,
                 "prefect.worker.type": self.type,
@@ -1235,7 +1229,7 @@ class BaseWorker(abc.ABC):
                 }
             )
 
-            await self._cloud_client.update_flow_run_labels(flow_run_id, labels)
+            await self._client.update_flow_run_labels(flow_run_id, labels)
 
     async def __aenter__(self):
         self._logger.debug("Entering worker context...")
```
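With the Cloud-only client removed, label propagation now goes through the regular orchestration client, which works against both Prefect Cloud and self-hosted servers. A hedged sketch of the same call outside a worker (label values are illustrative):

```python
from uuid import UUID

from prefect.client.orchestration import get_client


async def label_run(flow_run_id: UUID) -> None:
    async with get_client() as client:
        await client.update_flow_run_labels(
            flow_run_id,
            {"prefect.worker.name": "my-worker", "prefect.worker.type": "process"},
        )
```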
prefect/workers/block.py
CHANGED
```diff
@@ -1,6 +1,8 @@
 """
 2024-06-27: This surfaces an actionable error message for moved or removed objects in Prefect 3.0 upgrade.
 """
+from typing import Any, Callable
+
 from prefect._internal.compatibility.migration import getattr_migration
 
-__getattr__ = getattr_migration(__name__)
+__getattr__: Callable[[str], Any] = getattr_migration(__name__)
```
prefect/workers/cloud.py
CHANGED
```diff
@@ -1,6 +1,8 @@
 """
 2024-06-27: This surfaces an actionable error message for moved or removed objects in Prefect 3.0 upgrade.
 """
+from typing import Any, Callable
+
 from prefect._internal.compatibility.migration import getattr_migration
 
-__getattr__ = getattr_migration(__name__)
+__getattr__: Callable[[str], Any] = getattr_migration(__name__)
```