prefect-client 3.0.1__py3-none-any.whl → 3.0.3__py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their public registries, and is provided for informational purposes only.
- prefect/_internal/compatibility/deprecated.py +1 -1
- prefect/blocks/core.py +5 -4
- prefect/blocks/notifications.py +21 -0
- prefect/blocks/webhook.py +17 -1
- prefect/cache_policies.py +98 -28
- prefect/client/orchestration.py +42 -20
- prefect/client/schemas/actions.py +10 -2
- prefect/client/schemas/filters.py +4 -2
- prefect/client/schemas/objects.py +48 -6
- prefect/client/schemas/responses.py +15 -1
- prefect/client/types/flexible_schedule_list.py +1 -1
- prefect/concurrency/asyncio.py +45 -6
- prefect/concurrency/services.py +1 -1
- prefect/concurrency/sync.py +21 -27
- prefect/concurrency/v1/asyncio.py +3 -0
- prefect/concurrency/v1/sync.py +4 -5
- prefect/context.py +6 -6
- prefect/deployments/runner.py +43 -5
- prefect/events/actions.py +6 -0
- prefect/flow_engine.py +12 -4
- prefect/flows.py +15 -11
- prefect/locking/filesystem.py +243 -0
- prefect/logging/handlers.py +0 -2
- prefect/logging/loggers.py +0 -18
- prefect/logging/logging.yml +1 -0
- prefect/main.py +19 -5
- prefect/plugins.py +9 -1
- prefect/records/base.py +12 -0
- prefect/records/filesystem.py +6 -2
- prefect/records/memory.py +6 -0
- prefect/records/result_store.py +6 -0
- prefect/results.py +192 -29
- prefect/runner/runner.py +74 -6
- prefect/settings.py +31 -1
- prefect/states.py +34 -17
- prefect/task_engine.py +58 -43
- prefect/transactions.py +113 -52
- prefect/utilities/asyncutils.py +7 -0
- prefect/utilities/collections.py +3 -2
- prefect/utilities/engine.py +20 -9
- prefect/utilities/importtools.py +1 -0
- prefect/utilities/urls.py +70 -12
- prefect/workers/base.py +10 -8
- {prefect_client-3.0.1.dist-info → prefect_client-3.0.3.dist-info}/METADATA +1 -1
- {prefect_client-3.0.1.dist-info → prefect_client-3.0.3.dist-info}/RECORD +48 -47
- {prefect_client-3.0.1.dist-info → prefect_client-3.0.3.dist-info}/LICENSE +0 -0
- {prefect_client-3.0.1.dist-info → prefect_client-3.0.3.dist-info}/WHEEL +0 -0
- {prefect_client-3.0.1.dist-info → prefect_client-3.0.3.dist-info}/top_level.txt +0 -0
prefect/transactions.py
CHANGED
````diff
@@ -18,11 +18,21 @@ from pydantic import Field, PrivateAttr
 from typing_extensions import Self
 
 from prefect.context import ContextModel
-from prefect.exceptions import
+from prefect.exceptions import (
+    ConfigurationError,
+    MissingContextError,
+    SerializationError,
+)
 from prefect.logging.loggers import get_logger, get_run_logger
 from prefect.records import RecordStore
 from prefect.records.base import TransactionRecord
-from prefect.results import
+from prefect.results import (
+    BaseResult,
+    ResultRecord,
+    ResultStore,
+    get_result_store,
+    should_persist_result,
+)
 from prefect.utilities.annotations import NotSet
 from prefect.utilities.collections import AutoEnum
 from prefect.utilities.engine import _get_hook_name
@@ -66,19 +76,91 @@ class Transaction(ContextModel):
     logger: Union[logging.Logger, logging.LoggerAdapter] = Field(
         default_factory=partial(get_logger, "transactions")
     )
+    write_on_commit: bool = True
     _stored_values: Dict[str, Any] = PrivateAttr(default_factory=dict)
     _staged_value: Any = None
     __var__: ContextVar = ContextVar("transaction")
 
     def set(self, name: str, value: Any) -> None:
+        """
+        Set a stored value in the transaction.
+
+        Args:
+            name: The name of the value to set
+            value: The value to set
+
+        Examples:
+            Set a value for use later in the transaction:
+            ```python
+            with transaction() as txn:
+                txn.set("key", "value")
+                ...
+                assert txn.get("key") == "value"
+            ```
+        """
         self._stored_values[name] = value
 
     def get(self, name: str, default: Any = NotSet) -> Any:
-
-
-
-
-
+        """
+        Get a stored value from the transaction.
+
+        Child transactions will return values from their parents unless a value with
+        the same name is set in the child transaction.
+
+        Direct changes to returned values will not update the stored value. To update the
+        stored value, use the `set` method.
+
+        Args:
+            name: The name of the value to get
+            default: The default value to return if the value is not found
+
+        Returns:
+            The value from the transaction
+
+        Examples:
+            Get a value from the transaction:
+            ```python
+            with transaction() as txn:
+                txn.set("key", "value")
+                ...
+                assert txn.get("key") == "value"
+            ```
+
+            Get a value from a parent transaction:
+            ```python
+            with transaction() as parent:
+                parent.set("key", "parent_value")
+                with transaction() as child:
+                    assert child.get("key") == "parent_value"
+            ```
+
+            Update a stored value:
+            ```python
+            with transaction() as txn:
+                txn.set("key", [1, 2, 3])
+                value = txn.get("key")
+                value.append(4)
+                # Stored value is not updated until `.set` is called
+                assert value == [1, 2, 3, 4]
+                assert txn.get("key") == [1, 2, 3]
+
+                txn.set("key", value)
+                assert txn.get("key") == [1, 2, 3, 4]
+            ```
+        """
+        # deepcopy to prevent mutation of stored values
+        value = copy.deepcopy(self._stored_values.get(name, NotSet))
+        if value is NotSet:
+            # if there's a parent transaction, get the value from the parent
+            parent = self.get_parent()
+            if parent is not None:
+                value = parent.get(name, default)
+            # if there's no parent transaction, use the default
+            elif default is not NotSet:
+                value = default
+            else:
+                raise ValueError(f"Could not retrieve value for unknown key: {name}")
+        return value
 
     def is_committed(self) -> bool:
         return self.state == TransactionState.COMMITTED
@@ -101,8 +183,6 @@ class Transaction(ContextModel):
                 "Context already entered. Context enter calls cannot be nested."
             )
         parent = get_transaction()
-        if parent:
-            self._stored_values = copy.deepcopy(parent._stored_values)
         # set default commit behavior; either inherit from parent or set a default of eager
         if self.commit_mode is None:
             self.commit_mode = parent.commit_mode if parent else CommitMode.LAZY
@@ -118,8 +198,10 @@
             and self.key
             and not self.store.supports_isolation_level(self.isolation_level)
         ):
-            raise
-            f"Isolation level {self.isolation_level.name} is not supported by
+            raise ConfigurationError(
+                f"Isolation level {self.isolation_level.name} is not supported by provided "
+                "configuration. Please ensure you've provided a lock file directory or lock "
+                "manager when using the SERIALIZABLE isolation level."
             )
 
         # this needs to go before begin, which could set the state to committed
@@ -229,14 +311,21 @@
             for hook in self.on_commit_hooks:
                 self.run_hook(hook, "commit")
 
-            if self.store and self.key:
+            if self.store and self.key and self.write_on_commit:
                 if isinstance(self.store, ResultStore):
                     if isinstance(self._staged_value, BaseResult):
-                        self.store.write(
+                        self.store.write(
+                            key=self.key, obj=self._staged_value.get(_sync=True)
+                        )
+                    elif isinstance(self._staged_value, ResultRecord):
+                        self.store.persist_result_record(
+                            result_record=self._staged_value
+                        )
                     else:
-                        self.store.write(self.key, self._staged_value)
+                        self.store.write(key=self.key, obj=self._staged_value)
                 else:
-                    self.store.write(self.key, self._staged_value)
+                    self.store.write(key=self.key, result=self._staged_value)
+
             self.state = TransactionState.COMMITTED
             if (
                 self.store
@@ -287,7 +376,7 @@
 
     def stage(
         self,
-        value:
+        value: Any,
         on_rollback_hooks: Optional[List] = None,
         on_commit_hooks: Optional[List] = None,
     ) -> None:
@@ -349,6 +438,7 @@ def transaction(
    commit_mode: Optional[CommitMode] = None,
    isolation_level: Optional[IsolationLevel] = None,
    overwrite: bool = False,
+    write_on_commit: Optional[bool] = None,
    logger: Union[logging.Logger, logging.LoggerAdapter, None] = None,
 ) -> Generator[Transaction, None, None]:
     """
@@ -361,48 +451,16 @@
        - commit_mode: The commit mode controlling when the transaction and
            child transactions are committed
        - overwrite: Whether to overwrite an existing transaction record in the store
+        - write_on_commit: Whether to write the result to the store on commit. If not provided,
+            will default will be determined by the current run context. If no run context is
+            available, the value of `PREFECT_RESULTS_PERSIST_BY_DEFAULT` will be used.
 
    Yields:
        - Transaction: An object representing the transaction state
    """
    # if there is no key, we won't persist a record
    if key and not store:
-
-        from prefect.results import ResultStore, get_default_result_storage
-
-        flow_run_context = FlowRunContext.get()
-        task_run_context = TaskRunContext.get()
-        existing_store = getattr(task_run_context, "result_store", None) or getattr(
-            flow_run_context, "result_store", None
-        )
-
-        new_store: ResultStore
-        if existing_store and existing_store.result_storage_block_id:
-            new_store = existing_store.model_copy(
-                update={
-                    "persist_result": True,
-                }
-            )
-        else:
-            default_storage = get_default_result_storage(_sync=True)
-            if existing_store:
-                new_store = existing_store.model_copy(
-                    update={
-                        "persist_result": True,
-                        "storage_block": default_storage,
-                        "storage_block_id": default_storage._block_document_id,
-                    }
-                )
-            else:
-                new_store = ResultStore(
-                    persist_result=True,
-                    result_storage=default_storage,
-                )
-        from prefect.records.result_store import ResultRecordStore
-
-        store = ResultRecordStore(
-            result_store=new_store,
-        )
+        store = get_result_store()
 
    try:
        logger = logger or get_run_logger()
@@ -415,6 +473,9 @@ def transaction(
            commit_mode=commit_mode,
            isolation_level=isolation_level,
            overwrite=overwrite,
+            write_on_commit=write_on_commit
+            if write_on_commit is not None
+            else should_persist_result(),
            logger=logger,
        ) as txn:
            yield txn
````
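For orientation, here is a minimal sketch of how the new `write_on_commit` flag can be used; the key name and staged value are made up for illustration, and this assumes a default result store is configured in the environment.

```python
from prefect.transactions import transaction

# Hypothetical usage: stage a value under a key but skip writing it to the
# result store when the transaction commits (write_on_commit=False keeps the
# commit in memory only).
with transaction(key="example-record", write_on_commit=False) as txn:
    txn.set("attempt", 1)        # stored values are scoped to this transaction
    txn.stage({"answer": 42})    # staged value is committed without persistence
```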
prefect/utilities/asyncutils.py
CHANGED
````diff
@@ -341,6 +341,13 @@ def sync_compatible(
       will submit the async method to the event loop.
     - If we cannot find an event loop, we will create a new one and run the async method
       then tear down the loop.
+
+    Note: Type checkers will infer functions decorated with `@sync_compatible` are synchronous. If
+    you want to use the decorated function in an async context, you will need to ignore the types
+    and "cast" the return type to a coroutine. For example:
+    ```python
+    result: Coroutine = sync_compatible(my_async_function)(arg1, arg2) # type: ignore
+    ```
     """
 
     @wraps(async_fn)
````
prefect/utilities/collections.py
CHANGED
```diff
@@ -18,7 +18,6 @@ from typing import (
     Generator,
     Hashable,
     Iterable,
-    Iterator,
     List,
     Optional,
     Set,
@@ -192,7 +191,9 @@ def extract_instances(
     return ret
 
 
-def batched_iterable(
+def batched_iterable(
+    iterable: Iterable[T], size: int
+) -> Generator[Tuple[T, ...], None, None]:
     """
     Yield batches of a certain size from an iterable
 
```
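The added annotations only make the existing signature explicit; a quick usage sketch of `batched_iterable`:

```python
from prefect.utilities.collections import batched_iterable

# Yields tuples of at most `size` items until the iterable is exhausted.
for batch in batched_iterable(range(7), size=3):
    print(batch)
# (0, 1, 2)
# (3, 4, 5)
# (6,)
```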
prefect/utilities/engine.py
CHANGED
```diff
@@ -44,12 +44,11 @@ from prefect.exceptions import (
 )
 from prefect.flows import Flow
 from prefect.futures import PrefectFuture
-from prefect.futures import PrefectFuture as NewPrefectFuture
 from prefect.logging.loggers import (
     get_logger,
     task_run_logger,
 )
-from prefect.results import BaseResult
+from prefect.results import BaseResult, ResultRecord, should_persist_result
 from prefect.settings import (
     PREFECT_LOGGING_LOG_PRINTS,
 )
@@ -122,7 +121,7 @@ async def collect_task_run_inputs(expr: Any, max_depth: int = -1) -> Set[TaskRun
 
 
 def collect_task_run_inputs_sync(
-    expr: Any, future_cls: Any =
+    expr: Any, future_cls: Any = PrefectFuture, max_depth: int = -1
 ) -> Set[TaskRunInput]:
     """
     This function recurses through an expression to generate a set of any discernible
@@ -131,7 +130,7 @@ def collect_task_run_inputs_sync(
 
     Examples:
         >>> task_inputs = {
-        >>>     k:
+        >>>     k: collect_task_run_inputs_sync(v) for k, v in parameters.items()
         >>> }
     """
     # TODO: This function needs to be updated to detect parameters and constants
@@ -401,6 +400,8 @@ async def propose_state(
             # Avoid fetching the result unless it is cached, otherwise we defeat
             # the purpose of disabling `cache_result_in_memory`
             result = await state.result(raise_on_failure=False, fetch=True)
+        elif isinstance(state.data, ResultRecord):
+            result = state.data.result
         else:
             result = state.data
 
@@ -504,6 +505,8 @@ def propose_state_sync(
             result = state.result(raise_on_failure=False, fetch=True)
             if inspect.isawaitable(result):
                 result = run_coro_as_sync(result)
+        elif isinstance(state.data, ResultRecord):
+            result = state.data.result
         else:
             result = state.data
 
@@ -624,6 +627,9 @@ def link_state_to_result(state: State, result: Any) -> None:
     """
 
     flow_run_context = FlowRunContext.get()
+    # Drop the data field to avoid holding a strong reference to the result
+    # Holding large user objects in memory can cause memory bloat
+    linked_state = state.model_copy(update={"data": None})
 
     def link_if_trackable(obj: Any) -> None:
         """Track connection between a task run result and its associated state if it has a unique ID.
@@ -640,7 +646,7 @@ def link_state_to_result(state: State, result: Any) -> None:
         ):
             state.state_details.untrackable_result = True
             return
-        flow_run_context.task_run_results[id(obj)] =
+        flow_run_context.task_run_results[id(obj)] = linked_state
 
     if flow_run_context:
         visit_collection(expr=result, visit_fn=link_if_trackable, max_depth=1)
@@ -732,6 +738,13 @@ def emit_task_run_state_change_event(
 ) -> Event:
     state_message_truncation_length = 100_000
 
+    if isinstance(validated_state.data, ResultRecord) and should_persist_result():
+        data = validated_state.data.metadata.model_dump(mode="json")
+    elif isinstance(validated_state.data, BaseResult):
+        data = validated_state.data.model_dump(mode="json")
+    else:
+        data = None
+
     return emit_event(
         id=validated_state.id,
         occurred=validated_state.timestamp,
@@ -770,9 +783,7 @@ def emit_task_run_state_change_event(
                     exclude_unset=True,
                     exclude={"flow_run_id", "task_run_id"},
                 ),
-                "data":
-                if isinstance(validated_state.data, BaseResult)
-                else None,
+                "data": data,
             },
             "task_run": task_run.model_dump(
                 mode="json",
@@ -822,7 +833,7 @@ def resolve_to_final_result(expr, context):
     if isinstance(context.get("annotation"), quote):
         raise StopVisiting()
 
-    if isinstance(expr,
+    if isinstance(expr, PrefectFuture):
         upstream_task_run = context.get("current_task_run")
         upstream_task = context.get("current_task")
         if (
```
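A toy illustration of the pattern behind the `link_state_to_result` change: the cached copy of the state drops its `data` payload so it does not pin large user objects in memory. The `State` model below is a stand-in, not Prefect's actual class.

```python
from typing import Any, Optional

from pydantic import BaseModel


class State(BaseModel):
    name: str
    data: Optional[Any] = None


state = State(name="Completed", data=[0] * 1_000_000)

# Copy the state without its data payload before caching it elsewhere.
linked_state = state.model_copy(update={"data": None})

assert linked_state.data is None
assert state.data is not None  # the original state keeps its result
```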
prefect/utilities/importtools.py
CHANGED
```diff
@@ -398,6 +398,7 @@ def safe_load_namespace(
     # Save original sys.path and modify it
     original_sys_path = sys.path.copy()
     sys.path.insert(0, parent_dir)
+    sys.path.insert(0, file_dir)
 
     # Create a temporary module for import context
     temp_module = ModuleType(package_name)
```
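A brief sketch of why the extra `sys.path` entry matters: with the loaded file's own directory on the path, modules sitting next to that file can be imported by name during namespace loading. The directory below is illustrative only.

```python
import sys

# Hypothetical directory containing the file being loaded; inserting it at the
# front of sys.path makes its sibling modules importable.
file_dir = "/path/to/flows"
sys.path.insert(0, file_dir)
```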
prefect/utilities/urls.py
CHANGED
```diff
@@ -1,17 +1,22 @@
 import inspect
+import ipaddress
+import socket
 import urllib.parse
-from typing import Any, Literal, Optional, Union
+from typing import TYPE_CHECKING, Any, Literal, Optional, Union
+from urllib.parse import urlparse
 from uuid import UUID
 
 from pydantic import BaseModel
 
 from prefect import settings
-from prefect.blocks.core import Block
-from prefect.events.schemas.automations import Automation
-from prefect.events.schemas.events import ReceivedEvent, Resource
-from prefect.futures import PrefectFuture
 from prefect.logging.loggers import get_logger
-
+
+if TYPE_CHECKING:
+    from prefect.blocks.core import Block
+    from prefect.events.schemas.automations import Automation
+    from prefect.events.schemas.events import ReceivedEvent, Resource
+    from prefect.futures import PrefectFuture
+    from prefect.variables import Variable
 
 logger = get_logger("utilities.urls")
 
@@ -58,6 +63,54 @@ URLType = Literal["ui", "api"]
 RUN_TYPES = {"flow-run", "task-run"}
 
 
+def validate_restricted_url(url: str):
+    """
+    Validate that the provided URL is safe for outbound requests. This prevents
+    attacks like SSRF (Server Side Request Forgery), where an attacker can make
+    requests to internal services (like the GCP metadata service, localhost addresses,
+    or in-cluster Kubernetes services)
+
+    Args:
+        url: The URL to validate.
+
+    Raises:
+        ValueError: If the URL is a restricted URL.
+    """
+
+    try:
+        parsed_url = urlparse(url)
+    except ValueError:
+        raise ValueError(f"{url!r} is not a valid URL.")
+
+    if parsed_url.scheme not in ("http", "https"):
+        raise ValueError(
+            f"{url!r} is not a valid URL. Only HTTP and HTTPS URLs are allowed."
+        )
+
+    hostname = parsed_url.hostname or ""
+
+    # Remove IPv6 brackets if present
+    if hostname.startswith("[") and hostname.endswith("]"):
+        hostname = hostname[1:-1]
+
+    if not hostname:
+        raise ValueError(f"{url!r} is not a valid URL.")
+
+    try:
+        ip_address = socket.gethostbyname(hostname)
+        ip = ipaddress.ip_address(ip_address)
+    except socket.gaierror:
+        try:
+            ip = ipaddress.ip_address(hostname)
+        except ValueError:
+            raise ValueError(f"{url!r} is not a valid URL. It could not be resolved.")
+
+    if ip.is_private:
+        raise ValueError(
+            f"{url!r} is not a valid URL. It resolves to the private address {ip}."
+        )
+
+
 def convert_class_to_name(obj: Any) -> str:
     """
     Convert CamelCase class name to dash-separated lowercase name
@@ -69,12 +122,12 @@ def convert_class_to_name(obj: Any) -> str:
 
 def url_for(
     obj: Union[
-        PrefectFuture,
-        Block,
-        Variable,
-        Automation,
-        Resource,
-        ReceivedEvent,
+        "PrefectFuture",
+        "Block",
+        "Variable",
+        "Automation",
+        "Resource",
+        "ReceivedEvent",
         BaseModel,
         str,
     ],
@@ -105,6 +158,11 @@ def url_for(
         url_for(obj=my_flow_run)
         url_for("flow-run", obj_id="123e4567-e89b-12d3-a456-426614174000")
     """
+    from prefect.blocks.core import Block
+    from prefect.events.schemas.automations import Automation
+    from prefect.events.schemas.events import ReceivedEvent, Resource
+    from prefect.futures import PrefectFuture
+
     if isinstance(obj, PrefectFuture):
         name = "task-run"
     elif isinstance(obj, Block):
```
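A usage sketch for the new `validate_restricted_url` helper; the first call assumes outbound DNS resolution is available.

```python
from prefect.utilities.urls import validate_restricted_url

# A routable public URL passes silently.
validate_restricted_url("https://example.com/webhook")

# Loopback, link-local, and other private addresses raise ValueError,
# blocking SSRF targets such as cloud metadata endpoints.
try:
    validate_restricted_url("http://169.254.169.254/latest/meta-data/")
except ValueError as exc:
    print(exc)
```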
prefect/workers/base.py
CHANGED
```diff
@@ -146,6 +146,12 @@ class BaseJobConfiguration(BaseModel):
         )
         variables.update(values)
 
+        # deep merge `env`
+        if isinstance(job_config.get("env"), dict) and (
+            hardcoded_env := variables.get("env")
+        ):
+            job_config["env"] = hardcoded_env | job_config.get("env")
+
         populated_configuration = apply_values(template=job_config, values=variables)
         populated_configuration = await resolve_block_document_references(
             template=populated_configuration, client=client
@@ -863,19 +869,15 @@ class BaseWorker(abc.ABC):
 
         if flow_run.deployment_id:
             deployment = await self._client.read_deployment(flow_run.deployment_id)
-            if deployment and deployment.
-                limit_name =
-                concurrency_limit = deployment.concurrency_limit
+            if deployment and deployment.global_concurrency_limit:
+                limit_name = deployment.global_concurrency_limit.name
                 concurrency_ctx = concurrency
             else:
-                limit_name =
-                concurrency_limit = None
+                limit_name = ""
                 concurrency_ctx = asyncnullcontext
 
         try:
-            async with concurrency_ctx(
-                limit_name, occupy=concurrency_limit, max_retries=0
-            ):
+            async with concurrency_ctx(limit_name, max_retries=0, strict=True):
                 configuration = await self._get_configuration(flow_run, deployment)
                 submitted_event = self._emit_flow_run_submitted_event(configuration)
                 result = await self.run(
```
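A toy illustration of the "deep merge `env`" behavior added above: the worker's hardcoded environment acts as the base, and keys set in the job's own `env` win on collision. The sample variables are made up.

```python
hardcoded_env = {"PREFECT_API_URL": "http://127.0.0.1:4200/api", "LOG_LEVEL": "INFO"}
job_env = {"LOG_LEVEL": "DEBUG"}

# Mirrors `hardcoded_env | job_config.get("env")`: the right-hand side wins on conflicts.
merged = hardcoded_env | job_env
assert merged == {"PREFECT_API_URL": "http://127.0.0.1:4200/api", "LOG_LEVEL": "DEBUG"}
```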
|