prefect-client 3.2.15.dev9__py3-none-any.whl → 3.2.16.dev1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- prefect/_build_info.py +3 -3
- prefect/_internal/compatibility/deprecated.py +28 -23
- prefect/_internal/pydantic/v2_schema.py +0 -14
- prefect/_internal/schemas/bases.py +6 -3
- prefect/_internal/schemas/validators.py +9 -2
- prefect/blocks/system.py +7 -6
- prefect/client/cloud.py +0 -1
- prefect/client/orchestration/__init__.py +0 -1
- prefect/client/orchestration/_concurrency_limits/client.py +0 -4
- prefect/client/schemas/objects.py +54 -25
- prefect/client/schemas/schedules.py +6 -5
- prefect/concurrency/_asyncio.py +1 -12
- prefect/concurrency/asyncio.py +0 -4
- prefect/concurrency/services.py +1 -3
- prefect/concurrency/sync.py +1 -6
- prefect/context.py +4 -1
- prefect/events/clients.py +3 -3
- prefect/events/filters.py +7 -2
- prefect/events/related.py +5 -3
- prefect/events/schemas/events.py +4 -4
- prefect/events/utilities.py +4 -3
- prefect/exceptions.py +1 -1
- prefect/flow_engine.py +2 -11
- prefect/futures.py +3 -12
- prefect/locking/filesystem.py +3 -2
- prefect/logging/formatters.py +1 -1
- prefect/logging/handlers.py +2 -2
- prefect/main.py +5 -5
- prefect/results.py +2 -1
- prefect/runner/runner.py +5 -3
- prefect/runner/server.py +2 -2
- prefect/runtime/flow_run.py +11 -6
- prefect/server/api/concurrency_limits_v2.py +12 -8
- prefect/server/api/deployments.py +4 -2
- prefect/server/api/ui/flows.py +7 -2
- prefect/server/api/ui/task_runs.py +3 -3
- prefect/states.py +10 -35
- prefect/task_engine.py +16 -9
- prefect/task_worker.py +6 -3
- prefect/tasks.py +5 -0
- prefect/telemetry/bootstrap.py +3 -1
- prefect/telemetry/instrumentation.py +13 -4
- prefect/telemetry/logging.py +3 -1
- prefect/types/_datetime.py +190 -77
- prefect/utilities/collections.py +6 -12
- prefect/utilities/dockerutils.py +14 -5
- prefect/utilities/engine.py +3 -8
- prefect/workers/base.py +15 -10
- prefect/workers/server.py +0 -1
- {prefect_client-3.2.15.dev9.dist-info → prefect_client-3.2.16.dev1.dist-info}/METADATA +6 -3
- {prefect_client-3.2.15.dev9.dist-info → prefect_client-3.2.16.dev1.dist-info}/RECORD +53 -55
- prefect/_internal/pydantic/annotations/__init__.py +0 -0
- prefect/_internal/pydantic/annotations/pendulum.py +0 -78
- {prefect_client-3.2.15.dev9.dist-info → prefect_client-3.2.16.dev1.dist-info}/WHEEL +0 -0
- {prefect_client-3.2.15.dev9.dist-info → prefect_client-3.2.16.dev1.dist-info}/licenses/LICENSE +0 -0
prefect/context.py
CHANGED
@@ -20,6 +20,7 @@ from typing_extensions import Self
 import prefect.logging
 import prefect.logging.configuration
 import prefect.settings
+import prefect.types._datetime
 from prefect._internal.compatibility.migration import getattr_migration
 from prefect.client.orchestration import PrefectClient, SyncPrefectClient, get_client
 from prefect.client.schemas import FlowRun, TaskRun
@@ -315,7 +316,9 @@ class RunContext(ContextModel):

         start_client_metrics_server()

-    start_time: DateTime = Field(
+    start_time: DateTime = Field(
+        default_factory=lambda: prefect.types._datetime.now("UTC")
+    )
     input_keyset: Optional[dict[str, dict[str, str]]] = None
     client: Union[PrefectClient, SyncPrefectClient]
prefect/events/clients.py
CHANGED
@@ -35,6 +35,7 @@ from websockets.exceptions import (
     ConnectionClosedOK,
 )

+import prefect.types._datetime
 from prefect.events import Event
 from prefect.logging import get_logger
 from prefect.settings import (
@@ -46,7 +47,6 @@ from prefect.settings import (
     PREFECT_DEBUG_MODE,
     PREFECT_SERVER_ALLOW_EPHEMERAL_MODE,
 )
-from prefect.types._datetime import add_years, now

 if TYPE_CHECKING:
     from prefect.events.filters import EventFilter
@@ -665,8 +665,8 @@ class PrefectEventSubscriber:
         from prefect.events.filters import EventOccurredFilter

         self._filter.occurred = EventOccurredFilter(
-            since=now("UTC") - timedelta(minutes=1),
-            until=
+            since=prefect.types._datetime.now("UTC") - timedelta(minutes=1),
+            until=prefect.types._datetime.now("UTC") + timedelta(days=365),
         )

         logger.debug(" filtering events since %s...", self._filter.occurred.since)
prefect/events/filters.py
CHANGED
@@ -1,8 +1,10 @@
+import datetime
 from typing import Optional
 from uuid import UUID

 from pydantic import Field

+import prefect.types._datetime
 from prefect._internal.schemas.bases import PrefectBaseModel
 from prefect.types import DateTime
 from prefect.utilities.collections import AutoEnum
@@ -59,11 +61,14 @@ class EventDataFilter(PrefectBaseModel, extra="forbid"):  # type: ignore[call-arg]

 class EventOccurredFilter(EventDataFilter):
     since: DateTime = Field(
-        default_factory=lambda:
+        default_factory=lambda: prefect.types._datetime.start_of_day(
+            prefect.types._datetime.now("UTC")
+        )
+        - datetime.timedelta(days=180),
         description="Only include events after this time (inclusive)",
     )
     until: DateTime = Field(
-        default_factory=lambda:
+        default_factory=lambda: prefect.types._datetime.now("UTC"),
         description="Only include events prior to this time (inclusive)",
     )

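Note: the new EventOccurredFilter defaults amount to a query window from the start of the current UTC day minus 180 days up to the current moment. A minimal stdlib-only sketch of that window, assuming prefect.types._datetime.start_of_day and now behave like the equivalents below:

    import datetime

    now = datetime.datetime.now(datetime.timezone.utc)
    start_of_day = now.replace(hour=0, minute=0, second=0, microsecond=0)
    since = start_of_day - datetime.timedelta(days=180)   # default lower bound
    until = now                                           # default upper bound
    print(since.isoformat(), "->", until.isoformat())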
prefect/events/related.py
CHANGED
@@ -14,7 +14,7 @@ from typing import (
 )
 from uuid import UUID

-
+import prefect.types._datetime

 from .schemas.events import RelatedResource

@@ -23,7 +23,9 @@ if TYPE_CHECKING:
     from prefect.client.orchestration import PrefectClient

 ResourceCacheEntry = Dict[str, Union[str, "ObjectBaseModel", None]]
-RelatedResourceCache = Dict[
+RelatedResourceCache = Dict[
+    str, Tuple[ResourceCacheEntry, prefect.types._datetime.DateTime]
+]

 MAX_CACHE_SIZE = 100
 RESOURCE_CACHE: RelatedResourceCache = {}
@@ -205,7 +207,7 @@ async def _get_and_cache_related_object(
         "object": obj_,
     }

-    cache[cache_key] = (entry, now("UTC"))
+    cache[cache_key] = (entry, prefect.types._datetime.now("UTC"))

     # In the case of a worker or agent this cache could be long-lived. To keep
     # from running out of memory only keep `MAX_CACHE_SIZE` entries in the
prefect/events/schemas/events.py
CHANGED
@@ -24,12 +24,12 @@ from pydantic import (
 )
 from typing_extensions import Annotated, Self

+import prefect.types._datetime
 from prefect._internal.schemas.bases import PrefectBaseModel
 from prefect.logging import get_logger
 from prefect.settings import (
     PREFECT_EVENTS_MAXIMUM_LABELS_PER_RESOURCE,
 )
-from prefect.types import DateTime

 from .labelling import Labelled

@@ -115,8 +115,8 @@ class Event(PrefectBaseModel):

     model_config: ClassVar[ConfigDict] = ConfigDict(extra="ignore")

-    occurred: DateTime = Field(
-        default_factory=lambda:
+    occurred: prefect.types._datetime.DateTime = Field(
+        default_factory=lambda: prefect.types._datetime.now("UTC"),
         description="When the event happened from the sender's perspective",
     )
     event: str = Field(description="The name of the event that happened")
@@ -184,7 +184,7 @@ class ReceivedEvent(Event):

     model_config: ClassVar[ConfigDict] = ConfigDict(from_attributes=True)

-    received: DateTime = Field(
+    received: prefect.types._datetime.DateTime = Field(
         ...,
         description="When the event was received by Prefect Cloud",
     )
prefect/events/utilities.py
CHANGED
@@ -1,10 +1,11 @@
 from __future__ import annotations

+import datetime
 from datetime import timedelta
 from typing import Any
 from uuid import UUID

-
+import prefect.types._datetime

 from .clients import (
     AssertingEventsClient,
@@ -21,7 +22,7 @@ TIGHT_TIMING = timedelta(minutes=5)
 def emit_event(
     event: str,
     resource: dict[str, str],
-    occurred:
+    occurred: datetime.datetime | None = None,
     related: list[dict[str, str]] | list[RelatedResource] | None = None,
     payload: dict[str, Any] | None = None,
     id: UUID | None = None,
@@ -69,7 +70,7 @@ def emit_event(
     }

     if occurred is None:
-        occurred =
+        occurred = prefect.types._datetime.now("UTC")
     event_kwargs["occurred"] = occurred

     if related is not None:
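Note: a hedged usage sketch of the public emit_event helper after this change; when occurred is omitted it now defaults to the current UTC time. The event name and resource id below are illustrative placeholders, not values from this diff:

    from prefect.events import emit_event

    emit_event(
        event="my.app.order.created",  # example event name
        resource={"prefect.resource.id": "my.app.order.12345"},  # example resource id
    )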
prefect/exceptions.py
CHANGED
@@ -8,7 +8,7 @@ from collections.abc import Iterable
 from types import ModuleType, TracebackType
 from typing import TYPE_CHECKING, Any, Callable, Optional

-from httpx
+from httpx import HTTPStatusError
 from pydantic import ValidationError
 from typing_extensions import Self

prefect/flow_engine.py
CHANGED
@@ -355,11 +355,7 @@ class FlowRunEngine(BaseFlowRunEngine[P, R]):
         # the State was Prefect-created.
         # TODO: Remove the need to get the result from a State except in cases where the return value
         # is a State object.
-        _result = self.state.result(raise_on_failure=raise_on_failure,
-        # state.result is a `sync_compatible` function that may or may not return an awaitable
-        # depending on whether the parent frame is sync or not
-        if asyncio.iscoroutine(_result):
-            _result = run_coro_as_sync(_result)
+        _result = self.state.result(raise_on_failure=raise_on_failure, _sync=True)  # type: ignore
         return _result

     def handle_success(self, result: R) -> R:
@@ -924,12 +920,7 @@ class AsyncFlowRunEngine(BaseFlowRunEngine[P, R]):
         # the State was Prefect-created.
         # TODO: Remove the need to get the result from a State except in cases where the return value
         # is a State object.
-
-        # state.result is a `sync_compatible` function that may or may not return an awaitable
-        # depending on whether the parent frame is sync or not
-        if asyncio.iscoroutine(_result):
-            _result = await _result
-        return _result
+        return await self.state.aresult(raise_on_failure=raise_on_failure)  # type: ignore

     async def handle_success(self, result: R) -> R:
         result_store = getattr(FlowRunContext.get(), "result_store", None)
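Note: this flow_engine.py change (and the futures.py change that follows) replaces the old "call state.result() and check whether it returned a coroutine" pattern with explicit sync and async accessors. A hedged sketch of the calling pattern, assuming a local Prefect install at or above 3.2.16.dev1; _sync is the internal keyword the diff passes, aresult is the async accessor it awaits:

    from prefect import flow

    @flow
    def add(x: int, y: int) -> int:
        return x + y

    state = add(1, 2, return_state=True)

    # Synchronous caller: ask for a concrete value directly.
    value = state.result(raise_on_failure=True, _sync=True)

    # An asynchronous caller (inside an event loop) would instead use:
    #     value = await state.aresult(raise_on_failure=True)
    print(value)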
prefect/futures.py
CHANGED
@@ -1,7 +1,6 @@
 from __future__ import annotations

 import abc
-import asyncio
 import concurrent.futures
 import threading
 import uuid
@@ -221,12 +220,8 @@ class PrefectConcurrentFuture(PrefectWrappedFuture[R, concurrent.futures.Future[R]]):
             return future_result

         _result = self._final_state.result(
-            raise_on_failure=raise_on_failure,
+            raise_on_failure=raise_on_failure, _sync=True
         )
-        # state.result is a `sync_compatible` function that may or may not return an awaitable
-        # depending on whether the parent frame is sync or not
-        if asyncio.iscoroutine(_result):
-            _result = run_coro_as_sync(_result)
         return _result

     def __del__(self) -> None:
@@ -316,9 +311,7 @@ class PrefectDistributedFuture(PrefectTaskRunFuture[R]):
                 f"Task run {self.task_run_id} did not complete within {timeout} seconds"
             )

-        return await self._final_state.
-            raise_on_failure=raise_on_failure, fetch=True
-        )
+        return await self._final_state.aresult(raise_on_failure=raise_on_failure)

     def add_done_callback(self, fn: Callable[[PrefectFuture[R]], None]) -> None:
         if self._final_state:
@@ -433,9 +426,7 @@ class PrefectFlowRunFuture(PrefectFuture[R]):
                 f"Task run {self.task_run_id} did not complete within {timeout} seconds"
             )

-        return await self._final_state.
-            raise_on_failure=raise_on_failure, fetch=True
-        )
+        return await self._final_state.aresult(raise_on_failure=raise_on_failure)

     def add_done_callback(self, fn: Callable[[PrefectFuture[R]], None]) -> None:
         if self._final_state:
prefect/locking/filesystem.py
CHANGED
@@ -1,3 +1,4 @@
+import datetime
 import time
 from datetime import timedelta
 from logging import Logger
@@ -9,7 +10,7 @@ import pydantic_core
 from typing_extensions import TypedDict

 from prefect.logging.loggers import get_logger
-from prefect.types._datetime import
+from prefect.types._datetime import now, parse_datetime

 from .protocol import LockManager

@@ -27,7 +28,7 @@ class _LockInfo(TypedDict):
     """

     holder: str
-    expiration: Optional[
+    expiration: Optional[datetime.datetime]
     path: Path

prefect/logging/formatters.py
CHANGED
prefect/logging/handlers.py
CHANGED
@@ -236,7 +236,7 @@ class APILogHandler(logging.Handler):
             worker_id=worker_id,
             name=record.name,
             level=record.levelno,
-            timestamp=from_timestamp(getattr(record, "created", None) or time.time()),
+            timestamp=from_timestamp(getattr(record, "created", None) or time.time()),  # pyright: ignore[reportArgumentType] DateTime is split into two types depending on Python version
             message=self.format(record),
         ).model_dump(mode="json")

@@ -277,7 +277,7 @@ class WorkerAPILogHandler(APILogHandler):
             worker_id=worker_id,
             name=record.name,
             level=record.levelno,
-            timestamp=from_timestamp(getattr(record, "created", None) or time.time()),
+            timestamp=from_timestamp(getattr(record, "created", None) or time.time()),  # pyright: ignore[reportArgumentType] DateTime is split into two types depending on Python version
             message=self.format(record),
         ).model_dump(mode="json")

prefect/main.py
CHANGED
@@ -11,13 +11,13 @@ from prefect._result_records import ResultRecordMetadata
 from prefect.flow_runs import pause_flow_run, resume_flow_run, suspend_flow_run
 from prefect.client.orchestration import get_client
 from prefect.client.cloud import get_cloud_client
-import prefect.variables
-import prefect.runtime
+import prefect.variables  # pyright: ignore[reportUnusedImport] # TODO: Does this need to be imported here?
+import prefect.runtime  # pyright: ignore[reportUnusedImport] # TODO: Does this need to be imported here?

 # Import modules that register types
-import prefect.serializers
-import prefect.blocks.notifications
-import prefect.blocks.system
+import prefect.serializers  # pyright: ignore[reportUnusedImport]
+import prefect.blocks.notifications  # pyright: ignore[reportUnusedImport]
+import prefect.blocks.system  # pyright: ignore[reportUnusedImport]

 # Initialize the process-wide profile and registry at import time
 import prefect.context
prefect/results.py
CHANGED
@@ -31,6 +31,7 @@ from pydantic import (
 from typing_extensions import ParamSpec, Self

 import prefect
+import prefect.types._datetime
 from prefect._internal.compatibility.async_dispatch import async_dispatch
 from prefect._result_records import R, ResultRecord, ResultRecordMetadata
 from prefect.blocks.core import Block
@@ -519,7 +520,7 @@ class ResultStore(BaseModel):
         if metadata.expiration:
             # if the result has an expiration,
             # check if it is still in the future
-            exists = metadata.expiration >
+            exists = metadata.expiration > prefect.types._datetime.now("UTC")
         else:
             exists = True
         return exists
prefect/runner/runner.py
CHANGED
@@ -113,7 +113,7 @@ from prefect.states import (
     Pending,
     exception_to_failed_state,
 )
-from prefect.types._datetime import
+from prefect.types._datetime import now
 from prefect.types.entrypoint import EntrypointType
 from prefect.utilities.annotations import NotSet
 from prefect.utilities.asyncutils import (
@@ -992,7 +992,7 @@ class Runner:
         if self.stopping:
             return
         runs_response = await self._get_scheduled_flow_runs()
-        self.last_polled:
+        self.last_polled: datetime.datetime = now("UTC")
         return await self._submit_scheduled_flow_runs(flow_run_response=runs_response)

     async def _check_for_cancelled_flow_runs(
@@ -1258,7 +1258,9 @@ class Runner:
         """
         Retrieve scheduled flow runs for this runner.
         """
-        scheduled_before =
+        scheduled_before = now("UTC") + datetime.timedelta(
+            seconds=int(self._prefetch_seconds)
+        )
         self._logger.debug(
             f"Querying for flow runs scheduled before {scheduled_before}"
         )
prefect/runner/server.py
CHANGED
@@ -23,7 +23,7 @@ from prefect.settings import (
     PREFECT_RUNNER_SERVER_MISSED_POLLS_TOLERANCE,
     PREFECT_RUNNER_SERVER_PORT,
 )
-from prefect.types._datetime import
+from prefect.types._datetime import now as now_fn
 from prefect.utilities.asyncutils import run_coro_as_sync
 from prefect.utilities.importtools import load_script_as_module

@@ -56,7 +56,7 @@ def perform_health_check(
     )

     def _health_check():
-        now =
+        now = now_fn("UTC")
         poll_delay = (now - runner.last_polled).total_seconds()

         if TYPE_CHECKING:
prefect/runtime/flow_run.py
CHANGED
@@ -25,12 +25,13 @@ from __future__ import annotations
 import os
 from datetime import datetime
 from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional
+from zoneinfo import ZoneInfo

 from prefect._internal.concurrency.api import create_call, from_sync
 from prefect.client.orchestration import get_client
 from prefect.context import FlowRunContext, TaskRunContext
 from prefect.settings import PREFECT_API_URL, PREFECT_UI_URL
-from prefect.types._datetime import DateTime,
+from prefect.types._datetime import DateTime, now, parse_datetime

 if TYPE_CHECKING:
     from prefect.client.schemas.objects import Flow, FlowRun, TaskRun
@@ -53,10 +54,14 @@ __all__ = [
 ]


-def _parse_datetime_UTC(dt: str) ->
-
-
-
+def _parse_datetime_UTC(dt: str) -> datetime:
+    parsed_dt = parse_datetime(dt)
+    if parsed_dt.tzinfo is None:
+        # if the datetime is naive, assume it is UTC
+        return parsed_dt.replace(tzinfo=ZoneInfo("UTC"))
+    else:
+        # if the datetime is timezone-aware, convert to UTC
+        return parsed_dt.astimezone(ZoneInfo("UTC"))


 type_cast: dict[
@@ -67,7 +72,7 @@ type_cast: dict[
     int: int,
     float: float,
     str: str,
-
+    datetime: _parse_datetime_UTC,
     # for optional defined attributes, when real value is NoneType, use str
     type(None): str,
 }
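Note: a stdlib-only sketch of the normalization _parse_datetime_UTC performs above, using datetime.fromisoformat as a stand-in for parse_datetime (an assumption; the real helper accepts more input formats):

    import datetime
    from zoneinfo import ZoneInfo

    def to_utc(value: str) -> datetime.datetime:
        parsed = datetime.datetime.fromisoformat(value)
        if parsed.tzinfo is None:
            # naive timestamps are assumed to already be in UTC
            return parsed.replace(tzinfo=ZoneInfo("UTC"))
        # aware timestamps are converted to UTC
        return parsed.astimezone(ZoneInfo("UTC"))

    print(to_utc("2025-03-01T12:00:00"))        # naive -> tagged as UTC
    print(to_utc("2025-03-01T12:00:00+02:00"))  # aware -> converted to UTC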
prefect/server/api/concurrency_limits_v2.py
CHANGED
@@ -156,15 +156,18 @@ async def bulk_increment_active_slots(
     slots: int = Body(..., gt=0),
     names: List[str] = Body(..., min_items=1),
     mode: Literal["concurrency", "rate_limit"] = Body("concurrency"),
-    create_if_missing: Optional[bool] = Body(
+    create_if_missing: Optional[bool] = Body(
+        None,
+        deprecated="Limits must be explicitly created before acquiring concurrency slots.",
+    ),
     db: PrefectDBInterface = Depends(provide_database_interface),
 ) -> List[MinimalConcurrencyLimitResponse]:
     async with db.session_context(begin_transaction=True) as session:
         limits = [
             schemas.core.ConcurrencyLimitV2.model_validate(limit)
             for limit in (
-                await models.concurrency_limits_v2.
-                    session=session, names=names
+                await models.concurrency_limits_v2.bulk_read_concurrency_limits(
+                    session=session, names=names
                 )
             )
         ]
@@ -246,14 +249,15 @@ async def bulk_decrement_active_slots(
     slots: int = Body(..., gt=0),
     names: List[str] = Body(..., min_items=1),
     occupancy_seconds: Optional[float] = Body(None, gt=0.0),
-    create_if_missing: bool = Body(
+    create_if_missing: bool = Body(
+        None,
+        deprecated="Limits must be explicitly created before decrementing active slots.",
+    ),
     db: PrefectDBInterface = Depends(provide_database_interface),
 ) -> List[MinimalConcurrencyLimitResponse]:
     async with db.session_context(begin_transaction=True) as session:
-        limits = (
-
-            session=session, names=names, create_if_missing=create_if_missing
-        )
+        limits = await models.concurrency_limits_v2.bulk_read_concurrency_limits(
+            session=session, names=names
         )

         if not limits:
prefect/server/api/deployments.py
CHANGED
@@ -568,8 +568,10 @@ async def delete_deployment(
 @router.post("/{id}/schedule")
 async def schedule_deployment(
     deployment_id: UUID = Path(..., description="The deployment id", alias="id"),
-    start_time:
-
+    start_time: datetime.datetime = Body(
+        None, description="The earliest date to schedule"
+    ),
+    end_time: datetime.datetime = Body(None, description="The latest date to schedule"),
     # Workaround for the fact that FastAPI does not let us configure ser_json_timedelta
     # to represent timedeltas as floats in JSON.
     min_time: float = Body(
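Note: a hedged sketch of calling the schedule endpoint above with explicit start and end times. The local API URL and the /deployments prefix are assumptions based on a default local Prefect server, and the deployment id is a placeholder:

    import datetime
    import httpx

    deployment_id = "00000000-0000-0000-0000-000000000000"  # placeholder id
    start = datetime.datetime.now(datetime.timezone.utc)
    payload = {
        "start_time": start.isoformat(),
        "end_time": (start + datetime.timedelta(days=1)).isoformat(),
    }
    response = httpx.post(
        f"http://127.0.0.1:4200/api/deployments/{deployment_id}/schedule",
        json=payload,
    )
    print(response.status_code)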
prefect/server/api/ui/flows.py
CHANGED
@@ -3,6 +3,7 @@ from __future__ import annotations
 from datetime import datetime
 from typing import TYPE_CHECKING, Dict, List, Optional
 from uuid import UUID
+from zoneinfo import ZoneInfo

 import sqlalchemy as sa
 from fastapi import Body, Depends
@@ -15,7 +16,7 @@ from prefect.server.utilities.database import UUID as UUIDTypeDecorator
 from prefect.server.utilities.schemas import PrefectBaseModel
 from prefect.server.utilities.server import PrefectRouter
 from prefect.types import DateTime
-from prefect.types._datetime import create_datetime_instance
+from prefect.types._datetime import create_datetime_instance, parse_datetime

 if TYPE_CHECKING:
     import logging
@@ -162,7 +163,11 @@ async def next_runs_by_flow(
             name=result.name,
             state_name=result.state_name,
             state_type=result.state_type,
-            next_scheduled_start_time=
+            next_scheduled_start_time=parse_datetime(
+                result.next_scheduled_start_time
+            ).replace(tzinfo=ZoneInfo("UTC"))
+            if isinstance(result.next_scheduled_start_time, str)
+            else result.next_scheduled_start_time,
         )
         for result in results.all()
     }
prefect/server/api/ui/task_runs.py
CHANGED
@@ -62,14 +62,14 @@ async def read_dashboard_task_run_counts(
     )

     bucket_count = 20
-    start_time = task_runs.start_time.after_.
+    start_time = task_runs.start_time.after_.replace(microsecond=0, second=0)
     end_time = (
         end_of_period(task_runs.start_time.before_, "minute")
         if task_runs.start_time.before_
         else end_of_period(now("UTC"), "minute")
     )
     window = end_time - start_time
-    delta = window
+    delta = window / bucket_count

     async with db.session_context(begin_transaction=False) as session:
         # Gather the raw counts. The counts are divided into buckets of time
@@ -85,7 +85,7 @@ async def read_dashboard_task_run_counts(
         start_time.minute,
         start_time.second,
         start_time.microsecond,
-        start_time.
+        start_time.tzinfo,
     )
     bucket_expression = sa.func.floor(
         sa.func.date_diff_seconds(db.TaskRun.start_time, start_datetime)
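Note: a stdlib sketch of the bucketing the dashboard endpoint above performs; the queried window is divided into 20 equal-width time buckets, and the change makes delta the width of one bucket rather than the whole window:

    import datetime

    bucket_count = 20
    start_time = datetime.datetime(2025, 3, 1, tzinfo=datetime.timezone.utc)
    end_time = start_time + datetime.timedelta(hours=1)

    window = end_time - start_time
    delta = window / bucket_count  # width of a single bucket (3 minutes here)
    bucket_starts = [start_time + i * delta for i in range(bucket_count)]
    print(delta, bucket_starts[0], bucket_starts[-1])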
prefect/states.py
CHANGED
@@ -5,7 +5,6 @@ import datetime
 import sys
 import traceback
 import uuid
-import warnings
 from collections import Counter
 from types import GeneratorType, TracebackType
 from typing import TYPE_CHECKING, Any, Dict, Iterable, Optional, Type
@@ -15,7 +14,6 @@ import httpx
 from opentelemetry import propagate
 from typing_extensions import TypeGuard

-from prefect._internal.compatibility import deprecated
 from prefect.client.schemas.objects import State, StateDetails, StateType
 from prefect.exceptions import (
     CancelledRun,
@@ -28,9 +26,9 @@ from prefect.exceptions import (
     UnfinishedRun,
 )
 from prefect.logging.loggers import get_logger, get_run_logger
-from prefect.types._datetime import
+from prefect.types._datetime import now
 from prefect.utilities.annotations import BaseAnnotation
-from prefect.utilities.asyncutils import
+from prefect.utilities.asyncutils import sync_compatible
 from prefect.utilities.collections import ensure_iterable

 if TYPE_CHECKING:
@@ -73,17 +71,9 @@ def to_state_create(state: State) -> "StateCreate":
     )


-
-    "fetch",
-    when=lambda fetch: fetch is not True,
-    start_date="Oct 2024",
-    end_date="Jan 2025",
-    help="Please ensure you are awaiting the call to `result()` when calling in an async context.",
-)
-def get_state_result(
+async def get_state_result(
     state: "State[R]",
     raise_on_failure: bool = True,
-    fetch: bool = True,
     retry_result_failure: bool = True,
 ) -> "R":
     """
@@ -92,25 +82,11 @@ def get_state_result(
     See `State.result()`
     """

-
-
-
-
-
-        "the future. Pass `fetch=True` and `await` the call to get rid of "
-        "this warning."
-    ),
-    DeprecationWarning,
-    stacklevel=2,
-)
-
-        return state.data
-    else:
-        return _get_state_result(
-            state,
-            raise_on_failure=raise_on_failure,
-            retry_result_failure=retry_result_failure,
-        )
+    return await _get_state_result(
+        state,
+        raise_on_failure=raise_on_failure,
+        retry_result_failure=retry_result_failure,
+    )


 RESULT_READ_MAXIMUM_ATTEMPTS = 10
@@ -155,7 +131,6 @@ async def _get_state_result_data_with_retries(
         await asyncio.sleep(RESULT_READ_RETRY_DELAY)


-@sync_compatible
 async def _get_state_result(
     state: "State[R]", raise_on_failure: bool, retry_result_failure: bool = True
 ) -> "R":
@@ -759,9 +734,9 @@ def Paused(
         pass
     else:
         state_details.pause_timeout = (
-
+            pause_expiration_time
             if pause_expiration_time
-            else now() +
+            else now() + datetime.timedelta(seconds=timeout_seconds or 0)
         )

     state_details.pause_reschedule = reschedule
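Note: a stdlib sketch of the pause-timeout default computed above; when no explicit pause_expiration_time is supplied, the pause times out timeout_seconds from the current moment:

    import datetime

    timeout_seconds = 300
    pause_expiration_time = None  # no explicit expiration supplied

    pause_timeout = (
        pause_expiration_time
        if pause_expiration_time
        else datetime.datetime.now(datetime.timezone.utc)
        + datetime.timedelta(seconds=timeout_seconds or 0)
    )
    print(pause_timeout)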
|