prefect-client 3.2.1__py3-none-any.whl → 3.2.3__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only.
Files changed (72)
  1. prefect/__init__.py +15 -8
  2. prefect/_build_info.py +5 -0
  3. prefect/_internal/schemas/bases.py +4 -7
  4. prefect/_internal/schemas/validators.py +5 -6
  5. prefect/_result_records.py +6 -1
  6. prefect/client/orchestration/__init__.py +18 -6
  7. prefect/client/schemas/schedules.py +2 -2
  8. prefect/concurrency/asyncio.py +4 -3
  9. prefect/concurrency/sync.py +3 -3
  10. prefect/concurrency/v1/asyncio.py +3 -3
  11. prefect/concurrency/v1/sync.py +3 -3
  12. prefect/deployments/flow_runs.py +2 -2
  13. prefect/docker/docker_image.py +2 -3
  14. prefect/engine.py +1 -1
  15. prefect/events/clients.py +4 -3
  16. prefect/events/related.py +3 -5
  17. prefect/flows.py +11 -5
  18. prefect/locking/filesystem.py +8 -8
  19. prefect/logging/handlers.py +7 -11
  20. prefect/main.py +0 -2
  21. prefect/runtime/flow_run.py +10 -17
  22. prefect/server/api/__init__.py +34 -0
  23. prefect/server/api/admin.py +85 -0
  24. prefect/server/api/artifacts.py +224 -0
  25. prefect/server/api/automations.py +239 -0
  26. prefect/server/api/block_capabilities.py +25 -0
  27. prefect/server/api/block_documents.py +164 -0
  28. prefect/server/api/block_schemas.py +153 -0
  29. prefect/server/api/block_types.py +211 -0
  30. prefect/server/api/clients.py +246 -0
  31. prefect/server/api/collections.py +75 -0
  32. prefect/server/api/concurrency_limits.py +286 -0
  33. prefect/server/api/concurrency_limits_v2.py +269 -0
  34. prefect/server/api/csrf_token.py +38 -0
  35. prefect/server/api/dependencies.py +196 -0
  36. prefect/server/api/deployments.py +941 -0
  37. prefect/server/api/events.py +300 -0
  38. prefect/server/api/flow_run_notification_policies.py +120 -0
  39. prefect/server/api/flow_run_states.py +52 -0
  40. prefect/server/api/flow_runs.py +867 -0
  41. prefect/server/api/flows.py +210 -0
  42. prefect/server/api/logs.py +43 -0
  43. prefect/server/api/middleware.py +73 -0
  44. prefect/server/api/root.py +35 -0
  45. prefect/server/api/run_history.py +170 -0
  46. prefect/server/api/saved_searches.py +99 -0
  47. prefect/server/api/server.py +891 -0
  48. prefect/server/api/task_run_states.py +52 -0
  49. prefect/server/api/task_runs.py +342 -0
  50. prefect/server/api/task_workers.py +31 -0
  51. prefect/server/api/templates.py +35 -0
  52. prefect/server/api/ui/__init__.py +3 -0
  53. prefect/server/api/ui/flow_runs.py +128 -0
  54. prefect/server/api/ui/flows.py +173 -0
  55. prefect/server/api/ui/schemas.py +63 -0
  56. prefect/server/api/ui/task_runs.py +175 -0
  57. prefect/server/api/validation.py +382 -0
  58. prefect/server/api/variables.py +181 -0
  59. prefect/server/api/work_queues.py +230 -0
  60. prefect/server/api/workers.py +656 -0
  61. prefect/settings/sources.py +18 -5
  62. prefect/states.py +3 -3
  63. prefect/task_engine.py +3 -3
  64. prefect/types/_datetime.py +82 -3
  65. prefect/utilities/dockerutils.py +2 -2
  66. prefect/workers/base.py +5 -5
  67. {prefect_client-3.2.1.dist-info → prefect_client-3.2.3.dist-info}/METADATA +10 -15
  68. {prefect_client-3.2.1.dist-info → prefect_client-3.2.3.dist-info}/RECORD +70 -32
  69. {prefect_client-3.2.1.dist-info → prefect_client-3.2.3.dist-info}/WHEEL +1 -2
  70. prefect/_version.py +0 -21
  71. prefect_client-3.2.1.dist-info/top_level.txt +0 -1
  72. {prefect_client-3.2.1.dist-info → prefect_client-3.2.3.dist-info/licenses}/LICENSE +0 -0
prefect/server/api/ui/flows.py (new file)
@@ -0,0 +1,173 @@
+from __future__ import annotations
+
+from datetime import datetime
+from typing import TYPE_CHECKING, Dict, List, Optional
+from uuid import UUID
+
+import sqlalchemy as sa
+from fastapi import Body, Depends
+from pydantic import Field, field_validator
+
+from prefect.logging import get_logger
+from prefect.server.database import PrefectDBInterface, provide_database_interface
+from prefect.server.schemas.states import StateType
+from prefect.server.utilities.database import UUID as UUIDTypeDecorator
+from prefect.server.utilities.schemas import PrefectBaseModel
+from prefect.server.utilities.server import PrefectRouter
+from prefect.types import DateTime
+from prefect.types._datetime import create_datetime_instance
+
+if TYPE_CHECKING:
+    import logging
+
+logger: "logging.Logger" = get_logger()
+
+router: PrefectRouter = PrefectRouter(prefix="/ui/flows", tags=["Flows", "UI"])
+
+
+class SimpleNextFlowRun(PrefectBaseModel):
+    id: UUID = Field(default=..., description="The flow run id.")
+    flow_id: UUID = Field(default=..., description="The flow id.")
+    name: str = Field(default=..., description="The flow run name")
+    state_name: str = Field(default=..., description="The state name.")
+    state_type: StateType = Field(default=..., description="The state type.")
+    next_scheduled_start_time: DateTime = Field(
+        default=..., description="The next scheduled start time"
+    )
+
+    @field_validator("next_scheduled_start_time", mode="before")
+    @classmethod
+    def validate_next_scheduled_start_time(cls, v: DateTime | datetime) -> DateTime:
+        if isinstance(v, datetime):
+            return create_datetime_instance(v)
+        return v
+
+
+@router.post("/count-deployments")
+async def count_deployments_by_flow(
+    flow_ids: List[UUID] = Body(default=..., embed=True, max_items=200),
+    db: PrefectDBInterface = Depends(provide_database_interface),
+) -> Dict[UUID, int]:
+    """
+    Get deployment counts by flow id.
+    """
+    async with db.session_context() as session:
+        query = (
+            sa.select(
+                db.Deployment.flow_id,
+                sa.func.count(db.Deployment.id).label("deployment_count"),
+            )
+            .where(db.Deployment.flow_id.in_(flow_ids))
+            .group_by(db.Deployment.flow_id)
+        )
+
+        results = await session.execute(query)
+
+        deployment_counts_by_flow = {
+            flow_id: deployment_count for flow_id, deployment_count in results.all()
+        }
+
+        return {
+            flow_id: deployment_counts_by_flow.get(flow_id, 0) for flow_id in flow_ids
+        }
+
+
+def _get_postgres_next_runs_query(flow_ids: List[UUID]):
+    # Here we use the raw query because CROSS LATERAL JOINS are very
+    # difficult to express correctly in sqlalchemy.
+    raw_query = sa.text(
+        """
+        SELECT fr.id, fr.name, fr.flow_id, fr.state_name, fr.state_type, fr.state_name, fr.next_scheduled_start_time
+        FROM (
+            SELECT DISTINCT flow_id FROM flow_run
+            WHERE flow_id IN :flow_ids
+            AND state_type = 'SCHEDULED'
+        ) AS unique_flows
+        CROSS JOIN LATERAL (
+            SELECT *
+            FROM flow_run fr
+            WHERE fr.flow_id = unique_flows.flow_id
+            AND fr.state_type = 'SCHEDULED'
+            ORDER BY fr.next_scheduled_start_time ASC
+            LIMIT 1
+        ) fr;
+        """
+    )
+
+    bindparams = [
+        sa.bindparam(
+            "flow_ids",
+            flow_ids,
+            expanding=True,
+            type_=UUIDTypeDecorator,
+        ),
+    ]
+
+    query = raw_query.bindparams(*bindparams)
+    return query
+
+
+def _get_sqlite_next_runs_query(flow_ids: List[UUID]):
+    raw_query = sa.text(
+        """
+        WITH min_times AS (
+            SELECT flow_id, MIN(next_scheduled_start_time) AS min_next_scheduled_start_time
+            FROM flow_run
+            WHERE flow_id IN :flow_ids
+            AND state_type = 'SCHEDULED'
+            GROUP BY flow_id
+        )
+        SELECT fr.id, fr.name, fr.flow_id, fr.state_name, fr.state_type, fr.next_scheduled_start_time
+        FROM flow_run fr
+        JOIN min_times mt ON fr.flow_id = mt.flow_id AND fr.next_scheduled_start_time = mt.min_next_scheduled_start_time
+        WHERE fr.state_type = 'SCHEDULED';
+
+        """
+    )
+
+    bindparams = [
+        sa.bindparam(
+            "flow_ids",
+            flow_ids,
+            expanding=True,
+            type_=UUIDTypeDecorator,
+        ),
+    ]
+
+    query = raw_query.bindparams(*bindparams)
+    return query
+
+
+@router.post("/next-runs")
+async def next_runs_by_flow(
+    flow_ids: List[UUID] = Body(default=..., embed=True, max_items=200),
+    db: PrefectDBInterface = Depends(provide_database_interface),
+) -> Dict[UUID, Optional[SimpleNextFlowRun]]:
+    """
+    Get the next flow run by flow id.
+    """
+
+    async with db.session_context() as session:
+        if db.dialect.name == "postgresql":
+            query = _get_postgres_next_runs_query(flow_ids=flow_ids)
+        else:
+            query = _get_sqlite_next_runs_query(flow_ids=flow_ids)
+
+        results = await session.execute(query)
+
+        results_by_flow_id = {
+            UUID(str(result.flow_id)): SimpleNextFlowRun(
+                id=result.id,
+                flow_id=result.flow_id,
+                name=result.name,
+                state_name=result.state_name,
+                state_type=result.state_type,
+                next_scheduled_start_time=result.next_scheduled_start_time,
+            )
+            for result in results.all()
+        }
+
+        response = {
+            flow_id: results_by_flow_id.get(flow_id, None) for flow_id in flow_ids
+        }
+        return response
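For orientation, here is a minimal client-side sketch of the two endpoints this file adds. The base URL and the flow id are placeholder assumptions, not values from the diff; the request shapes (flow_ids embedded in the body, per Body(..., embed=True)) follow the handlers above.

# Sketch only: exercising the new /ui/flows endpoints with httpx.
# "API" and the flow id are assumptions for a locally running server.
import asyncio

import httpx

API = "http://127.0.0.1:4200/api"  # hypothetical local Prefect server

async def main() -> None:
    flow_ids = ["00000000-0000-0000-0000-000000000000"]  # placeholder
    async with httpx.AsyncClient(base_url=API) as client:
        # Deployment counts keyed by flow id; flows without deployments map to 0.
        r = await client.post("/ui/flows/count-deployments", json={"flow_ids": flow_ids})
        print(r.json())
        # Next SCHEDULED run per flow; flows with no scheduled runs map to null.
        r = await client.post("/ui/flows/next-runs", json={"flow_ids": flow_ids})
        print(r.json())

asyncio.run(main())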
prefect/server/api/ui/schemas.py (new file)
@@ -0,0 +1,63 @@
+from typing import TYPE_CHECKING, Any
+
+from fastapi import Body, Depends, HTTPException, status
+
+from prefect.logging import get_logger
+from prefect.server.database import PrefectDBInterface, provide_database_interface
+from prefect.server.schemas.responses import SchemaValuesValidationResponse
+from prefect.server.utilities.server import APIRouter
+from prefect.utilities.schema_tools.hydration import HydrationContext, hydrate
+from prefect.utilities.schema_tools.validation import (
+    CircularSchemaRefError,
+    build_error_obj,
+    is_valid_schema,
+    preprocess_schema,
+    validate,
+)
+
+if TYPE_CHECKING:
+    import logging
+
+router: APIRouter = APIRouter(prefix="/ui/schemas", tags=["UI", "Schemas"])
+
+logger: "logging.Logger" = get_logger("server.api.ui.schemas")
+
+
+@router.post("/validate")
+async def validate_obj(
+    json_schema: dict[str, Any] = Body(
+        ...,
+        embed=True,
+        alias="schema",
+        json_schema_extra={"additionalProperties": True},
+    ),
+    values: dict[str, Any] = Body(
+        ..., embed=True, json_schema_extra={"additionalProperties": True}
+    ),
+    db: PrefectDBInterface = Depends(provide_database_interface),
+) -> SchemaValuesValidationResponse:
+    schema = preprocess_schema(json_schema)
+
+    try:
+        is_valid_schema(schema, preprocess=False)
+    except ValueError as exc:
+        raise HTTPException(
+            status_code=status.HTTP_422_UNPROCESSABLE_ENTITY, detail=str(exc)
+        )
+
+    async with db.session_context() as session:
+        ctx = await HydrationContext.build(
+            session=session, render_jinja=False, render_workspace_variables=True
+        )
+
+    hydrated_values = hydrate(values, ctx)
+    try:
+        errors = validate(hydrated_values, schema, preprocess=False)
+    except CircularSchemaRefError:
+        raise HTTPException(
+            status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
+            detail="Invalid schema: Unable to validate schema with circular references.",
+        )
+    error_obj = build_error_obj(errors)
+
+    return error_obj
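A hedged sketch of how this validate endpoint might be called; the schema, values, and base URL are invented for illustration. Because both body parameters use embed=True (with the schema parameter aliased to "schema"), the payload nests them under named keys.

# Sketch only: calling POST /ui/schemas/validate. Payload contents and
# the base URL are assumptions.
import httpx

API = "http://127.0.0.1:4200/api"  # hypothetical local Prefect server

payload = {
    # alias="schema" on the json_schema body parameter
    "schema": {
        "type": "object",
        "properties": {"retries": {"type": "integer"}},
        "required": ["retries"],
    },
    "values": {"retries": "three"},  # wrong type on purpose
}

resp = httpx.post(f"{API}/ui/schemas/validate", json=payload)
# 422 for an invalid or circular schema; otherwise a structured error
# object describing any failures in the supplied values.
print(resp.status_code, resp.json())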
prefect/server/api/ui/task_runs.py (new file)
@@ -0,0 +1,175 @@
+from datetime import datetime
+from typing import TYPE_CHECKING, List, Optional
+
+import sqlalchemy as sa
+from fastapi import Depends, HTTPException, status
+from pydantic import Field, model_serializer
+
+import prefect.server.schemas as schemas
+from prefect.logging import get_logger
+from prefect.server import models
+from prefect.server.database import PrefectDBInterface, provide_database_interface
+from prefect.server.utilities.schemas.bases import PrefectBaseModel
+from prefect.server.utilities.server import PrefectRouter
+from prefect.types._datetime import end_of_period, now
+
+if TYPE_CHECKING:
+    import logging
+
+logger: "logging.Logger" = get_logger("server.api.ui.task_runs")
+
+router: PrefectRouter = PrefectRouter(prefix="/ui/task_runs", tags=["Task Runs", "UI"])
+
+FAILED_STATES = [schemas.states.StateType.CRASHED, schemas.states.StateType.FAILED]
+
+
+class TaskRunCount(PrefectBaseModel):
+    completed: int = Field(
+        default=..., description="The number of completed task runs."
+    )
+    failed: int = Field(default=..., description="The number of failed task runs.")
+
+    @model_serializer
+    def ser_model(self) -> dict[str, int]:
+        return {
+            "completed": int(self.completed),
+            "failed": int(self.failed),
+        }
+
+
+@router.post("/dashboard/counts")
+async def read_dashboard_task_run_counts(
+    task_runs: schemas.filters.TaskRunFilter,
+    flows: Optional[schemas.filters.FlowFilter] = None,
+    flow_runs: Optional[schemas.filters.FlowRunFilter] = None,
+    deployments: Optional[schemas.filters.DeploymentFilter] = None,
+    work_pools: Optional[schemas.filters.WorkPoolFilter] = None,
+    work_queues: Optional[schemas.filters.WorkQueueFilter] = None,
+    db: PrefectDBInterface = Depends(provide_database_interface),
+) -> List[TaskRunCount]:
+    if task_runs.start_time is None or task_runs.start_time.after_ is None:
+        raise HTTPException(
+            status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
+            detail="task_runs.start_time.after_ is required",
+        )
+
+    # We only care about task runs that are in a terminal state, all others
+    # should be ignored.
+    task_runs.state = schemas.filters.TaskRunFilterState(
+        type=schemas.filters.TaskRunFilterStateType(
+            any_=list(schemas.states.TERMINAL_STATES)
+        )
+    )
+
+    bucket_count = 20
+    start_time = task_runs.start_time.after_.start_of("minute")
+    end_time = (
+        end_of_period(task_runs.start_time.before_, "minute")
+        if task_runs.start_time.before_
+        else end_of_period(now("UTC"), "minute")
+    )
+    window = end_time - start_time
+    delta = window.as_timedelta() / bucket_count
+
+    async with db.session_context(begin_transaction=False) as session:
+        # Gather the raw counts. The counts are divided into buckets of time
+        # and each bucket contains the number of successful and failed task
+        # runs.
+        # SQLAlchemy doesn't play nicely with our DateTime type so we convert it
+        # to a datetime object.
+        start_datetime = datetime(
+            start_time.year,
+            start_time.month,
+            start_time.day,
+            start_time.hour,
+            start_time.minute,
+            start_time.second,
+            start_time.microsecond,
+            start_time.timezone,
+        )
+        bucket_expression = sa.func.floor(
+            sa.func.date_diff_seconds(db.TaskRun.start_time, start_datetime)
+            / delta.total_seconds()
+        ).label("bucket")
+
+        raw_counts = (
+            (
+                await models.task_runs._apply_task_run_filters(
+                    db,
+                    sa.select(
+                        bucket_expression,
+                        sa.func.min(db.TaskRun.end_time).label("oldest"),
+                        sa.func.sum(
+                            sa.case(
+                                (
+                                    db.TaskRun.state_type.in_(FAILED_STATES),
+                                    1,
+                                ),
+                                else_=0,
+                            )
+                        ).label("failed_count"),
+                        sa.func.sum(
+                            sa.case(
+                                (
+                                    db.TaskRun.state_type.notin_(FAILED_STATES),
+                                    1,
+                                ),
+                                else_=0,
+                            )
+                        ).label("successful_count"),
+                    ),
+                    flow_filter=flows,
+                    flow_run_filter=flow_runs,
+                    task_run_filter=task_runs,
+                    deployment_filter=deployments,
+                    work_pool_filter=work_pools,
+                    work_queue_filter=work_queues,
+                )
+            )
+            .group_by("bucket", db.TaskRun.start_time)
+            .subquery()
+        )
+
+        # Aggregate the raw counts by bucket
+        query = (
+            sa.select(
+                raw_counts.c.bucket.label("bucket"),
+                sa.func.min(raw_counts.c.oldest).label("oldest"),
+                sa.func.sum(raw_counts.c.failed_count).label("failed_count"),
+                sa.func.sum(raw_counts.c.successful_count).label("successful_count"),
+            )
+            .select_from(raw_counts)
+            .group_by(raw_counts.c.bucket)
+            .order_by(sa.asc("oldest"))
+        )
+
+        result = await session.execute(query)
+
+        # Ensure that all buckets of time are present in the result even if no
+        # matching task runs occurred during the given time period.
+        buckets = [TaskRunCount(completed=0, failed=0) for _ in range(bucket_count)]
+
+        for row in result:
+            index = int(row.bucket)
+            buckets[index].completed = row.successful_count
+            buckets[index].failed = row.failed_count
+
+        return buckets
+
+
+@router.post("/count")
+async def read_task_run_counts_by_state(
+    flows: Optional[schemas.filters.FlowFilter] = None,
+    flow_runs: Optional[schemas.filters.FlowRunFilter] = None,
+    task_runs: Optional[schemas.filters.TaskRunFilter] = None,
+    deployments: Optional[schemas.filters.DeploymentFilter] = None,
+    db: PrefectDBInterface = Depends(provide_database_interface),
+) -> schemas.states.CountByState:
+    async with db.session_context(begin_transaction=False) as session:
+        return await models.task_runs.count_task_runs_by_state(
+            session=session,
+            flow_filter=flows,
+            flow_run_filter=flow_runs,
+            task_run_filter=task_runs,
+            deployment_filter=deployments,
+        )
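To illustrate the dashboard endpoint's contract (it rejects requests lacking task_runs.start_time.after_ and always returns bucket_count = 20 buckets), a sketch with an assumed base URL; the filter shape follows the TaskRunFilter body parameter above.

# Sketch only: requesting bucketed task-run counts for the last 24 hours.
# The base URL is an assumption for a locally running Prefect server.
from datetime import datetime, timedelta, timezone

import httpx

API = "http://127.0.0.1:4200/api"  # hypothetical local Prefect server

end = datetime.now(timezone.utc)
payload = {
    "task_runs": {
        "start_time": {
            "after_": (end - timedelta(hours=24)).isoformat(),  # required
            "before_": end.isoformat(),
        }
    }
}

resp = httpx.post(f"{API}/ui/task_runs/dashboard/counts", json=payload)
# Expect 20 buckets of {"completed": ..., "failed": ...} spanning the window.
print(resp.json())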