planar 0.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- planar/.__init__.py.un~ +0 -0
- planar/._version.py.un~ +0 -0
- planar/.app.py.un~ +0 -0
- planar/.cli.py.un~ +0 -0
- planar/.config.py.un~ +0 -0
- planar/.context.py.un~ +0 -0
- planar/.db.py.un~ +0 -0
- planar/.di.py.un~ +0 -0
- planar/.engine.py.un~ +0 -0
- planar/.files.py.un~ +0 -0
- planar/.log_context.py.un~ +0 -0
- planar/.log_metadata.py.un~ +0 -0
- planar/.logging.py.un~ +0 -0
- planar/.object_registry.py.un~ +0 -0
- planar/.otel.py.un~ +0 -0
- planar/.server.py.un~ +0 -0
- planar/.session.py.un~ +0 -0
- planar/.sqlalchemy.py.un~ +0 -0
- planar/.task_local.py.un~ +0 -0
- planar/.test_app.py.un~ +0 -0
- planar/.test_config.py.un~ +0 -0
- planar/.test_object_config.py.un~ +0 -0
- planar/.test_sqlalchemy.py.un~ +0 -0
- planar/.test_utils.py.un~ +0 -0
- planar/.util.py.un~ +0 -0
- planar/.utils.py.un~ +0 -0
- planar/__init__.py +26 -0
- planar/_version.py +1 -0
- planar/ai/.__init__.py.un~ +0 -0
- planar/ai/._models.py.un~ +0 -0
- planar/ai/.agent.py.un~ +0 -0
- planar/ai/.agent_utils.py.un~ +0 -0
- planar/ai/.events.py.un~ +0 -0
- planar/ai/.files.py.un~ +0 -0
- planar/ai/.models.py.un~ +0 -0
- planar/ai/.providers.py.un~ +0 -0
- planar/ai/.pydantic_ai.py.un~ +0 -0
- planar/ai/.pydantic_ai_agent.py.un~ +0 -0
- planar/ai/.pydantic_ai_provider.py.un~ +0 -0
- planar/ai/.step.py.un~ +0 -0
- planar/ai/.test_agent.py.un~ +0 -0
- planar/ai/.test_agent_serialization.py.un~ +0 -0
- planar/ai/.test_providers.py.un~ +0 -0
- planar/ai/.utils.py.un~ +0 -0
- planar/ai/__init__.py +15 -0
- planar/ai/agent.py +457 -0
- planar/ai/agent_utils.py +205 -0
- planar/ai/models.py +140 -0
- planar/ai/providers.py +1088 -0
- planar/ai/test_agent.py +1298 -0
- planar/ai/test_agent_serialization.py +229 -0
- planar/ai/test_providers.py +463 -0
- planar/ai/utils.py +102 -0
- planar/app.py +494 -0
- planar/cli.py +282 -0
- planar/config.py +544 -0
- planar/db/.db.py.un~ +0 -0
- planar/db/__init__.py +17 -0
- planar/db/alembic/env.py +136 -0
- planar/db/alembic/script.py.mako +28 -0
- planar/db/alembic/versions/3476068c153c_initial_system_tables_migration.py +339 -0
- planar/db/alembic.ini +128 -0
- planar/db/db.py +318 -0
- planar/files/.config.py.un~ +0 -0
- planar/files/.local.py.un~ +0 -0
- planar/files/.local_filesystem.py.un~ +0 -0
- planar/files/.model.py.un~ +0 -0
- planar/files/.models.py.un~ +0 -0
- planar/files/.s3.py.un~ +0 -0
- planar/files/.storage.py.un~ +0 -0
- planar/files/.test_files.py.un~ +0 -0
- planar/files/__init__.py +2 -0
- planar/files/models.py +162 -0
- planar/files/storage/.__init__.py.un~ +0 -0
- planar/files/storage/.base.py.un~ +0 -0
- planar/files/storage/.config.py.un~ +0 -0
- planar/files/storage/.context.py.un~ +0 -0
- planar/files/storage/.local_directory.py.un~ +0 -0
- planar/files/storage/.test_local_directory.py.un~ +0 -0
- planar/files/storage/.test_s3.py.un~ +0 -0
- planar/files/storage/base.py +61 -0
- planar/files/storage/config.py +44 -0
- planar/files/storage/context.py +15 -0
- planar/files/storage/local_directory.py +188 -0
- planar/files/storage/s3.py +220 -0
- planar/files/storage/test_local_directory.py +162 -0
- planar/files/storage/test_s3.py +299 -0
- planar/files/test_files.py +283 -0
- planar/human/.human.py.un~ +0 -0
- planar/human/.test_human.py.un~ +0 -0
- planar/human/__init__.py +2 -0
- planar/human/human.py +458 -0
- planar/human/models.py +80 -0
- planar/human/test_human.py +385 -0
- planar/logging/.__init__.py.un~ +0 -0
- planar/logging/.attributes.py.un~ +0 -0
- planar/logging/.formatter.py.un~ +0 -0
- planar/logging/.logger.py.un~ +0 -0
- planar/logging/.otel.py.un~ +0 -0
- planar/logging/.tracer.py.un~ +0 -0
- planar/logging/__init__.py +10 -0
- planar/logging/attributes.py +54 -0
- planar/logging/context.py +14 -0
- planar/logging/formatter.py +113 -0
- planar/logging/logger.py +114 -0
- planar/logging/otel.py +51 -0
- planar/modeling/.mixin.py.un~ +0 -0
- planar/modeling/.storage.py.un~ +0 -0
- planar/modeling/__init__.py +0 -0
- planar/modeling/field_helpers.py +59 -0
- planar/modeling/json_schema_generator.py +94 -0
- planar/modeling/mixins/__init__.py +10 -0
- planar/modeling/mixins/auditable.py +52 -0
- planar/modeling/mixins/test_auditable.py +97 -0
- planar/modeling/mixins/test_timestamp.py +134 -0
- planar/modeling/mixins/test_uuid_primary_key.py +52 -0
- planar/modeling/mixins/timestamp.py +53 -0
- planar/modeling/mixins/uuid_primary_key.py +19 -0
- planar/modeling/orm/.planar_base_model.py.un~ +0 -0
- planar/modeling/orm/__init__.py +18 -0
- planar/modeling/orm/planar_base_entity.py +29 -0
- planar/modeling/orm/query_filter_builder.py +122 -0
- planar/modeling/orm/reexports.py +15 -0
- planar/object_config/.object_config.py.un~ +0 -0
- planar/object_config/__init__.py +11 -0
- planar/object_config/models.py +114 -0
- planar/object_config/object_config.py +378 -0
- planar/object_registry.py +100 -0
- planar/registry_items.py +65 -0
- planar/routers/.__init__.py.un~ +0 -0
- planar/routers/.agents_router.py.un~ +0 -0
- planar/routers/.crud.py.un~ +0 -0
- planar/routers/.decision.py.un~ +0 -0
- planar/routers/.event.py.un~ +0 -0
- planar/routers/.file_attachment.py.un~ +0 -0
- planar/routers/.files.py.un~ +0 -0
- planar/routers/.files_router.py.un~ +0 -0
- planar/routers/.human.py.un~ +0 -0
- planar/routers/.info.py.un~ +0 -0
- planar/routers/.models.py.un~ +0 -0
- planar/routers/.object_config_router.py.un~ +0 -0
- planar/routers/.rule.py.un~ +0 -0
- planar/routers/.test_object_config_router.py.un~ +0 -0
- planar/routers/.test_workflow_router.py.un~ +0 -0
- planar/routers/.workflow.py.un~ +0 -0
- planar/routers/__init__.py +13 -0
- planar/routers/agents_router.py +197 -0
- planar/routers/entity_router.py +143 -0
- planar/routers/event.py +91 -0
- planar/routers/files.py +142 -0
- planar/routers/human.py +151 -0
- planar/routers/info.py +131 -0
- planar/routers/models.py +170 -0
- planar/routers/object_config_router.py +133 -0
- planar/routers/rule.py +108 -0
- planar/routers/test_agents_router.py +174 -0
- planar/routers/test_object_config_router.py +367 -0
- planar/routers/test_routes_security.py +169 -0
- planar/routers/test_rule_router.py +470 -0
- planar/routers/test_workflow_router.py +274 -0
- planar/routers/workflow.py +468 -0
- planar/rules/.decorator.py.un~ +0 -0
- planar/rules/.runner.py.un~ +0 -0
- planar/rules/.test_rules.py.un~ +0 -0
- planar/rules/__init__.py +23 -0
- planar/rules/decorator.py +184 -0
- planar/rules/models.py +355 -0
- planar/rules/rule_configuration.py +191 -0
- planar/rules/runner.py +64 -0
- planar/rules/test_rules.py +750 -0
- planar/scaffold_templates/app/__init__.py.j2 +0 -0
- planar/scaffold_templates/app/db/entities.py.j2 +11 -0
- planar/scaffold_templates/app/flows/process_invoice.py.j2 +67 -0
- planar/scaffold_templates/main.py.j2 +13 -0
- planar/scaffold_templates/planar.dev.yaml.j2 +34 -0
- planar/scaffold_templates/planar.prod.yaml.j2 +28 -0
- planar/scaffold_templates/pyproject.toml.j2 +10 -0
- planar/security/.jwt_middleware.py.un~ +0 -0
- planar/security/auth_context.py +148 -0
- planar/security/authorization.py +388 -0
- planar/security/default_policies.cedar +77 -0
- planar/security/jwt_middleware.py +116 -0
- planar/security/security_context.py +18 -0
- planar/security/tests/test_authorization_context.py +78 -0
- planar/security/tests/test_cedar_basics.py +41 -0
- planar/security/tests/test_cedar_policies.py +158 -0
- planar/security/tests/test_jwt_principal_context.py +179 -0
- planar/session.py +40 -0
- planar/sse/.constants.py.un~ +0 -0
- planar/sse/.example.html.un~ +0 -0
- planar/sse/.hub.py.un~ +0 -0
- planar/sse/.model.py.un~ +0 -0
- planar/sse/.proxy.py.un~ +0 -0
- planar/sse/constants.py +1 -0
- planar/sse/example.html +126 -0
- planar/sse/hub.py +216 -0
- planar/sse/model.py +8 -0
- planar/sse/proxy.py +257 -0
- planar/task_local.py +37 -0
- planar/test_app.py +51 -0
- planar/test_cli.py +372 -0
- planar/test_config.py +512 -0
- planar/test_object_config.py +527 -0
- planar/test_object_registry.py +14 -0
- planar/test_sqlalchemy.py +158 -0
- planar/test_utils.py +105 -0
- planar/testing/.client.py.un~ +0 -0
- planar/testing/.memory_storage.py.un~ +0 -0
- planar/testing/.planar_test_client.py.un~ +0 -0
- planar/testing/.predictable_tracer.py.un~ +0 -0
- planar/testing/.synchronizable_tracer.py.un~ +0 -0
- planar/testing/.test_memory_storage.py.un~ +0 -0
- planar/testing/.workflow_observer.py.un~ +0 -0
- planar/testing/__init__.py +0 -0
- planar/testing/memory_storage.py +78 -0
- planar/testing/planar_test_client.py +54 -0
- planar/testing/synchronizable_tracer.py +153 -0
- planar/testing/test_memory_storage.py +143 -0
- planar/testing/workflow_observer.py +73 -0
- planar/utils.py +70 -0
- planar/workflows/.__init__.py.un~ +0 -0
- planar/workflows/.builtin_steps.py.un~ +0 -0
- planar/workflows/.concurrency_tracing.py.un~ +0 -0
- planar/workflows/.context.py.un~ +0 -0
- planar/workflows/.contrib.py.un~ +0 -0
- planar/workflows/.decorators.py.un~ +0 -0
- planar/workflows/.durable_test.py.un~ +0 -0
- planar/workflows/.errors.py.un~ +0 -0
- planar/workflows/.events.py.un~ +0 -0
- planar/workflows/.exceptions.py.un~ +0 -0
- planar/workflows/.execution.py.un~ +0 -0
- planar/workflows/.human.py.un~ +0 -0
- planar/workflows/.lock.py.un~ +0 -0
- planar/workflows/.misc.py.un~ +0 -0
- planar/workflows/.model.py.un~ +0 -0
- planar/workflows/.models.py.un~ +0 -0
- planar/workflows/.notifications.py.un~ +0 -0
- planar/workflows/.orchestrator.py.un~ +0 -0
- planar/workflows/.runtime.py.un~ +0 -0
- planar/workflows/.serialization.py.un~ +0 -0
- planar/workflows/.step.py.un~ +0 -0
- planar/workflows/.step_core.py.un~ +0 -0
- planar/workflows/.sub_workflow_runner.py.un~ +0 -0
- planar/workflows/.sub_workflow_scheduler.py.un~ +0 -0
- planar/workflows/.test_concurrency.py.un~ +0 -0
- planar/workflows/.test_concurrency_detection.py.un~ +0 -0
- planar/workflows/.test_human.py.un~ +0 -0
- planar/workflows/.test_lock_timeout.py.un~ +0 -0
- planar/workflows/.test_orchestrator.py.un~ +0 -0
- planar/workflows/.test_race_conditions.py.un~ +0 -0
- planar/workflows/.test_serialization.py.un~ +0 -0
- planar/workflows/.test_suspend_deserialization.py.un~ +0 -0
- planar/workflows/.test_workflow.py.un~ +0 -0
- planar/workflows/.tracing.py.un~ +0 -0
- planar/workflows/.types.py.un~ +0 -0
- planar/workflows/.util.py.un~ +0 -0
- planar/workflows/.utils.py.un~ +0 -0
- planar/workflows/.workflow.py.un~ +0 -0
- planar/workflows/.workflow_wrapper.py.un~ +0 -0
- planar/workflows/.wrappers.py.un~ +0 -0
- planar/workflows/__init__.py +42 -0
- planar/workflows/context.py +44 -0
- planar/workflows/contrib.py +190 -0
- planar/workflows/decorators.py +217 -0
- planar/workflows/events.py +185 -0
- planar/workflows/exceptions.py +34 -0
- planar/workflows/execution.py +198 -0
- planar/workflows/lock.py +229 -0
- planar/workflows/misc.py +5 -0
- planar/workflows/models.py +154 -0
- planar/workflows/notifications.py +96 -0
- planar/workflows/orchestrator.py +383 -0
- planar/workflows/query.py +256 -0
- planar/workflows/serialization.py +409 -0
- planar/workflows/step_core.py +373 -0
- planar/workflows/step_metadata.py +357 -0
- planar/workflows/step_testing_utils.py +86 -0
- planar/workflows/sub_workflow_runner.py +191 -0
- planar/workflows/test_concurrency_detection.py +120 -0
- planar/workflows/test_lock_timeout.py +140 -0
- planar/workflows/test_serialization.py +1195 -0
- planar/workflows/test_suspend_deserialization.py +231 -0
- planar/workflows/test_workflow.py +1967 -0
- planar/workflows/tracing.py +106 -0
- planar/workflows/wrappers.py +41 -0
- planar-0.5.0.dist-info/METADATA +285 -0
- planar-0.5.0.dist-info/RECORD +289 -0
- planar-0.5.0.dist-info/WHEEL +4 -0
- planar-0.5.0.dist-info/entry_points.txt +3 -0
@@ -0,0 +1,256 @@
|
|
1
|
+
from typing import Any
|
2
|
+
|
3
|
+
from sqlalchemy import types
|
4
|
+
from sqlmodel import case, cast, col, literal, select
|
5
|
+
from sqlmodel import func as sql_func
|
6
|
+
from sqlmodel.ext.asyncio.session import AsyncSession
|
7
|
+
|
8
|
+
from planar.logging import get_logger
|
9
|
+
from planar.modeling.orm.query_filter_builder import build_paginated_query
|
10
|
+
from planar.routers.models import DurationStats
|
11
|
+
from planar.utils import utc_now
|
12
|
+
from planar.workflows import LockedResource, Workflow
|
13
|
+
from planar.workflows.models import WorkflowStatus, workflow_lock_join_cond
|
14
|
+
|
15
|
+
|
16
|
+
def build_effective_status_case():
    """Build SQL case expression for calculating effective workflow status.

    The "effective" status overlays two virtual states on top of the stored
    ``Workflow.status`` column:

    - ``"running"``: the workflow row joins to a ``LockedResource`` whose
      ``lock_until`` is non-null and still in the future (a worker holds the
      lock right now).
    - ``"suspended"``: ``wakeup_at`` or ``waiting_for_event`` is set on the
      workflow row.

    Otherwise the persisted status is used, lowercased in SQL so it compares
    consistently with the lowercase literals above.

    NOTE(review): ``now`` is captured when the expression is *built*, not when
    the query executes — callers appear to build this per-query, but a cached
    query object would compare against a stale timestamp; confirm.
    """
    now = utc_now()
    return case(
        # When lock_until is not null and in the future, it's RUNNING
        (
            (col(LockedResource.lock_until).isnot(None))
            & (col(LockedResource.lock_until) > now),
            literal("running"),
        ),
        # When wakeup_at is set or waiting_for_event is set, it's SUSPENDED
        (
            (col(Workflow.wakeup_at).isnot(None))
            | (col(Workflow.waiting_for_event).isnot(None)),
            literal("suspended"),
        ),
        # Fall back to the stored status, lowercased to match the literals.
        else_=sql_func.lower(cast(Workflow.status, types.Text)),
    )
|
34
|
+
|
35
|
+
|
36
|
+
logger = get_logger(__name__)
|
37
|
+
|
38
|
+
|
39
|
+
async def get_bulk_workflow_run_statuses(
    workflow_names: list[str],
    session: AsyncSession,
    filters: list[tuple[Any, str, Any]] = [],
) -> dict[str, dict[WorkflowStatus, int]]:
    """
    Get the status counts for multiple workflows in a single query.

    Args:
        workflow_names: Function names of the workflows to count.
        session: Async DB session used to execute the query.
        filters: Extra (column, operator, value) filters forwarded to
            build_paginated_query. Read-only; the shared default list is
            never mutated.

    Returns:
        Mapping of workflow name -> {effective status -> run count}. Every
        requested name has an entry, even if it has no runs.
    """
    logger.debug(
        "getting bulk workflow run statuses",
        workflow_names=workflow_names,
        filters=filters,
    )
    if not workflow_names:
        return {}

    # Single grouped query: count runs per (workflow, effective status). The
    # lock table is outer-joined so the virtual "running" state can be derived.
    status_query = (
        select(
            col(Workflow.function_name).label("workflow_name"),
            build_effective_status_case().label("effective_status"),
            sql_func.count().label("count"),
        )
        .select_from(Workflow)
        .outerjoin(LockedResource, workflow_lock_join_cond())
        .where(col(Workflow.function_name).in_(workflow_names))
        .group_by(col(Workflow.function_name), "effective_status")
    )

    status_counts = (
        await session.exec(
            build_paginated_query(
                status_query,
                filters=filters,
            )[0]
        )
    ).all()
    logger.debug("raw status counts from db", status_counts=status_counts)

    # Group results by workflow name
    bulk_statuses: dict[str, dict[WorkflowStatus, int]] = {}
    for workflow_name, status_str, count in status_counts:
        per_workflow = bulk_statuses.setdefault(workflow_name, {})
        # Convert the status string directly to WorkflowStatus enum
        try:
            per_workflow[WorkflowStatus(status_str)] = count
        except ValueError:
            # Skip invalid status strings (shouldn't happen with valid data)
            logger.exception(
                "invalid status string encountered for workflow",
                status_str=status_str,
                workflow_name=workflow_name,
            )

    # Ensure all requested workflows have an entry, even if empty
    for workflow_name in workflow_names:
        bulk_statuses.setdefault(workflow_name, {})

    logger.debug("returning bulk statuses", bulk_statuses=bulk_statuses)
    return bulk_statuses
|
103
|
+
|
104
|
+
|
105
|
+
async def get_workflow_run_statuses(
    workflow_name: str, session: AsyncSession, filters: list[tuple[Any, str, Any]] = []
) -> dict[WorkflowStatus, int]:
    """
    Get the status counts for a workflow.

    Thin convenience wrapper over get_bulk_workflow_run_statuses for a
    single workflow name.
    """
    per_workflow = await get_bulk_workflow_run_statuses(
        [workflow_name], session, filters
    )
    return per_workflow.get(workflow_name, {})
|
115
|
+
|
116
|
+
|
117
|
+
async def calculate_bulk_workflow_duration_stats(
    workflow_names: list[str],
    session: AsyncSession,
    filters: list[tuple[Any, str, Any]] = [],
) -> dict[str, DurationStats | None]:
    """Calculate min, avg, and max execution duration for multiple workflows in a single query.

    Durations are measured in whole seconds as ``updated_at - created_at`` of
    each run, restricted to runs that finished (SUCCEEDED or FAILED).

    Args:
        workflow_names: Function names of the workflows to aggregate.
        session: Async DB session used to execute the query.
        filters: Extra (column, operator, value) filters forwarded to
            build_paginated_query.

    Returns:
        Mapping of workflow name -> DurationStats, or None for workflows with
        no completed runs. Every requested name has an entry.
    """
    logger.debug(
        "calculating bulk workflow duration stats",
        workflow_names=workflow_names,
        filters=filters,
    )
    if not workflow_names:
        return {}

    # Run duration in seconds. Built once instead of repeating the epoch
    # subtraction for each of the three aggregates below.
    duration_seconds = sql_func.extract(
        "epoch", col(Workflow.updated_at)
    ) - sql_func.extract("epoch", col(Workflow.created_at))

    def _agg(agg_fn, label: str):
        # Cast the aggregate to Integer so drivers return plain ints.
        return sql_func.cast(agg_fn(duration_seconds), types.Integer).label(label)

    duration_query = (
        select(
            col(Workflow.function_name).label("workflow_name"),
            _agg(sql_func.min, "min_duration"),
            _agg(sql_func.avg, "avg_duration"),
            _agg(sql_func.max, "max_duration"),
        )
        .where(
            col(Workflow.function_name).in_(workflow_names),
            # Only completed runs have a meaningful duration.
            (
                (Workflow.status == WorkflowStatus.SUCCEEDED)
                | (Workflow.status == WorkflowStatus.FAILED)
            ),
        )
        .group_by(col(Workflow.function_name))
    )

    completed_workflows = (
        await session.exec(
            build_paginated_query(
                duration_query,
                filters=filters,
            )[0]
        )
    ).all()
    logger.debug("raw duration stats from db", completed_workflows=completed_workflows)

    # Group results by workflow name
    bulk_durations: dict[str, DurationStats | None] = {}
    for workflow_name, min_duration, avg_duration, max_duration in completed_workflows:
        if min_duration is not None:
            bulk_durations[workflow_name] = DurationStats(
                min_seconds=min_duration,
                avg_seconds=avg_duration,
                max_seconds=max_duration,
            )
        else:
            bulk_durations[workflow_name] = None

    # Ensure all requested workflows have an entry, even if None
    for workflow_name in workflow_names:
        bulk_durations.setdefault(workflow_name, None)

    logger.debug("returning bulk durations", bulk_durations=bulk_durations)
    return bulk_durations
|
201
|
+
|
202
|
+
|
203
|
+
async def calculate_workflow_duration_stats(
    session: AsyncSession, function_name: str, filters: list[tuple[Any, str, Any]] = []
) -> DurationStats | None:
    """Calculate min, avg, and max execution duration for a workflow.

    Thin convenience wrapper over calculate_bulk_workflow_duration_stats for
    a single function name.
    """
    stats_by_name = await calculate_bulk_workflow_duration_stats(
        [function_name], session, filters
    )
    return stats_by_name.get(function_name)
|
211
|
+
|
212
|
+
|
213
|
+
async def calculate_effective_status(
    session: AsyncSession, workflow: Workflow
) -> WorkflowStatus:
    """Calculate the effective status for a workflow, considering virtual states.

    Runs the effective-status CASE expression against this single workflow
    row (lock table outer-joined) and converts the resulting string back to
    the WorkflowStatus enum, falling back to the stored status when the
    query yields nothing or an unknown value.
    """
    logger.debug("calculating effective status for workflow", workflow_id=workflow.id)

    status_query = (
        select(build_effective_status_case())
        .select_from(Workflow)
        .outerjoin(LockedResource, workflow_lock_join_cond())
        .where(Workflow.id == workflow.id)
    )
    raw_status = (await session.exec(status_query)).first()
    logger.debug(
        "effective status string from db for workflow",
        workflow_id=workflow.id,
        effective_status_str=raw_status,
    )

    # Convert the status string directly to WorkflowStatus enum
    if raw_status is None:
        logger.debug(
            "effective status string is none, returning original status",
            original_status=workflow.status,
        )
        return workflow.status

    try:
        converted = WorkflowStatus(raw_status)
    except ValueError:
        # Fallback to the workflow's actual status if conversion fails
        logger.exception(
            "invalid effective status string, falling back to actual status",
            effective_status_str=raw_status,
            workflow_id=workflow.id,
            actual_status=workflow.status,
        )
        return workflow.status

    logger.debug(
        "converted effective status for workflow",
        workflow_id=workflow.id,
        status=converted,
    )
    return converted
|
@@ -0,0 +1,409 @@
|
|
1
|
+
"""
|
2
|
+
NOTE: The naming of this module and its functions is not entirely accurate. The actual
|
3
|
+
serialization to/from JSON is handled by SQLAlchemy. This module is responsible for
|
4
|
+
converting Python objects (like Pydantic models, custom classes, and primitive types)
|
5
|
+
to/from types that are JSON-serializable, so they can be properly stored and retrieved
|
6
|
+
from the database.
|
7
|
+
"""
|
8
|
+
|
9
|
+
import inspect
|
10
|
+
import uuid
|
11
|
+
from dataclasses import fields, is_dataclass
|
12
|
+
from datetime import datetime, timedelta
|
13
|
+
from decimal import Decimal
|
14
|
+
from types import UnionType
|
15
|
+
from typing import (
|
16
|
+
Any,
|
17
|
+
Callable,
|
18
|
+
Dict,
|
19
|
+
Generic,
|
20
|
+
List,
|
21
|
+
Sequence,
|
22
|
+
Tuple,
|
23
|
+
Type,
|
24
|
+
TypeVar,
|
25
|
+
Union,
|
26
|
+
cast,
|
27
|
+
get_args,
|
28
|
+
get_origin,
|
29
|
+
get_type_hints,
|
30
|
+
)
|
31
|
+
|
32
|
+
from pydantic import BaseModel
|
33
|
+
|
34
|
+
from planar.logging import get_logger
|
35
|
+
|
36
|
+
logger = get_logger(__name__)
|
37
|
+
|
38
|
+
# Type variable for pydantic models
|
39
|
+
ModelT = TypeVar("ModelT", bound=BaseModel)
|
40
|
+
|
41
|
+
|
42
|
+
def is_pydantic_model(obj_type: Any) -> bool:
    """Check if a type is a pydantic model."""
    # Non-classes can never be models; bail out before issubclass, which
    # would raise TypeError on non-class arguments.
    if not inspect.isclass(obj_type):
        return False
    return issubclass(obj_type, BaseModel)
|
45
|
+
|
46
|
+
|
47
|
+
# Custom serialization for primitives without a Pydantic wrapper
|
48
|
+
def serialize_primitive(value: Any) -> Any:
    """Convert a primitive type into a JSON-serializable form.

    bool/int/float pass through untouched; Decimal and UUID become strings
    (Decimal via str to preserve precision); datetime becomes an ISO-8601
    string; timedelta becomes a {days, seconds, microseconds} dict. Anything
    else is returned unchanged.
    """
    if isinstance(value, (bool, int, float)):
        return value
    if isinstance(value, (Decimal, uuid.UUID)):
        # str() keeps full Decimal precision and yields the canonical UUID form.
        return str(value)
    if isinstance(value, datetime):
        return value.isoformat()
    if isinstance(value, timedelta):
        # Represent timedelta as a dict
        return {
            "days": value.days,
            "seconds": value.seconds,
            "microseconds": value.microseconds,
        }
    return value
|
68
|
+
|
69
|
+
|
70
|
+
def deserialize_primitive(value: Any, type_hint: Type) -> Any:
    """Convert a JSON-serializable representation back to a primitive type.

    timedelta accepts either the {days, seconds, microseconds} dict produced
    by serialize_primitive or a plain number of seconds; any other shape
    raises ValueError. Unknown type hints return the value unchanged.
    """
    if type_hint is timedelta:
        if isinstance(value, dict) and all(
            k in value for k in ("days", "seconds", "microseconds")
        ):
            return timedelta(**value)
        if isinstance(value, (int, float)):
            return timedelta(seconds=value)
        raise ValueError(f"Cannot deserialize {value} as timedelta")

    # Each supported hint maps to its constructor/parser.
    converters: dict[Any, Callable[[Any], Any]] = {
        bool: bool,
        int: int,
        float: float,
        Decimal: Decimal,
        uuid.UUID: uuid.UUID,
        datetime: datetime.fromisoformat,
    }
    converter = converters.get(type_hint)
    return converter(value) if converter is not None else value
|
95
|
+
|
96
|
+
|
97
|
+
def serialize_value(value: Any) -> Any:
    """
    Serialize a value based on its runtime type.

    - If it's a Pydantic model, call model_dump(mode="json").
    - For supported primitives, use the custom serializer.
    - Otherwise, return the value as is.

    The isinstance checks are deliberately ordered: model instances before
    model classes, dataclass instances before lists and primitives. Values
    of unrecognized types fall through unchanged.
    """
    if value is None:
        return None

    # Pydantic model *instance* -> JSON-compatible dict.
    if isinstance(value, BaseModel):
        return value.model_dump(mode="json")

    # Pydantic model *class* (metaclass is type(BaseModel)) -> JSON schema.
    if isinstance(value, type(BaseModel)):
        return cast(BaseModel, value).model_json_schema()

    # Dataclass *instance* (excluding the class itself) -> dict of
    # recursively serialized fields.
    if is_dataclass(value) and not isinstance(value, type):
        return {f.name: serialize_value(getattr(value, f.name)) for f in fields(value)}

    # Handle lists (recursively; other containers are passed through as-is)
    if isinstance(value, list):
        return [serialize_value(v) for v in value]

    # Supported primitives get the custom converter.
    if isinstance(value, (bool, int, float, Decimal, uuid.UUID, datetime, timedelta)):
        return serialize_primitive(value)

    return value
|
125
|
+
|
126
|
+
|
127
|
+
def deserialize_value(
    value: Any,
    type_hint: Type | UnionType | None = None,
) -> Any:
    """
    Deserialize a value based on the provided type hint.

    - If the type hint is a Union, try each candidate.
    - If it's a Pydantic model, use model_validate().
    - For supported primitives, use the custom deserializer.
    - Otherwise, return the value as is.

    Raises:
        ValueError: if the hint is a Union and no candidate accepts the
            value, or if a type[Model] hint does not match the stored schema.
    """
    if value is None or type_hint is None:
        return value

    # Handle Union types by trying each candidate.
    origin = get_origin(type_hint)
    if origin is Union or origin is UnionType:
        # None was already handled above, so skip the NoneType member.
        for candidate in get_args(type_hint):
            if candidate is type(None):
                continue
            try:
                # First candidate that deserializes without error wins.
                return deserialize_value(value, candidate)
            except Exception:
                continue
        raise ValueError(f"Could not deserialize {value} into any type in {type_hint}")

    # list[T]: recurse element-wise with the inner hint.
    if origin is list:
        inner_type = get_args(type_hint)[0]
        deserialized_list = []
        for item in value:
            deserialized_list.append(deserialize_value(item, inner_type))
        return deserialized_list

    # Handle Type[T] (i.e., type hints like type[Person])
    if origin is type:
        inner_type = get_args(type_hint)[0]
        if inspect.isclass(inner_type) and issubclass(inner_type, BaseModel):
            # Stored value is a JSON schema dict; minimally validate that it
            # describes this model (title + object type) before accepting.
            if (
                "title" not in value
                or value["title"] != inner_type.__name__
                or "type" not in value
                or value["type"] != "object"
            ):
                raise ValueError(f"Invalid type hint {type_hint} for {value}")
            # Technically, no deserialization is needed since the type hint is already the
            # Pydantic class we need.
            # To be more strict we could check inner_type.model_json_schema() == value,
            # but that would not allow backwards compatibility in Pydantic model changes.
            return inner_type

    if is_pydantic_model(type_hint):
        return cast(BaseModel, type_hint).model_validate(value)

    # Dataclass: rebuild field-by-field using each field's declared type.
    # NOTE(review): if value is not a dict, this calls type_hint() with no
    # kwargs — that fails for dataclasses with required fields; confirm
    # whether a non-dict value can reach here.
    if inspect.isclass(type_hint) and is_dataclass(type_hint):
        kwargs = {}
        if isinstance(value, dict):
            for f in fields(type_hint):
                kwargs[f.name] = deserialize_value(
                    value.get(f.name), cast(Type | UnionType | None, f.type)
                )
        return type_hint(**kwargs)

    # Check if type_hint is a plain type before passing to deserialize_primitive
    if isinstance(type_hint, type) and type_hint in (
        bool,
        int,
        float,
        Decimal,
        uuid.UUID,
        datetime,
        timedelta,
    ):
        return deserialize_primitive(value, type_hint)

    return value
|
203
|
+
|
204
|
+
|
205
|
+
def serialize_args(
    func: Callable, args: Sequence[Any], kwargs: Dict[str, Any]
) -> tuple[List[Any], Dict[str, Any]]:
    """
    Serialize function arguments based solely on their runtime values.

    ``func`` is accepted for signature symmetry with deserialize_args but is
    not consulted here; serialization is driven purely by runtime types.
    """
    positional = [serialize_value(item) for item in (args or [])]
    named = {name: serialize_value(item) for name, item in (kwargs or {}).items()}
    return positional, named
|
217
|
+
|
218
|
+
|
219
|
+
def deserialize_args(
    func: Callable, args: List[Any], kwargs: Dict[str, Any]
) -> tuple[List[Any], Dict[str, Any]]:
    """
    Deserialize function arguments using the function signature's type hints.

    Positional args beyond the signature's parameter count are passed through
    unchanged; parameters without a type hint are likewise left as-is.
    """
    hints = get_type_hints(func)
    param_names = list(inspect.signature(func).parameters)
    param_count = len(param_names)

    restored_args: List[Any] = []
    for index, raw in enumerate(args or []):
        if index < param_count:
            restored_args.append(deserialize_value(raw, hints.get(param_names[index])))
        else:
            # No matching parameter (e.g. *args overflow): keep raw value.
            restored_args.append(raw)

    restored_kwargs = {
        name: deserialize_value(raw, hints.get(name))
        for name, raw in (kwargs or {}).items()
    }

    return restored_args, restored_kwargs
|
245
|
+
|
246
|
+
|
247
|
+
def serialize_result(func: Callable, result: Any) -> Any:
    """
    Serialize a function result based solely on its runtime value.

    ``func`` is unused here; kept for interface symmetry with
    deserialize_result.
    """
    return None if result is None else serialize_value(result)
|
255
|
+
|
256
|
+
|
257
|
+
# --- Simplified Generic Type Handling Helpers ---
|
258
|
+
|
259
|
+
|
260
|
+
def _get_generic_metadata(type_hint: Any) -> Tuple[Type | None, Tuple[Any, ...] | None]:
|
261
|
+
"""Gets origin and args, handling standard typing and Pydantic generics."""
|
262
|
+
origin = get_origin(type_hint)
|
263
|
+
args = get_args(type_hint)
|
264
|
+
if origin is None and hasattr(type_hint, "__pydantic_generic_metadata__"):
|
265
|
+
metadata = getattr(type_hint, "__pydantic_generic_metadata__", {})
|
266
|
+
origin = metadata.get("origin")
|
267
|
+
args = metadata.get("args")
|
268
|
+
if not (origin and args is not None):
|
269
|
+
return None, None # Return None if Pydantic metadata is incomplete
|
270
|
+
return origin, args
|
271
|
+
|
272
|
+
|
273
|
+
def _infer_concrete_type_from_args(
    func: Callable,
    func_args: Sequence[Any],
    func_kwargs: Dict[str, Any],
    target_typevar: TypeVar,
) -> Type | None:
    """Infer the concrete type bound to a TypeVar from a call's actual arguments.

    Scans the parameters of ``func``: a parameter annotated directly as ``T``
    yields the argument's runtime type; a parameter annotated ``list[T]``
    yields the type of the first non-None element of the argument list.

    Args:
        func: The function whose signature supplies the annotations.
        func_args: Positional arguments of the call being inspected.
        func_kwargs: Keyword arguments of the call being inspected.
        target_typevar: The TypeVar whose concrete binding is sought.

    Returns:
        The inferred concrete type, or None when the arguments cannot be
        bound to the signature or no parameter pins down the TypeVar.
    """
    type_hints = get_type_hints(func)
    sig = inspect.signature(func)
    try:
        bound_args = sig.bind(*func_args, **func_kwargs)
        bound_args.apply_defaults()
    except TypeError:
        return None  # Cannot infer if binding fails

    for param_name, arg_value in bound_args.arguments.items():
        # DEBUG (was INFO): logging every raw argument value is too noisy —
        # and potentially sensitive — for INFO, and every other log call in
        # this module uses DEBUG for this kind of trace output.
        logger.debug("parameter info", param_name=param_name, arg_value=arg_value)
        param_hint = type_hints.get(param_name)
        if param_hint is None or arg_value is None:
            continue

        # Direct Match: param is T.
        # NOTE(review): TypeVars are compared by __name__ rather than
        # identity — presumably because generic aliasing can produce distinct
        # TypeVar objects with the same name; confirm before tightening.
        if (
            isinstance(param_hint, TypeVar)
            and param_hint.__name__ == target_typevar.__name__
        ):
            return type(arg_value)

        # List Match: param is list[T]
        param_origin, param_args = _get_generic_metadata(param_hint)
        if (
            param_origin in (list, List)
            and param_args
            and isinstance(param_args[0], TypeVar)
            and param_args[0].__name__ == target_typevar.__name__
        ):
            if isinstance(arg_value, list) and arg_value:
                # Use type of first non-None element
                element_type = next(
                    (type(el) for el in arg_value if el is not None), None
                )
                if element_type:
                    return element_type
    return None  # TypeVar not found or could not infer type
|
317
|
+
|
318
|
+
|
319
|
+
def deserialize_result(
    func: Callable,
    result: Any,
    return_type: Type | None = None,
    args: Sequence[Any] | None = None,
    kwargs: Dict[str, Any] | None = None,
) -> Any:
    """Deserialize a stored result back into the type declared by ``func``.

    Resolution order:
      1. An explicitly provided ``return_type`` wins outright.
      2. Otherwise the ``return`` annotation of ``func`` is used.
      3. If that annotation is a generic Pydantic model parameterized by a
         single TypeVar (``Model[T]`` or ``Model[list[T]]``), the concrete
         type for ``T`` is inferred from the call's ``args``/``kwargs`` via
         ``_infer_concrete_type_from_args`` and the model is re-parameterized
         before deserializing.
      4. On any inference or reconstruction failure, falls back to
         deserializing with the raw signature hint.

    Args:
        func: The function whose return annotation guides deserialization.
        result: The serialized value; None is passed through unchanged.
        return_type: Optional explicit target type overriding the signature.
        args: Positional arguments of the original call (used for TypeVar
            inference only).
        kwargs: Keyword arguments of the original call (same purpose).

    Returns:
        The deserialized value, or None when ``result`` is None.
    """
    if result is None:
        return None

    args = args or []
    kwargs = kwargs or {}

    # Use explicit return_type if provided
    if return_type is not None:
        logger.debug("using explicitly provided return type", return_type=return_type)
        return deserialize_value(result, return_type)

    # Otherwise, fallback to inferring from function signature
    logger.debug("inferring return type from function signature")
    type_hints = get_type_hints(func)
    return_type_hint = type_hints.get("return")

    if return_type_hint is None:
        logger.debug(
            "no return type hint found in signature, deserializing without hint"
        )
        return deserialize_value(result, None)

    return_origin, return_args = _get_generic_metadata(return_type_hint)

    # Check if inference is needed for generics based on signature
    is_generic_base_model = (
        return_origin
        and inspect.isclass(return_origin)
        and issubclass(return_origin, BaseModel)
        and issubclass(return_origin, Generic)
    )

    # Only attempt inference if the signature hint is a Generic Pydantic model
    # with a single TypeVar argument.
    target_typevar = None
    is_list_wrapped = False
    if is_generic_base_model and return_args and len(return_args) == 1:
        type_arg = return_args[0]
        if isinstance(type_arg, TypeVar):
            # Model[T]: infer T directly.
            target_typevar = type_arg
        else:
            # Model[list[T]]: unwrap the list to find the inner TypeVar.
            arg_origin, inner_args = _get_generic_metadata(type_arg)
            if (
                arg_origin in (list, List)
                and inner_args
                and isinstance(inner_args[0], TypeVar)
            ):
                target_typevar = inner_args[0]
                is_list_wrapped = True

    if target_typevar is None:
        # Not a generic type requiring inference, or inference not possible/needed.
        logger.debug(
            "using signature return type hint directly",
            return_type_hint=return_type_hint,
        )
        return deserialize_value(result, return_type_hint)

    # Infer the concrete type for T
    concrete_type = _infer_concrete_type_from_args(func, args, kwargs, target_typevar)

    if not concrete_type:
        # Inference failed; the raw hint may still deserialize acceptably.
        logger.warning(
            "could not infer concrete type, using original hint",
            target_typevar=str(target_typevar),
            return_type_hint=return_type_hint,
        )
        return deserialize_value(result, return_type_hint)

    # Construct the final concrete type and deserialize
    try:
        # Re-wrap in list[...] when the signature was Model[list[T]].
        final_arg = list[concrete_type] if is_list_wrapped else concrete_type  # type: ignore
        # Re-parameterize the generic model with the inferred concrete type.
        concrete_return_type = return_origin[final_arg]  # type: ignore
        logger.debug(
            "constructed concrete return type",
            concrete_return_type=concrete_return_type,
        )
        return deserialize_value(result, concrete_return_type)
    except Exception:
        # Broad catch is deliberate: reconstruction touches Pydantic generic
        # internals and any failure should degrade to the raw hint, not crash.
        logger.exception(
            "error reconstructing/deserializing",
            return_origin=return_origin,
        )
        return deserialize_value(result, return_type_hint)  # Fallback
|