planar 0.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- planar/.__init__.py.un~ +0 -0
- planar/._version.py.un~ +0 -0
- planar/.app.py.un~ +0 -0
- planar/.cli.py.un~ +0 -0
- planar/.config.py.un~ +0 -0
- planar/.context.py.un~ +0 -0
- planar/.db.py.un~ +0 -0
- planar/.di.py.un~ +0 -0
- planar/.engine.py.un~ +0 -0
- planar/.files.py.un~ +0 -0
- planar/.log_context.py.un~ +0 -0
- planar/.log_metadata.py.un~ +0 -0
- planar/.logging.py.un~ +0 -0
- planar/.object_registry.py.un~ +0 -0
- planar/.otel.py.un~ +0 -0
- planar/.server.py.un~ +0 -0
- planar/.session.py.un~ +0 -0
- planar/.sqlalchemy.py.un~ +0 -0
- planar/.task_local.py.un~ +0 -0
- planar/.test_app.py.un~ +0 -0
- planar/.test_config.py.un~ +0 -0
- planar/.test_object_config.py.un~ +0 -0
- planar/.test_sqlalchemy.py.un~ +0 -0
- planar/.test_utils.py.un~ +0 -0
- planar/.util.py.un~ +0 -0
- planar/.utils.py.un~ +0 -0
- planar/__init__.py +26 -0
- planar/_version.py +1 -0
- planar/ai/.__init__.py.un~ +0 -0
- planar/ai/._models.py.un~ +0 -0
- planar/ai/.agent.py.un~ +0 -0
- planar/ai/.agent_utils.py.un~ +0 -0
- planar/ai/.events.py.un~ +0 -0
- planar/ai/.files.py.un~ +0 -0
- planar/ai/.models.py.un~ +0 -0
- planar/ai/.providers.py.un~ +0 -0
- planar/ai/.pydantic_ai.py.un~ +0 -0
- planar/ai/.pydantic_ai_agent.py.un~ +0 -0
- planar/ai/.pydantic_ai_provider.py.un~ +0 -0
- planar/ai/.step.py.un~ +0 -0
- planar/ai/.test_agent.py.un~ +0 -0
- planar/ai/.test_agent_serialization.py.un~ +0 -0
- planar/ai/.test_providers.py.un~ +0 -0
- planar/ai/.utils.py.un~ +0 -0
- planar/ai/__init__.py +15 -0
- planar/ai/agent.py +457 -0
- planar/ai/agent_utils.py +205 -0
- planar/ai/models.py +140 -0
- planar/ai/providers.py +1088 -0
- planar/ai/test_agent.py +1298 -0
- planar/ai/test_agent_serialization.py +229 -0
- planar/ai/test_providers.py +463 -0
- planar/ai/utils.py +102 -0
- planar/app.py +494 -0
- planar/cli.py +282 -0
- planar/config.py +544 -0
- planar/db/.db.py.un~ +0 -0
- planar/db/__init__.py +17 -0
- planar/db/alembic/env.py +136 -0
- planar/db/alembic/script.py.mako +28 -0
- planar/db/alembic/versions/3476068c153c_initial_system_tables_migration.py +339 -0
- planar/db/alembic.ini +128 -0
- planar/db/db.py +318 -0
- planar/files/.config.py.un~ +0 -0
- planar/files/.local.py.un~ +0 -0
- planar/files/.local_filesystem.py.un~ +0 -0
- planar/files/.model.py.un~ +0 -0
- planar/files/.models.py.un~ +0 -0
- planar/files/.s3.py.un~ +0 -0
- planar/files/.storage.py.un~ +0 -0
- planar/files/.test_files.py.un~ +0 -0
- planar/files/__init__.py +2 -0
- planar/files/models.py +162 -0
- planar/files/storage/.__init__.py.un~ +0 -0
- planar/files/storage/.base.py.un~ +0 -0
- planar/files/storage/.config.py.un~ +0 -0
- planar/files/storage/.context.py.un~ +0 -0
- planar/files/storage/.local_directory.py.un~ +0 -0
- planar/files/storage/.test_local_directory.py.un~ +0 -0
- planar/files/storage/.test_s3.py.un~ +0 -0
- planar/files/storage/base.py +61 -0
- planar/files/storage/config.py +44 -0
- planar/files/storage/context.py +15 -0
- planar/files/storage/local_directory.py +188 -0
- planar/files/storage/s3.py +220 -0
- planar/files/storage/test_local_directory.py +162 -0
- planar/files/storage/test_s3.py +299 -0
- planar/files/test_files.py +283 -0
- planar/human/.human.py.un~ +0 -0
- planar/human/.test_human.py.un~ +0 -0
- planar/human/__init__.py +2 -0
- planar/human/human.py +458 -0
- planar/human/models.py +80 -0
- planar/human/test_human.py +385 -0
- planar/logging/.__init__.py.un~ +0 -0
- planar/logging/.attributes.py.un~ +0 -0
- planar/logging/.formatter.py.un~ +0 -0
- planar/logging/.logger.py.un~ +0 -0
- planar/logging/.otel.py.un~ +0 -0
- planar/logging/.tracer.py.un~ +0 -0
- planar/logging/__init__.py +10 -0
- planar/logging/attributes.py +54 -0
- planar/logging/context.py +14 -0
- planar/logging/formatter.py +113 -0
- planar/logging/logger.py +114 -0
- planar/logging/otel.py +51 -0
- planar/modeling/.mixin.py.un~ +0 -0
- planar/modeling/.storage.py.un~ +0 -0
- planar/modeling/__init__.py +0 -0
- planar/modeling/field_helpers.py +59 -0
- planar/modeling/json_schema_generator.py +94 -0
- planar/modeling/mixins/__init__.py +10 -0
- planar/modeling/mixins/auditable.py +52 -0
- planar/modeling/mixins/test_auditable.py +97 -0
- planar/modeling/mixins/test_timestamp.py +134 -0
- planar/modeling/mixins/test_uuid_primary_key.py +52 -0
- planar/modeling/mixins/timestamp.py +53 -0
- planar/modeling/mixins/uuid_primary_key.py +19 -0
- planar/modeling/orm/.planar_base_model.py.un~ +0 -0
- planar/modeling/orm/__init__.py +18 -0
- planar/modeling/orm/planar_base_entity.py +29 -0
- planar/modeling/orm/query_filter_builder.py +122 -0
- planar/modeling/orm/reexports.py +15 -0
- planar/object_config/.object_config.py.un~ +0 -0
- planar/object_config/__init__.py +11 -0
- planar/object_config/models.py +114 -0
- planar/object_config/object_config.py +378 -0
- planar/object_registry.py +100 -0
- planar/registry_items.py +65 -0
- planar/routers/.__init__.py.un~ +0 -0
- planar/routers/.agents_router.py.un~ +0 -0
- planar/routers/.crud.py.un~ +0 -0
- planar/routers/.decision.py.un~ +0 -0
- planar/routers/.event.py.un~ +0 -0
- planar/routers/.file_attachment.py.un~ +0 -0
- planar/routers/.files.py.un~ +0 -0
- planar/routers/.files_router.py.un~ +0 -0
- planar/routers/.human.py.un~ +0 -0
- planar/routers/.info.py.un~ +0 -0
- planar/routers/.models.py.un~ +0 -0
- planar/routers/.object_config_router.py.un~ +0 -0
- planar/routers/.rule.py.un~ +0 -0
- planar/routers/.test_object_config_router.py.un~ +0 -0
- planar/routers/.test_workflow_router.py.un~ +0 -0
- planar/routers/.workflow.py.un~ +0 -0
- planar/routers/__init__.py +13 -0
- planar/routers/agents_router.py +197 -0
- planar/routers/entity_router.py +143 -0
- planar/routers/event.py +91 -0
- planar/routers/files.py +142 -0
- planar/routers/human.py +151 -0
- planar/routers/info.py +131 -0
- planar/routers/models.py +170 -0
- planar/routers/object_config_router.py +133 -0
- planar/routers/rule.py +108 -0
- planar/routers/test_agents_router.py +174 -0
- planar/routers/test_object_config_router.py +367 -0
- planar/routers/test_routes_security.py +169 -0
- planar/routers/test_rule_router.py +470 -0
- planar/routers/test_workflow_router.py +274 -0
- planar/routers/workflow.py +468 -0
- planar/rules/.decorator.py.un~ +0 -0
- planar/rules/.runner.py.un~ +0 -0
- planar/rules/.test_rules.py.un~ +0 -0
- planar/rules/__init__.py +23 -0
- planar/rules/decorator.py +184 -0
- planar/rules/models.py +355 -0
- planar/rules/rule_configuration.py +191 -0
- planar/rules/runner.py +64 -0
- planar/rules/test_rules.py +750 -0
- planar/scaffold_templates/app/__init__.py.j2 +0 -0
- planar/scaffold_templates/app/db/entities.py.j2 +11 -0
- planar/scaffold_templates/app/flows/process_invoice.py.j2 +67 -0
- planar/scaffold_templates/main.py.j2 +13 -0
- planar/scaffold_templates/planar.dev.yaml.j2 +34 -0
- planar/scaffold_templates/planar.prod.yaml.j2 +28 -0
- planar/scaffold_templates/pyproject.toml.j2 +10 -0
- planar/security/.jwt_middleware.py.un~ +0 -0
- planar/security/auth_context.py +148 -0
- planar/security/authorization.py +388 -0
- planar/security/default_policies.cedar +77 -0
- planar/security/jwt_middleware.py +116 -0
- planar/security/security_context.py +18 -0
- planar/security/tests/test_authorization_context.py +78 -0
- planar/security/tests/test_cedar_basics.py +41 -0
- planar/security/tests/test_cedar_policies.py +158 -0
- planar/security/tests/test_jwt_principal_context.py +179 -0
- planar/session.py +40 -0
- planar/sse/.constants.py.un~ +0 -0
- planar/sse/.example.html.un~ +0 -0
- planar/sse/.hub.py.un~ +0 -0
- planar/sse/.model.py.un~ +0 -0
- planar/sse/.proxy.py.un~ +0 -0
- planar/sse/constants.py +1 -0
- planar/sse/example.html +126 -0
- planar/sse/hub.py +216 -0
- planar/sse/model.py +8 -0
- planar/sse/proxy.py +257 -0
- planar/task_local.py +37 -0
- planar/test_app.py +51 -0
- planar/test_cli.py +372 -0
- planar/test_config.py +512 -0
- planar/test_object_config.py +527 -0
- planar/test_object_registry.py +14 -0
- planar/test_sqlalchemy.py +158 -0
- planar/test_utils.py +105 -0
- planar/testing/.client.py.un~ +0 -0
- planar/testing/.memory_storage.py.un~ +0 -0
- planar/testing/.planar_test_client.py.un~ +0 -0
- planar/testing/.predictable_tracer.py.un~ +0 -0
- planar/testing/.synchronizable_tracer.py.un~ +0 -0
- planar/testing/.test_memory_storage.py.un~ +0 -0
- planar/testing/.workflow_observer.py.un~ +0 -0
- planar/testing/__init__.py +0 -0
- planar/testing/memory_storage.py +78 -0
- planar/testing/planar_test_client.py +54 -0
- planar/testing/synchronizable_tracer.py +153 -0
- planar/testing/test_memory_storage.py +143 -0
- planar/testing/workflow_observer.py +73 -0
- planar/utils.py +70 -0
- planar/workflows/.__init__.py.un~ +0 -0
- planar/workflows/.builtin_steps.py.un~ +0 -0
- planar/workflows/.concurrency_tracing.py.un~ +0 -0
- planar/workflows/.context.py.un~ +0 -0
- planar/workflows/.contrib.py.un~ +0 -0
- planar/workflows/.decorators.py.un~ +0 -0
- planar/workflows/.durable_test.py.un~ +0 -0
- planar/workflows/.errors.py.un~ +0 -0
- planar/workflows/.events.py.un~ +0 -0
- planar/workflows/.exceptions.py.un~ +0 -0
- planar/workflows/.execution.py.un~ +0 -0
- planar/workflows/.human.py.un~ +0 -0
- planar/workflows/.lock.py.un~ +0 -0
- planar/workflows/.misc.py.un~ +0 -0
- planar/workflows/.model.py.un~ +0 -0
- planar/workflows/.models.py.un~ +0 -0
- planar/workflows/.notifications.py.un~ +0 -0
- planar/workflows/.orchestrator.py.un~ +0 -0
- planar/workflows/.runtime.py.un~ +0 -0
- planar/workflows/.serialization.py.un~ +0 -0
- planar/workflows/.step.py.un~ +0 -0
- planar/workflows/.step_core.py.un~ +0 -0
- planar/workflows/.sub_workflow_runner.py.un~ +0 -0
- planar/workflows/.sub_workflow_scheduler.py.un~ +0 -0
- planar/workflows/.test_concurrency.py.un~ +0 -0
- planar/workflows/.test_concurrency_detection.py.un~ +0 -0
- planar/workflows/.test_human.py.un~ +0 -0
- planar/workflows/.test_lock_timeout.py.un~ +0 -0
- planar/workflows/.test_orchestrator.py.un~ +0 -0
- planar/workflows/.test_race_conditions.py.un~ +0 -0
- planar/workflows/.test_serialization.py.un~ +0 -0
- planar/workflows/.test_suspend_deserialization.py.un~ +0 -0
- planar/workflows/.test_workflow.py.un~ +0 -0
- planar/workflows/.tracing.py.un~ +0 -0
- planar/workflows/.types.py.un~ +0 -0
- planar/workflows/.util.py.un~ +0 -0
- planar/workflows/.utils.py.un~ +0 -0
- planar/workflows/.workflow.py.un~ +0 -0
- planar/workflows/.workflow_wrapper.py.un~ +0 -0
- planar/workflows/.wrappers.py.un~ +0 -0
- planar/workflows/__init__.py +42 -0
- planar/workflows/context.py +44 -0
- planar/workflows/contrib.py +190 -0
- planar/workflows/decorators.py +217 -0
- planar/workflows/events.py +185 -0
- planar/workflows/exceptions.py +34 -0
- planar/workflows/execution.py +198 -0
- planar/workflows/lock.py +229 -0
- planar/workflows/misc.py +5 -0
- planar/workflows/models.py +154 -0
- planar/workflows/notifications.py +96 -0
- planar/workflows/orchestrator.py +383 -0
- planar/workflows/query.py +256 -0
- planar/workflows/serialization.py +409 -0
- planar/workflows/step_core.py +373 -0
- planar/workflows/step_metadata.py +357 -0
- planar/workflows/step_testing_utils.py +86 -0
- planar/workflows/sub_workflow_runner.py +191 -0
- planar/workflows/test_concurrency_detection.py +120 -0
- planar/workflows/test_lock_timeout.py +140 -0
- planar/workflows/test_serialization.py +1195 -0
- planar/workflows/test_suspend_deserialization.py +231 -0
- planar/workflows/test_workflow.py +1967 -0
- planar/workflows/tracing.py +106 -0
- planar/workflows/wrappers.py +41 -0
- planar-0.5.0.dist-info/METADATA +285 -0
- planar-0.5.0.dist-info/RECORD +289 -0
- planar-0.5.0.dist-info/WHEEL +4 -0
- planar-0.5.0.dist-info/entry_points.txt +3 -0
@@ -0,0 +1,185 @@
|
|
1
|
+
"""
|
2
|
+
Event system for workflow engine.
|
3
|
+
|
4
|
+
This module provides functions for emitting and checking events that workflows
|
5
|
+
might be waiting for.
|
6
|
+
"""
|
7
|
+
|
8
|
+
from datetime import datetime
|
9
|
+
from typing import Any, Dict, Optional, cast
|
10
|
+
from uuid import UUID, uuid4
|
11
|
+
|
12
|
+
from sqlmodel import col, select, update
|
13
|
+
|
14
|
+
from planar.logging import get_logger
|
15
|
+
from planar.session import get_session
|
16
|
+
from planar.workflows.models import Workflow, WorkflowEvent
|
17
|
+
from planar.workflows.orchestrator import WorkflowOrchestrator
|
18
|
+
from planar.workflows.tracing import trace
|
19
|
+
|
20
|
+
logger = get_logger(__name__)
|
21
|
+
|
22
|
+
|
23
|
+
async def emit_event(
    event_key: str,
    payload: Optional[Dict[str, Any]] = None,
    workflow_id: Optional[UUID] = None,
) -> tuple[WorkflowEvent, int]:
    """
    Emit a new event that workflows might be waiting for.

    Atomically wakes up every workflow whose ``waiting_for_event`` matches
    ``event_key`` (optionally restricted to one workflow) and records the
    event, then asks the orchestrator (if one is set) to poll soon.

    Args:
        event_key: The event identifier
        payload: Optional data to include with the event
        workflow_id: Optional workflow ID if the event is targeted at a specific workflow

    Returns:
        A tuple of (created event record, number of workflows woken up).
    """
    logger.debug(
        "emitting event",
        event_key=event_key,
        workflow_id=str(workflow_id),
        payload_keys=list(payload.keys()) if payload else None,
    )
    await trace("enter", event_key=event_key)
    session = get_session()

    # Match every workflow waiting on this key; narrow to one workflow when
    # the caller targeted the event.
    select_condition = col(Workflow.waiting_for_event) == event_key
    if workflow_id:
        select_condition &= col(Workflow.id) == workflow_id
    # Clear the wait state so the orchestrator considers these workflows
    # runnable again; RETURNING gives us the IDs that were actually woken.
    update_query = (
        update(Workflow)
        .where(select_condition)
        .values(waiting_for_event=None, wakeup_at=None)
        .returning(col(Workflow.id))
    )

    async def transaction():
        # Update affected events
        workflow_ids = (await session.exec(cast(Any, update_query))).all()
        logger.info(
            "event woke up workflows", event_key=event_key, count=len(workflow_ids)
        )
        await trace(
            "wake-affected-workflows", event_key=event_key, count=len(workflow_ids)
        )
        # Create the event record
        event = WorkflowEvent(
            id=uuid4(),
            event_key=event_key,
            workflow_id=workflow_id,
            payload=payload or {},
        )
        session.add(event)
        logger.debug("event record created", event_key=event_key, event_id=event.id)
        await trace("add-event-record", event_key=event_key)

        return event, workflow_ids

    # Wake-up and event insert must commit together.
    event, workflow_ids = await session.run_transaction(transaction)
    await trace("commit", event_key=event_key)
    logger.info("event committed to database", event_key=event_key, event_id=event.id)

    if workflow_ids and WorkflowOrchestrator.is_set():
        logger.debug("requesting orchestrator poll due to event", event_key=event_key)
        WorkflowOrchestrator.get().poll_soon()

    await trace("return", event_key=event_key)
    return event, len(workflow_ids)
|
90
|
+
|
91
|
+
|
92
|
+
async def check_event_exists(
    event_key: str, since: Optional[datetime] = None, workflow_id: Optional[UUID] = None
) -> bool:
    """
    Report whether an event with the given key has been recorded.

    Args:
        event_key: The event identifier
        since: Only consider events after this time
        workflow_id: Optional workflow ID to check for workflow-specific events

    Returns:
        True if a matching event exists, False otherwise
    """
    logger.debug(
        "checking if event exists",
        event_key=event_key,
        since=since,
    )
    session = get_session()

    # Base query: every event carrying this key.
    stmt = select(WorkflowEvent).where(WorkflowEvent.event_key == event_key)

    # Optional lower time bound.
    if since:
        stmt = stmt.where(WorkflowEvent.timestamp > since)

    # With a workflow ID, accept both events addressed to that workflow and
    # global events (those with no workflow ID at all).
    if workflow_id:
        stmt = stmt.where(
            (col(WorkflowEvent.workflow_id) == workflow_id)
            | (col(WorkflowEvent.workflow_id).is_(None))
        )

    # One matching row is enough to answer the question.
    found = (await session.exec(stmt)).first() is not None
    logger.debug("event exists check result", event_key=event_key, exists=found)
    return found
|
133
|
+
|
134
|
+
|
135
|
+
async def get_latest_event(
    event_key: str, since: Optional[datetime] = None, workflow_id: Optional[UUID] = None
) -> Optional[WorkflowEvent]:
    """
    Get the most recent event with the given key.

    Args:
        event_key: The event identifier
        since: Only consider events after this time
        workflow_id: Optional workflow ID to check for workflow-specific events

    Returns:
        The most recent matching event, or None if no match found
    """
    logger.debug(
        "getting latest event",
        event_key=event_key,
        since=since,
    )
    session = get_session()

    # All events for this key...
    stmt = select(WorkflowEvent).where(WorkflowEvent.event_key == event_key)
    # ...optionally restricted to those newer than `since`...
    if since:
        stmt = stmt.where(WorkflowEvent.timestamp > since)
    # ...and, when a workflow is given, to events addressed to it or to
    # global events (no workflow ID).
    if workflow_id:
        stmt = stmt.where(
            (col(WorkflowEvent.workflow_id) == workflow_id)
            | (col(WorkflowEvent.workflow_id).is_(None))
        )

    # Newest first, so the first row is the latest match.
    stmt = stmt.order_by(col(WorkflowEvent.timestamp).desc())

    latest = (await session.exec(stmt)).first()
    if latest is None:
        logger.debug("no event found with given criteria", event_key=event_key)
    else:
        logger.debug(
            "latest event found",
            event_key=event_key,
            event_id=latest.id,
            timestamp=latest.timestamp,
        )
    return latest
|
@@ -0,0 +1,34 @@
|
|
1
|
+
import builtins
|
2
|
+
from typing import Any
|
3
|
+
|
4
|
+
|
5
|
+
class StepError(Exception):
    """Exception reconstructed from a serialized step failure.

    Carries the original exception class name (``type`` — the parameter name
    shadows the builtin but is kept for interface compatibility) and the
    original message, and renders as ``"<type>: <message>"``.
    """

    def __init__(self, type: str, message: str):
        # Preserve the raw pieces for callers that inspect them.
        self.type = type
        self.original_message = message
        rendered = f"{type}: {message}"
        super().__init__(rendered)
|
10
|
+
|
11
|
+
|
12
|
+
class WorkflowError(StepError):
    """StepError subclass marking workflow-level failures; adds no behavior."""
|
14
|
+
|
15
|
+
|
16
|
+
class LockResourceFailed(Exception):
    """Signals that a resource lock could not be acquired."""
|
18
|
+
|
19
|
+
|
20
|
+
class NonDeterministicStepCallError(Exception):
    """Raised when a step call is not deterministic compared to previous executions."""
|
24
|
+
|
25
|
+
|
26
|
+
def try_restore_exception(exception: dict[str, Any]) -> Exception:
    """Rebuild an exception instance from its serialized ``{type, message}`` form.

    The ``type`` field is looked up among the builtin exception classes; when
    it names one, an instance of that class is returned. Otherwise a generic
    ``StepError`` carrying the original type name and message is returned.
    """
    name = exception["type"]
    text = exception["message"]
    # Resolve the class name against the builtins module.
    candidate = getattr(builtins, name, None)
    if isinstance(candidate, type) and issubclass(candidate, Exception):
        return candidate(text)
    # Not a builtin exception: wrap the raw data in a StepError instead.
    return StepError(name, text)
|
@@ -0,0 +1,198 @@
|
|
1
|
+
import traceback
|
2
|
+
from datetime import timedelta
|
3
|
+
from typing import Any, Callable, Coroutine, Generic
|
4
|
+
from weakref import WeakValueDictionary
|
5
|
+
|
6
|
+
import planar.workflows.notifications as notifications
|
7
|
+
from planar.logging import get_logger
|
8
|
+
from planar.session import get_session
|
9
|
+
from planar.utils import R, T, U
|
10
|
+
from planar.workflows.context import ExecutionContext, delete_context, set_context
|
11
|
+
from planar.workflows.exceptions import try_restore_exception
|
12
|
+
from planar.workflows.lock import lock_workflow
|
13
|
+
from planar.workflows.models import Workflow, WorkflowStatus
|
14
|
+
from planar.workflows.serialization import (
|
15
|
+
deserialize_args,
|
16
|
+
deserialize_result,
|
17
|
+
serialize_result,
|
18
|
+
)
|
19
|
+
from planar.workflows.step_core import Suspend
|
20
|
+
from planar.workflows.tracing import trace
|
21
|
+
|
22
|
+
_DEFAULT_LOCK_DURATION = timedelta(minutes=10)
|
23
|
+
_WORKFLOW_FUNCTION_REGISTRY: WeakValueDictionary[str, Callable[..., Any]] = (
|
24
|
+
WeakValueDictionary()
|
25
|
+
)
|
26
|
+
|
27
|
+
logger = get_logger(__name__)
|
28
|
+
|
29
|
+
|
30
|
+
class YieldWrapper:
    """Awaitable that yields a raw value outward and evaluates to whatever
    the driver sends back in.

    Used by the stepper to forward non-Suspend yields to the real event loop
    while keeping manual control of the coroutine.
    """

    def __init__(self, value):
        self.value = value

    def __await__(self):
        # Yield the wrapped value to whoever is driving this coroutine and
        # resolve the await to the value sent back.
        sent_back = yield self.value
        return sent_back
|
36
|
+
|
37
|
+
|
38
|
+
def register_workflow_function(name: str, func: Callable[..., Any]):
    """Record *func* under *name* in the module-level workflow registry.

    The registry holds weak references, so entries disappear once the
    function object is otherwise garbage-collected.
    """
    _WORKFLOW_FUNCTION_REGISTRY[name] = func
|
40
|
+
|
41
|
+
|
42
|
+
class StepperResult(Generic[R]):
    """Outcome of driving a workflow coroutine.

    Exactly one side is meaningful: ``value`` when the coroutine ran to
    completion, or ``suspend`` when it yielded a Suspend marker.
    """

    def __init__(self, *, value: R | None, suspend: Suspend | None):
        self.value = value
        self.suspend = suspend
|
46
|
+
|
47
|
+
|
48
|
+
async def stepper(coro: Coroutine[T, U, R]) -> StepperResult[R]:
    """Drive *coro* manually, intercepting Suspend markers.

    Iterates the coroutine through its raw ``__await__`` protocol. A yielded
    ``Suspend`` instance stops the drive and is returned in
    ``StepperResult.suspend``; any other yielded value is forwarded to the
    real event loop via ``YieldWrapper`` and the awaited result is sent back
    into the coroutine. Normal completion is captured from ``StopIteration``
    and returned in ``StepperResult.value``.
    """
    logger.debug("stepper started")
    coroutine_iterator = coro.__await__()
    try:
        yielded = next(coroutine_iterator)  # Start the coroutine
        while True:
            if isinstance(yielded, Suspend):
                # The workflow asked to suspend: hand the marker to the caller
                # instead of forwarding it to the event loop.
                logger.debug("stepper encountered suspend")
                return StepperResult(value=None, suspend=yielded)
            else:
                try:
                    # Forward the yielded awaitable/value to the actual event
                    # loop and wait for its result.
                    result = await YieldWrapper(yielded)
                except BaseException as e:
                    # if an exception is raised by the event loop
                    # (most likely a cancellation), propagate it to the coroutine
                    logger.debug(
                        "stepper propagating exception to coroutine",
                        exception_type=type(e).__name__,
                    )
                    yielded = coroutine_iterator.throw(e)
                    continue
                # send the result back to the coroutine
                yielded = coroutine_iterator.send(result)
    except StopIteration as e:
        # Coroutine finished normally; its return value rides on the
        # StopIteration.
        logger.debug("stepper finished with stopiteration")
        return StepperResult(value=e.value, suspend=None)
|
74
|
+
|
75
|
+
|
76
|
+
def workflow_result(workflow: Workflow):
    """Materialize the outcome of a finished workflow.

    Returns the deserialized result for a SUCCEEDED workflow; re-raises the
    restored exception for a FAILED one. Calling this on a workflow in any
    other state is a programming error (guarded by the trailing assert).
    """
    if workflow.status == WorkflowStatus.SUCCEEDED:
        fn = _WORKFLOW_FUNCTION_REGISTRY[workflow.function_name]
        return deserialize_result(fn, workflow.result)
    if workflow.status == WorkflowStatus.FAILED:
        assert workflow.error
        raise try_restore_exception(workflow.error)
    assert False, "May only be called on finished workflows"
|
84
|
+
|
85
|
+
|
86
|
+
async def execute(workflow: Workflow):
    """Run one execution pass of *workflow* until it suspends or finishes.

    Deserializes the stored args, installs an ExecutionContext, drives the
    workflow function through ``stepper``, and records the outcome on the
    workflow row (SUCCEEDED with a serialized result, FAILED with a
    serialized error, or PENDING when suspended). The finally block always
    tears down the context, commits the session, and fires the matching
    notification after the commit.

    Returns the workflow's return value on completion, or the Suspend
    marker when it suspended. Re-raises any exception the workflow body
    raised after recording it.
    """
    logger.debug(
        "executing workflow",
        workflow_id=workflow.id,
        function_name=workflow.function_name,
    )
    session = get_session()
    original_fn = _WORKFLOW_FUNCTION_REGISTRY[workflow.function_name]
    serialized_args = workflow.args or []
    serialized_kwargs = workflow.kwargs or {}
    args, kwargs = deserialize_args(original_fn, serialized_args, serialized_kwargs)

    # Cache the workflow id here to avoid "Was IO attempted in an unexpected
    # place?" SQLAlchemy errors when accessing expired attributes in an
    # AsyncSession.
    #
    # Even though we unconditionally set expire_on_commit=True on
    # PlanarSession, this is still necessary because SQLAlchemy will expire all
    # attributes of the workflow object on a session rollback. More details:
    # https://github.com/sqlalchemy/sqlalchemy/discussions/8282#discussioncomment-3213994
    workflow_id = workflow.id
    set_context(
        ExecutionContext(
            workflow=workflow,
            workflow_id=workflow_id,
        )
    )
    logger.debug("execution context set for workflow", workflow_id=workflow_id)

    try:
        stepper_result = await stepper(original_fn(*args, **kwargs))
        logger.debug(
            "stepper result for workflow",
            workflow_id=workflow_id,
            has_suspend=stepper_result.suspend is not None,
            has_value=stepper_result.value is not None,
        )
        if stepper_result.suspend:
            if stepper_result.suspend.exception:
                # A suspend carrying an exception is treated as a failure:
                # re-raise so the except branch below records it.
                logger.error(
                    "workflow suspended due to an exception from stepper",
                    workflow_id=workflow_id,
                    exception=str(stepper_result.suspend.exception),
                )
                raise stepper_result.suspend.exception
            # Plain suspend: mark the workflow runnable again later.
            workflow.status = WorkflowStatus.PENDING
            logger.info(
                "workflow suspended",
                workflow_id=workflow_id,
                wakeup_at=workflow.wakeup_at,
                event_key=workflow.waiting_for_event,
            )
            return stepper_result.suspend
        # Ran to completion: persist the serialized result.
        workflow.status = WorkflowStatus.SUCCEEDED
        workflow.result = serialize_result(original_fn, stepper_result.value)
        logger.info(
            "workflow succeeded", workflow_id=workflow_id, result=workflow.result
        )
        return stepper_result.value
    except Exception as e:
        # Record the failure (type/message/traceback) before re-raising.
        logger.exception("exception during workflow execution", workflow_id=workflow_id)
        workflow.status = WorkflowStatus.FAILED
        workflow.error = {
            "type": type(e).__name__,
            "message": str(e),
            "traceback": str(traceback.format_exc()),
        }
        raise e
    finally:
        delete_context()
        logger.debug("execution context deleted for workflow", workflow_id=workflow_id)
        await session.commit()
        # notify after committing to the db
        if workflow.status == WorkflowStatus.SUCCEEDED:
            notifications.workflow_succeeded(workflow)
        elif workflow.status == WorkflowStatus.FAILED:
            notifications.workflow_failed(workflow)
        else:
            notifications.workflow_suspended(workflow)
|
165
|
+
|
166
|
+
|
167
|
+
async def lock_and_execute(
    workflow: Workflow,
    lock_duration: timedelta = _DEFAULT_LOCK_DURATION,
):
    """Acquire the workflow's execution lock, then run it via ``execute``.

    After the lock is held the workflow row is refreshed; if its status is no
    longer PENDING (another worker already finished it), the existing result
    is returned/raised via ``workflow_result`` instead of re-executing.

    Args:
        workflow: The workflow row to execute.
        lock_duration: How long each lock lease lasts before it must be renewed.
    """
    logger.debug("attempting to lock and execute workflow", workflow_id=workflow.id)
    session = get_session()

    async with lock_workflow(workflow, lock_duration):
        logger.debug("lock acquired for workflow", workflow_id=workflow.id)
        # Re-read the row under the lock: its status may have changed while
        # we were waiting to acquire it.
        async with session.begin_read():
            await session.refresh(workflow)

        if workflow.status != WorkflowStatus.PENDING:
            logger.info(
                "workflow is not pending, returning existing result",
                workflow_id=workflow.id,
                status=workflow.status,
            )
            return workflow_result(workflow)

        notifications.workflow_resumed(workflow)
        logger.info("workflow resumed", workflow_id=workflow.id)

        # Execute until the next suspend or completion
        result = await execute(workflow)
        await trace("return", workflow_id=workflow.id)
        logger.debug(
            "execution finished for workflow",
            workflow_id=workflow.id,
            result_type=type(result),
        )
        return result
|
planar/workflows/lock.py
ADDED
@@ -0,0 +1,229 @@
|
|
1
|
+
import asyncio
|
2
|
+
import traceback
|
3
|
+
from contextlib import asynccontextmanager
|
4
|
+
from datetime import timedelta
|
5
|
+
|
6
|
+
from sqlalchemy.exc import IntegrityError, OperationalError
|
7
|
+
from sqlalchemy.orm.exc import StaleDataError, UnmappedInstanceError
|
8
|
+
|
9
|
+
from planar.db import new_session
|
10
|
+
from planar.logging import get_logger
|
11
|
+
from planar.session import engine_var, get_session
|
12
|
+
from planar.utils import utc_now
|
13
|
+
from planar.workflows.exceptions import LockResourceFailed
|
14
|
+
from planar.workflows.models import LockedResource, Workflow, workflow_exec_lock_key
|
15
|
+
from planar.workflows.tracing import trace
|
16
|
+
|
17
|
+
_DEFAULT_RETRY_INTERVAL_SECONDS = 5
|
18
|
+
_DEFAULT_RETRY_COUNT = 30 # with default retry interval, wait 2.5 minutes
|
19
|
+
|
20
|
+
logger = get_logger(__name__)
|
21
|
+
|
22
|
+
|
23
|
+
async def lock_heartbeat(
    lock: LockedResource,
    lock_duration: timedelta,
    lock_owner: asyncio.Task,
):
    """Keep a LockedResource lease alive on behalf of *lock_owner*.

    Runs in its own session (independent of the owner's), renewing the
    lease every ``lock_duration / 3``. If the renewal fails because another
    worker took the lock (StaleDataError) or for any unexpected reason, the
    owner task is cancelled — it is no longer safe for it to keep working on
    the resource. Exits quietly when cancelled by the owner itself.
    """
    lock_key = lock.lock_key
    engine = engine_var.get()
    # Renew three times per lease window so transient delays don't expire it.
    heartbeat_interval = lock_duration / 3
    await trace(
        "enter",
        lock_key=lock_key,
        lock_duration=lock_duration.total_seconds(),
        lock_heartbeat_interval=heartbeat_interval.total_seconds(),
    )
    logger.debug(
        "lock heartbeat started",
        lock_key=lock_key,
        duration_seconds=lock_duration.total_seconds(),
        interval_seconds=heartbeat_interval.total_seconds(),
    )
    # NOTE(review): a dedicated session is used here — the lock object was
    # expunged from the owner's session before being handed over.
    async with new_session(engine) as session:
        try:
            async with session.begin():
                session.add(lock)
            while True:
                try:
                    sleep_seconds = heartbeat_interval.total_seconds()
                    await trace("sleep", lock_key=lock_key, sleep_seconds=sleep_seconds)
                    await asyncio.sleep(sleep_seconds)
                    # Renew the lock
                    async with session.begin():
                        lock.lock_until = utc_now() + lock_duration
                        await trace(
                            "renew-lock",
                            lock_key=lock_key,
                            lock_until=lock.lock_until,
                        )
                        logger.debug(
                            "lock renewed",
                            lock_key=lock_key,
                            lock_until=lock.lock_until,
                        )
                    await trace("commit", lock_key=lock_key)
                except StaleDataError:
                    logger.exception(
                        "stale data error in lock heartbeat, cancelling owner task",
                        lock_key=lock_key,
                    )
                    await trace("stale-data-error", lock_key=lock_key)
                    # This would happen if the process paused for too long and some
                    # other worker acquired the lock. Some possible causes:
                    #
                    # - "stop the world" GC that took too long
                    # - Some network call took too long. For example, imagine that
                    #   `await session.commit()` took minutes to return after the
                    #   changes were actually committed
                    #
                    # No matter what the cause was (and however unlikely it is to
                    # happen in practice), it is no longer safe to continue
                    # processing this resource. Kill the main task.
                    lock_owner.cancel()
                    break
                except asyncio.CancelledError:
                    logger.debug("lock heartbeat cancelled by owner", lock_key=lock_key)
                    # Cancelled by the lock owner
                    break
                except Exception:
                    logger.exception(
                        "exception in lock heartbeat, cancelling owner task",
                        lock_key=lock_key,
                    )

                    await trace(
                        "exception",
                        lock_key=lock_key,
                        traceback=traceback.format_exc(),
                    )
                    # similarly to the `StaleDataError`, kill the main task
                    lock_owner.cancel()
                    break
        finally:
            # ensure the lock object is detached from the session
            try:
                session.expunge(lock)
            except UnmappedInstanceError:
                # it is possible that the lock was not added to the session yet
                pass
            await trace("exit", lock_key=lock_key)
            logger.debug("lock heartbeat stopped", lock_key=lock_key)
|
112
|
+
|
113
|
+
|
114
|
+
@asynccontextmanager
async def lock_resource(
    lock_key: str,
    lock_duration: timedelta,
    retry_count: int = _DEFAULT_RETRY_COUNT,
    retry_interval_seconds: int = _DEFAULT_RETRY_INTERVAL_SECONDS,
):
    """Acquire a database-backed exclusive lock on ``lock_key`` for the block.

    The lock is taken by inserting a ``LockedResource`` row; a conflicting
    insert (another holder) surfaces as ``IntegrityError``/``OperationalError``
    and is retried up to ``retry_count`` times, sleeping
    ``retry_interval_seconds`` between attempts.  While the body runs, a
    background heartbeat task periodically extends ``lock_until``; if the
    heartbeat detects the lock was lost it cancels the current task.

    Args:
        lock_key: Unique key identifying the resource to lock.
        lock_duration: How long each (re)acquisition of the lock is valid for.
        retry_count: Number of additional acquisition attempts after the first.
        retry_interval_seconds: Delay between acquisition attempts.

    Raises:
        LockResourceFailed: If the lock could not be acquired after all
            ``retry_count + 1`` attempts.
    """
    assert retry_count >= 0
    await trace("enter", lock_key=lock_key)
    logger.debug(
        "attempting to lock resource",
        lock_key=lock_key,
        duration_seconds=lock_duration.total_seconds(),
        retries=retry_count,
    )
    session = get_session()

    lock = None
    # Count down so `remaining` is the number of retries still available.
    for remaining in range(retry_count, -1, -1):
        try:
            async with session.begin():
                lock = LockedResource(
                    lock_key=lock_key,
                    lock_until=utc_now() + lock_duration,
                )
                session.add(lock)
                await trace("add-locked-resource", lock_key=lock_key)
            # Reaching this point means the INSERT committed successfully.
            await trace("commit")
            logger.info(
                "resource locked", lock_key=lock_key, lock_until=lock.lock_until
            )
            # This LockedResource instance will be passed to the heartbeat task
            # which will use a different session to manage it.
            session.expunge(lock)
            break
        except (OperationalError, IntegrityError) as e:
            # Most likely another worker holds the lock (unique-key conflict).
            logger.exception(
                "failed to acquire lock for resource on attempt",
                lock_key=lock_key,
                attempt=retry_count - remaining + 1,
            )
            await trace("add-locked-resource-error", lock_key=lock_key, error=str(e))
        finally:
            # ensure the session is ready for re-use after an exception
            # (a no-op after a successful commit)
            await session.rollback()
        # Only reached on failure (`break` above skips this on success).
        lock = None
        await trace("retry", lock_key=lock_key, remaining_retry_count=remaining)
        if remaining > 0:
            logger.debug(
                "retrying lock for resource",
                lock_key=lock_key,
                retry_interval_seconds=retry_interval_seconds,
                retries_left=remaining,
            )
            await asyncio.sleep(retry_interval_seconds)

    if lock is None:
        # Every attempt failed; surface the failure to the caller.
        logger.warning(
            "failed to lock resource after all attempts",
            lock_key=lock_key,
            attempts=retry_count + 1,
        )
        await trace("no-remaining-retries", lock_key=lock_key)
        raise LockResourceFailed(f'Failed to lock resource "{lock_key}"')

    # Start the heartbeat to renew the lock periodically
    await trace("start-heartbeat", lock_key=lock_key)
    current_task = asyncio.current_task()
    assert current_task
    # The heartbeat is handed the current task so it can cancel us if the
    # lock is lost (e.g. StaleDataError while renewing).
    heartbeat_task = asyncio.create_task(
        lock_heartbeat(lock, lock_duration, current_task)
    )

    try:
        await trace("yield", lock_key=lock_key)
        yield
    finally:
        # Stop the heartbeat
        await trace("cancel-heartbeat", lock_key=lock_key)
        heartbeat_task.cancel()
        try:
            await heartbeat_task
        except asyncio.CancelledError:
            pass
        if session.in_transaction():
            # Session should not be in a transaction here. This is probably the
            # result of an exception which was not handled by calling rollback.
            # We'll do it here because we need to release the lock, which has
            # to be done in another transaction, but leave a warning in the
            # logs
            logger.warning(
                "session is still in transaction, rolling back", lock_key=lock_key
            )
            await session.rollback()
        # Re-attach the (expunged) lock instance and delete its row to
        # release the lock for other workers.
        async with session.begin():
            session.add(lock)
            await session.delete(lock)
        await trace("release-lock", lock_key=lock_key)
        await trace("exit", lock_key=lock_key)
|
213
|
+
|
214
|
+
|
215
|
+
@asynccontextmanager
async def lock_workflow(
    workflow: Workflow,
    lock_duration: timedelta,
    retry_count: int = _DEFAULT_RETRY_COUNT,
    retry_interval_seconds: int = _DEFAULT_RETRY_INTERVAL_SECONDS,
):
    """Hold the execution lock for ``workflow`` while the block runs.

    Thin convenience wrapper: derives the lock key from the workflow id via
    ``workflow_exec_lock_key`` and delegates acquisition, heartbeat renewal,
    and release entirely to ``lock_resource``.

    Args:
        workflow: Workflow whose execution should be serialized.
        lock_duration: Validity window for each (re)acquisition of the lock.
        retry_count: Additional acquisition attempts after the first.
        retry_interval_seconds: Delay between acquisition attempts.
    """
    async with lock_resource(
        workflow_exec_lock_key(workflow.id),
        lock_duration,
        retry_count=retry_count,
        retry_interval_seconds=retry_interval_seconds,
    ):
        yield