edda-framework 0.8.0-py3-none-any.whl → 0.9.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- edda/channels.py +34 -9
- edda/context.py +28 -1
- edda/storage/models.py +0 -23
- edda/storage/protocol.py +18 -36
- edda/storage/sqlalchemy_storage.py +110 -226
- {edda_framework-0.8.0.dist-info → edda_framework-0.9.0.dist-info}/METADATA +10 -66
- {edda_framework-0.8.0.dist-info → edda_framework-0.9.0.dist-info}/RECORD +10 -10
- {edda_framework-0.8.0.dist-info → edda_framework-0.9.0.dist-info}/WHEEL +0 -0
- {edda_framework-0.8.0.dist-info → edda_framework-0.9.0.dist-info}/entry_points.txt +0 -0
- {edda_framework-0.8.0.dist-info → edda_framework-0.9.0.dist-info}/licenses/LICENSE +0 -0
edda/channels.py
CHANGED
@@ -445,15 +445,40 @@ async def publish(
     message_id = await storage.publish_to_channel(channel, data, full_metadata)
 
     # Wake up waiting subscribers
-
-
-
-
-
-
-
-
-
+    # If in a transaction, defer delivery until after commit to ensure atomicity
+    if storage.in_transaction():
+        # Capture current values for the closure
+        _storage = storage
+        _channel = channel
+        _message_id = message_id
+        _data = data
+        _metadata = full_metadata
+        _target_instance_id = target_instance_id
+        _worker_id = effective_worker_id
+
+        async def deferred_wake() -> None:
+            await _wake_waiting_subscribers(
+                _storage,
+                _channel,
+                _message_id,
+                _data,
+                _metadata,
+                target_instance_id=_target_instance_id,
+                worker_id=_worker_id,
+            )
+
+        storage.register_post_commit_callback(deferred_wake)
+    else:
+        # Not in transaction - deliver immediately
+        await _wake_waiting_subscribers(
+            storage,
+            channel,
+            message_id,
+            data,
+            full_metadata,
+            target_instance_id=target_instance_id,
+            worker_id=effective_worker_id,
+        )
 
     return message_id
 
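For callers, the practical effect of this change is that `publish()` inside a workflow transaction persists the message immediately but delays waking subscribers until the transaction commits. A minimal usage sketch (not taken from the diff; `reserve_inventory` is a hypothetical helper and the channel name is illustrative):

```python
from edda import workflow, publish, WorkflowContext


async def reserve_inventory(ctx: WorkflowContext, order_id: str) -> None:
    """Hypothetical stand-in for real transactional work done before publishing."""
    ...


@workflow
async def place_order(ctx: WorkflowContext, order_id: str):
    async with ctx.transaction():
        await reserve_inventory(ctx, order_id)
        # The message row is written here, but storage.in_transaction() is True,
        # so _wake_waiting_subscribers runs as a post-commit callback instead.
        await publish(ctx, channel="order.events", data={"order_id": order_id})
    # Only after the commit above are waiting subscribers woken; a rollback would
    # have skipped delivery entirely.
```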
edda/context.py
CHANGED
@@ -5,7 +5,7 @@ This module provides the WorkflowContext class for workflow execution,
 managing state, history, and replay during workflow execution.
 """
 
-from collections.abc import AsyncIterator
+from collections.abc import AsyncIterator, Awaitable, Callable
 from contextlib import asynccontextmanager
 from typing import TYPE_CHECKING, Any, cast
 
@@ -451,6 +451,33 @@ class WorkflowContext:
         """
         return self.storage.in_transaction()
 
+    def register_post_commit(self, callback: Callable[[], Awaitable[None]]) -> None:
+        """
+        Register a callback to be executed after the current transaction commits.
+
+        The callback will be executed after the top-level transaction commits successfully.
+        If the transaction is rolled back, the callback will NOT be executed.
+        This is useful for deferring side effects (like message delivery) until after
+        the transaction has been committed.
+
+        Args:
+            callback: An async function to call after commit.
+
+        Raises:
+            RuntimeError: If not in a transaction.
+
+        Example:
+            async with ctx.transaction():
+                # Save order to database
+                await ctx.storage.append_history(...)
+
+                # Defer message delivery until after commit
+                async def deliver_notifications():
+                    await notify_subscribers(order_id)
+                ctx.register_post_commit(deliver_notifications)
+        """
+        self.storage.register_post_commit_callback(callback)
+
     async def recur(self, **kwargs: Any) -> None:
         """
         Restart the workflow with fresh history (Erlang-style tail recursion).
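The docstring above shows the happy path; the rollback behavior is the other half of the contract. A short sketch, with a hypothetical `notify_billing` side effect, showing that the callback is dropped when the transaction fails:

```python
from edda import workflow, WorkflowContext


@workflow
async def close_invoice(ctx: WorkflowContext, invoice_id: str):
    try:
        async with ctx.transaction():
            async def notify_billing() -> None:
                # Hypothetical side effect; runs only after a successful commit.
                print(f"invoice {invoice_id} closed")

            ctx.register_post_commit(notify_billing)  # RuntimeError outside a transaction
            raise ValueError("something went wrong")  # forces a rollback
    except ValueError:
        pass
    # The transaction rolled back, so notify_billing() was never executed.
```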
edda/storage/models.py
CHANGED
@@ -136,27 +136,6 @@ WORKFLOW_TIMER_SUBSCRIPTIONS_INDEXES = [
     "CREATE INDEX IF NOT EXISTS idx_timer_subscriptions_instance ON workflow_timer_subscriptions(instance_id);",
 ]
 
-# SQL schema for message subscriptions (for wait_message)
-WORKFLOW_MESSAGE_SUBSCRIPTIONS_TABLE = """
-CREATE TABLE IF NOT EXISTS workflow_message_subscriptions (
-    id INTEGER PRIMARY KEY AUTOINCREMENT,
-    instance_id TEXT NOT NULL,
-    channel TEXT NOT NULL,
-    activity_id TEXT,
-    timeout_at TEXT,
-    created_at TEXT NOT NULL DEFAULT (datetime('now')),
-    FOREIGN KEY (instance_id) REFERENCES workflow_instances(instance_id) ON DELETE CASCADE,
-    CONSTRAINT unique_instance_channel UNIQUE (instance_id, channel)
-);
-"""
-
-# Indexes for message subscriptions
-WORKFLOW_MESSAGE_SUBSCRIPTIONS_INDEXES = [
-    "CREATE INDEX IF NOT EXISTS idx_message_subscriptions_channel ON workflow_message_subscriptions(channel);",
-    "CREATE INDEX IF NOT EXISTS idx_message_subscriptions_timeout ON workflow_message_subscriptions(timeout_at);",
-    "CREATE INDEX IF NOT EXISTS idx_message_subscriptions_instance ON workflow_message_subscriptions(instance_id);",
-]
-
 # SQL schema for group memberships (Erlang pg style)
 WORKFLOW_GROUP_MEMBERSHIPS_TABLE = """
 CREATE TABLE IF NOT EXISTS workflow_group_memberships (
@@ -306,7 +285,6 @@ ALL_TABLES = [
     WORKFLOW_HISTORY_ARCHIVE_TABLE,
     WORKFLOW_COMPENSATIONS_TABLE,
     WORKFLOW_TIMER_SUBSCRIPTIONS_TABLE,
-    WORKFLOW_MESSAGE_SUBSCRIPTIONS_TABLE,
     WORKFLOW_GROUP_MEMBERSHIPS_TABLE,
     OUTBOX_EVENTS_TABLE,
     # Channel-based Message Queue System
@@ -324,7 +302,6 @@ ALL_INDEXES = (
     + WORKFLOW_HISTORY_ARCHIVE_INDEXES
     + WORKFLOW_COMPENSATIONS_INDEXES
     + WORKFLOW_TIMER_SUBSCRIPTIONS_INDEXES
-    + WORKFLOW_MESSAGE_SUBSCRIPTIONS_INDEXES
    + WORKFLOW_GROUP_MEMBERSHIPS_INDEXES
    + OUTBOX_EVENTS_INDEXES
     # Channel-based Message Queue System
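Note that 0.9.0 only stops creating `workflow_message_subscriptions`; the diff contains no migration that drops an existing table. If you manage schema changes yourself and want to remove the now-unused table from an SQLite database, a minimal sketch (an assumption about your deployment, not something the package does) could look like this:

```python
import sqlite3


def drop_legacy_message_subscriptions(db_path: str) -> None:
    # The table's indexes are removed together with the table in SQLite.
    with sqlite3.connect(db_path) as conn:
        conn.execute("DROP TABLE IF EXISTS workflow_message_subscriptions;")


drop_legacy_message_subscriptions("edda.db")
```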
edda/storage/protocol.py
CHANGED
@@ -5,6 +5,7 @@ This module defines the StorageProtocol using Python's structural typing (Protoc
 Any storage implementation that conforms to this protocol can be used with Edda.
 """
 
+from collections.abc import Awaitable, Callable
 from datetime import datetime
 from typing import TYPE_CHECKING, Any, Protocol, runtime_checkable
 
@@ -104,6 +105,21 @@ class StorageProtocol(Protocol):
         """
         ...
 
+    def register_post_commit_callback(self, callback: Callable[[], Awaitable[None]]) -> None:
+        """
+        Register a callback to be executed after the current transaction commits.
+
+        The callback will be executed after the top-level transaction commits successfully.
+        If the transaction is rolled back, the callback will NOT be executed.
+
+        Args:
+            callback: An async function to call after commit.
+
+        Raises:
+            RuntimeError: If not in a transaction.
+        """
+        ...
+
     # -------------------------------------------------------------------------
     # Workflow Definition Methods
     # -------------------------------------------------------------------------
@@ -713,39 +729,6 @@ class StorageProtocol(Protocol):
     # Message Subscription Methods (for wait_message)
     # -------------------------------------------------------------------------
 
-    async def register_message_subscription_and_release_lock(
-        self,
-        instance_id: str,
-        worker_id: str,
-        channel: str,
-        timeout_at: datetime | None = None,
-        activity_id: str | None = None,
-    ) -> None:
-        """
-        Atomically register message subscription and release workflow lock.
-
-        This method performs the following operations in a SINGLE database transaction:
-        1. Register message subscription (INSERT into workflow_message_subscriptions)
-        2. Update current activity (UPDATE workflow_instances.current_activity_id)
-        3. Update status to 'waiting_for_event'
-        4. Release lock (UPDATE workflow_instances set locked_by=NULL)
-
-        This ensures that when a workflow calls wait_message(), the subscription is
-        registered and the lock is released atomically, preventing race conditions
-        in distributed environments (distributed coroutines pattern).
-
-        Args:
-            instance_id: Workflow instance ID
-            worker_id: Worker ID that currently holds the lock
-            channel: Channel name to wait on
-            timeout_at: Optional timeout timestamp
-            activity_id: Current activity ID to record
-
-        Raises:
-            RuntimeError: If the worker doesn't hold the lock (sanity check)
-        """
-        ...
-
     async def find_waiting_instances_by_channel(
         self,
         channel: str,
@@ -911,9 +894,8 @@
 
         Removes entries from:
         - workflow_timer_subscriptions
-        -
-        -
-        - channel_message_claims (new)
+        - channel_subscriptions
+        - channel_message_claims
 
         Args:
             instance_id: Workflow instance ID to clean up
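Because `StorageProtocol` is a structural protocol, any backend now needs `register_post_commit_callback()` alongside `in_transaction()`. A hedged sketch of the shape a custom (non-SQLAlchemy) backend might take; the `_depth`/`_callbacks` attributes and `_commit_top_level()` method are illustrative assumptions, not part of edda:

```python
from collections.abc import Awaitable, Callable


class InMemoryTransactionMixin:
    """Illustrative stub showing the commit-then-run-callbacks contract."""

    def __init__(self) -> None:
        self._depth = 0
        self._callbacks: list[Callable[[], Awaitable[None]]] = []

    def in_transaction(self) -> bool:
        return self._depth > 0

    def register_post_commit_callback(self, callback: Callable[[], Awaitable[None]]) -> None:
        if not self.in_transaction():
            raise RuntimeError("Not in a transaction")
        self._callbacks.append(callback)

    async def _commit_top_level(self) -> None:
        # The real commit would happen here; callbacks run only afterwards,
        # outside the transaction, and are dropped on rollback.
        callbacks, self._callbacks = self._callbacks, []
        self._depth = 0
        for cb in callbacks:
            await cb()
```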
edda/storage/sqlalchemy_storage.py
CHANGED
@@ -8,7 +8,7 @@ and transactional outbox pattern.
 
 import json
 import logging
-from collections.abc import AsyncIterator
+from collections.abc import AsyncIterator, Awaitable, Callable
 from contextlib import asynccontextmanager
 from contextvars import ContextVar
 from dataclasses import dataclass, field
@@ -285,31 +285,6 @@ class OutboxEvent(Base):
     )
 
 
-class WorkflowMessageSubscription(Base):
-    """Message subscriptions (for wait_message)."""
-
-    __tablename__ = "workflow_message_subscriptions"
-
-    id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True)
-    instance_id: Mapped[str] = mapped_column(String(255))
-    channel: Mapped[str] = mapped_column(String(255))
-    activity_id: Mapped[str | None] = mapped_column(String(255), nullable=True)
-    timeout_at: Mapped[datetime | None] = mapped_column(DateTime(timezone=True), nullable=True)
-    created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), server_default=func.now())
-
-    __table_args__ = (
-        ForeignKeyConstraint(
-            ["instance_id"],
-            ["workflow_instances.instance_id"],
-            ondelete="CASCADE",
-        ),
-        UniqueConstraint("instance_id", "channel", name="unique_instance_channel"),
-        Index("idx_message_subscriptions_channel", "channel"),
-        Index("idx_message_subscriptions_timeout", "timeout_at"),
-        Index("idx_message_subscriptions_instance", "instance_id"),
-    )
-
-
 class WorkflowGroupMembership(Base):
     """Group memberships (Erlang pg style for broadcast messaging)."""
 
@@ -508,6 +483,9 @@ class TransactionContext:
     session: "AsyncSession | None" = None
     """The actual session for this transaction"""
 
+    post_commit_callbacks: list[Callable[[], Awaitable[None]]] = field(default_factory=list)
+    """Callbacks to execute after successful top-level commit"""
+
 
 # Context variable for transaction state (asyncio-safe)
 _transaction_context: ContextVar[TransactionContext | None] = ContextVar(
@@ -861,7 +839,7 @@ class SQLAlchemyStorage:
         Example:
             >>> # SQLite: datetime(timeout_at) <= datetime('now')
             >>> # PostgreSQL/MySQL: timeout_at <= NOW()
-            >>> self._make_datetime_comparable(
+            >>> self._make_datetime_comparable(ChannelSubscription.timeout_at)
             >>> <= self._get_current_time_expr()
         """
         if self.engine.dialect.name == "sqlite":
@@ -913,11 +891,16 @@ class SQLAlchemyStorage:
         if ctx is None or ctx.depth == 0:
             raise RuntimeError("Not in a transaction")
 
+        # Capture callbacks before any state changes
+        callbacks_to_execute: list[Callable[[], Awaitable[None]]] = []
+
         if ctx.depth == 1:
             # Top-level transaction - commit the session
             logger.debug("Committing top-level transaction")
             await ctx.session.commit()  # type: ignore[union-attr]
             await ctx.session.close()  # type: ignore[union-attr]
+            # Capture callbacks to execute after clearing context
+            callbacks_to_execute = ctx.post_commit_callbacks.copy()
         else:
             # Nested transaction - commit the savepoint
             nested_tx = ctx.savepoint_stack.pop()
@@ -929,6 +912,12 @@
         if ctx.depth == 0:
             # All transactions completed - clear context
             _transaction_context.set(None)
+            # Execute post-commit callbacks after successful top-level commit
+            for callback in callbacks_to_execute:
+                try:
+                    await callback()
+                except Exception as e:
+                    logger.error(f"Post-commit callback failed: {e}")
 
     async def rollback_transaction(self) -> None:
         """
@@ -968,6 +957,26 @@
         ctx = _transaction_context.get()
         return ctx is not None and ctx.depth > 0
 
+    def register_post_commit_callback(self, callback: Callable[[], Awaitable[None]]) -> None:
+        """
+        Register a callback to be executed after the current transaction commits.
+
+        The callback will be executed after the top-level transaction commits successfully.
+        If the transaction is rolled back, the callback will NOT be executed.
+        If not in a transaction, the callback will be executed immediately.
+
+        Args:
+            callback: An async function to call after commit.
+
+        Raises:
+            RuntimeError: If not in a transaction.
+        """
+        ctx = _transaction_context.get()
+        if ctx is None or ctx.depth == 0:
+            raise RuntimeError("Cannot register post-commit callback: not in a transaction")
+        ctx.post_commit_callbacks.append(callback)
+        logger.debug(f"Registered post-commit callback: {callback}")
+
     async def _commit_if_not_in_transaction(self, session: AsyncSession) -> None:
         """
         Commit session if not in a transaction (auto-commit mode).
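Two properties of the new `commit_transaction()` are easy to miss: callbacks are copied only at the top-level commit (savepoint commits never fire them), and a failing callback is logged rather than re-raised, so it cannot undo the commit. A standalone sketch of that error-handling loop (plain Python, not edda code):

```python
import asyncio
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("post_commit_sketch")


async def run_post_commit(callbacks) -> None:
    for callback in callbacks:
        try:
            await callback()
        except Exception as e:  # mirrors the diff: log and keep going
            logger.error(f"Post-commit callback failed: {e}")


async def main() -> None:
    async def deliver() -> None:
        print("deliver message")

    async def broken() -> None:
        raise RuntimeError("subscriber wake-up failed")

    # Both run after the (already successful) commit; the failure is only logged.
    await run_post_commit([deliver, broken])


asyncio.run(main())
```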
@@ -2247,12 +2256,17 @@
         Only running or waiting_for_event workflows can be cancelled.
         This method atomically:
         1. Checks current status
-        2. Updates status to 'cancelled' if allowed
+        2. Updates status to 'cancelled' if allowed (with atomic status check)
         3. Clears locks
         4. Records cancellation metadata
         5. Removes event subscriptions (if waiting for event)
         6. Removes timer subscriptions (if waiting for timer)
 
+        The UPDATE includes a status condition in WHERE clause to prevent
+        TOCTOU (time-of-check to time-of-use) race conditions. If the status
+        changes between SELECT and UPDATE, the UPDATE will affect 0 rows
+        and the cancellation will fail safely.
+
         Args:
             instance_id: Workflow instance to cancel
             cancelled_by: Who/what triggered the cancellation
@@ -2262,6 +2276,14 @@
 
         Note: Uses LOCK operation session (separate from external session).
         """
+        cancellable_statuses = (
+            "running",
+            "waiting_for_event",
+            "waiting_for_timer",
+            "waiting_for_message",
+            "compensating",
+        )
+
         session = self._get_session_for_operation(is_lock_operation=True)
         async with self._session_scope(session) as session, session.begin():
             # Get current instance status
@@ -2278,35 +2300,43 @@
 
             # Only allow cancellation of running, waiting, or compensating workflows
             # compensating workflows can be marked as cancelled after compensation completes
-            if current_status not in (
-                "running",
-                "waiting_for_event",
-                "waiting_for_timer",
-                "waiting_for_message",
-                "compensating",
-            ):
+            if current_status not in cancellable_statuses:
                 # Already completed, failed, or cancelled
                 return False
 
             # Update status to cancelled and record metadata
+            # IMPORTANT: Include status condition in WHERE clause to prevent TOCTOU race
+            # If another worker changed the status between SELECT and UPDATE,
+            # this UPDATE will affect 0 rows and we'll return False
             cancellation_metadata = {
                 "cancelled_by": cancelled_by,
                 "cancelled_at": datetime.now(UTC).isoformat(),
                 "previous_status": current_status,
             }
 
-            await session.execute(
+            update_result = await session.execute(
                 update(WorkflowInstance)
-                .where(
+                .where(
+                    and_(
+                        WorkflowInstance.instance_id == instance_id,
+                        WorkflowInstance.status == current_status,  # Atomic check
+                    )
+                )
                 .values(
                     status="cancelled",
                     output_data=json.dumps(cancellation_metadata),
                     locked_by=None,
                     locked_at=None,
+                    lock_expires_at=None,
                     updated_at=func.now(),
                 )
             )
 
+            if update_result.rowcount == 0:  # type: ignore[attr-defined]
+                # Status changed between SELECT and UPDATE (race condition)
+                # Another worker may have resumed/modified the workflow
+                return False
+
             # Remove timer subscriptions if waiting for timer
             if current_status == "waiting_for_timer":
                 await session.execute(
@@ -2315,12 +2345,13 @@
                     )
                 )
 
-            #
-
-
-
+            # Clear channel subscriptions if waiting for event/message
+            if current_status in ("waiting_for_event", "waiting_for_message"):
+                await session.execute(
+                    update(ChannelSubscription)
+                    .where(ChannelSubscription.instance_id == instance_id)
+                    .values(activity_id=None, timeout_at=None)
                 )
-            )
 
             return True
 
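The cancellation change is a classic compare-and-set: the status read earlier is repeated in the UPDATE's WHERE clause, and `rowcount` decides whether this worker won the race. A generic SQLAlchemy 2.0 sketch of the same pattern (the `Job` model is hypothetical, not an edda table):

```python
from sqlalchemy import String, and_, update
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column


class Base(DeclarativeBase):
    pass


class Job(Base):
    """Hypothetical table used only to illustrate the TOCTOU guard."""

    __tablename__ = "jobs"

    id: Mapped[str] = mapped_column(String(64), primary_key=True)
    status: Mapped[str] = mapped_column(String(32))


async def cancel_job(session, job_id: str, expected_status: str) -> bool:
    result = await session.execute(
        update(Job)
        .where(and_(Job.id == job_id, Job.status == expected_status))  # atomic check
        .values(status="cancelled")
    )
    # 0 rows means another worker changed the status between our read and this write.
    return result.rowcount > 0
```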
@@ -2328,100 +2359,6 @@
     # Message Subscription Methods
     # -------------------------------------------------------------------------
 
-    async def register_message_subscription_and_release_lock(
-        self,
-        instance_id: str,
-        worker_id: str,
-        channel: str,
-        timeout_at: datetime | None = None,
-        activity_id: str | None = None,
-    ) -> None:
-        """
-        Atomically register a message subscription and release the workflow lock.
-
-        This is called when a workflow executes wait_message() and needs to:
-        1. Verify lock ownership (RuntimeError if mismatch - full rollback)
-        2. Register a subscription for the channel
-        3. Update the workflow status to waiting_for_message
-        4. Release the lock
-
-        All operations happen in a single transaction for atomicity.
-
-        Args:
-            instance_id: Workflow instance ID
-            worker_id: Worker ID that must hold the current lock (verified before release)
-            channel: Channel name to subscribe to
-            timeout_at: Optional absolute timeout time
-            activity_id: Activity ID for the wait operation
-
-        Raises:
-            RuntimeError: If the worker does not hold the lock (entire operation rolls back)
-        """
-        session = self._get_session_for_operation(is_lock_operation=True)
-        async with self._session_scope(session) as session, session.begin():
-            # 1. Verify we hold the lock (sanity check - fail fast if not)
-            result = await session.execute(
-                select(WorkflowInstance.locked_by).where(
-                    WorkflowInstance.instance_id == instance_id
-                )
-            )
-            row = result.one_or_none()
-
-            if row is None:
-                raise RuntimeError(f"Workflow instance {instance_id} not found")
-
-            current_lock_holder = row[0]
-            if current_lock_holder != worker_id:
-                raise RuntimeError(
-                    f"Cannot release lock: worker {worker_id} does not hold lock "
-                    f"for {instance_id} (held by: {current_lock_holder})"
-                )
-
-            # 2. Register subscription (delete then insert for cross-database compatibility)
-            await session.execute(
-                delete(WorkflowMessageSubscription).where(
-                    and_(
-                        WorkflowMessageSubscription.instance_id == instance_id,
-                        WorkflowMessageSubscription.channel == channel,
-                    )
-                )
-            )
-
-            subscription = WorkflowMessageSubscription(
-                instance_id=instance_id,
-                channel=channel,
-                activity_id=activity_id,
-                timeout_at=timeout_at,
-            )
-            session.add(subscription)
-
-            # 3. Update workflow status and activity
-            await session.execute(
-                update(WorkflowInstance)
-                .where(WorkflowInstance.instance_id == instance_id)
-                .values(
-                    status="waiting_for_message",
-                    current_activity_id=activity_id,
-                    updated_at=func.now(),
-                )
-            )
-
-            # 4. Release the lock (with ownership check for extra safety)
-            await session.execute(
-                update(WorkflowInstance)
-                .where(
-                    and_(
-                        WorkflowInstance.instance_id == instance_id,
-                        WorkflowInstance.locked_by == worker_id,
-                    )
-                )
-                .values(
-                    locked_by=None,
-                    locked_at=None,
-                    lock_expires_at=None,
-                )
-            )
-
     async def find_waiting_instances_by_channel(self, channel: str) -> list[dict[str, Any]]:
         """
         Find all workflow instances waiting on a specific channel.
@@ -2434,9 +2371,13 @@
         """
         session = self._get_session_for_operation()
         async with self._session_scope(session) as session:
+            # Query ChannelSubscription table for waiting instances
             result = await session.execute(
-                select(
-
+                select(ChannelSubscription).where(
+                    and_(
+                        ChannelSubscription.channel == channel,
+                        ChannelSubscription.activity_id.isnot(None),  # Only waiting subscriptions
+                    )
                 )
             )
             subscriptions = result.scalars().all()
@@ -2446,7 +2387,7 @@
                     "channel": sub.channel,
                     "activity_id": sub.activity_id,
                     "timeout_at": sub.timeout_at.isoformat() if sub.timeout_at else None,
-                    "created_at": sub.
+                    "created_at": sub.subscribed_at.isoformat() if sub.subscribed_at else None,
                 }
                 for sub in subscriptions
             ]
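The rewritten lookups all lean on one convention: a `ChannelSubscription` row with a non-NULL `activity_id` means the workflow is currently blocked waiting on that channel, and clearing `activity_id`/`timeout_at` marks the subscription idle without deleting it. A small sketch of that filter, assuming `ChannelSubscription` is importable from `edda.storage.sqlalchemy_storage` as the diff suggests:

```python
from sqlalchemy import and_, select

from edda.storage.sqlalchemy_storage import ChannelSubscription


def waiting_on_channel_stmt(channel: str):
    """SELECT statement for subscriptions that are actively waiting on a channel."""
    return select(ChannelSubscription).where(
        and_(
            ChannelSubscription.channel == channel,
            ChannelSubscription.activity_id.isnot(None),  # only waiting subscriptions
        )
    )
```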
@@ -2459,8 +2400,7 @@
         """
         Remove a message subscription.
 
-        This method
-        and clears waiting state from the ChannelSubscription table.
+        This method clears waiting state from the ChannelSubscription table.
 
         Args:
             instance_id: Workflow instance ID
@@ -2468,16 +2408,6 @@
         """
         session = self._get_session_for_operation()
         async with self._session_scope(session) as session:
-            # Remove from legacy WorkflowMessageSubscription table
-            await session.execute(
-                delete(WorkflowMessageSubscription).where(
-                    and_(
-                        WorkflowMessageSubscription.instance_id == instance_id,
-                        WorkflowMessageSubscription.channel == channel,
-                    )
-                )
-            )
-
             # Clear waiting state from ChannelSubscription table
             # Don't delete the subscription - just clear the waiting state
             await session.execute(
@@ -2527,10 +2457,11 @@
         session = self._get_session_for_operation()
         async with self._session_scope(session) as session:
             result = await session.execute(
-                select(
+                select(ChannelSubscription).where(
                     and_(
-
-
+                        ChannelSubscription.instance_id == instance_id,
+                        ChannelSubscription.channel == channel,
+                        ChannelSubscription.activity_id.isnot(None),  # Only waiting subscriptions
                     )
                 )
             )
@@ -2555,10 +2486,13 @@
         async with self._session_scope(session) as session:
             # Re-check subscription (may have been removed by another worker)
             result = await session.execute(
-                select(
+                select(ChannelSubscription).where(
                     and_(
-
-
+                        ChannelSubscription.instance_id == instance_id,
+                        ChannelSubscription.channel == channel,
+                        ChannelSubscription.activity_id.isnot(
+                            None
+                        ),  # Only waiting subscriptions
                     )
                 )
             )
@@ -2607,14 +2541,16 @@
             )
             session.add(history_entry)
 
-            #
+            # Clear waiting state from subscription (don't delete)
             await session.execute(
-
+                update(ChannelSubscription)
+                .where(
                     and_(
-
-
+                        ChannelSubscription.instance_id == instance_id,
+                        ChannelSubscription.channel == channel,
                    )
                )
+                .values(activity_id=None, timeout_at=None)
             )
 
             # Update status to 'running' (ready for resumption)
@@ -2641,8 +2577,6 @@
         """
         Find all message subscriptions that have timed out.
 
-        This method queries both the legacy WorkflowMessageSubscription table and
-        the ChannelSubscription table for expired subscriptions.
         JOINs with WorkflowInstance to ensure instance exists and avoid N+1 queries.
 
         Returns:
@@ -2650,47 +2584,8 @@
         """
         session = self._get_session_for_operation()
         async with self._session_scope(session) as session:
-            results: list[dict[str, Any]] = []
-
-            # Query legacy WorkflowMessageSubscription table with JOIN to verify instance exists
-            legacy_result = await session.execute(
-                select(
-                    WorkflowMessageSubscription.instance_id,
-                    WorkflowMessageSubscription.channel,
-                    WorkflowMessageSubscription.activity_id,
-                    WorkflowMessageSubscription.timeout_at,
-                    WorkflowMessageSubscription.created_at,
-                    WorkflowInstance.workflow_name,
-                )
-                .join(
-                    WorkflowInstance,
-                    WorkflowMessageSubscription.instance_id == WorkflowInstance.instance_id,
-                )
-                .where(
-                    and_(
-                        WorkflowMessageSubscription.timeout_at.isnot(None),
-                        self._make_datetime_comparable(WorkflowMessageSubscription.timeout_at)
-                        <= self._get_current_time_expr(),
-                    )
-                )
-            )
-            legacy_rows = legacy_result.all()
-            results.extend(
-                [
-                    {
-                        "instance_id": row[0],
-                        "channel": row[1],
-                        "activity_id": row[2],
-                        "timeout_at": row[3],
-                        "created_at": row[4],
-                        "workflow_name": row[5],
-                    }
-                    for row in legacy_rows
-                ]
-            )
-
             # Query ChannelSubscription table with JOIN
-
+            result = await session.execute(
                 select(
                     ChannelSubscription.instance_id,
                     ChannelSubscription.channel,
@@ -2712,22 +2607,18 @@
                     )
                 )
             )
-
-
-
-
-
-
-
-
-
-
-
-
-            ]
-            )
-
-            return results
+            rows = result.all()
+            return [
+                {
+                    "instance_id": row[0],
+                    "channel": row[1],
+                    "activity_id": row[2],
+                    "timeout_at": row[3],
+                    "created_at": row[4],  # subscribed_at as created_at for compatibility
+                    "workflow_name": row[5],
+                }
+                for row in rows
+            ]
 
     # -------------------------------------------------------------------------
     # Group Membership Methods (Erlang pg style)
@@ -2882,13 +2773,6 @@
             )
         )
 
-        # Remove message subscriptions (legacy)
-        await session.execute(
-            delete(WorkflowMessageSubscription).where(
-                WorkflowMessageSubscription.instance_id == instance_id
-            )
-        )
-
         # Remove channel subscriptions
         await session.execute(
             delete(ChannelSubscription).where(ChannelSubscription.instance_id == instance_id)
{edda_framework-0.8.0.dist-info → edda_framework-0.9.0.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: edda-framework
-Version: 0.8.0
+Version: 0.9.0
 Summary: Lightweight Durable Execution Framework
 Project-URL: Homepage, https://github.com/i2y/edda
 Project-URL: Documentation, https://github.com/i2y/edda#readme
@@ -65,6 +65,7 @@ Description-Content-Type: text/markdown
 [](https://opensource.org/licenses/MIT)
 [](https://www.python.org/downloads/)
 [](https://i2y.github.io/edda/)
+[](https://deepwiki.com/i2y/edda)
 
 ## Overview
 
@@ -665,72 +666,12 @@ async def order_with_timeout(ctx: WorkflowContext, order_id: str):
 
 **For technical details**, see [Multi-Worker Continuations](local-docs/distributed-coroutines.md).
 
-###
+### Channel-based Messaging
 
-Edda provides
+Edda provides channel-based messaging for workflow-to-workflow communication with two delivery modes:
 
 ```python
-from edda import workflow,
-
-# Receiver workflow - waits for approval message
-@workflow
-async def approval_workflow(ctx: WorkflowContext, request_id: str):
-    # Wait for message on "approval" channel
-    msg = await wait_message(ctx, channel="approval")
-
-    if msg.data["approved"]:
-        return {"status": "approved", "approver": msg.data["approver"]}
-    return {"status": "rejected"}
-
-# Sender workflow - sends approval decision
-@workflow
-async def manager_workflow(ctx: WorkflowContext, request_id: str):
-    # Review and make decision
-    decision = await review_request(ctx, request_id)
-
-    # Send message to waiting workflow
-    await send_message_to(
-        ctx,
-        target_instance_id=request_id,
-        channel="approval",
-        data={"approved": decision, "approver": "manager-123"},
-    )
-```
-
-**Group Communication (Erlang pg style)** - for fan-out messaging without knowing receiver instance IDs:
-
-```python
-from edda import workflow, join_group, wait_message, publish_to_group
-
-# Receiver workflow - joins a group and listens
-@workflow
-async def notification_service(ctx: WorkflowContext, service_id: str):
-    # Join group at startup (loose coupling - sender doesn't need to know us)
-    await join_group(ctx, group="order_watchers")
-
-    while True:
-        msg = await wait_message(ctx, channel="order.created")
-        await send_notification(ctx, msg.data)
-
-# Sender workflow - publishes to all group members
-@workflow
-async def order_processor(ctx: WorkflowContext, order_id: str):
-    result = await process_order(ctx, order_id)
-
-    # Broadcast to all watchers (doesn't need to know instance IDs)
-    count = await publish_to_group(
-        ctx,
-        group="order_watchers",
-        channel="order.created",
-        data={"order_id": order_id, "status": "completed"},
-    )
-    print(f"Notified {count} watchers")
-```
-
-**Channel API with Delivery Modes** - subscribe to channels with explicit delivery semantics:
-
-```python
-from edda import workflow, subscribe, receive, publish, WorkflowContext
+from edda import workflow, subscribe, receive, publish, send_to, WorkflowContext
 
 # Job Worker - processes jobs exclusively (competing mode)
 @workflow
@@ -754,8 +695,11 @@ async def notification_handler(ctx: WorkflowContext, handler_id: str):
         await send_notification(ctx, msg.data)
     await ctx.recur(handler_id)
 
-#
+# Publish to channel (all subscribers or one competing subscriber)
 await publish(ctx, channel="jobs", data={"task": "send_report"})
+
+# Direct message to specific workflow instance
+await send_to(ctx, instance_id="workflow-123", channel="approval", data={"approved": True})
 ```
 
 **Delivery modes**:
@@ -765,7 +709,7 @@ await publish(ctx, channel="jobs", data={"task": "send_report"})
 **Key features**:
 - **Channel-based messaging**: Messages are delivered to workflows waiting on specific channels
 - **Competing vs Broadcast**: Choose semantics per subscription
-- **
+- **Direct messaging**: `send_to()` for workflow-to-workflow communication
 - **Database-backed**: All messages are persisted for durability
 - **Lock-first delivery**: Safe for multi-worker environments
 
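Pulling together the two calls the 0.9.0 README keeps (`publish` and the new `send_to`), a short combined sketch; the surrounding workflow and the `approval_workflow_id` value are illustrative:

```python
from edda import workflow, publish, send_to, WorkflowContext


@workflow
async def dispatcher(ctx: WorkflowContext, approval_workflow_id: str):
    # Fan-out: delivered to all broadcast subscribers, or one competing subscriber.
    await publish(ctx, channel="jobs", data={"task": "send_report"})

    # Point-to-point: delivered directly to one known workflow instance.
    await send_to(
        ctx,
        instance_id=approval_workflow_id,
        channel="approval",
        data={"approved": True},
    )
```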
{edda_framework-0.8.0.dist-info → edda_framework-0.9.0.dist-info}/RECORD
CHANGED
@@ -1,9 +1,9 @@
 edda/__init__.py,sha256=hGC6WR2R36M8LWC97F-0Rw4Ln0QUUT_1xC-7acOy_Fk,2237
 edda/activity.py,sha256=nRm9eBrr0lFe4ZRQ2whyZ6mo5xd171ITIVhqytUhOpw,21025
 edda/app.py,sha256=kZ-VEvjIe3GjUA8RhT6OimuezNyPf2IhrvQ2kL44zJs,45201
-edda/channels.py,sha256=
+edda/channels.py,sha256=Budi0FyxalmcAMwj50mX3WzRce5OuLKXGws0Hp_snfw,34745
 edda/compensation.py,sha256=iKLlnTxiF1YSatmYQW84EkPB1yMKUEZBtgjuGnghLtY,11824
-edda/context.py,sha256=
+edda/context.py,sha256=IavmrbCdTAozP4QWlQ5-rCHR9yJAT-aohqyrOnbVLBU,20858
 edda/exceptions.py,sha256=-ntBLGpVQgPFG5N1o8m_7weejAYkNrUdxTkOP38vsHk,1766
 edda/hooks.py,sha256=HUZ6FTM__DZjwuomDfTDEroQ3mugEPuJHcGm7CTQNvg,8193
 edda/locking.py,sha256=NAFJmw-JaSVsXn4Y4czJyv_s9bWG8cdrzDBWIEag5X8,13661
@@ -25,9 +25,9 @@ edda/serialization/__init__.py,sha256=hnOVJN-mJNIsSa_XH9jwhIydOsWvIfCaFaSd37HUpl
 edda/serialization/base.py,sha256=xJy2CY9gdJDCF0tmCor8NomL2Lr_w7cveVvxccuc-tA,1998
 edda/serialization/json.py,sha256=Dq96V4n1yozexjCPd_CL6Iuvh1u3jJhef6sTcNxXZeA,2842
 edda/storage/__init__.py,sha256=Q-kNJsjF8hMc2Q5MYFlLBENKExlNlKkbmUkwBOosj9I,216
-edda/storage/models.py,sha256=
-edda/storage/protocol.py,sha256=
-edda/storage/sqlalchemy_storage.py,sha256=
+edda/storage/models.py,sha256=vUwjiAOvp9uFNQgLK57kEGo7uzXplDZikOfnlOyed2M,12146
+edda/storage/protocol.py,sha256=NTUuLZ5_OlBiASaJIRuz5x7NykpCOjQgDWWNrRQzong,39021
+edda/storage/sqlalchemy_storage.py,sha256=KvSGapeKJ3hhClXNxFKHByD3Key5aidxBMUjs6-EJvE,136811
 edda/viewer_ui/__init__.py,sha256=N1-T33SXadOXcBsDSgJJ9Iqz4y4verJngWryQu70c5c,517
 edda/viewer_ui/app.py,sha256=CqHKsUj5pcysHCk0aRfkEqV4DIV4l3GzOPKBJ5DTYOQ,95624
 edda/viewer_ui/components.py,sha256=A0IxLwgj_Lu51O57OfzOwME8jzoJtKegEVvSnWc7uPo,45174
@@ -36,8 +36,8 @@ edda/viewer_ui/theme.py,sha256=mrXoXLRzgSnvE2a58LuMcPJkhlvHEDMWVa8Smqtk4l0,8118
 edda/visualizer/__init__.py,sha256=DOpDstNhR0VcXAs_eMKxaL30p_0u4PKZ4o2ndnYhiRo,343
 edda/visualizer/ast_analyzer.py,sha256=plmx7C9X_X35xLY80jxOL3ljg3afXxBePRZubqUIkxY,13663
 edda/visualizer/mermaid_generator.py,sha256=XWa2egoOTNDfJEjPcwoxwQmblUqXf7YInWFjFRI1QGo,12457
-edda_framework-0.
-edda_framework-0.
-edda_framework-0.
-edda_framework-0.
-edda_framework-0.
+edda_framework-0.9.0.dist-info/METADATA,sha256=esgoKFgUTWqAZWIHxgtKGl5j8VTaWiJw_oz93Dtm064,35741
+edda_framework-0.9.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+edda_framework-0.9.0.dist-info/entry_points.txt,sha256=dPH47s6UoJgUZxHoeSMqZsQkLaSE-SGLi-gh88k2WrU,48
+edda_framework-0.9.0.dist-info/licenses/LICENSE,sha256=udxb-V7_cYKTHqW7lNm48rxJ-Zpf0WAY_PyGDK9BPCo,1069
+edda_framework-0.9.0.dist-info/RECORD,,
{edda_framework-0.8.0.dist-info → edda_framework-0.9.0.dist-info}/WHEEL
File without changes
{edda_framework-0.8.0.dist-info → edda_framework-0.9.0.dist-info}/entry_points.txt
File without changes
{edda_framework-0.8.0.dist-info → edda_framework-0.9.0.dist-info}/licenses/LICENSE
File without changes