edda-framework 0.10.0__py3-none-any.whl → 0.12.0__py3-none-any.whl
This diff covers publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
- edda/app.py +203 -35
- edda/channels.py +57 -12
- edda/context.py +24 -0
- edda/integrations/mirascope/__init__.py +78 -0
- edda/integrations/mirascope/agent.py +467 -0
- edda/integrations/mirascope/call.py +166 -0
- edda/integrations/mirascope/decorator.py +163 -0
- edda/integrations/mirascope/types.py +268 -0
- edda/migrations/mysql/20251217000000_initial_schema.sql +284 -0
- edda/migrations/postgresql/20251217000000_initial_schema.sql +284 -0
- edda/migrations/sqlite/20251217000000_initial_schema.sql +284 -0
- edda/outbox/relayer.py +34 -7
- edda/storage/migrations.py +435 -0
- edda/storage/models.py +2 -0
- edda/storage/pg_notify.py +5 -8
- edda/storage/sqlalchemy_storage.py +97 -61
- {edda_framework-0.10.0.dist-info → edda_framework-0.12.0.dist-info}/METADATA +47 -3
- {edda_framework-0.10.0.dist-info → edda_framework-0.12.0.dist-info}/RECORD +21 -12
- {edda_framework-0.10.0.dist-info → edda_framework-0.12.0.dist-info}/WHEEL +0 -0
- {edda_framework-0.10.0.dist-info → edda_framework-0.12.0.dist-info}/entry_points.txt +0 -0
- {edda_framework-0.10.0.dist-info → edda_framework-0.12.0.dist-info}/licenses/LICENSE +0 -0
edda/migrations/sqlite/20251217000000_initial_schema.sql
ADDED
@@ -0,0 +1,284 @@
+-- migrate:up
+
+-- Schema version tracking
+CREATE TABLE IF NOT EXISTS schema_version (
+    version INTEGER PRIMARY KEY,
+    applied_at TEXT NOT NULL DEFAULT (datetime('now')),
+    description TEXT NOT NULL
+);
+
+-- Workflow definitions (source code storage)
+CREATE TABLE IF NOT EXISTS workflow_definitions (
+    workflow_name TEXT NOT NULL,
+    source_hash TEXT NOT NULL,
+    source_code TEXT NOT NULL,
+    created_at TEXT NOT NULL DEFAULT (datetime('now')),
+    PRIMARY KEY (workflow_name, source_hash)
+);
+
+CREATE INDEX IF NOT EXISTS idx_definitions_name ON workflow_definitions(workflow_name);
+CREATE INDEX IF NOT EXISTS idx_definitions_hash ON workflow_definitions(source_hash);
+
+-- Workflow instances with distributed locking support
+CREATE TABLE IF NOT EXISTS workflow_instances (
+    instance_id TEXT PRIMARY KEY,
+    workflow_name TEXT NOT NULL,
+    source_hash TEXT NOT NULL,
+    owner_service TEXT NOT NULL,
+    framework TEXT NOT NULL DEFAULT 'python',
+    status TEXT NOT NULL DEFAULT 'running',
+    current_activity_id TEXT,
+    continued_from TEXT,
+    started_at TEXT NOT NULL DEFAULT (datetime('now')),
+    updated_at TEXT NOT NULL DEFAULT (datetime('now')),
+    input_data TEXT NOT NULL,
+    output_data TEXT,
+    locked_by TEXT,
+    locked_at TEXT,
+    lock_timeout_seconds INTEGER,
+    lock_expires_at TEXT,
+    CONSTRAINT valid_status CHECK (
+        status IN ('running', 'completed', 'failed', 'waiting_for_event', 'waiting_for_timer', 'waiting_for_message', 'compensating', 'cancelled', 'recurred')
+    ),
+    FOREIGN KEY (continued_from) REFERENCES workflow_instances(instance_id),
+    FOREIGN KEY (workflow_name, source_hash) REFERENCES workflow_definitions(workflow_name, source_hash)
+);
+
+CREATE INDEX IF NOT EXISTS idx_instances_status ON workflow_instances(status);
+CREATE INDEX IF NOT EXISTS idx_instances_workflow ON workflow_instances(workflow_name);
+CREATE INDEX IF NOT EXISTS idx_instances_owner ON workflow_instances(owner_service);
+CREATE INDEX IF NOT EXISTS idx_instances_framework ON workflow_instances(framework);
+CREATE INDEX IF NOT EXISTS idx_instances_locked ON workflow_instances(locked_by, locked_at);
+CREATE INDEX IF NOT EXISTS idx_instances_lock_expires ON workflow_instances(lock_expires_at);
+CREATE INDEX IF NOT EXISTS idx_instances_updated ON workflow_instances(updated_at);
+CREATE INDEX IF NOT EXISTS idx_instances_hash ON workflow_instances(source_hash);
+CREATE INDEX IF NOT EXISTS idx_instances_continued_from ON workflow_instances(continued_from);
+CREATE INDEX IF NOT EXISTS idx_instances_resumable ON workflow_instances(status, locked_by);
+
+-- Workflow execution history (for deterministic replay)
+CREATE TABLE IF NOT EXISTS workflow_history (
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
+    instance_id TEXT NOT NULL,
+    activity_id TEXT NOT NULL,
+    event_type TEXT NOT NULL,
+    data_type TEXT NOT NULL DEFAULT 'json',
+    event_data TEXT,
+    event_data_binary BLOB,
+    created_at TEXT NOT NULL DEFAULT (datetime('now')),
+    FOREIGN KEY (instance_id) REFERENCES workflow_instances(instance_id) ON DELETE CASCADE,
+    CONSTRAINT unique_instance_activity UNIQUE (instance_id, activity_id)
+);
+
+CREATE INDEX IF NOT EXISTS idx_history_instance ON workflow_history(instance_id, activity_id);
+CREATE INDEX IF NOT EXISTS idx_history_created ON workflow_history(created_at);
+
+-- Archived workflow history (for recur pattern)
+CREATE TABLE IF NOT EXISTS workflow_history_archive (
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
+    instance_id TEXT NOT NULL,
+    activity_id TEXT NOT NULL,
+    event_type TEXT NOT NULL,
+    data_type TEXT NOT NULL DEFAULT 'json',
+    event_data TEXT,
+    event_data_binary BLOB,
+    created_at TEXT NOT NULL,
+    archived_at TEXT NOT NULL DEFAULT (datetime('now')),
+    FOREIGN KEY (instance_id) REFERENCES workflow_instances(instance_id) ON DELETE CASCADE
+);
+
+CREATE INDEX IF NOT EXISTS idx_history_archive_instance ON workflow_history_archive(instance_id);
+CREATE INDEX IF NOT EXISTS idx_history_archive_archived ON workflow_history_archive(archived_at);
+
+-- Compensation transactions (LIFO stack for Saga pattern)
+CREATE TABLE IF NOT EXISTS workflow_compensations (
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
+    instance_id TEXT NOT NULL,
+    activity_id TEXT NOT NULL,
+    activity_name TEXT NOT NULL,
+    args TEXT NOT NULL,
+    created_at TEXT NOT NULL DEFAULT (datetime('now')),
+    FOREIGN KEY (instance_id) REFERENCES workflow_instances(instance_id) ON DELETE CASCADE
+);
+
+CREATE INDEX IF NOT EXISTS idx_compensations_instance ON workflow_compensations(instance_id, created_at DESC);
+
+-- Timer subscriptions (for wait_timer)
+CREATE TABLE IF NOT EXISTS workflow_timer_subscriptions (
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
+    instance_id TEXT NOT NULL,
+    timer_id TEXT NOT NULL,
+    expires_at TEXT NOT NULL,
+    activity_id TEXT,
+    created_at TEXT NOT NULL DEFAULT (datetime('now')),
+    FOREIGN KEY (instance_id) REFERENCES workflow_instances(instance_id) ON DELETE CASCADE,
+    CONSTRAINT unique_instance_timer UNIQUE (instance_id, timer_id)
+);
+
+CREATE INDEX IF NOT EXISTS idx_timer_subscriptions_expires ON workflow_timer_subscriptions(expires_at);
+CREATE INDEX IF NOT EXISTS idx_timer_subscriptions_instance ON workflow_timer_subscriptions(instance_id);
+
+-- Group memberships (Erlang pg style)
+CREATE TABLE IF NOT EXISTS workflow_group_memberships (
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
+    instance_id TEXT NOT NULL,
+    group_name TEXT NOT NULL,
+    joined_at TEXT NOT NULL DEFAULT (datetime('now')),
+    FOREIGN KEY (instance_id) REFERENCES workflow_instances(instance_id) ON DELETE CASCADE,
+    CONSTRAINT unique_instance_group UNIQUE (instance_id, group_name)
+);
+
+CREATE INDEX IF NOT EXISTS idx_group_memberships_group ON workflow_group_memberships(group_name);
+CREATE INDEX IF NOT EXISTS idx_group_memberships_instance ON workflow_group_memberships(instance_id);
+
+-- Transactional outbox pattern
+CREATE TABLE IF NOT EXISTS outbox_events (
+    event_id TEXT PRIMARY KEY,
+    event_type TEXT NOT NULL,
+    event_source TEXT NOT NULL,
+    data_type TEXT NOT NULL DEFAULT 'json',
+    event_data TEXT,
+    event_data_binary BLOB,
+    content_type TEXT NOT NULL DEFAULT 'application/json',
+    created_at TEXT NOT NULL DEFAULT (datetime('now')),
+    published_at TEXT,
+    status TEXT NOT NULL DEFAULT 'pending',
+    retry_count INTEGER DEFAULT 0,
+    last_error TEXT,
+    CONSTRAINT valid_outbox_status CHECK (status IN ('pending', 'processing', 'published', 'failed', 'invalid', 'expired'))
+);
+
+CREATE INDEX IF NOT EXISTS idx_outbox_status ON outbox_events(status, created_at);
+CREATE INDEX IF NOT EXISTS idx_outbox_retry ON outbox_events(status, retry_count);
+CREATE INDEX IF NOT EXISTS idx_outbox_published ON outbox_events(published_at);
+
+-- Channel messages (persistent message queue)
+CREATE TABLE IF NOT EXISTS channel_messages (
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
+    channel TEXT NOT NULL,
+    message_id TEXT NOT NULL UNIQUE,
+    data_type TEXT NOT NULL,
+    data TEXT,
+    data_binary BLOB,
+    metadata TEXT,
+    published_at TEXT NOT NULL DEFAULT (datetime('now')),
+    CONSTRAINT valid_data_type CHECK (data_type IN ('json', 'binary')),
+    CONSTRAINT data_type_consistency CHECK (
+        (data_type = 'json' AND data IS NOT NULL AND data_binary IS NULL) OR
+        (data_type = 'binary' AND data IS NULL AND data_binary IS NOT NULL)
+    )
+);
+
+CREATE INDEX IF NOT EXISTS idx_channel_messages_channel ON channel_messages(channel, published_at);
+CREATE INDEX IF NOT EXISTS idx_channel_messages_id ON channel_messages(id);
+
+-- Channel subscriptions
+CREATE TABLE IF NOT EXISTS channel_subscriptions (
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
+    instance_id TEXT NOT NULL,
+    channel TEXT NOT NULL,
+    mode TEXT NOT NULL,
+    activity_id TEXT,
+    cursor_message_id INTEGER,
+    timeout_at TEXT,
+    subscribed_at TEXT NOT NULL DEFAULT (datetime('now')),
+    FOREIGN KEY (instance_id) REFERENCES workflow_instances(instance_id) ON DELETE CASCADE,
+    CONSTRAINT valid_mode CHECK (mode IN ('broadcast', 'competing')),
+    CONSTRAINT unique_instance_channel UNIQUE (instance_id, channel)
+);
+
+CREATE INDEX IF NOT EXISTS idx_channel_subscriptions_channel ON channel_subscriptions(channel);
+CREATE INDEX IF NOT EXISTS idx_channel_subscriptions_instance ON channel_subscriptions(instance_id);
+CREATE INDEX IF NOT EXISTS idx_channel_subscriptions_waiting ON channel_subscriptions(channel, activity_id);
+CREATE INDEX IF NOT EXISTS idx_channel_subscriptions_timeout ON channel_subscriptions(timeout_at);
+
+-- Channel delivery cursors (broadcast mode: track who read what)
+CREATE TABLE IF NOT EXISTS channel_delivery_cursors (
+    id INTEGER PRIMARY KEY AUTOINCREMENT,
+    channel TEXT NOT NULL,
+    instance_id TEXT NOT NULL,
+    last_delivered_id INTEGER NOT NULL,
+    updated_at TEXT NOT NULL DEFAULT (datetime('now')),
+    FOREIGN KEY (instance_id) REFERENCES workflow_instances(instance_id) ON DELETE CASCADE,
+    CONSTRAINT unique_channel_instance UNIQUE (channel, instance_id)
+);
+
+CREATE INDEX IF NOT EXISTS idx_channel_delivery_cursors_channel ON channel_delivery_cursors(channel);
+
+-- Channel message claims (competing mode: who is processing what)
+CREATE TABLE IF NOT EXISTS channel_message_claims (
+    message_id TEXT PRIMARY KEY,
+    instance_id TEXT NOT NULL,
+    claimed_at TEXT NOT NULL DEFAULT (datetime('now')),
+    FOREIGN KEY (message_id) REFERENCES channel_messages(message_id) ON DELETE CASCADE,
+    FOREIGN KEY (instance_id) REFERENCES workflow_instances(instance_id) ON DELETE CASCADE
+);
+
+CREATE INDEX IF NOT EXISTS idx_channel_message_claims_instance ON channel_message_claims(instance_id);
+
+-- System locks (for coordinating background tasks across pods)
+CREATE TABLE IF NOT EXISTS system_locks (
+    lock_name TEXT PRIMARY KEY,
+    locked_by TEXT,
+    locked_at TEXT,
+    lock_expires_at TEXT
+);
+
+CREATE INDEX IF NOT EXISTS idx_system_locks_expires ON system_locks(lock_expires_at);
+
+
+-- migrate:down
+
+DROP INDEX IF EXISTS idx_channel_message_claims_instance;
+DROP TABLE IF EXISTS channel_message_claims;
+
+DROP INDEX IF EXISTS idx_channel_delivery_cursors_channel;
+DROP TABLE IF EXISTS channel_delivery_cursors;
+
+DROP INDEX IF EXISTS idx_channel_subscriptions_waiting;
+DROP INDEX IF EXISTS idx_channel_subscriptions_instance;
+DROP INDEX IF EXISTS idx_channel_subscriptions_channel;
+DROP TABLE IF EXISTS channel_subscriptions;
+
+DROP INDEX IF EXISTS idx_channel_messages_id;
+DROP INDEX IF EXISTS idx_channel_messages_channel;
+DROP TABLE IF EXISTS channel_messages;
+
+DROP INDEX IF EXISTS idx_outbox_published;
+DROP INDEX IF EXISTS idx_outbox_retry;
+DROP INDEX IF EXISTS idx_outbox_status;
+DROP TABLE IF EXISTS outbox_events;
+
+DROP INDEX IF EXISTS idx_group_memberships_instance;
+DROP INDEX IF EXISTS idx_group_memberships_group;
+DROP TABLE IF EXISTS workflow_group_memberships;
+
+DROP INDEX IF EXISTS idx_timer_subscriptions_instance;
+DROP INDEX IF EXISTS idx_timer_subscriptions_expires;
+DROP TABLE IF EXISTS workflow_timer_subscriptions;
+
+DROP INDEX IF EXISTS idx_compensations_instance;
+DROP TABLE IF EXISTS workflow_compensations;
+
+DROP INDEX IF EXISTS idx_history_archive_archived;
+DROP INDEX IF EXISTS idx_history_archive_instance;
+DROP TABLE IF EXISTS workflow_history_archive;
+
+DROP INDEX IF EXISTS idx_history_created;
+DROP INDEX IF EXISTS idx_history_instance;
+DROP TABLE IF EXISTS workflow_history;
+
+DROP INDEX IF EXISTS idx_instances_continued_from;
+DROP INDEX IF EXISTS idx_instances_hash;
+DROP INDEX IF EXISTS idx_instances_updated;
+DROP INDEX IF EXISTS idx_instances_locked;
+DROP INDEX IF EXISTS idx_instances_framework;
+DROP INDEX IF EXISTS idx_instances_owner;
+DROP INDEX IF EXISTS idx_instances_workflow;
+DROP INDEX IF EXISTS idx_instances_status;
+DROP TABLE IF EXISTS workflow_instances;
+
+DROP INDEX IF EXISTS idx_definitions_hash;
+DROP INDEX IF EXISTS idx_definitions_name;
+DROP TABLE IF EXISTS workflow_definitions;
+
+DROP TABLE IF EXISTS schema_version;
edda/outbox/relayer.py
CHANGED
@@ -8,6 +8,7 @@ outbox events and publishes them to a Message Broker as CloudEvents.
 import asyncio
 import contextlib
 import logging
+import random
 from datetime import UTC, datetime, timedelta
 from typing import TYPE_CHECKING, Any
 
@@ -121,46 +122,70 @@ class OutboxRelayer:
 
     async def _poll_loop(self) -> None:
         """
-        Main polling loop.
+        Main polling loop with adaptive backoff.
 
         Continuously polls the database for pending events and publishes them.
         When wake_event is provided (PostgreSQL NOTIFY integration), wakes up
-        immediately on notification, otherwise
+        immediately on notification, otherwise uses poll_interval as fallback.
+
+        Adaptive backoff behavior:
+        - When no events are found, exponentially backs off up to 30 seconds
+        - When events are processed, resets to base poll_interval
+        - When woken by NOTIFY, resets backoff
        """
+        consecutive_empty = 0
+
         while self._running:
             try:
-                await self._poll_and_publish()
+                count = await self._poll_and_publish()
+                if count == 0:
+                    consecutive_empty += 1
+                else:
+                    consecutive_empty = 0
             except Exception as e:
                 logger.error(f"Error in outbox relayer poll loop: {e}")
+                consecutive_empty = 0  # Reset on error
+
+            # Adaptive backoff calculation
+            if consecutive_empty > 0:
+                # Exponential backoff: 2s, 4s, 8s, 16s, max 30s (with poll_interval=1)
+                backoff = min(self.poll_interval * (2 ** min(consecutive_empty, 4)), 30.0)
+            else:
+                backoff = self.poll_interval
+            jitter = random.uniform(0, backoff * 0.3)
 
             # Wait before next poll (with optional NOTIFY wake)
             if self._wake_event is not None:
                 try:
                     await asyncio.wait_for(
                         self._wake_event.wait(),
-                        timeout=
+                        timeout=backoff + jitter,
                     )
                     # Clear the event for next notification
                     self._wake_event.clear()
+                    consecutive_empty = 0  # Reset on NOTIFY wake
                     logger.debug("Outbox relayer woken by NOTIFY")
                 except TimeoutError:
                     # Fallback polling timeout reached
                     pass
             else:
-                await asyncio.sleep(
+                await asyncio.sleep(backoff + jitter)
 
-    async def _poll_and_publish(self) ->
+    async def _poll_and_publish(self) -> int:
         """
         Poll for pending events and publish them.
 
         Fetches a batch of pending events from the database and attempts
         to publish each one to the Message Broker.
+
+        Returns:
+            Number of events processed
         """
         # Get pending events
         events = await self.storage.get_pending_outbox_events(limit=self.batch_size)
 
         if not events:
-            return
+            return 0
 
         logger.debug(f"Processing {len(events)} pending outbox events")
 
@@ -174,6 +199,8 @@ class OutboxRelayer:
                     exc_info=True,
                 )
 
+        return len(events)
+
     async def _publish_event(self, event: dict[str, Any]) -> None:
         """
         Publish a single outbox event to the Message Broker.