edda-framework 0.14.0__py3-none-any.whl → 0.14.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- edda/app.py +6 -21
- edda/locking.py +12 -37
- {edda_framework-0.14.0.dist-info → edda_framework-0.14.1.dist-info}/METADATA +1 -1
- {edda_framework-0.14.0.dist-info → edda_framework-0.14.1.dist-info}/RECORD +7 -7
- {edda_framework-0.14.0.dist-info → edda_framework-0.14.1.dist-info}/WHEEL +0 -0
- {edda_framework-0.14.0.dist-info → edda_framework-0.14.1.dist-info}/entry_points.txt +0 -0
- {edda_framework-0.14.0.dist-info → edda_framework-0.14.1.dist-info}/licenses/LICENSE +0 -0
edda/app.py
CHANGED

@@ -583,7 +583,6 @@ class EddaApp:
                 auto_resume_stale_workflows_periodically(
                     self.storage,
                     self.replay_engine,
-                    self.worker_id,
                     interval=60,
                 ),
                 name="leader_stale_workflow_resume",
@@ -628,7 +627,6 @@ class EddaApp:
                 auto_resume_stale_workflows_periodically(
                     self.storage,
                     self.replay_engine,
-                    self.worker_id,
                     interval=60,
                 ),
                 name="leader_stale_workflow_resume",
@@ -1411,7 +1409,8 @@ class EddaApp:
         from growing indefinitely with orphaned messages (messages that were
         published but never received by any subscriber).
 
-
+        Important: This task should only be run by a single worker (e.g., via leader
+        election). It does not perform its own distributed coordination.
 
         Args:
             interval: Cleanup interval in seconds (default: 3600 = 1 hour)
@@ -1422,27 +1421,13 @@ class EddaApp:
         """
         while True:
             try:
-                # Add jitter to prevent thundering herd
+                # Add jitter to prevent thundering herd
                 jitter = random.uniform(0, interval * 0.3)
                 await asyncio.sleep(interval + jitter)
 
-                # Try to acquire global lock for this task
-                lock_acquired = await self.storage.try_acquire_system_lock(
-                    lock_name="cleanup_old_messages",
-                    worker_id=self.worker_id,
-                    timeout_seconds=interval,
-                )
-
-                if not lock_acquired:
-                    # Another pod is handling this task
-                    continue
-
-                try:
-                    deleted_count = await self.storage.cleanup_old_channel_messages(retention_days)
-                    if deleted_count > 0:
-                        logger.info("Cleaned up %d old channel messages", deleted_count)
-                finally:
-                    await self.storage.release_system_lock("cleanup_old_messages", self.worker_id)
+                deleted_count = await self.storage.cleanup_old_channel_messages(retention_days)
+                if deleted_count > 0:
+                    logger.info("Cleaned up %d old channel messages", deleted_count)
             except Exception as e:
                 logger.error("Error cleaning up old messages: %s", e, exc_info=True)
 
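The new docstring above says the cleanup task must be run by a single worker (for example via leader election), because it no longer takes a system lock on each iteration. A minimal sketch of that pattern in application code, assuming only the storage.cleanup_old_channel_messages() call shown in the diff; the function name cleanup_old_messages_when_leader and the is_leader callable are hypothetical stand-ins for whatever leader-election mechanism the deployment already provides:

import asyncio
import logging
import random

logger = logging.getLogger(__name__)

async def cleanup_old_messages_when_leader(storage, is_leader, retention_days, interval=3600):
    # Sketch only: `is_leader` is a hypothetical zero-argument callable backed by the
    # deployment's own leader election; `storage` is assumed to expose
    # cleanup_old_channel_messages() as used in the diff above.
    while True:
        # Same jitter as the original loop, to avoid synchronized wake-ups.
        jitter = random.uniform(0, interval * 0.3)
        await asyncio.sleep(interval + jitter)

        if not is_leader():
            continue  # another worker owns the task this round

        try:
            deleted_count = await storage.cleanup_old_channel_messages(retention_days)
            if deleted_count > 0:
                logger.info("Cleaned up %d old channel messages", deleted_count)
        except Exception as e:
            logger.error("Error cleaning up old messages: %s", e, exc_info=True)

The jitter and error handling mirror the simplified 0.14.1 loop; only the is_leader() guard is layered on top.
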
edda/locking.py
CHANGED

@@ -192,7 +192,6 @@ async def _refresh_lock_periodically(
 
 async def cleanup_stale_locks_periodically(
     storage: StorageProtocol,
-    worker_id: str,
     interval: int = 60,
 ) -> None:
     """
@@ -204,49 +203,37 @@ async def cleanup_stale_locks_periodically(
     Note: This function only cleans up locks without resuming workflows.
     For automatic workflow resumption, use auto_resume_stale_workflows_periodically().
 
-
+    Important: This function should only be run by a single worker (e.g., via leader
+    election). It does not perform its own distributed coordination.
 
     Example:
         >>> asyncio.create_task(
-        ...     cleanup_stale_locks_periodically(storage,
+        ...     cleanup_stale_locks_periodically(storage, interval=60)
         ... )
 
     Args:
         storage: Storage backend
-        worker_id: Unique identifier for this worker (for global lock coordination)
         interval: Cleanup interval in seconds (default: 60)
     """
     with suppress(asyncio.CancelledError):
         while True:
-            # Add jitter to prevent thundering herd
+            # Add jitter to prevent thundering herd
            jitter = random.uniform(0, interval * 0.3)
            await asyncio.sleep(interval + jitter)
 
-            # Try to acquire global lock for this task
-            lock_acquired = await storage.try_acquire_system_lock(
-                lock_name="cleanup_stale_locks",
-                worker_id=worker_id,
-                timeout_seconds=interval,
-            )
-
-            if not lock_acquired:
-                # Another pod is handling this task
-                continue
-
            try:
                # Clean up stale locks
                workflows = await storage.cleanup_stale_locks()
 
                if len(workflows) > 0:
                    logger.info("Cleaned up %d stale locks", len(workflows))
-
-
+            except Exception as e:
+                logger.error("Failed to cleanup stale locks: %s", e, exc_info=True)
 
 
 async def auto_resume_stale_workflows_periodically(
     storage: StorageProtocol,
     replay_engine: Any,
-    worker_id: str,
     interval: int = 60,
 ) -> None:
     """
@@ -255,39 +242,27 @@ async def auto_resume_stale_workflows_periodically(
     This combines lock cleanup with automatic workflow resumption, ensuring
     that workflows interrupted by worker crashes are automatically recovered.
 
-
-
+    Important: This function should only be run by a single worker (e.g., via leader
+    election). It does not perform its own distributed coordination.
 
     Example:
         >>> asyncio.create_task(
         ...     auto_resume_stale_workflows_periodically(
-        ...         storage, replay_engine,
+        ...         storage, replay_engine, interval=60
         ...     )
         ... )
 
     Args:
         storage: Storage backend
         replay_engine: ReplayEngine instance for resuming workflows
-        worker_id: Unique identifier for this worker (for global lock coordination)
        interval: Cleanup interval in seconds (default: 60)
    """
    with suppress(asyncio.CancelledError):
        while True:
-            # Add jitter to prevent thundering herd
+            # Add jitter to prevent thundering herd
            jitter = random.uniform(0, interval * 0.3)
            await asyncio.sleep(interval + jitter)
 
-            # Try to acquire global lock for this task
-            lock_acquired = await storage.try_acquire_system_lock(
-                lock_name="auto_resume_stale_workflows",
-                worker_id=worker_id,
-                timeout_seconds=interval,
-            )
-
-            if not lock_acquired:
-                # Another pod is handling this task
-                continue
-
            try:
                # Clean up stale locks and get workflows to resume
                workflows_to_resume = await storage.cleanup_stale_locks()
@@ -369,8 +344,8 @@ async def auto_resume_stale_workflows_periodically(
                         e,
                         exc_info=True,
                     )
-
-
+            except Exception as e:
+                logger.error("Failed to cleanup stale locks: %s", e, exc_info=True)
 
 
 class LockNotAcquiredError(Exception):
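Both periodic functions now drop the worker_id parameter and the internal try_acquire_system_lock call, so the caller owns the single-runner guarantee. A short usage sketch against the 0.14.1 signatures, assuming a hypothetical became_leader() coroutine supplied by the application's own leader election; the import path, function signatures, and the "leader_stale_workflow_resume" task name come from the diff, while the rest is illustrative:

import asyncio

from edda.locking import auto_resume_stale_workflows_periodically

async def start_leader_maintenance(storage, replay_engine, became_leader):
    # Block until this worker is elected leader; edda 0.14.1 leaves that
    # coordination to the application, so `became_leader` is a stand-in here.
    await became_leader()

    # 0.14.1 signature: no worker_id argument and no internal system lock.
    # Per the docstrings above, cleanup_stale_locks_periodically(storage, interval=60)
    # is the lighter alternative when workflow resumption is not wanted.
    return asyncio.create_task(
        auto_resume_stale_workflows_periodically(storage, replay_engine, interval=60),
        name="leader_stale_workflow_resume",  # task name used in app.py above
    )
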
{edda_framework-0.14.0.dist-info → edda_framework-0.14.1.dist-info}/RECORD
CHANGED

@@ -1,12 +1,12 @@
 edda/__init__.py,sha256=hGC6WR2R36M8LWC97F-0Rw4Ln0QUUT_1xC-7acOy_Fk,2237
 edda/activity.py,sha256=nRm9eBrr0lFe4ZRQ2whyZ6mo5xd171ITIVhqytUhOpw,21025
-edda/app.py,sha256=
+edda/app.py,sha256=gtBtNsWpib8BHwzC02MlP7LxUHNXqcZVwtFcUhTWGkk,67801
 edda/channels.py,sha256=6JFZkeOs0xDumexr0_bLI_Mb4S245hLJM_Sqp3xPCCA,37676
 edda/compensation.py,sha256=iKLlnTxiF1YSatmYQW84EkPB1yMKUEZBtgjuGnghLtY,11824
 edda/context.py,sha256=Qqm_nUC5NNnOfHAb7taqKqZVIc0GoRWUrjZ4L9_-q70,22128
 edda/exceptions.py,sha256=-ntBLGpVQgPFG5N1o8m_7weejAYkNrUdxTkOP38vsHk,1766
 edda/hooks.py,sha256=HUZ6FTM__DZjwuomDfTDEroQ3mugEPuJHcGm7CTQNvg,8193
-edda/locking.py,sha256=
+edda/locking.py,sha256=ZMdzGO4u3h8m3kDysmkDpAkJNvQQADbILPsmo52EQis,12716
 edda/pydantic_utils.py,sha256=dGVPNrrttDeq1k233PopCtjORYjZitsgASPfPnO6R10,9056
 edda/replay.py,sha256=IQGByw9mlTpRulyUgsHJSPsZUULmM2YqFcm2WeB4jtw,43227
 edda/retry.py,sha256=t4_E1skrhotA1XWHTLbKi-DOgCMasOUnhI9OT-O_eCE,6843
@@ -47,8 +47,8 @@ edda/visualizer/mermaid_generator.py,sha256=XWa2egoOTNDfJEjPcwoxwQmblUqXf7YInWFj
 edda/migrations/mysql/20251217000000_initial_schema.sql,sha256=LpINasESRhadOeqABwDk4JZ0OZ4_zQw_opnhIR4Xe9U,12367
 edda/migrations/postgresql/20251217000000_initial_schema.sql,sha256=hCaGMWeptpzpnsjfNKVsMYuwPRe__fK9E0VZpClAumQ,11732
 edda/migrations/sqlite/20251217000000_initial_schema.sql,sha256=Wq9gCnQ0K9SOt0PY_8f1MG4va8rLVWIIcf2lnRzSK5g,11906
-edda_framework-0.14.
-edda_framework-0.14.
-edda_framework-0.14.
-edda_framework-0.14.
-edda_framework-0.14.
+edda_framework-0.14.1.dist-info/METADATA,sha256=3WamC1lB2LrLdUIbOPrYeoWsqW8leTXF8zRFB8rObpY,37567
+edda_framework-0.14.1.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+edda_framework-0.14.1.dist-info/entry_points.txt,sha256=dPH47s6UoJgUZxHoeSMqZsQkLaSE-SGLi-gh88k2WrU,48
+edda_framework-0.14.1.dist-info/licenses/LICENSE,sha256=udxb-V7_cYKTHqW7lNm48rxJ-Zpf0WAY_PyGDK9BPCo,1069
+edda_framework-0.14.1.dist-info/RECORD,,
{edda_framework-0.14.0.dist-info → edda_framework-0.14.1.dist-info}/WHEEL
File without changes

{edda_framework-0.14.0.dist-info → edda_framework-0.14.1.dist-info}/entry_points.txt
File without changes

{edda_framework-0.14.0.dist-info → edda_framework-0.14.1.dist-info}/licenses/LICENSE
File without changes