edda-framework 0.7.0__py3-none-any.whl → 0.9.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
edda/channels.py ADDED
@@ -0,0 +1,1017 @@
+ """
+ Channel-based Message Queue System for Edda.
+
+ This module provides Erlang/Elixir mailbox-style messaging with support for:
+ - Broadcast mode: All subscribers receive all messages (fan-out pattern)
+ - Competing mode: Each message is processed by only one subscriber (producer-consumer pattern)
+
+ Key concepts:
+ - Channel: A named message queue with persistent storage
+ - Message: A data payload published to a channel
+ - Subscription: A workflow's interest in receiving messages from a channel
+
+ The channel system solves the "mailbox problem" where messages sent before
+ `receive()` is called would be lost. Messages are always queued and persist
+ until consumed.
+
+ Example:
+     >>> from edda.channels import subscribe, receive, publish, ChannelMessage
+     >>>
+     >>> @workflow
+     ... async def worker(ctx: WorkflowContext, id: str):
+     ...     # Subscribe to a channel
+     ...     await subscribe(ctx, "tasks", mode="competing")
+     ...
+     ...     while True:
+     ...         # Receive messages (blocks until message available)
+     ...         msg = await receive(ctx, "tasks")
+     ...         await process(ctx, msg.data, activity_id=f"process:{msg.id}")
+     ...         await ctx.recur()
+     >>>
+     >>> @workflow
+     ... async def producer(ctx: WorkflowContext, task_data: dict):
+     ...     await publish(ctx, "tasks", task_data)
+ """
+
+ from __future__ import annotations
+
+ import uuid
+ from dataclasses import dataclass, field
+ from datetime import UTC, datetime, timedelta
+ from typing import TYPE_CHECKING, Any, overload
+
+ if TYPE_CHECKING:
+     from edda.context import WorkflowContext
+     from edda.storage.protocol import StorageProtocol
+
+
+ # =============================================================================
+ # Data Classes
+ # =============================================================================
+
+
+ @dataclass(frozen=True)
+ class ChannelMessage:
+     """
+     A message received from a channel.
+
+     Attributes:
+         id: Unique message identifier
+         channel: Channel name this message was received on
+         data: Message payload (dict or bytes)
+         metadata: Optional metadata (source, timestamp, etc.)
+         published_at: When the message was published
+     """
+
+     id: str
+     channel: str
+     data: dict[str, Any] | bytes
+     metadata: dict[str, Any] = field(default_factory=dict)
+     published_at: datetime = field(default_factory=lambda: datetime.now(UTC))
+
+
+ # =============================================================================
+ # Exceptions
+ # =============================================================================
+
+
+ class WaitForChannelMessageException(Exception):
+     """
+     Raised to pause workflow execution until a channel message arrives.
+
+     This exception is caught by the ReplayEngine to:
+     1. Register the workflow as waiting for a channel message
+     2. Release the workflow lock
+     3. Update workflow status to 'waiting_for_message'
+
+     The workflow will be resumed when a message is delivered to the channel.
+     """
+
+     def __init__(
+         self,
+         channel: str,
+         timeout_seconds: int | None,
+         activity_id: str,
+     ) -> None:
+         self.channel = channel
+         self.timeout_seconds = timeout_seconds
+         self.activity_id = activity_id
+         # Calculate absolute timeout if specified
+         self.timeout_at: datetime | None = None
+         if timeout_seconds is not None:
+             self.timeout_at = datetime.now(UTC) + timedelta(seconds=timeout_seconds)
+         super().__init__(f"Waiting for message on channel: {channel}")
+
+
+ class WaitForTimerException(Exception):
+     """
+     Raised to pause workflow execution until a timer expires.
+
+     This exception is caught by the ReplayEngine to:
+     1. Register a timer subscription in the database
+     2. Release the workflow lock
+     3. Update workflow status to 'waiting_for_timer'
+
+     The workflow will be resumed when the timer expires.
+     """
+
+     def __init__(
+         self,
+         duration_seconds: int,
+         expires_at: datetime,
+         timer_id: str,
+         activity_id: str,
+     ) -> None:
+         self.duration_seconds = duration_seconds
+         self.expires_at = expires_at
+         self.timer_id = timer_id
+         self.activity_id = activity_id
+         super().__init__(f"Waiting for timer: {timer_id}")
+
+
+ # =============================================================================
+ # Subscription Functions
+ # =============================================================================
+
+
+ async def subscribe(
+     ctx: WorkflowContext,
+     channel: str,
+     mode: str = "broadcast",
+ ) -> None:
+     """
+     Subscribe to a channel for receiving messages.
+
+     Args:
+         ctx: Workflow context
+         channel: Channel name to subscribe to
+         mode: Subscription mode - "broadcast" (all subscribers receive all messages)
+             or "competing" (each message goes to only one subscriber)
+
+     Example:
+         >>> @workflow
+         ... async def event_handler(ctx: WorkflowContext, id: str):
+         ...     # Subscribe to order events (all handlers receive all events)
+         ...     await subscribe(ctx, "order.events", mode="broadcast")
+         ...
+         ...     while True:
+         ...         event = await receive(ctx, "order.events")
+         ...         await handle_event(ctx, event.data, activity_id=f"handle:{event.id}")
+         ...         await ctx.recur()
+
+         >>> @workflow
+         ... async def job_worker(ctx: WorkflowContext, worker_id: str):
+         ...     # Subscribe to job queue (each job processed by one worker)
+         ...     await subscribe(ctx, "jobs", mode="competing")
+         ...
+         ...     while True:
+         ...         job = await receive(ctx, "jobs")
+         ...         await execute_job(ctx, job.data, activity_id=f"job:{job.id}")
+         ...         await ctx.recur()
+     """
+     if mode not in ("broadcast", "competing"):
+         raise ValueError(f"Invalid subscription mode: {mode}. Must be 'broadcast' or 'competing'")
+
+     await ctx.storage.subscribe_to_channel(ctx.instance_id, channel, mode)
+
+
+ async def unsubscribe(
+     ctx: WorkflowContext,
+     channel: str,
+ ) -> None:
+     """
+     Unsubscribe from a channel.
+
+     Note: Workflows are automatically unsubscribed from all channels when they
+     complete, fail, or are cancelled. Explicit unsubscribe is usually not necessary.
+
+     Args:
+         ctx: Workflow context
+         channel: Channel name to unsubscribe from
+     """
+     await ctx.storage.unsubscribe_from_channel(ctx.instance_id, channel)
+
+
+ # =============================================================================
+ # Message Receiving
+ # =============================================================================
+
+
+ async def receive(
+     ctx: WorkflowContext,
+     channel: str,
+     timeout_seconds: int | None = None,
+     message_id: str | None = None,
+ ) -> ChannelMessage:
+     """
+     Receive a message from a channel.
+
+     This function blocks (pauses the workflow) until a message is available
+     on the channel. Messages are queued persistently, so messages published
+     before this function is called will still be received.
+
+     Args:
+         ctx: Workflow context
+         channel: Channel name to receive from
+         timeout_seconds: Optional timeout in seconds
+         message_id: Optional ID for concurrent waiting (auto-generated if not provided)
+
+     Returns:
+         ChannelMessage object containing data and metadata
+
+     Raises:
+         WaitForChannelMessageException: Raised to pause workflow (caught by ReplayEngine)
+         TimeoutError: If timeout expires before message arrives
+
+     Example:
+         >>> @workflow
+         ... async def consumer(ctx: WorkflowContext, id: str):
+         ...     await subscribe(ctx, "tasks", mode="competing")
+         ...
+         ...     while True:
+         ...         msg = await receive(ctx, "tasks")
+         ...         await process(ctx, msg.data, activity_id=f"process:{msg.id}")
+         ...         await ctx.recur()
+     """
+     # Generate activity ID
+     if message_id is None:
+         activity_id = ctx._generate_activity_id(f"receive_{channel}")
+     else:
+         activity_id = message_id
+
+     ctx._record_activity_id(activity_id)
+
+     # During replay, return cached message
+     if ctx.is_replaying:
+         found, cached_result = ctx._get_cached_result(activity_id)
+         if found:
+             # Check for cached error
+             if isinstance(cached_result, dict) and cached_result.get("_error"):
+                 error_type = cached_result.get("error_type", "Exception")
+                 error_message = cached_result.get("error_message", "Unknown error")
+                 if error_type == "TimeoutError":
+                     raise TimeoutError(error_message)
+                 raise Exception(f"{error_type}: {error_message}")
+             # Return cached ChannelMessage
+             if isinstance(cached_result, ChannelMessage):
+                 return cached_result
+             # Convert dict to ChannelMessage (from history)
+             if isinstance(cached_result, dict):
+                 raw_data = cached_result.get("data", cached_result.get("payload", {}))
+                 data: dict[str, Any] | bytes = (
+                     raw_data if isinstance(raw_data, (dict, bytes)) else {}
+                 )
+                 published_at_str = cached_result.get("published_at")
+                 published_at = (
+                     datetime.fromisoformat(published_at_str)
+                     if published_at_str
+                     else datetime.now(UTC)
+                 )
+                 return ChannelMessage(
+                     id=cached_result.get("id", "") or "",
+                     channel=cached_result.get("channel", channel) or channel,
+                     data=data,
+                     metadata=cached_result.get("metadata") or {},
+                     published_at=published_at,
+                 )
+             raise RuntimeError(f"Unexpected cached result type: {type(cached_result)}")
+
+     # Check for pending messages in the queue
+     pending = await ctx.storage.get_pending_channel_messages(ctx.instance_id, channel)
+     if pending:
+         # Get the first pending message
+         msg_dict = pending[0]
+         msg_id = msg_dict["message_id"]
+
+         # For competing mode, try to claim the message
+         subscription = await _get_subscription(ctx, channel)
+         if subscription and subscription.get("mode") == "competing":
+             claimed = await ctx.storage.claim_channel_message(msg_id, ctx.instance_id)
+             if not claimed:
+                 # Another worker claimed it, check next message
+                 # For simplicity, raise exception to retry
+                 raise WaitForChannelMessageException(
+                     channel=channel,
+                     timeout_seconds=timeout_seconds,
+                     activity_id=activity_id,
+                 )
+             # Delete the message after claiming (competing mode)
+             await ctx.storage.delete_channel_message(msg_id)
+         else:
+             # Broadcast mode - update cursor
+             await ctx.storage.update_delivery_cursor(channel, ctx.instance_id, msg_dict["id"])
+
+         # Build the message
+         raw_data = msg_dict.get("data")
+         data = raw_data if isinstance(raw_data, (dict, bytes)) else {}
+         published_at_str = msg_dict.get("published_at")
+         published_at = (
+             datetime.fromisoformat(published_at_str)
+             if isinstance(published_at_str, str)
+             else (published_at_str if isinstance(published_at_str, datetime) else datetime.now(UTC))
+         )
+
+         message = ChannelMessage(
+             id=msg_id,
+             channel=channel,
+             data=data,
+             metadata=msg_dict.get("metadata") or {},
+             published_at=published_at,
+         )
+
+         # Record in history for replay
+         await ctx.storage.append_history(
+             ctx.instance_id,
+             activity_id,
+             "ChannelMessageReceived",
+             {
+                 "id": message.id,
+                 "channel": message.channel,
+                 "data": message.data,
+                 "metadata": message.metadata,
+                 "published_at": message.published_at.isoformat(),
+             },
+         )
+
+         return message
+
+     # No pending messages, raise exception to pause workflow
+     raise WaitForChannelMessageException(
+         channel=channel,
+         timeout_seconds=timeout_seconds,
+         activity_id=activity_id,
+     )
+
+
+ async def _get_subscription(ctx: WorkflowContext, channel: str) -> dict[str, Any] | None:
+     """Get the subscription info for a channel."""
+     return await ctx.storage.get_channel_subscription(ctx.instance_id, channel)
+
+
+ # =============================================================================
+ # Message Publishing
+ # =============================================================================
+
+
+ @overload
+ async def publish(
+     ctx_or_storage: WorkflowContext,
+     channel: str,
+     data: dict[str, Any] | bytes,
+     metadata: dict[str, Any] | None = None,
+     *,
+     target_instance_id: str | None = None,
+     worker_id: str | None = None,
+ ) -> str: ...
+
+
+ @overload
+ async def publish(
+     ctx_or_storage: StorageProtocol,
+     channel: str,
+     data: dict[str, Any] | bytes,
+     metadata: dict[str, Any] | None = None,
+     *,
+     target_instance_id: str | None = None,
+     worker_id: str | None = None,
+ ) -> str: ...
+
+
+ async def publish(
+     ctx_or_storage: WorkflowContext | StorageProtocol,
+     channel: str,
+     data: dict[str, Any] | bytes,
+     metadata: dict[str, Any] | None = None,
+     *,
+     target_instance_id: str | None = None,
+     worker_id: str | None = None,
+ ) -> str:
+     """
+     Publish a message to a channel.
+
+     Can be called from within a workflow (with WorkflowContext) or from
+     external code (with StorageProtocol directly).
+
+     Args:
+         ctx_or_storage: Workflow context or storage backend
+         channel: Channel name to publish to
+         data: Message payload (dict or bytes)
+         metadata: Optional metadata
+         target_instance_id: If provided, only deliver to this specific instance
+             (Point-to-Point delivery). If None, deliver to all
+             waiting subscribers (Pub/Sub delivery).
+         worker_id: Optional worker ID for Lock-First pattern (required for
+             CloudEvents HTTP handler)
+
+     Returns:
+         Message ID of the published message
+
+     Example:
+         >>> # From within a workflow
+         >>> @workflow
+         ... async def order_processor(ctx: WorkflowContext, order_id: str):
+         ...     result = await process_order(ctx, order_id, activity_id="process:1")
+         ...     await publish(ctx, "order.completed", {"order_id": order_id})
+         ...     return result
+
+         >>> # From external code (e.g., HTTP handler)
+         >>> async def api_handler(request):
+         ...     message_id = await publish(app.storage, "jobs", {"task": "process"})
+         ...     return {"message_id": message_id}
+
+         >>> # Point-to-Point delivery (CloudEvents with eddainstanceid)
+         >>> await publish(
+         ...     storage, "payment.completed", {"amount": 100},
+         ...     target_instance_id="order-123", worker_id="worker-1"
+         ... )
+     """
+     # Determine if we have a context or direct storage
+     from edda.context import WorkflowContext as WfCtx
+
+     if isinstance(ctx_or_storage, WfCtx):
+         storage = ctx_or_storage.storage
+         # Add source metadata
+         full_metadata = metadata.copy() if metadata else {}
+         full_metadata.setdefault("source_instance_id", ctx_or_storage.instance_id)
+         full_metadata.setdefault("published_at", datetime.now(UTC).isoformat())
+         effective_worker_id = worker_id or ctx_or_storage.worker_id
+     else:
+         storage = ctx_or_storage
+         full_metadata = metadata.copy() if metadata else {}
+         full_metadata.setdefault("published_at", datetime.now(UTC).isoformat())
+         effective_worker_id = worker_id or f"publisher-{uuid.uuid4()}"
+
+     # Publish to channel
+     message_id = await storage.publish_to_channel(channel, data, full_metadata)
+
+     # Wake up waiting subscribers
+     # If in a transaction, defer delivery until after commit to ensure atomicity
+     if storage.in_transaction():
+         # Capture current values for the closure
+         _storage = storage
+         _channel = channel
+         _message_id = message_id
+         _data = data
+         _metadata = full_metadata
+         _target_instance_id = target_instance_id
+         _worker_id = effective_worker_id
+
+         async def deferred_wake() -> None:
+             await _wake_waiting_subscribers(
+                 _storage,
+                 _channel,
+                 _message_id,
+                 _data,
+                 _metadata,
+                 target_instance_id=_target_instance_id,
+                 worker_id=_worker_id,
+             )
+
+         storage.register_post_commit_callback(deferred_wake)
+     else:
+         # Not in transaction - deliver immediately
+         await _wake_waiting_subscribers(
+             storage,
+             channel,
+             message_id,
+             data,
+             full_metadata,
+             target_instance_id=target_instance_id,
+             worker_id=effective_worker_id,
+         )
+
+     return message_id
+
+
+ async def _wake_waiting_subscribers(
+     storage: StorageProtocol,
+     channel: str,
+     message_id: str,
+     data: dict[str, Any] | bytes,
+     metadata: dict[str, Any],
+     *,
+     target_instance_id: str | None = None,
+     worker_id: str,
+ ) -> None:
+     """
+     Wake up subscribers waiting on a channel.
+
+     Args:
+         storage: Storage backend
+         channel: Channel name
+         message_id: Message ID
+         data: Message payload
+         metadata: Message metadata
+         target_instance_id: If provided, only wake this specific instance
+             (Point-to-Point delivery)
+         worker_id: Worker ID for Lock-First pattern
+     """
+     if target_instance_id:
+         # Point-to-Point delivery: deliver only to specific instance
+         await storage.deliver_channel_message(
+             instance_id=target_instance_id,
+             channel=channel,
+             message_id=message_id,
+             data=data,
+             metadata=metadata,
+             worker_id=worker_id,
+         )
+         return
+
+     # Pub/Sub delivery: deliver to all waiting subscribers
+     waiting = await storage.get_channel_subscribers_waiting(channel)
+
+     for sub in waiting:
+         instance_id = sub["instance_id"]
+         mode = sub["mode"]
+
+         if mode == "competing":
+             # For competing mode, only wake one subscriber
+             # Use Lock-First pattern
+             result = await storage.deliver_channel_message(
+                 instance_id=instance_id,
+                 channel=channel,
+                 message_id=message_id,
+                 data=data,
+                 metadata=metadata,
+                 worker_id=worker_id,
+             )
+             if result:
+                 # Successfully woke one subscriber, stop
+                 break
+         else:
+             # For broadcast mode, wake all subscribers
+             await storage.deliver_channel_message(
+                 instance_id=instance_id,
+                 channel=channel,
+                 message_id=message_id,
+                 data=data,
+                 metadata=metadata,
+                 worker_id=worker_id,
+             )
+
+
+ # =============================================================================
+ # Direct Messaging (Instance-to-Instance)
+ # =============================================================================
+
+
+ async def send_to(
+     ctx: WorkflowContext,
+     instance_id: str,
+     data: dict[str, Any] | bytes,
+     channel: str = "__direct__",
+     metadata: dict[str, Any] | None = None,
+ ) -> bool:
+     """
+     Send a message directly to a specific workflow instance.
+
+     This is useful for workflow-to-workflow communication where the target
+     instance ID is known.
+
+     Args:
+         ctx: Workflow context (source workflow)
+         instance_id: Target workflow instance ID
+         data: Message payload
+         channel: Channel name (defaults to "__direct__" for direct messages)
+         metadata: Optional metadata
+
+     Returns:
+         True if delivered, False if no workflow waiting
+
+     Example:
+         >>> @workflow
+         ... async def approver(ctx: WorkflowContext, request_id: str):
+         ...     decision = await review(ctx, request_id, activity_id="review:1")
+         ...     await send_to(ctx, instance_id=request_id, data={"approved": decision})
+     """
+     full_metadata = metadata.copy() if metadata else {}
+     full_metadata.setdefault("source_instance_id", ctx.instance_id)
+     full_metadata.setdefault("sent_at", datetime.now(UTC).isoformat())
+
+     # Publish to a direct channel for the target instance
+     direct_channel = f"{channel}:{instance_id}"
+     message_id = await ctx.storage.publish_to_channel(direct_channel, data, full_metadata)
+
+     # Try to deliver
+     result = await ctx.storage.deliver_channel_message(
+         instance_id=instance_id,
+         channel=direct_channel,
+         message_id=message_id,
+         data=data,
+         metadata=full_metadata,
+         worker_id=ctx.worker_id,
+     )
+
+     return result is not None
+
+
+ # =============================================================================
+ # Timer Functions
+ # =============================================================================
+
+
+ async def sleep(
+     ctx: WorkflowContext,
+     seconds: int,
+     timer_id: str | None = None,
+ ) -> None:
+     """
+     Pause workflow execution for a specified duration.
+
+     This is a durable sleep - the workflow will be resumed after the specified
+     time even if the worker restarts.
+
+     Args:
+         ctx: Workflow context
+         seconds: Duration to sleep in seconds
+         timer_id: Optional unique ID for this timer (auto-generated if not provided)
+
+     Example:
+         >>> @workflow
+         ... async def order_workflow(ctx: WorkflowContext, order_id: str):
+         ...     await create_order(ctx, order_id, activity_id="create:1")
+         ...     await sleep(ctx, 60)  # Wait 60 seconds for payment
+         ...     await check_payment(ctx, order_id, activity_id="check:1")
+     """
+     # Generate activity ID
+     if timer_id is None:
+         activity_id = ctx._generate_activity_id("sleep")
+         timer_id = activity_id
+     else:
+         activity_id = timer_id
+
+     ctx._record_activity_id(activity_id)
+
+     # During replay, return immediately
+     if ctx.is_replaying:
+         found, cached_result = ctx._get_cached_result(activity_id)
+         if found:
+             return
+
+     # Calculate expiry time (deterministic - calculated once)
+     expires_at = datetime.now(UTC) + timedelta(seconds=seconds)
+
+     # Raise exception to pause workflow
+     raise WaitForTimerException(
+         duration_seconds=seconds,
+         expires_at=expires_at,
+         timer_id=timer_id,
+         activity_id=activity_id,
+     )
+
+
+ async def sleep_until(
+     ctx: WorkflowContext,
+     target_time: datetime,
+     timer_id: str | None = None,
+ ) -> None:
+     """
+     Pause workflow execution until a specific time.
+
+     This is a durable sleep - the workflow will be resumed at the specified
+     time even if the worker restarts.
+
+     Args:
+         ctx: Workflow context
+         target_time: Absolute time to wake up (must be timezone-aware)
+         timer_id: Optional unique ID for this timer (auto-generated if not provided)
+
+     Example:
+         >>> from datetime import datetime, timedelta, UTC
+         >>>
+         >>> @workflow
+         ... async def scheduled_report(ctx: WorkflowContext, report_id: str):
+         ...     # Schedule for tomorrow at 9 AM
+         ...     tomorrow_9am = datetime.now(UTC).replace(hour=9, minute=0, second=0)
+         ...     tomorrow_9am += timedelta(days=1)
+         ...     await sleep_until(ctx, tomorrow_9am)
+         ...     await generate_report(ctx, report_id, activity_id="generate:1")
+     """
+     if target_time.tzinfo is None:
+         raise ValueError("target_time must be timezone-aware")
+
+     # Generate activity ID
+     if timer_id is None:
+         activity_id = ctx._generate_activity_id("sleep_until")
+         timer_id = activity_id
+     else:
+         activity_id = timer_id
+
+     ctx._record_activity_id(activity_id)
+
+     # During replay, return immediately
+     if ctx.is_replaying:
+         found, cached_result = ctx._get_cached_result(activity_id)
+         if found:
+             return
+
+     # Calculate seconds until target
+     now = datetime.now(UTC)
+     delta = target_time - now
+     seconds = max(0, int(delta.total_seconds()))
+
+     # Raise exception to pause workflow
+     raise WaitForTimerException(
+         duration_seconds=seconds,
+         expires_at=target_time,
+         timer_id=timer_id,
+         activity_id=activity_id,
+     )
+
+
+ # =============================================================================
+ # CloudEvents Integration
+ # =============================================================================
+
+
+ @dataclass(frozen=True)
+ class ReceivedEvent:
+     """
+     Represents a CloudEvent received by a workflow.
+
+     This class provides structured access to both the event payload (data)
+     and CloudEvents metadata (type, source, id, time, etc.).
+
+     Attributes:
+         data: The event payload (JSON dict or Pydantic model)
+         type: CloudEvent type (e.g., "payment.completed")
+         source: CloudEvent source (e.g., "payment-service")
+         id: Unique event identifier
+         time: Event timestamp (ISO 8601 format)
+         datacontenttype: Content type of the data (typically "application/json")
+         subject: Subject of the event (optional CloudEvents extension)
+         extensions: Additional CloudEvents extension attributes
+
+     Example:
+         >>> # Without Pydantic model
+         >>> event = await wait_event(ctx, "payment.completed")
+         >>> amount = event.data["amount"]
+         >>> order_id = event.data["order_id"]
+         >>>
+         >>> # With Pydantic model (type-safe)
+         >>> event = await wait_event(ctx, "payment.completed", model=PaymentCompleted)
+         >>> amount = event.data.amount  # Type-safe access
+         >>> order_id = event.data.order_id  # IDE completion
+         >>>
+         >>> # Access CloudEvents metadata
+         >>> event_source = event.source
+         >>> event_time = event.time
+         >>> event_id = event.id
+     """
+
+     # Event payload (JSON dict or Pydantic model)
+     data: dict[str, Any] | Any  # Any to support Pydantic models
+
+     # CloudEvents standard attributes
+     type: str
+     source: str
+     id: str
+     time: str | None = None
+     datacontenttype: str | None = None
+     subject: str | None = None
+
+     # CloudEvents extension attributes
+     extensions: dict[str, Any] = field(default_factory=dict)
+
+
+ class EventTimeoutError(Exception):
+     """
+     Exception raised when wait_event() times out.
+
+     This exception is raised when an event does not arrive within the
+     specified timeout period. The workflow can catch this exception to
+     handle timeout scenarios gracefully.
+
+     Example:
+         try:
+             event = await wait_event(ctx, "payment.completed", timeout_seconds=60)
+         except EventTimeoutError:
+             # Handle timeout - maybe send reminder or cancel order
+             await send_notification("Payment timeout")
+     """
+
+     def __init__(self, event_type: str, timeout_seconds: int):
+         self.event_type = event_type
+         self.timeout_seconds = timeout_seconds
+         super().__init__(f"Event '{event_type}' did not arrive within {timeout_seconds} seconds")
+
+
+ def _convert_channel_message_to_received_event(
+     msg: ChannelMessage,
+     event_type: str,
+     model: type[Any] | None = None,
+ ) -> ReceivedEvent:
+     """
+     Convert a ChannelMessage to a ReceivedEvent.
+
+     CloudEvents metadata is extracted from the message's metadata field
+     where it was stored with 'ce_' prefix.
+
+     Args:
+         msg: ChannelMessage received from receive()
+         event_type: The event type that was waited for
+         model: Optional Pydantic model to convert data to
+
+     Returns:
+         ReceivedEvent with CloudEvents metadata
+     """
+     from edda.pydantic_utils import from_json_dict
+
+     data: dict[str, Any] | Any
+     if model is not None and isinstance(msg.data, dict):
+         data = from_json_dict(msg.data, model)
+     elif isinstance(msg.data, dict):
+         data = msg.data
+     else:
+         # bytes data - wrap in dict for ReceivedEvent compatibility
+         data = {"_binary": msg.data}
+
+     return ReceivedEvent(
+         data=data,
+         type=event_type,
+         source=msg.metadata.get("ce_source", "unknown"),
+         id=msg.metadata.get("ce_id", msg.id),
+         time=msg.metadata.get("ce_time"),
+         datacontenttype=msg.metadata.get("ce_datacontenttype"),
+         subject=msg.metadata.get("ce_subject"),
+         extensions=msg.metadata.get("ce_extensions", {}),
+     )
+
+
+ async def wait_event(
+     ctx: WorkflowContext,
+     event_type: str,
+     timeout_seconds: int | None = None,
+     model: type[Any] | None = None,
+     event_id: str | None = None,
+ ) -> ReceivedEvent:
+     """
+     Wait for a CloudEvent to arrive.
+
+     This function pauses the workflow execution until a matching CloudEvent is received.
+     During replay, it returns the cached event data and metadata.
+
+     Internally, this uses the Channel-based Message Queue with event_type as the channel name.
+     CloudEvents metadata is preserved in the message metadata.
+
+     Args:
+         ctx: Workflow context
+         event_type: CloudEvent type to wait for (e.g., "payment.completed")
+         timeout_seconds: Optional timeout in seconds
+         model: Optional Pydantic model class to convert event data to
+         event_id: Optional event identifier (auto-generated if not provided)
+
+     Returns:
+         ReceivedEvent object containing event data and CloudEvents metadata.
+         If model is provided, ReceivedEvent.data will be a Pydantic model instance.
+
+     Note:
+         Events are delivered to workflows that are subscribed to the event_type channel.
+         Use subscribe(ctx, event_type) before calling wait_event() or let it auto-subscribe.
+
+     Raises:
+         WaitForChannelMessageException: During normal execution to pause the workflow
+         EventTimeoutError: If timeout is reached
+
+     Example:
+         >>> # Without Pydantic (dict access)
+         >>> @workflow
+         ... async def order_workflow(ctx: WorkflowContext, order_id: str):
+         ...     await subscribe(ctx, "payment.completed", mode="broadcast")
+         ...     payment_event = await wait_event(ctx, "payment.completed")
+         ...     amount = payment_event.data["amount"]
+         ...     order_id = payment_event.data["order_id"]
+         ...
+         >>> # With Pydantic (type-safe access)
+         >>> @workflow
+         ... async def order_workflow_typed(ctx: WorkflowContext, order_id: str):
+         ...     await subscribe(ctx, "payment.completed", mode="broadcast")
+         ...     payment_event = await wait_event(
+         ...         ctx,
+         ...         event_type="payment.completed",
+         ...         model=PaymentCompleted
+         ...     )
+         ...     # Type-safe access with IDE completion
+         ...     amount = payment_event.data.amount
+     """
+     # Auto-subscribe to the event_type channel if not already subscribed
+     subscription = await _get_subscription(ctx, event_type)
+     if subscription is None:
+         await subscribe(ctx, event_type, mode="broadcast")
+
+     # Use receive() with event_type as channel
+     msg = await receive(
+         ctx,
+         channel=event_type,
+         timeout_seconds=timeout_seconds,
+         message_id=event_id,
+     )
+
+     # Convert ChannelMessage to ReceivedEvent with CloudEvents metadata
+     return _convert_channel_message_to_received_event(msg, event_type, model)
+
+
+ # Backward compatibility aliases
+ wait_timer = sleep
+ wait_until = sleep_until
+
+
+ async def send_event(
+     event_type: str,
+     source: str,
+     data: dict[str, Any] | Any,
+     broker_url: str = "http://broker-ingress.knative-eventing.svc.cluster.local",
+     datacontenttype: str | None = None,
+ ) -> None:
+     """
+     Send a CloudEvent to Knative Broker.
+
+     Args:
+         event_type: CloudEvent type (e.g., "order.created")
+         source: CloudEvent source (e.g., "order-service")
+         data: Event payload (JSON dict or Pydantic model)
+         broker_url: Knative Broker URL
+         datacontenttype: Content type (defaults to "application/json")
+
+     Raises:
+         httpx.HTTPError: If the HTTP request fails
+
+     Example:
+         >>> # With dict
+         >>> await send_event("order.created", "order-service", {"order_id": "123"})
+         >>>
+         >>> # With Pydantic model (automatically converted to JSON)
+         >>> order = OrderCreated(order_id="123", amount=99.99)
+         >>> await send_event("order.created", "order-service", order)
+     """
+     import httpx
+     from cloudevents.conversion import to_structured
+     from cloudevents.http import CloudEvent
+
+     from edda.pydantic_utils import is_pydantic_instance, to_json_dict
+
+     # Convert Pydantic model to JSON dict
+     data_dict: dict[str, Any]
+     if is_pydantic_instance(data):
+         data_dict = to_json_dict(data)
+     elif isinstance(data, dict):
+         data_dict = data
+     else:
+         data_dict = {"_data": data}
+
+     # Create CloudEvent attributes
+     attributes: dict[str, Any] = {
+         "type": event_type,
+         "source": source,
+         "id": str(uuid.uuid4()),
+     }
+
+     # Set datacontenttype if specified
+     if datacontenttype:
+         attributes["datacontenttype"] = datacontenttype
+
+     event = CloudEvent(attributes, data_dict)
+
+     # Convert to structured format (HTTP)
+     headers, body = to_structured(event)
+
+     # Send to Knative Broker via HTTP POST
+     async with httpx.AsyncClient() as client:
+         response = await client.post(
+             broker_url,
+             headers=headers,
+             content=body,
+             timeout=10.0,
+         )
+         response.raise_for_status()
+
+
+ # =============================================================================
+ # Utility Functions
+ # =============================================================================
+
+
+ async def get_channel_stats(
+     _storage: StorageProtocol,
+     channel: str,
+ ) -> dict[str, Any]:
+     """
+     Get statistics about a channel.
+
+     Args:
+         _storage: Storage backend (currently unused; statistics retrieval is not yet implemented)
+         channel: Channel name
+
+     Returns:
+         Dictionary with channel statistics
+     """
+     # TODO: Implement actual statistics retrieval using _storage
+     # - Query ChannelMessage table for message_count
+     # - Query ChannelSubscription table for subscriber_count
+     # For now, return placeholder values
+     return {
+         "channel": channel,
+         "message_count": 0,
+         "subscriber_count": 0,
+     }
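
To illustrate how the pieces of the new channels module fit together, here is a minimal producer/consumer sketch assembled from the docstrings above. It is a sketch, not part of the package: the import paths for workflow and WorkflowContext, and the handle_task activity, are assumptions not shown in this diff.

from edda.channels import publish, receive, subscribe
# Assumed imports for this sketch; the diff does not show where these live.
from edda import WorkflowContext, workflow


@workflow
async def task_worker(ctx: WorkflowContext, worker_id: str) -> None:
    # Competing mode: each queued task is handled by exactly one worker.
    await subscribe(ctx, "tasks", mode="competing")
    while True:
        # Blocks durably until a message is available; queued messages survive restarts.
        msg = await receive(ctx, "tasks")
        # handle_task is a hypothetical activity used only for illustration.
        await handle_task(ctx, msg.data, activity_id=f"task:{msg.id}")
        await ctx.recur()


@workflow
async def task_producer(ctx: WorkflowContext, task: dict) -> None:
    # The message is queued persistently, so it is not lost if no worker is waiting yet.
    await publish(ctx, "tasks", task)

Per the publish() docstring, the same call also works from outside a workflow by passing a storage backend instead of a context.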