loopgraph 0.2.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34) hide show
  1. loopgraph-0.2.0/PKG-INFO +165 -0
  2. loopgraph-0.2.0/README.md +150 -0
  3. loopgraph-0.2.0/loopgraph/__init__.py +38 -0
  4. loopgraph-0.2.0/loopgraph/_debug.py +45 -0
  5. loopgraph-0.2.0/loopgraph/bus/__init__.py +5 -0
  6. loopgraph-0.2.0/loopgraph/bus/eventbus.py +186 -0
  7. loopgraph-0.2.0/loopgraph/concurrency/__init__.py +5 -0
  8. loopgraph-0.2.0/loopgraph/concurrency/policies.py +181 -0
  9. loopgraph-0.2.0/loopgraph/core/__init__.py +18 -0
  10. loopgraph-0.2.0/loopgraph/core/graph.py +425 -0
  11. loopgraph-0.2.0/loopgraph/core/state.py +443 -0
  12. loopgraph-0.2.0/loopgraph/core/types.py +72 -0
  13. loopgraph-0.2.0/loopgraph/diagnostics/__init__.py +5 -0
  14. loopgraph-0.2.0/loopgraph/diagnostics/inspect.py +70 -0
  15. loopgraph-0.2.0/loopgraph/persistence/__init__.py +6 -0
  16. loopgraph-0.2.0/loopgraph/persistence/event_log.py +63 -0
  17. loopgraph-0.2.0/loopgraph/persistence/snapshot.py +52 -0
  18. loopgraph-0.2.0/loopgraph/py.typed +0 -0
  19. loopgraph-0.2.0/loopgraph/registry/__init__.py +1 -0
  20. loopgraph-0.2.0/loopgraph/registry/function_registry.py +117 -0
  21. loopgraph-0.2.0/loopgraph/scheduler/__init__.py +5 -0
  22. loopgraph-0.2.0/loopgraph/scheduler/scheduler.py +569 -0
  23. loopgraph-0.2.0/loopgraph.egg-info/PKG-INFO +165 -0
  24. loopgraph-0.2.0/loopgraph.egg-info/SOURCES.txt +32 -0
  25. loopgraph-0.2.0/loopgraph.egg-info/dependency_links.txt +1 -0
  26. loopgraph-0.2.0/loopgraph.egg-info/requires.txt +8 -0
  27. loopgraph-0.2.0/loopgraph.egg-info/top_level.txt +1 -0
  28. loopgraph-0.2.0/pyproject.toml +52 -0
  29. loopgraph-0.2.0/setup.cfg +4 -0
  30. loopgraph-0.2.0/tests/test_doctests.py +45 -0
  31. loopgraph-0.2.0/tests/test_eventbus.py +159 -0
  32. loopgraph-0.2.0/tests/test_integration_workflows.py +879 -0
  33. loopgraph-0.2.0/tests/test_priority_concurrency.py +24 -0
  34. loopgraph-0.2.0/tests/test_scheduler_recovery.py +75 -0
@@ -0,0 +1,165 @@
1
+ Metadata-Version: 2.4
2
+ Name: loopgraph
3
+ Version: 0.2.0
4
+ Summary: Event-driven graph workflow engine with native loop support.
5
+ Author: LoopGraph Team
6
+ License: MIT
7
+ Requires-Python: >=3.10
8
+ Description-Content-Type: text/markdown
9
+ Provides-Extra: test
10
+ Requires-Dist: pytest>=7.4.0; extra == "test"
11
+ Requires-Dist: pytest-asyncio>=0.21.0; extra == "test"
12
+ Provides-Extra: lint
13
+ Requires-Dist: ruff>=0.6.4; extra == "lint"
14
+ Requires-Dist: mypy>=1.11.0; extra == "lint"
15
+
16
+ # LoopGraph
17
+
18
+ **The agent workflow engine that treats loops as first-class citizens.**
19
+
20
+ Build graph-based AI agent workflows where cycles, re-entry, and iterative reasoning are native — not hacks. More intuitive and lightweight than LangGraph, with loops as a first-class primitive.
21
+
22
+ - **Zero dependencies** — pure Python 3.10+, nothing to install beyond your own agents
23
+ - **Native loop support** — cycles in your graph are validated, tracked, and safe by design
24
+ - **Event-driven** — every state transition emits events; hook in logging, metrics, or triggers anywhere
25
+ - **Async-first** — built on `asyncio`, handles concurrent nodes without blocking
26
+ - **Recoverable** — snapshot and replay any run from any point
27
+
28
+ ```bash
29
+ pip install loopgraph
30
+ ```
31
+
32
+ ---
33
+
34
+ ## Quickstart
35
+
36
+ ```python
37
+ import asyncio
38
+ from loopgraph.core.graph import Graph, Node, Edge, NodeKind
39
+ from loopgraph.bus.eventbus import EventBus
40
+ from loopgraph.registry.function_registry import FunctionRegistry
41
+ from loopgraph.scheduler.scheduler import Scheduler
42
+
43
+ async def my_agent(payload):
44
+ # your agent logic here
45
+ return {"result": "done", "loop_again": False}
46
+
47
+ async def router(payload):
48
+ # return the next node id
49
+ return "end" if not payload.get("loop_again") else "agent"
50
+
51
+ graph = Graph(
52
+ nodes=[
53
+ Node(id="agent", kind=NodeKind.TASK),
54
+ Node(id="router", kind=NodeKind.SWITCH),
55
+ Node(id="end", kind=NodeKind.TASK),
56
+ ],
57
+ edges=[
58
+ Edge(source="agent", target="router"),
59
+ Edge(source="router", target="agent"), # the loop back-edge
60
+ Edge(source="router", target="end"),
61
+ ],
62
+ entry="agent",
63
+ )
64
+
65
+ registry = FunctionRegistry()
66
+ registry.register("agent", my_agent)
67
+ registry.register("router", router)
68
+ registry.register("end", lambda p: p)
69
+
70
+ bus = EventBus()
71
+ scheduler = Scheduler(graph=graph, registry=registry, bus=bus)
72
+
73
+ asyncio.run(scheduler.run(payload={"input": "hello"}))
74
+ ```
75
+
76
+ ---
77
+
78
+ ## Why LoopGraph?
79
+
80
+ Most workflow engines assume a DAG — a graph with no cycles. That works for linear pipelines, but agent workflows are inherently iterative: an agent reasons, reflects, decides to try again, and loops back. Forcing that into a DAG requires awkward workarounds.
81
+
82
+ LoopGraph makes loops explicit and safe:
83
+
84
+ - **Back-edges are first-class** — declare a cycle in your graph and the engine handles reset, visit tracking, and state management automatically
85
+ - **Loop safety** — the engine validates your graph at construction time; overlapping loops that share nodes are rejected before anything runs
86
+ - **Full observability** — every loop iteration emits events (`NODE_SCHEDULED`, `NODE_COMPLETED`, `NODE_FAILED`) so you always know where you are
87
+
88
+ ---
89
+
90
+ ## Event Hooks
91
+
92
+ Subscribe to any workflow event to add logging, metrics, or side effects:
93
+
94
+ ```python
95
+ from loopgraph.core.types import EventType
96
+
97
+ async def on_completed(event):
98
+ print(f"{event.node_id} finished → {event.payload}")
99
+
100
+ bus.subscribe(EventType.NODE_COMPLETED, on_completed)
101
+ ```
102
+
103
+ Available events: `NODE_SCHEDULED`, `NODE_STARTED`, `NODE_COMPLETED`, `NODE_FAILED`.
104
+
105
+ ---
106
+
107
+ ## Custom Events from Handlers
108
+
109
+ Wrap your handler in a closure to emit custom events mid-execution:
110
+
111
+ ```python
112
+ def make_handler(bus, base_handler):
113
+ async def wrapper(payload):
114
+ await bus.emit(Event(id="pre", graph_id="g", node_id="n",
115
+ type=EventType.NODE_SCHEDULED, payload={"stage": "pre"}))
116
+ result = await base_handler(payload)
117
+ await bus.emit(Event(id="post", graph_id="g", node_id="n",
118
+ type=EventType.NODE_COMPLETED, payload={"stage": "post"}))
119
+ return result
120
+ return wrapper
121
+
122
+ registry.register("my_node", make_handler(bus, my_agent))
123
+ ```
124
+
125
+ ---
126
+
127
+ ## Loop Re-entry Rules
128
+
129
+ - Re-entry is triggered by a `SWITCH` node selecting a back-edge
130
+ - Only `COMPLETED` nodes can be reset for re-entry
131
+ - Reset clears upstream-completion tracking and preserves cumulative `visit_count`
132
+ - Overlapping loops sharing any node are rejected at graph construction time
133
+
134
+ ---
135
+
136
+ ## Installation
137
+
138
+ ```bash
139
+ pip install loopgraph
140
+ ```
141
+
142
+ Requires Python 3.10+. No runtime dependencies.
143
+
144
+ ---
145
+
146
+ ## Development
147
+
148
+ ```bash
149
+ git clone https://github.com/your-org/loopgraph
150
+ cd loopgraph
151
+ python3 -m venv .venv && source .venv/bin/activate
152
+ pip install -e ".[test,lint]"
153
+ pytest
154
+ ```
155
+
156
+ ---
157
+
158
+ ## Design Principles
159
+
160
+ - **Keep the core compact.** Nodes stay stateless and the scheduler stays simple, with minimal opinionated design and maximum freedom for users to compose their own workflow patterns. Handlers capture their own context (event bus, metrics, side effects) so the framework never grows special cases for custom behaviour.
161
+ - **Push heavy lifting to the edge.** Long-running work should run via remote APIs, threads, or separate nodes/clusters. We avoid building a distributed fan-out scheduler; users orchestrate their own parallelism while the engine focuses on deterministic single-node execution.
162
+ - **Flexible aggregation semantics.** Aggregator nodes may proceed when only a subset of upstream nodes finish — as long as those nodes reach a terminal state. Fail-fast and error-tolerance are user-level workflow patterns, and the engine stays policy-light so users can implement either.
163
+ - **Retries live with handlers.** The framework doesn't implement automatic retries. Each handler decides whether to retry, abort, or compensate, keeping recovery logic close to the business code.
164
+ - **Pluggable concurrency.** A shared ConcurrencyManager (semaphore or priority-aware) controls global slots. Multiple schedulers can share one manager, but there's no hidden magic — users choose the policy, preserving clarity and control.
165
+ - **Recovery through snapshots.** The engine snapshots execution state and event logs so users can resume or replay runs without re-executing nodes. Payloads flow naturally between nodes, satisfying replay needs without extra APIs.
@@ -0,0 +1,150 @@
1
+ # LoopGraph
2
+
3
+ **The agent workflow engine that treats loops as first-class citizens.**
4
+
5
+ Build graph-based AI agent workflows where cycles, re-entry, and iterative reasoning are native — not hacks. More intuitive and lightweight than LangGraph, with loops as a first-class primitive.
6
+
7
+ - **Zero dependencies** — pure Python 3.10+, nothing to install beyond your own agents
8
+ - **Native loop support** — cycles in your graph are validated, tracked, and safe by design
9
+ - **Event-driven** — every state transition emits events; hook in logging, metrics, or triggers anywhere
10
+ - **Async-first** — built on `asyncio`, handles concurrent nodes without blocking
11
+ - **Recoverable** — snapshot and replay any run from any point
12
+
13
+ ```bash
14
+ pip install loopgraph
15
+ ```
16
+
17
+ ---
18
+
19
+ ## Quickstart
20
+
21
+ ```python
22
+ import asyncio
23
+ from loopgraph.core.graph import Graph, Node, Edge, NodeKind
24
+ from loopgraph.bus.eventbus import EventBus
25
+ from loopgraph.registry.function_registry import FunctionRegistry
26
+ from loopgraph.scheduler.scheduler import Scheduler
27
+
28
+ async def my_agent(payload):
29
+ # your agent logic here
30
+ return {"result": "done", "loop_again": False}
31
+
32
+ async def router(payload):
33
+ # return the next node id
34
+ return "end" if not payload.get("loop_again") else "agent"
35
+
36
+ graph = Graph(
37
+ nodes=[
38
+ Node(id="agent", kind=NodeKind.TASK),
39
+ Node(id="router", kind=NodeKind.SWITCH),
40
+ Node(id="end", kind=NodeKind.TASK),
41
+ ],
42
+ edges=[
43
+ Edge(source="agent", target="router"),
44
+ Edge(source="router", target="agent"), # the loop back-edge
45
+ Edge(source="router", target="end"),
46
+ ],
47
+ entry="agent",
48
+ )
49
+
50
+ registry = FunctionRegistry()
51
+ registry.register("agent", my_agent)
52
+ registry.register("router", router)
53
+ registry.register("end", lambda p: p)
54
+
55
+ bus = EventBus()
56
+ scheduler = Scheduler(graph=graph, registry=registry, bus=bus)
57
+
58
+ asyncio.run(scheduler.run(payload={"input": "hello"}))
59
+ ```
60
+
61
+ ---
62
+
63
+ ## Why LoopGraph?
64
+
65
+ Most workflow engines assume a DAG — a graph with no cycles. That works for linear pipelines, but agent workflows are inherently iterative: an agent reasons, reflects, decides to try again, and loops back. Forcing that into a DAG requires awkward workarounds.
66
+
67
+ LoopGraph makes loops explicit and safe:
68
+
69
+ - **Back-edges are first-class** — declare a cycle in your graph and the engine handles reset, visit tracking, and state management automatically
70
+ - **Loop safety** — the engine validates your graph at construction time; overlapping loops that share nodes are rejected before anything runs
71
+ - **Full observability** — every loop iteration emits events (`NODE_SCHEDULED`, `NODE_COMPLETED`, `NODE_FAILED`) so you always know where you are
72
+
73
+ ---
74
+
75
+ ## Event Hooks
76
+
77
+ Subscribe to any workflow event to add logging, metrics, or side effects:
78
+
79
+ ```python
80
+ from loopgraph.core.types import EventType
81
+
82
+ async def on_completed(event):
83
+ print(f"{event.node_id} finished → {event.payload}")
84
+
85
+ bus.subscribe(EventType.NODE_COMPLETED, on_completed)
86
+ ```
87
+
88
+ Available events: `NODE_SCHEDULED`, `NODE_STARTED`, `NODE_COMPLETED`, `NODE_FAILED`.
89
+
90
+ ---
91
+
92
+ ## Custom Events from Handlers
93
+
94
+ Wrap your handler in a closure to emit custom events mid-execution:
95
+
96
+ ```python
97
+ def make_handler(bus, base_handler):
98
+ async def wrapper(payload):
99
+ await bus.emit(Event(id="pre", graph_id="g", node_id="n",
100
+ type=EventType.NODE_SCHEDULED, payload={"stage": "pre"}))
101
+ result = await base_handler(payload)
102
+ await bus.emit(Event(id="post", graph_id="g", node_id="n",
103
+ type=EventType.NODE_COMPLETED, payload={"stage": "post"}))
104
+ return result
105
+ return wrapper
106
+
107
+ registry.register("my_node", make_handler(bus, my_agent))
108
+ ```
109
+
110
+ ---
111
+
112
+ ## Loop Re-entry Rules
113
+
114
+ - Re-entry is triggered by a `SWITCH` node selecting a back-edge
115
+ - Only `COMPLETED` nodes can be reset for re-entry
116
+ - Reset clears upstream-completion tracking and preserves cumulative `visit_count`
117
+ - Overlapping loops sharing any node are rejected at graph construction time
118
+
119
+ ---
120
+
121
+ ## Installation
122
+
123
+ ```bash
124
+ pip install loopgraph
125
+ ```
126
+
127
+ Requires Python 3.10+. No runtime dependencies.
128
+
129
+ ---
130
+
131
+ ## Development
132
+
133
+ ```bash
134
+ git clone https://github.com/your-org/loopgraph
135
+ cd loopgraph
136
+ python3 -m venv .venv && source .venv/bin/activate
137
+ pip install -e ".[test,lint]"
138
+ pytest
139
+ ```
140
+
141
+ ---
142
+
143
+ ## Design Principles
144
+
145
+ - **Keep the core compact.** Nodes stay stateless and the scheduler stays simple, with minimal opinionated design and maximum freedom for users to compose their own workflow patterns. Handlers capture their own context (event bus, metrics, side effects) so the framework never grows special cases for custom behaviour.
146
+ - **Push heavy lifting to the edge.** Long-running work should run via remote APIs, threads, or separate nodes/clusters. We avoid building a distributed fan-out scheduler; users orchestrate their own parallelism while the engine focuses on deterministic single-node execution.
147
+ - **Flexible aggregation semantics.** Aggregator nodes may proceed when only a subset of upstream nodes finish — as long as those nodes reach a terminal state. Fail-fast and error-tolerance are user-level workflow patterns, and the engine stays policy-light so users can implement either.
148
+ - **Retries live with handlers.** The framework doesn't implement automatic retries. Each handler decides whether to retry, abort, or compensate, keeping recovery logic close to the business code.
149
+ - **Pluggable concurrency.** A shared ConcurrencyManager (semaphore or priority-aware) controls global slots. Multiple schedulers can share one manager, but there's no hidden magic — users choose the policy, preserving clarity and control.
150
+ - **Recovery through snapshots.** The engine snapshots execution state and event logs so users can resume or replay runs without re-executing nodes. Payloads flow naturally between nodes, satisfying replay needs without extra APIs.
@@ -0,0 +1,38 @@
1
+ """LoopGraph package root exporting shared logging helpers."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import logging
6
+
7
+ from ._debug import (
8
+ log_branch,
9
+ log_loop_iteration,
10
+ log_parameter,
11
+ log_variable_change,
12
+ )
13
+
14
# Debug-logging helpers re-exported from ``._debug``.
# NOTE(review): ``configure_default_logging`` (defined below) is not listed
# here, so it is excluded from ``from loopgraph import *`` — confirm intentional.
__all__ = [
    "log_branch",
    "log_loop_iteration",
    "log_parameter",
    "log_variable_change",
]
20
+
21
+
22
def configure_default_logging(level: int = logging.DEBUG) -> None:
    """Install a process-wide default logging configuration for debugging.

    Resets the root logger's handlers (``force=True``) so repeated calls are
    safe, then records the resulting levels of the ``loopgraph`` logger.

    >>> configure_default_logging()
    >>> logging.getLogger("loopgraph").getEffectiveLevel() == logging.DEBUG
    True
    """
    func_name = "configure_default_logging"
    log_parameter(func_name, level=level)
    logging.basicConfig(level=level, force=True)
    pkg_logger = logging.getLogger("loopgraph")
    log_variable_change(func_name, "logger_level", pkg_logger.level)
    log_variable_change(func_name, "effective_level", pkg_logger.getEffectiveLevel())
@@ -0,0 +1,45 @@
1
+ """Utilities to standardise verbose debug logging across the codebase."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import logging
6
+ from typing import Any
7
+
8
# Shared module-level logger; every helper in this module emits through it.
LOGGER = logging.getLogger("loopgraph")
9
+
10
+
11
def log_parameter(func_name: str, **params: Any) -> None:
    """Emit a debug record of every keyword parameter passed to *func_name*.

    >>> log_parameter("demo_func", alpha=1, beta="two")
    """
    # logging.getLogger returns the same singleton "loopgraph" logger each call.
    logging.getLogger("loopgraph").debug(
        "function=%s parameters=%r", func_name, params
    )
17
+
18
+
19
def log_variable_change(func_name: str, name: str, value: Any) -> None:
    """Emit a debug record that *name* took *value* inside *func_name*.

    >>> log_variable_change("demo_func", "counter", 10)
    """
    logging.getLogger("loopgraph").debug(
        "function=%s variable=%s value=%r", func_name, name, value
    )
25
+
26
+
27
def log_branch(func_name: str, branch: str) -> None:
    """Emit a debug record naming the conditional branch taken in *func_name*.

    >>> log_branch("demo_func", "if_true")
    """
    logging.getLogger("loopgraph").debug("function=%s branch=%s", func_name, branch)
33
+
34
+
35
def log_loop_iteration(func_name: str, loop_name: str, iteration: int) -> None:
    """Emit a debug record of a loop's iteration count for diagnostics.

    >>> log_loop_iteration("demo_func", "main", 3)
    """
    message = "function=%s loop=%s iteration=%d"
    logging.getLogger("loopgraph").debug(message, func_name, loop_name, iteration)
@@ -0,0 +1,5 @@
1
+ """Event bus implementations."""
2
+
3
+ from .eventbus import ErrorHandler, Event, EventBus, EventListener
4
+
5
+ __all__ = ["ErrorHandler", "Event", "EventBus", "EventListener"]
@@ -0,0 +1,186 @@
1
+ """Asynchronous event bus implementation."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import asyncio
6
+ import time
7
+ from dataclasses import dataclass, field
8
+ from typing import Any, Callable, Coroutine, Dict, List, Optional
9
+
10
+ from .._debug import (
11
+ log_branch,
12
+ log_loop_iteration,
13
+ log_parameter,
14
+ log_variable_change,
15
+ )
16
+ from ..core.types import EventType, NodeStatus
17
+
18
+ EventListener = Callable[["Event"], Coroutine[Any, Any, None]]
19
+ ErrorHandler = Callable[[Exception, "Event"], Coroutine[Any, Any, None]]
20
+
21
+
22
@dataclass(frozen=True)
class Event:
    """Immutable record of a single occurrence during workflow execution.

    >>> evt = Event(
    ...     id="evt-1",
    ...     graph_id="graph-1",
    ...     node_id="node-1",
    ...     type=EventType.NODE_COMPLETED,
    ... )
    >>> evt.type
    <EventType.NODE_COMPLETED: 'node_completed'>
    """

    id: str                 # unique identifier for this event
    graph_id: str           # graph the event belongs to
    node_id: Optional[str]  # None for workflow-level events (see EventBus.emit doctest)
    type: EventType
    payload: Any = None
    # time.time is itself a zero-argument callable, so it can be passed as the
    # default_factory directly — the previous lambda wrapper was redundant.
    timestamp: float = field(default_factory=time.time)
    # NOTE(review): presumably True when the event is re-emitted from a
    # persisted log — confirm against the persistence layer.
    replay: bool = False
    visit_count: Optional[int] = None
    status: Optional[NodeStatus] = None
45
+
46
+
47
class EventBus:
    """Simple in-memory event bus.

    Listeners are stored per event type; the key ``None`` collects wildcard
    listeners that receive every event.

    >>> async def demo():
    ...     bus = EventBus()
    ...     received = []
    ...
    ...     async def listener(event: Event) -> None:
    ...         received.append(event.id)
    ...
    ...     bus.subscribe(EventType.NODE_COMPLETED, listener)
    ...     await bus.emit(
    ...         Event(
    ...             id="evt",
    ...             graph_id="g",
    ...             node_id="n",
    ...             type=EventType.NODE_COMPLETED,
    ...         )
    ...     )
    ...     return received
    >>> asyncio.run(demo())
    ['evt']
    """

    def __init__(self, on_error: Optional[ErrorHandler] = None) -> None:
        """Initialize the event bus.

        Args:
            on_error: Optional async callback invoked when a listener raises an exception.
                Signature: async def handler(exc: Exception, event: Event) -> None
                If on_error itself raises, the exception propagates to the caller.
        """
        func_name = "EventBus.__init__"
        log_parameter(func_name, on_error=on_error)
        # Maps event type -> listeners; the ``None`` key holds wildcard listeners.
        self._listeners: Dict[Optional[EventType], List[EventListener]] = {}
        self._on_error = on_error
        log_variable_change(func_name, "self._listeners", self._listeners)
        log_variable_change(func_name, "self._on_error", self._on_error)

    def subscribe(
        self,
        event_type: Optional[EventType],
        listener: EventListener,
    ) -> None:
        """Register a listener for a specific event type or all events.

        Args:
            event_type: Event type to listen for, or ``None`` for every event.
            listener: Async callable invoked with each matching ``Event``.

        >>> bus = EventBus()
        >>> async def noop(_event: Event) -> None:
        ...     pass
        >>> bus.subscribe(None, noop)
        """
        func_name = "EventBus.subscribe"
        log_parameter(func_name, event_type=event_type, listener=listener)
        # setdefault creates the listener bucket on first subscription.
        listeners = self._listeners.setdefault(event_type, [])
        log_variable_change(func_name, "listeners_before", list(listeners))
        listeners.append(listener)
        log_variable_change(func_name, "listeners_after", list(listeners))

    def unsubscribe(
        self,
        event_type: Optional[EventType],
        listener: EventListener,
    ) -> None:
        """Remove a listener from the bus if present.

        Removing a listener that was never subscribed is a silent no-op
        (only a debug branch record is emitted).

        >>> bus = EventBus()
        >>> async def noop(_event: Event) -> None:
        ...     pass
        >>> bus.subscribe(None, noop)
        >>> bus.unsubscribe(None, noop)
        """
        func_name = "EventBus.unsubscribe"
        log_parameter(func_name, event_type=event_type, listener=listener)
        listeners = self._listeners.get(event_type, [])
        log_variable_change(func_name, "listeners_before", list(listeners))
        if listener in listeners:
            log_branch(func_name, "listener_present")
            listeners.remove(listener)
            log_variable_change(func_name, "listeners_after", list(listeners))
        else:
            log_branch(func_name, "listener_missing")

    async def emit(self, event: Event) -> List[Any]:
        """Emit an event to all registered listeners.

        Listeners run concurrently as tasks; ``gather(return_exceptions=True)``
        ensures one failing listener does not cancel the others. The returned
        list therefore may contain exception instances alongside listener
        return values.

        If a listener raises an exception and an on_error handler is configured,
        the handler is invoked with the exception and event. If on_error raises,
        the exception propagates to the caller.

        >>> async def demo():
        ...     bus = EventBus()
        ...     events: List[str] = []
        ...
        ...     async def collector(evt: Event) -> None:
        ...         events.append(evt.id)
        ...
        ...     bus.subscribe(None, collector)
        ...     await bus.emit(
        ...         Event(
        ...             id="evt-1",
        ...             graph_id="g",
        ...             node_id=None,
        ...             type=EventType.WORKFLOW_COMPLETED,
        ...         )
        ...     )
        ...     return events
        >>> asyncio.run(demo())
        ['evt-1']
        """
        func_name = "EventBus.emit"
        log_parameter(func_name, event=event)
        # Snapshot both buckets so listeners (un)subscribing during dispatch
        # cannot mutate the lists we iterate.
        listeners = list(self._listeners.get(event.type, []))
        log_variable_change(func_name, "typed_listeners", listeners)
        global_listeners = list(self._listeners.get(None, []))
        log_variable_change(func_name, "global_listeners", global_listeners)
        # Typed listeners are dispatched before wildcard listeners.
        all_listeners: List[EventListener] = listeners + global_listeners
        log_variable_change(func_name, "all_listeners", all_listeners)
        if not all_listeners:
            log_branch(func_name, "no_listeners")
            return []
        log_branch(func_name, "dispatch_listeners")
        tasks: List[asyncio.Task[None]] = []
        log_variable_change(func_name, "tasks", tasks)
        for iteration, listener in enumerate(all_listeners):
            log_loop_iteration(func_name, "listeners", iteration)
            task = asyncio.create_task(listener(event))
            log_variable_change(func_name, "task", task)
            tasks.append(task)
            log_variable_change(func_name, "tasks", list(tasks))
        results = await asyncio.gather(*tasks, return_exceptions=True)
        log_variable_change(func_name, "results", results)

        # Invoke on_error handler for any exceptions
        # NOTE(review): BaseException subclasses that are not Exception (e.g.
        # asyncio.CancelledError) can appear in ``results`` but bypass this
        # isinstance check and are returned unhandled — confirm intended.
        if self._on_error is not None:
            for result in results:
                if isinstance(result, Exception):
                    log_branch(func_name, "on_error_invoked")
                    await self._on_error(result, event)

        return results
@@ -0,0 +1,5 @@
1
+ """Concurrency policy definitions."""
2
+
3
+ from .policies import ConcurrencyManager, PrioritySemaphorePolicy, SemaphorePolicy
4
+
5
+ __all__ = ["ConcurrencyManager", "SemaphorePolicy", "PrioritySemaphorePolicy"]