loopgraph 0.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- loopgraph/__init__.py +38 -0
- loopgraph/_debug.py +45 -0
- loopgraph/bus/__init__.py +5 -0
- loopgraph/bus/eventbus.py +186 -0
- loopgraph/concurrency/__init__.py +5 -0
- loopgraph/concurrency/policies.py +181 -0
- loopgraph/core/__init__.py +18 -0
- loopgraph/core/graph.py +425 -0
- loopgraph/core/state.py +443 -0
- loopgraph/core/types.py +72 -0
- loopgraph/diagnostics/__init__.py +5 -0
- loopgraph/diagnostics/inspect.py +70 -0
- loopgraph/persistence/__init__.py +6 -0
- loopgraph/persistence/event_log.py +63 -0
- loopgraph/persistence/snapshot.py +52 -0
- loopgraph/py.typed +0 -0
- loopgraph/registry/__init__.py +1 -0
- loopgraph/registry/function_registry.py +117 -0
- loopgraph/scheduler/__init__.py +5 -0
- loopgraph/scheduler/scheduler.py +569 -0
- loopgraph-0.2.0.dist-info/METADATA +165 -0
- loopgraph-0.2.0.dist-info/RECORD +24 -0
- loopgraph-0.2.0.dist-info/WHEEL +5 -0
- loopgraph-0.2.0.dist-info/top_level.txt +1 -0
loopgraph/__init__.py
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
"""LoopGraph package root exporting shared logging helpers."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import logging
|
|
6
|
+
|
|
7
|
+
from ._debug import (
|
|
8
|
+
log_branch,
|
|
9
|
+
log_loop_iteration,
|
|
10
|
+
log_parameter,
|
|
11
|
+
log_variable_change,
|
|
12
|
+
)
|
|
13
|
+
|
|
14
|
+
__all__ = [
|
|
15
|
+
"log_branch",
|
|
16
|
+
"log_loop_iteration",
|
|
17
|
+
"log_parameter",
|
|
18
|
+
"log_variable_change",
|
|
19
|
+
]
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
def configure_default_logging(level: int = logging.DEBUG) -> None:
    """Install a process-wide default logging configuration for debugging.

    Replaces any existing root handlers (``force=True``) and then reports
    the resulting state of the ``loopgraph`` logger via the debug helpers.

    >>> configure_default_logging()
    >>> logging.getLogger("loopgraph").getEffectiveLevel() == logging.DEBUG
    True
    """
    func_name = "configure_default_logging"
    log_parameter(func_name, level=level)
    logging.basicConfig(level=level, force=True)
    package_logger = logging.getLogger("loopgraph")
    log_variable_change(func_name, "logger_level", package_logger.level)
    log_variable_change(func_name, "effective_level", package_logger.getEffectiveLevel())
"""Utilities to standardise verbose debug logging across the codebase."""

from __future__ import annotations

import logging
from typing import Any

# Single shared logger so every helper funnels into the "loopgraph" channel.
LOGGER = logging.getLogger("loopgraph")


def log_parameter(func_name: str, **fields: Any) -> None:
    """Emit one DEBUG record describing the arguments a function received.

    >>> log_parameter("demo_func", alpha=1, beta="two")
    """
    LOGGER.debug("function=%s parameters=%r", func_name, fields)


def log_variable_change(func_name: str, name: str, value: Any) -> None:
    """Emit one DEBUG record for an assignment, per the DevSOP rules.

    >>> log_variable_change("demo_func", "counter", 10)
    """
    LOGGER.debug("function=%s variable=%s value=%r", func_name, name, value)


def log_branch(func_name: str, branch: str) -> None:
    """Emit one DEBUG record naming the conditional branch that was taken.

    >>> log_branch("demo_func", "if_true")
    """
    LOGGER.debug("function=%s branch=%s", func_name, branch)


def log_loop_iteration(func_name: str, loop_name: str, iteration: int) -> None:
    """Emit one DEBUG record per loop pass for replay-friendly diagnostics.

    >>> log_loop_iteration("demo_func", "main", 3)
    """
    LOGGER.debug("function=%s loop=%s iteration=%d", func_name, loop_name, iteration)
|
|
@@ -0,0 +1,186 @@
|
|
|
1
|
+
"""Asynchronous event bus implementation."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import asyncio
|
|
6
|
+
import time
|
|
7
|
+
from dataclasses import dataclass, field
|
|
8
|
+
from typing import Any, Callable, Coroutine, Dict, List, Optional
|
|
9
|
+
|
|
10
|
+
from .._debug import (
|
|
11
|
+
log_branch,
|
|
12
|
+
log_loop_iteration,
|
|
13
|
+
log_parameter,
|
|
14
|
+
log_variable_change,
|
|
15
|
+
)
|
|
16
|
+
from ..core.types import EventType, NodeStatus
|
|
17
|
+
|
|
18
|
+
# Async callback awaited once per delivered event.
EventListener = Callable[["Event"], Coroutine[Any, Any, None]]
# Async callback awaited when a listener raises; receives the exception and the event.
ErrorHandler = Callable[[Exception, "Event"], Coroutine[Any, Any, None]]
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
@dataclass(frozen=True)
class Event:
    """Immutable event emitted during workflow execution.

    >>> evt = Event(
    ...     id="evt-1",
    ...     graph_id="graph-1",
    ...     node_id="node-1",
    ...     type=EventType.NODE_COMPLETED,
    ... )
    >>> evt.type
    <EventType.NODE_COMPLETED: 'node_completed'>
    """

    id: str  # unique event identifier
    graph_id: str  # graph the event belongs to
    node_id: Optional[str]  # None for events not tied to a single node (e.g. workflow-level)
    type: EventType
    payload: Any = None
    # Wall-clock emission time. ``time.time`` is itself a zero-argument
    # callable, so it can be the default_factory directly — no lambda needed.
    timestamp: float = field(default_factory=time.time)
    replay: bool = False  # presumably marks events re-emitted during replay — TODO confirm
    visit_count: Optional[int] = None
    status: Optional[NodeStatus] = None
+
|
|
46
|
+
|
|
47
|
+
class EventBus:
    """Simple in-memory event bus.

    >>> async def demo():
    ...     bus = EventBus()
    ...     received = []
    ...
    ...     async def listener(event: Event) -> None:
    ...         received.append(event.id)
    ...
    ...     bus.subscribe(EventType.NODE_COMPLETED, listener)
    ...     await bus.emit(
    ...         Event(
    ...             id="evt",
    ...             graph_id="g",
    ...             node_id="n",
    ...             type=EventType.NODE_COMPLETED,
    ...         )
    ...     )
    ...     return received
    >>> asyncio.run(demo())
    ['evt']
    """

    def __init__(self, on_error: Optional[ErrorHandler] = None) -> None:
        """Create an empty bus.

        Args:
            on_error: Optional async callback invoked when a listener raises an exception.
                Signature: async def handler(exc: Exception, event: Event) -> None
                If on_error itself raises, the exception propagates to the caller.
        """
        func_name = "EventBus.__init__"
        log_parameter(func_name, on_error=on_error)
        self._listeners: Dict[Optional[EventType], List[EventListener]] = {}
        self._on_error = on_error
        log_variable_change(func_name, "self._listeners", self._listeners)
        log_variable_change(func_name, "self._on_error", self._on_error)

    def subscribe(
        self,
        event_type: Optional[EventType],
        listener: EventListener,
    ) -> None:
        """Attach *listener* to *event_type*; ``None`` subscribes to every event.

        >>> bus = EventBus()
        >>> async def noop(_event: Event) -> None:
        ...     pass
        >>> bus.subscribe(None, noop)
        """
        func_name = "EventBus.subscribe"
        log_parameter(func_name, event_type=event_type, listener=listener)
        bucket = self._listeners.setdefault(event_type, [])
        log_variable_change(func_name, "listeners_before", list(bucket))
        bucket.append(listener)
        log_variable_change(func_name, "listeners_after", list(bucket))

    def unsubscribe(
        self,
        event_type: Optional[EventType],
        listener: EventListener,
    ) -> None:
        """Detach *listener* from *event_type*; a no-op when not registered.

        >>> bus = EventBus()
        >>> async def noop(_event: Event) -> None:
        ...     pass
        >>> bus.subscribe(None, noop)
        >>> bus.unsubscribe(None, noop)
        """
        func_name = "EventBus.unsubscribe"
        log_parameter(func_name, event_type=event_type, listener=listener)
        bucket = self._listeners.get(event_type, [])
        log_variable_change(func_name, "listeners_before", list(bucket))
        if listener in bucket:
            log_branch(func_name, "listener_present")
            bucket.remove(listener)
            log_variable_change(func_name, "listeners_after", list(bucket))
        else:
            log_branch(func_name, "listener_missing")

    async def emit(self, event: Event) -> List[Any]:
        """Deliver *event* concurrently to every matching listener.

        If a listener raises an exception and an on_error handler is configured,
        the handler is invoked with the exception and event. If on_error raises,
        the exception propagates to the caller.

        >>> async def demo():
        ...     bus = EventBus()
        ...     events: List[str] = []
        ...
        ...     async def collector(evt: Event) -> None:
        ...         events.append(evt.id)
        ...
        ...     bus.subscribe(None, collector)
        ...     await bus.emit(
        ...         Event(
        ...             id="evt-1",
        ...             graph_id="g",
        ...             node_id=None,
        ...             type=EventType.WORKFLOW_COMPLETED,
        ...         )
        ...     )
        ...     return events
        >>> asyncio.run(demo())
        ['evt-1']
        """
        func_name = "EventBus.emit"
        log_parameter(func_name, event=event)
        typed = list(self._listeners.get(event.type, []))
        log_variable_change(func_name, "typed_listeners", typed)
        catch_all = list(self._listeners.get(None, []))
        log_variable_change(func_name, "global_listeners", catch_all)
        targets: List[EventListener] = typed + catch_all
        log_variable_change(func_name, "all_listeners", targets)
        if not targets:
            log_branch(func_name, "no_listeners")
            return []
        log_branch(func_name, "dispatch_listeners")
        pending: List[asyncio.Task[None]] = []
        log_variable_change(func_name, "tasks", pending)
        for index, target in enumerate(targets):
            log_loop_iteration(func_name, "listeners", index)
            job = asyncio.create_task(target(event))
            log_variable_change(func_name, "task", job)
            pending.append(job)
            log_variable_change(func_name, "tasks", list(pending))
        outcomes = await asyncio.gather(*pending, return_exceptions=True)
        log_variable_change(func_name, "results", outcomes)

        # Route any captured listener exceptions to the configured handler.
        if self._on_error is not None:
            for outcome in outcomes:
                if isinstance(outcome, Exception):
                    log_branch(func_name, "on_error_invoked")
                    await self._on_error(outcome, event)

        return outcomes
|
@@ -0,0 +1,181 @@
|
|
|
1
|
+
"""Concurrency control policies."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import asyncio
|
|
6
|
+
import heapq
|
|
7
|
+
from contextlib import asynccontextmanager
|
|
8
|
+
from dataclasses import dataclass, field
|
|
9
|
+
from typing import AsyncContextManager, AsyncIterator, List, Protocol
|
|
10
|
+
|
|
11
|
+
from .._debug import log_branch, log_parameter, log_variable_change
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class ConcurrencyManager(Protocol):
    """Protocol describing a priority-aware concurrency controller.

    Implemented in this module by ``SemaphorePolicy`` (which ignores the
    priority) and ``PrioritySemaphorePolicy`` (which serves lower priority
    values first).
    """

    def slot(self, key: str, priority: int = 0) -> AsyncContextManager[None]:
        """Acquire a concurrency slot honoring the provided priority.

        Args:
            key: Diagnostic label identifying the caller requesting the slot.
            priority: Scheduling hint; implementations may ignore it or serve
                lower values first.

        Returns:
            An async context manager that holds the slot for its duration.
        """
        ...
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class SemaphorePolicy(ConcurrencyManager):
    """Manage shared concurrency with an asyncio semaphore.

    >>> async def demo():
    ...     policy = SemaphorePolicy(limit=2)
    ...     async with policy.slot("worker", priority=0):
    ...         return policy.available_permits
    >>> import asyncio
    >>> asyncio.run(demo())
    1
    """

    def __init__(self, limit: int) -> None:
        """Create a policy allowing at most *limit* concurrent slots.

        Raises:
            ValueError: If *limit* is not positive.
        """
        func_name = "SemaphorePolicy.__init__"
        log_parameter(func_name, limit=limit)
        if limit <= 0:
            log_branch(func_name, "invalid_limit")
            raise ValueError("limit must be positive")
        log_branch(func_name, "valid_limit")
        self._limit = limit
        log_variable_change(func_name, "self._limit", self._limit)
        self._semaphore = asyncio.Semaphore(limit)
        log_variable_change(func_name, "self._semaphore", self._semaphore)
        # Explicit permit mirror: asyncio.Semaphore exposes no public counter,
        # and reading its private ``_value`` attribute (as the previous
        # implementation did) is fragile across CPython versions.
        self._permits = limit
        log_variable_change(func_name, "self._permits", self._permits)

    @property
    def available_permits(self) -> int:
        """Return currently available semaphore permits.

        >>> policy = SemaphorePolicy(limit=1)
        >>> policy.available_permits
        1
        """
        func_name = "SemaphorePolicy.available_permits"
        log_parameter(func_name)
        permits = self._permits
        log_variable_change(func_name, "permits", permits)
        return permits

    def slot(self, key: str, priority: int = 0) -> AsyncContextManager[None]:
        """Acquire a semaphore slot for the duration of the context.

        Args:
            key: Diagnostic label for the acquiring task.
            priority: Accepted for interface compatibility with
                ``ConcurrencyManager``; this policy does not reorder waiters.
        """

        @asynccontextmanager
        async def _slot() -> AsyncIterator[None]:
            func_name = "SemaphorePolicy.slot"
            log_parameter(func_name, key=key, priority=priority)
            await self._semaphore.acquire()
            # No await between acquire/release and the bookkeeping below, so
            # the mirror counter stays consistent on the event loop.
            self._permits -= 1
            log_variable_change(
                func_name, "permits_after_acquire", self.available_permits
            )
            try:
                log_branch(func_name, "enter_context")
                yield
            finally:
                log_branch(func_name, "exit_context")
                self._semaphore.release()
                self._permits += 1
                log_variable_change(
                    func_name, "permits_after_release", self.available_permits
                )

        return _slot()
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
@dataclass(order=True)
class _PriorityEntry:
    """Heap entry ordered by ``(priority, order)``.

    ``order`` is a monotonically increasing sequence number, so entries with
    equal priority are served FIFO; ``key`` is excluded from comparisons so it
    never participates in heap ordering.
    """

    priority: int  # lower values are served first
    order: int  # insertion sequence number; FIFO tie-breaker within a priority
    key: str = field(compare=False)  # diagnostic label only, never compared
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
class PrioritySemaphorePolicy(ConcurrencyManager):
    """Semaphore implementation that releases slots in priority order.

    >>> async def demo():
    ...     policy = PrioritySemaphorePolicy(limit=1)
    ...     order: List[str] = []
    ...
    ...     async def worker(name: str, priority: int, delay: float = 0.0) -> None:
    ...         await asyncio.sleep(delay)
    ...         async with policy.slot(name, priority=priority):
    ...             order.append(name)
    ...
    ...     await asyncio.gather(
    ...         worker("low", priority=10, delay=0.01),
    ...         worker("high", priority=0, delay=0.0),
    ...     )
    ...     return order
    >>> import asyncio
    >>> asyncio.run(demo())
    ['high', 'low']
    """

    def __init__(self, limit: int) -> None:
        """Create a policy with at most *limit* concurrent slots.

        Raises:
            ValueError: If *limit* is not positive.
        """
        func_name = "PrioritySemaphorePolicy.__init__"
        log_parameter(func_name, limit=limit)
        if limit <= 0:
            log_branch(func_name, "invalid_limit")
            raise ValueError("limit must be positive")
        log_branch(func_name, "valid_limit")
        self._limit = limit
        log_variable_change(func_name, "self._limit", self._limit)
        self._available = limit
        log_variable_change(func_name, "self._available", self._available)
        self._queue: List[_PriorityEntry] = []
        log_variable_change(func_name, "self._queue", self._queue)
        self._order = 0
        log_variable_change(func_name, "self._order", self._order)
        self._condition = asyncio.Condition()
        log_variable_change(func_name, "self._condition", self._condition)

    def slot(self, key: str, priority: int = 0) -> AsyncContextManager[None]:
        """Acquire a slot honoring the lowest priority value first."""

        @asynccontextmanager
        async def _slot() -> AsyncIterator[None]:
            func_name = "PrioritySemaphorePolicy.slot"
            log_parameter(func_name, key=key, priority=priority)
            entry = _PriorityEntry(priority=priority, order=self._order, key=key)
            self._order += 1
            log_variable_change(func_name, "entry", entry)
            async with self._condition:
                heapq.heappush(self._queue, entry)
                log_variable_change(func_name, "queue", list(self._queue))
                try:
                    while not self._can_acquire(entry):
                        log_branch(func_name, "wait_for_turn")
                        await self._condition.wait()
                except BaseException:
                    # Bug fix: a waiter cancelled (or otherwise interrupted)
                    # inside Condition.wait() previously left its entry in the
                    # queue forever; once that stale entry reached the head,
                    # no other waiter could ever pass _can_acquire — deadlock.
                    # Condition.wait() re-acquires the lock before raising, so
                    # it is safe to mutate the queue here.
                    log_branch(func_name, "wait_interrupted")
                    self._queue.remove(entry)
                    heapq.heapify(self._queue)
                    # The removed entry may have been hiding the real head.
                    self._condition.notify_all()
                    raise
                log_branch(func_name, "acquired_priority_slot")
                self._available -= 1
                heapq.heappop(self._queue)
                log_variable_change(func_name, "self._available", self._available)
            try:
                log_branch(func_name, "enter_context")
                yield
            finally:
                async with self._condition:
                    log_branch(func_name, "release_slot")
                    self._available += 1
                    log_variable_change(func_name, "self._available", self._available)
                    self._condition.notify_all()

        return _slot()

    def _can_acquire(self, entry: _PriorityEntry) -> bool:
        """Return True if *entry* can take a slot.

        True only when a permit is free and *entry* is the current head of the
        priority queue (identity comparison, so duplicates cannot confuse it).
        """
        func_name = "PrioritySemaphorePolicy._can_acquire"
        log_parameter(func_name, entry=entry)
        if self._available <= 0:
            log_branch(func_name, "no_available_permits")
            return False
        if not self._queue:
            log_branch(func_name, "queue_empty")
            return False
        head = self._queue[0]
        log_variable_change(func_name, "head", head)
        if head is not entry:
            log_branch(func_name, "not_head_of_queue")
            return False
        log_branch(func_name, "can_acquire")
        return True
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
"""Core domain models for LoopGraph."""
|
|
2
|
+
|
|
3
|
+
from .graph import Edge, Graph, Node
|
|
4
|
+
from .state import ExecutionState, NodeRuntimeState, NodeVisit
|
|
5
|
+
from .types import EventType, NodeKind, NodeStatus, VisitOutcome
|
|
6
|
+
|
|
7
|
+
__all__ = [
|
|
8
|
+
"Edge",
|
|
9
|
+
"Graph",
|
|
10
|
+
"Node",
|
|
11
|
+
"ExecutionState",
|
|
12
|
+
"NodeRuntimeState",
|
|
13
|
+
"NodeKind",
|
|
14
|
+
"EventType",
|
|
15
|
+
"NodeStatus",
|
|
16
|
+
"NodeVisit",
|
|
17
|
+
"VisitOutcome",
|
|
18
|
+
]
|