quartermaster-engine 0.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. quartermaster_engine/__init__.py +75 -0
  2. quartermaster_engine/context/__init__.py +6 -0
  3. quartermaster_engine/context/execution_context.py +64 -0
  4. quartermaster_engine/context/node_execution.py +84 -0
  5. quartermaster_engine/dispatchers/__init__.py +8 -0
  6. quartermaster_engine/dispatchers/async_dispatcher.py +93 -0
  7. quartermaster_engine/dispatchers/base.py +41 -0
  8. quartermaster_engine/dispatchers/sync_dispatcher.py +31 -0
  9. quartermaster_engine/dispatchers/thread_dispatcher.py +48 -0
  10. quartermaster_engine/events.py +68 -0
  11. quartermaster_engine/example_runner.py +704 -0
  12. quartermaster_engine/memory/__init__.py +6 -0
  13. quartermaster_engine/memory/flow_memory.py +50 -0
  14. quartermaster_engine/memory/persistent_memory.py +101 -0
  15. quartermaster_engine/messaging/__init__.py +6 -0
  16. quartermaster_engine/messaging/context_manager.py +137 -0
  17. quartermaster_engine/messaging/message_router.py +171 -0
  18. quartermaster_engine/nodes.py +85 -0
  19. quartermaster_engine/runner/__init__.py +5 -0
  20. quartermaster_engine/runner/flow_runner.py +567 -0
  21. quartermaster_engine/stores/__init__.py +6 -0
  22. quartermaster_engine/stores/base.py +61 -0
  23. quartermaster_engine/stores/memory_store.py +61 -0
  24. quartermaster_engine/stores/sqlite_store.py +245 -0
  25. quartermaster_engine/traversal/__init__.py +6 -0
  26. quartermaster_engine/traversal/traverse_in.py +73 -0
  27. quartermaster_engine/traversal/traverse_out.py +96 -0
  28. quartermaster_engine/types.py +64 -0
  29. quartermaster_engine-0.0.1.dist-info/METADATA +457 -0
  30. quartermaster_engine-0.0.1.dist-info/RECORD +32 -0
  31. quartermaster_engine-0.0.1.dist-info/WHEEL +4 -0
  32. quartermaster_engine-0.0.1.dist-info/licenses/LICENSE +190 -0
@@ -0,0 +1,75 @@
1
+ """quartermaster-engine: Execution engine for AI agent graphs."""
2
+
3
+ from quartermaster_engine.context.execution_context import ExecutionContext
4
+ from quartermaster_engine.context.node_execution import NodeExecution, NodeStatus
5
+ from quartermaster_engine.events import (
6
+ FlowError,
7
+ FlowEvent,
8
+ FlowFinished,
9
+ NodeFinished,
10
+ NodeStarted,
11
+ TokenGenerated,
12
+ UserInputRequired,
13
+ )
14
+ from quartermaster_engine.memory.flow_memory import FlowMemory
15
+ from quartermaster_engine.memory.persistent_memory import InMemoryPersistence, PersistentMemory
16
+ from quartermaster_engine.messaging.context_manager import ContextManager
17
+ from quartermaster_engine.messaging.message_router import MessageRouter
18
+ from quartermaster_engine.runner.flow_runner import FlowRunner
19
+ from quartermaster_engine.stores.base import ExecutionStore
20
+ from quartermaster_engine.example_runner import run_graph
21
+ from quartermaster_engine.stores.memory_store import InMemoryStore
22
+ from quartermaster_engine.types import (
23
+ ErrorStrategy,
24
+ GraphEdge,
25
+ GraphNode,
26
+ Message,
27
+ MessageRole,
28
+ MessageType,
29
+ NodeType,
30
+ ThoughtType,
31
+ TraverseIn,
32
+ TraverseOut,
33
+ )
34
+
35
+ __all__ = [
36
+ # Context
37
+ "ExecutionContext",
38
+ "NodeExecution",
39
+ "NodeStatus",
40
+ # Types & Enums
41
+ "NodeType",
42
+ "TraverseIn",
43
+ "TraverseOut",
44
+ "ThoughtType",
45
+ "MessageType",
46
+ "ErrorStrategy",
47
+ "GraphNode",
48
+ "GraphEdge",
49
+ "Message",
50
+ "MessageRole",
51
+ # Events
52
+ "FlowEvent",
53
+ "NodeStarted",
54
+ "TokenGenerated",
55
+ "NodeFinished",
56
+ "FlowFinished",
57
+ "UserInputRequired",
58
+ "FlowError",
59
+ # Stores
60
+ "ExecutionStore",
61
+ "InMemoryStore",
62
+ # Runner
63
+ "FlowRunner",
64
+ # Memory
65
+ "FlowMemory",
66
+ "PersistentMemory",
67
+ "InMemoryPersistence",
68
+ # Messaging
69
+ "MessageRouter",
70
+ "ContextManager",
71
+ # Example runner
72
+ "run_graph",
73
+ ]
74
+
75
+ __version__ = "0.1.0"
@@ -0,0 +1,6 @@
1
+ """Execution context and node state tracking."""
2
+
3
+ from quartermaster_engine.context.execution_context import ExecutionContext
4
+ from quartermaster_engine.context.node_execution import NodeExecution, NodeStatus
5
+
6
+ __all__ = ["ExecutionContext", "NodeExecution", "NodeStatus"]
@@ -0,0 +1,64 @@
1
+ """Execution context — the runtime state passed to each node during flow execution."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from collections.abc import Callable
6
+ from dataclasses import dataclass, field
7
+ from typing import Any
8
+ from uuid import UUID
9
+
10
+ from quartermaster_engine.context.node_execution import NodeStatus
11
+ from quartermaster_engine.types import AgentGraph, GraphNode, Message
12
+
13
+
14
@dataclass
class ExecutionContext:
    """Runtime context for node execution.

    Carries the full state needed by a node to execute: the graph definition,
    current node reference, conversation history, flow-scoped memory, and
    callbacks for streaming and status updates.
    """

    flow_id: UUID  # flow this execution belongs to
    node_id: UUID  # node currently being executed
    graph: AgentGraph  # full graph the flow is running over
    current_node: GraphNode  # resolved node object for node_id
    messages: list[Message] = field(default_factory=list)  # conversation history so far
    memory: dict[str, Any] = field(default_factory=dict)  # flow-scoped scratch memory
    metadata: dict[str, Any] = field(default_factory=dict)  # context-level metadata (see get_meta)

    # Execution state
    status: NodeStatus = NodeStatus.PENDING
    parent_context: ExecutionContext | None = None  # presumably set for nested/sub-flow runs — confirm with FlowRunner

    # Callbacks for real-time streaming; all optional, emit_* are no-ops when unset
    on_message: Callable[[str], None] | None = None
    on_status_change: Callable[[NodeStatus], None] | None = None
    on_token: Callable[[str], None] | None = None

    def get_meta(self, key: str, default: Any = None) -> Any:
        """Get a value from the node's metadata, falling back to this
        context's own ``metadata`` dict, then to *default*.

        Note: despite earlier wording, the fallback is ``self.metadata``
        (context metadata), not the graph's metadata.
        """
        # Node-level metadata takes precedence over context metadata.
        if key in self.current_node.metadata:
            return self.current_node.metadata[key]
        return self.metadata.get(key, default)

    def set_meta(self, key: str, value: Any) -> None:
        """Set a metadata value on this context (not on the node)."""
        self.metadata[key] = value

    def emit_token(self, token: str) -> None:
        """Emit a streaming token if a callback is registered; no-op otherwise."""
        if self.on_token:
            self.on_token(token)

    def emit_message(self, content: str) -> None:
        """Emit a complete message if a callback is registered; no-op otherwise."""
        if self.on_message:
            self.on_message(content)

    def update_status(self, status: NodeStatus) -> None:
        """Update this context's status and fire the on_status_change callback."""
        self.status = status
        if self.on_status_change:
            self.on_status_change(status)
@@ -0,0 +1,84 @@
1
+ """Node execution state tracking."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from dataclasses import dataclass, field
6
+ from datetime import UTC, datetime
7
+ from enum import Enum
8
+ from typing import Any
9
+ from uuid import UUID
10
+
11
+
12
class NodeStatus(str, Enum):
    """Lifecycle status of a node during flow execution."""

    PENDING = "pending"
    RUNNING = "running"
    WAITING_USER = "waiting_user"
    WAITING_TOOL = "waiting_tool"
    FINISHED = "finished"
    FAILED = "failed"
    SKIPPED = "skipped"

    @property
    def is_terminal(self) -> bool:
        """Whether this status represents a completed state."""
        done_states = {NodeStatus.FINISHED, NodeStatus.FAILED, NodeStatus.SKIPPED}
        return self in done_states

    @property
    def is_active(self) -> bool:
        """Whether this status represents an active (non-terminal) state."""
        # A node is active once it has left PENDING but not yet terminated.
        if self == NodeStatus.PENDING:
            return False
        return not self.is_terminal
32
+
33
+
34
@dataclass
class NodeExecution:
    """Tracks the execution state of a single node within a flow."""

    node_id: UUID
    status: NodeStatus = NodeStatus.PENDING
    started_at: datetime | None = None
    finished_at: datetime | None = None
    result: str | None = None
    error: str | None = None
    retry_count: int = 0
    output_data: dict[str, Any] = field(default_factory=dict)

    def _close(self, status: NodeStatus) -> None:
        """Record a terminal *status* and stamp the finish time."""
        self.status = status
        self.finished_at = datetime.now(UTC)

    def start(self) -> None:
        """Mark this node as running and stamp the start time."""
        self.started_at = datetime.now(UTC)
        self.status = NodeStatus.RUNNING

    def finish(self, result: str | None = None, output_data: dict[str, Any] | None = None) -> None:
        """Mark this node as successfully finished, recording its result."""
        self._close(NodeStatus.FINISHED)
        self.result = result
        if output_data:
            self.output_data.update(output_data)

    def fail(self, error: str) -> None:
        """Mark this node as failed, recording the error text."""
        self._close(NodeStatus.FAILED)
        self.error = error

    def skip(self) -> None:
        """Mark this node as skipped."""
        self._close(NodeStatus.SKIPPED)

    def wait_for_user(self) -> None:
        """Mark this node as waiting for user input."""
        self.status = NodeStatus.WAITING_USER

    def wait_for_tool(self) -> None:
        """Mark this node as waiting for tool execution."""
        self.status = NodeStatus.WAITING_TOOL

    @property
    def duration_seconds(self) -> float | None:
        """Execution duration in seconds, or None if not yet finished."""
        if self.started_at is None or self.finished_at is None:
            return None
        elapsed = self.finished_at - self.started_at
        return elapsed.total_seconds()
@@ -0,0 +1,8 @@
1
+ """Task dispatchers — pluggable strategies for executing successor nodes."""
2
+
3
+ from quartermaster_engine.dispatchers.async_dispatcher import AsyncDispatcher
4
+ from quartermaster_engine.dispatchers.base import TaskDispatcher
5
+ from quartermaster_engine.dispatchers.sync_dispatcher import SyncDispatcher
6
+ from quartermaster_engine.dispatchers.thread_dispatcher import ThreadDispatcher
7
+
8
+ __all__ = ["AsyncDispatcher", "TaskDispatcher", "SyncDispatcher", "ThreadDispatcher"]
@@ -0,0 +1,93 @@
1
+ """Async dispatcher — executes nodes in parallel using asyncio.create_task.
2
+
3
+ Provides concurrent execution for branches using asyncio tasks. Ideal for
4
+ I/O-bound node execution in async web applications (FastAPI, aiohttp, etc.).
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ import asyncio
10
+ from collections.abc import Callable
11
+ from uuid import UUID
12
+
13
+
14
class AsyncDispatcher:
    """Execute nodes concurrently using asyncio tasks.

    Parallel branches are dispatched as asyncio tasks via ``asyncio.ensure_future``.
    ``wait_all()`` blocks until all pending tasks are done. Because the
    underlying ``execute_fn`` is synchronous (it comes from FlowRunner), each
    call is wrapped in ``loop.run_in_executor`` so the event loop is never
    blocked.
    """

    def __init__(self) -> None:
        self._tasks: list[asyncio.Task[None]] = []
        self._loop: asyncio.AbstractEventLoop | None = None

    def _get_loop(self) -> asyncio.AbstractEventLoop:
        """Return the running event loop, or create (and cache) a new one."""
        if self._loop is None or self._loop.is_closed():
            try:
                self._loop = asyncio.get_running_loop()
            except RuntimeError:
                # No loop running in this thread: own a private one.
                self._loop = asyncio.new_event_loop()
                asyncio.set_event_loop(self._loop)
        return self._loop

    def dispatch(
        self,
        flow_id: UUID,
        node_id: UUID,
        execute_fn: Callable[[UUID, UUID], None],
    ) -> None:
        """Dispatch a node for execution as an asyncio task.

        The synchronous *execute_fn* is scheduled on the event loop's default
        executor so it does not block the loop.

        Args:
            flow_id: The flow this node belongs to.
            node_id: The node to execute.
            execute_fn: A callable that executes the node — signature (flow_id, node_id).
        """
        loop = self._get_loop()

        async def _run() -> None:
            await loop.run_in_executor(None, execute_fn, flow_id, node_id)

        # NOTE(review): ensure_future is not thread-safe; this assumes
        # dispatch() is called on the loop's thread (or before the loop runs).
        task = asyncio.ensure_future(_run(), loop=loop)
        self._tasks.append(task)

    def wait_all(self) -> None:
        """Block until all dispatched tasks have completed.

        Gathers all pending asyncio tasks and re-raises any exceptions that
        occurred during branch execution.

        Raises:
            ExceptionGroup: If one or more branches raised.
        """
        if not self._tasks:
            return

        loop = self._get_loop()

        async def _gather() -> None:
            results = await asyncio.gather(*self._tasks, return_exceptions=True)
            exceptions = [r for r in results if isinstance(r, Exception)]
            self._tasks.clear()
            if exceptions:
                raise ExceptionGroup("Errors in async branches", exceptions)

        if loop.is_running():
            # Bug fix: the tasks are bound to *this* loop, so they must be
            # awaited on it. The previous code ran ``asyncio.run(_gather())``
            # on a fresh loop in a helper thread, which fails with
            # "Task got Future attached to a different loop". Instead,
            # schedule the gather on the tasks' own loop and block on the
            # thread-safe future.
            # NOTE(review): calling wait_all() from the event-loop thread
            # itself would deadlock; such callers should await the tasks
            # directly instead of blocking.
            future = asyncio.run_coroutine_threadsafe(_gather(), loop)
            future.result()
        else:
            loop.run_until_complete(_gather())

    def shutdown(self) -> None:
        """Cancel any remaining tasks and clean up resources.

        NOTE(review): a loop created by ``_get_loop()`` is deliberately left
        open — it was installed as the thread's default loop and may be reused.
        """
        for task in self._tasks:
            task.cancel()
        self._tasks.clear()
@@ -0,0 +1,41 @@
1
+ """TaskDispatcher protocol — pluggable execution strategy for successor nodes."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from collections.abc import Callable
6
+ from typing import Protocol
7
+ from uuid import UUID
8
+
9
+
10
class TaskDispatcher(Protocol):
    """Protocol for dispatching node execution tasks.

    Implementations control how successor nodes are executed:
    synchronously (in-process), via threads, asyncio, or external task queues.
    This is a structural (duck-typed) interface: any object providing these
    three methods satisfies it, no inheritance required.
    """

    def dispatch(
        self,
        flow_id: UUID,
        node_id: UUID,
        execute_fn: Callable[[UUID, UUID], None],
    ) -> None:
        """Dispatch a node for execution.

        Implementations may run *execute_fn* immediately or schedule it for
        later; completion is only guaranteed after ``wait_all()`` returns.

        Args:
            flow_id: The flow this node belongs to.
            node_id: The node to execute.
            execute_fn: A callable that executes the node — signature (flow_id, node_id).
        """
        ...

    def wait_all(self) -> None:
        """Block until all dispatched tasks have completed.

        Called at synchronization points (e.g., merge nodes) and at flow end.
        """
        ...

    def shutdown(self) -> None:
        """Clean up resources. Called when the flow runner is done."""
        ...
@@ -0,0 +1,31 @@
1
+ """Synchronous dispatcher — executes nodes immediately in the current thread.
2
+
3
+ Simple, predictable, and great for testing. No true parallelism.
4
+ """
5
+
6
+ from __future__ import annotations
7
+
8
+ from collections.abc import Callable
9
+ from uuid import UUID
10
+
11
+
12
class SyncDispatcher:
    """Execute nodes synchronously in the calling thread.

    The simplest dispatcher: each successor node runs to completion, in
    dispatch order, before control returns. There is no parallelism, and
    therefore no concurrency hazard — predictable and ideal for testing.
    """

    def dispatch(
        self,
        flow_id: UUID,
        node_id: UUID,
        execute_fn: Callable[[UUID, UUID], None],
    ) -> None:
        """Run *execute_fn* immediately, blocking until the node completes."""
        execute_fn(flow_id, node_id)

    def wait_all(self) -> None:
        """No-op: every dispatched node already finished inside dispatch()."""

    def shutdown(self) -> None:
        """No-op: this dispatcher holds no resources."""
@@ -0,0 +1,48 @@
1
+ """Thread-based dispatcher — executes nodes in parallel using a thread pool.
2
+
3
+ Provides true parallelism for branches. Suitable for I/O-bound node execution
4
+ (LLM API calls, tool invocations) which is the common case for agent graphs.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ from collections.abc import Callable
10
+ from concurrent.futures import Future, ThreadPoolExecutor
11
+ from uuid import UUID
12
+
13
+
14
class ThreadDispatcher:
    """Execute nodes in parallel using a thread pool.

    Each dispatched node is submitted to a shared ``ThreadPoolExecutor``;
    ``wait_all()`` blocks until every pending task has finished and surfaces
    any branch failures.
    """

    def __init__(self, max_workers: int = 4) -> None:
        # The pool lives for the dispatcher's lifetime; _futures tracks
        # everything dispatched since the last wait_all().
        self._pool = ThreadPoolExecutor(max_workers=max_workers)
        self._futures: list[Future[None]] = []

    def dispatch(
        self,
        flow_id: UUID,
        node_id: UUID,
        execute_fn: Callable[[UUID, UUID], None],
    ) -> None:
        """Submit the node to the pool and remember its future."""
        self._futures.append(self._pool.submit(execute_fn, flow_id, node_id))

    def wait_all(self) -> None:
        """Block until all dispatched tasks complete, then collect exceptions."""
        errors: list[Exception] = []
        for pending in self._futures:
            try:
                pending.result()
            except Exception as exc:  # collect so every branch is drained
                errors.append(exc)
        self._futures.clear()
        if errors:
            raise ExceptionGroup("Errors in parallel branches", errors)

    def shutdown(self) -> None:
        """Drain the pool (waiting for in-flight tasks) and drop tracked futures."""
        self._pool.shutdown(wait=True)
        self._futures.clear()
@@ -0,0 +1,68 @@
1
+ """Flow execution events for real-time streaming."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from dataclasses import dataclass, field
6
+ from typing import Any
7
+ from uuid import UUID
8
+
9
+ from quartermaster_engine.types import NodeType
10
+
11
+
12
@dataclass
class FlowEvent:
    """Base class for all flow execution events.

    Every event carries the id of the flow that produced it; subclasses
    add event-specific fields (which must therefore have defaults).
    """

    flow_id: UUID  # flow that emitted this event
17
+
18
+
19
@dataclass
class NodeStarted(FlowEvent):
    """Emitted when a node begins execution."""

    # Defaults exist only because the base class's flow_id is required;
    # UUID(int=0) is a nil-UUID placeholder, not a real node id.
    node_id: UUID = field(default_factory=lambda: UUID(int=0))
    node_type: NodeType = NodeType.START  # placeholder default; real type set by the emitter
    node_name: str = ""  # human-readable node name
26
+
27
+
28
@dataclass
class TokenGenerated(FlowEvent):
    """Emitted for each streaming token from an LLM node."""

    node_id: UUID = field(default_factory=lambda: UUID(int=0))  # nil-UUID placeholder default
    token: str = ""  # the streamed token text
34
+
35
+
36
@dataclass
class NodeFinished(FlowEvent):
    """Emitted when a node completes execution."""

    node_id: UUID = field(default_factory=lambda: UUID(int=0))  # nil-UUID placeholder default
    result: str = ""  # the node's textual result
    output_data: dict[str, Any] = field(default_factory=dict)  # structured outputs produced by the node
43
+
44
+
45
@dataclass
class FlowFinished(FlowEvent):
    """Emitted when the entire flow completes."""

    final_output: str = ""  # final textual output of the flow
    output_data: dict[str, Any] = field(default_factory=dict)  # structured outputs of the flow
51
+
52
+
53
@dataclass
class UserInputRequired(FlowEvent):
    """Emitted when a node is waiting for user input."""

    node_id: UUID = field(default_factory=lambda: UUID(int=0))  # nil-UUID placeholder default
    prompt: str = ""  # prompt text to present to the user
    # Presumably an empty list means free-form input — confirm with consumers.
    options: list[str] = field(default_factory=list)
60
+
61
+
62
@dataclass
class FlowError(FlowEvent):
    """Emitted when a node fails."""

    node_id: UUID = field(default_factory=lambda: UUID(int=0))  # nil-UUID placeholder default
    error: str = ""  # human-readable error description
    # NOTE(review): presumably True means the flow may continue — confirm
    # against the runner's error-strategy handling.
    recoverable: bool = False