agent-runtime-core 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,168 @@
1
+ """
2
+ SQLite event bus implementation.
3
+
4
+ Good for:
5
+ - Local development with persistence
6
+ - Single-process deployments
7
+ - Testing with real database
8
+ """
9
+
10
+ import asyncio
11
+ import json
12
+ import sqlite3
13
+ from contextlib import contextmanager
14
+ from datetime import datetime, timezone
15
+ from typing import AsyncIterator, Optional
16
+ from uuid import UUID
17
+
18
+ from agent_runtime.events.base import EventBus, Event
19
+
20
+
21
class SQLiteEventBus(EventBus):
    """
    SQLite-backed event bus implementation.

    Stores events in a local SQLite database. Subscriptions are served by
    polling, since SQLite has no push/notification mechanism.

    Good for local development with persistence, single-process deployments,
    and tests that want a real database.
    """

    def __init__(self, path: str = "agent_runtime.db"):
        """
        Args:
            path: Filesystem path of the SQLite database file.
        """
        self.path = path
        # Schema creation is deferred to first use; guarded by this flag.
        self._initialized = False

    @contextmanager
    def _get_connection(self):
        """Yield a connection with dict-style row access; always closed on exit."""
        conn = sqlite3.connect(self.path)
        conn.row_factory = sqlite3.Row
        try:
            yield conn
        finally:
            conn.close()

    def _ensure_initialized(self):
        """Create the events table and its indexes once per bus instance."""
        if self._initialized:
            return

        with self._get_connection() as conn:
            conn.execute("""
                CREATE TABLE IF NOT EXISTS events (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    run_id TEXT NOT NULL,
                    seq INTEGER NOT NULL,
                    event_type TEXT NOT NULL,
                    payload TEXT NOT NULL,
                    timestamp TEXT NOT NULL,
                    UNIQUE(run_id, seq)
                )
            """)
            conn.execute("""
                CREATE INDEX IF NOT EXISTS idx_events_run_id
                ON events(run_id)
            """)
            conn.execute("""
                CREATE INDEX IF NOT EXISTS idx_events_run_seq
                ON events(run_id, seq)
            """)
            conn.commit()

        self._initialized = True

    async def publish(self, event: Event) -> None:
        """
        Publish an event.

        Uses INSERT OR REPLACE so re-publishing the same (run_id, seq) is
        idempotent: the newer row wins instead of raising on the UNIQUE
        constraint.

        NOTE(review): the sqlite3 calls block the event loop; acceptable for
        the local/single-process use cases this bus targets.
        """
        self._ensure_initialized()

        with self._get_connection() as conn:
            conn.execute(
                """
                INSERT OR REPLACE INTO events (run_id, seq, event_type, payload, timestamp)
                VALUES (?, ?, ?, ?, ?)
                """,
                (
                    str(event.run_id),
                    event.seq,
                    event.event_type,
                    json.dumps(event.payload),
                    event.timestamp.isoformat(),
                ),
            )
            conn.commit()

    async def subscribe(
        self,
        run_id: UUID,
        from_seq: int = 0,
        check_complete: Optional[callable] = None,  # async () -> bool
        poll_interval: float = 0.5,
    ) -> AsyncIterator[Event]:
        """
        Subscribe to events for a run.

        Uses polling since SQLite doesn't support real-time notifications.

        Args:
            run_id: Run whose events to stream.
            from_seq: First sequence number to yield (inclusive).
            check_complete: Optional async predicate; when it returns True the
                subscription drains any remaining events and ends.
            poll_interval: Seconds to sleep between polls.

        Yields:
            Events in ascending seq order.
        """
        current_seq = from_seq

        while True:
            # Get new events since the last poll.
            for event in await self.get_events(run_id, from_seq=current_seq):
                yield event
                current_seq = event.seq + 1

            # Check if the run is complete.
            if check_complete and await check_complete():
                # Fix: drain events published between the fetch above and the
                # completion check, so trailing events are not silently lost.
                for event in await self.get_events(run_id, from_seq=current_seq):
                    yield event
                    current_seq = event.seq + 1
                break

            await asyncio.sleep(poll_interval)

    async def get_events(
        self,
        run_id: UUID,
        from_seq: int = 0,
        to_seq: Optional[int] = None,
    ) -> list[Event]:
        """
        Get historical events for a run.

        Args:
            run_id: Run to read.
            from_seq: Inclusive lower bound on sequence number.
            to_seq: Optional inclusive upper bound.

        Returns:
            Events ordered by ascending seq.
        """
        self._ensure_initialized()

        with self._get_connection() as conn:
            query = """
                SELECT run_id, seq, event_type, payload, timestamp
                FROM events
                WHERE run_id = ? AND seq >= ?
            """
            params = [str(run_id), from_seq]

            if to_seq is not None:
                query += " AND seq <= ?"
                params.append(to_seq)

            query += " ORDER BY seq ASC"

            rows = conn.execute(query, params).fetchall()

        return [
            Event(
                run_id=UUID(row["run_id"]),
                seq=row["seq"],
                event_type=row["event_type"],
                payload=json.loads(row["payload"]),
                timestamp=datetime.fromisoformat(row["timestamp"]),
            )
            for row in rows
        ]

    async def get_next_seq(self, run_id: UUID) -> int:
        """
        Get the next sequence number for a run.

        Returns:
            max(seq) + 1 for the run, or 0 if the run has no events yet.
        """
        self._ensure_initialized()

        with self._get_connection() as conn:
            row = conn.execute(
                "SELECT MAX(seq) as max_seq FROM events WHERE run_id = ?",
                (str(run_id),),
            ).fetchone()

        max_seq = row["max_seq"]
        return (max_seq + 1) if max_seq is not None else 0
@@ -0,0 +1,390 @@
1
+ """
2
+ Core interfaces for the agent runtime.
3
+
4
+ These interfaces are the stable public API. Everything else can change.
5
+ Agent frameworks (LangGraph, CrewAI, custom) adapt to these interfaces.
6
+
7
+ SEMVER PROTECTED - Breaking changes require major version bump.
8
+ """
9
+
10
import inspect
import traceback

from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from typing import Any, AsyncIterator, Callable, Optional, Protocol, TypedDict
from uuid import UUID
16
+
17
+
18
class EventType(str, Enum):
    """
    Standard event types emitted by agent runtimes.

    All agent frameworks must emit through these types. Inherits from str,
    so members compare equal to their string values and serialize as plain
    strings.
    """

    # Lifecycle events
    RUN_STARTED = "run.started"
    RUN_HEARTBEAT = "run.heartbeat"
    RUN_SUCCEEDED = "run.succeeded"
    RUN_FAILED = "run.failed"
    RUN_CANCELLED = "run.cancelled"
    RUN_TIMED_OUT = "run.timed_out"

    # Message events
    ASSISTANT_DELTA = "assistant.delta"  # Token streaming (optional)
    ASSISTANT_MESSAGE = "assistant.message"  # Complete message

    # Tool events
    TOOL_CALL = "tool.call"
    TOOL_RESULT = "tool.result"

    # State events
    STATE_CHECKPOINT = "state.checkpoint"
43
+
44
+
45
class Message(TypedDict, total=False):
    """
    Framework-neutral message format.

    Compatible with OpenAI, Anthropic, and other providers.

    total=False: every key is optional, so partial messages (e.g. a bare
    role/content pair) are valid instances.
    """

    role: str  # "system" | "user" | "assistant" | "tool"
    content: str | dict | list  # String or structured content
    name: Optional[str]  # For tool messages
    tool_call_id: Optional[str]  # For tool results
    tool_calls: Optional[list]  # For assistant tool calls
    metadata: dict  # Additional metadata
58
+
59
+
60
@dataclass
class RunResult:
    """
    Result returned by an agent runtime after execution.

    This is what the runner receives when an agent completes. All fields
    default to empty containers, so a minimal runtime may return
    ``RunResult()`` as-is.
    """

    final_output: dict = field(default_factory=dict)  # Structured final result
    final_messages: list[Message] = field(default_factory=list)  # Messages produced by the run
    usage: dict = field(default_factory=dict)  # Token usage, costs, etc.
    artifacts: dict = field(default_factory=dict)  # Files, images, etc.
72
+
73
+
74
@dataclass
class ErrorInfo:
    """
    Structured error information for failed runs.

    Returned by AgentRuntime.on_error (or overrides) to control retry
    behavior and record failure context.
    """

    type: str  # Error class name
    message: str  # Human-readable error description
    stack: str = ""  # Formatted traceback, if available
    retriable: bool = True  # Whether the run may be retried
    details: dict = field(default_factory=dict)  # Extra structured context
83
+
84
+
85
class RunContext(Protocol):
    """
    Context provided to agent runtimes during execution.

    This is what agent frameworks use to interact with the runtime.
    Implementations are provided by the runner; this Protocol only declares
    the structural interface (no implementation here).
    """

    @property
    def run_id(self) -> UUID:
        """Unique identifier for this run."""
        ...

    @property
    def conversation_id(self) -> Optional[UUID]:
        """Conversation this run belongs to (if any)."""
        ...

    @property
    def input_messages(self) -> list[Message]:
        """Input messages for this run."""
        ...

    @property
    def params(self) -> dict:
        """Additional parameters for this run."""
        ...

    @property
    def metadata(self) -> dict:
        """Metadata associated with this run (e.g., channel_id, user context)."""
        ...

    @property
    def tool_registry(self) -> "ToolRegistry":
        """Registry of available tools for this agent."""
        ...

    async def emit(self, event_type: EventType | str, payload: dict) -> None:
        """
        Emit an event to the event bus.

        Args:
            event_type: Type of event (use EventType enum)
            payload: Event payload data
        """
        ...

    async def checkpoint(self, state: dict) -> None:
        """
        Save a state checkpoint for recovery.

        Counterpart of get_state(), which reads back the latest checkpoint.

        Args:
            state: Serializable state to checkpoint
        """
        ...

    async def get_state(self) -> Optional[dict]:
        """
        Get the last checkpointed state.

        Returns:
            The last saved state, or None if no checkpoint exists.
        """
        ...

    def cancelled(self) -> bool:
        """
        Check if cancellation has been requested.

        Synchronous by design so it is cheap to call; agent runtimes should
        check this between steps.
        """
        ...
158
+
159
+
160
class AgentRuntime(ABC):
    """
    Base class for agent runtime implementations.

    Subclass this to create custom agent runtimes.
    Each runtime is identified by a unique key.
    """

    @property
    @abstractmethod
    def key(self) -> str:
        """
        Unique identifier for this runtime.

        Used to route runs to the correct runtime.
        """
        ...

    @abstractmethod
    async def run(self, ctx: RunContext) -> RunResult:
        """
        Execute an agent run.

        Args:
            ctx: Runtime context with input, tools, and event emission

        Returns:
            RunResult with final output and messages

        Raises:
            Exception: On unrecoverable errors (will be caught by runner)
        """
        ...

    async def cancel(self, ctx: RunContext) -> None:
        """
        Handle cancellation request.

        Override for custom cleanup. Default does nothing.
        Called when cancellation is requested but run is still active.
        """
        pass

    async def on_error(self, ctx: RunContext, error: Exception) -> Optional[ErrorInfo]:
        """
        Handle an error during execution.

        Override to customize error handling/classification.
        Return ErrorInfo to control retry behavior.

        The default classifies every error as retriable and captures the
        formatted traceback (previously the `stack` field was left empty,
        discarding debugging context the runner could have recorded).
        """
        return ErrorInfo(
            type=type(error).__name__,
            message=str(error),
            # error.__traceback__ may be None (e.g. error never raised);
            # format_exception then emits just the exception line.
            stack="".join(
                traceback.format_exception(type(error), error, error.__traceback__)
            ),
            retriable=True,
        )
215
+
216
+
217
@dataclass
class Tool:
    """Definition of a tool available to agents."""

    name: str  # Unique tool name, used for registry lookup and dispatch
    description: str  # Human/LLM-readable description of the tool
    parameters: dict  # JSON Schema for parameters
    handler: Callable  # async def handler(**kwargs) -> Any
    has_side_effects: bool = False  # True if the tool mutates external state
    requires_confirmation: bool = False  # True if each call needs approval
    metadata: dict = field(default_factory=dict)  # Additional metadata
228
+
229
+
230
# Alias for backwards compatibility: earlier releases exposed the class
# under the name ToolDefinition. Both names refer to the same class.
ToolDefinition = Tool
232
+
233
+
234
class ToolRegistry:
    """
    Registry of tools available to a specific agent.

    Tools are allow-listed per agent_key for security.
    """

    def __init__(self):
        # Keyed by tool name; later registrations overwrite earlier ones.
        self._tools: dict[str, "Tool"] = {}

    def register(self, tool: "Tool") -> None:
        """Register a tool (replacing any existing tool with the same name)."""
        self._tools[tool.name] = tool

    def get(self, name: str) -> Optional["Tool"]:
        """Get a tool by name, or None if not registered."""
        return self._tools.get(name)

    def list_tools(self) -> list["Tool"]:
        """List all registered tools."""
        return list(self._tools.values())

    def to_openai_format(self) -> list[dict]:
        """Convert tools to OpenAI function calling format."""
        return [
            {
                "type": "function",
                "function": {
                    "name": tool.name,
                    "description": tool.description,
                    "parameters": tool.parameters,
                },
            }
            for tool in self._tools.values()
        ]

    async def execute(self, name: str, arguments: dict) -> Any:
        """
        Execute a tool by name.

        Supports both async and sync handlers: the handler is called
        directly and its result awaited only if it is awaitable (the old
        unconditional `await` crashed on sync handlers).

        Args:
            name: Tool name
            arguments: Tool arguments

        Returns:
            Tool result

        Raises:
            KeyError: If tool not found
        """
        tool = self._tools.get(name)
        if tool is None:
            raise KeyError(f"Tool not found: {name}")
        result = tool.handler(**arguments)
        if inspect.isawaitable(result):
            result = await result
        return result
288
+
289
+
290
class LLMClient(ABC):
    """
    Abstract LLM client interface.

    Implementations: OpenAIClient, AnthropicClient, LiteLLMClient, etc.
    This abstraction is what makes the runtime model-agnostic.
    """

    @abstractmethod
    async def generate(
        self,
        messages: list[Message],
        *,
        model: Optional[str] = None,
        stream: bool = False,
        tools: Optional[list[dict]] = None,
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
        **kwargs,
    ) -> "LLMResponse":
        """
        Generate a completion from the LLM.

        Args:
            messages: Conversation messages
            model: Model identifier (uses default if not specified)
            stream: Whether to stream the response
            tools: Tool definitions in OpenAI format
            temperature: Sampling temperature
            max_tokens: Maximum tokens to generate
            **kwargs: Provider-specific parameters

        Returns:
            LLMResponse with message and usage info
        """
        ...

    @abstractmethod
    async def stream(
        self,
        messages: list[Message],
        *,
        model: Optional[str] = None,
        tools: Optional[list[dict]] = None,
        **kwargs,
    ) -> AsyncIterator["LLMStreamChunk"]:
        """
        Stream a completion from the LLM.

        NOTE(review): presumably concrete implementations are async
        generators consumed via `async for` — confirm against the concrete
        clients, since the declared return type is ambiguous about whether
        callers must first `await`.

        Yields:
            LLMStreamChunk objects with deltas
        """
        ...
343
+
344
+
345
@dataclass
class LLMResponse:
    """Response from a (non-streaming) LLM generation."""

    message: Message  # The generated assistant message
    usage: dict = field(default_factory=dict)  # prompt_tokens, completion_tokens, etc.
    model: str = ""  # Model that produced the response
    finish_reason: str = ""  # Provider finish reason (e.g. stop / length)
    raw_response: Optional[Any] = None  # Original provider response object, if kept
354
+
355
+
356
@dataclass
class LLMStreamChunk:
    """A chunk from a streaming LLM response."""

    delta: str = ""  # Incremental text content for this chunk
    tool_calls: Optional[list] = None  # Incremental tool-call data, if any
    finish_reason: Optional[str] = None  # Set on the final chunk
    usage: Optional[dict] = None  # Usage stats, if the provider reports them
364
+
365
+
366
class TraceSink(ABC):
    """
    Abstract trace sink for observability.

    Implementations: NoopTraceSink, LangfuseTraceSink, OpenTelemetrySink, etc.
    All methods are synchronous; implementations that buffer should emit in
    flush().
    """

    @abstractmethod
    def start_run(self, run_id: UUID, metadata: dict) -> None:
        """Start tracing a run."""
        ...

    @abstractmethod
    def log_event(self, run_id: UUID, event_type: str, payload: dict) -> None:
        """Log an event within a run."""
        ...

    @abstractmethod
    def end_run(self, run_id: UUID, outcome: str, metadata: Optional[dict] = None) -> None:
        """End tracing a run."""
        ...

    def flush(self) -> None:
        """Flush any buffered traces. Default is no-op."""
        pass
@@ -0,0 +1,83 @@
1
+ """
2
+ LLM client implementations.
3
+
4
+ Provides:
5
+ - LLMClient: Abstract interface (from interfaces.py)
6
+ - OpenAIClient: OpenAI API client
7
+ - AnthropicClient: Anthropic API client
8
+ - LiteLLMClient: LiteLLM adapter (optional)
9
+ """
10
+
11
+ from agent_runtime.interfaces import LLMClient, LLMResponse, LLMStreamChunk
12
+
13
+ __all__ = [
14
+ "LLMClient",
15
+ "LLMResponse",
16
+ "LLMStreamChunk",
17
+ "get_llm_client",
18
+ "OpenAIConfigurationError",
19
+ "AnthropicConfigurationError",
20
+ ]
21
+
22
+
23
class OpenAIConfigurationError(Exception):
    """Signals that the OpenAI API key has not been configured."""
26
+
27
+
28
class AnthropicConfigurationError(Exception):
    """Signals that the Anthropic API key has not been configured."""
31
+
32
+
33
def get_llm_client(provider: "str | None" = None, **kwargs) -> "LLMClient":
    """
    Factory function to get an LLM client.

    Args:
        provider: "openai", "anthropic", "litellm", etc. When omitted, the
            provider is read from the global configuration.
        **kwargs: Provider-specific configuration (e.g., api_key, default_model)

    Returns:
        LLMClient instance

    Raises:
        OpenAIConfigurationError: If OpenAI is selected but API key is not configured
        AnthropicConfigurationError: If Anthropic is selected but API key is not configured
        ValueError: If an unknown provider is specified

    Example:
        # Using config (recommended)
        from agent_runtime.config import configure
        configure(model_provider="openai", openai_api_key="sk-...")
        llm = get_llm_client()

        # Or with explicit API key
        llm = get_llm_client(api_key='sk-...')

        # Or with a different provider
        llm = get_llm_client(provider='anthropic', api_key='sk-ant-...')
    """
    if provider is None:
        # Only consult global config when the caller did not choose a
        # provider explicitly, so explicit calls work without configure().
        from agent_runtime.config import get_config

        provider = get_config().model_provider

    # Provider modules are imported lazily so that only the selected
    # provider's SDK needs to be installed.
    if provider == "openai":
        from agent_runtime.llm.openai import OpenAIClient

        return OpenAIClient(**kwargs)

    if provider == "anthropic":
        from agent_runtime.llm.anthropic import AnthropicClient

        return AnthropicClient(**kwargs)

    if provider == "litellm":
        from agent_runtime.llm.litellm_client import LiteLLMClient

        return LiteLLMClient(**kwargs)

    raise ValueError(
        f"Unknown LLM provider: {provider}\n\n"
        "Supported providers: 'openai', 'anthropic', 'litellm'\n"
        "Set model_provider in your configuration."
    )