agent-runtime-core 0.3.0__tar.gz → 0.5.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50) hide show
  1. agent_runtime_core-0.5.0/PKG-INFO +863 -0
  2. agent_runtime_core-0.5.0/README.md +820 -0
  3. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/agent_runtime_core/__init__.py +19 -1
  4. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/agent_runtime_core/interfaces.py +8 -0
  5. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/agent_runtime_core/persistence/__init__.py +44 -8
  6. agent_runtime_core-0.5.0/agent_runtime_core/persistence/base.py +737 -0
  7. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/agent_runtime_core/persistence/manager.py +120 -12
  8. agent_runtime_core-0.5.0/agent_runtime_core/steps.py +373 -0
  9. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/pyproject.toml +1 -1
  10. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/tests/test_persistence.py +186 -0
  11. agent_runtime_core-0.5.0/tests/test_steps.py +365 -0
  12. agent_runtime_core-0.3.0/PKG-INFO +0 -461
  13. agent_runtime_core-0.3.0/README.md +0 -418
  14. agent_runtime_core-0.3.0/agent_runtime_core/persistence/base.py +0 -332
  15. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/.gitignore +0 -0
  16. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/LICENSE +0 -0
  17. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/agent_runtime_core/config.py +0 -0
  18. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/agent_runtime_core/events/__init__.py +0 -0
  19. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/agent_runtime_core/events/base.py +0 -0
  20. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/agent_runtime_core/events/memory.py +0 -0
  21. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/agent_runtime_core/events/redis.py +0 -0
  22. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/agent_runtime_core/events/sqlite.py +0 -0
  23. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/agent_runtime_core/llm/__init__.py +0 -0
  24. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/agent_runtime_core/llm/anthropic.py +0 -0
  25. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/agent_runtime_core/llm/litellm_client.py +0 -0
  26. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/agent_runtime_core/llm/openai.py +0 -0
  27. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/agent_runtime_core/persistence/file.py +0 -0
  28. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/agent_runtime_core/queue/__init__.py +0 -0
  29. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/agent_runtime_core/queue/base.py +0 -0
  30. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/agent_runtime_core/queue/memory.py +0 -0
  31. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/agent_runtime_core/queue/redis.py +0 -0
  32. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/agent_runtime_core/queue/sqlite.py +0 -0
  33. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/agent_runtime_core/registry.py +0 -0
  34. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/agent_runtime_core/runner.py +0 -0
  35. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/agent_runtime_core/state/__init__.py +0 -0
  36. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/agent_runtime_core/state/base.py +0 -0
  37. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/agent_runtime_core/state/memory.py +0 -0
  38. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/agent_runtime_core/state/redis.py +0 -0
  39. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/agent_runtime_core/state/sqlite.py +0 -0
  40. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/agent_runtime_core/testing.py +0 -0
  41. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/agent_runtime_core/tracing/__init__.py +0 -0
  42. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/agent_runtime_core/tracing/langfuse.py +0 -0
  43. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/agent_runtime_core/tracing/noop.py +0 -0
  44. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/tests/__init__.py +0 -0
  45. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/tests/test_events.py +0 -0
  46. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/tests/test_imports.py +0 -0
  47. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/tests/test_queue.py +0 -0
  48. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/tests/test_state.py +0 -0
  49. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/tests/test_testing.py +0 -0
  50. {agent_runtime_core-0.3.0 → agent_runtime_core-0.5.0}/uv.lock +0 -0
@@ -0,0 +1,863 @@
1
+ Metadata-Version: 2.4
2
+ Name: agent-runtime-core
3
+ Version: 0.5.0
4
+ Summary: Framework-agnostic Python library for executing AI agents with consistent patterns
5
+ Project-URL: Homepage, https://github.com/colstrom/agent_runtime_core
6
+ Project-URL: Repository, https://github.com/colstrom/agent_runtime_core
7
+ Author: Chris Olstrom
8
+ License-Expression: MIT
9
+ License-File: LICENSE
10
+ Keywords: agents,ai,async,llm,runtime
11
+ Classifier: Development Status :: 3 - Alpha
12
+ Classifier: Intended Audience :: Developers
13
+ Classifier: License :: OSI Approved :: MIT License
14
+ Classifier: Programming Language :: Python :: 3
15
+ Classifier: Programming Language :: Python :: 3.11
16
+ Classifier: Programming Language :: Python :: 3.12
17
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
18
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
19
+ Requires-Python: >=3.11
20
+ Provides-Extra: all
21
+ Requires-Dist: anthropic>=0.18.0; extra == 'all'
22
+ Requires-Dist: langfuse>=2.0.0; extra == 'all'
23
+ Requires-Dist: litellm>=1.0.0; extra == 'all'
24
+ Requires-Dist: openai>=1.0.0; extra == 'all'
25
+ Requires-Dist: redis>=5.0.0; extra == 'all'
26
+ Provides-Extra: anthropic
27
+ Requires-Dist: anthropic>=0.18.0; extra == 'anthropic'
28
+ Provides-Extra: dev
29
+ Requires-Dist: mypy>=1.0.0; extra == 'dev'
30
+ Requires-Dist: pytest-asyncio>=0.23.0; extra == 'dev'
31
+ Requires-Dist: pytest-cov>=4.0.0; extra == 'dev'
32
+ Requires-Dist: pytest>=8.0.0; extra == 'dev'
33
+ Requires-Dist: ruff>=0.1.0; extra == 'dev'
34
+ Provides-Extra: langfuse
35
+ Requires-Dist: langfuse>=2.0.0; extra == 'langfuse'
36
+ Provides-Extra: litellm
37
+ Requires-Dist: litellm>=1.0.0; extra == 'litellm'
38
+ Provides-Extra: openai
39
+ Requires-Dist: openai>=1.0.0; extra == 'openai'
40
+ Provides-Extra: redis
41
+ Requires-Dist: redis>=5.0.0; extra == 'redis'
42
+ Description-Content-Type: text/markdown
43
+
44
+ # agent-runtime-core
45
+
46
+ [![PyPI version](https://badge.fury.io/py/agent-runtime-core.svg)](https://badge.fury.io/py/agent-runtime-core)
47
+ [![Python 3.11+](https://img.shields.io/badge/python-3.11+-blue.svg)](https://www.python.org/downloads/)
48
+ [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
49
+
50
+ A lightweight, framework-agnostic Python library for building AI agent systems. Provides the core abstractions and implementations needed to build production-ready AI agents without tying you to any specific framework.
51
+
52
+ ## Features
53
+
54
+ - 🔌 **Framework Agnostic** - Works with LangGraph, CrewAI, OpenAI Agents, or your own custom loops
55
+ - 🤖 **Model Agnostic** - OpenAI, Anthropic, or any provider via LiteLLM
56
+ - 📦 **Zero Required Dependencies** - Core library has no dependencies; add only what you need
57
+ - 🔄 **Async First** - Built for modern async Python with full sync support
58
+ - 🛠️ **Pluggable Backends** - Memory, Redis, or SQLite for queues, events, and state
59
+ - 📊 **Observable** - Built-in tracing with optional Langfuse integration
60
+ - 🧩 **Composable** - Mix and match components to build your ideal agent system
61
+
62
+ ## Installation
63
+
64
+ ```bash
65
+ # Core library (no dependencies)
66
+ pip install agent-runtime-core
67
+
68
+ # With specific LLM providers
69
+ pip install agent-runtime-core[openai]
70
+ pip install agent-runtime-core[anthropic]
71
+ pip install agent-runtime-core[litellm]
72
+
73
+ # With Redis backend support
74
+ pip install agent-runtime-core[redis]
75
+
76
+ # With observability
77
+ pip install agent-runtime-core[langfuse]
78
+
79
+ # Everything
80
+ pip install agent-runtime-core[all]
81
+ ```
82
+
83
+ ## Quick Start
84
+
85
+ ### Basic Configuration
86
+
87
+ ```python
88
+ from agent_runtime_core import configure, get_config
89
+
90
+ # Configure the runtime
91
+ configure(
92
+ model_provider="openai",
93
+ openai_api_key="sk-...", # Or use OPENAI_API_KEY env var
94
+ default_model="gpt-4o",
95
+ )
96
+
97
+ # Access configuration anywhere
98
+ config = get_config()
99
+ print(config.model_provider) # "openai"
100
+ ```
101
+
102
+ ### Creating an Agent
103
+
104
+ ```python
105
+ from agent_runtime_core import (
106
+ AgentRuntime,
107
+ RunContext,
108
+ RunResult,
109
+ EventType,
110
+ register_runtime,
111
+ )
112
+
113
+ class MyAgent(AgentRuntime):
114
+ """A simple conversational agent."""
115
+
116
+ @property
117
+ def key(self) -> str:
118
+ return "my-agent"
119
+
120
+ async def run(self, ctx: RunContext) -> RunResult:
121
+ # Access input messages
122
+ messages = ctx.input_messages
123
+
124
+ # Get an LLM client
125
+ from agent_runtime_core.llm import get_llm_client
126
+ llm = get_llm_client()
127
+
128
+ # Generate a response
129
+ response = await llm.generate(messages)
130
+
131
+ # Emit events for observability
132
+ await ctx.emit(EventType.ASSISTANT_MESSAGE, {
133
+ "content": response.message["content"],
134
+ })
135
+
136
+ # Return the result
137
+ return RunResult(
138
+ final_output={"response": response.message["content"]},
139
+ final_messages=[response.message],
140
+ )
141
+
142
+ # Register the agent
143
+ register_runtime(MyAgent())
144
+ ```
145
+
146
+ ### Using Tools
147
+
148
+ ```python
149
+ from agent_runtime_core import Tool, ToolRegistry, RunContext, RunResult
150
+
151
+ # Define tools
152
+ def get_weather(location: str) -> str:
153
+ """Get the current weather for a location."""
154
+ return f"The weather in {location} is sunny, 72°F"
155
+
156
+ def search_web(query: str) -> str:
157
+ """Search the web for information."""
158
+ return f"Search results for: {query}"
159
+
160
+ # Create a tool registry
161
+ tools = ToolRegistry()
162
+ tools.register(Tool.from_function(get_weather))
163
+ tools.register(Tool.from_function(search_web))
164
+
165
+ class ToolAgent(AgentRuntime):
166
+ @property
167
+ def key(self) -> str:
168
+ return "tool-agent"
169
+
170
+ async def run(self, ctx: RunContext) -> RunResult:
171
+ from agent_runtime_core.llm import get_llm_client
172
+ llm = get_llm_client()
173
+
174
+ messages = list(ctx.input_messages)
175
+
176
+ while True:
177
+ # Generate with tools
178
+ response = await llm.generate(
179
+ messages,
180
+ tools=tools.to_openai_format(),
181
+ )
182
+
183
+ messages.append(response.message)
184
+
185
+ # Check for tool calls
186
+ if not response.tool_calls:
187
+ break
188
+
189
+ # Execute tools
190
+ for tool_call in response.tool_calls:
191
+ result = await tools.execute(
192
+ tool_call["function"]["name"],
193
+ tool_call["function"]["arguments"],
194
+ )
195
+
196
+ await ctx.emit(EventType.TOOL_RESULT, {
197
+ "tool_call_id": tool_call["id"],
198
+ "result": result,
199
+ })
200
+
201
+ messages.append({
202
+ "role": "tool",
203
+ "tool_call_id": tool_call["id"],
204
+ "content": str(result),
205
+ })
206
+
207
+ return RunResult(
208
+ final_output={"response": response.message["content"]},
209
+ final_messages=messages,
210
+ )
211
+ ```
212
+
213
+ ### Running Agents
214
+
215
+ ```python
216
+ from agent_runtime_core import AgentRunner, RunnerConfig, get_runtime
217
+ import asyncio
218
+
219
+ async def main():
220
+ # Get a registered agent
221
+ agent = get_runtime("my-agent")
222
+
223
+ # Create a runner
224
+ runner = AgentRunner(
225
+ config=RunnerConfig(
226
+ run_timeout_seconds=300,
227
+ max_retries=3,
228
+ )
229
+ )
230
+
231
+ # Execute a run
232
+ result = await runner.execute(
233
+ agent=agent,
234
+ run_id="run-123",
235
+ input_data={
236
+ "messages": [
237
+ {"role": "user", "content": "Hello!"}
238
+ ]
239
+ },
240
+ )
241
+
242
+ print(result.final_output)
243
+
244
+ asyncio.run(main())
245
+ ```
246
+
247
+ ## Core Concepts
248
+
249
+ ### AgentRuntime
250
+
251
+ The base class for all agents. Implement the `run` method to define your agent's behavior:
252
+
253
+ ```python
254
+ class AgentRuntime(ABC):
255
+ @property
256
+ @abstractmethod
257
+ def key(self) -> str:
258
+ """Unique identifier for this agent."""
259
+ pass
260
+
261
+ @abstractmethod
262
+ async def run(self, ctx: RunContext) -> RunResult:
263
+ """Execute the agent logic."""
264
+ pass
265
+ ```
266
+
267
+ ### RunContext
268
+
269
+ Provides access to the current run's state and utilities:
270
+
271
+ ```python
272
+ class RunContext:
273
+ run_id: UUID # Unique run identifier
274
+ input_messages: list # Input messages
275
+ metadata: dict # Run metadata
276
+ tools: ToolRegistry # Available tools
277
+
278
+ async def emit(self, event_type: EventType, payload: dict) -> None:
279
+ """Emit an event."""
280
+
281
+ async def checkpoint(self, state: dict) -> None:
282
+ """Save a checkpoint."""
283
+
284
+ def is_cancelled(self) -> bool:
285
+ """Check if run was cancelled."""
286
+ ```
287
+
288
+ ### RunResult
289
+
290
+ The result of an agent run:
291
+
292
+ ```python
293
+ @dataclass
294
+ class RunResult:
295
+ final_output: dict # Structured output
296
+ final_messages: list = None # Conversation history
297
+ error: ErrorInfo = None # Error details if failed
298
+ ```
299
+
300
+ ### Event Types
301
+
302
+ Built-in event types for observability:
303
+
304
+ - `EventType.RUN_STARTED` - Run execution began
305
+ - `EventType.RUN_SUCCEEDED` - Run completed successfully
306
+ - `EventType.RUN_FAILED` - Run failed with error
307
+ - `EventType.TOOL_CALL` - Tool was invoked
308
+ - `EventType.TOOL_RESULT` - Tool returned result
309
+ - `EventType.ASSISTANT_MESSAGE` - LLM generated message
310
+ - `EventType.CHECKPOINT` - State checkpoint saved
311
+
312
+ ## Backend Options
313
+
314
+ ### Queue Backends
315
+
316
+ ```python
317
+ from agent_runtime_core.queue import InMemoryQueue, RedisQueue
318
+
319
+ # In-memory (for development)
320
+ queue = InMemoryQueue()
321
+
322
+ # Redis (for production)
323
+ queue = RedisQueue(redis_url="redis://localhost:6379/0")
324
+ ```
325
+
326
+ ### Event Bus Backends
327
+
328
+ ```python
329
+ from agent_runtime_core.events import InMemoryEventBus, RedisEventBus
330
+
331
+ # In-memory
332
+ event_bus = InMemoryEventBus()
333
+
334
+ # Redis Pub/Sub
335
+ event_bus = RedisEventBus(redis_url="redis://localhost:6379/0")
336
+ ```
337
+
338
+ ### State Store Backends
339
+
340
+ ```python
341
+ from agent_runtime_core.state import InMemoryStateStore, RedisStateStore, SQLiteStateStore
342
+
343
+ # In-memory
344
+ state = InMemoryStateStore()
345
+
346
+ # Redis
347
+ state = RedisStateStore(redis_url="redis://localhost:6379/0")
348
+
349
+ # SQLite (persistent, single-node)
350
+ state = SQLiteStateStore(db_path="./agent_state.db")
351
+ ```
352
+
353
+ ## Persistence
354
+
355
+ The persistence module provides storage for conversations, tasks, memory, and preferences with pluggable backends.
356
+
357
+ ### File-Based Storage (Default)
358
+
359
+ ```python
360
+ from agent_runtime_core.persistence import (
361
+ PersistenceManager,
362
+ PersistenceConfig,
363
+ Scope,
364
+ )
365
+ from pathlib import Path
366
+
367
+ # Create manager with file-based storage
368
+ config = PersistenceConfig(project_dir=Path.cwd())
369
+ manager = PersistenceManager(config)
370
+
371
+ # Store memory (key-value)
372
+ await manager.memory.set("user_name", "Alice", scope=Scope.PROJECT)
373
+ name = await manager.memory.get("user_name")
374
+
375
+ # Store conversations
376
+ from agent_runtime_core.persistence import Conversation, Message
377
+ conv = Conversation(title="Chat 1")
378
+ conv.messages.append(Message(role="user", content="Hello!"))
379
+ await manager.conversations.save(conv)
380
+
381
+ # Store tasks
382
+ from agent_runtime_core.persistence import Task, TaskState
383
+ task = Task(name="Review code", conversation_id=conv.id)
384
+ await manager.tasks.save(task)
385
+ await manager.tasks.update(task.id, state=TaskState.COMPLETE)
386
+
387
+ # Store preferences
388
+ await manager.preferences.set("theme", "dark")
389
+ ```
390
+
391
+ ### Custom Backends (e.g., Django/Database)
392
+
393
+ The persistence layer is designed to be pluggable. Implement the abstract base classes for your backend:
394
+
395
+ ```python
396
+ from agent_runtime_core.persistence import (
397
+ MemoryStore,
398
+ ConversationStore,
399
+ TaskStore,
400
+ PreferencesStore,
401
+ PersistenceConfig,
402
+ PersistenceManager,
403
+ )
404
+
405
+ class MyDatabaseMemoryStore(MemoryStore):
406
+ def __init__(self, user):
407
+ self.user = user
408
+
409
+ async def get(self, key: str, scope=None) -> Optional[Any]:
410
+ # Your database logic here
411
+ pass
412
+
413
+ async def set(self, key: str, value: Any, scope=None) -> None:
414
+ # Your database logic here
415
+ pass
416
+
417
+ # ... implement other methods
418
+
419
+ # Three ways to configure custom backends:
420
+
421
+ # 1. Pre-instantiated stores (recommended for request-scoped)
422
+ config = PersistenceConfig(
423
+ memory_store=MyDatabaseMemoryStore(user=request.user),
424
+ conversation_store=MyDatabaseConversationStore(user=request.user),
425
+ )
426
+
427
+ # 2. Factory functions (for lazy instantiation)
428
+ config = PersistenceConfig(
429
+ memory_store_factory=lambda: MyDatabaseMemoryStore(user=get_current_user()),
430
+ )
431
+
432
+ # 3. Classes with kwargs
433
+ config = PersistenceConfig(
434
+ memory_store_class=MyDatabaseMemoryStore,
435
+ memory_store_kwargs={"user": request.user},
436
+ )
437
+
438
+ manager = PersistenceManager(config)
439
+ ```
440
+
441
+ ### Persistence Data Models
442
+
443
+ ```python
444
+ from agent_runtime_core.persistence import (
445
+ # Conversation models
446
+ Conversation, # Chat conversation with messages
447
+ ConversationMessage, # Single message with branching support
448
+ ToolCall, # Tool invocation within a message
449
+ ToolResult, # Result of a tool call
450
+
451
+ # Task models (with dependencies and checkpoints)
452
+ Task, # Task with state, dependencies, checkpoints
453
+ TaskList, # Collection of tasks
454
+ TaskState, # NOT_STARTED, IN_PROGRESS, COMPLETE, CANCELLED
455
+
456
+ # Knowledge models (optional)
457
+ Fact, # Learned facts about user/project
458
+ FactType, # USER, PROJECT, PREFERENCE, CONTEXT, CUSTOM
459
+ Summary, # Conversation summaries
460
+ Embedding, # Vector embeddings for semantic search
461
+
462
+ # Audit models (optional)
463
+ AuditEntry, # Interaction logs
464
+ AuditEventType, # CONVERSATION_START, TOOL_CALL, AGENT_ERROR, etc.
465
+ ErrorRecord, # Error history with resolution tracking
466
+ ErrorSeverity, # DEBUG, INFO, WARNING, ERROR, CRITICAL
467
+ PerformanceMetric, # Timing, token usage, etc.
468
+
469
+ Scope, # GLOBAL, PROJECT, SESSION
470
+ )
471
+ ```
472
+
473
+ ### Conversation Branching
474
+
475
+ Messages and conversations support branching for edit/regenerate workflows:
476
+
477
+ ```python
478
+ from agent_runtime_core.persistence import Conversation, ConversationMessage
479
+ from uuid import uuid4
480
+
481
+ # Create a branched message (e.g., user edited their message)
482
+ branch_id = uuid4()
483
+ edited_msg = ConversationMessage(
484
+ id=uuid4(),
485
+ role="user",
486
+ content="Updated question",
487
+ parent_message_id=original_msg.id, # Points to original
488
+ branch_id=branch_id,
489
+ )
490
+
491
+ # Fork a conversation
492
+ forked_conv = Conversation(
493
+ id=uuid4(),
494
+ title="Forked conversation",
495
+ parent_conversation_id=original_conv.id,
496
+ active_branch_id=branch_id,
497
+ )
498
+ ```
499
+
500
+ ### Enhanced Tasks
501
+
502
+ Tasks support dependencies, checkpoints for resumable operations, and execution tracking:
503
+
504
+ ```python
505
+ from agent_runtime_core.persistence import Task, TaskState
506
+ from uuid import uuid4
507
+ from datetime import datetime
508
+
509
+ task = Task(
510
+ id=uuid4(),
511
+ name="Process large dataset",
512
+ description="Multi-step data processing",
513
+ state=TaskState.IN_PROGRESS,
514
+
515
+ # Dependencies - this task depends on others
516
+ dependencies=[task1.id, task2.id],
517
+
518
+ # Scheduling
519
+ priority=10, # Higher = more important
520
+ due_at=datetime(2024, 12, 31),
521
+
522
+ # Checkpoint for resumable operations
523
+ checkpoint_data={"step": 5, "processed": 1000},
524
+ checkpoint_at=datetime.utcnow(),
525
+
526
+ # Execution tracking
527
+ attempts=2,
528
+ last_error="Temporary network failure",
529
+ )
530
+ ```
531
+
532
+ ### Optional: Knowledge Store
533
+
534
+ The KnowledgeStore is optional and must be explicitly configured. It stores facts, summaries, and embeddings:
535
+
536
+ ```python
537
+ from agent_runtime_core.persistence import (
538
+ KnowledgeStore, Fact, FactType, Scope, Summary, Embedding,
539
+ PersistenceConfig, PersistenceManager,
540
+ )
541
+
542
+ # Implement your own KnowledgeStore
543
+ class MyKnowledgeStore(KnowledgeStore):
544
+ async def save_fact(self, fact, scope=Scope.PROJECT):
545
+ # Save to database
546
+ ...
547
+
548
+ async def get_fact(self, fact_id, scope=Scope.PROJECT):
549
+ ...
550
+
551
+ # ... implement other abstract methods
552
+
553
+ # Configure with optional store
554
+ config = PersistenceConfig(
555
+ knowledge_store=MyKnowledgeStore(),
556
+ )
557
+ manager = PersistenceManager(config)
558
+
559
+ # Check if available before using
560
+ if manager.has_knowledge():
561
+ await manager.knowledge.save_fact(Fact(
562
+ id=uuid4(),
563
+ key="user.preferred_language",
564
+ value="Python",
565
+ fact_type=FactType.PREFERENCE,
566
+ ))
567
+ ```
568
+
569
+ ### Optional: Audit Store
570
+
571
+ The AuditStore is optional and tracks interaction logs, errors, and performance metrics:
572
+
573
+ ```python
574
+ from agent_runtime_core.persistence import (
575
+ AuditStore, AuditEntry, AuditEventType,
576
+ ErrorRecord, ErrorSeverity, PerformanceMetric, Scope,
577
+ )
578
+
579
+ # Implement your own AuditStore
580
+ class MyAuditStore(AuditStore):
581
+ async def log_event(self, entry, scope=Scope.PROJECT):
582
+ # Log to database/file
583
+ ...
584
+
585
+ async def log_error(self, error, scope=Scope.PROJECT):
586
+ ...
587
+
588
+ async def record_metric(self, metric, scope=Scope.PROJECT):
589
+ ...
590
+
591
+ # ... implement other abstract methods
592
+
593
+ # Use in manager
594
+ config = PersistenceConfig(
595
+ audit_store=MyAuditStore(),
596
+ )
597
+ manager = PersistenceManager(config)
598
+
599
+ if manager.has_audit():
600
+ # Log an event
601
+ await manager.audit.log_event(AuditEntry(
602
+ id=uuid4(),
603
+ event_type=AuditEventType.TOOL_CALL,
604
+ action="Called search tool",
605
+ details={"query": "python docs"},
606
+ ))
607
+
608
+ # Record performance metric
609
+ await manager.audit.record_metric(PerformanceMetric(
610
+ id=uuid4(),
611
+ name="llm_latency",
612
+ value=1250.5,
613
+ unit="ms",
614
+ tags={"model": "gpt-4"},
615
+ ))
616
+ ```
617
+
618
+ ## LLM Clients
619
+
620
+ ### OpenAI
621
+
622
+ ```python
623
+ from agent_runtime_core.llm import OpenAIClient
624
+
625
+ client = OpenAIClient(
626
+ api_key="sk-...", # Or use OPENAI_API_KEY env var
627
+ default_model="gpt-4o",
628
+ )
629
+
630
+ response = await client.generate([
631
+ {"role": "user", "content": "Hello!"}
632
+ ])
633
+ ```
634
+
635
+ ### Anthropic
636
+
637
+ ```python
638
+ from agent_runtime_core.llm import AnthropicClient
639
+
640
+ client = AnthropicClient(
641
+ api_key="sk-ant-...", # Or use ANTHROPIC_API_KEY env var
642
+ default_model="claude-3-5-sonnet-20241022",
643
+ )
644
+ ```
645
+
646
+ ### LiteLLM (Any Provider)
647
+
648
+ ```python
649
+ from agent_runtime_core.llm import LiteLLMClient
650
+
651
+ # Use any LiteLLM-supported model
652
+ client = LiteLLMClient(default_model="gpt-4o")
653
+ client = LiteLLMClient(default_model="claude-3-5-sonnet-20241022")
654
+ client = LiteLLMClient(default_model="ollama/llama2")
655
+ ```
656
+
657
+ ## Tracing & Observability
658
+
659
+ ### Langfuse Integration
660
+
661
+ ```python
662
+ from agent_runtime_core import configure
663
+
664
+ configure(
665
+ langfuse_enabled=True,
666
+ langfuse_public_key="pk-...",
667
+ langfuse_secret_key="sk-...",
668
+ )
669
+ ```
670
+
671
+ ### Custom Trace Sink
672
+
673
+ ```python
674
+ from agent_runtime_core import TraceSink
675
+
676
+ class MyTraceSink(TraceSink):
677
+ async def trace(self, event: dict) -> None:
678
+ # Send to your observability platform
679
+ print(f"Trace: {event}")
680
+ ```
681
+
682
+ ## Integration with Django
683
+
684
+ For Django applications, use [django-agent-runtime](https://pypi.org/project/django-agent-runtime/) which provides:
685
+
686
+ - Django models for conversations, memory, tasks, and preferences
687
+ - Database-backed persistence stores
688
+ - REST API endpoints
689
+ - Server-Sent Events (SSE) for real-time streaming
690
+ - Management commands for running workers
691
+
692
+ ```bash
693
+ pip install django-agent-runtime
694
+ ```
695
+
696
+ ## Testing
697
+
698
+ The library includes testing utilities for unit testing your agents:
699
+
700
+ ```python
701
+ from agent_runtime_core.testing import (
702
+ MockRunContext,
703
+ MockLLMClient,
704
+ create_test_context,
705
+ run_agent_test,
706
+ )
707
+
708
+ # Create a mock context
709
+ ctx = create_test_context(
710
+ input_messages=[{"role": "user", "content": "Hello!"}]
711
+ )
712
+
713
+ # Create a mock LLM client with predefined responses
714
+ mock_llm = MockLLMClient(responses=[
715
+ {"role": "assistant", "content": "Hi there!"}
716
+ ])
717
+
718
+ # Run your agent
719
+ result = await run_agent_test(MyAgent(), ctx)
720
+ assert result.final_output["response"] == "Hi there!"
721
+ ```
722
+
723
+ ## Step Executor
724
+
725
+ The `StepExecutor` provides a structured way to execute multi-step operations with automatic checkpointing, resume capability, retries, and progress reporting. Ideal for long-running agent tasks.
726
+
727
+ ### Basic Usage
728
+
729
+ ```python
730
+ from agent_runtime_core.steps import StepExecutor, Step
731
+
732
+ class MyAgent(AgentRuntime):
733
+ async def run(self, ctx: RunContext) -> RunResult:
734
+ executor = StepExecutor(ctx)
735
+
736
+ results = await executor.run([
737
+ Step("fetch", self.fetch_data),
738
+ Step("process", self.process_data, retries=3),
739
+ Step("validate", self.validate_results),
740
+ ])
741
+
742
+ return RunResult(final_output=results)
743
+
744
+ async def fetch_data(self, ctx, state):
745
+ # Fetch data from external API
746
+ return {"items": [...]}
747
+
748
+ async def process_data(self, ctx, state):
749
+ # Access results from previous steps via state
750
+ return {"processed": True}
751
+
752
+ async def validate_results(self, ctx, state):
753
+ return {"valid": True}
754
+ ```
755
+
756
+ ### Step Options
757
+
758
+ ```python
759
+ Step(
760
+ name="process", # Unique step identifier
761
+ fn=process_data, # Async function(ctx, state) -> result
762
+ retries=3, # Retry attempts on failure (default: 0)
763
+ retry_delay=2.0, # Seconds between retries (default: 1.0)
764
+ timeout=30.0, # Step timeout in seconds (optional)
765
+ description="Process data", # Human-readable description
766
+ checkpoint=True, # Save checkpoint after step (default: True)
767
+ )
768
+ ```
769
+
770
+ ### Resume from Checkpoint
771
+
772
+ Steps automatically checkpoint after completion. If execution is interrupted, it resumes from the last checkpoint:
773
+
774
+ ```python
775
+ # First run - completes step1, fails during step2
776
+ executor = StepExecutor(ctx)
777
+ await executor.run([step1, step2, step3]) # Checkpoints after step1
778
+
779
+ # Second run - skips step1, resumes from step2
780
+ executor = StepExecutor(ctx)
781
+ await executor.run([step1, step2, step3]) # step1 skipped
782
+ ```
783
+
784
+ ### Custom State
785
+
786
+ Pass state between steps using `initial_state` and the `state` dict:
787
+
788
+ ```python
789
+ async def step1(ctx, state):
790
+ state["counter"] = 1
791
+ return "done"
792
+
793
+ async def step2(ctx, state):
794
+ state["counter"] += 1 # Access state from step1
795
+ return state["counter"]
796
+
797
+ executor = StepExecutor(ctx)
798
+ results = await executor.run(
799
+ [Step("step1", step1), Step("step2", step2)],
800
+ initial_state={"counter": 0},
801
+ )
802
+ ```
803
+
804
+ ### Events
805
+
806
+ The executor emits events for observability:
807
+
808
+ - `EventType.STEP_STARTED` - Step execution began
809
+ - `EventType.STEP_COMPLETED` - Step completed successfully
810
+ - `EventType.STEP_FAILED` - Step failed after all retries
811
+ - `EventType.STEP_RETRYING` - Step is being retried
812
+ - `EventType.STEP_SKIPPED` - Step skipped (already completed)
813
+ - `EventType.PROGRESS_UPDATE` - Progress percentage update
814
+
815
+ ## API Reference
816
+
817
+ ### Configuration
818
+
819
+ | Setting | Type | Default | Description |
820
+ |---------|------|---------|-------------|
821
+ | `model_provider` | str | `"openai"` | LLM provider: openai, anthropic, litellm |
822
+ | `default_model` | str | `"gpt-4o"` | Default model to use |
823
+ | `queue_backend` | str | `"memory"` | Queue backend: memory, redis |
824
+ | `event_bus_backend` | str | `"memory"` | Event bus: memory, redis |
825
+ | `state_store_backend` | str | `"memory"` | State store: memory, redis, sqlite |
826
+ | `redis_url` | str | `None` | Redis connection URL |
827
+ | `langfuse_enabled` | bool | `False` | Enable Langfuse tracing |
828
+
829
+ ### Registry Functions
830
+
831
+ ```python
832
+ register_runtime(runtime: AgentRuntime) -> None
833
+ get_runtime(key: str) -> AgentRuntime
834
+ list_runtimes() -> list[str]
835
+ unregister_runtime(key: str) -> None
836
+ clear_registry() -> None
837
+ ```
838
+
839
+ ### Persistence Functions
840
+
841
+ ```python
842
+ from agent_runtime_core.persistence import (
843
+ configure_persistence,
844
+ get_persistence_manager,
845
+ )
846
+
847
+ # Configure global persistence
848
+ configure_persistence(
849
+ memory_store_class=MyMemoryStore,
850
+ project_dir=Path.cwd(),
851
+ )
852
+
853
+ # Get the global manager
854
+ manager = get_persistence_manager()
855
+ ```
856
+
857
+ ## Contributing
858
+
859
+ Contributions are welcome! Please feel free to submit a Pull Request.
860
+
861
+ ## License
862
+
863
+ MIT License - see [LICENSE](LICENSE) for details.