agent-runtime-core 0.2.1-py3-none-any.whl → 0.4.0-py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two package versions as they appear in their public registries.
Files changed (38)
  1. {agent_runtime → agent_runtime_core}/__init__.py +8 -8
  2. {agent_runtime → agent_runtime_core}/config.py +1 -1
  3. {agent_runtime → agent_runtime_core}/events/__init__.py +5 -5
  4. {agent_runtime → agent_runtime_core}/events/memory.py +1 -1
  5. {agent_runtime → agent_runtime_core}/events/redis.py +1 -1
  6. {agent_runtime → agent_runtime_core}/events/sqlite.py +1 -1
  7. {agent_runtime → agent_runtime_core}/llm/__init__.py +6 -6
  8. {agent_runtime → agent_runtime_core}/llm/anthropic.py +4 -4
  9. {agent_runtime → agent_runtime_core}/llm/litellm_client.py +2 -2
  10. {agent_runtime → agent_runtime_core}/llm/openai.py +4 -4
  11. {agent_runtime → agent_runtime_core}/persistence/__init__.py +48 -12
  12. agent_runtime_core/persistence/base.py +737 -0
  13. {agent_runtime → agent_runtime_core}/persistence/file.py +1 -1
  14. {agent_runtime → agent_runtime_core}/persistence/manager.py +122 -14
  15. {agent_runtime → agent_runtime_core}/queue/__init__.py +5 -5
  16. {agent_runtime → agent_runtime_core}/queue/memory.py +1 -1
  17. {agent_runtime → agent_runtime_core}/queue/redis.py +1 -1
  18. {agent_runtime → agent_runtime_core}/queue/sqlite.py +1 -1
  19. {agent_runtime → agent_runtime_core}/registry.py +1 -1
  20. {agent_runtime → agent_runtime_core}/runner.py +6 -6
  21. {agent_runtime → agent_runtime_core}/state/__init__.py +5 -5
  22. {agent_runtime → agent_runtime_core}/state/memory.py +1 -1
  23. {agent_runtime → agent_runtime_core}/state/redis.py +1 -1
  24. {agent_runtime → agent_runtime_core}/state/sqlite.py +1 -1
  25. {agent_runtime → agent_runtime_core}/testing.py +1 -1
  26. {agent_runtime → agent_runtime_core}/tracing/__init__.py +4 -4
  27. {agent_runtime → agent_runtime_core}/tracing/langfuse.py +1 -1
  28. {agent_runtime → agent_runtime_core}/tracing/noop.py +1 -1
  29. {agent_runtime_core-0.2.1.dist-info → agent_runtime_core-0.4.0.dist-info}/METADATA +352 -42
  30. agent_runtime_core-0.4.0.dist-info/RECORD +36 -0
  31. agent_runtime/persistence/base.py +0 -332
  32. agent_runtime_core-0.2.1.dist-info/RECORD +0 -36
  33. {agent_runtime → agent_runtime_core}/events/base.py +0 -0
  34. {agent_runtime → agent_runtime_core}/interfaces.py +0 -0
  35. {agent_runtime → agent_runtime_core}/queue/base.py +0 -0
  36. {agent_runtime → agent_runtime_core}/state/base.py +0 -0
  37. {agent_runtime_core-0.2.1.dist-info → agent_runtime_core-0.4.0.dist-info}/WHEEL +0 -0
  38. {agent_runtime_core-0.2.1.dist-info → agent_runtime_core-0.4.0.dist-info}/licenses/LICENSE +0 -0
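The dominant change in this release is the rename of the top-level package from `agent_runtime` to `agent_runtime_core`, visible in every import line of the METADATA diff below. A minimal, hypothetical migration sketch for downstream code (names taken from the README imports shown in this diff; note the in-memory backends also gain an `In` prefix):

```python
# Hypothetical migration sketch, 0.2.1 -> 0.4.0 (assumes only the public
# imports shown in the README are used).

# Before (0.2.1):
# from agent_runtime import AgentRuntime, RunContext, RunResult
# from agent_runtime.queue import MemoryQueue

# After (0.4.0): only the package path and the in-memory class names change.
from agent_runtime_core import AgentRuntime, RunContext, RunResult
from agent_runtime_core.queue import InMemoryQueue
```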
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: agent-runtime-core
- Version: 0.2.1
+ Version: 0.4.0
  Summary: Framework-agnostic Python library for executing AI agents with consistent patterns
  Project-URL: Homepage, https://github.com/colstrom/agent_runtime_core
  Project-URL: Repository, https://github.com/colstrom/agent_runtime_core
@@ -85,7 +85,7 @@ pip install agent-runtime-core[all]
  ### Basic Configuration

  ```python
- from agent_runtime import configure, get_config
+ from agent_runtime_core import configure, get_config

  # Configure the runtime
  configure(
@@ -102,7 +102,7 @@ print(config.model_provider) # "openai"
  ### Creating an Agent

  ```python
- from agent_runtime import (
+ from agent_runtime_core import (
      AgentRuntime,
      RunContext,
      RunResult,
@@ -112,27 +112,27 @@ from agent_runtime import (

  class MyAgent(AgentRuntime):
      """A simple conversational agent."""
-
+
      @property
      def key(self) -> str:
          return "my-agent"
-
+
      async def run(self, ctx: RunContext) -> RunResult:
          # Access input messages
          messages = ctx.input_messages
-
+
          # Get an LLM client
-         from agent_runtime.llm import get_llm_client
+         from agent_runtime_core.llm import get_llm_client
          llm = get_llm_client()
-
+
          # Generate a response
          response = await llm.generate(messages)
-
+
          # Emit events for observability
          await ctx.emit(EventType.ASSISTANT_MESSAGE, {
              "content": response.message["content"],
          })
-
+
          # Return the result
          return RunResult(
              final_output={"response": response.message["content"]},
@@ -146,7 +146,7 @@ register_runtime(MyAgent())
  ### Using Tools

  ```python
- from agent_runtime import Tool, ToolRegistry, RunContext, RunResult
+ from agent_runtime_core import Tool, ToolRegistry, RunContext, RunResult

  # Define tools
  def get_weather(location: str) -> str:
@@ -166,44 +166,44 @@ class ToolAgent(AgentRuntime):
      @property
      def key(self) -> str:
          return "tool-agent"
-
+
      async def run(self, ctx: RunContext) -> RunResult:
-         from agent_runtime.llm import get_llm_client
+         from agent_runtime_core.llm import get_llm_client
          llm = get_llm_client()
-
+
          messages = list(ctx.input_messages)
-
+
          while True:
              # Generate with tools
              response = await llm.generate(
                  messages,
                  tools=tools.to_openai_format(),
              )
-
+
              messages.append(response.message)
-
+
              # Check for tool calls
              if not response.tool_calls:
                  break
-
+
              # Execute tools
              for tool_call in response.tool_calls:
                  result = await tools.execute(
                      tool_call["function"]["name"],
                      tool_call["function"]["arguments"],
                  )
-
+
                  await ctx.emit(EventType.TOOL_RESULT, {
                      "tool_call_id": tool_call["id"],
                      "result": result,
                  })
-
+
                  messages.append({
                      "role": "tool",
                      "tool_call_id": tool_call["id"],
                      "content": str(result),
                  })
-
+
          return RunResult(
              final_output={"response": response.message["content"]},
              final_messages=messages,
@@ -213,13 +213,13 @@ class ToolAgent(AgentRuntime):
  ### Running Agents

  ```python
- from agent_runtime import AgentRunner, RunnerConfig, get_runtime
+ from agent_runtime_core import AgentRunner, RunnerConfig, get_runtime
  import asyncio

  async def main():
      # Get a registered agent
      agent = get_runtime("my-agent")
-
+
      # Create a runner
      runner = AgentRunner(
          config=RunnerConfig(
@@ -227,7 +227,7 @@ async def main():
              max_retries=3,
          )
      )
-
+
      # Execute a run
      result = await runner.execute(
          agent=agent,
@@ -238,7 +238,7 @@
              ]
          },
      )
-
+
      print(result.final_output)

  asyncio.run(main())
@@ -257,7 +257,7 @@ class AgentRuntime(ABC):
      def key(self) -> str:
          """Unique identifier for this agent."""
          pass
-
+
      @abstractmethod
      async def run(self, ctx: RunContext) -> RunResult:
          """Execute the agent logic."""
@@ -274,13 +274,13 @@ class RunContext:
      input_messages: list  # Input messages
      metadata: dict  # Run metadata
      tools: ToolRegistry  # Available tools
-
+
      async def emit(self, event_type: EventType, payload: dict) -> None:
          """Emit an event."""
-
+
      async def checkpoint(self, state: dict) -> None:
          """Save a checkpoint."""
-
+
      def is_cancelled(self) -> bool:
          """Check if run was cancelled."""
  ```
@@ -314,10 +314,10 @@ Built-in event types for observability:
  ### Queue Backends

  ```python
- from agent_runtime.queue import MemoryQueue, RedisQueue
+ from agent_runtime_core.queue import InMemoryQueue, RedisQueue

  # In-memory (for development)
- queue = MemoryQueue()
+ queue = InMemoryQueue()

  # Redis (for production)
  queue = RedisQueue(redis_url="redis://localhost:6379/0")
@@ -326,10 +326,10 @@ queue = RedisQueue(redis_url="redis://localhost:6379/0")
  ### Event Bus Backends

  ```python
- from agent_runtime.events import MemoryEventBus, RedisEventBus
+ from agent_runtime_core.events import InMemoryEventBus, RedisEventBus

  # In-memory
- event_bus = MemoryEventBus()
+ event_bus = InMemoryEventBus()

  # Redis Pub/Sub
  event_bus = RedisEventBus(redis_url="redis://localhost:6379/0")
@@ -338,10 +338,10 @@ event_bus = RedisEventBus(redis_url="redis://localhost:6379/0")
  ### State Store Backends

  ```python
- from agent_runtime.state import MemoryStateStore, RedisStateStore, SQLiteStateStore
+ from agent_runtime_core.state import InMemoryStateStore, RedisStateStore, SQLiteStateStore

  # In-memory
- state = MemoryStateStore()
+ state = InMemoryStateStore()

  # Redis
  state = RedisStateStore(redis_url="redis://localhost:6379/0")
@@ -350,12 +350,277 @@ state = RedisStateStore(redis_url="redis://localhost:6379/0")
  state = SQLiteStateStore(db_path="./agent_state.db")
  ```

+ ## Persistence
+
+ The persistence module provides storage for conversations, tasks, memory, and preferences with pluggable backends.
+
+ ### File-Based Storage (Default)
+
+ ```python
+ from agent_runtime_core.persistence import (
+     PersistenceManager,
+     PersistenceConfig,
+     Scope,
+ )
+ from pathlib import Path
+
+ # Create manager with file-based storage
+ config = PersistenceConfig(project_dir=Path.cwd())
+ manager = PersistenceManager(config)
+
+ # Store memory (key-value)
+ await manager.memory.set("user_name", "Alice", scope=Scope.PROJECT)
+ name = await manager.memory.get("user_name")
+
+ # Store conversations
+ from agent_runtime_core.persistence import Conversation, Message
+ conv = Conversation(title="Chat 1")
+ conv.messages.append(Message(role="user", content="Hello!"))
+ await manager.conversations.save(conv)
+
+ # Store tasks
+ from agent_runtime_core.persistence import Task, TaskState
+ task = Task(name="Review code", conversation_id=conv.id)
+ await manager.tasks.save(task)
+ await manager.tasks.update(task.id, state=TaskState.COMPLETE)
+
+ # Store preferences
+ await manager.preferences.set("theme", "dark")
+ ```
+
+ ### Custom Backends (e.g., Django/Database)
+
+ The persistence layer is designed to be pluggable. Implement the abstract base classes for your backend:
+
+ ```python
+ from agent_runtime_core.persistence import (
+     MemoryStore,
+     ConversationStore,
+     TaskStore,
+     PreferencesStore,
+     PersistenceConfig,
+     PersistenceManager,
+ )
+
+ class MyDatabaseMemoryStore(MemoryStore):
+     def __init__(self, user):
+         self.user = user
+
+     async def get(self, key: str, scope=None) -> Optional[Any]:
+         # Your database logic here
+         pass
+
+     async def set(self, key: str, value: Any, scope=None) -> None:
+         # Your database logic here
+         pass
+
+     # ... implement other methods
+
+ # Three ways to configure custom backends:
+
+ # 1. Pre-instantiated stores (recommended for request-scoped)
+ config = PersistenceConfig(
+     memory_store=MyDatabaseMemoryStore(user=request.user),
+     conversation_store=MyDatabaseConversationStore(user=request.user),
+ )
+
+ # 2. Factory functions (for lazy instantiation)
+ config = PersistenceConfig(
+     memory_store_factory=lambda: MyDatabaseMemoryStore(user=get_current_user()),
+ )
+
+ # 3. Classes with kwargs
+ config = PersistenceConfig(
+     memory_store_class=MyDatabaseMemoryStore,
+     memory_store_kwargs={"user": request.user},
+ )
+
+ manager = PersistenceManager(config)
+ ```
+
+ ### Persistence Data Models
+
+ ```python
+ from agent_runtime_core.persistence import (
+     # Conversation models
+     Conversation,  # Chat conversation with messages
+     ConversationMessage,  # Single message with branching support
+     ToolCall,  # Tool invocation within a message
+     ToolResult,  # Result of a tool call
+
+     # Task models (with dependencies and checkpoints)
+     Task,  # Task with state, dependencies, checkpoints
+     TaskList,  # Collection of tasks
+     TaskState,  # NOT_STARTED, IN_PROGRESS, COMPLETE, CANCELLED
+
+     # Knowledge models (optional)
+     Fact,  # Learned facts about user/project
+     FactType,  # USER, PROJECT, PREFERENCE, CONTEXT, CUSTOM
+     Summary,  # Conversation summaries
+     Embedding,  # Vector embeddings for semantic search
+
+     # Audit models (optional)
+     AuditEntry,  # Interaction logs
+     AuditEventType,  # CONVERSATION_START, TOOL_CALL, AGENT_ERROR, etc.
+     ErrorRecord,  # Error history with resolution tracking
+     ErrorSeverity,  # DEBUG, INFO, WARNING, ERROR, CRITICAL
+     PerformanceMetric,  # Timing, token usage, etc.
+
+     Scope,  # GLOBAL, PROJECT, SESSION
+ )
+ ```
+
+ ### Conversation Branching
+
+ Messages and conversations support branching for edit/regenerate workflows:
+
+ ```python
+ from agent_runtime_core.persistence import Conversation, ConversationMessage
+ from uuid import uuid4
+
+ # Create a branched message (e.g., user edited their message)
+ branch_id = uuid4()
+ edited_msg = ConversationMessage(
+     id=uuid4(),
+     role="user",
+     content="Updated question",
+     parent_message_id=original_msg.id,  # Points to original
+     branch_id=branch_id,
+ )
+
+ # Fork a conversation
+ forked_conv = Conversation(
+     id=uuid4(),
+     title="Forked conversation",
+     parent_conversation_id=original_conv.id,
+     active_branch_id=branch_id,
+ )
+ ```
+
+ ### Enhanced Tasks
+
+ Tasks support dependencies, checkpoints for resumable operations, and execution tracking:
+
+ ```python
+ from agent_runtime_core.persistence import Task, TaskState
+ from uuid import uuid4
+ from datetime import datetime
+
+ task = Task(
+     id=uuid4(),
+     name="Process large dataset",
+     description="Multi-step data processing",
+     state=TaskState.IN_PROGRESS,
+
+     # Dependencies - this task depends on others
+     dependencies=[task1.id, task2.id],
+
+     # Scheduling
+     priority=10,  # Higher = more important
+     due_at=datetime(2024, 12, 31),
+
+     # Checkpoint for resumable operations
+     checkpoint_data={"step": 5, "processed": 1000},
+     checkpoint_at=datetime.utcnow(),
+
+     # Execution tracking
+     attempts=2,
+     last_error="Temporary network failure",
+ )
+ ```
+
+ ### Optional: Knowledge Store
+
+ The KnowledgeStore is optional and must be explicitly configured. It stores facts, summaries, and embeddings:
+
+ ```python
+ from agent_runtime_core.persistence import (
+     KnowledgeStore, Fact, FactType, Summary, Embedding,
+     PersistenceConfig, PersistenceManager,
+ )
+
+ # Implement your own KnowledgeStore
+ class MyKnowledgeStore(KnowledgeStore):
+     async def save_fact(self, fact, scope=Scope.PROJECT):
+         # Save to database
+         ...
+
+     async def get_fact(self, fact_id, scope=Scope.PROJECT):
+         ...
+
+     # ... implement other abstract methods
+
+ # Configure with optional store
+ config = PersistenceConfig(
+     knowledge_store=MyKnowledgeStore(),
+ )
+ manager = PersistenceManager(config)
+
+ # Check if available before using
+ if manager.has_knowledge():
+     await manager.knowledge.save_fact(Fact(
+         id=uuid4(),
+         key="user.preferred_language",
+         value="Python",
+         fact_type=FactType.PREFERENCE,
+     ))
+ ```
+
+ ### Optional: Audit Store
+
+ The AuditStore is optional and tracks interaction logs, errors, and performance metrics:
+
+ ```python
+ from agent_runtime_core.persistence import (
+     AuditStore, AuditEntry, AuditEventType,
+     ErrorRecord, ErrorSeverity, PerformanceMetric,
+ )
+
+ # Implement your own AuditStore
+ class MyAuditStore(AuditStore):
+     async def log_event(self, entry, scope=Scope.PROJECT):
+         # Log to database/file
+         ...
+
+     async def log_error(self, error, scope=Scope.PROJECT):
+         ...
+
+     async def record_metric(self, metric, scope=Scope.PROJECT):
+         ...
+
+     # ... implement other abstract methods
+
+ # Use in manager
+ config = PersistenceConfig(
+     audit_store=MyAuditStore(),
+ )
+ manager = PersistenceManager(config)
+
+ if manager.has_audit():
+     # Log an event
+     await manager.audit.log_event(AuditEntry(
+         id=uuid4(),
+         event_type=AuditEventType.TOOL_CALL,
+         action="Called search tool",
+         details={"query": "python docs"},
+     ))
+
+     # Record performance metric
+     await manager.audit.record_metric(PerformanceMetric(
+         id=uuid4(),
+         name="llm_latency",
+         value=1250.5,
+         unit="ms",
+         tags={"model": "gpt-4"},
+     ))
+ ```
+
  ## LLM Clients

  ### OpenAI

  ```python
- from agent_runtime.llm import OpenAIClient
+ from agent_runtime_core.llm import OpenAIClient

  client = OpenAIClient(
      api_key="sk-...",  # Or use OPENAI_API_KEY env var
@@ -370,7 +635,7 @@ response = await client.generate([
  ### Anthropic

  ```python
- from agent_runtime.llm import AnthropicClient
+ from agent_runtime_core.llm import AnthropicClient

  client = AnthropicClient(
      api_key="sk-ant-...",  # Or use ANTHROPIC_API_KEY env var
@@ -381,7 +646,7 @@ client = AnthropicClient(
  ### LiteLLM (Any Provider)

  ```python
- from agent_runtime.llm import LiteLLMClient
+ from agent_runtime_core.llm import LiteLLMClient

  # Use any LiteLLM-supported model
  client = LiteLLMClient(default_model="gpt-4o")
@@ -394,7 +659,7 @@ client = LiteLLMClient(default_model="ollama/llama2")
  ### Langfuse Integration

  ```python
- from agent_runtime import configure
+ from agent_runtime_core import configure

  configure(
      langfuse_enabled=True,
@@ -406,7 +671,7 @@ configure(
  ### Custom Trace Sink

  ```python
- from agent_runtime import TraceSink
+ from agent_runtime_core import TraceSink

  class MyTraceSink(TraceSink):
      async def trace(self, event: dict) -> None:
@@ -418,16 +683,43 @@ class MyTraceSink(TraceSink):

  For Django applications, use [django-agent-runtime](https://pypi.org/project/django-agent-runtime/) which provides:

- - Django models for conversations, runs, and events
+ - Django models for conversations, memory, tasks, and preferences
+ - Database-backed persistence stores
  - REST API endpoints
  - Server-Sent Events (SSE) for real-time streaming
  - Management commands for running workers
- - PostgreSQL-backed queue and event bus

  ```bash
  pip install django-agent-runtime
  ```

+ ## Testing
+
+ The library includes testing utilities for unit testing your agents:
+
+ ```python
+ from agent_runtime_core.testing import (
+     MockRunContext,
+     MockLLMClient,
+     create_test_context,
+     run_agent_test,
+ )
+
+ # Create a mock context
+ ctx = create_test_context(
+     input_messages=[{"role": "user", "content": "Hello!"}]
+ )
+
+ # Create a mock LLM client with predefined responses
+ mock_llm = MockLLMClient(responses=[
+     {"role": "assistant", "content": "Hi there!"}
+ ])
+
+ # Run your agent
+ result = await run_agent_test(MyAgent(), ctx)
+ assert result.final_output["response"] == "Hi there!"
+ ```
+
  ## API Reference

  ### Configuration
@@ -452,6 +744,24 @@ unregister_runtime(key: str) -> None
  clear_registry() -> None
  ```

+ ### Persistence Functions
+
+ ```python
+ from agent_runtime_core.persistence import (
+     configure_persistence,
+     get_persistence_manager,
+ )
+
+ # Configure global persistence
+ configure_persistence(
+     memory_store_class=MyMemoryStore,
+     project_dir=Path.cwd(),
+ )
+
+ # Get the global manager
+ manager = get_persistence_manager()
+ ```
+
  ## Contributing

  Contributions are welcome! Please feel free to submit a Pull Request.
@@ -0,0 +1,36 @@
+ agent_runtime_core/__init__.py,sha256=Z3OrJpoY9vrf-2hX3ulTqVRwA7YN0cF5mi-xTg5o3kg,3626
+ agent_runtime_core/config.py,sha256=e3_uB5brAuQcWU36sOhWF9R6RoJrngtCS-xEB3n2fas,4986
+ agent_runtime_core/interfaces.py,sha256=-VGZJHUkyF8kdO-BDkURyc-sLbObIHErIFw1Hzn3n14,10434
+ agent_runtime_core/registry.py,sha256=hrbEdNNdqEz7-uN-82qofsXFTZBRDxZ2Ht9qwmp1qkw,1476
+ agent_runtime_core/runner.py,sha256=M3It72UhfmLt17jVnSvObiSfQ1_RN4JVUIJsjnRd2Ps,12771
+ agent_runtime_core/testing.py,sha256=ordECGprBappLBMWxlETvuf2AoIPNomJFeSedXaY30E,11131
+ agent_runtime_core/events/__init__.py,sha256=Gg7cMQHWfLTQ4Xik09KSg7cWbQDmW_MuF5_jl-yZkHU,1575
+ agent_runtime_core/events/base.py,sha256=NfHYyoczxr40Er5emROi_aY_07m5hDrKsn31pdWY2DY,1950
+ agent_runtime_core/events/memory.py,sha256=9z4tY8XB8xDg3ybHsIwilOcRo7HY-vB-8vxiz6O54BE,2491
+ agent_runtime_core/events/redis.py,sha256=7PsUO2-iqrdGCJZUOq1IdzwDdNhqT5mwEnH5xy2Fklo,5874
+ agent_runtime_core/events/sqlite.py,sha256=ZpGgeuQujYT8pkDsiXDoFXTcBf2KqzoWX4D4J9xkmeE,5097
+ agent_runtime_core/llm/__init__.py,sha256=LyFFDtk4HhvUXct0nTeKuYuWzVmVqLDSVRpnPArbGqY,2461
+ agent_runtime_core/llm/anthropic.py,sha256=pt9QAjrv2dIPSAY3Pv6N_BzxL1tbhL-kPWsQ-DcHMLI,7516
+ agent_runtime_core/llm/litellm_client.py,sha256=c-O-lE08cT3ne0xSOvSDezPL6hCiA69p3HnB451Ipe4,5193
+ agent_runtime_core/llm/openai.py,sha256=qBZkkndDgYQ6LG-9bHS2za5KJTGSgL-c_7h0bD3_5lg,6862
+ agent_runtime_core/persistence/__init__.py,sha256=l9_1Mzhy9_Y-IIKuBKTR3Z8r2TPyr0a6b1HEdyZf1_I,2772
+ agent_runtime_core/persistence/base.py,sha256=k0wuzTCffPJ609dj9hIhnaqnNI1Qr3pCzJ-6E1YkSRU,21616
+ agent_runtime_core/persistence/file.py,sha256=oDB4_ZQkwHTCT1uoqpw5jOleK69YXCQwlTPsW86Yb-I,17785
+ agent_runtime_core/persistence/manager.py,sha256=UL_eFsFM28nXM6O9PTHdzKX9Qxh9v2gBGd1m9Bs0vog,14309
+ agent_runtime_core/queue/__init__.py,sha256=m8gapXACPGApLj0RIDpVe5cQYuvKq1QsY2_mXzZcULQ,1527
+ agent_runtime_core/queue/base.py,sha256=QW1eWbwBX_tmVD8yJobFJtlxLd_RtUWHTuXGessuxy8,3959
+ agent_runtime_core/queue/memory.py,sha256=G65NJ2QU8sB2WQ7myHXc8LzSFowEzBXtCt78WmhvxO8,5416
+ agent_runtime_core/queue/redis.py,sha256=x9BEoeh6cVcWdaziFSQZHUvq_bHN1WYHWw17HIEWi5o,15622
+ agent_runtime_core/queue/sqlite.py,sha256=3YwkU0QOcmAxYBpbfEB9xk8f-e2C3QcHK_RnPZvdick,14338
+ agent_runtime_core/state/__init__.py,sha256=RK16Sj1QPW0SNxtmESlLRMpFBY_hZbMGGNnvcLbdcWw,1564
+ agent_runtime_core/state/base.py,sha256=NqE3B0ySa-U2jkelgmkBbkmkaIQxfu4pDryoxkZTMrc,1593
+ agent_runtime_core/state/memory.py,sha256=yEGwoR25zWcoxL79_gEu38P_dHvjJOTGij2wxqZ7X9A,1566
+ agent_runtime_core/state/redis.py,sha256=VXY6ULEphehHVq4zSw5Y4AMCibm9Ghvzk7PqCrgStDg,3430
+ agent_runtime_core/state/sqlite.py,sha256=HKZwDiC_7F1W8Z_Pz8roEs91XhQ9rUHfGpuQ7WWt_NQ,4873
+ agent_runtime_core/tracing/__init__.py,sha256=u1QicGc39e30gWyQD4cQWxGGjITnkwoOPUhNrG6aNyI,1266
+ agent_runtime_core/tracing/langfuse.py,sha256=Rj2sUlatk5sFro0y68tw5X6fQcSwWxcBOSOjB0F7JTU,3660
+ agent_runtime_core/tracing/noop.py,sha256=SpsbpsUcNG6C3xZG3uyiNPUHY8etloISx3w56Q8D3KE,751
+ agent_runtime_core-0.4.0.dist-info/METADATA,sha256=KW6Jv-d_UBN4o3hxfIrc_Dz3aGMG31bAgI4EzOKAC0o,20675
+ agent_runtime_core-0.4.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
+ agent_runtime_core-0.4.0.dist-info/licenses/LICENSE,sha256=PcOO8aiOZ4H2MWYeKIis3o6xTCT1hNkDyCxHZhh1NeM,1070
+ agent_runtime_core-0.4.0.dist-info/RECORD,,