agent-runtime-core 0.7.0__py3-none-any.whl → 0.7.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (32)
  1. agent_runtime_core/__init__.py +108 -1
  2. agent_runtime_core/agentic_loop.py +254 -0
  3. agent_runtime_core/config.py +54 -4
  4. agent_runtime_core/config_schema.py +307 -0
  5. agent_runtime_core/interfaces.py +106 -0
  6. agent_runtime_core/json_runtime.py +509 -0
  7. agent_runtime_core/llm/__init__.py +80 -7
  8. agent_runtime_core/llm/anthropic.py +133 -12
  9. agent_runtime_core/llm/models_config.py +180 -0
  10. agent_runtime_core/memory/__init__.py +70 -0
  11. agent_runtime_core/memory/manager.py +554 -0
  12. agent_runtime_core/memory/mixin.py +294 -0
  13. agent_runtime_core/multi_agent.py +569 -0
  14. agent_runtime_core/persistence/__init__.py +2 -0
  15. agent_runtime_core/persistence/file.py +277 -0
  16. agent_runtime_core/rag/__init__.py +65 -0
  17. agent_runtime_core/rag/chunking.py +224 -0
  18. agent_runtime_core/rag/indexer.py +253 -0
  19. agent_runtime_core/rag/retriever.py +261 -0
  20. agent_runtime_core/runner.py +193 -15
  21. agent_runtime_core/tool_calling_agent.py +88 -130
  22. agent_runtime_core/tools.py +179 -0
  23. agent_runtime_core/vectorstore/__init__.py +193 -0
  24. agent_runtime_core/vectorstore/base.py +138 -0
  25. agent_runtime_core/vectorstore/embeddings.py +242 -0
  26. agent_runtime_core/vectorstore/sqlite_vec.py +328 -0
  27. agent_runtime_core/vectorstore/vertex.py +295 -0
  28. {agent_runtime_core-0.7.0.dist-info → agent_runtime_core-0.7.1.dist-info}/METADATA +202 -1
  29. agent_runtime_core-0.7.1.dist-info/RECORD +57 -0
  30. agent_runtime_core-0.7.0.dist-info/RECORD +0 -39
  31. {agent_runtime_core-0.7.0.dist-info → agent_runtime_core-0.7.1.dist-info}/WHEEL +0 -0
  32. {agent_runtime_core-0.7.0.dist-info → agent_runtime_core-0.7.1.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,307 @@
+"""
+AgentConfig - Canonical JSON schema for portable agent definitions.
+
+This schema defines the format for agent configurations that can be:
+1. Stored in Django as JSON revisions
+2. Loaded from standalone .json files
+3. Used by agent_runtime_core without Django dependency
+
+Example:
+    # Load from file
+    config = AgentConfig.from_file("my_agent.json")
+
+    # Create runtime and run
+    runtime = JsonAgentRuntime(config, llm_client)
+    result = await runtime.run(ctx)
+"""
+
+from dataclasses import dataclass, field
+from datetime import datetime
+from typing import Any, Optional
+import json
+from pathlib import Path
+
+
+@dataclass
+class SubAgentToolConfig:
+    """
+    Configuration for a sub-agent tool (agent-as-tool pattern).
+
+    This allows an agent to delegate to another agent as if it were a tool.
+    The sub-agent can either be referenced by slug (resolved at runtime)
+    or embedded inline (for portable standalone configs).
+    """
+
+    name: str  # Tool name the parent uses to invoke this agent
+    description: str  # When to use this agent (shown to parent LLM)
+
+    # Reference to sub-agent (one of these should be set)
+    agent_slug: str = ""  # Reference by slug (resolved at runtime from registry)
+    agent_config: Optional["AgentConfig"] = None  # Embedded config (for standalone)
+
+    # Invocation settings
+    invocation_mode: str = "delegate"  # "delegate" or "handoff"
+    context_mode: str = "full"  # "full", "summary", or "message_only"
+    max_turns: Optional[int] = None
+
+    def to_dict(self) -> dict:
+        result = {
+            "name": self.name,
+            "description": self.description,
+            "tool_type": "subagent",
+            "invocation_mode": self.invocation_mode,
+            "context_mode": self.context_mode,
+        }
+        if self.agent_slug:
+            result["agent_slug"] = self.agent_slug
+        if self.agent_config:
+            result["agent_config"] = self.agent_config.to_dict()
+        if self.max_turns is not None:
+            result["max_turns"] = self.max_turns
+        return result
+
+    @classmethod
+    def from_dict(cls, data: dict) -> "SubAgentToolConfig":
+        agent_config = None
+        if "agent_config" in data and data["agent_config"]:
+            # Defer import to avoid circular dependency
+            agent_config = AgentConfig.from_dict(data["agent_config"])
+
+        return cls(
+            name=data["name"],
+            description=data["description"],
+            agent_slug=data.get("agent_slug", ""),
+            agent_config=agent_config,
+            invocation_mode=data.get("invocation_mode", "delegate"),
+            context_mode=data.get("context_mode", "full"),
+            max_turns=data.get("max_turns"),
+        )
+
+
+@dataclass
+class ToolConfig:
+    """Configuration for a single tool."""
+
+    name: str
+    description: str
+    parameters: dict  # JSON Schema for parameters
+    function_path: str  # Import path like "myapp.services.orders.lookup_order"
+
+    # Optional metadata
+    requires_confirmation: bool = False
+    is_safe: bool = True  # No side effects
+    timeout_seconds: int = 30
+
+    def to_dict(self) -> dict:
+        return {
+            "name": self.name,
+            "description": self.description,
+            "parameters": self.parameters,
+            "function_path": self.function_path,
+            "requires_confirmation": self.requires_confirmation,
+            "is_safe": self.is_safe,
+            "timeout_seconds": self.timeout_seconds,
+        }
+
+    @classmethod
+    def from_dict(cls, data: dict) -> "ToolConfig":
+        return cls(
+            name=data["name"],
+            description=data["description"],
+            parameters=data.get("parameters", {}),
+            function_path=data.get("function_path", ""),
+            requires_confirmation=data.get("requires_confirmation", False),
+            is_safe=data.get("is_safe", True),
+            timeout_seconds=data.get("timeout_seconds", 30),
+        )
+
+
+@dataclass
+class KnowledgeConfig:
+    """Configuration for a knowledge source."""
+
+    name: str
+    knowledge_type: str  # "text", "file", "url"
+    inclusion_mode: str = "always"  # "always", "on_demand", "rag"
+
+    # Content (depends on type)
+    content: str = ""  # For text type
+    file_path: str = ""  # For file type
+    url: str = ""  # For url type
+
+    def to_dict(self) -> dict:
+        return {
+            "name": self.name,
+            "type": self.knowledge_type,
+            "inclusion_mode": self.inclusion_mode,
+            "content": self.content,
+            "file_path": self.file_path,
+            "url": self.url,
+        }
+
+    @classmethod
+    def from_dict(cls, data: dict) -> "KnowledgeConfig":
+        return cls(
+            name=data["name"],
+            knowledge_type=data.get("type", "text"),
+            inclusion_mode=data.get("inclusion_mode", "always"),
+            content=data.get("content", ""),
+            file_path=data.get("file_path", ""),
+            url=data.get("url", ""),
+        )
+
+
+@dataclass
+class AgentConfig:
+    """
+    Canonical configuration for an agent.
+
+    This is the portable format that can be serialized to JSON and
+    loaded by any runtime (Django or standalone).
+
+    For multi-agent systems, sub_agents contains embedded agent configs
+    that this agent can delegate to. The sub_agent_tools list defines
+    how each sub-agent is exposed as a tool.
+    """
+
+    # Identity
+    name: str
+    slug: str
+    description: str = ""
+
+    # Core configuration
+    system_prompt: str = ""
+    model: str = "gpt-4o"
+    model_settings: dict = field(default_factory=dict)
+
+    # Tools and knowledge
+    tools: list[ToolConfig] = field(default_factory=list)
+    knowledge: list[KnowledgeConfig] = field(default_factory=list)
+
+    # Sub-agent tools (agent-as-tool pattern)
+    # These define other agents this agent can delegate to
+    sub_agent_tools: list[SubAgentToolConfig] = field(default_factory=list)
+
+    # Metadata
+    version: str = "1.0"
+    schema_version: str = "1"  # For future schema migrations
+    created_at: Optional[str] = None
+    updated_at: Optional[str] = None
+
+    # Extra config (for extensibility)
+    extra: dict = field(default_factory=dict)
+
+    def to_dict(self) -> dict:
+        """Serialize to dictionary (JSON-compatible)."""
+        result = {
+            "schema_version": self.schema_version,
+            "version": self.version,
+            "name": self.name,
+            "slug": self.slug,
+            "description": self.description,
+            "system_prompt": self.system_prompt,
+            "model": self.model,
+            "model_settings": self.model_settings,
+            "tools": [t.to_dict() for t in self.tools],
+            "knowledge": [k.to_dict() for k in self.knowledge],
+            "created_at": self.created_at,
+            "updated_at": self.updated_at,
+            "extra": self.extra,
+        }
+        # Only include sub_agent_tools if there are any
+        if self.sub_agent_tools:
+            result["sub_agent_tools"] = [s.to_dict() for s in self.sub_agent_tools]
+        return result
+
+    def to_json(self, indent: int = 2) -> str:
+        """Serialize to JSON string."""
+        return json.dumps(self.to_dict(), indent=indent)
+
+    def save(self, path: str | Path) -> None:
+        """Save to a JSON file."""
+        path = Path(path)
+        path.write_text(self.to_json())
+
+    @classmethod
+    def from_dict(cls, data: dict) -> "AgentConfig":
+        """Load from dictionary."""
+        # Parse regular tools (skip subagent tools - they're in sub_agent_tools)
+        tools = []
+        for t in data.get("tools", []):
+            if t.get("tool_type") != "subagent":
+                tools.append(ToolConfig.from_dict(t))
+
+        knowledge = [KnowledgeConfig.from_dict(k) for k in data.get("knowledge", [])]
+
+        # Parse sub-agent tools
+        sub_agent_tools = []
+        for s in data.get("sub_agent_tools", []):
+            sub_agent_tools.append(SubAgentToolConfig.from_dict(s))
+        # Also check tools list for subagent type (backwards compat)
+        for t in data.get("tools", []):
+            if t.get("tool_type") == "subagent":
+                sub_agent_tools.append(SubAgentToolConfig.from_dict(t))
+
+        return cls(
+            name=data["name"],
+            slug=data["slug"],
+            description=data.get("description", ""),
+            system_prompt=data.get("system_prompt", ""),
+            model=data.get("model", "gpt-4o"),
+            model_settings=data.get("model_settings", {}),
+            tools=tools,
+            knowledge=knowledge,
+            sub_agent_tools=sub_agent_tools,
+            version=data.get("version", "1.0"),
+            schema_version=data.get("schema_version", "1"),
+            created_at=data.get("created_at"),
+            updated_at=data.get("updated_at"),
+            extra=data.get("extra", {}),
+        )
+
+    @classmethod
+    def from_json(cls, json_str: str) -> "AgentConfig":
+        """Load from JSON string."""
+        return cls.from_dict(json.loads(json_str))
+
+    @classmethod
+    def from_file(cls, path: str | Path) -> "AgentConfig":
+        """Load from a JSON file."""
+        path = Path(path)
+        return cls.from_json(path.read_text())
+
+    def with_timestamp(self) -> "AgentConfig":
+        """Return a copy with updated timestamp."""
+        now = datetime.utcnow().isoformat() + "Z"
+        return AgentConfig(
+            name=self.name,
+            slug=self.slug,
+            description=self.description,
+            system_prompt=self.system_prompt,
+            model=self.model,
+            model_settings=self.model_settings,
+            tools=self.tools,
+            knowledge=self.knowledge,
+            sub_agent_tools=self.sub_agent_tools,
+            version=self.version,
+            schema_version=self.schema_version,
+            created_at=self.created_at or now,
+            updated_at=now,
+            extra=self.extra,
+        )
+
+    def get_all_embedded_agents(self) -> dict[str, "AgentConfig"]:
+        """
+        Get all embedded agent configs (for standalone execution).
+
+        Returns a dict mapping slug -> AgentConfig for all sub-agents
+        that have embedded configs (not just slug references).
+        """
+        agents = {}
+        for sub_tool in self.sub_agent_tools:
+            if sub_tool.agent_config:
+                agents[sub_tool.agent_config.slug] = sub_tool.agent_config
+                # Recursively get nested sub-agents
+                agents.update(sub_tool.agent_config.get_all_embedded_agents())
+        return agents
+
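
The config_schema module added above is self-contained, so an agent definition can be built in code, saved, and reloaded without Django. A minimal sketch, assuming the classes are importable from agent_runtime_core.config_schema as the file path in this diff suggests (tool names, slugs, and the function_path are illustrative):

    from agent_runtime_core.config_schema import AgentConfig, SubAgentToolConfig, ToolConfig

    # A regular tool, resolved later via its import path (path shown is illustrative).
    lookup_tool = ToolConfig(
        name="lookup_order",
        description="Look up an order by ID",
        parameters={"type": "object", "properties": {"order_id": {"type": "string"}}},
        function_path="myapp.services.orders.lookup_order",
    )

    # A sub-agent embedded inline so the config stays portable.
    researcher = AgentConfig(name="Researcher", slug="researcher", system_prompt="You research topics.")

    parent = AgentConfig(
        name="Support Agent",
        slug="support-agent",
        system_prompt="You help customers.",
        tools=[lookup_tool],
        sub_agent_tools=[
            SubAgentToolConfig(
                name="ask_researcher",
                description="Delegate research questions to the researcher agent",
                agent_config=researcher,
            )
        ],
    )

    # Round-trips through to_dict()/to_json() and back through from_dict().
    parent.with_timestamp().save("support_agent.json")
    loaded = AgentConfig.from_file("support_agent.json")
    assert "researcher" in loaded.get_all_embedded_agents()
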
@@ -15,6 +15,18 @@ from typing import Any, Callable, Optional, Protocol, TypedDict, AsyncIterator
 from uuid import UUID
 
 
+class EventVisibility(str, Enum):
+    """
+    Visibility levels for events.
+
+    Controls which events are shown to users in the UI.
+    """
+
+    INTERNAL = "internal"  # Never shown to UI (checkpoints, heartbeats)
+    DEBUG = "debug"  # Shown only in debug mode (tool calls, tool results)
+    USER = "user"  # Always shown to users (assistant messages, errors)
+
+
 class EventType(str, Enum):
     """
     Standard event types emitted by agent runtimes.
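
This hunk does not show how individual events get tagged with a visibility level; the enum only defines the levels a UI can filter on. A hedged sketch of such filtering, with the event-to-visibility mapping invented purely for illustration (import path assumed from interfaces.py):

    from agent_runtime_core.interfaces import EventType, EventVisibility  # import path assumed

    # Illustrative mapping only -- the real association between events and
    # visibility levels lives in the runtime, not in this enum.
    DEFAULT_VISIBILITY = {
        EventType.STATE_CHECKPOINT: EventVisibility.INTERNAL,
        EventType.ERROR: EventVisibility.USER,
    }

    def visible_to_user(event_type: EventType, debug: bool = False) -> bool:
        vis = DEFAULT_VISIBILITY.get(event_type, EventVisibility.DEBUG)
        return vis == EventVisibility.USER or (debug and vis == EventVisibility.DEBUG)
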
@@ -41,6 +53,9 @@ class EventType(str, Enum):
     # State events
     STATE_CHECKPOINT = "state.checkpoint"
 
+    # Error events (distinct from run.failed - for runtime errors shown to users)
+    ERROR = "error"
+
     # Step execution events (for long-running multi-step agents)
     STEP_STARTED = "step.started"
     STEP_COMPLETED = "step.completed"
@@ -138,6 +153,30 @@ class RunContext(Protocol):
         """
         ...
 
+    async def emit_user_message(self, content: str) -> None:
+        """
+        Emit a message that will always be shown to the user.
+
+        This is a convenience method for emitting assistant messages.
+
+        Args:
+            content: The message content to display
+        """
+        ...
+
+    async def emit_error(self, error: str, details: dict = None) -> None:
+        """
+        Emit an error that will be shown to the user.
+
+        This is for runtime errors that should be displayed to users,
+        distinct from run.failed which is the final failure event.
+
+        Args:
+            error: The error message
+            details: Optional additional error details
+        """
+        ...
+
     async def checkpoint(self, state: dict) -> None:
         """
         Save a state checkpoint for recovery.
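
Both additions are Protocol methods, so the concrete RunContext implementation supplies the behaviour and a runtime only awaits them. A minimal sketch of a runtime step using both (run_step and do_some_work are illustrative names, not part of the package):

    from agent_runtime_core.interfaces import RunContext  # import path assumed

    async def do_some_work() -> str:
        # Placeholder for the runtime's actual LLM / tool loop.
        return "All done."

    async def run_step(ctx: RunContext) -> None:
        try:
            answer = await do_some_work()
            await ctx.emit_user_message(answer)  # always surfaced to the user
        except Exception as exc:
            # Surfaced to the user as a runtime error; run.failed remains the
            # terminal event if the whole run gives up.
            await ctx.emit_error(str(exc), details={"stage": "run_step"})
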
@@ -403,6 +442,56 @@ class LLMClient(ABC):
         ...
 
 
+class LLMToolCall:
+    """
+    Wrapper for tool call data from LLM responses to provide attribute access.
+
+    This provides a consistent interface for accessing tool call data
+    regardless of the underlying format (OpenAI, Anthropic, etc.).
+
+    Note: This is different from persistence.ToolCall which is a dataclass
+    for storing tool calls in conversations.
+    """
+
+    def __init__(self, data: dict):
+        self._data = data
+
+    @property
+    def id(self) -> str:
+        return self._data.get("id", "")
+
+    @property
+    def name(self) -> str:
+        func = self._data.get("function", {})
+        return func.get("name", "")
+
+    @property
+    def arguments(self) -> dict:
+        import json
+        import ast
+        func = self._data.get("function", {})
+        args = func.get("arguments", "{}")
+        if isinstance(args, str):
+            # First try standard JSON parsing
+            try:
+                return json.loads(args)
+            except json.JSONDecodeError:
+                pass
+
+            # Some models (e.g., Claude via certain providers) return Python dict syntax
+            # with single quotes instead of JSON double quotes. Try ast.literal_eval.
+            try:
+                result = ast.literal_eval(args)
+                if isinstance(result, dict):
+                    return result
+            except (ValueError, SyntaxError):
+                pass
+
+            # Last resort: return empty dict
+            return {}
+        return args
+
+
 @dataclass
 class LLMResponse:
     """Response from an LLM generation."""
@@ -413,6 +502,23 @@ class LLMResponse:
     finish_reason: str = ""
     raw_response: Optional[Any] = None
 
+    @property
+    def tool_calls(self) -> Optional[list["LLMToolCall"]]:
+        """Extract tool_calls from the message for convenience."""
+        if isinstance(self.message, dict):
+            calls = self.message.get("tool_calls")
+            if calls:
+                # Convert to LLMToolCall objects with name, arguments, id attributes
+                return [LLMToolCall(tc) for tc in calls]
+        return None
+
+    @property
+    def content(self) -> str:
+        """Extract content from the message for convenience."""
+        if isinstance(self.message, dict):
+            return self.message.get("content", "")
+        return ""
+
 
 @dataclass
 class LLMStreamChunk:
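
With the new properties, callers can branch on tool calls versus plain text without reaching into the raw message dict. A hedged sketch; only finish_reason and raw_response are visible in this hunk, so treating message as a constructor field of LLMResponse is an assumption:

    from agent_runtime_core.interfaces import LLMResponse  # import path assumed

    response = LLMResponse(
        message={
            "role": "assistant",
            "content": "",
            "tool_calls": [
                {"id": "call_1",
                 "function": {"name": "lookup_order", "arguments": '{"order_id": "A-42"}'}},
            ],
        },
        finish_reason="tool_calls",
    )

    if response.tool_calls:
        for call in response.tool_calls:
            print(call.name, call.arguments)  # lookup_order {'order_id': 'A-42'}
    else:
        print(response.content)
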