agent-runtime-core 0.7.0-py3-none-any.whl → 0.7.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agent_runtime_core/__init__.py +108 -1
- agent_runtime_core/agentic_loop.py +254 -0
- agent_runtime_core/config.py +54 -4
- agent_runtime_core/config_schema.py +307 -0
- agent_runtime_core/interfaces.py +106 -0
- agent_runtime_core/json_runtime.py +509 -0
- agent_runtime_core/llm/__init__.py +80 -7
- agent_runtime_core/llm/anthropic.py +133 -12
- agent_runtime_core/llm/models_config.py +180 -0
- agent_runtime_core/memory/__init__.py +70 -0
- agent_runtime_core/memory/manager.py +554 -0
- agent_runtime_core/memory/mixin.py +294 -0
- agent_runtime_core/multi_agent.py +569 -0
- agent_runtime_core/persistence/__init__.py +2 -0
- agent_runtime_core/persistence/file.py +277 -0
- agent_runtime_core/rag/__init__.py +65 -0
- agent_runtime_core/rag/chunking.py +224 -0
- agent_runtime_core/rag/indexer.py +253 -0
- agent_runtime_core/rag/retriever.py +261 -0
- agent_runtime_core/runner.py +193 -15
- agent_runtime_core/tool_calling_agent.py +88 -130
- agent_runtime_core/tools.py +179 -0
- agent_runtime_core/vectorstore/__init__.py +193 -0
- agent_runtime_core/vectorstore/base.py +138 -0
- agent_runtime_core/vectorstore/embeddings.py +242 -0
- agent_runtime_core/vectorstore/sqlite_vec.py +328 -0
- agent_runtime_core/vectorstore/vertex.py +295 -0
- {agent_runtime_core-0.7.0.dist-info → agent_runtime_core-0.7.1.dist-info}/METADATA +202 -1
- agent_runtime_core-0.7.1.dist-info/RECORD +57 -0
- agent_runtime_core-0.7.0.dist-info/RECORD +0 -39
- {agent_runtime_core-0.7.0.dist-info → agent_runtime_core-0.7.1.dist-info}/WHEEL +0 -0
- {agent_runtime_core-0.7.0.dist-info → agent_runtime_core-0.7.1.dist-info}/licenses/LICENSE +0 -0
agent_runtime_core/json_runtime.py (new file)
@@ -0,0 +1,509 @@
+"""
+JsonAgentRuntime - A runtime that loads agent configuration from JSON.
+
+This allows running agents defined in the portable AgentConfig format,
+either from a JSON file or from a Django revision snapshot.
+
+Example:
+    # Load from file
+    config = AgentConfig.from_file("my_agent.json")
+    runtime = JsonAgentRuntime(config)
+
+    # Or load directly
+    runtime = JsonAgentRuntime.from_file("my_agent.json")
+
+    # Run the agent
+    result = await runtime.run(ctx)
+
+Multi-Agent Support:
+    The runtime supports sub-agent tools defined in the config. Sub-agents
+    can be embedded (agent_config) or referenced by slug (agent_slug).
+
+    # Config with embedded sub-agent
+    config = AgentConfig(
+        slug="triage-agent",
+        sub_agent_tools=[
+            SubAgentToolConfig(
+                name="billing_specialist",
+                description="Handle billing questions",
+                agent_config=AgentConfig(slug="billing-agent", ...),
+            )
+        ]
+    )
+"""
+
+import importlib
+import logging
+from typing import Any, Callable, Optional
+
+from agent_runtime_core.interfaces import (
+    AgentRuntime,
+    RunContext,
+    RunResult,
+    Tool,
+    ToolDefinition,
+)
+from agent_runtime_core.agentic_loop import run_agentic_loop
+from agent_runtime_core.config_schema import AgentConfig, ToolConfig, SubAgentToolConfig
+
+logger = logging.getLogger(__name__)
+
+
+def resolve_function(function_path: str) -> Callable:
+    """
+    Resolve a function path like 'myapp.services.orders.lookup_order' to the actual callable.
+
+    Args:
+        function_path: Dotted path to the function
+
+    Returns:
+        The callable function
+
+    Raises:
+        ImportError: If the module cannot be imported
+        AttributeError: If the function doesn't exist in the module
+    """
+    parts = function_path.rsplit(".", 1)
+    if len(parts) != 2:
+        raise ValueError(f"Invalid function path: {function_path}. Expected 'module.function' format.")
+
+    module_path, function_name = parts
+    module = importlib.import_module(module_path)
+    return getattr(module, function_name)
+
+
+class ConfiguredTool(Tool):
+    """A tool created from ToolConfig that resolves function_path at runtime."""
+
+    def __init__(self, config: ToolConfig):
+        self.config = config
+        self._function: Optional[Callable] = None
+
+    @property
+    def definition(self) -> ToolDefinition:
+        return ToolDefinition(
+            name=self.config.name,
+            description=self.config.description,
+            parameters=self.config.parameters,
+        )
+
+    def _get_function(self) -> Callable:
+        """Lazily resolve the function."""
+        if self._function is None:
+            self._function = resolve_function(self.config.function_path)
+        return self._function
+
+    async def execute(self, args: dict, ctx: RunContext) -> Any:
+        """Execute the tool by calling the resolved function."""
+        func = self._get_function()
+
+        # Check if function is async
+        if hasattr(func, "__call__"):
+            import asyncio
+            if asyncio.iscoroutinefunction(func):
+                return await func(**args)
+            else:
+                # Run sync function in thread pool
+                loop = asyncio.get_event_loop()
+                return await loop.run_in_executor(None, lambda: func(**args))
+
+        return func(**args)
+
+
+class SubAgentTool(Tool):
+    """
+    A tool that delegates to another agent (sub-agent).
+
+    This implements the "agent-as-tool" pattern where one agent can
+    invoke another agent as if it were a tool.
+    """
+
+    def __init__(
+        self,
+        config: SubAgentToolConfig,
+        agent_registry: Optional[dict[str, "JsonAgentRuntime"]] = None,
+    ):
+        """
+        Initialize a sub-agent tool.
+
+        Args:
+            config: The sub-agent tool configuration
+            agent_registry: Optional registry of pre-loaded agent runtimes
+                (for embedded configs or external lookup)
+        """
+        self.config = config
+        self.agent_registry = agent_registry or {}
+        self._runtime: Optional["JsonAgentRuntime"] = None
+
+    @property
+    def definition(self) -> ToolDefinition:
+        """Get the tool definition for this sub-agent."""
+        # Sub-agent tools take a single 'message' parameter
+        # Note: The handler here is a placeholder - actual execution goes through
+        # execute() which receives the RunContext. The runtime handles this specially.
+        async def _placeholder_handler(**kwargs):
+            raise RuntimeError("SubAgentTool.execute() should be called directly with context")
+
+        return ToolDefinition(
+            name=self.config.name,
+            description=self.config.description,
+            parameters={
+                "type": "object",
+                "properties": {
+                    "message": {
+                        "type": "string",
+                        "description": "The message or task to send to the sub-agent",
+                    },
+                    "context": {
+                        "type": "string",
+                        "description": "Optional additional context for the sub-agent",
+                    },
+                },
+                "required": ["message"],
+            },
+            handler=_placeholder_handler,
+        )
+
+    def _get_runtime(self) -> "JsonAgentRuntime":
+        """Get or create the sub-agent runtime."""
+        if self._runtime is not None:
+            return self._runtime
+
+        # Try embedded config first
+        if self.config.agent_config:
+            self._runtime = JsonAgentRuntime(self.config.agent_config)
+            return self._runtime
+
+        # Try registry lookup by slug
+        if self.config.agent_slug:
+            if self.config.agent_slug in self.agent_registry:
+                self._runtime = self.agent_registry[self.config.agent_slug]
+                return self._runtime
+            raise ValueError(
+                f"Sub-agent '{self.config.agent_slug}' not found in registry. "
+                f"Available: {list(self.agent_registry.keys())}"
+            )
+
+        raise ValueError(
+            f"Sub-agent tool '{self.config.name}' has no agent_config or agent_slug"
+        )
+
+    async def execute(self, args: dict, ctx: RunContext) -> Any:
+        """
+        Execute the sub-agent with the given message.
+
+        Args:
+            args: Tool arguments (message, optional context)
+            ctx: The parent run context
+
+        Returns:
+            The sub-agent's response
+        """
+        message = args.get("message", "")
+        additional_context = args.get("context", "")
+
+        runtime = self._get_runtime()
+
+        # Build messages for sub-agent based on context_mode
+        sub_messages = self._build_sub_agent_messages(message, additional_context, ctx)
+
+        # Create a sub-context for the sub-agent
+        # Note: In a full implementation, you'd want to track parent-child relationships
+        from agent_runtime_core.interfaces import RunContext as RC
+        sub_ctx = RC(
+            run_id=f"{ctx.run_id}-sub-{self.config.name}",
+            conversation_id=ctx.conversation_id,
+            input_messages=sub_messages,
+            params=ctx.params,
+        )
+
+        # Run the sub-agent
+        logger.info(f"Invoking sub-agent '{self.config.name}' ({runtime.key})")
+        result = await runtime.run(sub_ctx)
+
+        # Extract the response
+        response = result.final_output.get("response", "")
+        if not response and result.final_messages:
+            # Try to get from last assistant message
+            for msg in reversed(result.final_messages):
+                if msg.get("role") == "assistant" and msg.get("content"):
+                    response = msg["content"]
+                    break
+
+        return response
+
+    def _build_sub_agent_messages(
+        self,
+        message: str,
+        additional_context: str,
+        ctx: RunContext,
+    ) -> list[dict]:
+        """
+        Build the message list for the sub-agent based on context_mode.
+
+        Args:
+            message: The message to send
+            additional_context: Optional additional context
+            ctx: Parent run context
+
+        Returns:
+            List of messages for the sub-agent
+        """
+        context_mode = self.config.context_mode
+
+        if context_mode == "full":
+            # Pass full conversation history
+            messages = list(ctx.input_messages)
+            # Add the delegation message
+            if additional_context:
+                messages.append({
+                    "role": "user",
+                    "content": f"{message}\n\nAdditional context: {additional_context}",
+                })
+            else:
+                messages.append({"role": "user", "content": message})
+            return messages
+
+        elif context_mode == "summary":
+            # Summarize context (simplified - in production you'd use LLM)
+            summary = f"Previous conversation context: {len(ctx.input_messages)} messages exchanged."
+            return [
+                {"role": "system", "content": summary},
+                {"role": "user", "content": message},
+            ]
+
+        else:  # "message_only" or default
+            # Just the message, no context
+            content = message
+            if additional_context:
+                content = f"{message}\n\nContext: {additional_context}"
+            return [{"role": "user", "content": content}]
+
+
+class JsonAgentRuntime(AgentRuntime):
+    """
+    An agent runtime that loads its configuration from AgentConfig.
+
+    This provides a portable way to define and run agents using JSON configuration,
+    without requiring Django or any specific framework.
+
+    Supports multi-agent systems through sub_agent_tools, which allow this agent
+    to delegate to other agents as if they were tools.
+    """
+
+    def __init__(
+        self,
+        config: AgentConfig,
+        agent_registry: Optional[dict[str, "JsonAgentRuntime"]] = None,
+    ):
+        """
+        Initialize the runtime with an AgentConfig.
+
+        Args:
+            config: The agent configuration
+            agent_registry: Optional registry of pre-loaded agent runtimes
+                for sub-agent lookup by slug
+        """
+        self.config = config
+        self.agent_registry = agent_registry or {}
+        self._tools: list[Tool] = []
+        self._tools_loaded = False
+
+        # Build registry from embedded sub-agent configs
+        self._build_embedded_agent_registry()
+
+    def _build_embedded_agent_registry(self) -> None:
+        """Build registry entries for embedded sub-agent configs."""
+        for sub_tool in self.config.sub_agent_tools:
+            if sub_tool.agent_config:
+                slug = sub_tool.agent_config.slug
+                if slug not in self.agent_registry:
+                    # Create runtime for embedded config (recursively)
+                    self.agent_registry[slug] = JsonAgentRuntime(
+                        sub_tool.agent_config,
+                        agent_registry=self.agent_registry,
+                    )
+
+    @property
+    def key(self) -> str:
+        return self.config.slug
+
+    @classmethod
+    def from_file(cls, path: str) -> "JsonAgentRuntime":
+        """Load a runtime from a JSON file."""
+        config = AgentConfig.from_file(path)
+        return cls(config)
+
+    @classmethod
+    def from_json(cls, json_str: str) -> "JsonAgentRuntime":
+        """Load a runtime from a JSON string."""
+        config = AgentConfig.from_json(json_str)
+        return cls(config)
+
+    @classmethod
+    def from_dict(cls, data: dict) -> "JsonAgentRuntime":
+        """Load a runtime from a dictionary."""
+        config = AgentConfig.from_dict(data)
+        return cls(config)
+
+    @classmethod
+    def from_system_export(cls, data: dict) -> "JsonAgentRuntime":
+        """
+        Load a runtime from an exported multi-agent system.
+
+        This handles the system export format which has entry_agent at the top level.
+
+        Args:
+            data: Exported system config (from AgentSystemVersion.export_config)
+
+        Returns:
+            JsonAgentRuntime for the entry agent with all sub-agents wired up
+        """
+        entry_agent_data = data.get("entry_agent")
+        if not entry_agent_data:
+            raise ValueError("System export must have 'entry_agent' key")
+
+        config = AgentConfig.from_dict(entry_agent_data)
+        return cls(config)
+
+    def _load_tools(self) -> list[Tool]:
+        """Load and resolve all tools from config, including sub-agent tools."""
+        if self._tools_loaded:
+            return self._tools
+
+        self._tools = []
+
+        # Load regular function tools
+        for tool_config in self.config.tools:
+            try:
+                tool = ConfiguredTool(tool_config)
+                # Validate that the function can be resolved
+                tool._get_function()
+                self._tools.append(tool)
+                logger.debug(f"Loaded tool: {tool_config.name}")
+            except Exception as e:
+                logger.error(f"Failed to load tool {tool_config.name}: {e}")
+                raise
+
+        # Load sub-agent tools
+        for sub_tool_config in self.config.sub_agent_tools:
+            try:
+                tool = SubAgentTool(sub_tool_config, agent_registry=self.agent_registry)
+                self._tools.append(tool)
+                logger.debug(f"Loaded sub-agent tool: {sub_tool_config.name}")
+            except Exception as e:
+                logger.error(f"Failed to load sub-agent tool {sub_tool_config.name}: {e}")
+                raise
+
+        self._tools_loaded = True
+        return self._tools
+
+    def _build_system_prompt(self) -> str:
+        """Build the full system prompt including knowledge."""
+        parts = []
+
+        # Add base system prompt
+        if self.config.system_prompt:
+            parts.append(self.config.system_prompt)
+
+        # Add always-included knowledge
+        for knowledge in self.config.knowledge:
+            if knowledge.inclusion_mode == "always" and knowledge.content:
+                parts.append(f"\n\n## {knowledge.name}\n{knowledge.content}")
+
+        return "\n".join(parts)
+
+    async def run(self, ctx: RunContext) -> RunResult:
+        """
+        Run the agent using the agentic loop.
+
+        Args:
+            ctx: The run context with conversation history and state
+
+        Returns:
+            RunResult with the agent's response
+        """
+        from agent_runtime_core.llm import get_llm_client
+
+        # Load tools
+        tools = self._load_tools()
+
+        # Build system prompt with knowledge
+        system_prompt = self._build_system_prompt()
+
+        # Get model settings
+        model = self.config.model
+        model_settings = self.config.model_settings or {}
+
+        # Get LLM client
+        llm = get_llm_client(model=model)
+
+        # Build messages list with system prompt
+        messages = []
+        if system_prompt:
+            messages.append({"role": "system", "content": system_prompt})
+        messages.extend(ctx.input_messages)
+
+        # Convert tools to OpenAI format
+        tool_schemas = None
+        tool_map = {}
+        if tools:
+            tool_schemas = []
+            for tool in tools:
+                tool_schemas.append({
+                    "type": "function",
+                    "function": {
+                        "name": tool.definition.name,
+                        "description": tool.definition.description,
+                        "parameters": tool.definition.parameters,
+                    }
+                })
+                tool_map[tool.definition.name] = tool
+
+        # Create tool executor
+        async def execute_tool(name: str, args: dict) -> Any:
+            if name not in tool_map:
+                raise ValueError(f"Unknown tool: {name}")
+            return await tool_map[name].execute(args, ctx)
+
+        # Run the agentic loop
+        result = await run_agentic_loop(
+            llm=llm,
+            messages=messages,
+            tools=tool_schemas,
+            execute_tool=execute_tool,
+            ctx=ctx,
+            model=model,
+            **model_settings,
+        )
+
+        return RunResult(
+            final_output={"response": result.final_content},
+            final_messages=result.messages,
+            usage=result.usage,
+        )
+
+    def get_tools(self) -> list[Tool]:
+        """Get the list of configured tools (including sub-agent tools)."""
+        return self._load_tools()
+
+    def get_system_prompt(self) -> str:
+        """Get the full system prompt including knowledge."""
+        return self._build_system_prompt()
+
+    def get_sub_agent_tools(self) -> list[SubAgentTool]:
+        """Get only the sub-agent tools."""
+        return [t for t in self._load_tools() if isinstance(t, SubAgentTool)]
+
+    def get_sub_agent_runtimes(self) -> dict[str, "JsonAgentRuntime"]:
+        """
+        Get all sub-agent runtimes available to this agent.
+
+        Returns:
+            Dict mapping agent slug to JsonAgentRuntime
+        """
+        return dict(self.agent_registry)
+
+    def has_sub_agents(self) -> bool:
+        """Check if this agent has any sub-agent tools."""
+        return len(self.config.sub_agent_tools) > 0
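Taken together, the new module turns a JSON-defined agent into a runnable agentic loop, with function tools resolved from dotted paths. A minimal usage sketch follows, assuming the package is installed with an API key configured; the keyword fields passed to AgentConfig, ToolConfig, and RunContext are inferred from the attributes the runtime reads above (config_schema.py and interfaces.py are not shown in full, so the exact constructor signatures are assumptions), and myapp.tools is a hypothetical importable module:

import asyncio

from agent_runtime_core.config_schema import AgentConfig, ToolConfig
from agent_runtime_core.interfaces import RunContext
from agent_runtime_core.json_runtime import JsonAgentRuntime

config = AgentConfig(
    slug="support-agent",  # exposed as runtime.key
    system_prompt="You help customers with their orders.",
    model="gpt-4o",
    tools=[
        ToolConfig(
            name="lookup_order",
            description="Look up an order by ID",
            parameters={
                "type": "object",
                "properties": {"order_id": {"type": "string"}},
                "required": ["order_id"],
            },
            # Dotted 'module.function' path; resolve_function() imports it
            # at tool-load time. 'myapp.tools' is a placeholder module that
            # would define: def lookup_order(order_id: str) -> dict: ...
            function_path="myapp.tools.lookup_order",
        )
    ],
)

async def main() -> None:
    runtime = JsonAgentRuntime(config)
    ctx = RunContext(
        run_id="run-1",
        conversation_id="conv-1",
        input_messages=[{"role": "user", "content": "Where is order 42?"}],
        params={},
    )
    result = await runtime.run(ctx)
    print(result.final_output["response"])

asyncio.run(main())

Note that _load_tools() eagerly calls _get_function() on each ConfiguredTool, so a bad function_path fails at startup rather than mid-run.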
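The multi-agent path works the same way through SubAgentTool. A sketch of a triage agent with an embedded billing sub-agent, under the same assumption that the config constructors accept the fields the runtime reads (the context_mode values "full", "summary", and "message_only" come from _build_sub_agent_messages above):

from agent_runtime_core.config_schema import AgentConfig, SubAgentToolConfig
from agent_runtime_core.json_runtime import JsonAgentRuntime

billing = AgentConfig(
    slug="billing-agent",
    system_prompt="You resolve billing questions.",
    model="gpt-4o",
)

triage = AgentConfig(
    slug="triage-agent",
    system_prompt="Route each customer request to the right specialist.",
    model="gpt-4o",
    sub_agent_tools=[
        SubAgentToolConfig(
            name="billing_specialist",
            description="Handle billing questions",
            agent_config=billing,          # embedded; agent_slug="..." would use the registry instead
            context_mode="message_only",   # or "full" / "summary"
        )
    ],
)

runtime = JsonAgentRuntime(triage)
print(runtime.has_sub_agents())                # True
print(list(runtime.get_sub_agent_runtimes()))  # ["billing-agent"], built from the embedded config

Embedding keeps the whole system in one portable JSON document, while agent_slug lets several parents share one pre-loaded runtime via agent_registry.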
agent_runtime_core/llm/__init__.py
@@ -6,15 +6,38 @@ Provides:
 - OpenAIClient: OpenAI API client
 - AnthropicClient: Anthropic API client
 - LiteLLMClient: LiteLLM adapter (optional)
+- get_llm_client: Factory with auto-detection from model name
+- get_llm_client_for_model: Get client for a specific model
 """
 
+from typing import Optional
+
 from agent_runtime_core.interfaces import LLMClient, LLMResponse, LLMStreamChunk
+from agent_runtime_core.llm.models_config import (
+    ModelInfo,
+    SUPPORTED_MODELS,
+    DEFAULT_MODEL,
+    get_model_info,
+    get_provider_for_model,
+    list_models_for_ui,
+)
 
 __all__ = [
+    # Interfaces
     "LLMClient",
     "LLMResponse",
     "LLMStreamChunk",
+    # Factory functions
     "get_llm_client",
+    "get_llm_client_for_model",
+    # Model config
+    "ModelInfo",
+    "SUPPORTED_MODELS",
+    "DEFAULT_MODEL",
+    "get_model_info",
+    "get_provider_for_model",
+    "list_models_for_ui",
+    # Exceptions
     "OpenAIConfigurationError",
     "AnthropicConfigurationError",
 ]
@@ -30,37 +53,56 @@ class AnthropicConfigurationError(Exception):
     pass
 
 
-def get_llm_client(provider: str = None, **kwargs) -> LLMClient:
+def get_llm_client(
+    provider: Optional[str] = None,
+    model: Optional[str] = None,
+    **kwargs
+) -> LLMClient:
     """
     Factory function to get an LLM client.
 
+    Can auto-detect provider from model name if model is provided.
+
     Args:
-        provider: "openai", "anthropic", "litellm", etc.
+        provider: "openai", "anthropic", "litellm", etc. (optional if model provided)
+        model: Model ID - if provided, auto-detects provider
         **kwargs: Provider-specific configuration (e.g., api_key, default_model)
 
     Returns:
         LLMClient instance
-
+
     Raises:
         OpenAIConfigurationError: If OpenAI is selected but API key is not configured
         AnthropicConfigurationError: If Anthropic is selected but API key is not configured
         ValueError: If an unknown provider is specified
-
+
     Example:
-        #
+        # Auto-detect from model name (recommended)
+        llm = get_llm_client(model="claude-sonnet-4-20250514")
+        llm = get_llm_client(model="gpt-4o")
+
+        # Using config
         from agent_runtime_core.config import configure
         configure(model_provider="openai", openai_api_key="sk-...")
         llm = get_llm_client()
-
+
         # Or with explicit API key
         llm = get_llm_client(api_key='sk-...')
-
+
         # Or with a different provider
         llm = get_llm_client(provider='anthropic', api_key='sk-ant-...')
     """
     from agent_runtime_core.config import get_config
 
     config = get_config()
+
+    # Auto-detect provider from model name if not explicitly provided
+    if provider is None and model:
+        detected_provider = get_provider_for_model(model)
+        if detected_provider:
+            provider = detected_provider
+
+    # Fall back to config
     provider = provider or config.model_provider
 
     if provider == "openai":
@@ -81,3 +123,34 @@ def get_llm_client(provider: str = None, **kwargs) -> LLMClient:
         f"Supported providers: 'openai', 'anthropic', 'litellm'\n"
         f"Set model_provider in your configuration."
     )
+
+
+def get_llm_client_for_model(model: str, **kwargs) -> LLMClient:
+    """
+    Get an LLM client configured for a specific model.
+
+    This is a convenience function that auto-detects the provider
+    and sets the default model.
+
+    Args:
+        model: Model ID (e.g., "gpt-4o", "claude-sonnet-4-20250514")
+        **kwargs: Additional client configuration
+
+    Returns:
+        LLMClient configured for the specified model
+
+    Raises:
+        ValueError: If model provider cannot be determined
+
+    Example:
+        llm = get_llm_client_for_model("claude-sonnet-4-20250514")
+        response = await llm.generate(messages)  # Uses claude-sonnet-4-20250514
+    """
+    provider = get_provider_for_model(model)
+    if not provider:
+        raise ValueError(
+            f"Cannot determine provider for model: {model}\n"
+            f"Known models: {', '.join(SUPPORTED_MODELS.keys())}"
+        )
+
+    return get_llm_client(provider=provider, default_model=model, **kwargs)
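The practical effect of this change is that callers can pass only a model ID and let the factory pick the provider. A sketch using just the names exported above, assuming the relevant API keys are configured (otherwise the factory raises OpenAIConfigurationError or AnthropicConfigurationError as documented):

from agent_runtime_core.llm import (
    SUPPORTED_MODELS,
    get_llm_client,
    get_llm_client_for_model,
    get_provider_for_model,
)

# Provider inferred from the model name via get_provider_for_model()
print(get_provider_for_model("gpt-4o"))  # e.g. "openai"
llm = get_llm_client(model="claude-sonnet-4-20250514")

# Convenience wrapper that also pins the model as the client's default_model
llm = get_llm_client_for_model("gpt-4o")

# Unknown model IDs raise ValueError listing SUPPORTED_MODELS.keys()
print(sorted(SUPPORTED_MODELS))

This is also how the new JsonAgentRuntime.run() obtains its client: it calls get_llm_client(model=self.config.model), so a config needs only a model ID, not a provider name.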