agent-runtime-core 0.7.0-py3-none-any.whl → 0.7.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agent_runtime_core/__init__.py +108 -1
- agent_runtime_core/agentic_loop.py +254 -0
- agent_runtime_core/config.py +54 -4
- agent_runtime_core/config_schema.py +307 -0
- agent_runtime_core/interfaces.py +106 -0
- agent_runtime_core/json_runtime.py +509 -0
- agent_runtime_core/llm/__init__.py +80 -7
- agent_runtime_core/llm/anthropic.py +133 -12
- agent_runtime_core/llm/models_config.py +180 -0
- agent_runtime_core/memory/__init__.py +70 -0
- agent_runtime_core/memory/manager.py +554 -0
- agent_runtime_core/memory/mixin.py +294 -0
- agent_runtime_core/multi_agent.py +569 -0
- agent_runtime_core/persistence/__init__.py +2 -0
- agent_runtime_core/persistence/file.py +277 -0
- agent_runtime_core/rag/__init__.py +65 -0
- agent_runtime_core/rag/chunking.py +224 -0
- agent_runtime_core/rag/indexer.py +253 -0
- agent_runtime_core/rag/retriever.py +261 -0
- agent_runtime_core/runner.py +193 -15
- agent_runtime_core/tool_calling_agent.py +88 -130
- agent_runtime_core/tools.py +179 -0
- agent_runtime_core/vectorstore/__init__.py +193 -0
- agent_runtime_core/vectorstore/base.py +138 -0
- agent_runtime_core/vectorstore/embeddings.py +242 -0
- agent_runtime_core/vectorstore/sqlite_vec.py +328 -0
- agent_runtime_core/vectorstore/vertex.py +295 -0
- {agent_runtime_core-0.7.0.dist-info → agent_runtime_core-0.7.1.dist-info}/METADATA +202 -1
- agent_runtime_core-0.7.1.dist-info/RECORD +57 -0
- agent_runtime_core-0.7.0.dist-info/RECORD +0 -39
- {agent_runtime_core-0.7.0.dist-info → agent_runtime_core-0.7.1.dist-info}/WHEEL +0 -0
- {agent_runtime_core-0.7.0.dist-info → agent_runtime_core-0.7.1.dist-info}/licenses/LICENSE +0 -0
agent_runtime_core/llm/anthropic.py

@@ -2,6 +2,7 @@
 Anthropic API client implementation.
 """

+import json
 import os
 from typing import AsyncIterator, Optional

@@ -103,14 +104,17 @@ class AnthropicClient(LLMClient):
         """Generate a completion from Anthropic."""
         model = model or self.default_model

-        # Extract system message
+        # Extract system message and convert other messages
         system_message = None
-        chat_messages = []
+        converted_messages = []
         for msg in messages:
             if msg.get("role") == "system":
                 system_message = msg.get("content", "")
             else:
-                chat_messages.append(self._convert_message(msg))
+                converted_messages.append(self._convert_message(msg))
+
+        # Merge consecutive messages with the same role (required by Anthropic)
+        chat_messages = self._merge_consecutive_messages(converted_messages)

         request_kwargs = {
             "model": model,
@@ -152,14 +156,17 @@ class AnthropicClient(LLMClient):
         """Stream a completion from Anthropic."""
         model = model or self.default_model

-        # Extract system message
+        # Extract system message and convert other messages
         system_message = None
-        chat_messages = []
+        converted_messages = []
         for msg in messages:
             if msg.get("role") == "system":
                 system_message = msg.get("content", "")
             else:
-                chat_messages.append(self._convert_message(msg))
+                converted_messages.append(self._convert_message(msg))
+
+        # Merge consecutive messages with the same role (required by Anthropic)
+        chat_messages = self._merge_consecutive_messages(converted_messages)

         request_kwargs = {
             "model": model,
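Both hunks make the same change in complete() and stream(): the system message is pulled out because Anthropic's Messages API takes the system prompt as a top-level system parameter, and the converted turns are merged because that API requires strict user/assistant alternation. As an illustrative sketch only (the package builds the equivalent request_kwargs internally; the model name here is just one of the registry entries added later in this release), the extracted pieces map onto the official anthropic SDK like this:

    # Illustrative sketch, not code from this package: where the extracted
    # system prompt and the merged chat messages land in the Anthropic SDK.
    import anthropic

    client = anthropic.Anthropic()  # reads ANTHROPIC_API_KEY from the environment

    response = client.messages.create(
        model="claude-sonnet-4-5-20250929",
        max_tokens=1024,
        system="You are a helpful assistant.",         # top-level parameter, not a message
        messages=[{"role": "user", "content": "Hi"}],  # must alternate user/assistant
    )
    print(response.content[0].text)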
@@ -183,18 +190,130 @@ class AnthropicClient(LLMClient):
             yield LLMStreamChunk(finish_reason="stop")

     def _convert_message(self, msg: Message) -> dict:
-        """Convert our message format to Anthropic format."""
+        """
+        Convert our message format to Anthropic format.
+
+        Handles:
+        - Regular user/assistant messages
+        - Assistant messages with tool_calls (need content blocks)
+        - Tool result messages (need tool_result content blocks)
+        """
         role = msg.get("role", "user")
-        if role == "assistant":
-            role = "assistant"
-        elif role == "tool":
-            role = "user"  # Tool results go as user messages in Anthropic

+        # Handle tool result messages
+        if role == "tool":
+            # Tool results go as user messages with tool_result content blocks
+            return {
+                "role": "user",
+                "content": [
+                    {
+                        "type": "tool_result",
+                        "tool_use_id": msg.get("tool_call_id", ""),
+                        "content": msg.get("content", ""),
+                    }
+                ],
+            }
+
+        # Handle assistant messages with tool_calls
+        if role == "assistant" and msg.get("tool_calls"):
+            content_blocks = []
+
+            # Add text content if present
+            text_content = msg.get("content", "")
+            if text_content:
+                content_blocks.append({
+                    "type": "text",
+                    "text": text_content,
+                })
+
+            # Add tool_use blocks for each tool call
+            for tool_call in msg.get("tool_calls", []):
+                # Handle both dict format and nested function format
+                if "function" in tool_call:
+                    # OpenAI-style format: {"id": ..., "function": {"name": ..., "arguments": ...}}
+                    func = tool_call["function"]
+                    tool_id = tool_call.get("id", "")
+                    tool_name = func.get("name", "")
+                    # Arguments might be a string (JSON) or already a dict
+                    args = func.get("arguments", {})
+                    if isinstance(args, str):
+                        try:
+                            args = json.loads(args)
+                        except json.JSONDecodeError:
+                            args = {}
+                else:
+                    # Direct format: {"id": ..., "name": ..., "arguments": ...}
+                    tool_id = tool_call.get("id", "")
+                    tool_name = tool_call.get("name", "")
+                    args = tool_call.get("arguments", {})
+                    if isinstance(args, str):
+                        try:
+                            args = json.loads(args)
+                        except json.JSONDecodeError:
+                            args = {}
+
+                content_blocks.append({
+                    "type": "tool_use",
+                    "id": tool_id,
+                    "name": tool_name,
+                    "input": args,
+                })
+
+            return {
+                "role": "assistant",
+                "content": content_blocks,
+            }
+
+        # Regular user or assistant message
         return {
             "role": role,
             "content": msg.get("content", ""),
         }

+    def _merge_consecutive_messages(self, messages: list[dict]) -> list[dict]:
+        """
+        Merge consecutive messages with the same role.
+
+        Anthropic requires that messages alternate between user and assistant roles.
+        When we have multiple tool results (which become user messages), they need
+        to be combined into a single user message with multiple content blocks.
+        """
+        if not messages:
+            return messages
+
+        merged = []
+        for msg in messages:
+            if not merged:
+                merged.append(msg)
+                continue
+
+            last_msg = merged[-1]
+
+            # If same role, merge the content
+            if msg["role"] == last_msg["role"]:
+                last_content = last_msg["content"]
+                new_content = msg["content"]
+
+                # Convert to list format if needed
+                if isinstance(last_content, str):
+                    if last_content:
+                        last_content = [{"type": "text", "text": last_content}]
+                    else:
+                        last_content = []
+
+                if isinstance(new_content, str):
+                    if new_content:
+                        new_content = [{"type": "text", "text": new_content}]
+                    else:
+                        new_content = []
+
+                # Merge content blocks
+                last_msg["content"] = last_content + new_content
+            else:
+                merged.append(msg)
+
+        return merged
+
     def _convert_tools(self, tools: list[dict]) -> list[dict]:
         """Convert OpenAI tool format to Anthropic format."""
         result = []
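Concretely, here is a hand-worked sketch of what _convert_message plus _merge_consecutive_messages produce for a transcript where the assistant issued two tool calls in one turn (message shapes follow the OpenAI-style format handled above; the "expected" value is traced from the code, not taken from the package's tests):

    # Input: an assistant turn with two tool calls, followed by two tool results.
    history = [
        {"role": "assistant", "content": "", "tool_calls": [
            {"id": "call_1", "function": {"name": "get_weather", "arguments": '{"city": "Paris"}'}},
            {"id": "call_2", "function": {"name": "get_time", "arguments": '{"tz": "CET"}'}},
        ]},
        {"role": "tool", "tool_call_id": "call_1", "content": "18°C, sunny"},
        {"role": "tool", "tool_call_id": "call_2", "content": "14:05"},
    ]

    # Each tool result becomes a user message with one tool_result block; two
    # consecutive user messages would break Anthropic's alternation rule, so the
    # merge step folds them into a single user message with two content blocks:
    expected = [
        {"role": "assistant", "content": [
            {"type": "tool_use", "id": "call_1", "name": "get_weather", "input": {"city": "Paris"}},
            {"type": "tool_use", "id": "call_2", "name": "get_time", "input": {"tz": "CET"}},
        ]},
        {"role": "user", "content": [
            {"type": "tool_result", "tool_use_id": "call_1", "content": "18°C, sunny"},
            {"type": "tool_result", "tool_use_id": "call_2", "content": "14:05"},
        ]},
    ]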
@@ -217,12 +336,14 @@ class AnthropicClient(LLMClient):
             if block.type == "text":
                 content += block.text
             elif block.type == "tool_use":
+                # Convert input to JSON string (not Python str() which gives wrong format)
+                arguments = json.dumps(block.input) if isinstance(block.input, dict) else str(block.input)
                 tool_calls.append({
                     "id": block.id,
                     "type": "function",
                     "function": {
                         "name": block.name,
-                        "arguments": str(block.input),
+                        "arguments": arguments,
                     },
                 })

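This fix matters because tool-call arguments are round-tripped as JSON strings: Python's str() on a dict emits single quotes and True/None, which json.loads rejects. A quick illustration (the "before" line reflects the plain-str() serialization the new comment refers to):

    import json

    tool_input = {"city": "Paris", "metric": True, "limit": None}

    # Python repr, not JSON — fails json.loads:
    print(str(tool_input))         # {'city': 'Paris', 'metric': True, 'limit': None}

    # Valid JSON arguments string, as 0.7.1 now produces:
    print(json.dumps(tool_input))  # {"city": "Paris", "metric": true, "limit": null}

    # Only the latter survives the round trip tool callers rely on.
    json.loads(json.dumps(tool_input))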
agent_runtime_core/llm/models_config.py

@@ -0,0 +1,180 @@
+"""
+Supported LLM models configuration.
+
+Provides a central registry of supported models with their providers,
+capabilities, and metadata. Used for:
+- Auto-detecting provider from model name
+- Populating model selectors in UI
+- Validating model choices
+"""
+
+from dataclasses import dataclass
+from typing import Optional
+
+
+@dataclass
+class ModelInfo:
+    """Information about a supported model."""
+    id: str  # Model identifier (e.g., "gpt-4o", "claude-sonnet-4-20250514")
+    name: str  # Display name (e.g., "GPT-4o", "Claude Sonnet 4")
+    provider: str  # "openai" or "anthropic"
+    context_window: int  # Max context in tokens
+    supports_tools: bool = True
+    supports_vision: bool = False
+    supports_streaming: bool = True
+    description: str = ""
+
+
+# Registry of supported models
+SUPPORTED_MODELS: dict[str, ModelInfo] = {
+    # OpenAI Models
+    "gpt-4o": ModelInfo(
+        id="gpt-4o",
+        name="GPT-4o",
+        provider="openai",
+        context_window=128000,
+        supports_vision=True,
+        description="Most capable OpenAI model, multimodal",
+    ),
+    "gpt-4o-mini": ModelInfo(
+        id="gpt-4o-mini",
+        name="GPT-4o Mini",
+        provider="openai",
+        context_window=128000,
+        supports_vision=True,
+        description="Fast and affordable, good for most tasks",
+    ),
+    "gpt-4-turbo": ModelInfo(
+        id="gpt-4-turbo",
+        name="GPT-4 Turbo",
+        provider="openai",
+        context_window=128000,
+        supports_vision=True,
+        description="Previous generation flagship",
+    ),
+    "o1": ModelInfo(
+        id="o1",
+        name="o1",
+        provider="openai",
+        context_window=200000,
+        supports_tools=False,
+        description="Advanced reasoning model",
+    ),
+    "o1-mini": ModelInfo(
+        id="o1-mini",
+        name="o1 Mini",
+        provider="openai",
+        context_window=128000,
+        supports_tools=False,
+        description="Fast reasoning model",
+    ),
+    "o3-mini": ModelInfo(
+        id="o3-mini",
+        name="o3 Mini",
+        provider="openai",
+        context_window=200000,
+        supports_tools=True,
+        description="Latest reasoning model with tool use",
+    ),
+
+    # Anthropic Models - Claude 4.5 (latest)
+    "claude-sonnet-4-5-20250929": ModelInfo(
+        id="claude-sonnet-4-5-20250929",
+        name="Claude Sonnet 4.5",
+        provider="anthropic",
+        context_window=200000,
+        supports_vision=True,
+        description="Best balance of speed and capability for agents and coding",
+    ),
+    "claude-opus-4-5-20251101": ModelInfo(
+        id="claude-opus-4-5-20251101",
+        name="Claude Opus 4.5",
+        provider="anthropic",
+        context_window=200000,
+        supports_vision=True,
+        description="Premium model - maximum intelligence with practical performance",
+    ),
+    "claude-haiku-4-5-20251001": ModelInfo(
+        id="claude-haiku-4-5-20251001",
+        name="Claude Haiku 4.5",
+        provider="anthropic",
+        context_window=200000,
+        supports_vision=True,
+        description="Fastest model with near-frontier intelligence",
+    ),
+    # Anthropic Models - Claude 4 (previous generation)
+    "claude-sonnet-4-20250514": ModelInfo(
+        id="claude-sonnet-4-20250514",
+        name="Claude Sonnet 4",
+        provider="anthropic",
+        context_window=200000,
+        supports_vision=True,
+        description="Previous generation Sonnet",
+    ),
+    "claude-opus-4-20250514": ModelInfo(
+        id="claude-opus-4-20250514",
+        name="Claude Opus 4",
+        provider="anthropic",
+        context_window=200000,
+        supports_vision=True,
+        description="Previous generation Opus",
+    ),
+    # Anthropic Models - Claude 3.5 (legacy)
+    "claude-3-5-sonnet-20241022": ModelInfo(
+        id="claude-3-5-sonnet-20241022",
+        name="Claude 3.5 Sonnet",
+        provider="anthropic",
+        context_window=200000,
+        supports_vision=True,
+        description="Legacy model, still excellent",
+    ),
+    "claude-3-5-haiku-20241022": ModelInfo(
+        id="claude-3-5-haiku-20241022",
+        name="Claude 3.5 Haiku",
+        provider="anthropic",
+        context_window=200000,
+        supports_vision=True,
+        description="Legacy fast model",
+    ),
+}
+
+# Default model to use
+DEFAULT_MODEL = "claude-sonnet-4-5-20250929"
+
+
+def get_model_info(model_id: str) -> Optional[ModelInfo]:
+    """Get info for a model by ID."""
+    return SUPPORTED_MODELS.get(model_id)
+
+
+def get_provider_for_model(model_id: str) -> Optional[str]:
+    """
+    Detect the provider for a model ID.
+
+    Returns "openai", "anthropic", or None if unknown.
+    """
+    # Check registry first
+    if model_id in SUPPORTED_MODELS:
+        return SUPPORTED_MODELS[model_id].provider
+
+    # Fallback heuristics for unlisted models
+    if model_id.startswith("gpt-") or model_id.startswith("o1") or model_id.startswith("o3"):
+        return "openai"
+    if model_id.startswith("claude"):
+        return "anthropic"
+
+    return None
+
+
+def list_models_for_ui() -> list[dict]:
+    """Get list of models formatted for UI dropdowns."""
+    return [
+        {
+            "id": m.id,
+            "name": m.name,
+            "provider": m.provider,
+            "description": m.description,
+        }
+        for m in SUPPORTED_MODELS.values()
+    ]
+
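A short sketch of how the new registry helpers behave, using only the functions defined above (expected outputs shown as comments, traced from the registry contents):

    from agent_runtime_core.llm.models_config import (
        DEFAULT_MODEL,
        get_model_info,
        get_provider_for_model,
        list_models_for_ui,
    )

    print(DEFAULT_MODEL)                            # claude-sonnet-4-5-20250929
    print(get_provider_for_model("gpt-4o"))         # openai (registry hit)
    print(get_provider_for_model("o3"))             # openai (prefix heuristic fallback)
    print(get_provider_for_model("mistral-large"))  # None (unknown provider)

    info = get_model_info("claude-haiku-4-5-20251001")
    if info is not None:
        print(info.context_window, info.supports_tools)  # 200000 True

    # One dict per registered model, ready for a UI dropdown
    print(len(list_models_for_ui()))                # 13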
agent_runtime_core/memory/__init__.py

@@ -0,0 +1,70 @@
+"""
+Cross-conversation memory system for AI agents.
+
+This module provides automatic memory extraction and recall across conversations,
+allowing agents to remember facts, preferences, and context about users.
+
+Key Components:
+- MemoryManager: Extracts and recalls memories using LLM
+- MemoryConfig: Configuration for memory behavior
+- MemoryEnabledAgent: Mixin for adding memory to agents
+
+Example usage:
+    from agent_runtime_core.memory import MemoryManager, MemoryConfig
+    from agent_runtime_core.persistence import FileKnowledgeStore
+    from agent_runtime_core.llm import get_llm_client
+
+    # Setup memory manager
+    knowledge_store = FileKnowledgeStore()
+    llm = get_llm_client()
+    memory = MemoryManager(knowledge_store, llm)
+
+    # Extract memories from a conversation
+    messages = [
+        {"role": "user", "content": "My name is Alice and I prefer dark mode."},
+        {"role": "assistant", "content": "Nice to meet you, Alice! I've noted your preference for dark mode."},
+    ]
+    await memory.extract_memories(messages, user_id="user-123")
+
+    # Recall memories for a new conversation
+    relevant_memories = await memory.recall_memories(
+        query="What theme does the user prefer?",
+        user_id="user-123",
+    )
+
+    # Or use with ToolCallingAgent via mixin
+    from agent_runtime_core.memory import MemoryEnabledAgent
+    from agent_runtime_core import ToolCallingAgent
+
+    class MyAgent(MemoryEnabledAgent, ToolCallingAgent):
+        memory_enabled = True  # Enable memory for this agent
+
+        @property
+        def key(self) -> str:
+            return "my-agent"
+
+        @property
+        def system_prompt(self) -> str:
+            return "You are a helpful assistant."
+
+        @property
+        def tools(self) -> ToolRegistry:
+            return ToolRegistry()
+"""
+
+from agent_runtime_core.memory.manager import (
+    MemoryManager,
+    MemoryConfig,
+    ExtractedMemory,
+    RecalledMemory,
+)
+from agent_runtime_core.memory.mixin import MemoryEnabledAgent
+
+__all__ = [
+    "MemoryManager",
+    "MemoryConfig",
+    "ExtractedMemory",
+    "RecalledMemory",
+    "MemoryEnabledAgent",
+]
+
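For completeness, a minimal end-to-end driver around the API documented in the docstring above. This is a sketch: it assumes an LLM configured via environment variables, the default FileKnowledgeStore location, and that recall_memories returns an iterable of RecalledMemory objects (whose fields live in memory/manager.py, so the sketch just prints them):

    import asyncio

    from agent_runtime_core.llm import get_llm_client
    from agent_runtime_core.memory import MemoryManager
    from agent_runtime_core.persistence import FileKnowledgeStore


    async def main() -> None:
        # Wire the manager exactly as the module docstring shows
        memory = MemoryManager(FileKnowledgeStore(), get_llm_client())

        # Extract memories from one conversation...
        await memory.extract_memories(
            [
                {"role": "user", "content": "My name is Alice and I prefer dark mode."},
                {"role": "assistant", "content": "Noted, Alice: dark mode it is."},
            ],
            user_id="user-123",
        )

        # ...and recall them in a later one
        recalled = await memory.recall_memories(
            query="What theme does the user prefer?",
            user_id="user-123",
        )
        for m in recalled:
            print(m)  # RecalledMemory; exact fields defined in memory/manager.py


    asyncio.run(main())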