agent-runtime-core 0.9.0__tar.gz → 0.9.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agent_runtime_core-0.9.1/LICENSE +83 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/PKG-INFO +1 -1
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/agentic_loop.py +11 -6
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/config.py +4 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/interfaces.py +29 -11
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/llm/anthropic.py +78 -7
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/llm/models_config.py +50 -6
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/llm/openai.py +51 -2
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/multi_agent.py +11 -1
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/tool_calling_agent.py +3 -1
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/pyproject.toml +3 -3
- agent_runtime_core-0.9.0/LICENSE +0 -21
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/.gitignore +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/README.md +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/__init__.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/config_schema.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/contexts.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/events/__init__.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/events/base.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/events/memory.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/events/redis.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/events/sqlite.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/files/__init__.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/files/base.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/files/ocr.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/files/processors.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/files/tools.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/files/vision.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/json_runtime.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/llm/__init__.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/llm/litellm_client.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/memory/__init__.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/memory/manager.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/memory/mixin.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/persistence/__init__.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/persistence/base.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/persistence/file.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/persistence/manager.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/privacy.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/queue/__init__.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/queue/base.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/queue/memory.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/queue/redis.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/queue/sqlite.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/rag/__init__.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/rag/chunking.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/rag/indexer.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/rag/retriever.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/registry.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/runner.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/state/__init__.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/state/base.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/state/memory.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/state/redis.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/state/sqlite.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/steps.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/testing.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/tools.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/tracing/__init__.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/tracing/langfuse.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/tracing/noop.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/vectorstore/__init__.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/vectorstore/base.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/vectorstore/embeddings.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/vectorstore/sqlite_vec.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/vectorstore/vertex.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/tests/__init__.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/tests/test_contexts.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/tests/test_events.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/tests/test_imports.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/tests/test_llm_anthropic.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/tests/test_memory.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/tests/test_multi_agent.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/tests/test_persistence.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/tests/test_privacy.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/tests/test_queue.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/tests/test_rag.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/tests/test_state.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/tests/test_steps.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/tests/test_testing.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/tests/test_tools.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/tests/test_vectorstore.py +0 -0
- {agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/uv.lock +0 -0
agent_runtime_core-0.9.1/LICENSE
ADDED

@@ -0,0 +1,83 @@
+Business Source License 1.1
+
+License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
+"Business Source License" is a trademark of MariaDB Corporation Ab.
+
+Parameters
+
+Licensor:             Chris Barry
+Licensed Work:        agent-runtime-core
+                      The Licensed Work is (c) 2025 Chris Barry
+Additional Use Grant: You may use the Licensed Work for non-production purposes,
+                      including development, testing, and evaluation. Production
+                      use requires a commercial license from the Licensor.
+Change Date:          2029-01-24
+Change License:       MIT
+
+Terms
+
+The Licensor hereby grants you the right to copy, modify, create derivative
+works, redistribute, and make non-production use of the Licensed Work. The
+Licensor may make an Additional Use Grant, above, permitting limited production
+use.
+
+Effective on the Change Date, or the fourth anniversary of the first publicly
+available distribution of a specific version of the Licensed Work under this
+License, whichever comes first, the Licensor hereby grants you rights under
+the terms of the Change License, and the rights granted in the paragraph above
+terminate.
+
+If your use of the Licensed Work does not comply with the requirements
+currently in effect as described in this License, you must purchase a
+commercial license from the Licensor, its affiliated entities, or authorized
+resellers, or you must refrain from using the Licensed Work.
+
+All copies of the original and modified Licensed Work, and derivative works
+of the Licensed Work, are subject to this License. This License applies
+separately for each version of the Licensed Work and the Change Date may vary
+for each version of the Licensed Work released by Licensor.
+
+You must conspicuously display this License on each original or modified copy
+of the Licensed Work. If you receive the Licensed Work in original or
+modified form from a third party, the terms and conditions set forth in this
+License apply to your use of that work.
+
+Any use of the Licensed Work in violation of this License will automatically
+terminate your rights under this License for the current and all other
+versions of the Licensed Work.
+
+This License does not grant you any right in any trademark or logo of
+Licensor or its affiliates (provided that you may use a trademark or logo of
+Licensor as expressly required by this License).
+
+TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
+AN "AS IS" BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
+EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
+TITLE.
+
+MariaDB hereby grants you permission to use this License's text to license
+your works, and to refer to it using the trademark "Business Source License",
+as long as you comply with the Covenants of Licensor below.
+
+Covenants of Licensor
+
+In consideration of the right to use this License's text and the "Business
+Source License" name and trademark, Licensor covenants to MariaDB, and to all
+other recipients of the licensed work to be provided by Licensor:
+
+1. To specify as the Change License the GPL Version 2.0 or any later version,
+   or a license that is compatible with GPL Version 2.0 or a later version,
+   where "compatible" means that software provided under the Change License can
+   be included in a program with software provided under GPL Version 2.0 or a
+   later version. Licensor may specify additional Change Licenses without
+   limitation.
+
+2. To either: (a) specify an additional grant of rights to use that does not
+   impose any additional restriction on the right granted in this License, as
+   the Additional Use Grant; or (b) insert the text "None".
+
+3. To specify a Change Date.
+
+4. Not to modify this License in any other way.
+
{agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/PKG-INFO
RENAMED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: agent-runtime-core
-Version: 0.9.0
+Version: 0.9.1
 Summary: Framework-agnostic Python library for executing AI agents with consistent patterns
 Project-URL: Homepage, https://github.com/makemore/agent-runtime-core
 Project-URL: Repository, https://github.com/makemore/agent-runtime-core

{agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/agentic_loop.py
RENAMED

@@ -154,7 +154,7 @@ async def run_agentic_loop(
     ctx: RunContext,
     *,
     model: Optional[str] = None,
-    max_iterations: int = …
+    max_iterations: Optional[int] = None,
     emit_events: bool = True,
     ensure_final_response: bool = False,
     **llm_kwargs,
@@ -175,7 +175,8 @@ async def run_agentic_loop(
         execute_tool: Async function that executes a tool: (name, args) -> result
         ctx: Run context for emitting events
         model: Model to use (passed to LLM client)
-        max_iterations: Maximum loop iterations to prevent infinite loops
+        max_iterations: Maximum loop iterations to prevent infinite loops.
+            If None, uses the value from config (default: 50).
         emit_events: Whether to emit TOOL_CALL and TOOL_RESULT events
         ensure_final_response: If True, ensures a summary is generated when tools
             were used but the final response is empty or very short. This is useful
@@ -208,14 +209,18 @@ async def run_agentic_loop(
     max_consecutive_errors = 3  # Bail out if tool keeps failing

     # Initialize usage tracking (enabled in debug mode)
-    debug_mode = get_config().debug
+    config = get_config()
+    debug_mode = config.debug
     usage_stats = UsageStats() if debug_mode else None
     effective_model = model or "unknown"

-    while iteration < max_iterations:
+    # Use config default if max_iterations not specified
+    effective_max_iterations = max_iterations if max_iterations is not None else config.max_iterations
+
+    while iteration < effective_max_iterations:
         iteration += 1
-        print(f"[agentic-loop] Iteration {iteration}/{max_iterations}, messages={len(messages)}", flush=True)
-        logger.debug(f"Agentic loop iteration {iteration}/{max_iterations}")
+        print(f"[agentic-loop] Iteration {iteration}/{effective_max_iterations}, messages={len(messages)}", flush=True)
+        logger.debug(f"Agentic loop iteration {iteration}/{effective_max_iterations}")

         # Call LLM
         if tools:

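A minimal usage sketch of the new fallback; the llm/messages/tools/execute_tool/ctx arguments and their positional order are assumed from the surrounding codebase, since this diff shows only the tail of the signature:

    # Sketch only - argument names before `ctx` are assumed, not shown in this diff.
    result = await run_agentic_loop(
        llm, messages, tools, execute_tool, ctx,
        model="claude-sonnet-4-5-20250929",
    )  # max_iterations omitted: falls back to get_config().max_iterations (50 by default)

    capped = await run_agentic_loop(
        llm, messages, tools, execute_tool, ctx,
        max_iterations=5,  # an explicit per-call cap still overrides the config value
    )
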
{agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/config.py
RENAMED

@@ -91,6 +91,9 @@ class RuntimeConfig:
     # Debug mode - enables verbose logging, cost tracking, etc.
     debug: bool = False

+    # Agentic loop settings
+    max_iterations: int = 50  # Maximum iterations for tool-calling loops
+
     def get_openai_api_key(self) -> Optional[str]:
         """Get OpenAI API key from config or environment."""
         return self.openai_api_key or os.environ.get("OPENAI_API_KEY")
@@ -202,6 +205,7 @@ def _apply_env_vars(config: RuntimeConfig) -> None:
         "AGENT_RUNTIME_RETRY_BACKOFF_BASE": "retry_backoff_base",
         "AGENT_RUNTIME_RETRY_BACKOFF_MAX": "retry_backoff_max",
         "AGENT_RUNTIME_MAX_HISTORY_MESSAGES": "max_history_messages",
+        "AGENT_RUNTIME_MAX_ITERATIONS": "max_iterations",
     }

     bool_fields = {

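A short sketch of the two ways the new limit can be supplied; only get_config() and the env-var mapping are confirmed by this diff, and mutating the returned config object is an assumption:

    import os

    # Option 1: environment variable, picked up by _apply_env_vars()
    os.environ["AGENT_RUNTIME_MAX_ITERATIONS"] = "25"

    # Option 2: set the field directly (assumes the RuntimeConfig instance is mutable)
    from agent_runtime_core.config import get_config
    get_config().max_iterations = 25
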
{agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/interfaces.py
RENAMED

@@ -323,13 +323,21 @@ class ToolRegistry:
         """
         return self.to_openai_format()

-    async def execute(self, name: str, arguments: dict, **kwargs) -> Any:
+    async def execute(
+        self,
+        name: str,
+        arguments: dict,
+        ctx: Optional["RunContext"] = None,
+        **kwargs
+    ) -> Any:
         """
         Execute a tool by name.

         Args:
             name: Tool name
             arguments: Tool arguments
+            ctx: Optional RunContext, passed to handlers with requires_context=True
+            **kwargs: Additional arguments to pass to the handler

         Returns:
             Tool result
@@ -340,7 +348,15 @@ class ToolRegistry:
         tool = self._tools.get(name)
         if not tool:
             raise KeyError(f"Tool not found: {name}")
-        return await tool.handler(**arguments, **kwargs)
+
+        # Check if the tool requires context
+        requires_context = tool.metadata.get('requires_context', False) if tool.metadata else False
+
+        if requires_context and ctx is not None:
+            # Pass ctx to the handler for tools that need it (e.g., sub-agent tools)
+            return await tool.handler(**arguments, ctx=ctx, **kwargs)
+        else:
+            return await tool.handler(**arguments, **kwargs)

     async def execute_with_events(
         self,
@@ -350,19 +366,19 @@ class ToolRegistry:
     ) -> Any:
         """
         Execute a tool and automatically emit events.
-
+
         This is a convenience method that wraps execute() and handles
         event emission automatically. Use this in your agent loop to
         reduce boilerplate.
-
+
         Args:
             tool_call: Tool call object with name, arguments, and id
             ctx: Run context for emitting events
             **kwargs: Additional arguments to pass to the tool
-
+
         Returns:
             Tool result
-
+
         Example:
             for tool_call in response.tool_calls:
                 result = await tools.execute_with_events(tool_call, ctx)
@@ -373,17 +389,17 @@ class ToolRegistry:
             "tool_args": tool_call.arguments,
             "tool_call_id": tool_call.id,
         })
-
-        # Execute the tool
-        result = await self.execute(tool_call.name, tool_call.arguments, **kwargs)
-
+
+        # Execute the tool, passing ctx for tools that require it
+        result = await self.execute(tool_call.name, tool_call.arguments, ctx=ctx, **kwargs)
+
         # Emit tool result event
         await ctx.emit(EventType.TOOL_RESULT, {
             "tool_name": tool_call.name,
             "tool_call_id": tool_call.id,
             "result": result,
         })
-
+
         return result

@@ -501,6 +517,7 @@ class LLMResponse:
     model: str = ""
     finish_reason: str = ""
     raw_response: Optional[Any] = None
+    thinking: Optional[str] = None  # Extended thinking content (Anthropic)

     @property
     def tool_calls(self) -> Optional[list["LLMToolCall"]]:
@@ -528,6 +545,7 @@ class LLMStreamChunk:
     tool_calls: Optional[list] = None
     finish_reason: Optional[str] = None
     usage: Optional[dict] = None
+    thinking: Optional[str] = None  # Extended thinking content (Anthropic)


 class TraceSink(ABC):

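To illustrate the new ctx plumbing, a hypothetical sketch; the registration call and metadata shape are assumptions, since this diff only confirms that execute() reads tool.metadata["requires_context"] and forwards ctx:

    # Hypothetical handler: tools flagged requires_context=True now receive
    # the RunContext, e.g. so a sub-agent tool can emit into the parent run.
    async def spawn_sub_agent(task: str, ctx=None):
        await ctx.emit("sub_agent.started", {"task": task})  # event name illustrative
        return f"done: {task}"

    # registry.register(spawn_sub_agent, metadata={"requires_context": True})  # assumed API
    result = await registry.execute("spawn_sub_agent", {"task": "summarize"}, ctx=ctx)
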
{agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/llm/anthropic.py
RENAMED

@@ -176,9 +176,27 @@ class AnthropicClient(LLMClient):
         tools: Optional[list[dict]] = None,
         temperature: Optional[float] = None,
         max_tokens: Optional[int] = None,
+        thinking: bool = False,
+        thinking_budget: Optional[int] = None,
         **kwargs,
     ) -> LLMResponse:
-        """…"""
+        """
+        Generate a completion from Anthropic.
+
+        Args:
+            messages: List of messages in framework-neutral format
+            model: Model ID to use (defaults to self.default_model)
+            stream: Whether to stream the response (not used here, use stream() method)
+            tools: List of tools in OpenAI format
+            temperature: Sampling temperature (0.0 to 1.0)
+            max_tokens: Maximum tokens to generate
+            thinking: Enable extended thinking mode for deeper reasoning
+            thinking_budget: Max tokens for thinking (default: 10000, max: 128000)
+            **kwargs: Additional parameters passed to the API
+
+        Returns:
+            LLMResponse with the generated message
+        """
         model = model or self.default_model

         # Validate and repair message history before processing
@@ -206,15 +224,28 @@ class AnthropicClient(LLMClient):
             request_kwargs["system"] = system_message
         if tools:
             request_kwargs["tools"] = self._convert_tools(tools)
-        if temperature is not None:
+
+        # Handle extended thinking mode
+        if thinking:
+            # Extended thinking requires specific configuration
+            # Temperature must be 1.0 when using thinking
+            request_kwargs["thinking"] = {
+                "type": "enabled",
+                "budget_tokens": thinking_budget or 10000,
+            }
+            # Temperature must be exactly 1.0 for extended thinking
+            request_kwargs["temperature"] = 1.0
+        elif temperature is not None:
             request_kwargs["temperature"] = temperature

         request_kwargs.update(kwargs)

         response = await self._client.messages.create(**request_kwargs)

+        message, thinking_content = self._convert_response(response)
+
         return LLMResponse(
-            message=self._convert_response(response),
+            message=message,
             usage={
                 "prompt_tokens": response.usage.input_tokens,
                 "completion_tokens": response.usage.output_tokens,
@@ -223,6 +254,7 @@ class AnthropicClient(LLMClient):
             model=response.model,
             finish_reason=response.stop_reason or "",
             raw_response=response,
+            thinking=thinking_content,
         )

     async def stream(
@@ -231,9 +263,26 @@ class AnthropicClient(LLMClient):
         *,
         model: Optional[str] = None,
         tools: Optional[list[dict]] = None,
+        thinking: bool = False,
+        thinking_budget: Optional[int] = None,
+        temperature: Optional[float] = None,
         **kwargs,
     ) -> AsyncIterator[LLMStreamChunk]:
-        """…"""
+        """
+        Stream a completion from Anthropic.
+
+        Args:
+            messages: List of messages in framework-neutral format
+            model: Model ID to use
+            tools: List of tools in OpenAI format
+            thinking: Enable extended thinking mode
+            thinking_budget: Max tokens for thinking (default: 10000)
+            temperature: Sampling temperature (ignored if thinking=True)
+            **kwargs: Additional parameters
+
+        Yields:
+            LLMStreamChunk with delta content and thinking content
+        """
         model = model or self.default_model

         # Validate and repair message history before processing
@@ -262,6 +311,16 @@ class AnthropicClient(LLMClient):
         if tools:
             request_kwargs["tools"] = self._convert_tools(tools)

+        # Handle extended thinking mode
+        if thinking:
+            request_kwargs["thinking"] = {
+                "type": "enabled",
+                "budget_tokens": thinking_budget or 10000,
+            }
+            request_kwargs["temperature"] = 1.0
+        elif temperature is not None:
+            request_kwargs["temperature"] = temperature
+
         request_kwargs.update(kwargs)

         async with self._client.messages.stream(**request_kwargs) as stream:
@@ -269,6 +328,9 @@ class AnthropicClient(LLMClient):
                 if event.type == "content_block_delta":
                     if hasattr(event.delta, "text"):
                         yield LLMStreamChunk(delta=event.delta.text)
+                    elif hasattr(event.delta, "thinking"):
+                        # Extended thinking content
+                        yield LLMStreamChunk(delta="", thinking=event.delta.thinking)
                 elif event.type == "message_stop":
                     yield LLMStreamChunk(finish_reason="stop")

@@ -410,14 +472,23 @@ class AnthropicClient(LLMClient):
                 })
         return result

-    def _convert_response(self, response) -> Message:
-        """…"""
+    def _convert_response(self, response) -> tuple[Message, Optional[str]]:
+        """
+        Convert Anthropic response to our format.
+
+        Returns:
+            Tuple of (message, thinking_content)
+        """
         content = ""
         tool_calls = []
+        thinking_content = None

         for block in response.content:
             if block.type == "text":
                 content += block.text
+            elif block.type == "thinking":
+                # Extended thinking block
+                thinking_content = block.thinking
             elif block.type == "tool_use":
                 # Convert input to JSON string (not Python str() which gives wrong format)
                 arguments = json.dumps(block.input) if isinstance(block.input, dict) else str(block.input)
@@ -438,4 +509,4 @@ class AnthropicClient(LLMClient):
         if tool_calls:
             result["tool_calls"] = tool_calls

-        return result
+        return result, thinking_content

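A usage sketch for extended thinking; `complete` is an assumed method name (the diff shows the signature and docstring but not the name), and the client is assumed to be constructed elsewhere:

    response = await client.complete(
        messages=[{"role": "user", "content": "Plan the migration step by step."}],
        model="claude-sonnet-4-5-20250929",
        thinking=True,          # forces temperature to 1.0 internally
        thinking_budget=20000,  # defaults to 10000 when omitted
    )
    print(response.thinking)    # extended thinking content, or None

    async for chunk in client.stream(messages, thinking=True):
        if chunk.thinking:
            print("thinking:", chunk.thinking)
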
{agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/llm/models_config.py
RENAMED

@@ -22,19 +22,48 @@ class ModelInfo:
     supports_tools: bool = True
     supports_vision: bool = False
     supports_streaming: bool = True
+    supports_thinking: bool = False  # Extended thinking (Anthropic) or reasoning (OpenAI)
     description: str = ""


 # Registry of supported models
 SUPPORTED_MODELS: dict[str, ModelInfo] = {
-    # OpenAI Models
+    # OpenAI Models - GPT-5 (latest)
+    "gpt-5": ModelInfo(
+        id="gpt-5",
+        name="GPT-5",
+        provider="openai",
+        context_window=256000,
+        supports_vision=True,
+        supports_thinking=True,
+        description="Most capable OpenAI model with reasoning",
+    ),
+    "gpt-5-mini": ModelInfo(
+        id="gpt-5-mini",
+        name="GPT-5 Mini",
+        provider="openai",
+        context_window=256000,
+        supports_vision=True,
+        supports_thinking=True,
+        description="Fast GPT-5 variant with reasoning",
+    ),
+    "gpt-5-turbo": ModelInfo(
+        id="gpt-5-turbo",
+        name="GPT-5 Turbo",
+        provider="openai",
+        context_window=256000,
+        supports_vision=True,
+        supports_thinking=True,
+        description="Balanced GPT-5 variant for production",
+    ),
+    # OpenAI Models - GPT-4o
     "gpt-4o": ModelInfo(
         id="gpt-4o",
         name="GPT-4o",
         provider="openai",
         context_window=128000,
         supports_vision=True,
-        description="Most capable…
+        description="Most capable GPT-4 model, multimodal",
     ),
     "gpt-4o-mini": ModelInfo(
         id="gpt-4o-mini",
@@ -52,12 +81,14 @@ SUPPORTED_MODELS: dict[str, ModelInfo] = {
         supports_vision=True,
         description="Previous generation flagship",
     ),
+    # OpenAI Models - o-series (reasoning)
     "o1": ModelInfo(
         id="o1",
         name="o1",
         provider="openai",
         context_window=200000,
         supports_tools=False,
+        supports_thinking=True,
         description="Advanced reasoning model",
     ),
     "o1-mini": ModelInfo(
@@ -66,6 +97,7 @@ SUPPORTED_MODELS: dict[str, ModelInfo] = {
         provider="openai",
         context_window=128000,
         supports_tools=False,
+        supports_thinking=True,
         description="Fast reasoning model",
     ),
     "o3-mini": ModelInfo(
@@ -74,6 +106,7 @@ SUPPORTED_MODELS: dict[str, ModelInfo] = {
         provider="openai",
         context_window=200000,
         supports_tools=True,
+        supports_thinking=True,
         description="Latest reasoning model with tool use",
     ),

@@ -84,6 +117,7 @@ SUPPORTED_MODELS: dict[str, ModelInfo] = {
         provider="anthropic",
         context_window=200000,
         supports_vision=True,
+        supports_thinking=True,
         description="Best balance of speed and capability for agents and coding",
     ),
     "claude-opus-4-5-20251101": ModelInfo(
@@ -92,6 +126,7 @@ SUPPORTED_MODELS: dict[str, ModelInfo] = {
         provider="anthropic",
         context_window=200000,
         supports_vision=True,
+        supports_thinking=True,
         description="Premium model - maximum intelligence with practical performance",
     ),
     "claude-haiku-4-5-20251001": ModelInfo(
@@ -100,6 +135,7 @@ SUPPORTED_MODELS: dict[str, ModelInfo] = {
         provider="anthropic",
         context_window=200000,
         supports_vision=True,
+        supports_thinking=True,
         description="Fastest model with near-frontier intelligence",
     ),
     # Anthropic Models - Claude 4 (previous generation)
@@ -109,6 +145,7 @@ SUPPORTED_MODELS: dict[str, ModelInfo] = {
         provider="anthropic",
         context_window=200000,
         supports_vision=True,
+        supports_thinking=True,
         description="Previous generation Sonnet",
     ),
     "claude-opus-4-20250514": ModelInfo(
@@ -117,6 +154,7 @@ SUPPORTED_MODELS: dict[str, ModelInfo] = {
         provider="anthropic",
         context_window=200000,
         supports_vision=True,
+        supports_thinking=True,
         description="Previous generation Opus",
     ),
     # Anthropic Models - Claude 3.5 (legacy)
@@ -126,6 +164,7 @@ SUPPORTED_MODELS: dict[str, ModelInfo] = {
         provider="anthropic",
         context_window=200000,
         supports_vision=True,
+        supports_thinking=True,
         description="Legacy model, still excellent",
     ),
     "claude-3-5-haiku-20241022": ModelInfo(
@@ -134,6 +173,7 @@ SUPPORTED_MODELS: dict[str, ModelInfo] = {
         provider="anthropic",
         context_window=200000,
         supports_vision=True,
+        supports_thinking=True,
         description="Legacy fast model",
     ),
 }
@@ -150,19 +190,20 @@ def get_model_info(model_id: str) -> Optional[ModelInfo]:
 def get_provider_for_model(model_id: str) -> Optional[str]:
     """
     Detect the provider for a model ID.
-
+
     Returns "openai", "anthropic", or None if unknown.
     """
     # Check registry first
     if model_id in SUPPORTED_MODELS:
         return SUPPORTED_MODELS[model_id].provider
-
+
     # Fallback heuristics for unlisted models
-    if model_id.startswith("gpt-") or model_id.startswith("o1") or …
+    if (model_id.startswith("gpt-") or model_id.startswith("o1") or
+            model_id.startswith("o3") or model_id.startswith("gpt5")):
         return "openai"
     if model_id.startswith("claude"):
         return "anthropic"
-
+
     return None

@@ -174,6 +215,9 @@ def list_models_for_ui() -> list[dict]:
             "name": m.name,
             "provider": m.provider,
             "description": m.description,
+            "supports_thinking": m.supports_thinking,
+            "supports_tools": m.supports_tools,
+            "supports_vision": m.supports_vision,
         }
         for m in SUPPORTED_MODELS.values()
     ]

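A small sketch of querying the new capability flag; both helpers appear in the hunks above:

    from agent_runtime_core.llm.models_config import get_model_info, list_models_for_ui

    info = get_model_info("gpt-5")
    if info and info.supports_thinking:
        print(f"{info.name}: {info.context_window}-token context, reasoning supported")

    # The UI listing now carries the capability flags as well:
    thinking_models = [m["name"] for m in list_models_for_ui() if m["supports_thinking"]]
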
{agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/llm/openai.py
RENAMED

@@ -104,9 +104,27 @@ class OpenAIClient(LLMClient):
         tools: Optional[list[dict]] = None,
         temperature: Optional[float] = None,
         max_tokens: Optional[int] = None,
+        thinking: bool = False,
+        reasoning_effort: Optional[str] = None,
         **kwargs,
     ) -> LLMResponse:
-        """…"""
+        """
+        Generate a completion from OpenAI.
+
+        Args:
+            messages: List of messages in framework-neutral format
+            model: Model ID to use (defaults to self.default_model)
+            stream: Whether to stream (not used here, use stream() method)
+            tools: List of tools in OpenAI format
+            temperature: Sampling temperature
+            max_tokens: Maximum tokens to generate
+            thinking: Enable reasoning mode for o-series and GPT-5 models
+            reasoning_effort: Reasoning effort level: "low", "medium", or "high"
+            **kwargs: Additional parameters passed to the API
+
+        Returns:
+            LLMResponse with the generated message
+        """
         model = model or self.default_model

         request_kwargs = {
@@ -121,6 +139,15 @@ class OpenAIClient(LLMClient):
         if max_tokens is not None:
             request_kwargs["max_tokens"] = max_tokens

+        # Handle reasoning mode for o-series and GPT-5 models
+        if thinking or reasoning_effort:
+            # reasoning_effort controls how much reasoning the model does
+            # Valid values: "low", "medium", "high"
+            effort = reasoning_effort or "medium"
+            if effort not in ("low", "medium", "high"):
+                effort = "medium"
+            request_kwargs["reasoning_effort"] = effort
+
         request_kwargs.update(kwargs)

         response = await self._client.chat.completions.create(**request_kwargs)
@@ -146,9 +173,24 @@ class OpenAIClient(LLMClient):
         *,
         model: Optional[str] = None,
         tools: Optional[list[dict]] = None,
+        thinking: bool = False,
+        reasoning_effort: Optional[str] = None,
         **kwargs,
     ) -> AsyncIterator[LLMStreamChunk]:
-        """…"""
+        """
+        Stream a completion from OpenAI.
+
+        Args:
+            messages: List of messages
+            model: Model ID to use
+            tools: List of tools in OpenAI format
+            thinking: Enable reasoning mode for o-series and GPT-5 models
+            reasoning_effort: Reasoning effort level: "low", "medium", or "high"
+            **kwargs: Additional parameters
+
+        Yields:
+            LLMStreamChunk with delta content
+        """
         model = model or self.default_model

         request_kwargs = {
@@ -160,6 +202,13 @@ class OpenAIClient(LLMClient):
         if tools:
             request_kwargs["tools"] = tools

+        # Handle reasoning mode for o-series and GPT-5 models
+        if thinking or reasoning_effort:
+            effort = reasoning_effort or "medium"
+            if effort not in ("low", "medium", "high"):
+                effort = "medium"
+            request_kwargs["reasoning_effort"] = effort
+
         request_kwargs.update(kwargs)

         async with await self._client.chat.completions.create(**request_kwargs) as stream:

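A matching sketch for the OpenAI side; as above, `complete` is an assumed method name for the signature shown:

    response = await client.complete(
        messages=[{"role": "user", "content": "Prove it step by step."}],
        model="o3-mini",
        reasoning_effort="high",  # implies reasoning mode; invalid values fall back to "medium"
    )
    # thinking=True with no effort given is equivalent to reasoning_effort="medium".
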
{agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/multi_agent.py
RENAMED

@@ -1230,7 +1230,17 @@ class SubAgentContext:
         return ToolRegistry()

     async def emit(self, event_type: EventType | str, payload: dict) -> None:
-        """Emit events through parent context with sub-agent tagging."""
+        """Emit events through parent context with sub-agent tagging.
+
+        Note: ASSISTANT_MESSAGE events are suppressed for sub-agents because
+        the parent agent will relay the response. This prevents duplicate
+        messages in the UI.
+        """
+        # Suppress ASSISTANT_MESSAGE events from sub-agents to prevent duplicates
+        # The parent agent will relay the sub-agent's response
+        if event_type == EventType.ASSISTANT_MESSAGE or event_type == "assistant.message":
+            return
+
         # Tag the event as coming from a sub-agent
         tagged_payload = dict(payload)
         tagged_payload["sub_agent_run_id"] = str(self._run_id)

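A behavioral sketch of the new suppression; SubAgentContext construction is not shown in this diff, so sub_ctx is assumed to already exist inside a running sub-agent:

    await sub_ctx.emit(EventType.TOOL_CALL, {"tool_name": "search"})
    # -> forwarded to the parent run, tagged with sub_agent_run_id

    await sub_ctx.emit(EventType.ASSISTANT_MESSAGE, {"content": "partial answer"})
    # -> silently dropped; the parent agent relays the sub-agent's response
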
{agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/agent_runtime_core/tool_calling_agent.py
RENAMED

@@ -75,8 +75,10 @@ class ToolCallingAgent(AgentRuntime):
         Maximum number of tool-calling iterations.

         Override to change the default limit.
+        Default uses the value from config (default: 50).
         """
-        return …
+        from agent_runtime_core.config import get_config
+        return get_config().max_iterations

     @property
     def model(self) -> Optional[str]:

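A sketch of how the property now interacts with the config default; the import path follows the file path above:

    from agent_runtime_core.tool_calling_agent import ToolCallingAgent

    class CautiousAgent(ToolCallingAgent):
        @property
        def max_iterations(self) -> int:
            return 10  # hard per-agent cap, ignoring AGENT_RUNTIME_MAX_ITERATIONS

    # Agents that don't override the property now follow get_config().max_iterations.
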
{agent_runtime_core-0.9.0 → agent_runtime_core-0.9.1}/pyproject.toml
RENAMED

@@ -4,7 +4,7 @@ build-backend = "hatchling.build"

 [project]
 name = "agent-runtime-core"
-version = "0.9.0"
+version = "0.9.1"
 description = "Framework-agnostic Python library for executing AI agents with consistent patterns"
 readme = "README.md"
 license = "MIT"
@@ -103,11 +103,11 @@ testpaths = ["tests"]

 [tool.ruff]
 line-length = 100
-target-version = "…"
+target-version = "0.9.1"

 [tool.ruff.lint]
 select = ["E", "F", "I", "N", "W", "UP"]

 [tool.mypy]
-python_version = "…"
+python_version = "0.9.1"
 strict = true

agent_runtime_core-0.9.0/LICENSE
DELETED

@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2026 Chris Barry
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.