agent-runtime-core 0.5.1__py3-none-any.whl → 0.5.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -34,7 +34,7 @@ Example usage:
34
34
  return RunResult(final_output={"message": "Hello!"})
35
35
  """
36
36
 
37
- __version__ = "0.5.1"
37
+ __version__ = "0.5.2"
38
38
 
39
39
  # Core interfaces
40
40
  from agent_runtime_core.interfaces import (
@@ -53,6 +53,10 @@ from agent_runtime_core.interfaces import (
53
53
  TraceSink,
54
54
  )
55
55
 
56
+
57
+ # Tool Calling Agent base class
58
+ from agent_runtime_core.tool_calling_agent import ToolCallingAgent
59
+
56
60
  # Configuration
57
61
  from agent_runtime_core.config import (
58
62
  RuntimeConfig,
@@ -142,6 +146,7 @@ __all__ = [
142
146
  "TraceSink",
143
147
  "EventType",
144
148
  "ErrorInfo",
149
+ "ToolCallingAgent",
145
150
  # Configuration
146
151
  "RuntimeConfig",
147
152
  "configure",
@@ -303,6 +303,50 @@ class ToolRegistry:
303
303
  raise KeyError(f"Tool not found: {name}")
304
304
  return await tool.handler(**arguments)
305
305
 
306
+ async def execute_with_events(
307
+ self,
308
+ tool_call: Any, # ToolCall object with name, arguments, id
309
+ ctx: "RunContext",
310
+ **kwargs
311
+ ) -> Any:
312
+ """
313
+ Execute a tool and automatically emit events.
314
+
315
+ This is a convenience method that wraps execute() and handles
316
+ event emission automatically. Use this in your agent loop to
317
+ reduce boilerplate.
318
+
319
+ Args:
320
+ tool_call: Tool call object with name, arguments, and id
321
+ ctx: Run context for emitting events
322
+ **kwargs: Additional arguments to pass to the tool
323
+
324
+ Returns:
325
+ Tool result
326
+
327
+ Example:
328
+ for tool_call in response.tool_calls:
329
+ result = await tools.execute_with_events(tool_call, ctx)
330
+ """
331
+ # Emit tool call event
332
+ await ctx.emit(EventType.TOOL_CALL, {
333
+ "tool_name": tool_call.name,
334
+ "tool_args": tool_call.arguments,
335
+ "tool_call_id": tool_call.id,
336
+ })
337
+
338
+ # Execute the tool
339
+ result = await self.execute(tool_call.name, tool_call.arguments, **kwargs)
340
+
341
+ # Emit tool result event
342
+ await ctx.emit(EventType.TOOL_RESULT, {
343
+ "tool_name": tool_call.name,
344
+ "tool_call_id": tool_call.id,
345
+ "result": result,
346
+ })
347
+
348
+ return result
349
+
306
350
 
307
351
  class LLMClient(ABC):
308
352
  """
@@ -0,0 +1,256 @@
1
+ """
2
+ ToolCallingAgent - A base class for agents that use tool calling.
3
+
4
+ This eliminates the boilerplate of implementing the tool-calling loop
5
+ in every agent. Just define your system prompt and tools, and the base
6
+ class handles the rest.
7
+ """
8
+
9
import json
import logging
from abc import abstractmethod
from typing import Any, Optional

from agent_runtime_core.interfaces import (
    AgentRuntime,
    RunContext,
    RunResult,
    EventType,
    ToolRegistry,
    LLMClient,
)
22
+
23
+ logger = logging.getLogger(__name__)
24
+
25
+
26
class ToolCallingAgent(AgentRuntime):
    """
    Base class for agents that use tool calling.

    Handles the standard tool-calling loop so you don't have to implement it
    in every agent. Just override the abstract properties and you're done.

    Example:
        class MyAgent(ToolCallingAgent):
            @property
            def key(self) -> str:
                return "my-agent"

            @property
            def system_prompt(self) -> str:
                return "You are a helpful assistant..."

            @property
            def tools(self) -> ToolRegistry:
                return create_my_tools()
    """

    @property
    @abstractmethod
    def system_prompt(self) -> str:
        """
        System prompt for the agent.

        This is prepended to the conversation messages.
        """
        ...

    @property
    @abstractmethod
    def tools(self) -> ToolRegistry:
        """
        Tools available to the agent.

        Return a ToolRegistry with all tools registered.
        """
        ...

    @property
    def max_iterations(self) -> int:
        """
        Maximum number of tool-calling iterations.

        Override to change the default limit.
        """
        return 10

    @property
    def model(self) -> Optional[str]:
        """
        Model to use for this agent.

        If None, uses the default model from configuration.
        Override to use a specific model.
        """
        return None

    @property
    def temperature(self) -> Optional[float]:
        """
        Temperature for LLM generation.

        If None, uses the LLM client's default.
        Override to set a specific temperature.
        """
        return None

    def get_llm_client(self) -> LLMClient:
        """
        Get the LLM client to use.

        Override to customize LLM client selection.
        Default uses the configured client.
        """
        # Imported lazily so the module can be imported without the llm extras.
        from agent_runtime_core.llm import get_llm_client
        return get_llm_client()

    async def before_run(self, ctx: RunContext) -> None:
        """
        Hook called before the agent run starts.

        Override to add custom initialization logic.
        """
        pass

    async def after_run(self, ctx: RunContext, result: RunResult) -> RunResult:
        """
        Hook called after the agent run completes.

        Override to add custom finalization logic.
        Can modify the result before returning.
        """
        return result

    async def on_tool_call(self, ctx: RunContext, tool_name: str, tool_args: dict) -> None:
        """
        Hook called before each tool execution.

        Override to add custom logic (logging, validation, etc.).
        """
        pass

    # FIX: annotations used the builtin `any` function instead of typing.Any.
    async def on_tool_result(self, ctx: RunContext, tool_name: str, result: Any) -> Any:
        """
        Hook called after each tool execution.

        Override to transform or validate tool results.
        Can return a modified result.
        """
        return result

    async def run(self, ctx: RunContext) -> RunResult:
        """
        Execute the agent with tool calling support.

        This implements the standard tool-calling loop:
        1. Build messages with system prompt
        2. Call LLM with tools
        3. If tool calls, execute them and loop
        4. If no tool calls, return final response
        """
        logger.info(f"[{self.key}] Starting run, input messages: {len(ctx.input_messages)}")

        # Call before_run hook
        await self.before_run(ctx)

        # Get LLM client
        llm = self.get_llm_client()

        # Build messages with system prompt
        messages = [
            {"role": "system", "content": self.system_prompt}
        ] + ctx.input_messages

        logger.info(f"[{self.key}] Built {len(messages)} messages (including system prompt)")

        # Run the agent loop (tool calling)
        iteration = 0
        final_response = None
        # FIX: `response` was only bound inside the loop, so the usage lookup
        # below raised NameError when max_iterations < 1. Initialize it here.
        response = None

        while iteration < self.max_iterations:
            iteration += 1
            logger.info(f"[{self.key}] Iteration {iteration}/{self.max_iterations}")

            # Generate response with tools
            logger.info(f"[{self.key}] Calling LLM...")
            response = await llm.generate(
                messages=messages,
                tools=self.tools.to_openai_format(),
                model=self.model,
                temperature=self.temperature,
            )
            logger.info(f"[{self.key}] LLM response received, tool_calls: {bool(response.message.get('tool_calls'))}")

            tool_calls = response.message.get('tool_calls')
            if tool_calls:
                # Record the assistant turn that requested the tools, then run them.
                messages.append(response.message)
                await self._execute_tool_calls(ctx, tool_calls, messages)
            else:
                # No tool calls - we have the final response
                final_response = response.message["content"]
                logger.info(f"[{self.key}] Final response received: {final_response[:100] if final_response else 'None'}...")
                break

        # Emit the final assistant message
        if final_response:
            logger.info(f"[{self.key}] Emitting ASSISTANT_MESSAGE event")
            await ctx.emit(EventType.ASSISTANT_MESSAGE, {
                "content": final_response,
            })
            logger.info(f"[{self.key}] Event emitted successfully")
        else:
            logger.warning(f"[{self.key}] No final response to emit!")

        logger.info(f"[{self.key}] Returning RunResult")
        result = RunResult(
            final_output={"response": final_response},
            final_messages=messages,
            usage=response.usage if response else {},
        )

        # Call after_run hook
        result = await self.after_run(ctx, result)

        return result

    async def _execute_tool_calls(self, ctx: RunContext, tool_calls: list, messages: list) -> None:
        """
        Execute each requested tool, emit events, and append tool messages.

        For every tool call: emits TOOL_CALL, runs the on_tool_call hook,
        executes the tool, runs the on_tool_result hook, emits TOOL_RESULT,
        and appends a "role": "tool" message to *messages* in call order.
        """
        for tool_call in tool_calls:
            tool_name = tool_call["function"]["name"]
            call_id = tool_call["id"]
            # FIX: the arguments JSON was parsed three separate times per call;
            # parse it once and reuse the result.
            tool_args = json.loads(tool_call["function"]["arguments"])

            # Emit tool call event
            await ctx.emit(EventType.TOOL_CALL, {
                "tool_name": tool_name,
                "tool_args": tool_args,
                "tool_call_id": call_id,
            })

            # Call before_tool_call hook
            await self.on_tool_call(ctx, tool_name, tool_args)

            # Execute the tool
            result = await self.tools.execute(tool_name, tool_args)

            # Call after_tool_result hook (may transform the result)
            result = await self.on_tool_result(ctx, tool_name, result)

            # Emit tool result event
            await ctx.emit(EventType.TOOL_RESULT, {
                "tool_name": tool_name,
                "tool_call_id": call_id,
                "result": result,
            })

            # Feed the result back to the model as a tool message.
            messages.append({
                "role": "tool",
                "tool_call_id": call_id,
                "content": str(result),
            })
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: agent-runtime-core
3
- Version: 0.5.1
3
+ Version: 0.5.2
4
4
  Summary: Framework-agnostic Python library for executing AI agents with consistent patterns
5
5
  Project-URL: Homepage, https://github.com/makemore/agent-runtime-core
6
6
  Project-URL: Repository, https://github.com/makemore/agent-runtime-core
@@ -49,8 +49,15 @@ Description-Content-Type: text/markdown
49
49
 
50
50
  A lightweight, framework-agnostic Python library for building AI agent systems. Provides the core abstractions and implementations needed to build production-ready AI agents without tying you to any specific framework.
51
51
 
52
- ## Features
52
+ ## Recent Updates
53
+
54
+ | Version | Date | Changes |
55
+ |---------|------|---------|
56
+ | **0.5.2** | 2025-01-14 | Add ToolCallingAgent base class, execute_with_events helper |
57
+ | **0.5.1** | 2025-01-13 | Bug fixes and improvements |
58
+ | **0.5.0** | 2025-01-12 | Initial stable release |
53
59
 
60
+ ## Features
54
61
  - 🔌 **Framework Agnostic** - Works with LangGraph, CrewAI, OpenAI Agents, or your own custom loops
55
62
  - 🤖 **Model Agnostic** - OpenAI, Anthropic, or any provider via LiteLLM
56
63
  - 📦 **Zero Required Dependencies** - Core library has no dependencies; add only what you need
@@ -1,10 +1,11 @@
1
- agent_runtime_core/__init__.py,sha256=9Aen5YoHmemdCHBhGIsKINMalFr-_QkkeGs6bXlWSKU,4010
1
+ agent_runtime_core/__init__.py,sha256=pn2I6UI_nbnho9K8KViC05RsWTRm0ftXyi7TF0dbyLQ,4139
2
2
  agent_runtime_core/config.py,sha256=e3_uB5brAuQcWU36sOhWF9R6RoJrngtCS-xEB3n2fas,4986
3
- agent_runtime_core/interfaces.py,sha256=V3CAt8otNMF4Wdo5xJ9DyScL0iYcmQ90U0weadMQsw0,10777
3
+ agent_runtime_core/interfaces.py,sha256=T74pgS229tvarQD-_o25oflylUR7jq_jbgUjnvVs6IA,12191
4
4
  agent_runtime_core/registry.py,sha256=hrbEdNNdqEz7-uN-82qofsXFTZBRDxZ2Ht9qwmp1qkw,1476
5
5
  agent_runtime_core/runner.py,sha256=M3It72UhfmLt17jVnSvObiSfQ1_RN4JVUIJsjnRd2Ps,12771
6
6
  agent_runtime_core/steps.py,sha256=XpVFK7P-ZOpr7NwaP7XFygduIpjrKld-OIig7dHNMKE,11994
7
7
  agent_runtime_core/testing.py,sha256=ordECGprBappLBMWxlETvuf2AoIPNomJFeSedXaY30E,11131
8
+ agent_runtime_core/tool_calling_agent.py,sha256=LY0lSI9GZbWoIKd-7NXFSvByAhvK6L625eOp01VbTrw,8525
8
9
  agent_runtime_core/events/__init__.py,sha256=Gg7cMQHWfLTQ4Xik09KSg7cWbQDmW_MuF5_jl-yZkHU,1575
9
10
  agent_runtime_core/events/base.py,sha256=NfHYyoczxr40Er5emROi_aY_07m5hDrKsn31pdWY2DY,1950
10
11
  agent_runtime_core/events/memory.py,sha256=9z4tY8XB8xDg3ybHsIwilOcRo7HY-vB-8vxiz6O54BE,2491
@@ -31,7 +32,7 @@ agent_runtime_core/state/sqlite.py,sha256=HKZwDiC_7F1W8Z_Pz8roEs91XhQ9rUHfGpuQ7W
31
32
  agent_runtime_core/tracing/__init__.py,sha256=u1QicGc39e30gWyQD4cQWxGGjITnkwoOPUhNrG6aNyI,1266
32
33
  agent_runtime_core/tracing/langfuse.py,sha256=Rj2sUlatk5sFro0y68tw5X6fQcSwWxcBOSOjB0F7JTU,3660
33
34
  agent_runtime_core/tracing/noop.py,sha256=SpsbpsUcNG6C3xZG3uyiNPUHY8etloISx3w56Q8D3KE,751
34
- agent_runtime_core-0.5.1.dist-info/METADATA,sha256=Y-ZOcumIFWdrqBWlV4QIvx5xyPaJGFnhUXtLtnqHjc4,23491
35
- agent_runtime_core-0.5.1.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
36
- agent_runtime_core-0.5.1.dist-info/licenses/LICENSE,sha256=fDlWep3_mUrj8KHV_jk275tHVEW7_9sJRhkNuGCZ_TA,1068
37
- agent_runtime_core-0.5.1.dist-info/RECORD,,
35
+ agent_runtime_core-0.5.2.dist-info/METADATA,sha256=Lu_Z_hwn2hWC8czhLGqaUFcT7dEOQ2GSXRlx12RR9KE,23765
36
+ agent_runtime_core-0.5.2.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
37
+ agent_runtime_core-0.5.2.dist-info/licenses/LICENSE,sha256=fDlWep3_mUrj8KHV_jk275tHVEW7_9sJRhkNuGCZ_TA,1068
38
+ agent_runtime_core-0.5.2.dist-info/RECORD,,