RouteKitAI 0.1.0 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (64)
  1. routekitai/__init__.py +53 -0
  2. routekitai/cli/__init__.py +18 -0
  3. routekitai/cli/main.py +40 -0
  4. routekitai/cli/replay.py +80 -0
  5. routekitai/cli/run.py +95 -0
  6. routekitai/cli/serve.py +966 -0
  7. routekitai/cli/test_agent.py +178 -0
  8. routekitai/cli/trace.py +209 -0
  9. routekitai/cli/trace_analyze.py +120 -0
  10. routekitai/cli/trace_search.py +126 -0
  11. routekitai/core/__init__.py +58 -0
  12. routekitai/core/agent.py +325 -0
  13. routekitai/core/errors.py +49 -0
  14. routekitai/core/hooks.py +174 -0
  15. routekitai/core/memory.py +54 -0
  16. routekitai/core/message.py +132 -0
  17. routekitai/core/model.py +91 -0
  18. routekitai/core/policies.py +373 -0
  19. routekitai/core/policy.py +85 -0
  20. routekitai/core/policy_adapter.py +133 -0
  21. routekitai/core/runtime.py +1403 -0
  22. routekitai/core/tool.py +148 -0
  23. routekitai/core/tools.py +180 -0
  24. routekitai/evals/__init__.py +13 -0
  25. routekitai/evals/dataset.py +75 -0
  26. routekitai/evals/metrics.py +101 -0
  27. routekitai/evals/runner.py +184 -0
  28. routekitai/graphs/__init__.py +12 -0
  29. routekitai/graphs/executors.py +457 -0
  30. routekitai/graphs/graph.py +164 -0
  31. routekitai/memory/__init__.py +13 -0
  32. routekitai/memory/episodic.py +242 -0
  33. routekitai/memory/kv.py +34 -0
  34. routekitai/memory/retrieval.py +192 -0
  35. routekitai/memory/vector.py +700 -0
  36. routekitai/memory/working.py +66 -0
  37. routekitai/message.py +29 -0
  38. routekitai/model.py +48 -0
  39. routekitai/observability/__init__.py +21 -0
  40. routekitai/observability/analyzer.py +314 -0
  41. routekitai/observability/exporters/__init__.py +10 -0
  42. routekitai/observability/exporters/base.py +30 -0
  43. routekitai/observability/exporters/jsonl.py +81 -0
  44. routekitai/observability/exporters/otel.py +119 -0
  45. routekitai/observability/spans.py +111 -0
  46. routekitai/observability/streaming.py +117 -0
  47. routekitai/observability/trace.py +144 -0
  48. routekitai/providers/__init__.py +9 -0
  49. routekitai/providers/anthropic.py +227 -0
  50. routekitai/providers/azure_openai.py +243 -0
  51. routekitai/providers/local.py +196 -0
  52. routekitai/providers/openai.py +321 -0
  53. routekitai/py.typed +0 -0
  54. routekitai/sandbox/__init__.py +12 -0
  55. routekitai/sandbox/filesystem.py +131 -0
  56. routekitai/sandbox/network.py +142 -0
  57. routekitai/sandbox/permissions.py +70 -0
  58. routekitai/tool.py +33 -0
  59. routekitai-0.1.0.dist-info/METADATA +328 -0
  60. routekitai-0.1.0.dist-info/RECORD +64 -0
  61. routekitai-0.1.0.dist-info/WHEEL +5 -0
  62. routekitai-0.1.0.dist-info/entry_points.txt +2 -0
  63. routekitai-0.1.0.dist-info/licenses/LICENSE +21 -0
  64. routekitai-0.1.0.dist-info/top_level.txt +1 -0
routekitai/providers/anthropic.py
@@ -0,0 +1,227 @@
+ """Anthropic Claude model provider."""
+
+ import os
+ from collections.abc import AsyncIterator
+ from typing import Any
+
+ import httpx
+
+ from routekitai.core.errors import ModelError
+ from routekitai.core.message import Message, MessageRole
+ from routekitai.core.model import Model, ModelResponse, StreamEvent, ToolCall, Usage
+ from routekitai.core.tool import Tool
+
+
+ class AnthropicModel(Model):
+     """Anthropic Claude model provider.
+
+     Supports Claude models via Anthropic API with tool use.
+     """
+
+     def __init__(
+         self,
+         name: str = "claude-3-opus-20240229",
+         provider: str = "anthropic",
+         api_key: str | None = None,
+         base_url: str = "https://api.anthropic.com/v1",
+         **kwargs: Any,
+     ) -> None:
+         """Initialize Anthropic model.
+
+         Args:
+             name: Model name (e.g., claude-3-opus-20240229, claude-3-sonnet-20240229)
+             provider: Provider name
+             api_key: Anthropic API key (defaults to ANTHROPIC_API_KEY env var)
+             base_url: API base URL
+             **kwargs: Additional configuration
+         """
+         super().__init__()
+         self._name = name
+         self._provider = provider
+         self.api_key = api_key or os.getenv("ANTHROPIC_API_KEY")
+         self.base_url = base_url.rstrip("/")
+         self._client: httpx.AsyncClient | None = None
+
+     @property
+     def name(self) -> str:
+         """Return the model name."""
+         return self._name
+
+     @property
+     def provider(self) -> str:
+         """Return the provider name."""
+         return self._provider
+
+     def _get_client(self) -> "httpx.AsyncClient":
+         """Get or create HTTP client."""
+         try:
+             import httpx
+         except ImportError as e:
+             raise ModelError(
+                 "httpx is required for Anthropic provider. Install with: pip install httpx"
+             ) from e
+
+         if self._client is None:
+             headers = {
+                 "Content-Type": "application/json",
+                 "x-api-key": self.api_key or "",
+                 "anthropic-version": "2023-06-01",
+             }
+             self._client = httpx.AsyncClient(
+                 base_url=self.base_url,
+                 headers=headers,
+                 timeout=60.0,
+             )
+         return self._client
+
+     def _message_to_anthropic(self, message: Message) -> dict[str, Any]:
+         """Convert routekitai Message to Anthropic format."""
+         role_map = {
+             MessageRole.SYSTEM: "system",
+             MessageRole.USER: "user",
+             MessageRole.ASSISTANT: "assistant",
+         }
+         # Anthropic doesn't support tool role messages directly
+         if message.role == MessageRole.TOOL:
+             # Convert tool messages to user messages with tool result
+             return {
+                 "role": "user",
+                 "content": f"Tool result: {message.content}",
+             }
+         return {
+             "role": role_map.get(message.role, "user"),
+             "content": message.content,
+         }
+
+     def _tools_to_anthropic(self, tools: list[Tool]) -> list[dict[str, Any]]:
+         """Convert routekitai Tools to Anthropic tool format."""
+         return [
+             {
+                 "name": tool.name,
+                 "description": tool.description,
+                 "input_schema": tool.parameters,
+             }
+             for tool in tools
+         ]
+
+     def _anthropic_to_tool_calls(self, tool_use_blocks: list[dict[str, Any]]) -> list[ToolCall]:
+         """Convert Anthropic tool use blocks to routekitai format."""
+         result = []
+         for block in tool_use_blocks:
+             if block.get("type") == "tool_use":
+                 result.append(
+                     ToolCall(
+                         id=block.get("id", ""),
+                         name=block.get("name", ""),
+                         arguments=block.get("input", {}),
+                     )
+                 )
+         return result
+
+     async def chat(
+         self,
+         messages: list[Message],
+         tools: list[Tool] | None = None,
+         stream: bool = False,
+         **kwargs: Any,
+     ) -> ModelResponse | AsyncIterator[StreamEvent]:
+         """Chat with Anthropic Claude model.
+
+         Args:
+             messages: Conversation messages
+             tools: Optional tools
+             stream: Whether to stream (not yet implemented)
+             **kwargs: Additional parameters (max_tokens, temperature, etc.)
+
+         Returns:
+             ModelResponse or stream of events
+
+         Raises:
+             ModelError: If API call fails
+         """
+         if stream:
+             raise NotImplementedError("Streaming not yet implemented for Anthropic")
+
+         if not self.api_key:
+             raise ModelError(
+                 "Anthropic API key is required. Set ANTHROPIC_API_KEY env var or pass api_key parameter"
+             )
+
+         client = self._get_client()
+
+         # Separate system messages from conversation
+         system_messages = [msg.content for msg in messages if msg.role == MessageRole.SYSTEM]
+         conversation_messages = [
+             self._message_to_anthropic(msg) for msg in messages if msg.role != MessageRole.SYSTEM
+         ]
+
+         # Prepare request
+         request_data: dict[str, Any] = {
+             "model": self.name,
+             "messages": conversation_messages,
+             "max_tokens": kwargs.get("max_tokens", 1024),
+             **{k: v for k, v in kwargs.items() if k not in ("max_tokens", "stream")},
+         }
+
+         if system_messages:
+             request_data["system"] = (
+                 system_messages[0] if len(system_messages) == 1 else "\n".join(system_messages)
+             )
+
+         if tools:
+             request_data["tools"] = self._tools_to_anthropic(tools)
+
+         try:
+             response = await client.post("/messages", json=request_data)
+             response.raise_for_status()
+             data = response.json()
+
+             # Parse response
+             content_blocks = data.get("content", [])
+             text_content = ""
+             tool_use_blocks = []
+
+             for block in content_blocks:
+                 if block.get("type") == "text":
+                     text_content += block.get("text", "")
+                 elif block.get("type") == "tool_use":
+                     tool_use_blocks.append(block)
+
+             # Convert tool calls
+             tool_calls = None
+             if tool_use_blocks:
+                 tool_calls = self._anthropic_to_tool_calls(tool_use_blocks)
+
+             # Parse usage
+             usage_data = data.get("usage", {})
+             usage = Usage(
+                 prompt_tokens=usage_data.get("input_tokens", 0),
+                 completion_tokens=usage_data.get("output_tokens", 0),
+                 total_tokens=usage_data.get("input_tokens", 0) + usage_data.get("output_tokens", 0),
+             )
+
+             return ModelResponse(
+                 content=text_content,
+                 tool_calls=tool_calls,
+                 usage=usage,
+                 metadata={"raw_response": data},
+             )
+
+         except httpx.HTTPStatusError as e:
+             raise ModelError(
+                 f"Anthropic API error: {e.response.status_code} - {e.response.text}"
+             ) from e
+         except Exception as e:
+             raise ModelError(f"Failed to call Anthropic API: {e}") from e
+
+     async def __aenter__(self) -> "AnthropicModel":
+         """Async context manager entry."""
+         return self
+
+     async def __aexit__(
+         self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: Any
+     ) -> None:
+         """Async context manager exit."""
+         if self._client:
+             await self._client.aclose()
+             self._client = None
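
Reviewer note: based only on the surface visible in this file, a minimal usage sketch of AnthropicModel might look like the following. The Message(role=..., content=...) constructor is an assumption, since routekitai/core/message.py is listed in this release but not expanded here.

import asyncio

from routekitai.core.message import Message, MessageRole  # constructor assumed
from routekitai.providers.anthropic import AnthropicModel

async def main() -> None:
    # api_key falls back to the ANTHROPIC_API_KEY environment variable
    async with AnthropicModel(name="claude-3-opus-20240229") as model:
        response = await model.chat(
            messages=[Message(role=MessageRole.USER, content="Hello, Claude!")]
        )
        print(response.content)  # text blocks concatenated by the provider
        print(response.usage)    # input/output tokens mapped to prompt/completion counts

asyncio.run(main())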
routekitai/providers/azure_openai.py
@@ -0,0 +1,243 @@
+ """Azure OpenAI model provider."""
+
+ import json
+ import os
+ from collections.abc import AsyncIterator
+ from typing import Any
+
+ import httpx
+
+ from routekitai.core.errors import ModelError
+ from routekitai.core.message import Message, MessageRole
+ from routekitai.core.model import Model, ModelResponse, StreamEvent, ToolCall, Usage
+ from routekitai.core.tool import Tool
+
+
+ class AzureOpenAIModel(Model):
+     """Azure OpenAI model provider.
+
+     Supports Azure OpenAI deployments with custom endpoints and API versions.
+     """
+
+     def __init__(
+         self,
+         name: str = "gpt-4",
+         provider: str = "azure-openai",
+         api_key: str | None = None,
+         endpoint: str | None = None,
+         deployment_name: str | None = None,
+         api_version: str = "2024-02-15-preview",
+         **kwargs: Any,
+     ) -> None:
+         """Initialize Azure OpenAI model.
+
+         Args:
+             name: Model name
+             provider: Provider name
+             api_key: Azure OpenAI API key (defaults to AZURE_OPENAI_API_KEY env var)
+             endpoint: Azure OpenAI endpoint URL (defaults to AZURE_OPENAI_ENDPOINT env var)
+             deployment_name: Deployment name (defaults to model name)
+             api_version: API version
+             **kwargs: Additional configuration
+         """
+         super().__init__()
+         self._name = name
+         self._provider = provider
+         self.api_key = api_key or os.getenv("AZURE_OPENAI_API_KEY")
+         self.endpoint = (endpoint or os.getenv("AZURE_OPENAI_ENDPOINT") or "").rstrip("/")
+         self.deployment_name = deployment_name or name
+         self.api_version = api_version
+         self._client: httpx.AsyncClient | None = None
+
+     @property
+     def name(self) -> str:
+         """Return the model name."""
+         return self._name
+
+     @property
+     def provider(self) -> str:
+         """Return the provider name."""
+         return self._provider
+
+     def _get_client(self) -> "httpx.AsyncClient":
+         """Get or create HTTP client."""
+         try:
+             import httpx
+         except ImportError as e:
+             raise ModelError(
+                 "httpx is required for Azure OpenAI provider. Install with: pip install httpx"
+             ) from e
+
+         if not self.endpoint:
+             raise ModelError(
+                 "Azure OpenAI endpoint is required. Set AZURE_OPENAI_ENDPOINT env var or pass endpoint parameter"
+             )
+         if self._client is None:
+             headers = {
+                 "Content-Type": "application/json",
+                 "api-key": self.api_key or "",
+             }
+             self._client = httpx.AsyncClient(
+                 base_url=self.endpoint,
+                 headers=headers,
+                 timeout=60.0,
+             )
+         return self._client
+
+     def _message_to_openai(self, message: Message) -> dict[str, Any]:
+         """Convert routekitai Message to OpenAI format."""
+         role_map = {
+             MessageRole.SYSTEM: "system",
+             MessageRole.USER: "user",
+             MessageRole.ASSISTANT: "assistant",
+             MessageRole.TOOL: "tool",
+         }
+         msg_dict: dict[str, Any] = {
+             "role": role_map.get(message.role, "user"),
+             "content": message.content,
+         }
+         if message.tool_calls:
+             msg_dict["tool_calls"] = [
+                 {
+                     "id": tc.get("id", ""),
+                     "type": "function",
+                     "function": {
+                         "name": tc.get("name", ""),
+                         "arguments": json.dumps(tc.get("arguments", {})),
+                     },
+                 }
+                 for tc in message.tool_calls
+             ]
+         if message.tool_result:
+             msg_dict["tool_call_id"] = message.tool_result.get("tool_call_id", "")
+         return msg_dict
+
+     def _tools_to_openai(self, tools: list[Tool]) -> list[dict[str, Any]]:
+         """Convert routekitai Tools to OpenAI function format."""
+         return [
+             {
+                 "type": "function",
+                 "function": {
+                     "name": tool.name,
+                     "description": tool.description,
+                     "parameters": tool.parameters,
+                 },
+             }
+             for tool in tools
+         ]
+
+     def _openai_to_tool_calls(self, tool_calls: list[dict[str, Any]]) -> list[ToolCall]:
+         """Convert OpenAI tool calls to routekitai format."""
+         result = []
+         for tc in tool_calls:
+             if tc.get("type") == "function":
+                 func = tc.get("function", {})
+                 try:
+                     arguments = json.loads(func.get("arguments", "{}"))
+                 except (json.JSONDecodeError, TypeError):
+                     arguments = func.get("arguments", {})
+                 result.append(
+                     ToolCall(
+                         id=tc.get("id", ""),
+                         name=func.get("name", ""),
+                         arguments=arguments,
+                     )
+                 )
+         return result
+
+     async def chat(
+         self,
+         messages: list[Message],
+         tools: list[Tool] | None = None,
+         stream: bool = False,
+         **kwargs: Any,
+     ) -> ModelResponse | AsyncIterator[StreamEvent]:
+         """Chat with Azure OpenAI model.
+
+         Args:
+             messages: Conversation messages
+             tools: Optional tools
+             stream: Whether to stream (not yet implemented)
+             **kwargs: Additional parameters (temperature, max_tokens, etc.)
+
+         Returns:
+             ModelResponse or stream of events
+
+         Raises:
+             ModelError: If API call fails
+         """
+         if stream:
+             raise NotImplementedError("Streaming not yet implemented for Azure OpenAI")
+
+         if not self.api_key:
+             raise ModelError(
+                 "Azure OpenAI API key is required. Set AZURE_OPENAI_API_KEY env var or pass api_key parameter"
+             )
+
+         client = self._get_client()
+
+         # Convert messages
+         openai_messages = [self._message_to_openai(msg) for msg in messages]
+
+         # Prepare request URL with deployment and API version
+         url = f"/openai/deployments/{self.deployment_name}/chat/completions?api-version={self.api_version}"
+
+         # Prepare request data
+         request_data: dict[str, Any] = {
+             "messages": openai_messages,
+             **kwargs,  # Allow temperature, max_tokens, etc.
+         }
+
+         # Add tools if provided
+         if tools:
+             request_data["tools"] = self._tools_to_openai(tools)
+
+         try:
+             response = await client.post(url, json=request_data)
+             response.raise_for_status()
+             data = response.json()
+
+             # Parse response
+             choice = data.get("choices", [{}])[0]
+             message = choice.get("message", {})
+             content = message.get("content", "")
+             tool_calls_data = message.get("tool_calls", [])
+
+             # Convert tool calls
+             tool_calls = None
+             if tool_calls_data:
+                 tool_calls = self._openai_to_tool_calls(tool_calls_data)
+
+             # Parse usage
+             usage_data = data.get("usage", {})
+             usage = Usage(
+                 prompt_tokens=usage_data.get("prompt_tokens", 0),
+                 completion_tokens=usage_data.get("completion_tokens", 0),
+                 total_tokens=usage_data.get("total_tokens", 0),
+             )
+
+             return ModelResponse(
+                 content=content,
+                 tool_calls=tool_calls,
+                 usage=usage,
+                 metadata={"raw_response": data},
+             )
+
+         except httpx.HTTPStatusError as e:
+             raise ModelError(
+                 f"Azure OpenAI API error: {e.response.status_code} - {e.response.text}"
+             ) from e
+         except Exception as e:
+             raise ModelError(f"Failed to call Azure OpenAI API: {e}") from e
+
+     async def __aenter__(self) -> "AzureOpenAIModel":
+         """Async context manager entry."""
+         return self
+
+     async def __aexit__(
+         self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: Any
+     ) -> None:
+         """Async context manager exit."""
+         if self._client:
+             await self._client.aclose()
+             self._client = None
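
Reviewer note: as with the Anthropic provider, a hedged usage sketch follows. The endpoint and deployment names are placeholders, and the Message constructor is again assumed from routekitai/core/message.py.

import asyncio

from routekitai.core.message import Message, MessageRole  # constructor assumed
from routekitai.providers.azure_openai import AzureOpenAIModel

async def main() -> None:
    # Placeholder endpoint/deployment; both also fall back to the
    # AZURE_OPENAI_ENDPOINT / AZURE_OPENAI_API_KEY environment variables.
    async with AzureOpenAIModel(
        name="gpt-4",
        endpoint="https://example-resource.openai.azure.com",
        deployment_name="example-gpt4-deployment",
    ) as model:
        # Requests are POSTed to
        # {endpoint}/openai/deployments/{deployment_name}/chat/completions?api-version=2024-02-15-preview
        response = await model.chat(
            messages=[Message(role=MessageRole.USER, content="Ping")],
            temperature=0.0,  # extra kwargs are forwarded in the request body
        )
        print(response.content)

asyncio.run(main())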
routekitai/providers/local.py
@@ -0,0 +1,196 @@
+ """Local/Fake model provider for testing."""
+
+ from collections import deque
+ from collections.abc import AsyncIterator, Callable
+ from typing import Any
+
+ from pydantic import Field
+
+ from routekitai.core.errors import ModelError
+ from routekitai.core.message import Message
+ from routekitai.core.model import Model, ModelResponse, StreamEvent, ToolCall, Usage
+ from routekitai.core.tool import Tool
+
+
+ class FakeModel(Model):
+     """Deterministic fake model for testing.
+
+     Uses scripted responses based on message content or a response function.
+     Can also use a queue of responses for sequential calls.
+     """
+
+     name: str = Field(default="fake", description="Model name")
+     provider: str = Field(default="local", description="Provider name")
+     responses: dict[str, str | dict[str, Any]] | None = Field(
+         default=None, description="Dict mapping message content to response"
+     )
+     response_fn: Callable[[list[Message], list[Tool] | None], str | dict[str, Any]] | None = Field(
+         default=None, description="Function(messages, tools) -> response"
+     )
+     response_queue: deque[str | dict[str, Any] | Callable] = Field(
+         default_factory=deque, description="Queue of responses for sequential calls"
+     )
+     call_count: int = Field(default=0, exclude=True)
+
+     def __init__(
+         self,
+         name: str = "fake",
+         provider: str = "local",
+         responses: dict[str, str | dict[str, Any]] | None = None,
+         response_fn: Callable[[list[Message], list[Tool] | None], str | dict[str, Any]]
+         | None = None,
+         response_queue: deque[str | dict[str, Any] | Callable] | None = None,
+         **kwargs: Any,
+     ) -> None:
+         """Initialize fake model.
+
+         Args:
+             name: Model name
+             provider: Provider name
+             responses: Dict mapping message content to response
+             response_fn: Function(messages, tools) -> response dict
+             response_queue: Queue of responses for sequential calls
+             **kwargs: Additional configuration
+         """
+         super().__init__()
+         object.__setattr__(self, "name", name)
+         object.__setattr__(self, "provider", provider)
+         object.__setattr__(self, "responses", responses or {})
+         object.__setattr__(self, "response_fn", response_fn)
+         object.__setattr__(self, "response_queue", response_queue or deque())
+         object.__setattr__(self, "call_count", 0)
+
+     def add_response(
+         self,
+         response: str
+         | dict[str, Any]
+         | Callable[[list[Message], list[Tool] | None], str | dict[str, Any]],
+     ) -> None:
+         """Add a response to the queue.
+
+         Args:
+             response: Response string, dict, or callable
+         """
+         self.response_queue.append(response)
+
+     async def chat(
+         self,
+         messages: list[Message],
+         tools: list[Tool] | None = None,
+         stream: bool = False,
+         **kwargs: Any,
+     ) -> ModelResponse | AsyncIterator[StreamEvent]:
+         """Chat with fake model.
+
+         Args:
+             messages: Conversation messages
+             tools: Optional tools
+             stream: Whether to stream (not supported)
+             **kwargs: Additional parameters
+
+         Returns:
+             ModelResponse
+
+         Raises:
+             ModelError: If model call fails
+         """
+         if stream:
+             raise NotImplementedError("Streaming not supported for FakeModel")
+
+         object.__setattr__(self, "call_count", self.call_count + 1)
+
+         # Get last user message
+         last_message = None
+         for msg in reversed(messages):
+             if msg.role.value == "user":
+                 last_message = msg
+                 break
+
+         # Use response queue if available (for sequential calls)
+         if self.response_queue:
+             response_data = self.response_queue.popleft()
+             if callable(response_data):
+                 result = response_data(messages, tools)
+             else:
+                 result = response_data
+
+             if isinstance(result, dict):
+                 tool_calls = None
+                 if "tool_calls" in result:
+                     tool_calls = [
+                         ToolCall(
+                             id=tc.get("id", f"call_{i}"),
+                             name=tc.get("name", ""),
+                             arguments=tc.get("arguments", {}),
+                         )
+                         for i, tc in enumerate(result.get("tool_calls", []))
+                     ]
+                 return ModelResponse(
+                     content=result.get("content", ""),
+                     tool_calls=tool_calls,
+                     usage=Usage(prompt_tokens=10, completion_tokens=20, total_tokens=30),
+                 )
+             elif isinstance(result, str):
+                 return ModelResponse(
+                     content=result,
+                     usage=Usage(prompt_tokens=10, completion_tokens=20, total_tokens=30),
+                 )
+
+         # Use response function if provided
+         if self.response_fn:
+             try:
+                 result = self.response_fn(messages, tools)
+                 if isinstance(result, dict):
+                     return ModelResponse(
+                         content=result.get("content", ""),
+                         tool_calls=result.get("tool_calls"),
+                         usage=result.get(
+                             "usage", Usage(prompt_tokens=10, completion_tokens=20, total_tokens=30)
+                         ),
+                     )
+                 elif isinstance(result, str):
+                     return ModelResponse(
+                         content=result,
+                         usage=Usage(prompt_tokens=10, completion_tokens=20, total_tokens=30),
+                     )
+             except Exception as e:
+                 raise ModelError(f"Response function failed: {e}") from e
+
+         # Use responses dict
+         if last_message and self.responses:
+             content = last_message.content.lower()
+             for key, response in self.responses.items():
+                 if key.lower() in content:
+                     # Check if response includes tool calls
+                     if isinstance(response, dict):
+                         tool_calls = None
+                         if "tool_calls" in response:
+                             tool_calls = [
+                                 ToolCall(
+                                     id=tc.get("id", f"call_{i}"),
+                                     name=tc.get("name", ""),
+                                     arguments=tc.get("arguments", {}),
+                                 )
+                                 for i, tc in enumerate(response.get("tool_calls", []))
+                             ]
+                         return ModelResponse(
+                             content=response.get("content", ""),
+                             tool_calls=tool_calls,
+                             usage=Usage(prompt_tokens=10, completion_tokens=20, total_tokens=30),
+                         )
+                     return ModelResponse(
+                         content=response,
+                         usage=Usage(prompt_tokens=10, completion_tokens=20, total_tokens=30),
+                     )
+
+         # Default response
+         default = "I understand. How can I help you?"
+         if tools and last_message:
+             # If tools available and user asks something, suggest using a tool
+             tool_names = [t.name for t in tools]
+             default = f"I can help with that. Available tools: {', '.join(tool_names)}."
+
+         return ModelResponse(
+             content=default,
+             usage=Usage(prompt_tokens=10, completion_tokens=20, total_tokens=30),
+         )
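
Reviewer note: FakeModel resolves a reply by checking, in order, the response queue, the response function, substring matches against the responses dict, and finally a canned default. A sketch exercising those paths (Message constructor again assumed):

import asyncio

from routekitai.core.message import Message, MessageRole  # constructor assumed
from routekitai.providers.local import FakeModel

async def main() -> None:
    model = FakeModel(responses={"weather": "It is sunny."})

    # 1. Queued responses are consumed first; dict entries may carry tool_calls.
    model.add_response({"tool_calls": [{"name": "search", "arguments": {"q": "forecast"}}]})
    first = await model.chat(messages=[Message(role=MessageRole.USER, content="anything")])
    print(first.tool_calls)  # one ToolCall(name="search", ...) with a generated id

    # 2. With the queue empty, keys of `responses` match as lowercase substrings
    #    of the last user message.
    second = await model.chat(
        messages=[Message(role=MessageRole.USER, content="What's the weather?")]
    )
    print(second.content)  # "It is sunny."

    # 3. No match falls through to the default reply (tool names are listed
    #    when tools were passed in).
    third = await model.chat(messages=[Message(role=MessageRole.USER, content="Unrelated")])
    print(third.content)  # "I understand. How can I help you?"

asyncio.run(main())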