mbxai 0.6.9__py3-none-any.whl → 0.6.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mbxai/__init__.py +1 -1
- mbxai/mcp/server.py +1 -1
- mbxai/openrouter/client.py +3 -4
- mbxai/tools/client.py +232 -112
- {mbxai-0.6.9.dist-info → mbxai-0.6.11.dist-info}/METADATA +1 -1
- {mbxai-0.6.9.dist-info → mbxai-0.6.11.dist-info}/RECORD +8 -8
- {mbxai-0.6.9.dist-info → mbxai-0.6.11.dist-info}/WHEEL +0 -0
- {mbxai-0.6.9.dist-info → mbxai-0.6.11.dist-info}/licenses/LICENSE +0 -0
mbxai/__init__.py
CHANGED
mbxai/mcp/server.py
CHANGED
mbxai/openrouter/client.py
CHANGED
@@ -187,7 +187,7 @@ class OpenRouterClient:
         self.model = value

     @with_retry()
-    async def chat_completion(
+    def chat_completion(
         self,
         messages: list[dict[str, Any]],
         *,
@@ -205,7 +205,6 @@ class OpenRouterClient:
         total_size = sum(len(str(msg)) for msg in messages)
         logger.info(f"Total message size: {total_size} bytes")

-        # OpenAI client's create method is synchronous
         response = self._client.chat.completions.create(
             messages=messages,
             model=model or self.model,
@@ -240,7 +239,7 @@ class OpenRouterClient:
             self._handle_api_error("chat completion", e)

     @with_retry()
-    async def chat_completion_parse(
+    def chat_completion_parse(
         self,
         messages: list[dict[str, Any]],
         response_format: type[BaseModel],
@@ -281,7 +280,7 @@ class OpenRouterClient:
             format_desc = f"Respond with valid JSON matching this Pydantic model: {response_format.__name__}"
             last_user_msg["content"] = f"{format_desc}\n\n{last_user_msg['content']}"

-            response = await self.chat_completion(
+            response = self.chat_completion(
                 messages,
                 model=model,
                 stream=stream,
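Taken together, these hunks drop the async qualifier from chat_completion and chat_completion_parse: both now wrap the OpenAI client's synchronous create call and are invoked without await. A minimal calling sketch under that assumption; the constructor arguments and the import path shown are guesses, not confirmed by this diff:

    # Sketch only: OpenRouterClient's constructor signature is not shown in
    # this diff, so api_key/model below are hypothetical parameters.
    from mbxai.openrouter import OpenRouterClient

    client = OpenRouterClient(api_key="sk-...", model="openai/gpt-4o")
    response = client.chat_completion(
        messages=[{"role": "user", "content": "Hello"}],
    )
    print(response.choices[0].message.content)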
mbxai/tools/client.py
CHANGED
@@ -16,90 +16,40 @@ logger = logging.getLogger(__name__)
 T = TypeVar("T", bound=BaseModel)

 class ToolClient:
-    """
+    """Client for handling tool calls with OpenRouter."""

-    def __init__(self, openrouter_client: OpenRouterClient):
-        """Initialize the
-        self._openrouter_client = openrouter_client
-        self._tools: dict[str, Tool] = {}
+    def __init__(self, openrouter_client: OpenRouterClient) -> None:
+        """Initialize the ToolClient.

-
-
-
+        Args:
+            openrouter_client: The OpenRouter client to use
+        """
+        self._client = openrouter_client
+        self._tools: dict[str, Tool] = {}

-
-
-
-
-
-
-
-
-
-        return await tool.function(**kwargs)
+    def register_tool(
+        self,
+        name: str,
+        description: str,
+        function: Callable[..., Any],
+        schema: dict[str, Any],
+    ) -> None:
+        """Register a new tool.

-
-
-
-
-
-
-
-
-
-
+        Args:
+            name: The name of the tool
+            description: A description of what the tool does
+            function: The function to call when the tool is used
+            schema: The JSON schema for the tool's parameters
+        """
+        tool = Tool(
+            name=name,
+            description=description,
+            function=function,
+            schema=schema,
         )
-
-
-        if not response:
-            raise ValueError("No response received from OpenRouter")
-
-        if not response.choices:
-            raise ValueError("Response missing choices")
-
-        choice = response.choices[0]
-        if not choice:
-            raise ValueError("Empty choice in response")
-
-        message = choice.message
-        if not message:
-            raise ValueError("Choice missing message")
-
-        # If message has function call, execute it
-        if message.function_call:
-            tool_name = message.function_call.name
-            tool_args = json.loads(message.function_call.arguments)
-
-            # Invoke the tool
-            tool_response = await self.invoke_tool(tool_name, **tool_args)
-
-            # Add tool response to messages
-            messages.append({
-                "role": "assistant",
-                "content": None,
-                "function_call": {
-                    "name": tool_name,
-                    "arguments": message.function_call.arguments,
-                },
-            })
-            messages.append({
-                "role": "function",
-                "name": tool_name,
-                "content": json.dumps(tool_response),
-            })
-
-            # Get final response
-            final_response = await self._openrouter_client.chat_completion(
-                messages=messages,
-                model=model,
-            )
-
-            if not final_response or not final_response.choices:
-                raise ValueError("No response received after tool execution")
-
-            return final_response
-
-        return response
+        self._tools[name] = tool
+        logger.info(f"Registered tool: {name}")

     def _truncate_content(self, content: str | None, max_length: int = 100) -> str:
         """Truncate content for logging."""
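This hunk replaces the old function_call-based flow with an explicit tool registry: register_tool wraps the callable and its JSON schema in a Tool and stores it by name. A hedged sketch of registering a tool; the get_weather function, its schema, and the client construction are invented for illustration:

    # Illustrative only: the tool, its schema, and the constructor arguments
    # are assumptions, not taken from this diff.
    from mbxai.openrouter import OpenRouterClient
    from mbxai.tools import ToolClient

    def get_weather(city: str) -> dict:
        """Toy tool implementation returning canned data."""
        return {"city": city, "temperature_c": 21}

    tool_client = ToolClient(OpenRouterClient(api_key="sk-..."))
    tool_client.register_tool(
        name="get_weather",
        description="Look up the current weather for a city",
        function=get_weather,
        schema={
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    )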
@@ -246,12 +196,123 @@
         # Log the messages we're about to send
         self._log_messages(messages, validate_responses=False)

+    async def chat(
+        self,
+        messages: list[dict[str, Any]],
+        *,
+        model: str | None = None,
+        stream: bool = False,
+        **kwargs: Any,
+    ) -> Any:
+        """Chat with the model, handling tool calls."""
+        tools = [tool.to_openai_function() for tool in self._tools.values()]
+
+        if tools:
+            logger.info(f"Available tools: {[tool['function']['name'] for tool in tools]}")
+            kwargs["tools"] = tools
+            kwargs["tool_choice"] = "auto"
+
+        while True:
+            # Get the model's response
+            response = self._client.chat_completion(
+                messages=messages,
+                model=model,
+                stream=stream,
+                **kwargs,
+            )
+
+            if stream:
+                return response
+
+            message = response.choices[0].message
+            # Add the assistant's message with tool calls
+            assistant_message = {
+                "role": "assistant",
+                "content": message.content or None,  # Ensure content is None if empty
+            }
+            if message.tool_calls:
+                assistant_message["tool_calls"] = [
+                    {
+                        "id": tool_call.id,
+                        "type": "function",
+                        "function": {
+                            "name": tool_call.function.name,
+                            "arguments": tool_call.function.arguments,
+                        },
+                    }
+                    for tool_call in message.tool_calls
+                ]
+            messages.append(assistant_message)
+            logger.info(f"Message count: {len(messages)}, Added assistant message with tool calls: {[tc.function.name for tc in message.tool_calls] if message.tool_calls else None}")
+
+            # If there are no tool calls, we're done
+            if not message.tool_calls:
+                return response
+
+            # Process all tool calls
+            tool_responses = []
+            for tool_call in message.tool_calls:
+                tool = self._tools.get(tool_call.function.name)
+                if not tool:
+                    raise ValueError(f"Unknown tool: {tool_call.function.name}")
+
+                # Parse arguments if they're a string
+                arguments = tool_call.function.arguments
+                if isinstance(arguments, str):
+                    try:
+                        arguments = json.loads(arguments)
+                    except json.JSONDecodeError as e:
+                        logger.error(f"Failed to parse tool arguments: {e}")
+                        raise ValueError(f"Invalid tool arguments format: {arguments}")
+
+                # Call the tool
+                logger.info(f"Calling tool: {tool.name} with args: {self._truncate_dict(arguments)}")
+                try:
+                    if inspect.iscoroutinefunction(tool.function):
+                        result = await asyncio.wait_for(tool.function(**arguments), timeout=300.0)  # 5 minutes timeout
+                    else:
+                        result = tool.function(**arguments)
+                    logger.info(f"Tool {tool.name} completed successfully")
+                except asyncio.TimeoutError:
+                    logger.error(f"Tool {tool.name} timed out after 5 minutes")
+                    result = {"error": "Tool execution timed out after 5 minutes"}
+                except Exception as e:
+                    logger.error(f"Error calling tool {tool.name}: {str(e)}")
+                    result = {"error": f"Tool execution failed: {str(e)}"}
+
+                # Convert result to JSON string if it's not already
+                if not isinstance(result, str):
+                    result = json.dumps(result)
+
+                # Create the tool response
+                tool_response = {
+                    "role": "tool",
+                    "tool_call_id": tool_call.id,
+                    "content": result,
+                }
+                tool_responses.append(tool_response)
+                logger.info(f"Created tool response for call ID {tool_call.id}")
+
+            # Add all tool responses to the messages
+            messages.extend(tool_responses)
+            logger.info(f"Message count: {len(messages)}, Added {len(tool_responses)} tool responses to messages")
+
+            # Validate the message sequence
+            self._validate_message_sequence(messages, validate_responses=True)
+
+            # Log the messages we're about to send
+            self._log_messages(messages, validate_responses=False)
+
+            # Continue the loop to get the next response
+            continue
+
     async def parse(
         self,
-        messages: list[dict[str,
+        messages: list[dict[str, Any]],
         response_format: type[T],
         *,
         model: str | None = None,
+        stream: bool = False,
         **kwargs: Any,
     ) -> Any:
         """Chat with the model and parse the response into a Pydantic model.
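The new chat method is a coroutine that loops until the model returns a message without tool calls, executing registered tools in between (async tools get a 5-minute timeout) and appending role "tool" responses to the conversation. A usage sketch, reusing the hypothetical tool_client from the previous example:

    # Sketch only; assumes the tool_client and get_weather tool registered above.
    import asyncio

    async def main() -> None:
        messages = [{"role": "user", "content": "What's the weather in Berlin?"}]
        response = await tool_client.chat(messages)
        print(response.choices[0].message.content)

    asyncio.run(main())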
@@ -260,46 +321,105 @@
             messages: The conversation messages
             response_format: The Pydantic model to parse the response into
             model: Optional model override
+            stream: Whether to stream the response
             **kwargs: Additional parameters for the chat completion

         Returns:
             The parsed response from the model
         """
-
-
-
-
-
-
-
-
-
-
-        choice = response.choices[0]
-        if not choice:
-            raise ValueError("Empty choice in response")
+        tools = [tool.to_openai_function() for tool in self._tools.values()]
+
+        if tools:
+            logger.info(f"Available tools: {[tool['function']['name'] for tool in tools]}")
+            kwargs["tools"] = tools
+            kwargs["tool_choice"] = "auto"
+
+        while True:
+            # Log messages before sending to OpenRouter
+            self._log_messages(messages)

-
-
-            raise ValueError("Choice missing message")
-
-        # If we still have tool calls, something went wrong
-        if message.tool_calls:
-            raise ValueError("Unexpected tool calls in final response")
-
-        # Ensure we have content to parse
-        if not message.content:
-            raise ValueError("No content in final response to parse")
-
-        # Now that we have the final response, parse it into the desired format
-        try:
-            final_response = await self._openrouter_client.chat_completion_parse(
+            # Get the model's response
+            response = self._client.chat_completion_parse(
                 messages=messages,
                 response_format=response_format,
                 model=model,
+                stream=stream,
                 **kwargs,
             )
-
-
-
-
+
+            if stream:
+                return response
+
+            message = response.choices[0].message
+            # Add the assistant's message with tool calls
+            assistant_message = {
+                "role": "assistant",
+                "content": message.content or None,  # Ensure content is None if empty
+            }
+            if message.tool_calls:
+                assistant_message["tool_calls"] = [
+                    {
+                        "id": tool_call.id,
+                        "type": "function",
+                        "function": {
+                            "name": tool_call.function.name,
+                            "arguments": tool_call.function.arguments,
+                        },
+                    }
+                    for tool_call in message.tool_calls
+                ]
+            messages.append(assistant_message)
+            logger.info(f"Message count: {len(messages)}, Added assistant message with tool calls: {[tc.function.name for tc in message.tool_calls] if message.tool_calls else None}")
+
+            # If there are no tool calls, we're done
+            if not message.tool_calls:
+                return response
+
+            # Process all tool calls
+            tool_responses = []
+            for tool_call in message.tool_calls:
+                tool = self._tools.get(tool_call.function.name)
+                if not tool:
+                    raise ValueError(f"Unknown tool: {tool_call.function.name}")
+
+                # Parse arguments if they're a string
+                arguments = tool_call.function.arguments
+                if isinstance(arguments, str):
+                    try:
+                        arguments = json.loads(arguments)
+                    except json.JSONDecodeError as e:
+                        logger.error(f"Failed to parse tool arguments: {e}")
+                        raise ValueError(f"Invalid tool arguments format: {arguments}")
+
+                # Call the tool
+                logger.info(f"Calling tool: {tool.name} with args: {self._truncate_dict(arguments)}")
+                if inspect.iscoroutinefunction(tool.function):
+                    result = await tool.function(**arguments)
+                else:
+                    result = tool.function(**arguments)
+
+                # Convert result to JSON string if it's not already
+                if not isinstance(result, str):
+                    result = json.dumps(result)
+
+                # Create the tool response
+                tool_response = {
+                    "role": "tool",
+                    "tool_call_id": tool_call.id,
+                    "content": result,
+                }
+                tool_responses.append(tool_response)
+                logger.info(f"Created tool response for call ID {tool_call.id}")
+
+            # Add all tool responses to the messages
+            messages.extend(tool_responses)
+            logger.info(f"Message count: {len(messages)}, Added {len(tool_responses)} tool responses to messages")
+
+            # Validate the message sequence
+            self._validate_message_sequence(messages, validate_responses=True)
+
+            # Log the messages we're about to send
+            self._log_messages(messages, validate_responses=False)
+
+            # Continue the loop to get the next response
+            continue
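parse follows the same loop but routes through chat_completion_parse with a Pydantic response_format; unlike chat, its tool execution has no timeout or error wrapper. A hedged sketch: WeatherReport is invented, and where the parsed object lives on the returned response is not shown in this diff, so the example only prints the raw message:

    # Illustrative only: the response_format model and the parsed-output
    # access pattern are assumptions, not confirmed by this diff.
    import asyncio
    from pydantic import BaseModel

    class WeatherReport(BaseModel):
        city: str
        temperature_c: float

    async def main() -> None:
        messages = [{"role": "user", "content": "Give me the Berlin weather as structured data."}]
        response = await tool_client.parse(messages, WeatherReport)
        print(response.choices[0].message)  # exact parsed-content attribute depends on the client

    asyncio.run(main())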
{mbxai-0.6.9.dist-info → mbxai-0.6.11.dist-info}/RECORD
CHANGED
@@ -1,18 +1,18 @@
-mbxai/__init__.py,sha256=
+mbxai/__init__.py,sha256=TPQPmobkBuhgo14NoMCbpjcR4Lgi3T-hc27EY0cDggk,48
 mbxai/core.py,sha256=WMvmU9TTa7M_m-qWsUew4xH8Ul6xseCZ2iBCXJTW-Bs,196
 mbxai/mcp/__init__.py,sha256=_ek9iYdYqW5saKetj4qDci11jxesQDiHPJRpHMKkxgU,175
 mbxai/mcp/client.py,sha256=B8ZpH-uecmTCgoDw65LwwVxsFWVoX-08t5ff0hOEPXk,6011
 mbxai/mcp/example.py,sha256=oaol7AvvZnX86JWNz64KvPjab5gg1VjVN3G8eFSzuaE,2350
-mbxai/mcp/server.py,sha256=
+mbxai/mcp/server.py,sha256=T0-Y7FeHRFqSTp2ERU96fOQlQJKjMFxg8oqC4dzBmBA,3463
 mbxai/openrouter/__init__.py,sha256=Ito9Qp_B6q-RLGAQcYyTJVWwR2YAZvNqE-HIYXxhtD8,298
-mbxai/openrouter/client.py,sha256=
+mbxai/openrouter/client.py,sha256=RO5tbF42vkcjxjvC-QFB8DGA0gQLljH3KPBn3HgZV8I,13662
 mbxai/openrouter/config.py,sha256=Ia93s-auim9Sq71eunVDbn9ET5xX2zusXpV4JBdHAzs,3251
 mbxai/openrouter/models.py,sha256=b3IjjtZAjeGOf2rLsdnCD1HacjTnS8jmv_ZXorc-KJQ,2604
 mbxai/tools/__init__.py,sha256=QUFaXhDm-UKcuAtT1rbKzhBkvyRBVokcQIOf9cxIuwc,160
-mbxai/tools/client.py,sha256=
+mbxai/tools/client.py,sha256=t7rdITqgCbDXQPFOZhGj6VDDPAwqdilJMKPfCOcJaFo,17279
 mbxai/tools/example.py,sha256=1HgKK39zzUuwFbnp3f0ThyWVfA_8P28PZcTwaUw5K78,2232
 mbxai/tools/types.py,sha256=fo5t9UbsHGynhA88vD_ecgDqL8iLvt2E1h1ym43Rrgk,745
-mbxai-0.6.
-mbxai-0.6.
-mbxai-0.6.
-mbxai-0.6.
+mbxai-0.6.11.dist-info/METADATA,sha256=TtJhH4DpDQqvu0nRx1-M6U4XRkXt51PBWzxbJtDV-g0,4108
+mbxai-0.6.11.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+mbxai-0.6.11.dist-info/licenses/LICENSE,sha256=hEyhc4FxwYo3NQ40yNgZ7STqwVk-1_XcTXOnAPbGJAw,1069
+mbxai-0.6.11.dist-info/RECORD,,
File without changes
|
File without changes
|