mbxai 0.5.11__tar.gz → 0.5.13__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (25)
  1. {mbxai-0.5.11 → mbxai-0.5.13}/PKG-INFO +1 -1
  2. {mbxai-0.5.11 → mbxai-0.5.13}/pyproject.toml +1 -1
  3. {mbxai-0.5.11 → mbxai-0.5.13}/setup.py +1 -1
  4. {mbxai-0.5.11 → mbxai-0.5.13}/src/mbxai/__init__.py +1 -1
  5. {mbxai-0.5.11 → mbxai-0.5.13}/src/mbxai/mcp/server.py +1 -1
  6. {mbxai-0.5.11 → mbxai-0.5.13}/src/mbxai/tools/client.py +152 -108
  7. {mbxai-0.5.11 → mbxai-0.5.13}/uv.lock +7 -7
  8. {mbxai-0.5.11 → mbxai-0.5.13}/.vscode/PythonImportHelper-v2-Completion.json +0 -0
  9. {mbxai-0.5.11 → mbxai-0.5.13}/LICENSE +0 -0
  10. {mbxai-0.5.11 → mbxai-0.5.13}/README.md +0 -0
  11. {mbxai-0.5.11 → mbxai-0.5.13}/src/mbxai/core.py +0 -0
  12. {mbxai-0.5.11 → mbxai-0.5.13}/src/mbxai/mcp/__init__.py +0 -0
  13. {mbxai-0.5.11 → mbxai-0.5.13}/src/mbxai/mcp/client.py +0 -0
  14. {mbxai-0.5.11 → mbxai-0.5.13}/src/mbxai/mcp/example.py +0 -0
  15. {mbxai-0.5.11 → mbxai-0.5.13}/src/mbxai/openrouter/__init__.py +0 -0
  16. {mbxai-0.5.11 → mbxai-0.5.13}/src/mbxai/openrouter/client.py +0 -0
  17. {mbxai-0.5.11 → mbxai-0.5.13}/src/mbxai/openrouter/config.py +0 -0
  18. {mbxai-0.5.11 → mbxai-0.5.13}/src/mbxai/openrouter/models.py +0 -0
  19. {mbxai-0.5.11 → mbxai-0.5.13}/src/mbxai/tools/__init__.py +0 -0
  20. {mbxai-0.5.11 → mbxai-0.5.13}/src/mbxai/tools/example.py +0 -0
  21. {mbxai-0.5.11 → mbxai-0.5.13}/src/mbxai/tools/types.py +0 -0
  22. {mbxai-0.5.11 → mbxai-0.5.13}/tests/test_core.py +0 -0
  23. {mbxai-0.5.11 → mbxai-0.5.13}/tests/test_mcp.py +0 -0
  24. {mbxai-0.5.11 → mbxai-0.5.13}/tests/test_openrouter.py +0 -0
  25. {mbxai-0.5.11 → mbxai-0.5.13}/tests/test_tools.py +0 -0

{mbxai-0.5.11 → mbxai-0.5.13}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mbxai
-Version: 0.5.11
+Version: 0.5.13
 Summary: MBX AI SDK
 Project-URL: Homepage, https://www.mibexx.de
 Project-URL: Documentation, https://www.mibexx.de

{mbxai-0.5.11 → mbxai-0.5.13}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "mbxai"
-version = "0.5.11"
+version = "0.5.13"
 authors = [
     { name = "MBX AI" }
 ]

{mbxai-0.5.11 → mbxai-0.5.13}/setup.py
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
 
 setup(
     name="mbxai",
-    version="0.5.11",
+    version="0.5.13",
     author="MBX AI",
     description="MBX AI SDK",
     long_description=open("README.md").read(),

{mbxai-0.5.11 → mbxai-0.5.13}/src/mbxai/__init__.py
@@ -2,4 +2,4 @@
 MBX AI package.
 """
 
-__version__ = "0.5.11"
+__version__ = "0.5.13"

{mbxai-0.5.11 → mbxai-0.5.13}/src/mbxai/mcp/server.py
@@ -31,7 +31,7 @@ class MCPServer:
         self.app = FastAPI(
             title=self.name,
             description=self.description,
-            version="0.5.11",
+            version="0.5.13",
         )
 
         # Initialize MCP server

{mbxai-0.5.11 → mbxai-0.5.13}/src/mbxai/tools/client.py
@@ -48,7 +48,57 @@ class ToolClient:
             schema=schema,
         )
         self._tools[name] = tool
-        logger.debug(f"Registered tool: {name}")
+        logger.info(f"Registered tool: {name}")
+
+    def _truncate_content(self, content: str | None, max_length: int = 100) -> str:
+        """Truncate content for logging."""
+        if not content:
+            return "None"
+        if len(content) <= max_length:
+            return content
+        return content[:max_length] + "..."
+
+    def _log_messages(self, messages: list[dict[str, Any]]) -> None:
+        """Log the messages being sent to OpenRouter."""
+        logger.info("Sending messages to OpenRouter:")
+        for msg in messages:
+            role = msg.get("role", "unknown")
+            content = self._truncate_content(msg.get("content"))
+            tool_calls = msg.get("tool_calls", [])
+            tool_call_id = msg.get("tool_call_id")
+
+            if tool_calls:
+                tool_call_info = [
+                    f"{tc['function']['name']}(id={tc['id']})"
+                    for tc in tool_calls
+                ]
+                logger.info(f" {role}: content='{content}', tool_calls={tool_call_info}")
+            elif tool_call_id:
+                logger.info(f" {role}: content='{content}', tool_call_id={tool_call_id}")
+            else:
+                logger.info(f" {role}: content='{content}'")
+
+        # Validate tool call responses
+        tool_call_ids = set()
+        tool_response_ids = set()
+
+        for msg in messages:
+            if msg.get("role") == "assistant" and "tool_calls" in msg:
+                for tc in msg["tool_calls"]:
+                    tool_call_ids.add(tc["id"])
+            elif msg.get("role") == "tool":
+                tool_response_ids.add(msg["tool_call_id"])
+
+        missing_responses = tool_call_ids - tool_response_ids
+        if missing_responses:
+            logger.error(f"Missing tool responses for call IDs: {missing_responses}")
+            logger.error("Message sequence:")
+            for msg in messages:
+                role = msg.get("role", "unknown")
+                if role == "assistant" and "tool_calls" in msg:
+                    logger.error(f" Assistant message with tool calls: {[tc['id'] for tc in msg['tool_calls']]}")
+                elif role == "tool":
+                    logger.error(f" Tool response for call ID: {msg['tool_call_id']}")
 
     async def chat(
         self,
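
The `_log_messages` helper added in the hunk above is not just logging: it also cross-checks every tool call id announced by an assistant message against the `tool_call_id` values of the tool messages in the history, and logs an error when a call never received a response. The following standalone sketch (not part of the package; the function name and the example ids are made up for illustration) shows that validation step in isolation, assuming the OpenAI-style message dicts used throughout the diff:

# Hypothetical sketch of the id check performed inside _log_messages.
def find_missing_tool_responses(messages: list[dict]) -> set[str]:
    """Return tool call ids that never got a matching role="tool" message."""
    tool_call_ids: set[str] = set()
    tool_response_ids: set[str] = set()
    for msg in messages:
        if msg.get("role") == "assistant" and "tool_calls" in msg:
            tool_call_ids.update(tc["id"] for tc in msg["tool_calls"])
        elif msg.get("role") == "tool":
            tool_response_ids.add(msg["tool_call_id"])
    return tool_call_ids - tool_response_ids

# Example with made-up ids: the assistant requested call_1 but no tool message answers it.
history = [
    {"role": "user", "content": "What is the weather in Berlin?"},
    {"role": "assistant", "content": None, "tool_calls": [
        {"id": "call_1", "type": "function",
         "function": {"name": "get_weather", "arguments": '{"city": "Berlin"}'}},
    ]},
]
assert find_missing_tool_responses(history) == {"call_1"}

Catching this situation early is useful because chat-completions APIs in this style typically reject a request in which an assistant message's tool calls are not each followed by a tool message carrying the matching id.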
@@ -72,11 +122,14 @@ class ToolClient:
         tools = [tool.to_openai_function() for tool in self._tools.values()]
 
         if tools:
-            logger.debug(f"Using tools: {tools}")
+            logger.info(f"Available tools: {[tool['function']['name'] for tool in tools]}")
             kwargs["tools"] = tools
             kwargs["tool_choice"] = "auto"
 
         while True:
+            # Log messages before sending to OpenRouter
+            self._log_messages(messages)
+
             # Get the model's response
             response = self._client.chat_completion(
                 messages=messages,
@@ -107,92 +160,86 @@ class ToolClient:
                     for tool_call in message.tool_calls
                 ]
             messages.append(assistant_message)
-            logger.debug(f"Added assistant message: {assistant_message}")
+            logger.info(f"Assistant message: content='{self._truncate_content(message.content)}', tool_calls={[tc.function.name for tc in message.tool_calls] if message.tool_calls else None}")
 
             # If there are no tool calls, we're done
             if not message.tool_calls:
                 return response
 
-            # Handle each tool call
+            # Handle all tool calls before getting the next model response
+            tool_responses = []
             for tool_call in message.tool_calls:
-                logger.debug(f"Processing tool call: {tool_call}")
-                logger.debug(f"Tool call ID: {tool_call.id}")
-                logger.debug(f"Tool call function: {tool_call.function}")
-                logger.debug(f"Tool call arguments: {tool_call.function.arguments}")
-
                 tool = self._tools.get(tool_call.function.name)
                 if not tool:
                     raise ValueError(f"Unknown tool: {tool_call.function.name}")
 
                 # Parse arguments if they're a string
                 arguments = tool_call.function.arguments
-                logger.debug(f"Raw arguments type: {type(arguments)}")
-                logger.debug(f"Raw arguments: {arguments}")
-
                 if isinstance(arguments, str):
                     try:
                         arguments = json.loads(arguments)
-                        logger.debug(f"Parsed arguments: {arguments}")
                     except json.JSONDecodeError as e:
                         logger.error(f"Failed to parse tool arguments: {e}")
                         raise ValueError(f"Invalid tool arguments format: {arguments}")
 
                 # Call the tool
-                logger.debug(f"Calling tool {tool.name} with arguments: {arguments}")
+                logger.info(f"Calling tool: {tool.name} with args: {self._truncate_content(json.dumps(arguments))}")
                 if inspect.iscoroutinefunction(tool.function):
                     result = await tool.function(**arguments)
                 else:
                     result = tool.function(**arguments)
-                logger.debug(f"Tool result: {result}")
 
                 # Convert result to JSON string if it's not already
                 if not isinstance(result, str):
                     result = json.dumps(result)
 
-                # Add the tool response to the messages
+                # Create the tool response
                 tool_response = {
                     "role": "tool",
                     "tool_call_id": tool_call.id,
                     "content": result,
                 }
-                messages.append(tool_response)
-                logger.debug(f"Added tool response to messages: {tool_response}")
-
-                # Get a new response from the model with this tool result
-                response = self._client.chat_completion(
-                    messages=messages,
-                    model=model,
-                    stream=stream,
-                    **kwargs,
-                )
-
-                if stream:
-                    return response
-
-                message = response.choices[0].message
-                # Add the assistant's message with tool calls
-                assistant_message = {
-                    "role": "assistant",
-                    "content": message.content or None, # Ensure content is None if empty
-                }
-                if message.tool_calls:
-                    assistant_message["tool_calls"] = [
-                        {
-                            "id": tool_call.id,
-                            "type": "function",
-                            "function": {
-                                "name": tool_call.function.name,
-                                "arguments": tool_call.function.arguments,
-                            },
-                        }
-                        for tool_call in message.tool_calls
-                    ]
-                messages.append(assistant_message)
-                logger.debug(f"Added assistant message: {assistant_message}")
-
-                # If there are no more tool calls, we're done
-                if not message.tool_calls:
-                    return response
+                tool_responses.append(tool_response)
+                logger.info(f"Tool response for call ID {tool_call.id}: {self._truncate_content(result)}")
+
+            # Add all tool responses to the messages
+            messages.extend(tool_responses)
+
+            # Get a new response from the model with all tool results
+            response = self._client.chat_completion(
+                messages=messages,
+                model=model,
+                stream=stream,
+                **kwargs,
+            )
+
+            if stream:
+                return response
+
+            message = response.choices[0].message
+            # Add the assistant's message with tool calls
+            assistant_message = {
+                "role": "assistant",
+                "content": message.content or None, # Ensure content is None if empty
+            }
+            if message.tool_calls:
+                assistant_message["tool_calls"] = [
+                    {
+                        "id": tool_call.id,
+                        "type": "function",
+                        "function": {
+                            "name": tool_call.function.name,
+                            "arguments": tool_call.function.arguments,
+                        },
+                    }
+                    for tool_call in message.tool_calls
+                ]
+            messages.append(assistant_message)
+            logger.info(f"Assistant message: content='{self._truncate_content(message.content)}', tool_calls={[tc.function.name for tc in message.tool_calls] if message.tool_calls else None}")
+
+            # If there are no more tool calls, we're done
+            if not message.tool_calls:
+                return response
 
     async def parse(
         self,
@@ -218,11 +265,14 @@ class ToolClient:
         tools = [tool.to_openai_function() for tool in self._tools.values()]
 
         if tools:
-            logger.debug(f"Using tools: {tools}")
+            logger.info(f"Available tools: {[tool['function']['name'] for tool in tools]}")
             kwargs["tools"] = tools
             kwargs["tool_choice"] = "auto"
 
         while True:
+            # Log messages before sending to OpenRouter
+            self._log_messages(messages)
+
             # Get the model's response
             response = self._client.chat_completion_parse(
                 messages=messages,
@@ -254,90 +304,84 @@ class ToolClient:
                     for tool_call in message.tool_calls
                 ]
             messages.append(assistant_message)
-            logger.debug(f"Added assistant message: {assistant_message}")
+            logger.info(f"Assistant message: content='{self._truncate_content(message.content)}', tool_calls={[tc.function.name for tc in message.tool_calls] if message.tool_calls else None}")
 
             # If there are no tool calls, we're done
             if not message.tool_calls:
                 return response
 
-            # Handle each tool call
+            # Handle all tool calls before getting the next model response
+            tool_responses = []
             for tool_call in message.tool_calls:
-                logger.debug(f"Processing tool call: {tool_call}")
-                logger.debug(f"Tool call ID: {tool_call.id}")
-                logger.debug(f"Tool call function: {tool_call.function}")
-                logger.debug(f"Tool call arguments: {tool_call.function.arguments}")
-
                 tool = self._tools.get(tool_call.function.name)
                 if not tool:
                     raise ValueError(f"Unknown tool: {tool_call.function.name}")
 
                 # Parse arguments if they're a string
                 arguments = tool_call.function.arguments
-                logger.debug(f"Raw arguments type: {type(arguments)}")
-                logger.debug(f"Raw arguments: {arguments}")
-
                 if isinstance(arguments, str):
                     try:
                         arguments = json.loads(arguments)
-                        logger.debug(f"Parsed arguments: {arguments}")
                     except json.JSONDecodeError as e:
                         logger.error(f"Failed to parse tool arguments: {e}")
                         raise ValueError(f"Invalid tool arguments format: {arguments}")
 
                 # Call the tool
-                logger.debug(f"Calling tool {tool.name} with arguments: {arguments}")
+                logger.info(f"Calling tool: {tool.name} with args: {self._truncate_content(json.dumps(arguments))}")
                 if inspect.iscoroutinefunction(tool.function):
                     result = await tool.function(**arguments)
                 else:
                     result = tool.function(**arguments)
-                logger.debug(f"Tool result: {result}")
 
                 # Convert result to JSON string if it's not already
                 if not isinstance(result, str):
                     result = json.dumps(result)
 
-                # Add the tool response to the messages
+                # Create the tool response
                 tool_response = {
                     "role": "tool",
                     "tool_call_id": tool_call.id,
                     "content": result,
                 }
-                messages.append(tool_response)
-                logger.debug(f"Added tool response to messages: {tool_response}")
-
-                # Get a new response from the model with this tool result
-                response = self._client.chat_completion_parse(
-                    messages=messages,
-                    response_format=response_format,
-                    model=model,
-                    stream=stream,
-                    **kwargs,
-                )
-
-                if stream:
-                    return response
-
-                message = response.choices[0].message
-                # Add the assistant's message with tool calls
-                assistant_message = {
-                    "role": "assistant",
-                    "content": message.content or None, # Ensure content is None if empty
-                }
-                if message.tool_calls:
-                    assistant_message["tool_calls"] = [
-                        {
-                            "id": tool_call.id,
-                            "type": "function",
-                            "function": {
-                                "name": tool_call.function.name,
-                                "arguments": tool_call.function.arguments,
-                            },
-                        }
-                        for tool_call in message.tool_calls
-                    ]
-                messages.append(assistant_message)
-                logger.debug(f"Added assistant message: {assistant_message}")
-
-                # If there are no more tool calls, we're done
-                if not message.tool_calls:
-                    return response
+                tool_responses.append(tool_response)
+                logger.info(f"Tool response: {self._truncate_content(result)}")
+
+            # Add all tool responses to the messages
+            messages.extend(tool_responses)
+
+            # Get a new response from the model with all tool results
+            response = self._client.chat_completion_parse(
+                messages=messages,
+                response_format=response_format,
+                model=model,
+                stream=stream,
+                **kwargs,
+            )
+
+            if stream:
+                return response
+
+            message = response.choices[0].message
+            # Add the assistant's message with tool calls
+            assistant_message = {
+                "role": "assistant",
+                "content": message.content or None, # Ensure content is None if empty
+            }
+            if message.tool_calls:
+                assistant_message["tool_calls"] = [
+                    {
+                        "id": tool_call.id,
+                        "type": "function",
+                        "function": {
+                            "name": tool_call.function.name,
+                            "arguments": tool_call.function.arguments,
+                        },
+                    }
+                    for tool_call in message.tool_calls
+                ]
+            messages.append(assistant_message)
+            logger.info(f"Assistant message: content='{self._truncate_content(message.content)}', tool_calls={[tc.function.name for tc in message.tool_calls] if message.tool_calls else None}")
+
+            # If there are no more tool calls, we're done
+            if not message.tool_calls:
+                return response
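
Taken together, the `chat()` and `parse()` changes above move the follow-up completion request out of the per-tool-call loop: every tool call from one assistant turn is executed first, all tool messages are appended, and only then is the model queried again. Below is a simplified, synchronous sketch of that loop shape; the `client`, `tools` registry, and function names are placeholders for illustration, not the actual ToolClient API.

import json

def run_tool_loop(client, tools: dict, messages: list[dict], model: str):
    """Minimal sketch: batch all tool results from a turn before re-querying the model."""
    while True:
        response = client.chat_completion(messages=messages, model=model)
        message = response.choices[0].message

        # Record the assistant turn, including any tool calls, in the history.
        assistant_message = {"role": "assistant", "content": message.content or None}
        if message.tool_calls:
            assistant_message["tool_calls"] = [
                {"id": tc.id, "type": "function",
                 "function": {"name": tc.function.name, "arguments": tc.function.arguments}}
                for tc in message.tool_calls
            ]
        messages.append(assistant_message)

        if not message.tool_calls:
            return response

        # Execute every requested tool before the next completion request.
        tool_responses = []
        for tool_call in message.tool_calls:
            arguments = json.loads(tool_call.function.arguments)
            result = tools[tool_call.function.name](**arguments)
            tool_responses.append({
                "role": "tool",
                "tool_call_id": tool_call.id,
                "content": result if isinstance(result, str) else json.dumps(result),
            })
        messages.extend(tool_responses)

Batching the responses this way keeps every tool call id paired with a tool message in the same request, which is the invariant the `_log_messages` check verifies before each call to OpenRouter.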

{mbxai-0.5.11 → mbxai-0.5.13}/uv.lock
@@ -292,11 +292,11 @@ wheels = [
 
 [[package]]
 name = "httpx-sse"
-version = "0.5.11"
+version = "0.5.13"
 source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/4c/60/8f4281fa9bbf3c8034fd54c0e7412e66edbab6bc74c4996bd616f8d0406e/httpx-sse-0.5.11.tar.gz", hash = "sha256:1e81a3a3070ce322add1d3529ed42eb5f70817f45ed6ec915ab753f961139721", size = 12624 }
+sdist = { url = "https://files.pythonhosted.org/packages/4c/60/8f4281fa9bbf3c8034fd54c0e7412e66edbab6bc74c4996bd616f8d0406e/httpx-sse-0.5.13.tar.gz", hash = "sha256:1e81a3a3070ce322add1d3529ed42eb5f70817f45ed6ec915ab753f961139721", size = 12624 }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/e1/9b/a181f281f65d776426002f330c31849b86b31fc9d848db62e16f03ff739f/httpx_sse-0.5.11-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f", size = 7819 },
+    { url = "https://files.pythonhosted.org/packages/e1/9b/a181f281f65d776426002f330c31849b86b31fc9d848db62e16f03ff739f/httpx_sse-0.5.13-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f", size = 7819 },
 ]
 
 [[package]]
@@ -446,7 +446,7 @@ wheels = [
 
 [[package]]
 name = "mbxai"
-version = "0.5.11"
+version = "0.5.13"
 source = { editable = "." }
 dependencies = [
     { name = "fastapi" },
@@ -980,14 +980,14 @@ wheels = [
 
 [[package]]
 name = "typing-inspection"
-version = "0.5.11"
+version = "0.5.13"
 source = { registry = "https://pypi.org/simple" }
 dependencies = [
     { name = "typing-extensions" },
 ]
-sdist = { url = "https://files.pythonhosted.org/packages/82/5c/e6082df02e215b846b4b8c0b887a64d7d08ffaba30605502639d44c06b82/typing_inspection-0.5.11.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122", size = 76222 }
+sdist = { url = "https://files.pythonhosted.org/packages/82/5c/e6082df02e215b846b4b8c0b887a64d7d08ffaba30605502639d44c06b82/typing_inspection-0.5.13.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122", size = 76222 }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/31/08/aa4fdfb71f7de5176385bd9e90852eaf6b5d622735020ad600f2bab54385/typing_inspection-0.5.11-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f", size = 14125 },
+    { url = "https://files.pythonhosted.org/packages/31/08/aa4fdfb71f7de5176385bd9e90852eaf6b5d622735020ad600f2bab54385/typing_inspection-0.5.13-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f", size = 14125 },
 ]
 
 [[package]]