mbxai 2.1.2__py3-none-any.whl → 2.2.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mbxai/__init__.py +1 -1
- mbxai/agent/client.py +35 -3
- mbxai/mcp/server.py +1 -1
- mbxai/openrouter/client.py +1 -83
- mbxai-2.2.0.dist-info/METADATA +492 -0
- {mbxai-2.1.2.dist-info → mbxai-2.2.0.dist-info}/RECORD +8 -8
- mbxai-2.1.2.dist-info/METADATA +0 -346
- {mbxai-2.1.2.dist-info → mbxai-2.2.0.dist-info}/WHEEL +0 -0
- {mbxai-2.1.2.dist-info → mbxai-2.2.0.dist-info}/licenses/LICENSE +0 -0
mbxai/__init__.py
CHANGED
mbxai/agent/client.py
CHANGED
@@ -134,6 +134,35 @@ class AgentClient:
|
|
134
134
|
logger.debug(f"🔗 AI call with {len(messages)} messages (no history)")
|
135
135
|
return self._ai_client.parse(full_messages, response_format)
|
136
136
|
|
137
|
+
def _validate_answers(self, answers: Any) -> bool:
|
138
|
+
"""
|
139
|
+
Validate that answers parameter is a proper AnswerList with content.
|
140
|
+
|
141
|
+
Args:
|
142
|
+
answers: The answers parameter to validate
|
143
|
+
|
144
|
+
Returns:
|
145
|
+
True if answers is valid and has content, False otherwise
|
146
|
+
"""
|
147
|
+
# Check if answers is the correct type
|
148
|
+
if not isinstance(answers, AnswerList):
|
149
|
+
logger.warning(f"Invalid answers type: {type(answers)}. Expected AnswerList, treating as no answers.")
|
150
|
+
return False
|
151
|
+
|
152
|
+
# Check if answers has content
|
153
|
+
if not hasattr(answers, 'answers') or not answers.answers:
|
154
|
+
logger.info(f"Empty answers list provided, proceeding without answers processing.")
|
155
|
+
return False
|
156
|
+
|
157
|
+
# Check if answers list contains valid Answer objects
|
158
|
+
for answer in answers.answers:
|
159
|
+
if not hasattr(answer, 'key') or not hasattr(answer, 'answer'):
|
160
|
+
logger.warning(f"Invalid answer object in list: {answer}. Treating as no answers.")
|
161
|
+
return False
|
162
|
+
|
163
|
+
logger.debug(f"Validated {len(answers.answers)} answers")
|
164
|
+
return True
|
165
|
+
|
137
166
|
def _extract_token_usage(self, response: Any) -> TokenUsage:
|
138
167
|
"""Extract token usage information from an AI response."""
|
139
168
|
try:
|
@@ -188,7 +217,7 @@ class AgentClient:
|
|
188
217
|
final_response_structure: Type[BaseModel] = None,
|
189
218
|
ask_questions: bool = True,
|
190
219
|
agent_id: str = None,
|
191
|
-
answers: AnswerList = None
|
220
|
+
answers: AnswerList | None = None
|
192
221
|
) -> AgentResponse:
|
193
222
|
"""
|
194
223
|
Process a prompt through the agent's thinking process.
|
@@ -252,8 +281,11 @@ class AgentClient:
|
|
252
281
|
|
253
282
|
# Handle answers provided (skip question generation and process directly)
|
254
283
|
if answers is not None:
|
255
|
-
|
256
|
-
|
284
|
+
if self._validate_answers(answers):
|
285
|
+
logger.info(f"📝 Agent {agent_id}: Processing with provided answers, skipping question generation")
|
286
|
+
return self._process_answers_directly(agent_id, prompt, final_response_structure, answers, token_summary, history_for_ai)
|
287
|
+
else:
|
288
|
+
logger.info(f"📝 Agent {agent_id}: Invalid or empty answers provided, proceeding with normal flow")
|
257
289
|
|
258
290
|
# Step 1: Generate questions (if ask_questions is True)
|
259
291
|
if ask_questions:
|
mbxai/mcp/server.py
CHANGED
mbxai/openrouter/client.py
CHANGED
@@ -4,7 +4,7 @@ OpenRouter client implementation.
|
|
4
4
|
|
5
5
|
from typing import Any, Optional, Union, Type
|
6
6
|
from openai import OpenAI, OpenAIError, RateLimitError, APITimeoutError, APIConnectionError, BadRequestError, AuthenticationError
|
7
|
-
|
7
|
+
|
8
8
|
from .models import OpenRouterModel, OpenRouterModelRegistry
|
9
9
|
from .config import OpenRouterConfig
|
10
10
|
from .schema import format_response
|
@@ -350,88 +350,6 @@ class OpenRouterClient:
|
|
350
350
|
logger.error("Could not read response content")
|
351
351
|
self._handle_api_error("parse completion", e)
|
352
352
|
|
353
|
-
@with_retry()
|
354
|
-
def create_parsed(
|
355
|
-
self,
|
356
|
-
messages: list[dict[str, Any]],
|
357
|
-
response_format: Type[BaseModel],
|
358
|
-
*,
|
359
|
-
model: str | None = None,
|
360
|
-
stream: bool = False,
|
361
|
-
**kwargs: Any,
|
362
|
-
) -> Any:
|
363
|
-
"""Get a chat completion from OpenRouter with structured output.
|
364
|
-
|
365
|
-
Args:
|
366
|
-
messages: The messages to send to the model
|
367
|
-
response_format: A Pydantic model defining the expected response format
|
368
|
-
model: Optional model override
|
369
|
-
stream: Whether to stream the response
|
370
|
-
**kwargs: Additional arguments to pass to the API
|
371
|
-
|
372
|
-
Returns:
|
373
|
-
The parsed response from the model
|
374
|
-
"""
|
375
|
-
try:
|
376
|
-
# Convert Pydantic model to OpenAI response format
|
377
|
-
response_format_param = type_to_response_format_param(response_format)
|
378
|
-
|
379
|
-
# Log the request details
|
380
|
-
logger.debug(f"Sending parsed chat completion request to OpenRouter with model: {model or self.model}")
|
381
|
-
logger.debug(f"Message count: {len(messages)}")
|
382
|
-
logger.debug(f"Response format: {json.dumps(response_format_param, indent=2)}")
|
383
|
-
|
384
|
-
# Calculate total message size for logging
|
385
|
-
total_size = sum(len(str(msg)) for msg in messages)
|
386
|
-
logger.debug(f"Total message size: {total_size} bytes")
|
387
|
-
|
388
|
-
request = {
|
389
|
-
"model": model or self.model,
|
390
|
-
"messages": messages,
|
391
|
-
"stream": stream,
|
392
|
-
"response_format": response_format_param,
|
393
|
-
**kwargs,
|
394
|
-
}
|
395
|
-
|
396
|
-
response = self._client.chat.completions.create(**request)
|
397
|
-
|
398
|
-
if response is None:
|
399
|
-
logger.error("Received None response from OpenRouter API")
|
400
|
-
raise OpenRouterAPIError("Received None response from OpenRouter API")
|
401
|
-
|
402
|
-
# Validate response structure
|
403
|
-
if not hasattr(response, 'choices'):
|
404
|
-
logger.error(f"Response missing 'choices' attribute. Available attributes: {dir(response)}")
|
405
|
-
raise OpenRouterAPIError("Invalid response format: missing 'choices' attribute")
|
406
|
-
|
407
|
-
if response.choices is None:
|
408
|
-
logger.error("Response choices is None")
|
409
|
-
raise OpenRouterAPIError("Invalid response format: choices is None")
|
410
|
-
|
411
|
-
logger.debug(f"Response type: {type(response)}")
|
412
|
-
logger.debug(f"Response attributes: {dir(response)}")
|
413
|
-
logger.debug(f"Received response from OpenRouter: {len(response.choices)} choices")
|
414
|
-
|
415
|
-
return response
|
416
|
-
|
417
|
-
except Exception as e:
|
418
|
-
stack_trace = traceback.format_exc()
|
419
|
-
logger.error(f"Error in parsed chat completion: {str(e)}")
|
420
|
-
logger.error(f"Stack trace:\n{stack_trace}")
|
421
|
-
logger.error(f"Request details: model={model or self.model}, stream={stream}, kwargs={kwargs}")
|
422
|
-
logger.error(f"Message structure: {[{'role': msg.get('role'), 'content_length': len(str(msg.get('content', '')))} for msg in messages]}")
|
423
|
-
|
424
|
-
if hasattr(e, 'response') and e.response is not None:
|
425
|
-
logger.error(f"Response status: {e.response.status_code}")
|
426
|
-
logger.error(f"Response headers: {e.response.headers}")
|
427
|
-
try:
|
428
|
-
content = e.response.text
|
429
|
-
logger.error(f"Response content length: {len(content)} bytes")
|
430
|
-
logger.error(f"Response content preview: {content[:1000]}...")
|
431
|
-
except:
|
432
|
-
logger.error("Could not read response content")
|
433
|
-
self._handle_api_error("parsed chat completion", e)
|
434
|
-
|
435
353
|
@classmethod
|
436
354
|
def register_model(cls, name: str, value: str) -> None:
|
437
355
|
"""Register a new model.
|
@@ -0,0 +1,492 @@
|
|
1
|
+
Metadata-Version: 2.4
|
2
|
+
Name: mbxai
|
3
|
+
Version: 2.2.0
|
4
|
+
Summary: MBX AI SDK
|
5
|
+
Project-URL: Homepage, https://www.mibexx.de
|
6
|
+
Project-URL: Documentation, https://www.mibexx.de
|
7
|
+
Project-URL: Repository, https://github.com/yourusername/mbxai.git
|
8
|
+
Author: MBX AI
|
9
|
+
License: MIT
|
10
|
+
License-File: LICENSE
|
11
|
+
Classifier: Development Status :: 4 - Beta
|
12
|
+
Classifier: Operating System :: OS Independent
|
13
|
+
Classifier: Programming Language :: Python
|
14
|
+
Classifier: Programming Language :: Python :: 3.12
|
15
|
+
Requires-Python: >=3.12
|
16
|
+
Requires-Dist: fastapi>=0.115.12
|
17
|
+
Requires-Dist: httpx>=0.27.0
|
18
|
+
Requires-Dist: mcp>=1.7.1
|
19
|
+
Requires-Dist: openai>=1.77.0
|
20
|
+
Requires-Dist: pydantic-settings>=2.9.1
|
21
|
+
Requires-Dist: pydantic>=2.9.1
|
22
|
+
Requires-Dist: python-multipart>=0.0.20
|
23
|
+
Requires-Dist: sse-starlette>=2.3.4
|
24
|
+
Requires-Dist: starlette>=0.46.2
|
25
|
+
Requires-Dist: typing-inspection<=0.4.0
|
26
|
+
Requires-Dist: uvicorn>=0.34.2
|
27
|
+
Provides-Extra: dev
|
28
|
+
Requires-Dist: black>=24.3.0; extra == 'dev'
|
29
|
+
Requires-Dist: isort>=5.13.2; extra == 'dev'
|
30
|
+
Requires-Dist: mypy>=1.8.0; extra == 'dev'
|
31
|
+
Requires-Dist: pytest-asyncio>=0.26.0; extra == 'dev'
|
32
|
+
Requires-Dist: pytest-cov>=6.1.1; extra == 'dev'
|
33
|
+
Requires-Dist: pytest>=8.3.5; extra == 'dev'
|
34
|
+
Description-Content-Type: text/markdown
|
35
|
+
|
36
|
+
# MBX AI
|
37
|
+
|
38
|
+
A comprehensive Python library for building intelligent AI applications with Large Language Models (LLMs), structured responses, tool integration, and agent-based thinking.
|
39
|
+
|
40
|
+
## 🚀 Features
|
41
|
+
|
42
|
+
- **🔗 Multiple AI Client Types**: OpenRouter integration with tool-enabled and MCP-enabled variants
|
43
|
+
- **🤖 Intelligent Agent System**: Dialog-based thinking with question generation, quality iteration, and conversation memory
|
44
|
+
- **🛠️ Tool Integration**: Easy function registration with automatic schema generation
|
45
|
+
- **🔌 MCP Support**: Full Model Context Protocol (MCP) client and server implementation
|
46
|
+
- **📋 Structured Responses**: Type-safe responses using Pydantic models
|
47
|
+
- **🔄 Quality Iteration**: Built-in response improvement through AI-powered quality checks
|
48
|
+
- **💬 Conversation Memory**: Persistent dialog sessions with history management
|
49
|
+
- **⚡ Automatic Retry**: Built-in retry logic with exponential backoff for robust connections
|
50
|
+
|
51
|
+
## 📦 Installation
|
52
|
+
|
53
|
+
```bash
|
54
|
+
pip install mbxai
|
55
|
+
```
|
56
|
+
|
57
|
+
## 🏗️ Architecture Overview
|
58
|
+
|
59
|
+
MBX AI provides four main client types, each building upon the previous:
|
60
|
+
|
61
|
+
1. **OpenRouterClient** - Basic LLM interactions with structured responses
|
62
|
+
2. **ToolClient** - Adds function calling capabilities
|
63
|
+
3. **MCPClient** - Adds Model Context Protocol server integration
|
64
|
+
4. **AgentClient** - Adds intelligent dialog-based thinking (wraps any of the above)
|
65
|
+
|
66
|
+
| Client | Structured Responses | Function Calling | MCP Integration | Agent Thinking |
|
67
|
+
|--------|---------------------|------------------|-----------------|----------------|
|
68
|
+
| OpenRouterClient | ✅ | ❌ | ❌ | ❌ |
|
69
|
+
| ToolClient | ✅ | ✅ | ❌ | ❌ |
|
70
|
+
| MCPClient | ✅ | ✅ | ✅ | ❌ |
|
71
|
+
| AgentClient | ✅ | ✅* | ✅* | ✅ |
|
72
|
+
|
73
|
+
*AgentClient capabilities depend on the wrapped client
|
74
|
+
|
75
|
+
## 🚀 Quick Start
|
76
|
+
|
77
|
+
### Basic OpenRouter Client
|
78
|
+
|
79
|
+
```python
|
80
|
+
import os
|
81
|
+
from mbxai import OpenRouterClient
|
82
|
+
from pydantic import BaseModel, Field
|
83
|
+
|
84
|
+
# Initialize client
|
85
|
+
client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY"))
|
86
|
+
|
87
|
+
# Simple chat
|
88
|
+
response = client.create([
|
89
|
+
{"role": "user", "content": "What is the capital of France?"}
|
90
|
+
])
|
91
|
+
print(response.choices[0].message.content)
|
92
|
+
|
93
|
+
# Structured response
|
94
|
+
class CityInfo(BaseModel):
|
95
|
+
name: str = Field(description="City name")
|
96
|
+
population: int = Field(description="Population count")
|
97
|
+
country: str = Field(description="Country name")
|
98
|
+
|
99
|
+
response = client.parse(
|
100
|
+
messages=[{"role": "user", "content": "Tell me about Paris"}],
|
101
|
+
response_format=CityInfo
|
102
|
+
)
|
103
|
+
city = response.choices[0].message.parsed
|
104
|
+
print(f"{city.name}, {city.country} - Population: {city.population:,}")
|
105
|
+
```
|
106
|
+
|
107
|
+
### Tool Client with Automatic Schema Generation
|
108
|
+
|
109
|
+
```python
|
110
|
+
import os
|
111
|
+
from mbxai import ToolClient, OpenRouterClient
|
112
|
+
|
113
|
+
# Initialize clients
|
114
|
+
openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY"))
|
115
|
+
tool_client = ToolClient(openrouter_client)
|
116
|
+
|
117
|
+
# Define a function - schema is auto-generated!
|
118
|
+
def get_weather(location: str, unit: str = "celsius") -> dict:
|
119
|
+
"""Get weather information for a location.
|
120
|
+
|
121
|
+
Args:
|
122
|
+
location: The city or location name
|
123
|
+
unit: Temperature unit (celsius or fahrenheit)
|
124
|
+
"""
|
125
|
+
return {
|
126
|
+
"location": location,
|
127
|
+
"temperature": 22,
|
128
|
+
"unit": unit,
|
129
|
+
"condition": "Sunny"
|
130
|
+
}
|
131
|
+
|
132
|
+
# Register tool (schema automatically generated from function signature)
|
133
|
+
tool_client.register_tool(
|
134
|
+
name="get_weather",
|
135
|
+
description="Get current weather for a location",
|
136
|
+
function=get_weather
|
137
|
+
# No schema needed - automatically generated!
|
138
|
+
)
|
139
|
+
|
140
|
+
# Use the tool
|
141
|
+
response = tool_client.chat([
|
142
|
+
{"role": "user", "content": "What's the weather like in Tokyo?"}
|
143
|
+
])
|
144
|
+
print(response.choices[0].message.content)
|
145
|
+
```
|
146
|
+
|
147
|
+
### MCP Client for Server Integration
|
148
|
+
|
149
|
+
```python
|
150
|
+
import os
|
151
|
+
from mbxai import MCPClient, OpenRouterClient
|
152
|
+
|
153
|
+
# Initialize MCP client
|
154
|
+
openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY"))
|
155
|
+
mcp_client = MCPClient(openrouter_client)
|
156
|
+
|
157
|
+
# Register MCP server (automatically loads all tools)
|
158
|
+
mcp_client.register_mcp_server("data-analysis", "http://localhost:8000")
|
159
|
+
|
160
|
+
# Chat with MCP tools available
|
161
|
+
response = mcp_client.chat([
|
162
|
+
{"role": "user", "content": "Analyze the sales data from the server"}
|
163
|
+
])
|
164
|
+
print(response.choices[0].message.content)
|
165
|
+
```
|
166
|
+
|
167
|
+
### Agent Client - Intelligent Dialog System
|
168
|
+
|
169
|
+
The AgentClient provides an intelligent thinking process with question generation, quality improvement, and conversation memory.
|
170
|
+
|
171
|
+
```python
|
172
|
+
import os
|
173
|
+
from mbxai import AgentClient, OpenRouterClient
|
174
|
+
from pydantic import BaseModel, Field
|
175
|
+
|
176
|
+
class TravelPlan(BaseModel):
|
177
|
+
destination: str = Field(description="Travel destination")
|
178
|
+
duration: str = Field(description="Trip duration")
|
179
|
+
activities: list[str] = Field(description="Recommended activities")
|
180
|
+
budget: str = Field(description="Estimated budget")
|
181
|
+
|
182
|
+
# Initialize agent
|
183
|
+
openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY"))
|
184
|
+
agent = AgentClient(openrouter_client, max_iterations=2)
|
185
|
+
|
186
|
+
# Agent with questions (interactive mode)
|
187
|
+
response = agent.agent(
|
188
|
+
prompt="Plan a vacation for me",
|
189
|
+
final_response_structure=TravelPlan,
|
190
|
+
ask_questions=True
|
191
|
+
)
|
192
|
+
|
193
|
+
if response.has_questions():
|
194
|
+
print("Agent Questions:")
|
195
|
+
for q in response.questions:
|
196
|
+
print(f"- {q.question}")
|
197
|
+
|
198
|
+
# Answer questions
|
199
|
+
from mbxai import AnswerList, Answer
|
200
|
+
answers = AnswerList(answers=[
|
201
|
+
Answer(key="destination_preference", answer="Mountain destination"),
|
202
|
+
Answer(key="budget_range", answer="$2000-3000"),
|
203
|
+
Answer(key="duration", answer="5 days")
|
204
|
+
])
|
205
|
+
|
206
|
+
# Continue with answers
|
207
|
+
final_response = agent.agent(
|
208
|
+
prompt="Continue with the travel planning",
|
209
|
+
final_response_structure=TravelPlan,
|
210
|
+
agent_id=response.agent_id,
|
211
|
+
answers=answers
|
212
|
+
)
|
213
|
+
|
214
|
+
plan = final_response.final_response
|
215
|
+
print(f"Destination: {plan.destination}")
|
216
|
+
print(f"Duration: {plan.duration}")
|
217
|
+
else:
|
218
|
+
# Direct response
|
219
|
+
plan = response.final_response
|
220
|
+
print(f"Destination: {plan.destination}")
|
221
|
+
```
|
222
|
+
|
223
|
+
### Agent with Tool Integration
|
224
|
+
|
225
|
+
```python
|
226
|
+
from mbxai import AgentClient, ToolClient, OpenRouterClient
|
227
|
+
|
228
|
+
# Setup tool-enabled agent
|
229
|
+
openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY"))
|
230
|
+
tool_client = ToolClient(openrouter_client)
|
231
|
+
agent = AgentClient(tool_client)
|
232
|
+
|
233
|
+
# Register tools via agent (proxy method)
|
234
|
+
def search_flights(origin: str, destination: str, date: str) -> dict:
|
235
|
+
"""Search for flights between cities."""
|
236
|
+
return {
|
237
|
+
"flights": [
|
238
|
+
{"airline": "Example Air", "price": "$450", "duration": "3h 15m"}
|
239
|
+
]
|
240
|
+
}
|
241
|
+
|
242
|
+
agent.register_tool(
|
243
|
+
name="search_flights",
|
244
|
+
description="Search for flights between cities",
|
245
|
+
function=search_flights
|
246
|
+
)
|
247
|
+
|
248
|
+
# Agent automatically uses tools when needed
|
249
|
+
class FlightInfo(BaseModel):
|
250
|
+
flights: list[dict] = Field(description="Available flights")
|
251
|
+
recommendation: str = Field(description="Flight recommendation")
|
252
|
+
|
253
|
+
response = agent.agent(
|
254
|
+
prompt="Find flights from New York to Los Angeles for tomorrow",
|
255
|
+
final_response_structure=FlightInfo,
|
256
|
+
ask_questions=False
|
257
|
+
)
|
258
|
+
|
259
|
+
flight_info = response.final_response
|
260
|
+
print(f"Found {len(flight_info.flights)} flights")
|
261
|
+
print(f"Recommendation: {flight_info.recommendation}")
|
262
|
+
```
|
263
|
+
|
264
|
+
## 📚 Detailed Documentation
|
265
|
+
|
266
|
+
### OpenRouterClient
|
267
|
+
|
268
|
+
The base client for OpenRouter API integration with structured response support.
|
269
|
+
|
270
|
+
#### Key Features:
|
271
|
+
- **Multiple Models**: Support for GPT-4, Claude, Llama, and other models via OpenRouter
|
272
|
+
- **Structured Responses**: Type-safe responses using Pydantic models
|
273
|
+
- **Retry Logic**: Automatic retry with exponential backoff
|
274
|
+
- **Error Handling**: Comprehensive error handling with detailed logging
|
275
|
+
|
276
|
+
#### Methods:
|
277
|
+
- `create()` - Basic chat completion
|
278
|
+
- `parse()` - Chat completion with structured response
|
279
|
+
|
280
|
+
#### Configuration:
|
281
|
+
```python
|
282
|
+
client = OpenRouterClient(
|
283
|
+
token="your-api-key",
|
284
|
+
model="openai/gpt-4-turbo", # or use OpenRouterModel enum
|
285
|
+
max_retries=3,
|
286
|
+
retry_initial_delay=1.0,
|
287
|
+
retry_max_delay=10.0
|
288
|
+
)
|
289
|
+
```
|
290
|
+
|
291
|
+
### ToolClient
|
292
|
+
|
293
|
+
Extends OpenRouterClient with function calling capabilities.
|
294
|
+
|
295
|
+
#### Key Features:
|
296
|
+
- **Automatic Schema Generation**: Generate JSON schemas from Python function signatures
|
297
|
+
- **Tool Registration**: Simple function registration
|
298
|
+
- **Tool Execution**: Automatic tool calling and response handling
|
299
|
+
- **Error Recovery**: Graceful handling of tool execution errors
|
300
|
+
|
301
|
+
#### Usage:
|
302
|
+
```python
|
303
|
+
tool_client = ToolClient(openrouter_client)
|
304
|
+
|
305
|
+
# Register with automatic schema
|
306
|
+
tool_client.register_tool("function_name", "description", function)
|
307
|
+
|
308
|
+
# Register with custom schema
|
309
|
+
tool_client.register_tool("function_name", "description", function, custom_schema)
|
310
|
+
```
|
311
|
+
|
312
|
+
### MCPClient
|
313
|
+
|
314
|
+
Extends ToolClient with Model Context Protocol (MCP) server integration.
|
315
|
+
|
316
|
+
#### Key Features:
|
317
|
+
- **MCP Server Integration**: Connect to MCP servers and load their tools
|
318
|
+
- **Tool Discovery**: Automatically discover and register tools from MCP servers
|
319
|
+
- **HTTP Client Management**: Built-in HTTP client for MCP communication
|
320
|
+
- **Schema Conversion**: Convert MCP schemas to OpenAI function format
|
321
|
+
|
322
|
+
#### Usage:
|
323
|
+
```python
|
324
|
+
mcp_client = MCPClient(openrouter_client)
|
325
|
+
mcp_client.register_mcp_server("server-name", "http://localhost:8000")
|
326
|
+
```
|
327
|
+
|
328
|
+
### AgentClient
|
329
|
+
|
330
|
+
Wraps any client with intelligent dialog-based thinking capabilities.
|
331
|
+
|
332
|
+
#### Key Features:
|
333
|
+
- **Question Generation**: Automatically generates clarifying questions
|
334
|
+
- **Quality Iteration**: Improves responses through multiple AI review cycles
|
335
|
+
- **Conversation Memory**: Maintains conversation history across interactions
|
336
|
+
- **Flexible Configuration**: Configurable quality vs speed tradeoffs
|
337
|
+
- **Tool Proxy Methods**: Access underlying client's tool capabilities
|
338
|
+
|
339
|
+
#### Configuration Options:
|
340
|
+
```python
|
341
|
+
agent = AgentClient(
|
342
|
+
ai_client=any_supported_client,
|
343
|
+
max_iterations=2 # 0=fastest, 3+=highest quality
|
344
|
+
)
|
345
|
+
```
|
346
|
+
|
347
|
+
#### Dialog Flow:
|
348
|
+
1. **Question Generation** (if `ask_questions=True`)
|
349
|
+
2. **Answer Processing** (if questions were asked)
|
350
|
+
3. **Thinking Process** (analyze prompt and context)
|
351
|
+
4. **Quality Iteration** (improve response through AI review)
|
352
|
+
5. **Final Response** (generate structured output)
|
353
|
+
|
354
|
+
#### Session Management:
|
355
|
+
```python
|
356
|
+
# List active sessions
|
357
|
+
sessions = agent.list_sessions()
|
358
|
+
|
359
|
+
# Get session info
|
360
|
+
info = agent.get_session_info(agent_id)
|
361
|
+
|
362
|
+
# Delete session
|
363
|
+
agent.delete_session(agent_id)
|
364
|
+
```
|
365
|
+
|
366
|
+
## 🏃‍♂️ Advanced Examples
|
367
|
+
|
368
|
+
### Custom Model Registration
|
369
|
+
|
370
|
+
```python
|
371
|
+
from mbxai import OpenRouterClient, OpenRouterModel
|
372
|
+
|
373
|
+
# Register custom model
|
374
|
+
OpenRouterClient.register_model("CUSTOM_MODEL", "provider/model-name")
|
375
|
+
|
376
|
+
# Use custom model
|
377
|
+
client = OpenRouterClient(token="your-key", model="CUSTOM_MODEL")
|
378
|
+
```
|
379
|
+
|
380
|
+
### Conversation History and Context
|
381
|
+
|
382
|
+
```python
|
383
|
+
# Start a conversation
|
384
|
+
response1 = agent.agent("Tell me about quantum computing", ScienceExplanation)
|
385
|
+
agent_id = response1.agent_id
|
386
|
+
|
387
|
+
# Continue conversation with context
|
388
|
+
response2 = agent.agent(
|
389
|
+
"How does it compare to classical computing?",
|
390
|
+
ComparisonExplanation,
|
391
|
+
agent_id=agent_id,
|
392
|
+
ask_questions=False
|
393
|
+
)
|
394
|
+
|
395
|
+
# The agent remembers the previous conversation context
|
396
|
+
```
|
397
|
+
|
398
|
+
### Error Handling and Logging
|
399
|
+
|
400
|
+
```python
|
401
|
+
import logging
|
402
|
+
|
403
|
+
# Configure logging
|
404
|
+
logging.basicConfig(level=logging.DEBUG)
|
405
|
+
|
406
|
+
try:
|
407
|
+
response = client.create(messages)
|
408
|
+
except OpenRouterAPIError as e:
|
409
|
+
print(f"API Error: {e}")
|
410
|
+
except OpenRouterConnectionError as e:
|
411
|
+
print(f"Connection Error: {e}")
|
412
|
+
except Exception as e:
|
413
|
+
print(f"Unexpected Error: {e}")
|
414
|
+
```
|
415
|
+
|
416
|
+
### Streaming Responses
|
417
|
+
|
418
|
+
```python
|
419
|
+
# Streaming with OpenRouterClient
|
420
|
+
response = client.create(messages, stream=True)
|
421
|
+
for chunk in response:
|
422
|
+
if chunk.choices[0].delta.content:
|
423
|
+
print(chunk.choices[0].delta.content, end="")
|
424
|
+
|
425
|
+
# Streaming with ToolClient (tools execute before streaming)
|
426
|
+
response = tool_client.chat(messages, stream=True)
|
427
|
+
for chunk in response:
|
428
|
+
if chunk.choices[0].delta.content:
|
429
|
+
print(chunk.choices[0].delta.content, end="")
|
430
|
+
```
|
431
|
+
|
432
|
+
## 🧪 Testing
|
433
|
+
|
434
|
+
Run the test suite:
|
435
|
+
|
436
|
+
```bash
|
437
|
+
# Install development dependencies
|
438
|
+
pip install -e ".[dev]"
|
439
|
+
|
440
|
+
# Run tests
|
441
|
+
pytest tests/
|
442
|
+
|
443
|
+
# Run with coverage
|
444
|
+
pytest tests/ --cov=mbxai --cov-report=html
|
445
|
+
```
|
446
|
+
|
447
|
+
## 🔧 Development Setup
|
448
|
+
|
449
|
+
1. Clone the repository:
|
450
|
+
```bash
|
451
|
+
git clone https://github.com/yourusername/mbxai.git
|
452
|
+
cd mbxai/packages
|
453
|
+
```
|
454
|
+
|
455
|
+
2. Create a virtual environment:
|
456
|
+
```bash
|
457
|
+
python -m venv .venv
|
458
|
+
source .venv/bin/activate # On Windows: .venv\Scripts\activate
|
459
|
+
```
|
460
|
+
|
461
|
+
3. Install in development mode:
|
462
|
+
```bash
|
463
|
+
pip install -e ".[dev]"
|
464
|
+
```
|
465
|
+
|
466
|
+
4. Set up environment variables:
|
467
|
+
```bash
|
468
|
+
export OPENROUTER_API_KEY="your-api-key"
|
469
|
+
```
|
470
|
+
|
471
|
+
## 📄 License
|
472
|
+
|
473
|
+
MIT License - see [LICENSE](LICENSE) file for details.
|
474
|
+
|
475
|
+
## 🤝 Contributing
|
476
|
+
|
477
|
+
Contributions are welcome! Please feel free to submit a Pull Request.
|
478
|
+
|
479
|
+
## 🔗 Links
|
480
|
+
|
481
|
+
- **Homepage**: [https://www.mibexx.de](https://www.mibexx.de)
|
482
|
+
- **Documentation**: [https://www.mibexx.de](https://www.mibexx.de)
|
483
|
+
- **Repository**: [https://github.com/yourusername/mbxai](https://github.com/yourusername/mbxai)
|
484
|
+
|
485
|
+
## 📊 Version Information
|
486
|
+
|
487
|
+
Current version: **2.2.0**
|
488
|
+
|
489
|
+
- Python 3.12+ required
|
490
|
+
- Built with modern async/await patterns
|
491
|
+
- Type-safe with Pydantic v2
|
492
|
+
- Compatible with OpenAI SDK v1.77+
|
@@ -1,7 +1,7 @@
|
|
1
|
-
mbxai/__init__.py,sha256=
|
1
|
+
mbxai/__init__.py,sha256=uPglmoE2kWWVTJn4_zLFhpUZveKcvqpirELVG9NOwI8,407
|
2
2
|
mbxai/core.py,sha256=WMvmU9TTa7M_m-qWsUew4xH8Ul6xseCZ2iBCXJTW-Bs,196
|
3
3
|
mbxai/agent/__init__.py,sha256=5j3mW2NZtAU1s2w8n833axWBQsxW8U0qKwoQ9JtQZ4k,289
|
4
|
-
mbxai/agent/client.py,sha256=
|
4
|
+
mbxai/agent/client.py,sha256=M4n1RKeSi3e59nvJUjDdFR0dgWP-X1pe5j1YzjSV1js,37870
|
5
5
|
mbxai/agent/models.py,sha256=sjBtaAENDABHl8IqTON1gxFFSZIaQYUCBFHB5804_Fw,5780
|
6
6
|
mbxai/examples/agent_example.py,sha256=7gQHcMVWBu2xdxnVNzz4UfW0lkUnw9a5DN2-YoIRxXE,7420
|
7
7
|
mbxai/examples/agent_iterations_example.py,sha256=xMqZhBWS67EkRkArjOAY2fCgLkQ32Qn9E4CSfEKW4MU,7905
|
@@ -26,9 +26,9 @@ mbxai/examples/mcp/mcp_server_example.py,sha256=nFfg22Jnc6HMW_ezLO3So1xwDdx2_rIt
|
|
26
26
|
mbxai/mcp/__init__.py,sha256=_ek9iYdYqW5saKetj4qDci11jxesQDiHPJRpHMKkxgU,175
|
27
27
|
mbxai/mcp/client.py,sha256=QRzId6o4_WRWVv3rtm8cfZZGaoY_UlaOO-oqNjY-tmw,5219
|
28
28
|
mbxai/mcp/example.py,sha256=oaol7AvvZnX86JWNz64KvPjab5gg1VjVN3G8eFSzuaE,2350
|
29
|
-
mbxai/mcp/server.py,sha256=
|
29
|
+
mbxai/mcp/server.py,sha256=z-ZyrFbQ-ZD_re4LUP4l9BO17jeJCxEEX3tNFsIoRO0,3332
|
30
30
|
mbxai/openrouter/__init__.py,sha256=Ito9Qp_B6q-RLGAQcYyTJVWwR2YAZvNqE-HIYXxhtD8,298
|
31
|
-
mbxai/openrouter/client.py,sha256=
|
31
|
+
mbxai/openrouter/client.py,sha256=aWgE_-OqfwIcJmotM7diR0HbzA0qnVTdVH3xVIPyWHA,14542
|
32
32
|
mbxai/openrouter/config.py,sha256=Ia93s-auim9Sq71eunVDbn9ET5xX2zusXpV4JBdHAzs,3251
|
33
33
|
mbxai/openrouter/models.py,sha256=b3IjjtZAjeGOf2rLsdnCD1HacjTnS8jmv_ZXorc-KJQ,2604
|
34
34
|
mbxai/openrouter/schema.py,sha256=H_77ZrA9zmbX155bWpCJj1jehUyJPS0QybEW1IVAoe0,540
|
@@ -36,7 +36,7 @@ mbxai/tools/__init__.py,sha256=ogxrHvgJ7OR62Lmd5x9Eh5d2C0jqWyQis7Zy3yKpZ78,218
|
|
36
36
|
mbxai/tools/client.py,sha256=2wFPD-UN3Y2DSyrnqxt2vvFgTYHzUl14_y0r6fhAWmM,17198
|
37
37
|
mbxai/tools/example.py,sha256=1HgKK39zzUuwFbnp3f0ThyWVfA_8P28PZcTwaUw5K78,2232
|
38
38
|
mbxai/tools/types.py,sha256=OFfM7scDGTm4FOcJA2ecj-fxL1MEBkqPsT3hqCL1Jto,9505
|
39
|
-
mbxai-2.
|
40
|
-
mbxai-2.
|
41
|
-
mbxai-2.
|
42
|
-
mbxai-2.
|
39
|
+
mbxai-2.2.0.dist-info/METADATA,sha256=ABGw5xmU3g9dh1lYkcruwAzXNdEO_54hEcrVztrMG30,14405
|
40
|
+
mbxai-2.2.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
|
41
|
+
mbxai-2.2.0.dist-info/licenses/LICENSE,sha256=hEyhc4FxwYo3NQ40yNgZ7STqwVk-1_XcTXOnAPbGJAw,1069
|
42
|
+
mbxai-2.2.0.dist-info/RECORD,,
|
mbxai-2.1.2.dist-info/METADATA
DELETED
@@ -1,346 +0,0 @@
|
|
1
|
-
Metadata-Version: 2.4
|
2
|
-
Name: mbxai
|
3
|
-
Version: 2.1.2
|
4
|
-
Summary: MBX AI SDK
|
5
|
-
Project-URL: Homepage, https://www.mibexx.de
|
6
|
-
Project-URL: Documentation, https://www.mibexx.de
|
7
|
-
Project-URL: Repository, https://github.com/yourusername/mbxai.git
|
8
|
-
Author: MBX AI
|
9
|
-
License: MIT
|
10
|
-
License-File: LICENSE
|
11
|
-
Classifier: Development Status :: 4 - Beta
|
12
|
-
Classifier: Operating System :: OS Independent
|
13
|
-
Classifier: Programming Language :: Python
|
14
|
-
Classifier: Programming Language :: Python :: 3.12
|
15
|
-
Requires-Python: >=3.12
|
16
|
-
Requires-Dist: fastapi>=0.115.12
|
17
|
-
Requires-Dist: httpx>=0.27.0
|
18
|
-
Requires-Dist: mcp>=1.7.1
|
19
|
-
Requires-Dist: openai>=1.77.0
|
20
|
-
Requires-Dist: pydantic-settings>=2.9.1
|
21
|
-
Requires-Dist: pydantic>=2.9.1
|
22
|
-
Requires-Dist: python-multipart>=0.0.20
|
23
|
-
Requires-Dist: sse-starlette>=2.3.4
|
24
|
-
Requires-Dist: starlette>=0.46.2
|
25
|
-
Requires-Dist: typing-inspection<=0.4.0
|
26
|
-
Requires-Dist: uvicorn>=0.34.2
|
27
|
-
Provides-Extra: dev
|
28
|
-
Requires-Dist: black>=24.3.0; extra == 'dev'
|
29
|
-
Requires-Dist: isort>=5.13.2; extra == 'dev'
|
30
|
-
Requires-Dist: mypy>=1.8.0; extra == 'dev'
|
31
|
-
Requires-Dist: pytest-asyncio>=0.26.0; extra == 'dev'
|
32
|
-
Requires-Dist: pytest-cov>=6.1.1; extra == 'dev'
|
33
|
-
Requires-Dist: pytest>=8.3.5; extra == 'dev'
|
34
|
-
Description-Content-Type: text/markdown
|
35
|
-
|
36
|
-
# MBX AI
|
37
|
-
|
38
|
-
A Python library for building AI applications with LLMs.
|
39
|
-
|
40
|
-
## Features
|
41
|
-
|
42
|
-
- **OpenRouter Integration**: Connect to various LLM providers through OpenRouter
|
43
|
-
- **Intelligent Agent System**: AgentClient with dialog-based thinking, question generation, and quality iteration
|
44
|
-
- **Tool Integration**: Easily integrate tools with LLMs using the Model Context Protocol (MCP)
|
45
|
-
- **Structured Output**: Get structured, typed responses from LLMs
|
46
|
-
- **Chat Interface**: Simple chat interface for interacting with LLMs
|
47
|
-
- **FastAPI Server**: Built-in FastAPI server for tool integration
|
48
|
-
|
49
|
-
## Installation
|
50
|
-
|
51
|
-
```bash
|
52
|
-
pip install mbxai
|
53
|
-
```
|
54
|
-
|
55
|
-
## Quick Start
|
56
|
-
|
57
|
-
### Basic Usage
|
58
|
-
|
59
|
-
```python
|
60
|
-
from mbxai import OpenRouterClient
|
61
|
-
|
62
|
-
# Initialize the client
|
63
|
-
client = OpenRouterClient(api_key="your-api-key")
|
64
|
-
|
65
|
-
# Chat with an LLM
|
66
|
-
response = await client.chat([
|
67
|
-
{"role": "user", "content": "Hello, how are you?"}
|
68
|
-
])
|
69
|
-
print(response.choices[0].message.content)
|
70
|
-
```
|
71
|
-
|
72
|
-
### Quick Agent Example
|
73
|
-
|
74
|
-
```python
|
75
|
-
from mbxai import AgentClient, OpenRouterClient
|
76
|
-
from pydantic import BaseModel, Field
|
77
|
-
|
78
|
-
class TravelPlan(BaseModel):
|
79
|
-
destination: str = Field(description="Travel destination")
|
80
|
-
activities: list[str] = Field(description="Recommended activities")
|
81
|
-
budget: str = Field(description="Estimated budget")
|
82
|
-
|
83
|
-
# Initialize agent
|
84
|
-
client = OpenRouterClient(token="your-api-key")
|
85
|
-
agent = AgentClient(client)
|
86
|
-
|
87
|
-
# Get intelligent response with automatic quality improvement
|
88
|
-
response = agent.agent(
|
89
|
-
prompt="Plan a weekend trip to a mountain destination",
|
90
|
-
final_response_structure=TravelPlan,
|
91
|
-
ask_questions=False
|
92
|
-
)
|
93
|
-
|
94
|
-
plan = response.final_response
|
95
|
-
print(f"Destination: {plan.destination}")
|
96
|
-
print(f"Activities: {', '.join(plan.activities)}")
|
97
|
-
```
|
98
|
-
|
99
|
-
### Using Tools
|
100
|
-
|
101
|
-
```python
|
102
|
-
from mbxai import OpenRouterClient, ToolClient
|
103
|
-
from pydantic import BaseModel
|
104
|
-
|
105
|
-
# Define your tool's input and output models
|
106
|
-
class CalculatorInput(BaseModel):
|
107
|
-
a: float
|
108
|
-
b: float
|
109
|
-
|
110
|
-
class CalculatorOutput(BaseModel):
|
111
|
-
result: float
|
112
|
-
|
113
|
-
# Create a calculator tool
|
114
|
-
async def calculator(input: CalculatorInput) -> CalculatorOutput:
|
115
|
-
return CalculatorOutput(result=input.a + input.b)
|
116
|
-
|
117
|
-
# Initialize the client with tools
|
118
|
-
client = ToolClient(OpenRouterClient(api_key="your-api-key"))
|
119
|
-
client.add_tool(calculator)
|
120
|
-
|
121
|
-
# Use the tool in a chat
|
122
|
-
response = await client.chat([
|
123
|
-
{"role": "user", "content": "What is 2 + 3?"}
|
124
|
-
])
|
125
|
-
print(response.choices[0].message.content)
|
126
|
-
```
|
127
|
-
|
128
|
-
### Using MCP (Model Context Protocol)
|
129
|
-
|
130
|
-
```python
|
131
|
-
from mbxai import OpenRouterClient, MCPClient
|
132
|
-
from mbxai.mcp import MCPServer
|
133
|
-
from mcp.server.fastmcp import FastMCP
|
134
|
-
from pydantic import BaseModel
|
135
|
-
|
136
|
-
# Define your tool's input and output models
|
137
|
-
class CalculatorInput(BaseModel):
|
138
|
-
a: float
|
139
|
-
b: float
|
140
|
-
|
141
|
-
class CalculatorOutput(BaseModel):
|
142
|
-
result: float
|
143
|
-
|
144
|
-
# Create a FastMCP instance
|
145
|
-
mcp = FastMCP("calculator-service")
|
146
|
-
|
147
|
-
# Create a calculator tool
|
148
|
-
@mcp.tool()
|
149
|
-
async def calculator(argument: CalculatorInput) -> CalculatorOutput:
|
150
|
-
return CalculatorOutput(result=argument.a + argument.b)
|
151
|
-
|
152
|
-
# Start the MCP server
|
153
|
-
server = MCPServer("calculator-service")
|
154
|
-
await server.add_tool(calculator)
|
155
|
-
await server.start()
|
156
|
-
|
157
|
-
# Initialize the MCP client
|
158
|
-
client = MCPClient(OpenRouterClient(api_key="your-api-key"))
|
159
|
-
await client.register_mcp_server("calculator-service", "http://localhost:8000")
|
160
|
-
|
161
|
-
# Use the tool in a chat
|
162
|
-
response = await client.chat([
|
163
|
-
{"role": "user", "content": "What is 2 + 3?"}
|
164
|
-
])
|
165
|
-
print(response.choices[0].message.content)
|
166
|
-
```
|
167
|
-
|
168
|
-
### Using AgentClient (Intelligent Dialog System)
|
169
|
-
|
170
|
-
The `AgentClient` provides an intelligent dialog-based thinking process that can ask clarifying questions, iterate on responses, and provide structured outputs.
|
171
|
-
|
172
|
-
#### Basic Agent Usage
|
173
|
-
|
174
|
-
```python
|
175
|
-
from mbxai import AgentClient, OpenRouterClient
|
176
|
-
from pydantic import BaseModel, Field
|
177
|
-
|
178
|
-
# Define your response structure
|
179
|
-
class BookRecommendation(BaseModel):
|
180
|
-
title: str = Field(description="The title of the recommended book")
|
181
|
-
author: str = Field(description="The author of the book")
|
182
|
-
genre: str = Field(description="The genre of the book")
|
183
|
-
reason: str = Field(description="Why this book is recommended")
|
184
|
-
|
185
|
-
# Initialize the agent
|
186
|
-
client = OpenRouterClient(token="your-api-key")
|
187
|
-
agent = AgentClient(client)
|
188
|
-
|
189
|
-
# Get a recommendation with questions
|
190
|
-
response = agent.agent(
|
191
|
-
prompt="I want a book recommendation",
|
192
|
-
final_response_structure=BookRecommendation,
|
193
|
-
ask_questions=True # Agent will ask clarifying questions
|
194
|
-
)
|
195
|
-
|
196
|
-
if response.has_questions():
|
197
|
-
# Display questions to user
|
198
|
-
for question in response.questions:
|
199
|
-
print(f"Q: {question.question}")
|
200
|
-
|
201
|
-
# Collect answers and continue
|
202
|
-
from mbxai import AnswerList, Answer
|
203
|
-
answers = AnswerList(answers=[
|
204
|
-
Answer(key="genre", answer="I love science fiction"),
|
205
|
-
Answer(key="complexity", answer="I prefer complex narratives")
|
206
|
-
])
|
207
|
-
|
208
|
-
# Continue the conversation
|
209
|
-
final_response = agent.answer_to_agent(response.agent_id, answers)
|
210
|
-
book_rec = final_response.final_response
|
211
|
-
print(f"Recommended: {book_rec.title} by {book_rec.author}")
|
212
|
-
else:
|
213
|
-
# Direct response without questions
|
214
|
-
book_rec = response.final_response
|
215
|
-
print(f"Recommended: {book_rec.title} by {book_rec.author}")
|
216
|
-
```
|
217
|
-
|
218
|
-
#### Agent with Tool Integration
|
219
|
-
|
220
|
-
```python
|
221
|
-
from mbxai import AgentClient, ToolClient, OpenRouterClient
|
222
|
-
|
223
|
-
# Initialize with tool support
|
224
|
-
openrouter_client = OpenRouterClient(token="your-api-key")
|
225
|
-
tool_client = ToolClient(openrouter_client)
|
226
|
-
agent = AgentClient(tool_client)
|
227
|
-
|
228
|
-
# Register tools via the agent (schema auto-generated!)
|
229
|
-
def get_weather(location: str, unit: str = "fahrenheit") -> dict:
|
230
|
-
"""Get weather information for a location.
|
231
|
-
|
232
|
-
Args:
|
233
|
-
location: The city or location name
|
234
|
-
unit: Temperature unit (fahrenheit or celsius)
|
235
|
-
"""
|
236
|
-
return {"location": location, "temperature": "72°F", "conditions": "Sunny"}
|
237
|
-
|
238
|
-
agent.register_tool(
|
239
|
-
name="get_weather",
|
240
|
-
description="Get current weather for a location",
|
241
|
-
function=get_weather
|
242
|
-
# Schema automatically generated from function signature!
|
243
|
-
)
|
244
|
-
|
245
|
-
# Use agent with tools
|
246
|
-
class WeatherResponse(BaseModel):
|
247
|
-
location: str = Field(description="The location")
|
248
|
-
weather: str = Field(description="Weather description")
|
249
|
-
recommendations: list[str] = Field(description="Clothing recommendations")
|
250
|
-
|
251
|
-
response = agent.agent(
|
252
|
-
prompt="What's the weather in San Francisco and what should I wear?",
|
253
|
-
final_response_structure=WeatherResponse,
|
254
|
-
ask_questions=False
|
255
|
-
)
|
256
|
-
|
257
|
-
weather_info = response.final_response
|
258
|
-
print(f"Weather: {weather_info.weather}")
|
259
|
-
```
|
260
|
-
|
261
|
-
#### Agent Configuration
|
262
|
-
|
263
|
-
```python
|
264
|
-
# Configure quality iterations (default: 2)
|
265
|
-
agent = AgentClient(
|
266
|
-
ai_client=openrouter_client,
|
267
|
-
max_iterations=3 # More iterations = higher quality, slower response
|
268
|
-
)
|
269
|
-
|
270
|
-
# Different configurations for different use cases:
|
271
|
-
# max_iterations=0: Fastest, basic quality (chatbots)
|
272
|
-
# max_iterations=1: Fast, good quality (content generation)
|
273
|
-
# max_iterations=2: Balanced (default, recommended)
|
274
|
-
# max_iterations=3+: Highest quality (analysis, reports)
|
275
|
-
```
|
276
|
-
|
277
|
-
#### Agent with MCP Client
|
278
|
-
|
279
|
-
```python
|
280
|
-
from mbxai import AgentClient, MCPClient
|
281
|
-
|
282
|
-
# Initialize with MCP support
|
283
|
-
mcp_client = MCPClient(OpenRouterClient(token="your-api-key"))
|
284
|
-
agent = AgentClient(mcp_client)
|
285
|
-
|
286
|
-
# Register MCP servers
|
287
|
-
agent.register_mcp_server("data-analysis", "http://localhost:8000")
|
288
|
-
|
289
|
-
# Register individual tools
|
290
|
-
agent.register_tool("analyze_data", "Analyze dataset", analyze_function, schema)
|
291
|
-
|
292
|
-
# Use agent with full MCP capabilities
|
293
|
-
response = agent.agent(
|
294
|
-
prompt="Analyze the sales data and provide insights",
|
295
|
-
final_response_structure=AnalysisReport,
|
296
|
-
ask_questions=True
|
297
|
-
)
|
298
|
-
```
|
299
|
-
|
300
|
-
#### Agent Features
|
301
|
-
|
302
|
-
- **Intelligent Questions**: Automatically generates clarifying questions when needed
|
303
|
-
- **Quality Iteration**: Improves responses through multiple AI review cycles
|
304
|
-
- **Tool Integration**: Seamlessly works with ToolClient and MCPClient
|
305
|
-
- **Structured Output**: Always returns properly typed Pydantic models
|
306
|
-
- **Session Management**: Handles multi-turn conversations with question/answer flow
|
307
|
-
- **Configurable**: Adjust quality vs speed with max_iterations parameter
|
308
|
-
|
309
|
-
#### Supported AI Clients
|
310
|
-
|
311
|
-
| Client | Structured Responses | Tool Registration | MCP Server Registration |
|
312
|
-
|--------|---------------------|-------------------|------------------------|
|
313
|
-
| OpenRouterClient | ✅ | ❌ | ❌ |
|
314
|
-
| ToolClient | ✅ | ✅ | ❌ |
|
315
|
-
| MCPClient | ✅ | ✅ | ✅ |
|
316
|
-
|
317
|
-
## Development
|
318
|
-
|
319
|
-
### Setup
|
320
|
-
|
321
|
-
1. Clone the repository:
|
322
|
-
```bash
|
323
|
-
git clone https://github.com/yourusername/mbxai.git
|
324
|
-
cd mbxai
|
325
|
-
```
|
326
|
-
|
327
|
-
2. Create a virtual environment:
|
328
|
-
```bash
|
329
|
-
python -m venv .venv
|
330
|
-
source .venv/bin/activate # On Windows: .venv\Scripts\activate
|
331
|
-
```
|
332
|
-
|
333
|
-
3. Install dependencies:
|
334
|
-
```bash
|
335
|
-
pip install -e ".[dev]"
|
336
|
-
```
|
337
|
-
|
338
|
-
### Running Tests
|
339
|
-
|
340
|
-
```bash
|
341
|
-
pytest tests/
|
342
|
-
```
|
343
|
-
|
344
|
-
## License
|
345
|
-
|
346
|
-
MIT License
|
File without changes
|
File without changes
|