mbxai 1.6.0__tar.gz → 2.0.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45) hide show
  1. mbxai-2.0.0/PKG-INFO +346 -0
  2. mbxai-2.0.0/README.md +311 -0
  3. {mbxai-1.6.0 → mbxai-2.0.0}/pyproject.toml +2 -2
  4. {mbxai-1.6.0 → mbxai-2.0.0}/setup.py +1 -1
  5. mbxai-2.0.0/src/mbxai/__init__.py +22 -0
  6. mbxai-2.0.0/src/mbxai/agent/__init__.py +8 -0
  7. mbxai-2.0.0/src/mbxai/agent/client.py +450 -0
  8. mbxai-2.0.0/src/mbxai/agent/models.py +56 -0
  9. mbxai-2.0.0/src/mbxai/examples/agent_example.py +152 -0
  10. mbxai-2.0.0/src/mbxai/examples/agent_iterations_example.py +173 -0
  11. mbxai-2.0.0/src/mbxai/examples/agent_tool_registration_example.py +247 -0
  12. mbxai-2.0.0/src/mbxai/examples/agent_validation_example.py +123 -0
  13. mbxai-2.0.0/src/mbxai/examples/auto_schema_example.py +228 -0
  14. mbxai-2.0.0/src/mbxai/examples/simple_agent_test.py +168 -0
  15. {mbxai-1.6.0 → mbxai-2.0.0}/src/mbxai/mcp/server.py +1 -1
  16. {mbxai-1.6.0 → mbxai-2.0.0}/src/mbxai/tools/client.py +57 -5
  17. mbxai-1.6.0/PKG-INFO +0 -169
  18. mbxai-1.6.0/README.md +0 -134
  19. mbxai-1.6.0/src/mbxai/__init__.py +0 -5
  20. {mbxai-1.6.0 → mbxai-2.0.0}/.gitignore +0 -0
  21. {mbxai-1.6.0 → mbxai-2.0.0}/LICENSE +0 -0
  22. {mbxai-1.6.0 → mbxai-2.0.0}/src/mbxai/core.py +0 -0
  23. {mbxai-1.6.0 → mbxai-2.0.0}/src/mbxai/examples/mcp/mcp_client_example.py +0 -0
  24. {mbxai-1.6.0 → mbxai-2.0.0}/src/mbxai/examples/mcp/mcp_server_example.py +0 -0
  25. {mbxai-1.6.0 → mbxai-2.0.0}/src/mbxai/examples/openrouter_example.py +0 -0
  26. {mbxai-1.6.0 → mbxai-2.0.0}/src/mbxai/examples/parse_example.py +0 -0
  27. {mbxai-1.6.0 → mbxai-2.0.0}/src/mbxai/examples/parse_tool_example.py +0 -0
  28. {mbxai-1.6.0 → mbxai-2.0.0}/src/mbxai/examples/request.json +0 -0
  29. {mbxai-1.6.0 → mbxai-2.0.0}/src/mbxai/examples/response.json +0 -0
  30. {mbxai-1.6.0 → mbxai-2.0.0}/src/mbxai/examples/send_request.py +0 -0
  31. {mbxai-1.6.0 → mbxai-2.0.0}/src/mbxai/examples/tool_client_example.py +0 -0
  32. {mbxai-1.6.0 → mbxai-2.0.0}/src/mbxai/mcp/__init__.py +0 -0
  33. {mbxai-1.6.0 → mbxai-2.0.0}/src/mbxai/mcp/client.py +0 -0
  34. {mbxai-1.6.0 → mbxai-2.0.0}/src/mbxai/mcp/example.py +0 -0
  35. {mbxai-1.6.0 → mbxai-2.0.0}/src/mbxai/openrouter/__init__.py +0 -0
  36. {mbxai-1.6.0 → mbxai-2.0.0}/src/mbxai/openrouter/client.py +0 -0
  37. {mbxai-1.6.0 → mbxai-2.0.0}/src/mbxai/openrouter/config.py +0 -0
  38. {mbxai-1.6.0 → mbxai-2.0.0}/src/mbxai/openrouter/models.py +0 -0
  39. {mbxai-1.6.0 → mbxai-2.0.0}/src/mbxai/openrouter/schema.py +0 -0
  40. {mbxai-1.6.0 → mbxai-2.0.0}/src/mbxai/tools/__init__.py +0 -0
  41. {mbxai-1.6.0 → mbxai-2.0.0}/src/mbxai/tools/example.py +0 -0
  42. {mbxai-1.6.0 → mbxai-2.0.0}/src/mbxai/tools/types.py +0 -0
  43. {mbxai-1.6.0 → mbxai-2.0.0}/tests/test_mcp_tool_registration.py +0 -0
  44. {mbxai-1.6.0 → mbxai-2.0.0}/tests/test_real_mcp_schema.py +0 -0
  45. {mbxai-1.6.0 → mbxai-2.0.0}/tests/test_schema_conversion.py +0 -0
mbxai-2.0.0/PKG-INFO ADDED
@@ -0,0 +1,346 @@
1
+ Metadata-Version: 2.4
2
+ Name: mbxai
3
+ Version: 2.0.0
4
+ Summary: MBX AI SDK
5
+ Project-URL: Homepage, https://www.mibexx.de
6
+ Project-URL: Documentation, https://www.mibexx.de
7
+ Project-URL: Repository, https://github.com/yourusername/mbxai.git
8
+ Author: MBX AI
9
+ License: MIT
10
+ License-File: LICENSE
11
+ Classifier: Development Status :: 4 - Beta
12
+ Classifier: Operating System :: OS Independent
13
+ Classifier: Programming Language :: Python
14
+ Classifier: Programming Language :: Python :: 3.12
15
+ Requires-Python: >=3.12
16
+ Requires-Dist: fastapi>=0.115.12
17
+ Requires-Dist: httpx>=0.27.0
18
+ Requires-Dist: mcp>=1.7.1
19
+ Requires-Dist: openai>=1.77.0
20
+ Requires-Dist: pydantic-settings>=2.9.1
21
+ Requires-Dist: pydantic>=2.9.1
22
+ Requires-Dist: python-multipart>=0.0.20
23
+ Requires-Dist: sse-starlette>=2.3.4
24
+ Requires-Dist: starlette>=0.46.2
25
+ Requires-Dist: typing-inspection<=0.4.0
26
+ Requires-Dist: uvicorn>=0.34.2
27
+ Provides-Extra: dev
28
+ Requires-Dist: black>=24.3.0; extra == 'dev'
29
+ Requires-Dist: isort>=5.13.2; extra == 'dev'
30
+ Requires-Dist: mypy>=1.8.0; extra == 'dev'
31
+ Requires-Dist: pytest-asyncio>=0.26.0; extra == 'dev'
32
+ Requires-Dist: pytest-cov>=6.1.1; extra == 'dev'
33
+ Requires-Dist: pytest>=8.3.5; extra == 'dev'
34
+ Description-Content-Type: text/markdown
35
+
36
+ # MBX AI
37
+
38
+ A Python library for building AI applications with LLMs.
39
+
40
+ ## Features
41
+
42
+ - **OpenRouter Integration**: Connect to various LLM providers through OpenRouter
43
+ - **Intelligent Agent System**: AgentClient with dialog-based thinking, question generation, and quality iteration
44
+ - **Tool Integration**: Easily integrate tools with LLMs using the Model Context Protocol (MCP)
45
+ - **Structured Output**: Get structured, typed responses from LLMs
46
+ - **Chat Interface**: Simple chat interface for interacting with LLMs
47
+ - **FastAPI Server**: Built-in FastAPI server for tool integration
48
+
49
+ ## Installation
50
+
51
+ ```bash
52
+ pip install mbxai
53
+ ```
54
+
55
+ ## Quick Start
56
+
57
+ ### Basic Usage
58
+
59
+ ```python
60
+ from mbxai import OpenRouterClient
61
+
62
+ # Initialize the client
63
+ client = OpenRouterClient(token="your-api-key")
64
+
65
+ # Chat with an LLM
66
+ response = await client.chat([
67
+ {"role": "user", "content": "Hello, how are you?"}
68
+ ])
69
+ print(response.choices[0].message.content)
70
+ ```
71
+
72
+ ### Quick Agent Example
73
+
74
+ ```python
75
+ from mbxai import AgentClient, OpenRouterClient
76
+ from pydantic import BaseModel, Field
77
+
78
+ class TravelPlan(BaseModel):
79
+ destination: str = Field(description="Travel destination")
80
+ activities: list[str] = Field(description="Recommended activities")
81
+ budget: str = Field(description="Estimated budget")
82
+
83
+ # Initialize agent
84
+ client = OpenRouterClient(token="your-api-key")
85
+ agent = AgentClient(client)
86
+
87
+ # Get intelligent response with automatic quality improvement
88
+ response = agent.agent(
89
+ prompt="Plan a weekend trip to a mountain destination",
90
+ final_response_structure=TravelPlan,
91
+ ask_questions=False
92
+ )
93
+
94
+ plan = response.final_response
95
+ print(f"Destination: {plan.destination}")
96
+ print(f"Activities: {', '.join(plan.activities)}")
97
+ ```
98
+
99
+ ### Using Tools
100
+
101
+ ```python
102
+ from mbxai import OpenRouterClient, ToolClient
103
+ from pydantic import BaseModel
104
+
105
+ # Define your tool's input and output models
106
+ class CalculatorInput(BaseModel):
107
+ a: float
108
+ b: float
109
+
110
+ class CalculatorOutput(BaseModel):
111
+ result: float
112
+
113
+ # Create a calculator tool
114
+ async def calculator(input: CalculatorInput) -> CalculatorOutput:
115
+ return CalculatorOutput(result=input.a + input.b)
116
+
117
+ # Initialize the client with tools
118
+ client = ToolClient(OpenRouterClient(token="your-api-key"))
119
+ client.add_tool(calculator)
120
+
121
+ # Use the tool in a chat
122
+ response = await client.chat([
123
+ {"role": "user", "content": "What is 2 + 3?"}
124
+ ])
125
+ print(response.choices[0].message.content)
126
+ ```
127
+
128
+ ### Using MCP (Model Context Protocol)
129
+
130
+ ```python
131
+ from mbxai import OpenRouterClient, MCPClient
132
+ from mbxai.mcp import MCPServer
133
+ from mcp.server.fastmcp import FastMCP
134
+ from pydantic import BaseModel
135
+
136
+ # Define your tool's input and output models
137
+ class CalculatorInput(BaseModel):
138
+ a: float
139
+ b: float
140
+
141
+ class CalculatorOutput(BaseModel):
142
+ result: float
143
+
144
+ # Create a FastMCP instance
145
+ mcp = FastMCP("calculator-service")
146
+
147
+ # Create a calculator tool
148
+ @mcp.tool()
149
+ async def calculator(argument: CalculatorInput) -> CalculatorOutput:
150
+ return CalculatorOutput(result=argument.a + argument.b)
151
+
152
+ # Start the MCP server
153
+ server = MCPServer("calculator-service")
154
+ await server.add_tool(calculator)
155
+ await server.start()
156
+
157
+ # Initialize the MCP client
158
+ client = MCPClient(OpenRouterClient(token="your-api-key"))
159
+ await client.register_mcp_server("calculator-service", "http://localhost:8000")
160
+
161
+ # Use the tool in a chat
162
+ response = await client.chat([
163
+ {"role": "user", "content": "What is 2 + 3?"}
164
+ ])
165
+ print(response.choices[0].message.content)
166
+ ```
167
+
168
+ ### Using AgentClient (Intelligent Dialog System)
169
+
170
+ The `AgentClient` provides an intelligent dialog-based thinking process that can ask clarifying questions, iterate on responses, and provide structured outputs.
171
+
172
+ #### Basic Agent Usage
173
+
174
+ ```python
175
+ from mbxai import AgentClient, OpenRouterClient
176
+ from pydantic import BaseModel, Field
177
+
178
+ # Define your response structure
179
+ class BookRecommendation(BaseModel):
180
+ title: str = Field(description="The title of the recommended book")
181
+ author: str = Field(description="The author of the book")
182
+ genre: str = Field(description="The genre of the book")
183
+ reason: str = Field(description="Why this book is recommended")
184
+
185
+ # Initialize the agent
186
+ client = OpenRouterClient(token="your-api-key")
187
+ agent = AgentClient(client)
188
+
189
+ # Get a recommendation with questions
190
+ response = agent.agent(
191
+ prompt="I want a book recommendation",
192
+ final_response_structure=BookRecommendation,
193
+ ask_questions=True # Agent will ask clarifying questions
194
+ )
195
+
196
+ if response.has_questions():
197
+ # Display questions to user
198
+ for question in response.questions:
199
+ print(f"Q: {question.question}")
200
+
201
+ # Collect answers and continue
202
+ from mbxai import AnswerList, Answer
203
+ answers = AnswerList(answers=[
204
+ Answer(key="genre", answer="I love science fiction"),
205
+ Answer(key="complexity", answer="I prefer complex narratives")
206
+ ])
207
+
208
+ # Continue the conversation
209
+ final_response = agent.answer_to_agent(response.agent_id, answers)
210
+ book_rec = final_response.final_response
211
+ print(f"Recommended: {book_rec.title} by {book_rec.author}")
212
+ else:
213
+ # Direct response without questions
214
+ book_rec = response.final_response
215
+ print(f"Recommended: {book_rec.title} by {book_rec.author}")
216
+ ```
217
+
218
+ #### Agent with Tool Integration
219
+
220
+ ```python
221
+ from mbxai import AgentClient, ToolClient, OpenRouterClient
222
+
223
+ # Initialize with tool support
224
+ openrouter_client = OpenRouterClient(token="your-api-key")
225
+ tool_client = ToolClient(openrouter_client)
226
+ agent = AgentClient(tool_client)
227
+
228
+ # Register tools via the agent (schema auto-generated!)
229
+ def get_weather(location: str, unit: str = "fahrenheit") -> dict:
230
+ """Get weather information for a location.
231
+
232
+ Args:
233
+ location: The city or location name
234
+ unit: Temperature unit (fahrenheit or celsius)
235
+ """
236
+ return {"location": location, "temperature": "72°F", "conditions": "Sunny"}
237
+
238
+ agent.register_tool(
239
+ name="get_weather",
240
+ description="Get current weather for a location",
241
+ function=get_weather
242
+ # Schema automatically generated from function signature!
243
+ )
244
+
245
+ # Use agent with tools
246
+ class WeatherResponse(BaseModel):
247
+ location: str = Field(description="The location")
248
+ weather: str = Field(description="Weather description")
249
+ recommendations: list[str] = Field(description="Clothing recommendations")
250
+
251
+ response = agent.agent(
252
+ prompt="What's the weather in San Francisco and what should I wear?",
253
+ final_response_structure=WeatherResponse,
254
+ ask_questions=False
255
+ )
256
+
257
+ weather_info = response.final_response
258
+ print(f"Weather: {weather_info.weather}")
259
+ ```
260
+
261
+ #### Agent Configuration
262
+
263
+ ```python
264
+ # Configure quality iterations (default: 2)
265
+ agent = AgentClient(
266
+ ai_client=openrouter_client,
267
+ max_iterations=3 # More iterations = higher quality, slower response
268
+ )
269
+
270
+ # Different configurations for different use cases:
271
+ # max_iterations=0: Fastest, basic quality (chatbots)
272
+ # max_iterations=1: Fast, good quality (content generation)
273
+ # max_iterations=2: Balanced (default, recommended)
274
+ # max_iterations=3+: Highest quality (analysis, reports)
275
+ ```
276
+
277
+ #### Agent with MCP Client
278
+
279
+ ```python
280
+ from mbxai import AgentClient, MCPClient
281
+
282
+ # Initialize with MCP support
283
+ mcp_client = MCPClient(OpenRouterClient(token="your-api-key"))
284
+ agent = AgentClient(mcp_client)
285
+
286
+ # Register MCP servers
287
+ agent.register_mcp_server("data-analysis", "http://localhost:8000")
288
+
289
+ # Register individual tools
290
+ agent.register_tool("analyze_data", "Analyze dataset", analyze_function, schema)
291
+
292
+ # Use agent with full MCP capabilities
293
+ response = agent.agent(
294
+ prompt="Analyze the sales data and provide insights",
295
+ final_response_structure=AnalysisReport,
296
+ ask_questions=True
297
+ )
298
+ ```
299
+
300
+ #### Agent Features
301
+
302
+ - **Intelligent Questions**: Automatically generates clarifying questions when needed
303
+ - **Quality Iteration**: Improves responses through multiple AI review cycles
304
+ - **Tool Integration**: Seamlessly works with ToolClient and MCPClient
305
+ - **Structured Output**: Always returns properly typed Pydantic models
306
+ - **Session Management**: Handles multi-turn conversations with question/answer flow
307
+ - **Configurable**: Adjust quality vs speed with max_iterations parameter
308
+
309
+ #### Supported AI Clients
310
+
311
+ | Client | Structured Responses | Tool Registration | MCP Server Registration |
312
+ |--------|---------------------|-------------------|------------------------|
313
+ | OpenRouterClient | ✅ | ❌ | ❌ |
314
+ | ToolClient | ✅ | ✅ | ❌ |
315
+ | MCPClient | ✅ | ✅ | ✅ |
316
+
317
+ ## Development
318
+
319
+ ### Setup
320
+
321
+ 1. Clone the repository:
322
+ ```bash
323
+ git clone https://github.com/yourusername/mbxai.git
324
+ cd mbxai
325
+ ```
326
+
327
+ 2. Create a virtual environment:
328
+ ```bash
329
+ python -m venv .venv
330
+ source .venv/bin/activate # On Windows: .venv\Scripts\activate
331
+ ```
332
+
333
+ 3. Install dependencies:
334
+ ```bash
335
+ pip install -e ".[dev]"
336
+ ```
337
+
338
+ ### Running Tests
339
+
340
+ ```bash
341
+ pytest tests/
342
+ ```
343
+
344
+ ## License
345
+
346
+ MIT License
mbxai-2.0.0/README.md ADDED
@@ -0,0 +1,311 @@
1
+ # MBX AI
2
+
3
+ A Python library for building AI applications with LLMs.
4
+
5
+ ## Features
6
+
7
+ - **OpenRouter Integration**: Connect to various LLM providers through OpenRouter
8
+ - **Intelligent Agent System**: AgentClient with dialog-based thinking, question generation, and quality iteration
9
+ - **Tool Integration**: Easily integrate tools with LLMs using the Model Context Protocol (MCP)
10
+ - **Structured Output**: Get structured, typed responses from LLMs
11
+ - **Chat Interface**: Simple chat interface for interacting with LLMs
12
+ - **FastAPI Server**: Built-in FastAPI server for tool integration
13
+
14
+ ## Installation
15
+
16
+ ```bash
17
+ pip install mbxai
18
+ ```
19
+
20
+ ## Quick Start
21
+
22
+ ### Basic Usage
23
+
24
+ ```python
25
+ from mbxai import OpenRouterClient
26
+
27
+ # Initialize the client
28
+ client = OpenRouterClient(token="your-api-key")
29
+
30
+ # Chat with an LLM
31
+ response = await client.chat([
32
+ {"role": "user", "content": "Hello, how are you?"}
33
+ ])
34
+ print(response.choices[0].message.content)
35
+ ```
36
+
37
+ ### Quick Agent Example
38
+
39
+ ```python
40
+ from mbxai import AgentClient, OpenRouterClient
41
+ from pydantic import BaseModel, Field
42
+
43
+ class TravelPlan(BaseModel):
44
+ destination: str = Field(description="Travel destination")
45
+ activities: list[str] = Field(description="Recommended activities")
46
+ budget: str = Field(description="Estimated budget")
47
+
48
+ # Initialize agent
49
+ client = OpenRouterClient(token="your-api-key")
50
+ agent = AgentClient(client)
51
+
52
+ # Get intelligent response with automatic quality improvement
53
+ response = agent.agent(
54
+ prompt="Plan a weekend trip to a mountain destination",
55
+ final_response_structure=TravelPlan,
56
+ ask_questions=False
57
+ )
58
+
59
+ plan = response.final_response
60
+ print(f"Destination: {plan.destination}")
61
+ print(f"Activities: {', '.join(plan.activities)}")
62
+ ```
63
+
64
+ ### Using Tools
65
+
66
+ ```python
67
+ from mbxai import OpenRouterClient, ToolClient
68
+ from pydantic import BaseModel
69
+
70
+ # Define your tool's input and output models
71
+ class CalculatorInput(BaseModel):
72
+ a: float
73
+ b: float
74
+
75
+ class CalculatorOutput(BaseModel):
76
+ result: float
77
+
78
+ # Create a calculator tool
79
+ async def calculator(input: CalculatorInput) -> CalculatorOutput:
80
+ return CalculatorOutput(result=input.a + input.b)
81
+
82
+ # Initialize the client with tools
83
+ client = ToolClient(OpenRouterClient(token="your-api-key"))
84
+ client.add_tool(calculator)
85
+
86
+ # Use the tool in a chat
87
+ response = await client.chat([
88
+ {"role": "user", "content": "What is 2 + 3?"}
89
+ ])
90
+ print(response.choices[0].message.content)
91
+ ```
92
+
93
+ ### Using MCP (Model Context Protocol)
94
+
95
+ ```python
96
+ from mbxai import OpenRouterClient, MCPClient
97
+ from mbxai.mcp import MCPServer
98
+ from mcp.server.fastmcp import FastMCP
99
+ from pydantic import BaseModel
100
+
101
+ # Define your tool's input and output models
102
+ class CalculatorInput(BaseModel):
103
+ a: float
104
+ b: float
105
+
106
+ class CalculatorOutput(BaseModel):
107
+ result: float
108
+
109
+ # Create a FastMCP instance
110
+ mcp = FastMCP("calculator-service")
111
+
112
+ # Create a calculator tool
113
+ @mcp.tool()
114
+ async def calculator(argument: CalculatorInput) -> CalculatorOutput:
115
+ return CalculatorOutput(result=argument.a + argument.b)
116
+
117
+ # Start the MCP server
118
+ server = MCPServer("calculator-service")
119
+ await server.add_tool(calculator)
120
+ await server.start()
121
+
122
+ # Initialize the MCP client
123
+ client = MCPClient(OpenRouterClient(token="your-api-key"))
124
+ await client.register_mcp_server("calculator-service", "http://localhost:8000")
125
+
126
+ # Use the tool in a chat
127
+ response = await client.chat([
128
+ {"role": "user", "content": "What is 2 + 3?"}
129
+ ])
130
+ print(response.choices[0].message.content)
131
+ ```
132
+
133
+ ### Using AgentClient (Intelligent Dialog System)
134
+
135
+ The `AgentClient` provides an intelligent dialog-based thinking process that can ask clarifying questions, iterate on responses, and provide structured outputs.
136
+
137
+ #### Basic Agent Usage
138
+
139
+ ```python
140
+ from mbxai import AgentClient, OpenRouterClient
141
+ from pydantic import BaseModel, Field
142
+
143
+ # Define your response structure
144
+ class BookRecommendation(BaseModel):
145
+ title: str = Field(description="The title of the recommended book")
146
+ author: str = Field(description="The author of the book")
147
+ genre: str = Field(description="The genre of the book")
148
+ reason: str = Field(description="Why this book is recommended")
149
+
150
+ # Initialize the agent
151
+ client = OpenRouterClient(token="your-api-key")
152
+ agent = AgentClient(client)
153
+
154
+ # Get a recommendation with questions
155
+ response = agent.agent(
156
+ prompt="I want a book recommendation",
157
+ final_response_structure=BookRecommendation,
158
+ ask_questions=True # Agent will ask clarifying questions
159
+ )
160
+
161
+ if response.has_questions():
162
+ # Display questions to user
163
+ for question in response.questions:
164
+ print(f"Q: {question.question}")
165
+
166
+ # Collect answers and continue
167
+ from mbxai import AnswerList, Answer
168
+ answers = AnswerList(answers=[
169
+ Answer(key="genre", answer="I love science fiction"),
170
+ Answer(key="complexity", answer="I prefer complex narratives")
171
+ ])
172
+
173
+ # Continue the conversation
174
+ final_response = agent.answer_to_agent(response.agent_id, answers)
175
+ book_rec = final_response.final_response
176
+ print(f"Recommended: {book_rec.title} by {book_rec.author}")
177
+ else:
178
+ # Direct response without questions
179
+ book_rec = response.final_response
180
+ print(f"Recommended: {book_rec.title} by {book_rec.author}")
181
+ ```
182
+
183
+ #### Agent with Tool Integration
184
+
185
+ ```python
186
+ from mbxai import AgentClient, ToolClient, OpenRouterClient
187
+
188
+ # Initialize with tool support
189
+ openrouter_client = OpenRouterClient(token="your-api-key")
190
+ tool_client = ToolClient(openrouter_client)
191
+ agent = AgentClient(tool_client)
192
+
193
+ # Register tools via the agent (schema auto-generated!)
194
+ def get_weather(location: str, unit: str = "fahrenheit") -> dict:
195
+ """Get weather information for a location.
196
+
197
+ Args:
198
+ location: The city or location name
199
+ unit: Temperature unit (fahrenheit or celsius)
200
+ """
201
+ return {"location": location, "temperature": "72°F", "conditions": "Sunny"}
202
+
203
+ agent.register_tool(
204
+ name="get_weather",
205
+ description="Get current weather for a location",
206
+ function=get_weather
207
+ # Schema automatically generated from function signature!
208
+ )
209
+
210
+ # Use agent with tools
211
+ class WeatherResponse(BaseModel):
212
+ location: str = Field(description="The location")
213
+ weather: str = Field(description="Weather description")
214
+ recommendations: list[str] = Field(description="Clothing recommendations")
215
+
216
+ response = agent.agent(
217
+ prompt="What's the weather in San Francisco and what should I wear?",
218
+ final_response_structure=WeatherResponse,
219
+ ask_questions=False
220
+ )
221
+
222
+ weather_info = response.final_response
223
+ print(f"Weather: {weather_info.weather}")
224
+ ```
225
+
226
+ #### Agent Configuration
227
+
228
+ ```python
229
+ # Configure quality iterations (default: 2)
230
+ agent = AgentClient(
231
+ ai_client=openrouter_client,
232
+ max_iterations=3 # More iterations = higher quality, slower response
233
+ )
234
+
235
+ # Different configurations for different use cases:
236
+ # max_iterations=0: Fastest, basic quality (chatbots)
237
+ # max_iterations=1: Fast, good quality (content generation)
238
+ # max_iterations=2: Balanced (default, recommended)
239
+ # max_iterations=3+: Highest quality (analysis, reports)
240
+ ```
241
+
242
+ #### Agent with MCP Client
243
+
244
+ ```python
245
+ from mbxai import AgentClient, MCPClient
246
+
247
+ # Initialize with MCP support
248
+ mcp_client = MCPClient(OpenRouterClient(token="your-api-key"))
249
+ agent = AgentClient(mcp_client)
250
+
251
+ # Register MCP servers
252
+ agent.register_mcp_server("data-analysis", "http://localhost:8000")
253
+
254
+ # Register individual tools
255
+ agent.register_tool("analyze_data", "Analyze dataset", analyze_function, schema)
256
+
257
+ # Use agent with full MCP capabilities
258
+ response = agent.agent(
259
+ prompt="Analyze the sales data and provide insights",
260
+ final_response_structure=AnalysisReport,
261
+ ask_questions=True
262
+ )
263
+ ```
264
+
265
+ #### Agent Features
266
+
267
+ - **Intelligent Questions**: Automatically generates clarifying questions when needed
268
+ - **Quality Iteration**: Improves responses through multiple AI review cycles
269
+ - **Tool Integration**: Seamlessly works with ToolClient and MCPClient
270
+ - **Structured Output**: Always returns properly typed Pydantic models
271
+ - **Session Management**: Handles multi-turn conversations with question/answer flow
272
+ - **Configurable**: Adjust quality vs speed with max_iterations parameter
273
+
274
+ #### Supported AI Clients
275
+
276
+ | Client | Structured Responses | Tool Registration | MCP Server Registration |
277
+ |--------|---------------------|-------------------|------------------------|
278
+ | OpenRouterClient | ✅ | ❌ | ❌ |
279
+ | ToolClient | ✅ | ✅ | ❌ |
280
+ | MCPClient | ✅ | ✅ | ✅ |
281
+
282
+ ## Development
283
+
284
+ ### Setup
285
+
286
+ 1. Clone the repository:
287
+ ```bash
288
+ git clone https://github.com/yourusername/mbxai.git
289
+ cd mbxai
290
+ ```
291
+
292
+ 2. Create a virtual environment:
293
+ ```bash
294
+ python -m venv .venv
295
+ source .venv/bin/activate # On Windows: .venv\Scripts\activate
296
+ ```
297
+
298
+ 3. Install dependencies:
299
+ ```bash
300
+ pip install -e ".[dev]"
301
+ ```
302
+
303
+ ### Running Tests
304
+
305
+ ```bash
306
+ pytest tests/
307
+ ```
308
+
309
+ ## License
310
+
311
+ MIT License
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
4
4
 
5
5
  [project]
6
6
  name = "mbxai"
7
- version = "1.6.0"
7
+ version = "2.0.0"
8
8
  authors = [
9
9
  { name = "MBX AI" }
10
10
  ]
@@ -82,6 +82,6 @@ strict_equality = true
82
82
 
83
83
  [dependency-groups]
84
84
  dev = [
85
- "build>=1.6.0.post1",
85
+ "build>=2.0.0.post1",  # NOTE(review): looks like an over-eager project-version bump — "build" here is the PyPA build tool, whose version should not track the mbxai release; confirm the intended constraint (the previous "build>=1.6.0.post1" was likely correct)
86
86
  "twine>=6.1.0",
87
87
  ]
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
2
2
 
3
3
  setup(
4
4
  name="mbxai",
5
- version="1.6.0",
5
+ version="2.0.0",
6
6
  author="MBX AI",
7
7
  description="MBX AI SDK",
8
8
  long_description=open("README.md").read(),
@@ -0,0 +1,22 @@
1
+ """
2
+ MBX AI package.
3
+ """
4
+
5
+ from .agent import AgentClient, AgentResponse, Question, Result, AnswerList, Answer
6
+ from .openrouter import OpenRouterClient
7
+ from .tools import ToolClient
8
+ from .mcp import MCPClient
9
+
10
+ __version__ = "2.0.0"
11
+
12
+ __all__ = [
13
+ "AgentClient",
14
+ "AgentResponse",
15
+ "Question",
16
+ "Result",
17
+ "AnswerList",
18
+ "Answer",
19
+ "OpenRouterClient",
20
+ "ToolClient",
21
+ "MCPClient"
22
+ ]
@@ -0,0 +1,8 @@
1
+ """
2
+ Agent package for MBX AI.
3
+ """
4
+
5
+ from .client import AgentClient
6
+ from .models import AgentResponse, Question, Result, AnswerList, Answer, QuestionList, QualityCheck
7
+
8
+ __all__ = ["AgentClient", "AgentResponse", "Question", "Result", "AnswerList", "Answer", "QuestionList", "QualityCheck"]