mbxai-2.1.0-py3-none-any.whl → mbxai-2.1.1-py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions exactly as they appear in the public registry.
mbxai/__init__.py CHANGED
@@ -7,7 +7,7 @@ from .openrouter import OpenRouterClient
  from .tools import ToolClient
  from .mcp import MCPClient
 
- __version__ = "2.1.0"
+ __version__ = "2.1.1"
 
  __all__ = [
  "AgentClient",
mbxai/agent/client.py CHANGED
@@ -128,8 +128,10 @@ class AgentClient:
  # Combine conversation history with new messages
  if conversation_history:
  full_messages = conversation_history + messages
+ logger.debug(f"🔗 AI call with {len(conversation_history)} history messages + {len(messages)} new messages = {len(full_messages)} total")
  else:
  full_messages = messages
+ logger.debug(f"🔗 AI call with {len(messages)} messages (no history)")
  return self._ai_client.parse(full_messages, response_format)
 
  def _extract_token_usage(self, response: Any) -> TokenUsage:
@@ -185,7 +187,8 @@ class AgentClient:
  prompt: str,
  final_response_structure: Type[BaseModel],
  ask_questions: bool = True,
- agent_id: str = None
+ agent_id: str = None,
+ answers: AnswerList = None
  ) -> AgentResponse:
  """
  Process a prompt through the agent's thinking process.
@@ -195,6 +198,7 @@ class AgentClient:
  final_response_structure: Pydantic model defining the expected final response format
  ask_questions: Whether to ask clarifying questions (default: True)
  agent_id: Optional agent session ID to continue an existing conversation
+ answers: Optional answers to questions (when continuing a conversation with questions)
 
  Returns:
  AgentResponse containing either questions to ask or the final response
@@ -211,7 +215,21 @@ class AgentClient:
 
  # Check if this is a continuing conversation
  existing_session = self._agent_sessions.get(agent_id, {})
- conversation_history = existing_session.get("conversation_history", [])
+ conversation_history = existing_session.get("conversation_history", []).copy()
+
+ if conversation_history:
+ logger.info(f"📜 Agent {agent_id}: Loaded conversation history with {len(conversation_history)} messages")
+
+ # Store conversation history for AI calls (don't include current prompt yet)
+ history_for_ai = conversation_history.copy()
+
+ # Add current prompt to full conversation history for session storage
+ conversation_history.append({"role": "user", "content": prompt})
+
+ # Handle answers provided (skip question generation and process directly)
+ if answers is not None:
+ logger.info(f"📝 Agent {agent_id}: Processing with provided answers, skipping question generation")
+ return self._process_answers_directly(agent_id, prompt, final_response_structure, answers, token_summary, history_for_ai)
 
  # Step 1: Generate questions (if ask_questions is True)
  if ask_questions:
@@ -236,7 +254,7 @@ IMPORTANT: For each question, provide a technical key identifier that:
  messages = [{"role": "user", "content": questions_prompt}]
 
  try:
- response = self._call_ai_parse(messages, QuestionList, conversation_history)
+ response = self._call_ai_parse(messages, QuestionList, history_for_ai)
  question_list = self._extract_parsed_content(response, QuestionList)
 
  # Extract token usage for question generation
@@ -253,7 +271,8 @@ IMPORTANT: For each question, provide a technical key identifier that:
  "final_response_structure": final_response_structure,
  "questions": question_list.questions,
  "step": "waiting_for_answers",
- "token_summary": token_summary
+ "token_summary": token_summary,
+ "conversation_history": history_for_ai # Include history without current prompt
  }
  logger.info(f"📋 Agent {agent_id}: Waiting for user answers to {len(question_list.questions)} questions")
  return agent_response
@@ -263,71 +282,75 @@ IMPORTANT: For each question, provide a technical key identifier that:
 
  # Step 2 & 3: No questions or ask_questions=False - proceed directly
  logger.info(f"⚡ Agent {agent_id}: No questions needed, proceeding directly to processing")
- return self._process_with_answers(prompt, final_response_structure, [], agent_id, token_summary, conversation_history)
+ return self._process_with_answers(prompt, final_response_structure, [], agent_id, token_summary, history_for_ai)
 
- def answer_to_agent(self, agent_id: str, answers: AnswerList) -> AgentResponse:
+ def _process_answers_directly(
+ self,
+ agent_id: str,
+ prompt: str,
+ final_response_structure: Type[BaseModel],
+ answers: AnswerList,
+ token_summary: TokenSummary,
+ conversation_history: list[dict[str, Any]]
+ ) -> AgentResponse:
  """
- Continue an agent session by providing answers to questions.
-
+ Process answers directly without going through question generation.
+
  Args:
  agent_id: The agent session identifier
- answers: List of answers to the questions
-
+ prompt: The current prompt
+ final_response_structure: Expected response structure
+ answers: Provided answers
+ token_summary: Current token usage summary
+ conversation_history: Conversation history
+
  Returns:
  AgentResponse with the final result
-
- Raises:
- ValueError: If the agent session is not found or in wrong state
  """
- if agent_id not in self._agent_sessions:
- raise ValueError(f"Agent session {agent_id} not found")
-
- session = self._agent_sessions[agent_id]
- if session["step"] != "waiting_for_answers":
- raise ValueError(f"Agent session {agent_id} is not waiting for answers")
-
- # Convert answers to a more usable format and create question-answer pairs
- answer_dict = {answer.key: answer.answer for answer in answers.answers}
+ # Check if we have a session with questions to match against
+ session = self._agent_sessions.get(agent_id, {})
  questions = session.get("questions", [])
 
- # Create question-answer pairs for better context
- qa_pairs = []
- for question in questions:
- answer_text = answer_dict.get(question.key, "No answer provided")
- qa_pairs.append({
- "question": question.question,
- "key": question.key,
- "answer": answer_text,
- "required": question.required
- })
-
- logger.info(f"📝 Agent {agent_id}: Received {len(answers.answers)} answers, continuing processing")
-
- # Get token summary from session
- token_summary = session.get("token_summary", TokenSummary())
- conversation_history = session.get("conversation_history", [])
+ if not questions:
+ # No previous questions - treat as simple additional context
+ logger.info(f"📝 Agent {agent_id}: No previous questions found, treating answers as additional context")
+ answer_dict = {answer.key: answer.answer for answer in answers.answers}
+ qa_pairs = []
+ for answer in answers.answers:
+ qa_pairs.append({
+ "question": f"Information about {answer.key}",
+ "key": answer.key,
+ "answer": answer.answer,
+ "required": True
+ })
+ else:
+ # Match answers with previous questions
+ logger.info(f"📝 Agent {agent_id}: Matching {len(answers.answers)} answers with previous questions")
+ answer_dict = {answer.key: answer.answer for answer in answers.answers}
+
+ # Create question-answer pairs for better context
+ qa_pairs = []
+ for question in questions:
+ answer_text = answer_dict.get(question.key, "No answer provided")
+ qa_pairs.append({
+ "question": question.question,
+ "key": question.key,
+ "answer": answer_text,
+ "required": question.required
+ })
 
  # Process with the provided answers and question context
  result = self._process_with_answers(
- session["original_prompt"],
- session["final_response_structure"],
+ prompt,
+ final_response_structure,
  qa_pairs,
  agent_id,
  token_summary,
  conversation_history
  )
 
- # Update session with conversation history but don't delete it
- if agent_id in self._agent_sessions:
- self._agent_sessions[agent_id]["step"] = "completed"
- self._agent_sessions[agent_id]["conversation_history"] = self._agent_sessions[agent_id].get("conversation_history", [])
-
- # Add this interaction to history
- self._agent_sessions[agent_id]["conversation_history"].extend([
- {"role": "user", "content": session["original_prompt"]},
- {"role": "assistant", "content": str(result.final_response) if result.final_response else "No response generated"}
- ])
-
+ # Note: History management is now handled in _process_with_answers
+ # No need to duplicate history management here
  return result
 
  def _format_qa_context_for_quality_check(self, answers: Union[list, dict[str, str]]) -> str:
@@ -406,6 +429,28 @@ IMPORTANT: For each question, provide a technical key identifier that:
  logger.info(f"📝 Agent {agent_id}: Generating final structured response")
  final_response = self._generate_final_response(prompt, final_result, final_response_structure, agent_id, token_summary, conversation_history)
 
+ # Update session with the final response in conversation history
+ if agent_id in self._agent_sessions:
+ # Update conversation history with assistant response
+ updated_history = conversation_history.copy()
+ updated_history.append({"role": "assistant", "content": str(final_response)})
+
+ self._agent_sessions[agent_id]["conversation_history"] = updated_history
+ self._agent_sessions[agent_id]["step"] = "completed"
+ self._agent_sessions[agent_id]["token_summary"] = token_summary
+ logger.info(f"💾 Agent {agent_id}: Updated session with conversation history ({len(updated_history)} messages)")
+ else:
+ # Create new session if it doesn't exist
+ updated_history = conversation_history.copy()
+ updated_history.append({"role": "assistant", "content": str(final_response)})
+
+ self._agent_sessions[agent_id] = {
+ "step": "completed",
+ "conversation_history": updated_history,
+ "token_summary": token_summary
+ }
+ logger.info(f"💾 Agent {agent_id}: Created new session with conversation history ({len(updated_history)} messages)")
+
  # Log final token summary
  logger.info(f"📊 Agent {agent_id}: Token usage summary - Total: {token_summary.total_tokens} "
  f"(Prompt: {token_summary.total_prompt_tokens}, Completion: {token_summary.total_completion_tokens})")
mbxai/examples/agent_example.py CHANGED
@@ -48,8 +48,8 @@ def example_with_questions():
  Answer(key="recent_books", answer="I recently read and loved Dune and The Name of the Wind")
  ])
 
- # Continue the agent process with answers
- final_response = agent.answer_to_agent(response.agent_id, answers)
+ # Continue the agent process with answers using the unified interface
+ final_response = agent.agent("Continue with previous questions", BookRecommendation, ask_questions=False, agent_id=response.agent_id, answers=answers)
 
  if final_response.is_complete():
  book_rec = final_response.final_response
mbxai/examples/agent_logging_example.py CHANGED
@@ -74,7 +74,7 @@ def demonstrate_agent_with_questions():
  ])
 
  print(f"\n📝 Providing answers and continuing...")
- final_response = agent.answer_to_agent(response.agent_id, answers)
+ final_response = agent.agent("Continue with answers", WeatherResponse, ask_questions=False, agent_id=response.agent_id, answers=answers)
 
  if final_response.is_complete():
  print("\n✅ Final response received!")
mbxai/examples/conversation_history_test.py ADDED
@@ -0,0 +1,169 @@
+ """
+ Test example demonstrating conversation history persistence across multiple interactions.
+ """
+
+ import os
+ from pydantic import BaseModel, Field
+ from mbxai import AgentClient, OpenRouterClient, AnswerList, Answer
+
+
+ class StoryResponse(BaseModel):
+ """A story response that should reference previous conversation."""
+ story_part: str = Field(description="The current part of the story")
+ references_to_previous: str = Field(description="How this connects to our previous conversation")
+ conversation_context: str = Field(description="Summary of what was discussed before")
+
+
+ class ChatResponse(BaseModel):
+ """A general chat response."""
+ response: str = Field(description="The response to the user's message")
+ context_awareness: str = Field(description="What the AI remembers from our conversation")
+
+
+ def test_conversation_history_persistence():
+ """Test that conversation history persists and is used across multiple interactions."""
+ print("🧪 TESTING: Conversation History Persistence")
+ print("=" * 60)
+
+ # Initialize the clients
+ openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
+ agent = AgentClient(openrouter_client, max_iterations=1)
+
+ # First interaction - establish context
+ print("\n1️⃣ First interaction - setting up story context:")
+ prompt1 = "I want to tell a collaborative story about a space explorer named Luna who discovers an ancient alien artifact on Mars."
+ response1 = agent.agent(prompt1, StoryResponse, ask_questions=False)
+
+ agent_id = response1.agent_id
+ print(f"Agent ID: {agent_id}")
+
+ if response1.is_complete():
+ story1 = response1.final_response
+ print(f"Story Part 1: {story1.story_part[:200]}...")
+ print(f"Context awareness: {story1.context_awareness}")
+
+ # Check session info
+ session_info = agent.get_session_info(agent_id)
+ print(f"📊 Session after first interaction: {session_info['conversation_length']} messages")
+
+ # Second interaction - continue story, should reference Luna and the artifact
+ print(f"\n2️⃣ Second interaction - continuing story (should remember Luna and artifact):")
+ prompt2 = "Luna touches the artifact and something amazing happens. Continue the story."
+ response2 = agent.agent(prompt2, StoryResponse, ask_questions=False, agent_id=agent_id)
+
+ if response2.is_complete():
+ story2 = response2.final_response
+ print(f"Story Part 2: {story2.story_part[:200]}...")
+ print(f"References to previous: {story2.references_to_previous}")
+ print(f"Conversation context: {story2.conversation_context}")
+
+ # Check session info
+ session_info = agent.get_session_info(agent_id)
+ print(f"📊 Session after second interaction: {session_info['conversation_length']} messages")
+
+ # Third interaction - change topic but should still remember story context
+ print(f"\n3️⃣ Third interaction - changing topic (should still remember our story):")
+ prompt3 = "Actually, let's pause the story. What do you think Luna's personality is like based on our story so far?"
+ response3 = agent.agent(prompt3, ChatResponse, ask_questions=False, agent_id=agent_id)
+
+ if response3.is_complete():
+ chat3 = response3.final_response
+ print(f"Response: {chat3.response}")
+ print(f"Context awareness: {chat3.context_awareness}")
+
+ # Check session info
+ session_info = agent.get_session_info(agent_id)
+ print(f"📊 Session after third interaction: {session_info['conversation_length']} messages")
+
+ # Fourth interaction - return to story, should remember everything
+ print(f"\n4️⃣ Fourth interaction - returning to story (should remember all previous context):")
+ prompt4 = "Great! Now let's continue Luna's story from where we left off. What happens next with the artifact?"
+ response4 = agent.agent(prompt4, StoryResponse, ask_questions=False, agent_id=agent_id)
+
+ if response4.is_complete():
+ story4 = response4.final_response
+ print(f"Story Part 4: {story4.story_part[:200]}...")
+ print(f"References to previous: {story4.references_to_previous}")
+ print(f"Conversation context: {story4.conversation_context}")
+
+ # Final session info
+ session_info = agent.get_session_info(agent_id)
+ print(f"📊 Final session state: {session_info['conversation_length']} messages")
+ print(f"Session step: {session_info.get('step', 'unknown')}")
+
+ # Display full conversation history
+ print(f"\n💬 FULL CONVERSATION HISTORY:")
+ session_info = agent.get_session_info(agent_id)
+ history = session_info.get('conversation_history', [])
+ for i, msg in enumerate(history, 1):
+ role = msg['role'].upper()
+ content = msg['content'][:100] + "..." if len(msg['content']) > 100 else msg['content']
+ print(f"{i:2d}. {role}: {content}")
+
+ # Cleanup
+ print(f"\n🗑️ Cleaning up session {agent_id}")
+ agent.delete_session(agent_id)
+
+
+ def test_with_questions_and_history():
+ """Test conversation history with questions and answers."""
+ print("\n🧪 TESTING: Questions + Answers + History")
+ print("=" * 60)
+
+ openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
+ agent = AgentClient(openrouter_client, max_iterations=1)
+
+ # Start with questions
+ print("\n1️⃣ Starting conversation with questions:")
+ response1 = agent.agent("I want a personalized workout plan", ChatResponse, ask_questions=True)
+
+ agent_id = response1.agent_id
+ print(f"Agent ID: {agent_id}")
+
+ if response1.has_questions():
+ print(f"📋 Generated {len(response1.questions)} questions:")
+ for q in response1.questions:
+ print(f" - {q.question}")
+
+ # Answer questions
+ answers = AnswerList(answers=[
+ Answer(key="fitness_level", answer="Beginner"),
+ Answer(key="goals", answer="Weight loss and muscle building"),
+ Answer(key="time_available", answer="30 minutes per day, 4 days a week"),
+ Answer(key="equipment", answer="Home gym with dumbbells and resistance bands")
+ ])
+
+ print(f"\n2️⃣ Providing answers:")
+ response2 = agent.agent("Here are my answers", ChatResponse, ask_questions=False, agent_id=agent_id, answers=answers)
+
+ if response2.is_complete():
+ workout_plan = response2.final_response
+ print(f"Workout plan: {workout_plan.response[:200]}...")
+ print(f"Context awareness: {workout_plan.context_awareness}")
+
+ # Continue conversation
+ print(f"\n3️⃣ Follow-up question (should remember all previous context):")
+ response3 = agent.agent("Can you modify this plan to focus more on cardio?", ChatResponse, ask_questions=False, agent_id=agent_id)
+
+ if response3.is_complete():
+ modified_plan = response3.final_response
+ print(f"Modified plan: {modified_plan.response[:200]}...")
+ print(f"Context awareness: {modified_plan.context_awareness}")
+
+ # Show history
+ session_info = agent.get_session_info(agent_id)
+ print(f"\n💬 Conversation had {session_info['conversation_length']} messages")
+
+ # Cleanup
+ agent.delete_session(agent_id)
+
+
+ if __name__ == "__main__":
+ try:
+ test_conversation_history_persistence()
+ print("\n" + "="*80 + "\n")
+ test_with_questions_and_history()
+ except Exception as e:
+ print(f"Error: {e}")
+ import traceback
+ traceback.print_exc()
mbxai/examples/dialog_agent_example.py CHANGED
@@ -116,7 +116,7 @@ def demonstrate_dialog_with_questions():
  ])
 
  print(f"\n📝 Providing answers...")
- final_response = agent.answer_to_agent(agent_id, answers)
+ final_response = agent.agent("Continue with answers", BookRecommendation, ask_questions=False, agent_id=agent_id, answers=answers)
 
  if final_response.is_complete():
  book_rec = final_response.final_response
mbxai/examples/unified_interface_example.py ADDED
@@ -0,0 +1,109 @@
+ """
+ Example demonstrating the new unified agent interface.
+ """
+
+ import os
+ from pydantic import BaseModel, Field
+ from mbxai import AgentClient, OpenRouterClient, AnswerList, Answer
+
+
+ class SimpleResponse(BaseModel):
+ """A simple response."""
+ response: str = Field(description="The response text")
+ context_used: str = Field(description="How context was used in this response")
+
+
+ def demonstrate_unified_interface():
+ """Demonstrate the unified agent interface with and without questions."""
+ print("🔧 DEMO: Unified Agent Interface")
+ print("=" * 50)
+
+ # Initialize the clients
+ openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
+ agent = AgentClient(openrouter_client, max_iterations=1)
+
+ # Example 1: Start conversation with questions
+ print("\n1️⃣ Starting conversation that generates questions:")
+ response1 = agent.agent("I need help planning a trip", SimpleResponse, ask_questions=True)
+
+ agent_id = response1.agent_id
+ print(f"Agent ID: {agent_id}")
+
+ if response1.has_questions():
+ print(f"📋 Generated {len(response1.questions)} questions:")
+ for q in response1.questions:
+ print(f" - {q.question} (key: {q.key})")
+
+ # Example 2: Provide answers using the unified interface
+ print(f"\n2️⃣ Providing answers using unified interface:")
+ answers = AnswerList(answers=[
+ Answer(key="destination", answer="Japan"),
+ Answer(key="duration", answer="10 days"),
+ Answer(key="budget", answer="$3000"),
+ Answer(key="interests", answer="culture, food, temples")
+ ])
+
+ response2 = agent.agent(
+ "Now help me plan the trip",
+ SimpleResponse,
+ ask_questions=False,
+ agent_id=agent_id,
+ answers=answers
+ )
+
+ if response2.is_complete():
+ trip_plan = response2.final_response
+ print(f"Response: {trip_plan.response}")
+ print(f"Context used: {trip_plan.context_used}")
+
+ # Example 3: Continue the conversation
+ print(f"\n3️⃣ Continuing conversation without questions:")
+ response3 = agent.agent(
+ "What about transportation within Japan?",
+ SimpleResponse,
+ ask_questions=False,
+ agent_id=agent_id
+ )
+
+ if response3.is_complete():
+ transport_info = response3.final_response
+ print(f"Response: {transport_info.response}")
+ print(f"Context used: {transport_info.context_used}")
+
+ # Example 4: Using answers without previous questions (new session)
+ print(f"\n4️⃣ Starting new session with direct answers (no questions):")
+ new_answers = AnswerList(answers=[
+ Answer(key="city", answer="Tokyo"),
+ Answer(key="travel_style", answer="luxury"),
+ Answer(key="group_size", answer="2 people")
+ ])
+
+ response4 = agent.agent(
+ "Recommend restaurants",
+ SimpleResponse,
+ ask_questions=False,
+ answers=new_answers # New session, no agent_id provided
+ )
+
+ if response4.is_complete():
+ restaurant_info = response4.final_response
+ print(f"New Agent ID: {response4.agent_id}")
+ print(f"Response: {restaurant_info.response}")
+ print(f"Context used: {restaurant_info.context_used}")
+
+ # Show active sessions
+ print(f"\n📊 Active Sessions: {agent.list_sessions()}")
+
+ # Cleanup
+ print(f"\n🗑️ Cleaning up sessions...")
+ agent.delete_session(agent_id)
+ if response4.agent_id != agent_id:
+ agent.delete_session(response4.agent_id)
+ print(f"Active Sessions after cleanup: {agent.list_sessions()}")
+
+
+ if __name__ == "__main__":
+ try:
+ demonstrate_unified_interface()
+ except Exception as e:
+ print(f"Error: {e}")
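Taken together, the agent changes above replace the removed answer_to_agent() method with a single agent() entry point that also accepts agent_id and answers. A minimal migration sketch based on the updated examples above (TripPlan, the prompts, and the answer values are illustrative placeholders, not part of the package):

import os
from pydantic import BaseModel, Field
from mbxai import AgentClient, OpenRouterClient, AnswerList, Answer

class TripPlan(BaseModel):
    plan: str = Field(description="The suggested plan")

client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
agent = AgentClient(client, max_iterations=1)

# Step 1: start the session and let the agent ask clarifying questions
first = agent.agent("Help me plan a weekend trip", TripPlan, ask_questions=True)

if first.has_questions():
    # Step 2: answer each question by its key
    answers = AnswerList(answers=[Answer(key=q.key, answer="...") for q in first.questions])

    # Before 2.1.1: final = agent.answer_to_agent(first.agent_id, answers)
    # From 2.1.1: pass the answers back through the same agent() call
    final = agent.agent(
        "Continue with my answers",
        TripPlan,
        ask_questions=False,
        agent_id=first.agent_id,
        answers=answers,
    )
    if final.is_complete():
        print(final.final_response.plan)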
mbxai/mcp/server.py CHANGED
@@ -31,7 +31,7 @@ class MCPServer:
  self.app = FastAPI(
  title=self.name,
  description=self.description,
- version="2.1.0",
+ version="2.1.1",
  )
 
  # Initialize MCP server
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: mbxai
- Version: 2.1.0
+ Version: 2.1.1
  Summary: MBX AI SDK
  Project-URL: Homepage, https://www.mibexx.de
  Project-URL: Documentation, https://www.mibexx.de
@@ -1,15 +1,16 @@
- mbxai/__init__.py,sha256=aKvwNzfCZoNtqcIFsrCegUTc6WyYk-JW-9YVAxN8A7A,407
+ mbxai/__init__.py,sha256=e4xVVEhVt0ryj9sh9mHtCKINk2GfFdnSuQndPtTOdHw,407
  mbxai/core.py,sha256=WMvmU9TTa7M_m-qWsUew4xH8Ul6xseCZ2iBCXJTW-Bs,196
  mbxai/agent/__init__.py,sha256=5j3mW2NZtAU1s2w8n833axWBQsxW8U0qKwoQ9JtQZ4k,289
- mbxai/agent/client.py,sha256=AntnXfzfcFThErTwV6qiDudPaZSEMj2HrREFQ4rruoo,32128
+ mbxai/agent/client.py,sha256=zVJVa-7xxQqkWcmAwK69RLP0K7mR4_L1El0tQz_-P_k,34729
  mbxai/agent/models.py,sha256=sjBtaAENDABHl8IqTON1gxFFSZIaQYUCBFHB5804_Fw,5780
- mbxai/examples/agent_example.py,sha256=z3S8yBH7rU-ZjpYxNeTKDuyJZ8huEh3OVhhY4VaC79U,7308
+ mbxai/examples/agent_example.py,sha256=7gQHcMVWBu2xdxnVNzz4UfW0lkUnw9a5DN2-YoIRxXE,7420
  mbxai/examples/agent_iterations_example.py,sha256=xMqZhBWS67EkRkArjOAY2fCgLkQ32Qn9E4CSfEKW4MU,7905
- mbxai/examples/agent_logging_example.py,sha256=su2Ccdfp8aYGCQkZhnNRGbaBd6DZEsYpPoxm7dI2g_o,10162
+ mbxai/examples/agent_logging_example.py,sha256=P5LDcoIn0XCYWMPJVTjeXNkY32ELyKEf63Z_1nu5QkA,10232
  mbxai/examples/agent_tool_registration_example.py,sha256=oWm0-d4mdba-VQ3HobiCIR0IHtEDCtJenb8Lnm9QqCw,9108
  mbxai/examples/agent_validation_example.py,sha256=xlEf5Mwq5_Iu8bNU4cuHGZVYvAyZNhO2GMFmOom-CLo,4185
  mbxai/examples/auto_schema_example.py,sha256=ymuJJqqDxYznZT2VN6zVFEM7m_lDuccZ1AKSx-xzLTM,8174
- mbxai/examples/dialog_agent_example.py,sha256=biG8--n_hkQn-JltTqEgHO_TpkOtoHkRgTBZs4eVPsQ,6942
+ mbxai/examples/conversation_history_test.py,sha256=TpOh5ruQlXDPTPEu_0qTACAaQPSklKp8RYiOm1UzqPI,7773
+ mbxai/examples/dialog_agent_example.py,sha256=k502Y_pq6uddWEcH-5i0MxqyakxHKSy-KvHv1s4G1dw,7015
  mbxai/examples/openrouter_example.py,sha256=-grXHKMmFLoh-yUIEMc31n8Gg1S7uSazBWCIOWxgbyQ,1317
  mbxai/examples/parse_example.py,sha256=eCKMJoOl6qwo8sDP6Trc6ncgjPlgTqi5tPE2kB5_P0k,3821
  mbxai/examples/parse_tool_example.py,sha256=duHN8scI9ZK6XZ5hdiz1Adzyc-_7tH9Ls9qP4S0bf5s,5477
@@ -18,12 +19,13 @@ mbxai/examples/response.json,sha256=4SGJJyQjWWeN__Mrxm6ZtHIo1NUtLEheldd5KaA2mHw,
  mbxai/examples/send_request.py,sha256=O5gCHUHy7RvkEFo9IQATgnSOfOdu8OqKHfjAlLDwWPg,6023
  mbxai/examples/simple_agent_test.py,sha256=joCVszUpRkrxHv2DM9QTAh1r6S8iv16pZ-zSPZSBQiU,6391
  mbxai/examples/tool_client_example.py,sha256=9DNaejXLA85dPbExMiv5y76qlFhzOJF9E5EnMOsy_Dc,3993
+ mbxai/examples/unified_interface_example.py,sha256=EQpatD95zHPAbXN93EHA4EB0v-5vMsOA1yfEMFFmF-A,3887
  mbxai/examples/mcp/mcp_client_example.py,sha256=d5-TRHNDdp3nT_NGt0tKpT3VUAJVvqAHSyqkzk9Dd2s,2972
  mbxai/examples/mcp/mcp_server_example.py,sha256=nFfg22Jnc6HMW_ezLO3So1xwDdx2_rItj5CR-y_Nevs,3966
  mbxai/mcp/__init__.py,sha256=_ek9iYdYqW5saKetj4qDci11jxesQDiHPJRpHMKkxgU,175
  mbxai/mcp/client.py,sha256=QRzId6o4_WRWVv3rtm8cfZZGaoY_UlaOO-oqNjY-tmw,5219
  mbxai/mcp/example.py,sha256=oaol7AvvZnX86JWNz64KvPjab5gg1VjVN3G8eFSzuaE,2350
- mbxai/mcp/server.py,sha256=e8Ie6AtplYC2bPss6m8n6uWaLHj1LIftG0Mzozwuv1I,3332
+ mbxai/mcp/server.py,sha256=DN4a7qwoz_YSLpeGhZlez1XIqddYFkn7RC-2VKno3Uc,3332
  mbxai/openrouter/__init__.py,sha256=Ito9Qp_B6q-RLGAQcYyTJVWwR2YAZvNqE-HIYXxhtD8,298
  mbxai/openrouter/client.py,sha256=3LD6WDJ8wjo_nefH5d1NJCsrWPvBc_KBf2NsItUoSt8,18302
  mbxai/openrouter/config.py,sha256=Ia93s-auim9Sq71eunVDbn9ET5xX2zusXpV4JBdHAzs,3251
@@ -33,7 +35,7 @@ mbxai/tools/__init__.py,sha256=ogxrHvgJ7OR62Lmd5x9Eh5d2C0jqWyQis7Zy3yKpZ78,218
  mbxai/tools/client.py,sha256=2wFPD-UN3Y2DSyrnqxt2vvFgTYHzUl14_y0r6fhAWmM,17198
  mbxai/tools/example.py,sha256=1HgKK39zzUuwFbnp3f0ThyWVfA_8P28PZcTwaUw5K78,2232
  mbxai/tools/types.py,sha256=OFfM7scDGTm4FOcJA2ecj-fxL1MEBkqPsT3hqCL1Jto,9505
- mbxai-2.1.0.dist-info/METADATA,sha256=tYQbghjhM4q3HQAJ-0MpNIu_P9e4eS5E-v6Y2I6V9kk,10018
- mbxai-2.1.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- mbxai-2.1.0.dist-info/licenses/LICENSE,sha256=hEyhc4FxwYo3NQ40yNgZ7STqwVk-1_XcTXOnAPbGJAw,1069
- mbxai-2.1.0.dist-info/RECORD,,
+ mbxai-2.1.1.dist-info/METADATA,sha256=PlNhE1zB42gvsB3HbLJTSVgA0JKmjhGtdX_ThlCD1Hk,10018
+ mbxai-2.1.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ mbxai-2.1.1.dist-info/licenses/LICENSE,sha256=hEyhc4FxwYo3NQ40yNgZ7STqwVk-1_XcTXOnAPbGJAw,1069
+ mbxai-2.1.1.dist-info/RECORD,,
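The __init__.py, METADATA, and RECORD changes above are the 2.1.0 → 2.1.1 version bump itself. As a quick post-upgrade sanity check (a minimal sketch; it assumes only that the new wheel is installed and importable as mbxai):

import mbxai

print(mbxai.__version__)  # expected to print "2.1.1" for this release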