mbxai 2.1.1__tar.gz → 2.1.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. {mbxai-2.1.1 → mbxai-2.1.3}/PKG-INFO +1 -1
  2. {mbxai-2.1.1 → mbxai-2.1.3}/pyproject.toml +1 -1
  3. {mbxai-2.1.1 → mbxai-2.1.3}/setup.py +1 -1
  4. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/__init__.py +1 -1
  5. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/agent/client.py +74 -16
  6. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/examples/dialog_agent_example.py +15 -0
  7. mbxai-2.1.3/src/mbxai/examples/optional_prompt_example.py +164 -0
  8. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/mcp/server.py +1 -1
  9. {mbxai-2.1.1 → mbxai-2.1.3}/.gitignore +0 -0
  10. {mbxai-2.1.1 → mbxai-2.1.3}/LICENSE +0 -0
  11. {mbxai-2.1.1 → mbxai-2.1.3}/README.md +0 -0
  12. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/agent/__init__.py +0 -0
  13. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/agent/models.py +0 -0
  14. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/core.py +0 -0
  15. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/examples/agent_example.py +0 -0
  16. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/examples/agent_iterations_example.py +0 -0
  17. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/examples/agent_logging_example.py +0 -0
  18. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/examples/agent_tool_registration_example.py +0 -0
  19. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/examples/agent_validation_example.py +0 -0
  20. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/examples/auto_schema_example.py +0 -0
  21. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/examples/conversation_history_test.py +0 -0
  22. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/examples/mcp/mcp_client_example.py +0 -0
  23. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/examples/mcp/mcp_server_example.py +0 -0
  24. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/examples/openrouter_example.py +0 -0
  25. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/examples/parse_example.py +0 -0
  26. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/examples/parse_tool_example.py +0 -0
  27. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/examples/request.json +0 -0
  28. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/examples/response.json +0 -0
  29. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/examples/send_request.py +0 -0
  30. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/examples/simple_agent_test.py +0 -0
  31. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/examples/tool_client_example.py +0 -0
  32. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/examples/unified_interface_example.py +0 -0
  33. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/mcp/__init__.py +0 -0
  34. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/mcp/client.py +0 -0
  35. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/mcp/example.py +0 -0
  36. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/openrouter/__init__.py +0 -0
  37. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/openrouter/client.py +0 -0
  38. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/openrouter/config.py +0 -0
  39. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/openrouter/models.py +0 -0
  40. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/openrouter/schema.py +0 -0
  41. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/tools/__init__.py +0 -0
  42. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/tools/client.py +0 -0
  43. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/tools/example.py +0 -0
  44. {mbxai-2.1.1 → mbxai-2.1.3}/src/mbxai/tools/types.py +0 -0
  45. {mbxai-2.1.1 → mbxai-2.1.3}/tests/test_mcp_tool_registration.py +0 -0
  46. {mbxai-2.1.1 → mbxai-2.1.3}/tests/test_real_mcp_schema.py +0 -0
  47. {mbxai-2.1.1 → mbxai-2.1.3}/tests/test_schema_conversion.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: mbxai
3
- Version: 2.1.1
3
+ Version: 2.1.3
4
4
  Summary: MBX AI SDK
5
5
  Project-URL: Homepage, https://www.mibexx.de
6
6
  Project-URL: Documentation, https://www.mibexx.de
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
4
4
 
5
5
  [project]
6
6
  name = "mbxai"
7
- version = "2.1.1"
7
+ version = "2.1.3"
8
8
  authors = [
9
9
  { name = "MBX AI" }
10
10
  ]
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages
2
2
 
3
3
  setup(
4
4
  name="mbxai",
5
- version="2.1.1",
5
+ version="2.1.3",
6
6
  author="MBX AI",
7
7
  description="MBX AI SDK",
8
8
  long_description=open("README.md").read(),
@@ -7,7 +7,7 @@ from .openrouter import OpenRouterClient
7
7
  from .tools import ToolClient
8
8
  from .mcp import MCPClient
9
9
 
10
- __version__ = "2.1.1"
10
+ __version__ = "2.1.3"
11
11
 
12
12
  __all__ = [
13
13
  "AgentClient",
@@ -134,6 +134,35 @@ class AgentClient:
134
134
  logger.debug(f"🔗 AI call with {len(messages)} messages (no history)")
135
135
  return self._ai_client.parse(full_messages, response_format)
136
136
 
137
+ def _validate_answers(self, answers: Any) -> bool:
138
+ """
139
+ Validate that answers parameter is a proper AnswerList with content.
140
+
141
+ Args:
142
+ answers: The answers parameter to validate
143
+
144
+ Returns:
145
+ True if answers is valid and has content, False otherwise
146
+ """
147
+ # Check if answers is the correct type
148
+ if not isinstance(answers, AnswerList):
149
+ logger.warning(f"Invalid answers type: {type(answers)}. Expected AnswerList, treating as no answers.")
150
+ return False
151
+
152
+ # Check if answers has content
153
+ if not hasattr(answers, 'answers') or not answers.answers:
154
+ logger.info(f"Empty answers list provided, proceeding without answers processing.")
155
+ return False
156
+
157
+ # Check if answers list contains valid Answer objects
158
+ for answer in answers.answers:
159
+ if not hasattr(answer, 'key') or not hasattr(answer, 'answer'):
160
+ logger.warning(f"Invalid answer object in list: {answer}. Treating as no answers.")
161
+ return False
162
+
163
+ logger.debug(f"Validated {len(answers.answers)} answers")
164
+ return True
165
+
137
166
  def _extract_token_usage(self, response: Any) -> TokenUsage:
138
167
  """Extract token usage information from an AI response."""
139
168
  try:
@@ -184,39 +213,63 @@ class AgentClient:
184
213
 
185
214
  def agent(
186
215
  self,
187
- prompt: str,
188
- final_response_structure: Type[BaseModel],
216
+ prompt: str = None,
217
+ final_response_structure: Type[BaseModel] = None,
189
218
  ask_questions: bool = True,
190
219
  agent_id: str = None,
191
- answers: AnswerList = None
220
+ answers: AnswerList | None = None
192
221
  ) -> AgentResponse:
193
222
  """
194
223
  Process a prompt through the agent's thinking process.
195
224
 
196
225
  Args:
197
- prompt: The initial prompt from the user
198
- final_response_structure: Pydantic model defining the expected final response format
226
+ prompt: The prompt from the user (optional if agent_id exists with history)
227
+ final_response_structure: Pydantic model defining the expected final response format (required for new sessions)
199
228
  ask_questions: Whether to ask clarifying questions (default: True)
200
229
  agent_id: Optional agent session ID to continue an existing conversation
201
230
  answers: Optional answers to questions (when continuing a conversation with questions)
202
231
 
203
232
  Returns:
204
233
  AgentResponse containing either questions to ask or the final response
234
+
235
+ Raises:
236
+ ValueError: If neither prompt nor agent_id with history is provided, or if final_response_structure is missing for new sessions
205
237
  """
206
- # Use provided agent_id or create a new one
207
- if agent_id is None:
208
- agent_id = str(__import__("uuid").uuid4())
238
+ # Validate inputs and determine session type
239
+ is_existing_session = agent_id is not None and agent_id in self._agent_sessions
240
+ existing_session = self._agent_sessions.get(agent_id, {}) if agent_id else {}
241
+ conversation_history = existing_session.get("conversation_history", []).copy()
242
+
243
+ # Validation logic
244
+ if not is_existing_session:
245
+ # New session - both prompt and final_response_structure are required
246
+ if not prompt:
247
+ raise ValueError("Prompt is required when starting a new agent session")
248
+ if not final_response_structure:
249
+ raise ValueError("final_response_structure is required when starting a new agent session")
250
+
251
+ # Create new agent_id if not provided
252
+ if agent_id is None:
253
+ agent_id = str(__import__("uuid").uuid4())
209
254
  logger.info(f"🚀 Starting new agent process (ID: {agent_id}) with prompt: {prompt[:100]}...")
210
255
  else:
211
- logger.info(f"🔄 Continuing agent process (ID: {agent_id}) with prompt: {prompt[:100]}...")
256
+ # Existing session - use previous final_response_structure if not provided
257
+ if not final_response_structure:
258
+ final_response_structure = existing_session.get("final_response_structure")
259
+ if not final_response_structure:
260
+ raise ValueError("final_response_structure not found in existing session and not provided")
261
+
262
+ # Handle optional prompt for existing sessions
263
+ if not prompt:
264
+ # Use conversation history to continue without explicit prompt
265
+ prompt = "[Continue conversation based on history]"
266
+ logger.info(f"🔄 Continuing agent process (ID: {agent_id}) without explicit prompt (using history)")
267
+ else:
268
+ logger.info(f"🔄 Continuing agent process (ID: {agent_id}) with prompt: {prompt[:100]}...")
212
269
 
213
270
  # Initialize token summary
214
271
  token_summary = TokenSummary()
215
272
 
216
- # Check if this is a continuing conversation
217
- existing_session = self._agent_sessions.get(agent_id, {})
218
- conversation_history = existing_session.get("conversation_history", []).copy()
219
-
220
273
  if conversation_history:
221
274
  logger.info(f"📜 Agent {agent_id}: Loaded conversation history with {len(conversation_history)} messages")
222
275
 
@@ -228,8 +281,11 @@ class AgentClient:
228
281
 
229
282
  # Handle answers provided (skip question generation and process directly)
230
283
  if answers is not None:
231
- logger.info(f"📝 Agent {agent_id}: Processing with provided answers, skipping question generation")
232
- return self._process_answers_directly(agent_id, prompt, final_response_structure, answers, token_summary, history_for_ai)
284
+ if self._validate_answers(answers):
285
+ logger.info(f"📝 Agent {agent_id}: Processing with provided answers, skipping question generation")
286
+ return self._process_answers_directly(agent_id, prompt, final_response_structure, answers, token_summary, history_for_ai)
287
+ else:
288
+ logger.info(f"📝 Agent {agent_id}: Invalid or empty answers provided, proceeding with normal flow")
233
289
 
234
290
  # Step 1: Generate questions (if ask_questions is True)
235
291
  if ask_questions:
@@ -438,6 +494,7 @@ IMPORTANT: For each question, provide a technical key identifier that:
438
494
  self._agent_sessions[agent_id]["conversation_history"] = updated_history
439
495
  self._agent_sessions[agent_id]["step"] = "completed"
440
496
  self._agent_sessions[agent_id]["token_summary"] = token_summary
497
+ self._agent_sessions[agent_id]["final_response_structure"] = final_response_structure
441
498
  logger.info(f"💾 Agent {agent_id}: Updated session with conversation history ({len(updated_history)} messages)")
442
499
  else:
443
500
  # Create new session if it doesn't exist
@@ -447,7 +504,8 @@ IMPORTANT: For each question, provide a technical key identifier that:
447
504
  self._agent_sessions[agent_id] = {
448
505
  "step": "completed",
449
506
  "conversation_history": updated_history,
450
- "token_summary": token_summary
507
+ "token_summary": token_summary,
508
+ "final_response_structure": final_response_structure
451
509
  }
452
510
  logger.info(f"💾 Agent {agent_id}: Created new session with conversation history ({len(updated_history)} messages)")
453
511
 
@@ -135,6 +135,21 @@ def demonstrate_dialog_with_questions():
135
135
  print(f"Genre: {book_rec2.genre}")
136
136
  print(f"Reason: {book_rec2.reason}")
137
137
  print(f"Connection to previous: {book_rec2.connection_to_previous}")
138
+
139
+ # Continue WITHOUT explicit prompt - using only conversation history
140
+ print(f"\n3️⃣ Continuing conversation WITHOUT explicit prompt (history-based):")
141
+ try:
142
+ response3 = agent.agent(agent_id=agent_id, ask_questions=False) # No prompt provided
143
+
144
+ if response3.is_complete():
145
+ book_rec3 = response3.final_response
146
+ print(f"History-based recommendation:")
147
+ print(f"Book: {book_rec3.title} by {book_rec3.author}")
148
+ print(f"Genre: {book_rec3.genre}")
149
+ print(f"Reason: {book_rec3.reason}")
150
+ print(f"Connection to previous: {book_rec3.connection_to_previous}")
151
+ except Exception as e:
152
+ print(f"Error with history-based continuation: {e}")
138
153
 
139
154
  # Session cleanup
140
155
  print(f"\n🗑️ Session cleanup...")
@@ -0,0 +1,164 @@
1
+ """
2
+ Example demonstrating optional prompt functionality with existing agent sessions.
3
+ """
4
+
5
+ import os
6
+ from pydantic import BaseModel, Field
7
+ from mbxai import AgentClient, OpenRouterClient, AnswerList, Answer
8
+
9
+
10
+ class ConversationResponse(BaseModel):
11
+ """A general conversation response."""
12
+ response: str = Field(description="The response to the conversation")
13
+ context_used: str = Field(description="How previous conversation history was used")
14
+ continuation_type: str = Field(description="Type of continuation (new topic, follow-up, etc.)")
15
+
16
+
17
+ class StoryResponse(BaseModel):
18
+ """A story response."""
19
+ story_continuation: str = Field(description="The next part of the story")
20
+ character_development: str = Field(description="How characters developed in this part")
21
+ plot_advancement: str = Field(description="How the plot advanced")
22
+
23
+
24
+ def demonstrate_optional_prompt():
25
+ """Demonstrate using agent with optional prompts for existing sessions."""
26
+ print("🎭 DEMO: Optional Prompt with Existing Agent Sessions")
27
+ print("=" * 60)
28
+
29
+ # Initialize the clients
30
+ openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
31
+ agent = AgentClient(openrouter_client, max_iterations=1)
32
+
33
+ # First interaction - establish a story context
34
+ print("\n1️⃣ First interaction - establishing story context:")
35
+ response1 = agent.agent(
36
+ prompt="Let's create a story about a detective solving a mystery in a small town",
37
+ final_response_structure=StoryResponse,
38
+ ask_questions=False
39
+ )
40
+
41
+ agent_id = response1.agent_id
42
+ print(f"Agent ID: {agent_id}")
43
+
44
+ if response1.is_complete():
45
+ story1 = response1.final_response
46
+ print(f"Story start: {story1.story_continuation[:150]}...")
47
+ print(f"Characters: {story1.character_development}")
48
+
49
+ # Second interaction - continue with explicit prompt
50
+ print(f"\n2️⃣ Second interaction - continue with explicit prompt:")
51
+ response2 = agent.agent(
52
+ prompt="The detective finds a mysterious letter. What does it say?",
53
+ agent_id=agent_id,
54
+ ask_questions=False
55
+ )
56
+
57
+ if response2.is_complete():
58
+ story2 = response2.final_response
59
+ print(f"Story continuation: {story2.story_continuation[:150]}...")
60
+ print(f"Plot advancement: {story2.plot_advancement}")
61
+
62
+ # Third interaction - continue WITHOUT explicit prompt (using only history)
63
+ print(f"\n3️⃣ Third interaction - continue WITHOUT explicit prompt (history-based):")
64
+ try:
65
+ response3 = agent.agent(
66
+ agent_id=agent_id, # Only provide agent_id, no prompt
67
+ ask_questions=False
68
+ )
69
+
70
+ if response3.is_complete():
71
+ story3 = response3.final_response
72
+ print(f"History-based continuation: {story3.story_continuation[:150]}...")
73
+ print(f"Character development: {story3.character_development}")
74
+ print(f"Plot advancement: {story3.plot_advancement}")
75
+
76
+ except Exception as e:
77
+ print(f"Error with history-based continuation: {e}")
78
+
79
+ # Fourth interaction - switch response format but use same session
80
+ print(f"\n4️⃣ Fourth interaction - switch to conversation format:")
81
+ response4 = agent.agent(
82
+ prompt="What do you think about this story so far?",
83
+ final_response_structure=ConversationResponse,
84
+ agent_id=agent_id,
85
+ ask_questions=False
86
+ )
87
+
88
+ if response4.is_complete():
89
+ conv4 = response4.final_response
90
+ print(f"Analysis: {conv4.response[:150]}...")
91
+ print(f"Context used: {conv4.context_used}")
92
+ print(f"Continuation type: {conv4.continuation_type}")
93
+
94
+ # Fifth interaction - continue conversation without prompt
95
+ print(f"\n5️⃣ Fifth interaction - continue conversation analysis without prompt:")
96
+ try:
97
+ response5 = agent.agent(
98
+ agent_id=agent_id,
99
+ ask_questions=False
100
+ )
101
+
102
+ if response5.is_complete():
103
+ conv5 = response5.final_response
104
+ print(f"Continued analysis: {conv5.response[:150]}...")
105
+ print(f"Context used: {conv5.context_used}")
106
+ print(f"Continuation type: {conv5.continuation_type}")
107
+
108
+ except Exception as e:
109
+ print(f"Error with continued conversation: {e}")
110
+
111
+ # Show final session state
112
+ session_info = agent.get_session_info(agent_id)
113
+ print(f"\n📊 Final session state:")
114
+ print(f" - Total messages: {session_info['conversation_length']}")
115
+ print(f" - Session step: {session_info.get('step', 'unknown')}")
116
+ print(f" - Has final_response_structure: {'final_response_structure' in session_info}")
117
+
118
+ # Cleanup
119
+ agent.delete_session(agent_id)
120
+
121
+
122
+ def demonstrate_error_cases():
123
+ """Demonstrate error cases with optional prompt."""
124
+ print("\n🚨 DEMO: Error Cases with Optional Prompt")
125
+ print("=" * 50)
126
+
127
+ openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
128
+ agent = AgentClient(openrouter_client, max_iterations=1)
129
+
130
+ # Error case 1: No prompt and no existing session
131
+ print("\n❌ Error case 1: No prompt, no existing session")
132
+ try:
133
+ response = agent.agent(ask_questions=False)
134
+ print("This should have failed!")
135
+ except ValueError as e:
136
+ print(f"Expected error: {e}")
137
+
138
+ # Error case 2: No prompt and no final_response_structure for new session
139
+ print("\n❌ Error case 2: No final_response_structure for new session")
140
+ try:
141
+ response = agent.agent(prompt="Test", ask_questions=False)
142
+ print("This should have failed!")
143
+ except ValueError as e:
144
+ print(f"Expected error: {e}")
145
+
146
+ # Error case 3: Unknown agent_id without prompt
147
+ print("\n❌ Error case 3: Unknown agent_id without prompt")
148
+ try:
149
+ response = agent.agent(agent_id="unknown-id", ask_questions=False)
150
+ print("This should have failed!")
151
+ except ValueError as e:
152
+ print(f"Expected error: {e}")
153
+
154
+ print("\n✅ All error cases handled correctly!")
155
+
156
+
157
+ if __name__ == "__main__":
158
+ try:
159
+ demonstrate_optional_prompt()
160
+ demonstrate_error_cases()
161
+ except Exception as e:
162
+ print(f"Unexpected error: {e}")
163
+ import traceback
164
+ traceback.print_exc()
@@ -31,7 +31,7 @@ class MCPServer:
31
31
  self.app = FastAPI(
32
32
  title=self.name,
33
33
  description=self.description,
34
- version="2.1.1",
34
+ version="2.1.3",
35
35
  )
36
36
 
37
37
  # Initialize MCP server
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes