mbxai 2.0.5__tar.gz → 2.1.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. {mbxai-2.0.5 → mbxai-2.1.1}/PKG-INFO +1 -1
  2. {mbxai-2.0.5 → mbxai-2.1.1}/pyproject.toml +1 -1
  3. {mbxai-2.0.5 → mbxai-2.1.1}/setup.py +1 -1
  4. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/__init__.py +1 -1
  5. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/agent/client.py +273 -63
  6. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/examples/agent_example.py +37 -2
  7. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/examples/agent_logging_example.py +1 -1
  8. mbxai-2.1.1/src/mbxai/examples/conversation_history_test.py +169 -0
  9. mbxai-2.1.1/src/mbxai/examples/dialog_agent_example.py +157 -0
  10. mbxai-2.1.1/src/mbxai/examples/unified_interface_example.py +109 -0
  11. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/mcp/server.py +1 -1
  12. {mbxai-2.0.5 → mbxai-2.1.1}/.gitignore +0 -0
  13. {mbxai-2.0.5 → mbxai-2.1.1}/LICENSE +0 -0
  14. {mbxai-2.0.5 → mbxai-2.1.1}/README.md +0 -0
  15. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/agent/__init__.py +0 -0
  16. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/agent/models.py +0 -0
  17. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/core.py +0 -0
  18. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/examples/agent_iterations_example.py +0 -0
  19. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/examples/agent_tool_registration_example.py +0 -0
  20. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/examples/agent_validation_example.py +0 -0
  21. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/examples/auto_schema_example.py +0 -0
  22. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/examples/mcp/mcp_client_example.py +0 -0
  23. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/examples/mcp/mcp_server_example.py +0 -0
  24. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/examples/openrouter_example.py +0 -0
  25. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/examples/parse_example.py +0 -0
  26. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/examples/parse_tool_example.py +0 -0
  27. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/examples/request.json +0 -0
  28. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/examples/response.json +0 -0
  29. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/examples/send_request.py +0 -0
  30. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/examples/simple_agent_test.py +0 -0
  31. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/examples/tool_client_example.py +0 -0
  32. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/mcp/__init__.py +0 -0
  33. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/mcp/client.py +0 -0
  34. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/mcp/example.py +0 -0
  35. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/openrouter/__init__.py +0 -0
  36. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/openrouter/client.py +0 -0
  37. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/openrouter/config.py +0 -0
  38. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/openrouter/models.py +0 -0
  39. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/openrouter/schema.py +0 -0
  40. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/tools/__init__.py +0 -0
  41. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/tools/client.py +0 -0
  42. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/tools/example.py +0 -0
  43. {mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/tools/types.py +0 -0
  44. {mbxai-2.0.5 → mbxai-2.1.1}/tests/test_mcp_tool_registration.py +0 -0
  45. {mbxai-2.0.5 → mbxai-2.1.1}/tests/test_real_mcp_schema.py +0 -0
  46. {mbxai-2.0.5 → mbxai-2.1.1}/tests/test_schema_conversion.py +0 -0
{mbxai-2.0.5 → mbxai-2.1.1}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: mbxai
- Version: 2.0.5
+ Version: 2.1.1
  Summary: MBX AI SDK
  Project-URL: Homepage, https://www.mibexx.de
  Project-URL: Documentation, https://www.mibexx.de
{mbxai-2.0.5 → mbxai-2.1.1}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"

  [project]
  name = "mbxai"
- version = "2.0.5"
+ version = "2.1.1"
  authors = [
  { name = "MBX AI" }
  ]
{mbxai-2.0.5 → mbxai-2.1.1}/setup.py
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages

  setup(
  name="mbxai",
- version="2.0.5",
+ version="2.1.1",
  author="MBX AI",
  description="MBX AI SDK",
  long_description=open("README.md").read(),
{mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/__init__.py
@@ -7,7 +7,7 @@ from .openrouter import OpenRouterClient
  from .tools import ToolClient
  from .mcp import MCPClient

- __version__ = "2.0.5"
+ __version__ = "2.1.1"

  __all__ = [
  "AgentClient",
{mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/agent/client.py
@@ -123,9 +123,16 @@ class AgentClient:
  f"Use MCPClient to register MCP servers."
  )

- def _call_ai_parse(self, messages: list[dict[str, Any]], response_format: Type[BaseModel]) -> Any:
- """Call the parse method on the AI client."""
- return self._ai_client.parse(messages, response_format)
+ def _call_ai_parse(self, messages: list[dict[str, Any]], response_format: Type[BaseModel], conversation_history: list[dict[str, Any]] = None) -> Any:
+ """Call the parse method on the AI client with optional conversation history."""
+ # Combine conversation history with new messages
+ if conversation_history:
+ full_messages = conversation_history + messages
+ logger.debug(f"🔗 AI call with {len(conversation_history)} history messages + {len(messages)} new messages = {len(full_messages)} total")
+ else:
+ full_messages = messages
+ logger.debug(f"🔗 AI call with {len(messages)} messages (no history)")
+ return self._ai_client.parse(full_messages, response_format)

  def _extract_token_usage(self, response: Any) -> TokenUsage:
  """Extract token usage information from an AI response."""
@@ -179,7 +186,9 @@ class AgentClient:
  self,
  prompt: str,
  final_response_structure: Type[BaseModel],
- ask_questions: bool = True
+ ask_questions: bool = True,
+ agent_id: str = None,
+ answers: AnswerList = None
  ) -> AgentResponse:
  """
  Process a prompt through the agent's thinking process.
@@ -188,16 +197,40 @@ class AgentClient:
  prompt: The initial prompt from the user
  final_response_structure: Pydantic model defining the expected final response format
  ask_questions: Whether to ask clarifying questions (default: True)
+ agent_id: Optional agent session ID to continue an existing conversation
+ answers: Optional answers to questions (when continuing a conversation with questions)

  Returns:
  AgentResponse containing either questions to ask or the final response
  """
- agent_id = str(__import__("uuid").uuid4())
- logger.info(f"🚀 Starting agent process (ID: {agent_id}) with prompt: {prompt[:100]}...")
+ # Use provided agent_id or create a new one
+ if agent_id is None:
+ agent_id = str(__import__("uuid").uuid4())
+ logger.info(f"🚀 Starting new agent process (ID: {agent_id}) with prompt: {prompt[:100]}...")
+ else:
+ logger.info(f"🔄 Continuing agent process (ID: {agent_id}) with prompt: {prompt[:100]}...")

  # Initialize token summary
  token_summary = TokenSummary()

+ # Check if this is a continuing conversation
+ existing_session = self._agent_sessions.get(agent_id, {})
+ conversation_history = existing_session.get("conversation_history", []).copy()
+
+ if conversation_history:
+ logger.info(f"📜 Agent {agent_id}: Loaded conversation history with {len(conversation_history)} messages")
+
+ # Store conversation history for AI calls (don't include current prompt yet)
+ history_for_ai = conversation_history.copy()
+
+ # Add current prompt to full conversation history for session storage
+ conversation_history.append({"role": "user", "content": prompt})
+
+ # Handle answers provided (skip question generation and process directly)
+ if answers is not None:
+ logger.info(f"📝 Agent {agent_id}: Processing with provided answers, skipping question generation")
+ return self._process_answers_directly(agent_id, prompt, final_response_structure, answers, token_summary, history_for_ai)
+
  # Step 1: Generate questions (if ask_questions is True)
  if ask_questions:
  logger.info(f"❓ Agent {agent_id}: Analyzing prompt and generating clarifying questions")
@@ -221,7 +254,7 @@ IMPORTANT: For each question, provide a technical key identifier that:
  messages = [{"role": "user", "content": questions_prompt}]

  try:
- response = self._call_ai_parse(messages, QuestionList)
+ response = self._call_ai_parse(messages, QuestionList, history_for_ai)
  question_list = self._extract_parsed_content(response, QuestionList)

  # Extract token usage for question generation
@@ -238,7 +271,8 @@ IMPORTANT: For each question, provide a technical key identifier that:
  "final_response_structure": final_response_structure,
  "questions": question_list.questions,
  "step": "waiting_for_answers",
- "token_summary": token_summary
+ "token_summary": token_summary,
+ "conversation_history": history_for_ai # Include history without current prompt
  }
  logger.info(f"📋 Agent {agent_id}: Waiting for user answers to {len(question_list.questions)} questions")
  return agent_response
@@ -248,58 +282,124 @@ IMPORTANT: For each question, provide a technical key identifier that:

  # Step 2 & 3: No questions or ask_questions=False - proceed directly
  logger.info(f"⚡ Agent {agent_id}: No questions needed, proceeding directly to processing")
- return self._process_with_answers(prompt, final_response_structure, [], agent_id, token_summary)
+ return self._process_with_answers(prompt, final_response_structure, [], agent_id, token_summary, history_for_ai)

- def answer_to_agent(self, agent_id: str, answers: AnswerList) -> AgentResponse:
+ def _process_answers_directly(
+ self,
+ agent_id: str,
+ prompt: str,
+ final_response_structure: Type[BaseModel],
+ answers: AnswerList,
+ token_summary: TokenSummary,
+ conversation_history: list[dict[str, Any]]
+ ) -> AgentResponse:
  """
- Continue an agent session by providing answers to questions.
-
+ Process answers directly without going through question generation.
+
  Args:
  agent_id: The agent session identifier
- answers: List of answers to the questions
-
+ prompt: The current prompt
+ final_response_structure: Expected response structure
+ answers: Provided answers
+ token_summary: Current token usage summary
+ conversation_history: Conversation history
+
  Returns:
  AgentResponse with the final result
-
- Raises:
- ValueError: If the agent session is not found or in wrong state
  """
- if agent_id not in self._agent_sessions:
- raise ValueError(f"Agent session {agent_id} not found")
-
- session = self._agent_sessions[agent_id]
- if session["step"] != "waiting_for_answers":
- raise ValueError(f"Agent session {agent_id} is not waiting for answers")
-
- # Convert answers to a more usable format
- answer_dict = {answer.key: answer.answer for answer in answers.answers}
-
- logger.info(f"📝 Agent {agent_id}: Received {len(answers.answers)} answers, continuing processing")
-
- # Get token summary from session
- token_summary = session.get("token_summary", TokenSummary())
-
- # Process with the provided answers
+ # Check if we have a session with questions to match against
+ session = self._agent_sessions.get(agent_id, {})
+ questions = session.get("questions", [])
+
+ if not questions:
+ # No previous questions - treat as simple additional context
+ logger.info(f"📝 Agent {agent_id}: No previous questions found, treating answers as additional context")
+ answer_dict = {answer.key: answer.answer for answer in answers.answers}
+ qa_pairs = []
+ for answer in answers.answers:
+ qa_pairs.append({
+ "question": f"Information about {answer.key}",
+ "key": answer.key,
+ "answer": answer.answer,
+ "required": True
+ })
+ else:
+ # Match answers with previous questions
+ logger.info(f"📝 Agent {agent_id}: Matching {len(answers.answers)} answers with previous questions")
+ answer_dict = {answer.key: answer.answer for answer in answers.answers}
+
+ # Create question-answer pairs for better context
+ qa_pairs = []
+ for question in questions:
+ answer_text = answer_dict.get(question.key, "No answer provided")
+ qa_pairs.append({
+ "question": question.question,
+ "key": question.key,
+ "answer": answer_text,
+ "required": question.required
+ })
+
+ # Process with the provided answers and question context
  result = self._process_with_answers(
- session["original_prompt"],
- session["final_response_structure"],
- answer_dict,
+ prompt,
+ final_response_structure,
+ qa_pairs,
  agent_id,
- token_summary
+ token_summary,
+ conversation_history
  )

- # Clean up the session
- del self._agent_sessions[agent_id]
-
+ # Note: History management is now handled in _process_with_answers
+ # No need to duplicate history management here
  return result

+ def _format_qa_context_for_quality_check(self, answers: Union[list, dict[str, str]]) -> str:
+ """
+ Format question-answer context for quality check and improvement prompts.
+
+ Args:
+ answers: Question-answer pairs or simple answers
+
+ Returns:
+ Formatted context text
+ """
+ if not answers:
+ return ""
+
+ if isinstance(answers, list) and answers:
+ # Check if it's a list of question-answer pairs (enhanced format)
+ if isinstance(answers[0], dict) and "question" in answers[0]:
+ context_text = "\nContext Information (Questions & Answers):\n"
+ context_text += "The response was generated with the following additional context:\n\n"
+ for i, qa_pair in enumerate(answers, 1):
+ question = qa_pair.get("question", "Unknown question")
+ answer = qa_pair.get("answer", "No answer provided")
+ required = qa_pair.get("required", True)
+
+ status_marker = "🔴 REQUIRED" if required else "🟡 OPTIONAL"
+ context_text += f"{i}. {status_marker} Q: {question}\n"
+ context_text += f" A: {answer}\n\n"
+ return context_text
+ else:
+ # Legacy format - simple list
+ return f"\nAdditional context: {', '.join(str(a) for a in answers)}\n\n"
+ elif isinstance(answers, dict) and answers:
+ # Legacy format - simple dict
+ context_text = "\nAdditional context provided:\n"
+ for key, answer in answers.items():
+ context_text += f"- {key}: {answer}\n"
+ return context_text + "\n"
+
+ return ""
+
  def _process_with_answers(
  self,
  prompt: str,
  final_response_structure: Type[BaseModel],
  answers: Union[list, dict[str, str]],
  agent_id: str,
- token_summary: TokenSummary
+ token_summary: TokenSummary,
+ conversation_history: list[dict[str, Any]] = None
  ) -> AgentResponse:
  """
  Process the prompt with answers through the thinking pipeline.
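The `_format_qa_context_for_quality_check` helper added in this hunk (and the matching branches in `_think_and_process` further down) accepts three `answers` shapes. A short sketch of the accepted inputs, with illustrative values:

# The three `answers` shapes these helpers branch on (values illustrative).

# 1. Enhanced format: question-answer pairs as built by _process_answers_directly.
qa_pairs = [
    {
        "question": "What genres do you enjoy?",
        "key": "genre_preference",
        "answer": "Science fiction and fantasy",
        "required": True,
    },
]

# 2. Legacy format: a plain list of answer strings.
legacy_list = ["Science fiction and fantasy"]

# 3. Legacy format: a dict keyed by the question's technical key.
legacy_dict = {"genre_preference": "Science fiction and fantasy"}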
@@ -307,23 +407,49 @@ IMPORTANT: For each question, provide a technical key identifier that:
  Args:
  prompt: The original prompt
  final_response_structure: Expected final response structure
- answers: Answers to questions (empty if no questions were asked)
+ answers: Question-answer pairs or simple answers (empty if no questions were asked)
  agent_id: The agent session identifier
  token_summary: Current token usage summary
+ conversation_history: Optional conversation history for dialog context

  Returns:
  AgentResponse with the final result
  """
+ if conversation_history is None:
+ conversation_history = []
+
  # Step 3: Process the prompt with thinking
  logger.info(f"🧠 Agent {agent_id}: Processing prompt and generating initial response")
- result = self._think_and_process(prompt, answers, agent_id, token_summary)
+ result = self._think_and_process(prompt, answers, agent_id, token_summary, conversation_history)

  # Step 4: Quality check and iteration
- final_result = self._quality_check_and_iterate(prompt, result, answers, agent_id, token_summary)
+ final_result = self._quality_check_and_iterate(prompt, result, answers, agent_id, token_summary, conversation_history)

  # Step 5: Generate final answer in requested format
  logger.info(f"📝 Agent {agent_id}: Generating final structured response")
- final_response = self._generate_final_response(prompt, final_result, final_response_structure, agent_id, token_summary)
+ final_response = self._generate_final_response(prompt, final_result, final_response_structure, agent_id, token_summary, conversation_history)
+
+ # Update session with the final response in conversation history
+ if agent_id in self._agent_sessions:
+ # Update conversation history with assistant response
+ updated_history = conversation_history.copy()
+ updated_history.append({"role": "assistant", "content": str(final_response)})
+
+ self._agent_sessions[agent_id]["conversation_history"] = updated_history
+ self._agent_sessions[agent_id]["step"] = "completed"
+ self._agent_sessions[agent_id]["token_summary"] = token_summary
+ logger.info(f"💾 Agent {agent_id}: Updated session with conversation history ({len(updated_history)} messages)")
+ else:
+ # Create new session if it doesn't exist
+ updated_history = conversation_history.copy()
+ updated_history.append({"role": "assistant", "content": str(final_response)})
+
+ self._agent_sessions[agent_id] = {
+ "step": "completed",
+ "conversation_history": updated_history,
+ "token_summary": token_summary
+ }
+ logger.info(f"💾 Agent {agent_id}: Created new session with conversation history ({len(updated_history)} messages)")

  # Log final token summary
  logger.info(f"📊 Agent {agent_id}: Token usage summary - Total: {token_summary.total_tokens} "
@@ -331,27 +457,49 @@ IMPORTANT: For each question, provide a technical key identifier that:

  return AgentResponse(agent_id=agent_id, final_response=final_response, token_summary=token_summary)

- def _think_and_process(self, prompt: str, answers: Union[list, dict[str, str]], agent_id: str, token_summary: TokenSummary) -> str:
+ def _think_and_process(self, prompt: str, answers: Union[list, dict[str, str]], agent_id: str, token_summary: TokenSummary, conversation_history: list[dict[str, Any]] = None) -> str:
  """
  Process the prompt with thinking.

  Args:
  prompt: The original prompt
- answers: Answers to questions
+ answers: Question-answer pairs or simple answers
  agent_id: The agent session identifier
  token_summary: Current token usage summary
+ conversation_history: Optional conversation history for dialog context

  Returns:
  The AI's result
  """
- # Format answers for the prompt
+ if conversation_history is None:
+ conversation_history = []
+ # Format answers for the prompt with enhanced context
  answers_text = ""
- if isinstance(answers, dict) and answers:
+ if isinstance(answers, list) and answers:
+ # Check if it's a list of question-answer pairs (enhanced format)
+ if answers and isinstance(answers[0], dict) and "question" in answers[0]:
+ answers_text = "\n\nQuestion-Answer Context:\n"
+ answers_text += "The following questions were asked to gather more information, along with the answers provided:\n\n"
+ for i, qa_pair in enumerate(answers, 1):
+ question = qa_pair.get("question", "Unknown question")
+ answer = qa_pair.get("answer", "No answer provided")
+ key = qa_pair.get("key", "")
+ required = qa_pair.get("required", True)
+
+ status_marker = "🔴 REQUIRED" if required else "🟡 OPTIONAL"
+ answers_text += f"{i}. {status_marker} Question: {question}\n"
+ answers_text += f" Answer: {answer}\n"
+ if key:
+ answers_text += f" (Key: {key})\n"
+ answers_text += "\n"
+ else:
+ # Legacy format - simple list
+ answers_text = f"\n\nAdditional information: {', '.join(str(a) for a in answers)}\n"
+ elif isinstance(answers, dict) and answers:
+ # Legacy format - simple dict
  answers_text = "\n\nAdditional information provided:\n"
  for key, answer in answers.items():
  answers_text += f"- {key}: {answer}\n"
- elif isinstance(answers, list) and answers:
- answers_text = f"\n\nAdditional information: {', '.join(answers)}\n"

  thinking_prompt = f"""
  Think about this prompt, the goal and the steps required to fulfill it:
@@ -360,15 +508,19 @@ Think about this prompt, the goal and the steps required to fulfill it:
  ==========
  {answers_text}

- Consider the prompt carefully, analyze what the user wants to achieve, and think through the best approach to provide a comprehensive and helpful response. Use any available tools to gather information or perform actions that would improve your response.
+ Consider the prompt carefully, analyze what the user wants to achieve, and think through the best approach to provide a comprehensive and helpful response.
+
+ IMPORTANT: When formulating your response, take into account both the original prompt AND the specific questions that were asked along with their answers. The questions reveal what additional information was deemed necessary, and the answers provide crucial context that should inform your response.
+
+ Use any available tools to gather information or perform actions that would improve your response.

- Provide your best result for the given prompt.
+ Provide your best result for the given prompt, incorporating all the context from the question-answer pairs.
  """

  messages = [{"role": "user", "content": thinking_prompt}]

  try:
- response = self._call_ai_parse(messages, Result)
+ response = self._call_ai_parse(messages, Result, conversation_history)
  result_obj = self._extract_parsed_content(response, Result)

  # Track token usage for thinking process
@@ -380,20 +532,24 @@ Provide your best result for the given prompt.
  logger.error(f"Error in thinking process: {e}")
  raise RuntimeError(f"Failed to process prompt with AI client: {e}") from e

- def _quality_check_and_iterate(self, prompt: str, result: str, answers: Union[list, dict[str, str]], agent_id: str, token_summary: TokenSummary) -> str:
+ def _quality_check_and_iterate(self, prompt: str, result: str, answers: Union[list, dict[str, str]], agent_id: str, token_summary: TokenSummary, conversation_history: list[dict[str, Any]] = None) -> str:
  """
  Check the quality of the result and iterate if needed.

  Args:
  prompt: The original prompt
  result: The current result
- answers: The answers provided
+ answers: Question-answer pairs or simple answers
  agent_id: The agent session identifier
  token_summary: Current token usage summary
+ conversation_history: Optional conversation history for dialog context

  Returns:
  The final improved result
  """
+ if conversation_history is None:
+ conversation_history = []
+
  current_result = result

  if self._max_iterations == 0:
@@ -402,13 +558,16 @@ Provide your best result for the given prompt.

  logger.info(f"🔍 Agent {agent_id}: Starting quality check and improvement process (max iterations: {self._max_iterations})")

+ # Format context information for quality checks
+ context_text = self._format_qa_context_for_quality_check(answers)
+
  for iteration in range(self._max_iterations):
  quality_prompt = f"""
  Given this original prompt:
  ==========
  {prompt}
  ==========
-
+ {context_text}
  And this result:
  ==========
  {current_result}
@@ -416,13 +575,15 @@ And this result:

  Is this result good and comprehensive, or does it need to be improved? Consider if the response fully addresses the prompt, provides sufficient detail, and would be helpful to the user.

+ IMPORTANT: Also evaluate whether the result properly incorporates and addresses the information provided through the question-answer pairs above. The response should demonstrate that it has taken this additional context into account.
+
  Evaluate the quality and provide feedback if improvements are needed.
  """

  messages = [{"role": "user", "content": quality_prompt}]

  try:
- response = self._call_ai_parse(messages, QualityCheck)
+ response = self._call_ai_parse(messages, QualityCheck, conversation_history)
  quality_check = self._extract_parsed_content(response, QualityCheck)

  # Track token usage for quality check
@@ -441,7 +602,7 @@ The original prompt was:
  ==========
  {prompt}
  ==========
-
+ {context_text}
  The current result is:
  ==========
  {current_result}
@@ -452,11 +613,11 @@ Feedback for improvement:
  {quality_check.feedback}
  ==========

- Please provide an improved version that addresses the feedback while maintaining the strengths of the current result.
+ Please provide an improved version that addresses the feedback while maintaining the strengths of the current result. Make sure to incorporate all the context from the question-answer pairs above.
  """

  messages = [{"role": "user", "content": improvement_prompt}]
- improvement_response = self._call_ai_parse(messages, Result)
+ improvement_response = self._call_ai_parse(messages, Result, conversation_history)
  result_obj = self._extract_parsed_content(improvement_response, Result)
  current_result = result_obj.result

@@ -476,7 +637,7 @@ Please provide an improved version that addresses the feedback while maintaining

  return current_result

- def _generate_final_response(self, prompt: str, result: str, final_response_structure: Type[BaseModel], agent_id: str, token_summary: TokenSummary) -> BaseModel:
+ def _generate_final_response(self, prompt: str, result: str, final_response_structure: Type[BaseModel], agent_id: str, token_summary: TokenSummary, conversation_history: list[dict[str, Any]] = None) -> BaseModel:
  """
  Generate the final response in the requested format.

@@ -486,10 +647,13 @@ Please provide an improved version that addresses the feedback while maintaining
  final_response_structure: The expected response structure
  agent_id: The agent session identifier
  token_summary: Current token usage summary
+ conversation_history: Optional conversation history for dialog context

  Returns:
  The final response in the requested format
  """
+ if conversation_history is None:
+ conversation_history = []
  final_prompt = f"""
  Given this original prompt:
  ==========
@@ -507,7 +671,7 @@ Generate the final answer in the exact format requested. Make sure the response
  messages = [{"role": "user", "content": final_prompt}]

  try:
- response = self._call_ai_parse(messages, final_response_structure)
+ response = self._call_ai_parse(messages, final_response_structure, conversation_history)
  final_response = self._extract_parsed_content(response, final_response_structure)

  # Track token usage for final response generation
@@ -534,3 +698,49 @@ Generate the final answer in the exact format requested. Make sure the response
  logger.error(f"Fallback response creation failed: {fallback_error}")
  # Last resort - return the structure with default values
  return final_response_structure()
+
+ def get_session_info(self, agent_id: str) -> dict[str, Any]:
+ """
+ Get information about an agent session.
+
+ Args:
+ agent_id: The agent session identifier
+
+ Returns:
+ Session information dictionary
+
+ Raises:
+ ValueError: If the agent session is not found
+ """
+ if agent_id not in self._agent_sessions:
+ raise ValueError(f"Agent session {agent_id} not found")
+
+ session = self._agent_sessions[agent_id].copy()
+ # Remove sensitive information and add summary
+ session["conversation_length"] = len(session.get("conversation_history", []))
+ return session
+
+ def delete_session(self, agent_id: str) -> bool:
+ """
+ Delete an agent session.
+
+ Args:
+ agent_id: The agent session identifier
+
+ Returns:
+ True if session was deleted, False if it didn't exist
+ """
+ if agent_id in self._agent_sessions:
+ del self._agent_sessions[agent_id]
+ logger.info(f"🗑️ Deleted agent session {agent_id}")
+ return True
+ return False
+
+ def list_sessions(self) -> list[str]:
+ """
+ List all active agent session IDs.
+
+ Returns:
+ List of agent session IDs
+ """
+ return list(self._agent_sessions.keys())
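Taken together, the client.py changes above replace `answer_to_agent` with a single `agent()` entry point plus the session helpers `get_session_info`, `delete_session`, and `list_sessions`. A condensed usage sketch under the same assumptions as the examples that follow (a valid OPENROUTER_API_KEY; `Reply` is a hypothetical response model):

import os
from pydantic import BaseModel, Field
from mbxai import AgentClient, OpenRouterClient, AnswerList, Answer

class Reply(BaseModel):
    """Hypothetical response model for this sketch."""
    response: str = Field(description="The response text")

agent = AgentClient(OpenRouterClient(token=os.environ["OPENROUTER_API_KEY"]))

# Turn 1: may come back with clarifying questions instead of a final answer.
r1 = agent.agent("Plan a trip", Reply, ask_questions=True)

# Turn 2: answers go through the same agent() call, keyed to the session.
if r1.has_questions():
    answers = AnswerList(answers=[Answer(key=q.key, answer="...") for q in r1.questions])
    r1 = agent.agent("Here are my answers", Reply, ask_questions=False,
                     agent_id=r1.agent_id, answers=answers)

# Session helpers introduced in this release:
print(agent.list_sessions())
print(agent.get_session_info(r1.agent_id)["conversation_length"])
agent.delete_session(r1.agent_id)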
{mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/examples/agent_example.py
@@ -48,8 +48,8 @@ def example_with_questions():
  Answer(key="recent_books", answer="I recently read and loved Dune and The Name of the Wind")
  ])

- # Continue the agent process with answers
- final_response = agent.answer_to_agent(response.agent_id, answers)
+ # Continue the agent process with answers using the unified interface
+ final_response = agent.agent("Continue with previous questions", BookRecommendation, ask_questions=False, agent_id=response.agent_id, answers=answers)

  if final_response.is_complete():
  book_rec = final_response.final_response
@@ -126,6 +126,33 @@ def example_with_tool_client():
  print(f"- {rec}")


+ def example_dialog_conversation():
+ """Example demonstrating persistent dialog functionality."""
+ print("Example of persistent dialog conversation:")
+
+ openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
+ agent = AgentClient(openrouter_client, max_iterations=1)
+
+ # Start conversation
+ response1 = agent.agent("I want a book recommendation for science fiction", BookRecommendation, ask_questions=False)
+ agent_id = response1.agent_id
+
+ if response1.is_complete():
+ book1 = response1.final_response
+ print(f"First recommendation: {book1.title} by {book1.author}")
+
+ # Continue conversation with same agent_id
+ response2 = agent.agent("Can you recommend something by a different author in the same genre?", BookRecommendation, ask_questions=False, agent_id=agent_id)
+
+ if response2.is_complete():
+ book2 = response2.final_response
+ print(f"Second recommendation: {book2.title} by {book2.author}")
+ print(f"Context: {book2.reason}")
+
+ # Clean up session when done
+ agent.delete_session(agent_id)
+
+
  if __name__ == "__main__":
  print("=== Agent Client Examples ===\n")
@@ -150,3 +177,11 @@ if __name__ == "__main__":
  example_with_tool_client()
  except Exception as e:
  print(f"Error: {e}")
+
+ print("\n" + "="*50 + "\n")
+
+ print("4. Example with persistent dialog:")
+ try:
+ example_dialog_conversation()
+ except Exception as e:
+ print(f"Error: {e}")
{mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/examples/agent_logging_example.py
@@ -74,7 +74,7 @@ def demonstrate_agent_with_questions():
  ])

  print(f"\n📝 Providing answers and continuing...")
- final_response = agent.answer_to_agent(response.agent_id, answers)
+ final_response = agent.agent("Continue with answers", WeatherResponse, ask_questions=False, agent_id=response.agent_id, answers=answers)

  if final_response.is_complete():
  print("\n✅ Final response received!")
mbxai-2.1.1/src/mbxai/examples/conversation_history_test.py (new file)
@@ -0,0 +1,169 @@
+ """
+ Test example demonstrating conversation history persistence across multiple interactions.
+ """
+
+ import os
+ from pydantic import BaseModel, Field
+ from mbxai import AgentClient, OpenRouterClient, AnswerList, Answer
+
+
+ class StoryResponse(BaseModel):
+ """A story response that should reference previous conversation."""
+ story_part: str = Field(description="The current part of the story")
+ references_to_previous: str = Field(description="How this connects to our previous conversation")
+ conversation_context: str = Field(description="Summary of what was discussed before")
+
+
+ class ChatResponse(BaseModel):
+ """A general chat response."""
+ response: str = Field(description="The response to the user's message")
+ context_awareness: str = Field(description="What the AI remembers from our conversation")
+
+
+ def test_conversation_history_persistence():
+ """Test that conversation history persists and is used across multiple interactions."""
+ print("🧪 TESTING: Conversation History Persistence")
+ print("=" * 60)
+
+ # Initialize the clients
+ openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
+ agent = AgentClient(openrouter_client, max_iterations=1)
+
+ # First interaction - establish context
+ print("\n1️⃣ First interaction - setting up story context:")
+ prompt1 = "I want to tell a collaborative story about a space explorer named Luna who discovers an ancient alien artifact on Mars."
+ response1 = agent.agent(prompt1, StoryResponse, ask_questions=False)
+
+ agent_id = response1.agent_id
+ print(f"Agent ID: {agent_id}")
+
+ if response1.is_complete():
+ story1 = response1.final_response
+ print(f"Story Part 1: {story1.story_part[:200]}...")
+ print(f"Context awareness: {story1.context_awareness}")
+
+ # Check session info
+ session_info = agent.get_session_info(agent_id)
+ print(f"📊 Session after first interaction: {session_info['conversation_length']} messages")
+
+ # Second interaction - continue story, should reference Luna and the artifact
+ print(f"\n2️⃣ Second interaction - continuing story (should remember Luna and artifact):")
+ prompt2 = "Luna touches the artifact and something amazing happens. Continue the story."
+ response2 = agent.agent(prompt2, StoryResponse, ask_questions=False, agent_id=agent_id)
+
+ if response2.is_complete():
+ story2 = response2.final_response
+ print(f"Story Part 2: {story2.story_part[:200]}...")
+ print(f"References to previous: {story2.references_to_previous}")
+ print(f"Conversation context: {story2.conversation_context}")
+
+ # Check session info
+ session_info = agent.get_session_info(agent_id)
+ print(f"📊 Session after second interaction: {session_info['conversation_length']} messages")
+
+ # Third interaction - change topic but should still remember story context
+ print(f"\n3️⃣ Third interaction - changing topic (should still remember our story):")
+ prompt3 = "Actually, let's pause the story. What do you think Luna's personality is like based on our story so far?"
+ response3 = agent.agent(prompt3, ChatResponse, ask_questions=False, agent_id=agent_id)
+
+ if response3.is_complete():
+ chat3 = response3.final_response
+ print(f"Response: {chat3.response}")
+ print(f"Context awareness: {chat3.context_awareness}")
+
+ # Check session info
+ session_info = agent.get_session_info(agent_id)
+ print(f"📊 Session after third interaction: {session_info['conversation_length']} messages")
+
+ # Fourth interaction - return to story, should remember everything
+ print(f"\n4️⃣ Fourth interaction - returning to story (should remember all previous context):")
+ prompt4 = "Great! Now let's continue Luna's story from where we left off. What happens next with the artifact?"
+ response4 = agent.agent(prompt4, StoryResponse, ask_questions=False, agent_id=agent_id)
+
+ if response4.is_complete():
+ story4 = response4.final_response
+ print(f"Story Part 4: {story4.story_part[:200]}...")
+ print(f"References to previous: {story4.references_to_previous}")
+ print(f"Conversation context: {story4.conversation_context}")
+
+ # Final session info
+ session_info = agent.get_session_info(agent_id)
+ print(f"📊 Final session state: {session_info['conversation_length']} messages")
+ print(f"Session step: {session_info.get('step', 'unknown')}")
+
+ # Display full conversation history
+ print(f"\n💬 FULL CONVERSATION HISTORY:")
+ session_info = agent.get_session_info(agent_id)
+ history = session_info.get('conversation_history', [])
+ for i, msg in enumerate(history, 1):
+ role = msg['role'].upper()
+ content = msg['content'][:100] + "..." if len(msg['content']) > 100 else msg['content']
+ print(f"{i:2d}. {role}: {content}")
+
+ # Cleanup
+ print(f"\n🗑️ Cleaning up session {agent_id}")
+ agent.delete_session(agent_id)
+
+
+ def test_with_questions_and_history():
+ """Test conversation history with questions and answers."""
+ print("\n🧪 TESTING: Questions + Answers + History")
+ print("=" * 60)
+
+ openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
+ agent = AgentClient(openrouter_client, max_iterations=1)
+
+ # Start with questions
+ print("\n1️⃣ Starting conversation with questions:")
+ response1 = agent.agent("I want a personalized workout plan", ChatResponse, ask_questions=True)
+
+ agent_id = response1.agent_id
+ print(f"Agent ID: {agent_id}")
+
+ if response1.has_questions():
+ print(f"📋 Generated {len(response1.questions)} questions:")
+ for q in response1.questions:
+ print(f" - {q.question}")
+
+ # Answer questions
+ answers = AnswerList(answers=[
+ Answer(key="fitness_level", answer="Beginner"),
+ Answer(key="goals", answer="Weight loss and muscle building"),
+ Answer(key="time_available", answer="30 minutes per day, 4 days a week"),
+ Answer(key="equipment", answer="Home gym with dumbbells and resistance bands")
+ ])
+
+ print(f"\n2️⃣ Providing answers:")
+ response2 = agent.agent("Here are my answers", ChatResponse, ask_questions=False, agent_id=agent_id, answers=answers)
+
+ if response2.is_complete():
+ workout_plan = response2.final_response
+ print(f"Workout plan: {workout_plan.response[:200]}...")
+ print(f"Context awareness: {workout_plan.context_awareness}")
+
+ # Continue conversation
+ print(f"\n3️⃣ Follow-up question (should remember all previous context):")
+ response3 = agent.agent("Can you modify this plan to focus more on cardio?", ChatResponse, ask_questions=False, agent_id=agent_id)
+
+ if response3.is_complete():
+ modified_plan = response3.final_response
+ print(f"Modified plan: {modified_plan.response[:200]}...")
+ print(f"Context awareness: {modified_plan.context_awareness}")
+
+ # Show history
+ session_info = agent.get_session_info(agent_id)
+ print(f"\n💬 Conversation had {session_info['conversation_length']} messages")
+
+ # Cleanup
+ agent.delete_session(agent_id)
+
+
+ if __name__ == "__main__":
+ try:
+ test_conversation_history_persistence()
+ print("\n" + "="*80 + "\n")
+ test_with_questions_and_history()
+ except Exception as e:
+ print(f"Error: {e}")
+ import traceback
+ traceback.print_exc()
mbxai-2.1.1/src/mbxai/examples/dialog_agent_example.py (new file)
@@ -0,0 +1,157 @@
+ """
+ Example usage of the AgentClient with persistent dialog sessions.
+ """
+
+ import os
+ from pydantic import BaseModel, Field
+ from mbxai import AgentClient, OpenRouterClient, AnswerList, Answer
+
+
+ class ChatResponse(BaseModel):
+ """A general chat response."""
+ response: str = Field(description="The response to the user's message")
+ context_awareness: str = Field(description="How this response relates to previous conversation")
+
+
+ class BookRecommendation(BaseModel):
+ """A book recommendation response."""
+ title: str = Field(description="The title of the recommended book")
+ author: str = Field(description="The author of the book")
+ genre: str = Field(description="The genre of the book")
+ reason: str = Field(description="Why this book is recommended based on conversation")
+ connection_to_previous: str = Field(description="How this recommendation connects to our previous conversation")
+
+
+ def demonstrate_dialog_conversation():
+ """Demonstrate persistent dialog functionality."""
+ print("🔄 DEMO: Persistent Dialog Agent")
+ print("=" * 50)
+
+ # Initialize the clients
+ openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
+ agent = AgentClient(openrouter_client, max_iterations=1)
+
+ # First conversation - start new session
+ print("\n1️⃣ Starting new conversation:")
+ prompt1 = "Hi, I'm looking for a good book to read. I love science fiction."
+ response1 = agent.agent(prompt1, ChatResponse, ask_questions=False)
+
+ if response1.is_complete():
+ agent_id = response1.agent_id
+ chat_resp = response1.final_response
+ print(f"Agent ID: {agent_id}")
+ print(f"Response: {chat_resp.response}")
+ print(f"Context awareness: {chat_resp.context_awareness}")
+
+ # Second conversation - continue same session
+ print(f"\n2️⃣ Continuing conversation with agent {agent_id}:")
+ prompt2 = "Actually, I also enjoy fantasy novels. What would you recommend that combines both genres?"
+ response2 = agent.agent(prompt2, BookRecommendation, ask_questions=False, agent_id=agent_id)
+
+ if response2.is_complete():
+ book_rec = response2.final_response
+ print(f"Book: {book_rec.title} by {book_rec.author}")
+ print(f"Genre: {book_rec.genre}")
+ print(f"Reason: {book_rec.reason}")
+ print(f"Connection to previous: {book_rec.connection_to_previous}")
+
+ # Third conversation - continue same session
+ print(f"\n3️⃣ Continuing conversation with agent {agent_id}:")
+ prompt3 = "That sounds great! Can you recommend something similar but from a different author?"
+ response3 = agent.agent(prompt3, BookRecommendation, ask_questions=False, agent_id=agent_id)
+
+ if response3.is_complete():
+ book_rec2 = response3.final_response
+ print(f"Book: {book_rec2.title} by {book_rec2.author}")
+ print(f"Genre: {book_rec2.genre}")
+ print(f"Reason: {book_rec2.reason}")
+ print(f"Connection to previous: {book_rec2.connection_to_previous}")
+
+ # Show session info
+ print(f"\n📊 Session Information:")
+ try:
+ session_info = agent.get_session_info(agent_id)
+ print(f"Conversation length: {session_info['conversation_length']} messages")
+ print(f"Session step: {session_info.get('step', 'unknown')}")
+ except Exception as e:
+ print(f"Error getting session info: {e}")
+
+ # List all sessions
+ print(f"\n📝 Active sessions: {agent.list_sessions()}")
+
+ # Cleanup - optional
+ print(f"\n🗑️ Cleaning up session...")
+ deleted = agent.delete_session(agent_id)
+ print(f"Session deleted: {deleted}")
+ print(f"Active sessions after cleanup: {agent.list_sessions()}")
+
+
+ def demonstrate_dialog_with_questions():
+ """Demonstrate dialog with question-answer flow."""
+ print("\n🔄 DEMO: Dialog Agent with Questions")
+ print("=" * 50)
+
+ # Initialize the clients
+ openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
+ agent = AgentClient(openrouter_client, max_iterations=1)
+
+ # First conversation with questions
+ print("\n1️⃣ Starting conversation with questions:")
+ prompt1 = "I want a personalized book recommendation"
+ response1 = agent.agent(prompt1, BookRecommendation, ask_questions=True)
+
+ agent_id = response1.agent_id
+ print(f"Agent ID: {agent_id}")
+
+ if response1.has_questions():
+ print(f"\n📋 Agent generated {len(response1.questions)} questions:")
+ for i, question in enumerate(response1.questions, 1):
+ print(f" {i}. {question.question} (key: {question.key})")
+
+ # Simulate answering questions
+ answers = AnswerList(answers=[
+ Answer(key="genre_preference", answer="I love science fiction and fantasy"),
+ Answer(key="reading_level", answer="I prefer complex, adult novels"),
+ Answer(key="recent_books", answer="I recently read and loved Dune and The Name of the Wind")
+ ])
+
+ print(f"\n📝 Providing answers...")
+ final_response = agent.agent("Continue with answers", BookRecommendation, ask_questions=False, agent_id=agent_id, answers=answers)
+
+ if final_response.is_complete():
+ book_rec = final_response.final_response
+ print(f"Book: {book_rec.title} by {book_rec.author}")
+ print(f"Genre: {book_rec.genre}")
+ print(f"Reason: {book_rec.reason}")
+
+ # Continue conversation - this should remember the previous interaction
+ print(f"\n2️⃣ Continuing conversation with agent {agent_id}:")
+ prompt2 = "Thank you! Can you also recommend something by a female author in the same genres?"
+ response2 = agent.agent(prompt2, BookRecommendation, ask_questions=False, agent_id=agent_id)
+
+ if response2.is_complete():
+ book_rec2 = response2.final_response
+ print(f"Book: {book_rec2.title} by {book_rec2.author}")
+ print(f"Genre: {book_rec2.genre}")
+ print(f"Reason: {book_rec2.reason}")
+ print(f"Connection to previous: {book_rec2.connection_to_previous}")
+
+ # Session cleanup
+ print(f"\n🗑️ Session cleanup...")
+ agent.delete_session(agent_id)
+
+
+ if __name__ == "__main__":
+ print("=== Dialog Agent Examples ===\n")
+
+ try:
+ demonstrate_dialog_conversation()
+ except Exception as e:
+ print(f"Error in dialog conversation demo: {e}")
+
+ print("\n" + "="*80 + "\n")
+
+ try:
+ demonstrate_dialog_with_questions()
+ except Exception as e:
+ print(f"Error in dialog with questions demo: {e}")
mbxai-2.1.1/src/mbxai/examples/unified_interface_example.py (new file)
@@ -0,0 +1,109 @@
+ """
+ Example demonstrating the new unified agent interface.
+ """
+
+ import os
+ from pydantic import BaseModel, Field
+ from mbxai import AgentClient, OpenRouterClient, AnswerList, Answer
+
+
+ class SimpleResponse(BaseModel):
+ """A simple response."""
+ response: str = Field(description="The response text")
+ context_used: str = Field(description="How context was used in this response")
+
+
+ def demonstrate_unified_interface():
+ """Demonstrate the unified agent interface with and without questions."""
+ print("🔧 DEMO: Unified Agent Interface")
+ print("=" * 50)
+
+ # Initialize the clients
+ openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
+ agent = AgentClient(openrouter_client, max_iterations=1)
+
+ # Example 1: Start conversation with questions
+ print("\n1️⃣ Starting conversation that generates questions:")
+ response1 = agent.agent("I need help planning a trip", SimpleResponse, ask_questions=True)
+
+ agent_id = response1.agent_id
+ print(f"Agent ID: {agent_id}")
+
+ if response1.has_questions():
+ print(f"📋 Generated {len(response1.questions)} questions:")
+ for q in response1.questions:
+ print(f" - {q.question} (key: {q.key})")
+
+ # Example 2: Provide answers using the unified interface
+ print(f"\n2️⃣ Providing answers using unified interface:")
+ answers = AnswerList(answers=[
+ Answer(key="destination", answer="Japan"),
+ Answer(key="duration", answer="10 days"),
+ Answer(key="budget", answer="$3000"),
+ Answer(key="interests", answer="culture, food, temples")
+ ])
+
+ response2 = agent.agent(
+ "Now help me plan the trip",
+ SimpleResponse,
+ ask_questions=False,
+ agent_id=agent_id,
+ answers=answers
+ )
+
+ if response2.is_complete():
+ trip_plan = response2.final_response
+ print(f"Response: {trip_plan.response}")
+ print(f"Context used: {trip_plan.context_used}")
+
+ # Example 3: Continue the conversation
+ print(f"\n3️⃣ Continuing conversation without questions:")
+ response3 = agent.agent(
+ "What about transportation within Japan?",
+ SimpleResponse,
+ ask_questions=False,
+ agent_id=agent_id
+ )
+
+ if response3.is_complete():
+ transport_info = response3.final_response
+ print(f"Response: {transport_info.response}")
+ print(f"Context used: {transport_info.context_used}")
+
+ # Example 4: Using answers without previous questions (new session)
+ print(f"\n4️⃣ Starting new session with direct answers (no questions):")
+ new_answers = AnswerList(answers=[
+ Answer(key="city", answer="Tokyo"),
+ Answer(key="travel_style", answer="luxury"),
+ Answer(key="group_size", answer="2 people")
+ ])
+
+ response4 = agent.agent(
+ "Recommend restaurants",
+ SimpleResponse,
+ ask_questions=False,
+ answers=new_answers # New session, no agent_id provided
+ )
+
+ if response4.is_complete():
+ restaurant_info = response4.final_response
+ print(f"New Agent ID: {response4.agent_id}")
+ print(f"Response: {restaurant_info.response}")
+ print(f"Context used: {restaurant_info.context_used}")
+
+ # Show active sessions
+ print(f"\n📊 Active Sessions: {agent.list_sessions()}")
+
+ # Cleanup
+ print(f"\n🗑️ Cleaning up sessions...")
+ agent.delete_session(agent_id)
+ if response4.agent_id != agent_id:
+ agent.delete_session(response4.agent_id)
+ print(f"Active Sessions after cleanup: {agent.list_sessions()}")
+
+
+ if __name__ == "__main__":
+ try:
+ demonstrate_unified_interface()
+ except Exception as e:
+ print(f"Error: {e}")
{mbxai-2.0.5 → mbxai-2.1.1}/src/mbxai/mcp/server.py
@@ -31,7 +31,7 @@ class MCPServer:
  self.app = FastAPI(
  title=self.name,
  description=self.description,
- version="2.0.5",
+ version="2.1.1",
  )

  # Initialize MCP server