mbxai 2.0.5__tar.gz → 2.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (44)
  1. {mbxai-2.0.5 → mbxai-2.1.0}/PKG-INFO +1 -1
  2. {mbxai-2.0.5 → mbxai-2.1.0}/pyproject.toml +1 -1
  3. {mbxai-2.0.5 → mbxai-2.1.0}/setup.py +1 -1
  4. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/__init__.py +1 -1
  5. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/agent/client.py +202 -37
  6. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/examples/agent_example.py +35 -0
  7. mbxai-2.1.0/src/mbxai/examples/dialog_agent_example.py +157 -0
  8. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/mcp/server.py +1 -1
  9. {mbxai-2.0.5 → mbxai-2.1.0}/.gitignore +0 -0
  10. {mbxai-2.0.5 → mbxai-2.1.0}/LICENSE +0 -0
  11. {mbxai-2.0.5 → mbxai-2.1.0}/README.md +0 -0
  12. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/agent/__init__.py +0 -0
  13. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/agent/models.py +0 -0
  14. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/core.py +0 -0
  15. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/examples/agent_iterations_example.py +0 -0
  16. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/examples/agent_logging_example.py +0 -0
  17. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/examples/agent_tool_registration_example.py +0 -0
  18. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/examples/agent_validation_example.py +0 -0
  19. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/examples/auto_schema_example.py +0 -0
  20. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/examples/mcp/mcp_client_example.py +0 -0
  21. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/examples/mcp/mcp_server_example.py +0 -0
  22. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/examples/openrouter_example.py +0 -0
  23. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/examples/parse_example.py +0 -0
  24. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/examples/parse_tool_example.py +0 -0
  25. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/examples/request.json +0 -0
  26. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/examples/response.json +0 -0
  27. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/examples/send_request.py +0 -0
  28. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/examples/simple_agent_test.py +0 -0
  29. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/examples/tool_client_example.py +0 -0
  30. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/mcp/__init__.py +0 -0
  31. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/mcp/client.py +0 -0
  32. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/mcp/example.py +0 -0
  33. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/openrouter/__init__.py +0 -0
  34. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/openrouter/client.py +0 -0
  35. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/openrouter/config.py +0 -0
  36. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/openrouter/models.py +0 -0
  37. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/openrouter/schema.py +0 -0
  38. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/tools/__init__.py +0 -0
  39. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/tools/client.py +0 -0
  40. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/tools/example.py +0 -0
  41. {mbxai-2.0.5 → mbxai-2.1.0}/src/mbxai/tools/types.py +0 -0
  42. {mbxai-2.0.5 → mbxai-2.1.0}/tests/test_mcp_tool_registration.py +0 -0
  43. {mbxai-2.0.5 → mbxai-2.1.0}/tests/test_real_mcp_schema.py +0 -0
  44. {mbxai-2.0.5 → mbxai-2.1.0}/tests/test_schema_conversion.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: mbxai
- Version: 2.0.5
+ Version: 2.1.0
  Summary: MBX AI SDK
  Project-URL: Homepage, https://www.mibexx.de
  Project-URL: Documentation, https://www.mibexx.de
pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"

  [project]
  name = "mbxai"
- version = "2.0.5"
+ version = "2.1.0"
  authors = [
      { name = "MBX AI" }
  ]
setup.py
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages

  setup(
      name="mbxai",
-     version="2.0.5",
+     version="2.1.0",
      author="MBX AI",
      description="MBX AI SDK",
      long_description=open("README.md").read(),
src/mbxai/__init__.py
@@ -7,7 +7,7 @@ from .openrouter import OpenRouterClient
  from .tools import ToolClient
  from .mcp import MCPClient

- __version__ = "2.0.5"
+ __version__ = "2.1.0"

  __all__ = [
      "AgentClient",
src/mbxai/agent/client.py
@@ -123,9 +123,14 @@ class AgentClient:
                  f"Use MCPClient to register MCP servers."
              )

-     def _call_ai_parse(self, messages: list[dict[str, Any]], response_format: Type[BaseModel]) -> Any:
-         """Call the parse method on the AI client."""
-         return self._ai_client.parse(messages, response_format)
+     def _call_ai_parse(self, messages: list[dict[str, Any]], response_format: Type[BaseModel], conversation_history: list[dict[str, Any]] = None) -> Any:
+         """Call the parse method on the AI client with optional conversation history."""
+         # Combine conversation history with new messages
+         if conversation_history:
+             full_messages = conversation_history + messages
+         else:
+             full_messages = messages
+         return self._ai_client.parse(full_messages, response_format)

      def _extract_token_usage(self, response: Any) -> TokenUsage:
          """Extract token usage information from an AI response."""
@@ -179,7 +184,8 @@
          self,
          prompt: str,
          final_response_structure: Type[BaseModel],
-         ask_questions: bool = True
+         ask_questions: bool = True,
+         agent_id: str = None
      ) -> AgentResponse:
          """
          Process a prompt through the agent's thinking process.
@@ -188,16 +194,25 @@
              prompt: The initial prompt from the user
              final_response_structure: Pydantic model defining the expected final response format
              ask_questions: Whether to ask clarifying questions (default: True)
+             agent_id: Optional agent session ID to continue an existing conversation

          Returns:
              AgentResponse containing either questions to ask or the final response
          """
-         agent_id = str(__import__("uuid").uuid4())
-         logger.info(f"🚀 Starting agent process (ID: {agent_id}) with prompt: {prompt[:100]}...")
+         # Use provided agent_id or create a new one
+         if agent_id is None:
+             agent_id = str(__import__("uuid").uuid4())
+             logger.info(f"🚀 Starting new agent process (ID: {agent_id}) with prompt: {prompt[:100]}...")
+         else:
+             logger.info(f"🔄 Continuing agent process (ID: {agent_id}) with prompt: {prompt[:100]}...")

          # Initialize token summary
          token_summary = TokenSummary()

+         # Check if this is a continuing conversation
+         existing_session = self._agent_sessions.get(agent_id, {})
+         conversation_history = existing_session.get("conversation_history", [])
+
          # Step 1: Generate questions (if ask_questions is True)
          if ask_questions:
              logger.info(f"❓ Agent {agent_id}: Analyzing prompt and generating clarifying questions")
@@ -221,7 +236,7 @@ IMPORTANT: For each question, provide a technical key identifier that:
          messages = [{"role": "user", "content": questions_prompt}]

          try:
-             response = self._call_ai_parse(messages, QuestionList)
+             response = self._call_ai_parse(messages, QuestionList, conversation_history)
              question_list = self._extract_parsed_content(response, QuestionList)

              # Extract token usage for question generation
@@ -248,7 +263,7 @@

          # Step 2 & 3: No questions or ask_questions=False - proceed directly
          logger.info(f"⚡ Agent {agent_id}: No questions needed, proceeding directly to processing")
-         return self._process_with_answers(prompt, final_response_structure, [], agent_id, token_summary)
+         return self._process_with_answers(prompt, final_response_structure, [], agent_id, token_summary, conversation_history)

      def answer_to_agent(self, agent_id: str, answers: AnswerList) -> AgentResponse:
          """
@@ -271,35 +286,97 @@ IMPORTANT: For each question, provide a technical key identifier that:
          if session["step"] != "waiting_for_answers":
              raise ValueError(f"Agent session {agent_id} is not waiting for answers")

-         # Convert answers to a more usable format
+         # Convert answers to a more usable format and create question-answer pairs
          answer_dict = {answer.key: answer.answer for answer in answers.answers}
+         questions = session.get("questions", [])
+
+         # Create question-answer pairs for better context
+         qa_pairs = []
+         for question in questions:
+             answer_text = answer_dict.get(question.key, "No answer provided")
+             qa_pairs.append({
+                 "question": question.question,
+                 "key": question.key,
+                 "answer": answer_text,
+                 "required": question.required
+             })

          logger.info(f"📝 Agent {agent_id}: Received {len(answers.answers)} answers, continuing processing")

          # Get token summary from session
          token_summary = session.get("token_summary", TokenSummary())
+         conversation_history = session.get("conversation_history", [])

-         # Process with the provided answers
+         # Process with the provided answers and question context
          result = self._process_with_answers(
              session["original_prompt"],
              session["final_response_structure"],
-             answer_dict,
+             qa_pairs,
              agent_id,
-             token_summary
+             token_summary,
+             conversation_history
          )

-         # Clean up the session
-         del self._agent_sessions[agent_id]
+         # Update session with conversation history but don't delete it
+         if agent_id in self._agent_sessions:
+             self._agent_sessions[agent_id]["step"] = "completed"
+             self._agent_sessions[agent_id]["conversation_history"] = self._agent_sessions[agent_id].get("conversation_history", [])
+
+             # Add this interaction to history
+             self._agent_sessions[agent_id]["conversation_history"].extend([
+                 {"role": "user", "content": session["original_prompt"]},
+                 {"role": "assistant", "content": str(result.final_response) if result.final_response else "No response generated"}
+             ])

          return result

+     def _format_qa_context_for_quality_check(self, answers: Union[list, dict[str, str]]) -> str:
+         """
+         Format question-answer context for quality check and improvement prompts.
+
+         Args:
+             answers: Question-answer pairs or simple answers
+
+         Returns:
+             Formatted context text
+         """
+         if not answers:
+             return ""
+
+         if isinstance(answers, list) and answers:
+             # Check if it's a list of question-answer pairs (enhanced format)
+             if isinstance(answers[0], dict) and "question" in answers[0]:
+                 context_text = "\nContext Information (Questions & Answers):\n"
+                 context_text += "The response was generated with the following additional context:\n\n"
+                 for i, qa_pair in enumerate(answers, 1):
+                     question = qa_pair.get("question", "Unknown question")
+                     answer = qa_pair.get("answer", "No answer provided")
+                     required = qa_pair.get("required", True)
+
+                     status_marker = "🔴 REQUIRED" if required else "🟡 OPTIONAL"
+                     context_text += f"{i}. {status_marker} Q: {question}\n"
+                     context_text += f"   A: {answer}\n\n"
+                 return context_text
+             else:
+                 # Legacy format - simple list
+                 return f"\nAdditional context: {', '.join(str(a) for a in answers)}\n\n"
+         elif isinstance(answers, dict) and answers:
+             # Legacy format - simple dict
+             context_text = "\nAdditional context provided:\n"
+             for key, answer in answers.items():
+                 context_text += f"- {key}: {answer}\n"
+             return context_text + "\n"
+
+         return ""
+
      def _process_with_answers(
          self,
          prompt: str,
          final_response_structure: Type[BaseModel],
          answers: Union[list, dict[str, str]],
          agent_id: str,
-         token_summary: TokenSummary
+         token_summary: TokenSummary,
+         conversation_history: list[dict[str, Any]] = None
      ) -> AgentResponse:
          """
          Process the prompt with answers through the thinking pipeline.
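
For reference, each entry the loop above appends to qa_pairs has this shape (values illustrative), and _format_qa_context_for_quality_check renders such a list roughly as sketched in the comment:

    qa_pair = {
        "question": "Which genre do you prefer?",  # question text shown to the user
        "key": "genre_preference",                 # technical key from the Question model
        "answer": "Science fiction",               # user's answer, or "No answer provided"
        "required": True,                          # drives the 🔴/🟡 marker in the formatted context
    }
    # Formatted output (enhanced format), roughly:
    #
    # Context Information (Questions & Answers):
    # The response was generated with the following additional context:
    #
    # 1. 🔴 REQUIRED Q: Which genre do you prefer?
    #    A: Science fiction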
@@ -307,23 +384,27 @@ IMPORTANT: For each question, provide a technical key identifier that:
          Args:
              prompt: The original prompt
              final_response_structure: Expected final response structure
-             answers: Answers to questions (empty if no questions were asked)
+             answers: Question-answer pairs or simple answers (empty if no questions were asked)
              agent_id: The agent session identifier
              token_summary: Current token usage summary
+             conversation_history: Optional conversation history for dialog context

          Returns:
              AgentResponse with the final result
          """
+         if conversation_history is None:
+             conversation_history = []
+
          # Step 3: Process the prompt with thinking
          logger.info(f"🧠 Agent {agent_id}: Processing prompt and generating initial response")
-         result = self._think_and_process(prompt, answers, agent_id, token_summary)
+         result = self._think_and_process(prompt, answers, agent_id, token_summary, conversation_history)

          # Step 4: Quality check and iteration
-         final_result = self._quality_check_and_iterate(prompt, result, answers, agent_id, token_summary)
+         final_result = self._quality_check_and_iterate(prompt, result, answers, agent_id, token_summary, conversation_history)

          # Step 5: Generate final answer in requested format
          logger.info(f"📝 Agent {agent_id}: Generating final structured response")
-         final_response = self._generate_final_response(prompt, final_result, final_response_structure, agent_id, token_summary)
+         final_response = self._generate_final_response(prompt, final_result, final_response_structure, agent_id, token_summary, conversation_history)

          # Log final token summary
          logger.info(f"📊 Agent {agent_id}: Token usage summary - Total: {token_summary.total_tokens} "
@@ -331,27 +412,49 @@ IMPORTANT: For each question, provide a technical key identifier that:

          return AgentResponse(agent_id=agent_id, final_response=final_response, token_summary=token_summary)

-     def _think_and_process(self, prompt: str, answers: Union[list, dict[str, str]], agent_id: str, token_summary: TokenSummary) -> str:
+     def _think_and_process(self, prompt: str, answers: Union[list, dict[str, str]], agent_id: str, token_summary: TokenSummary, conversation_history: list[dict[str, Any]] = None) -> str:
          """
          Process the prompt with thinking.

          Args:
              prompt: The original prompt
-             answers: Answers to questions
+             answers: Question-answer pairs or simple answers
              agent_id: The agent session identifier
              token_summary: Current token usage summary
+             conversation_history: Optional conversation history for dialog context

          Returns:
              The AI's result
          """
-         # Format answers for the prompt
+         if conversation_history is None:
+             conversation_history = []
+         # Format answers for the prompt with enhanced context
          answers_text = ""
-         if isinstance(answers, dict) and answers:
+         if isinstance(answers, list) and answers:
+             # Check if it's a list of question-answer pairs (enhanced format)
+             if answers and isinstance(answers[0], dict) and "question" in answers[0]:
+                 answers_text = "\n\nQuestion-Answer Context:\n"
+                 answers_text += "The following questions were asked to gather more information, along with the answers provided:\n\n"
+                 for i, qa_pair in enumerate(answers, 1):
+                     question = qa_pair.get("question", "Unknown question")
+                     answer = qa_pair.get("answer", "No answer provided")
+                     key = qa_pair.get("key", "")
+                     required = qa_pair.get("required", True)
+
+                     status_marker = "🔴 REQUIRED" if required else "🟡 OPTIONAL"
+                     answers_text += f"{i}. {status_marker} Question: {question}\n"
+                     answers_text += f"   Answer: {answer}\n"
+                     if key:
+                         answers_text += f"   (Key: {key})\n"
+                     answers_text += "\n"
+             else:
+                 # Legacy format - simple list
+                 answers_text = f"\n\nAdditional information: {', '.join(str(a) for a in answers)}\n"
+         elif isinstance(answers, dict) and answers:
+             # Legacy format - simple dict
              answers_text = "\n\nAdditional information provided:\n"
              for key, answer in answers.items():
                  answers_text += f"- {key}: {answer}\n"
-         elif isinstance(answers, list) and answers:
-             answers_text = f"\n\nAdditional information: {', '.join(answers)}\n"

          thinking_prompt = f"""
  Think about this prompt, the goal and the steps required to fulfill it:
@@ -360,15 +463,19 @@ Think about this prompt, the goal and the steps required to fulfill it:
  ==========
  {answers_text}

- Consider the prompt carefully, analyze what the user wants to achieve, and think through the best approach to provide a comprehensive and helpful response. Use any available tools to gather information or perform actions that would improve your response.
+ Consider the prompt carefully, analyze what the user wants to achieve, and think through the best approach to provide a comprehensive and helpful response.
+
+ IMPORTANT: When formulating your response, take into account both the original prompt AND the specific questions that were asked along with their answers. The questions reveal what additional information was deemed necessary, and the answers provide crucial context that should inform your response.
+
+ Use any available tools to gather information or perform actions that would improve your response.

- Provide your best result for the given prompt.
+ Provide your best result for the given prompt, incorporating all the context from the question-answer pairs.
  """

          messages = [{"role": "user", "content": thinking_prompt}]

          try:
-             response = self._call_ai_parse(messages, Result)
+             response = self._call_ai_parse(messages, Result, conversation_history)
              result_obj = self._extract_parsed_content(response, Result)

              # Track token usage for thinking process
@@ -380,20 +487,24 @@ Provide your best result for the given prompt.
              logger.error(f"Error in thinking process: {e}")
              raise RuntimeError(f"Failed to process prompt with AI client: {e}") from e

-     def _quality_check_and_iterate(self, prompt: str, result: str, answers: Union[list, dict[str, str]], agent_id: str, token_summary: TokenSummary) -> str:
+     def _quality_check_and_iterate(self, prompt: str, result: str, answers: Union[list, dict[str, str]], agent_id: str, token_summary: TokenSummary, conversation_history: list[dict[str, Any]] = None) -> str:
          """
          Check the quality of the result and iterate if needed.

          Args:
              prompt: The original prompt
              result: The current result
-             answers: The answers provided
+             answers: Question-answer pairs or simple answers
              agent_id: The agent session identifier
              token_summary: Current token usage summary
+             conversation_history: Optional conversation history for dialog context

          Returns:
              The final improved result
          """
+         if conversation_history is None:
+             conversation_history = []
+
          current_result = result

          if self._max_iterations == 0:
@@ -402,13 +513,16 @@ Provide your best result for the given prompt.
          logger.info(f"🔍 Agent {agent_id}: Starting quality check and improvement process (max iterations: {self._max_iterations})")

+         # Format context information for quality checks
+         context_text = self._format_qa_context_for_quality_check(answers)
+
          for iteration in range(self._max_iterations):
              quality_prompt = f"""
  Given this original prompt:
  ==========
  {prompt}
  ==========
-
+ {context_text}
  And this result:
  ==========
  {current_result}
414
528
  {current_result}
@@ -416,13 +530,15 @@ And this result:
416
530
 
417
531
  Is this result good and comprehensive, or does it need to be improved? Consider if the response fully addresses the prompt, provides sufficient detail, and would be helpful to the user.
418
532
 
533
+ IMPORTANT: Also evaluate whether the result properly incorporates and addresses the information provided through the question-answer pairs above. The response should demonstrate that it has taken this additional context into account.
534
+
419
535
  Evaluate the quality and provide feedback if improvements are needed.
420
536
  """
421
537
 
422
538
  messages = [{"role": "user", "content": quality_prompt}]
423
539
 
424
540
  try:
425
- response = self._call_ai_parse(messages, QualityCheck)
541
+ response = self._call_ai_parse(messages, QualityCheck, conversation_history)
426
542
  quality_check = self._extract_parsed_content(response, QualityCheck)
427
543
 
428
544
  # Track token usage for quality check
@@ -441,7 +557,7 @@ The original prompt was:
441
557
  ==========
442
558
  {prompt}
443
559
  ==========
444
-
560
+ {context_text}
445
561
  The current result is:
446
562
  ==========
447
563
  {current_result}
@@ -452,11 +568,11 @@ Feedback for improvement:
452
568
  {quality_check.feedback}
453
569
  ==========
454
570
 
455
- Please provide an improved version that addresses the feedback while maintaining the strengths of the current result.
571
+ Please provide an improved version that addresses the feedback while maintaining the strengths of the current result. Make sure to incorporate all the context from the question-answer pairs above.
456
572
  """
457
573
 
458
574
  messages = [{"role": "user", "content": improvement_prompt}]
459
- improvement_response = self._call_ai_parse(messages, Result)
575
+ improvement_response = self._call_ai_parse(messages, Result, conversation_history)
460
576
  result_obj = self._extract_parsed_content(improvement_response, Result)
461
577
  current_result = result_obj.result
462
578
 
@@ -476,7 +592,7 @@ Please provide an improved version that addresses the feedback while maintaining
476
592
 
477
593
  return current_result
478
594
 
479
- def _generate_final_response(self, prompt: str, result: str, final_response_structure: Type[BaseModel], agent_id: str, token_summary: TokenSummary) -> BaseModel:
595
+ def _generate_final_response(self, prompt: str, result: str, final_response_structure: Type[BaseModel], agent_id: str, token_summary: TokenSummary, conversation_history: list[dict[str, Any]] = None) -> BaseModel:
480
596
  """
481
597
  Generate the final response in the requested format.
482
598
 
@@ -486,10 +602,13 @@ Please provide an improved version that addresses the feedback while maintaining
              final_response_structure: The expected response structure
              agent_id: The agent session identifier
              token_summary: Current token usage summary
+             conversation_history: Optional conversation history for dialog context

          Returns:
              The final response in the requested format
          """
+         if conversation_history is None:
+             conversation_history = []
          final_prompt = f"""
  Given this original prompt:
  ==========
@@ -507,7 +626,7 @@ Generate the final answer in the exact format requested. Make sure the response
          messages = [{"role": "user", "content": final_prompt}]

          try:
-             response = self._call_ai_parse(messages, final_response_structure)
+             response = self._call_ai_parse(messages, final_response_structure, conversation_history)
              final_response = self._extract_parsed_content(response, final_response_structure)

              # Track token usage for final response generation
@@ -534,3 +653,49 @@ Generate the final answer in the exact format requested. Make sure the response
                  logger.error(f"Fallback response creation failed: {fallback_error}")
                  # Last resort - return the structure with default values
                  return final_response_structure()
+
+     def get_session_info(self, agent_id: str) -> dict[str, Any]:
+         """
+         Get information about an agent session.
+
+         Args:
+             agent_id: The agent session identifier
+
+         Returns:
+             Session information dictionary
+
+         Raises:
+             ValueError: If the agent session is not found
+         """
+         if agent_id not in self._agent_sessions:
+             raise ValueError(f"Agent session {agent_id} not found")
+
+         session = self._agent_sessions[agent_id].copy()
+         # Remove sensitive information and add summary
+         session["conversation_length"] = len(session.get("conversation_history", []))
+         return session
+
+     def delete_session(self, agent_id: str) -> bool:
+         """
+         Delete an agent session.
+
+         Args:
+             agent_id: The agent session identifier
+
+         Returns:
+             True if session was deleted, False if it didn't exist
+         """
+         if agent_id in self._agent_sessions:
+             del self._agent_sessions[agent_id]
+             logger.info(f"🗑️ Deleted agent session {agent_id}")
+             return True
+         return False
+
+     def list_sessions(self) -> list[str]:
+         """
+         List all active agent session IDs.
+
+         Returns:
+             List of agent session IDs
+         """
+         return list(self._agent_sessions.keys())
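
The three new methods give callers explicit control over the session store. A short usage sketch, assuming an agent and a BookRecommendation model as in the example files below (note that get_session_info raises ValueError when no session was stored under the given ID):

    response = agent.agent("Recommend a book", BookRecommendation, ask_questions=False)
    agent_id = response.agent_id

    try:
        info = agent.get_session_info(agent_id)  # copy of the session dict
        print(info["conversation_length"])       # number of stored history messages
    except ValueError:
        pass                                     # no session stored under this ID

    print(agent.list_sessions())                 # all active session IDs
    print(agent.delete_session(agent_id))        # True if deleted, False if absent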
src/mbxai/examples/agent_example.py
@@ -126,6 +126,33 @@ def example_with_tool_client():
          print(f"- {rec}")


+ def example_dialog_conversation():
+     """Example demonstrating persistent dialog functionality."""
+     print("Example of persistent dialog conversation:")
+
+     openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
+     agent = AgentClient(openrouter_client, max_iterations=1)
+
+     # Start conversation
+     response1 = agent.agent("I want a book recommendation for science fiction", BookRecommendation, ask_questions=False)
+     agent_id = response1.agent_id
+
+     if response1.is_complete():
+         book1 = response1.final_response
+         print(f"First recommendation: {book1.title} by {book1.author}")
+
+     # Continue conversation with same agent_id
+     response2 = agent.agent("Can you recommend something by a different author in the same genre?", BookRecommendation, ask_questions=False, agent_id=agent_id)
+
+     if response2.is_complete():
+         book2 = response2.final_response
+         print(f"Second recommendation: {book2.title} by {book2.author}")
+         print(f"Context: {book2.reason}")
+
+     # Clean up session when done
+     agent.delete_session(agent_id)
+
+
  if __name__ == "__main__":
      print("=== Agent Client Examples ===\n")

@@ -150,3 +177,11 @@ if __name__ == "__main__":
          example_with_tool_client()
      except Exception as e:
          print(f"Error: {e}")
+
+     print("\n" + "="*50 + "\n")
+
+     print("4. Example with persistent dialog:")
+     try:
+         example_dialog_conversation()
+     except Exception as e:
+         print(f"Error: {e}")
src/mbxai/examples/dialog_agent_example.py (new file)
@@ -0,0 +1,157 @@
+ """
+ Example usage of the AgentClient with persistent dialog sessions.
+ """
+
+ import os
+ from pydantic import BaseModel, Field
+ from mbxai import AgentClient, OpenRouterClient, AnswerList, Answer
+
+
+ class ChatResponse(BaseModel):
+     """A general chat response."""
+     response: str = Field(description="The response to the user's message")
+     context_awareness: str = Field(description="How this response relates to previous conversation")
+
+
+ class BookRecommendation(BaseModel):
+     """A book recommendation response."""
+     title: str = Field(description="The title of the recommended book")
+     author: str = Field(description="The author of the book")
+     genre: str = Field(description="The genre of the book")
+     reason: str = Field(description="Why this book is recommended based on conversation")
+     connection_to_previous: str = Field(description="How this recommendation connects to our previous conversation")
+
+
+ def demonstrate_dialog_conversation():
+     """Demonstrate persistent dialog functionality."""
+     print("🔄 DEMO: Persistent Dialog Agent")
+     print("=" * 50)
+
+     # Initialize the clients
+     openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
+     agent = AgentClient(openrouter_client, max_iterations=1)
+
+     # First conversation - start new session
+     print("\n1️⃣ Starting new conversation:")
+     prompt1 = "Hi, I'm looking for a good book to read. I love science fiction."
+     response1 = agent.agent(prompt1, ChatResponse, ask_questions=False)
+
+     if response1.is_complete():
+         agent_id = response1.agent_id
+         chat_resp = response1.final_response
+         print(f"Agent ID: {agent_id}")
+         print(f"Response: {chat_resp.response}")
+         print(f"Context awareness: {chat_resp.context_awareness}")
+
+         # Second conversation - continue same session
+         print(f"\n2️⃣ Continuing conversation with agent {agent_id}:")
+         prompt2 = "Actually, I also enjoy fantasy novels. What would you recommend that combines both genres?"
+         response2 = agent.agent(prompt2, BookRecommendation, ask_questions=False, agent_id=agent_id)
+
+         if response2.is_complete():
+             book_rec = response2.final_response
+             print(f"Book: {book_rec.title} by {book_rec.author}")
+             print(f"Genre: {book_rec.genre}")
+             print(f"Reason: {book_rec.reason}")
+             print(f"Connection to previous: {book_rec.connection_to_previous}")
+
+         # Third conversation - continue same session
+         print(f"\n3️⃣ Continuing conversation with agent {agent_id}:")
+         prompt3 = "That sounds great! Can you recommend something similar but from a different author?"
+         response3 = agent.agent(prompt3, BookRecommendation, ask_questions=False, agent_id=agent_id)
+
+         if response3.is_complete():
+             book_rec2 = response3.final_response
+             print(f"Book: {book_rec2.title} by {book_rec2.author}")
+             print(f"Genre: {book_rec2.genre}")
+             print(f"Reason: {book_rec2.reason}")
+             print(f"Connection to previous: {book_rec2.connection_to_previous}")
+
+         # Show session info
+         print(f"\n📊 Session Information:")
+         try:
+             session_info = agent.get_session_info(agent_id)
+             print(f"Conversation length: {session_info['conversation_length']} messages")
+             print(f"Session step: {session_info.get('step', 'unknown')}")
+         except Exception as e:
+             print(f"Error getting session info: {e}")
+
+         # List all sessions
+         print(f"\n📝 Active sessions: {agent.list_sessions()}")
+
+         # Cleanup - optional
+         print(f"\n🗑️ Cleaning up session...")
+         deleted = agent.delete_session(agent_id)
+         print(f"Session deleted: {deleted}")
+         print(f"Active sessions after cleanup: {agent.list_sessions()}")
+
+
+ def demonstrate_dialog_with_questions():
+     """Demonstrate dialog with question-answer flow."""
+     print("\n🔄 DEMO: Dialog Agent with Questions")
+     print("=" * 50)
+
+     # Initialize the clients
+     openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
+     agent = AgentClient(openrouter_client, max_iterations=1)
+
+     # First conversation with questions
+     print("\n1️⃣ Starting conversation with questions:")
+     prompt1 = "I want a personalized book recommendation"
+     response1 = agent.agent(prompt1, BookRecommendation, ask_questions=True)
+
+     agent_id = response1.agent_id
+     print(f"Agent ID: {agent_id}")
+
+     if response1.has_questions():
+         print(f"\n📋 Agent generated {len(response1.questions)} questions:")
+         for i, question in enumerate(response1.questions, 1):
+             print(f"  {i}. {question.question} (key: {question.key})")
+
+         # Simulate answering questions
+         answers = AnswerList(answers=[
+             Answer(key="genre_preference", answer="I love science fiction and fantasy"),
+             Answer(key="reading_level", answer="I prefer complex, adult novels"),
+             Answer(key="recent_books", answer="I recently read and loved Dune and The Name of the Wind")
+         ])
+
+         print(f"\n📝 Providing answers...")
+         final_response = agent.answer_to_agent(agent_id, answers)
+
+         if final_response.is_complete():
+             book_rec = final_response.final_response
+             print(f"Book: {book_rec.title} by {book_rec.author}")
+             print(f"Genre: {book_rec.genre}")
+             print(f"Reason: {book_rec.reason}")
+
+             # Continue conversation - this should remember the previous interaction
+             print(f"\n2️⃣ Continuing conversation with agent {agent_id}:")
+             prompt2 = "Thank you! Can you also recommend something by a female author in the same genres?"
+             response2 = agent.agent(prompt2, BookRecommendation, ask_questions=False, agent_id=agent_id)
+
+             if response2.is_complete():
+                 book_rec2 = response2.final_response
+                 print(f"Book: {book_rec2.title} by {book_rec2.author}")
+                 print(f"Genre: {book_rec2.genre}")
+                 print(f"Reason: {book_rec2.reason}")
+                 print(f"Connection to previous: {book_rec2.connection_to_previous}")
+
+     # Session cleanup
+     print(f"\n🗑️ Session cleanup...")
+     agent.delete_session(agent_id)
+
+
+ if __name__ == "__main__":
+     print("=== Dialog Agent Examples ===\n")
+
+     try:
+         demonstrate_dialog_conversation()
+     except Exception as e:
+         print(f"Error in dialog conversation demo: {e}")
+
+     print("\n" + "="*80 + "\n")
+
+     try:
+         demonstrate_dialog_with_questions()
+     except Exception as e:
+         print(f"Error in dialog with questions demo: {e}")
src/mbxai/mcp/server.py
@@ -31,7 +31,7 @@ class MCPServer:
          self.app = FastAPI(
              title=self.name,
              description=self.description,
-             version="2.0.5",
+             version="2.1.0",
          )

          # Initialize MCP server