mbxai 2.0.5__py3-none-any.whl → 2.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mbxai/__init__.py +1 -1
- mbxai/agent/client.py +273 -63
- mbxai/examples/agent_example.py +37 -2
- mbxai/examples/agent_logging_example.py +1 -1
- mbxai/examples/conversation_history_test.py +169 -0
- mbxai/examples/dialog_agent_example.py +157 -0
- mbxai/examples/unified_interface_example.py +109 -0
- mbxai/mcp/server.py +1 -1
- {mbxai-2.0.5.dist-info → mbxai-2.1.1.dist-info}/METADATA +1 -1
- {mbxai-2.0.5.dist-info → mbxai-2.1.1.dist-info}/RECORD +12 -9
- {mbxai-2.0.5.dist-info → mbxai-2.1.1.dist-info}/WHEEL +0 -0
- {mbxai-2.0.5.dist-info → mbxai-2.1.1.dist-info}/licenses/LICENSE +0 -0
mbxai/__init__.py
CHANGED
mbxai/agent/client.py
CHANGED
@@ -123,9 +123,16 @@ class AgentClient:
                 f"Use MCPClient to register MCP servers."
             )

-    def _call_ai_parse(self, messages: list[dict[str, Any]], response_format: Type[BaseModel]) -> Any:
-        """Call the parse method on the AI client."""
-
+    def _call_ai_parse(self, messages: list[dict[str, Any]], response_format: Type[BaseModel], conversation_history: list[dict[str, Any]] = None) -> Any:
+        """Call the parse method on the AI client with optional conversation history."""
+        # Combine conversation history with new messages
+        if conversation_history:
+            full_messages = conversation_history + messages
+            logger.debug(f"🔗 AI call with {len(conversation_history)} history messages + {len(messages)} new messages = {len(full_messages)} total")
+        else:
+            full_messages = messages
+            logger.debug(f"🔗 AI call with {len(messages)} messages (no history)")
+        return self._ai_client.parse(full_messages, response_format)

     def _extract_token_usage(self, response: Any) -> TokenUsage:
         """Extract token usage information from an AI response."""
@@ -179,7 +186,9 @@
         self,
         prompt: str,
         final_response_structure: Type[BaseModel],
-        ask_questions: bool = True
+        ask_questions: bool = True,
+        agent_id: str = None,
+        answers: AnswerList = None
     ) -> AgentResponse:
         """
         Process a prompt through the agent's thinking process.
@@ -188,16 +197,40 @@
             prompt: The initial prompt from the user
             final_response_structure: Pydantic model defining the expected final response format
             ask_questions: Whether to ask clarifying questions (default: True)
+            agent_id: Optional agent session ID to continue an existing conversation
+            answers: Optional answers to questions (when continuing a conversation with questions)

         Returns:
             AgentResponse containing either questions to ask or the final response
         """
-        agent_id
-
+        # Use provided agent_id or create a new one
+        if agent_id is None:
+            agent_id = str(__import__("uuid").uuid4())
+            logger.info(f"🚀 Starting new agent process (ID: {agent_id}) with prompt: {prompt[:100]}...")
+        else:
+            logger.info(f"🔄 Continuing agent process (ID: {agent_id}) with prompt: {prompt[:100]}...")

         # Initialize token summary
         token_summary = TokenSummary()

+        # Check if this is a continuing conversation
+        existing_session = self._agent_sessions.get(agent_id, {})
+        conversation_history = existing_session.get("conversation_history", []).copy()
+
+        if conversation_history:
+            logger.info(f"📜 Agent {agent_id}: Loaded conversation history with {len(conversation_history)} messages")
+
+        # Store conversation history for AI calls (don't include current prompt yet)
+        history_for_ai = conversation_history.copy()
+
+        # Add current prompt to full conversation history for session storage
+        conversation_history.append({"role": "user", "content": prompt})
+
+        # Handle answers provided (skip question generation and process directly)
+        if answers is not None:
+            logger.info(f"📝 Agent {agent_id}: Processing with provided answers, skipping question generation")
+            return self._process_answers_directly(agent_id, prompt, final_response_structure, answers, token_summary, history_for_ai)
+
         # Step 1: Generate questions (if ask_questions is True)
         if ask_questions:
             logger.info(f"❓ Agent {agent_id}: Analyzing prompt and generating clarifying questions")
@@ -221,7 +254,7 @@ IMPORTANT: For each question, provide a technical key identifier that:
             messages = [{"role": "user", "content": questions_prompt}]

             try:
-                response = self._call_ai_parse(messages, QuestionList)
+                response = self._call_ai_parse(messages, QuestionList, history_for_ai)
                 question_list = self._extract_parsed_content(response, QuestionList)

                 # Extract token usage for question generation
@@ -238,7 +271,8 @@ IMPORTANT: For each question, provide a technical key identifier that:
                         "final_response_structure": final_response_structure,
                         "questions": question_list.questions,
                         "step": "waiting_for_answers",
-                        "token_summary": token_summary
+                        "token_summary": token_summary,
+                        "conversation_history": history_for_ai  # Include history without current prompt
                     }
                     logger.info(f"📋 Agent {agent_id}: Waiting for user answers to {len(question_list.questions)} questions")
                     return agent_response
@@ -248,58 +282,124 @@ IMPORTANT: For each question, provide a technical key identifier that:

         # Step 2 & 3: No questions or ask_questions=False - proceed directly
         logger.info(f"⚡ Agent {agent_id}: No questions needed, proceeding directly to processing")
-        return self._process_with_answers(prompt, final_response_structure, [], agent_id, token_summary)
+        return self._process_with_answers(prompt, final_response_structure, [], agent_id, token_summary, history_for_ai)

-    def
+    def _process_answers_directly(
+        self,
+        agent_id: str,
+        prompt: str,
+        final_response_structure: Type[BaseModel],
+        answers: AnswerList,
+        token_summary: TokenSummary,
+        conversation_history: list[dict[str, Any]]
+    ) -> AgentResponse:
         """
-
-
+        Process answers directly without going through question generation.
+
         Args:
             agent_id: The agent session identifier
-
-
+            prompt: The current prompt
+            final_response_structure: Expected response structure
+            answers: Provided answers
+            token_summary: Current token usage summary
+            conversation_history: Conversation history
+
         Returns:
             AgentResponse with the final result
-
-        Raises:
-            ValueError: If the agent session is not found or in wrong state
         """
-        if
-
-
-
-        if
-
-
-
-
-
-
-
-
-
+        # Check if we have a session with questions to match against
+        session = self._agent_sessions.get(agent_id, {})
+        questions = session.get("questions", [])
+
+        if not questions:
+            # No previous questions - treat as simple additional context
+            logger.info(f"📝 Agent {agent_id}: No previous questions found, treating answers as additional context")
+            answer_dict = {answer.key: answer.answer for answer in answers.answers}
+            qa_pairs = []
+            for answer in answers.answers:
+                qa_pairs.append({
+                    "question": f"Information about {answer.key}",
+                    "key": answer.key,
+                    "answer": answer.answer,
+                    "required": True
+                })
+        else:
+            # Match answers with previous questions
+            logger.info(f"📝 Agent {agent_id}: Matching {len(answers.answers)} answers with previous questions")
+            answer_dict = {answer.key: answer.answer for answer in answers.answers}
+
+            # Create question-answer pairs for better context
+            qa_pairs = []
+            for question in questions:
+                answer_text = answer_dict.get(question.key, "No answer provided")
+                qa_pairs.append({
+                    "question": question.question,
+                    "key": question.key,
+                    "answer": answer_text,
+                    "required": question.required
+                })
+
+        # Process with the provided answers and question context
         result = self._process_with_answers(
-
-
-
+            prompt,
+            final_response_structure,
+            qa_pairs,
             agent_id,
-            token_summary
+            token_summary,
+            conversation_history
         )

-        #
-
-
+        # Note: History management is now handled in _process_with_answers
+        # No need to duplicate history management here
         return result

+    def _format_qa_context_for_quality_check(self, answers: Union[list, dict[str, str]]) -> str:
+        """
+        Format question-answer context for quality check and improvement prompts.
+
+        Args:
+            answers: Question-answer pairs or simple answers
+
+        Returns:
+            Formatted context text
+        """
+        if not answers:
+            return ""
+
+        if isinstance(answers, list) and answers:
+            # Check if it's a list of question-answer pairs (enhanced format)
+            if isinstance(answers[0], dict) and "question" in answers[0]:
+                context_text = "\nContext Information (Questions & Answers):\n"
+                context_text += "The response was generated with the following additional context:\n\n"
+                for i, qa_pair in enumerate(answers, 1):
+                    question = qa_pair.get("question", "Unknown question")
+                    answer = qa_pair.get("answer", "No answer provided")
+                    required = qa_pair.get("required", True)
+
+                    status_marker = "🔴 REQUIRED" if required else "🟡 OPTIONAL"
+                    context_text += f"{i}. {status_marker} Q: {question}\n"
+                    context_text += f"   A: {answer}\n\n"
+                return context_text
+            else:
+                # Legacy format - simple list
+                return f"\nAdditional context: {', '.join(str(a) for a in answers)}\n\n"
+        elif isinstance(answers, dict) and answers:
+            # Legacy format - simple dict
+            context_text = "\nAdditional context provided:\n"
+            for key, answer in answers.items():
+                context_text += f"- {key}: {answer}\n"
+            return context_text + "\n"
+
+        return ""
+
     def _process_with_answers(
         self,
         prompt: str,
         final_response_structure: Type[BaseModel],
         answers: Union[list, dict[str, str]],
         agent_id: str,
-        token_summary: TokenSummary
+        token_summary: TokenSummary,
+        conversation_history: list[dict[str, Any]] = None
    ) -> AgentResponse:
         """
         Process the prompt with answers through the thinking pipeline.
@@ -307,23 +407,49 @@ IMPORTANT: For each question, provide a technical key identifier that:
         Args:
             prompt: The original prompt
             final_response_structure: Expected final response structure
-            answers:
+            answers: Question-answer pairs or simple answers (empty if no questions were asked)
             agent_id: The agent session identifier
             token_summary: Current token usage summary
+            conversation_history: Optional conversation history for dialog context

         Returns:
             AgentResponse with the final result
         """
+        if conversation_history is None:
+            conversation_history = []
+
         # Step 3: Process the prompt with thinking
         logger.info(f"🧠 Agent {agent_id}: Processing prompt and generating initial response")
-        result = self._think_and_process(prompt, answers, agent_id, token_summary)
+        result = self._think_and_process(prompt, answers, agent_id, token_summary, conversation_history)

         # Step 4: Quality check and iteration
-        final_result = self._quality_check_and_iterate(prompt, result, answers, agent_id, token_summary)
+        final_result = self._quality_check_and_iterate(prompt, result, answers, agent_id, token_summary, conversation_history)

         # Step 5: Generate final answer in requested format
         logger.info(f"📝 Agent {agent_id}: Generating final structured response")
-        final_response = self._generate_final_response(prompt, final_result, final_response_structure, agent_id, token_summary)
+        final_response = self._generate_final_response(prompt, final_result, final_response_structure, agent_id, token_summary, conversation_history)
+
+        # Update session with the final response in conversation history
+        if agent_id in self._agent_sessions:
+            # Update conversation history with assistant response
+            updated_history = conversation_history.copy()
+            updated_history.append({"role": "assistant", "content": str(final_response)})
+
+            self._agent_sessions[agent_id]["conversation_history"] = updated_history
+            self._agent_sessions[agent_id]["step"] = "completed"
+            self._agent_sessions[agent_id]["token_summary"] = token_summary
+            logger.info(f"💾 Agent {agent_id}: Updated session with conversation history ({len(updated_history)} messages)")
+        else:
+            # Create new session if it doesn't exist
+            updated_history = conversation_history.copy()
+            updated_history.append({"role": "assistant", "content": str(final_response)})
+
+            self._agent_sessions[agent_id] = {
+                "step": "completed",
+                "conversation_history": updated_history,
+                "token_summary": token_summary
+            }
+            logger.info(f"💾 Agent {agent_id}: Created new session with conversation history ({len(updated_history)} messages)")

         # Log final token summary
         logger.info(f"📊 Agent {agent_id}: Token usage summary - Total: {token_summary.total_tokens} "
@@ -331,27 +457,49 @@ IMPORTANT: For each question, provide a technical key identifier that:

         return AgentResponse(agent_id=agent_id, final_response=final_response, token_summary=token_summary)

-    def _think_and_process(self, prompt: str, answers: Union[list, dict[str, str]], agent_id: str, token_summary: TokenSummary) -> str:
+    def _think_and_process(self, prompt: str, answers: Union[list, dict[str, str]], agent_id: str, token_summary: TokenSummary, conversation_history: list[dict[str, Any]] = None) -> str:
         """
         Process the prompt with thinking.

         Args:
             prompt: The original prompt
-            answers:
+            answers: Question-answer pairs or simple answers
             agent_id: The agent session identifier
             token_summary: Current token usage summary
+            conversation_history: Optional conversation history for dialog context

         Returns:
             The AI's result
         """
-
+        if conversation_history is None:
+            conversation_history = []
+        # Format answers for the prompt with enhanced context
         answers_text = ""
-        if isinstance(answers,
+        if isinstance(answers, list) and answers:
+            # Check if it's a list of question-answer pairs (enhanced format)
+            if answers and isinstance(answers[0], dict) and "question" in answers[0]:
+                answers_text = "\n\nQuestion-Answer Context:\n"
+                answers_text += "The following questions were asked to gather more information, along with the answers provided:\n\n"
+                for i, qa_pair in enumerate(answers, 1):
+                    question = qa_pair.get("question", "Unknown question")
+                    answer = qa_pair.get("answer", "No answer provided")
+                    key = qa_pair.get("key", "")
+                    required = qa_pair.get("required", True)
+
+                    status_marker = "🔴 REQUIRED" if required else "🟡 OPTIONAL"
+                    answers_text += f"{i}. {status_marker} Question: {question}\n"
+                    answers_text += f"   Answer: {answer}\n"
+                    if key:
+                        answers_text += f"   (Key: {key})\n"
+                    answers_text += "\n"
+            else:
+                # Legacy format - simple list
+                answers_text = f"\n\nAdditional information: {', '.join(str(a) for a in answers)}\n"
+        elif isinstance(answers, dict) and answers:
+            # Legacy format - simple dict
             answers_text = "\n\nAdditional information provided:\n"
             for key, answer in answers.items():
                 answers_text += f"- {key}: {answer}\n"
-        elif isinstance(answers, list) and answers:
-            answers_text = f"\n\nAdditional information: {', '.join(answers)}\n"

         thinking_prompt = f"""
 Think about this prompt, the goal and the steps required to fulfill it:
@@ -360,15 +508,19 @@ Think about this prompt, the goal and the steps required to fulfill it:
 ==========
 {answers_text}

-Consider the prompt carefully, analyze what the user wants to achieve, and think through the best approach to provide a comprehensive and helpful response.
+Consider the prompt carefully, analyze what the user wants to achieve, and think through the best approach to provide a comprehensive and helpful response.
+
+IMPORTANT: When formulating your response, take into account both the original prompt AND the specific questions that were asked along with their answers. The questions reveal what additional information was deemed necessary, and the answers provide crucial context that should inform your response.
+
+Use any available tools to gather information or perform actions that would improve your response.

-Provide your best result for the given prompt.
+Provide your best result for the given prompt, incorporating all the context from the question-answer pairs.
 """

         messages = [{"role": "user", "content": thinking_prompt}]

         try:
-            response = self._call_ai_parse(messages, Result)
+            response = self._call_ai_parse(messages, Result, conversation_history)
             result_obj = self._extract_parsed_content(response, Result)

             # Track token usage for thinking process
@@ -380,20 +532,24 @@ Provide your best result for the given prompt.
             logger.error(f"Error in thinking process: {e}")
             raise RuntimeError(f"Failed to process prompt with AI client: {e}") from e

-    def _quality_check_and_iterate(self, prompt: str, result: str, answers: Union[list, dict[str, str]], agent_id: str, token_summary: TokenSummary) -> str:
+    def _quality_check_and_iterate(self, prompt: str, result: str, answers: Union[list, dict[str, str]], agent_id: str, token_summary: TokenSummary, conversation_history: list[dict[str, Any]] = None) -> str:
         """
         Check the quality of the result and iterate if needed.

         Args:
             prompt: The original prompt
             result: The current result
-            answers:
+            answers: Question-answer pairs or simple answers
             agent_id: The agent session identifier
             token_summary: Current token usage summary
+            conversation_history: Optional conversation history for dialog context

         Returns:
             The final improved result
         """
+        if conversation_history is None:
+            conversation_history = []
+
         current_result = result

         if self._max_iterations == 0:
@@ -402,13 +558,16 @@ Provide your best result for the given prompt.

         logger.info(f"🔍 Agent {agent_id}: Starting quality check and improvement process (max iterations: {self._max_iterations})")

+        # Format context information for quality checks
+        context_text = self._format_qa_context_for_quality_check(answers)
+
         for iteration in range(self._max_iterations):
             quality_prompt = f"""
 Given this original prompt:
 ==========
 {prompt}
 ==========
-
+{context_text}
 And this result:
 ==========
 {current_result}
@@ -416,13 +575,15 @@ And this result:

 Is this result good and comprehensive, or does it need to be improved? Consider if the response fully addresses the prompt, provides sufficient detail, and would be helpful to the user.

+IMPORTANT: Also evaluate whether the result properly incorporates and addresses the information provided through the question-answer pairs above. The response should demonstrate that it has taken this additional context into account.
+
 Evaluate the quality and provide feedback if improvements are needed.
 """

             messages = [{"role": "user", "content": quality_prompt}]

             try:
-                response = self._call_ai_parse(messages, QualityCheck)
+                response = self._call_ai_parse(messages, QualityCheck, conversation_history)
                 quality_check = self._extract_parsed_content(response, QualityCheck)

                 # Track token usage for quality check
@@ -441,7 +602,7 @@ The original prompt was:
 ==========
 {prompt}
 ==========
-
+{context_text}
 The current result is:
 ==========
 {current_result}
@@ -452,11 +613,11 @@ Feedback for improvement:
 {quality_check.feedback}
 ==========

-Please provide an improved version that addresses the feedback while maintaining the strengths of the current result.
+Please provide an improved version that addresses the feedback while maintaining the strengths of the current result. Make sure to incorporate all the context from the question-answer pairs above.
 """

                     messages = [{"role": "user", "content": improvement_prompt}]
-                    improvement_response = self._call_ai_parse(messages, Result)
+                    improvement_response = self._call_ai_parse(messages, Result, conversation_history)
                     result_obj = self._extract_parsed_content(improvement_response, Result)
                     current_result = result_obj.result

@@ -476,7 +637,7 @@ Please provide an improved version that addresses the feedback while maintaining

         return current_result

-    def _generate_final_response(self, prompt: str, result: str, final_response_structure: Type[BaseModel], agent_id: str, token_summary: TokenSummary) -> BaseModel:
+    def _generate_final_response(self, prompt: str, result: str, final_response_structure: Type[BaseModel], agent_id: str, token_summary: TokenSummary, conversation_history: list[dict[str, Any]] = None) -> BaseModel:
         """
         Generate the final response in the requested format.

@@ -486,10 +647,13 @@ Please provide an improved version that addresses the feedback while maintaining
             final_response_structure: The expected response structure
             agent_id: The agent session identifier
             token_summary: Current token usage summary
+            conversation_history: Optional conversation history for dialog context

         Returns:
             The final response in the requested format
         """
+        if conversation_history is None:
+            conversation_history = []
         final_prompt = f"""
 Given this original prompt:
 ==========
@@ -507,7 +671,7 @@ Generate the final answer in the exact format requested. Make sure the response
         messages = [{"role": "user", "content": final_prompt}]

         try:
-            response = self._call_ai_parse(messages, final_response_structure)
+            response = self._call_ai_parse(messages, final_response_structure, conversation_history)
             final_response = self._extract_parsed_content(response, final_response_structure)

             # Track token usage for final response generation
@@ -534,3 +698,49 @@ Generate the final answer in the exact format requested. Make sure the response
             logger.error(f"Fallback response creation failed: {fallback_error}")
             # Last resort - return the structure with default values
             return final_response_structure()
+
+    def get_session_info(self, agent_id: str) -> dict[str, Any]:
+        """
+        Get information about an agent session.
+
+        Args:
+            agent_id: The agent session identifier
+
+        Returns:
+            Session information dictionary
+
+        Raises:
+            ValueError: If the agent session is not found
+        """
+        if agent_id not in self._agent_sessions:
+            raise ValueError(f"Agent session {agent_id} not found")
+
+        session = self._agent_sessions[agent_id].copy()
+        # Remove sensitive information and add summary
+        session["conversation_length"] = len(session.get("conversation_history", []))
+        return session
+
+    def delete_session(self, agent_id: str) -> bool:
+        """
+        Delete an agent session.
+
+        Args:
+            agent_id: The agent session identifier
+
+        Returns:
+            True if session was deleted, False if it didn't exist
+        """
+        if agent_id in self._agent_sessions:
+            del self._agent_sessions[agent_id]
+            logger.info(f"🗑️ Deleted agent session {agent_id}")
+            return True
+        return False
+
+    def list_sessions(self) -> list[str]:
+        """
+        List all active agent session IDs.
+
+        Returns:
+            List of agent session IDs
+        """
+        return list(self._agent_sessions.keys())
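Taken together, these client changes turn AgentClient into a session-scoped dialog API: agent() now accepts an optional agent_id to resume a stored conversation and an optional answers list, and sessions can be inspected or removed afterwards. A minimal usage sketch built only from the methods visible in this diff (the Reply model is an illustrative placeholder, not part of the package):

import os
from pydantic import BaseModel, Field
from mbxai import AgentClient, OpenRouterClient

class Reply(BaseModel):  # illustrative response structure (assumption)
    text: str = Field(description="The response text")

client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
agent = AgentClient(client, max_iterations=1)

# First call creates a session; the generated ID is returned on the response.
first = agent.agent("Recommend a sci-fi novel", Reply, ask_questions=False)

# Passing agent_id resumes the session, so _call_ai_parse prepends the stored
# conversation history to every AI call made during this turn.
second = agent.agent("Another one, by a different author", Reply,
                     ask_questions=False, agent_id=first.agent_id)

print(agent.get_session_info(first.agent_id)["conversation_length"])
agent.delete_session(first.agent_id)  # in-memory sessions persist until deleted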
mbxai/examples/agent_example.py
CHANGED
@@ -48,8 +48,8 @@ def example_with_questions():
         Answer(key="recent_books", answer="I recently read and loved Dune and The Name of the Wind")
     ])

-    # Continue the agent process with answers
-    final_response = agent.
+    # Continue the agent process with answers using the unified interface
+    final_response = agent.agent("Continue with previous questions", BookRecommendation, ask_questions=False, agent_id=response.agent_id, answers=answers)

     if final_response.is_complete():
         book_rec = final_response.final_response
@@ -126,6 +126,33 @@ def example_with_tool_client():
         print(f"- {rec}")


+def example_dialog_conversation():
+    """Example demonstrating persistent dialog functionality."""
+    print("Example of persistent dialog conversation:")
+
+    openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
+    agent = AgentClient(openrouter_client, max_iterations=1)
+
+    # Start conversation
+    response1 = agent.agent("I want a book recommendation for science fiction", BookRecommendation, ask_questions=False)
+    agent_id = response1.agent_id
+
+    if response1.is_complete():
+        book1 = response1.final_response
+        print(f"First recommendation: {book1.title} by {book1.author}")
+
+    # Continue conversation with same agent_id
+    response2 = agent.agent("Can you recommend something by a different author in the same genre?", BookRecommendation, ask_questions=False, agent_id=agent_id)
+
+    if response2.is_complete():
+        book2 = response2.final_response
+        print(f"Second recommendation: {book2.title} by {book2.author}")
+        print(f"Context: {book2.reason}")
+
+    # Clean up session when done
+    agent.delete_session(agent_id)
+
+
 if __name__ == "__main__":
     print("=== Agent Client Examples ===\n")

@@ -150,3 +177,11 @@ if __name__ == "__main__":
         example_with_tool_client()
     except Exception as e:
         print(f"Error: {e}")
+
+    print("\n" + "="*50 + "\n")
+
+    print("4. Example with persistent dialog:")
+    try:
+        example_dialog_conversation()
+    except Exception as e:
+        print(f"Error: {e}")
mbxai/examples/agent_logging_example.py
CHANGED
@@ -74,7 +74,7 @@ def demonstrate_agent_with_questions():
     ])

     print(f"\n📝 Providing answers and continuing...")
-    final_response = agent.
+    final_response = agent.agent("Continue with answers", WeatherResponse, ask_questions=False, agent_id=response.agent_id, answers=answers)

     if final_response.is_complete():
         print("\n✅ Final response received!")
mbxai/examples/conversation_history_test.py
ADDED
@@ -0,0 +1,169 @@
+"""
+Test example demonstrating conversation history persistence across multiple interactions.
+"""
+
+import os
+from pydantic import BaseModel, Field
+from mbxai import AgentClient, OpenRouterClient, AnswerList, Answer
+
+
+class StoryResponse(BaseModel):
+    """A story response that should reference previous conversation."""
+    story_part: str = Field(description="The current part of the story")
+    references_to_previous: str = Field(description="How this connects to our previous conversation")
+    conversation_context: str = Field(description="Summary of what was discussed before")
+
+
+class ChatResponse(BaseModel):
+    """A general chat response."""
+    response: str = Field(description="The response to the user's message")
+    context_awareness: str = Field(description="What the AI remembers from our conversation")
+
+
+def test_conversation_history_persistence():
+    """Test that conversation history persists and is used across multiple interactions."""
+    print("🧪 TESTING: Conversation History Persistence")
+    print("=" * 60)
+
+    # Initialize the clients
+    openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
+    agent = AgentClient(openrouter_client, max_iterations=1)
+
+    # First interaction - establish context
+    print("\n1️⃣ First interaction - setting up story context:")
+    prompt1 = "I want to tell a collaborative story about a space explorer named Luna who discovers an ancient alien artifact on Mars."
+    response1 = agent.agent(prompt1, StoryResponse, ask_questions=False)
+
+    agent_id = response1.agent_id
+    print(f"Agent ID: {agent_id}")
+
+    if response1.is_complete():
+        story1 = response1.final_response
+        print(f"Story Part 1: {story1.story_part[:200]}...")
+        print(f"Context awareness: {story1.context_awareness}")
+
+    # Check session info
+    session_info = agent.get_session_info(agent_id)
+    print(f"📊 Session after first interaction: {session_info['conversation_length']} messages")
+
+    # Second interaction - continue story, should reference Luna and the artifact
+    print(f"\n2️⃣ Second interaction - continuing story (should remember Luna and artifact):")
+    prompt2 = "Luna touches the artifact and something amazing happens. Continue the story."
+    response2 = agent.agent(prompt2, StoryResponse, ask_questions=False, agent_id=agent_id)
+
+    if response2.is_complete():
+        story2 = response2.final_response
+        print(f"Story Part 2: {story2.story_part[:200]}...")
+        print(f"References to previous: {story2.references_to_previous}")
+        print(f"Conversation context: {story2.conversation_context}")
+
+    # Check session info
+    session_info = agent.get_session_info(agent_id)
+    print(f"📊 Session after second interaction: {session_info['conversation_length']} messages")
+
+    # Third interaction - change topic but should still remember story context
+    print(f"\n3️⃣ Third interaction - changing topic (should still remember our story):")
+    prompt3 = "Actually, let's pause the story. What do you think Luna's personality is like based on our story so far?"
+    response3 = agent.agent(prompt3, ChatResponse, ask_questions=False, agent_id=agent_id)
+
+    if response3.is_complete():
+        chat3 = response3.final_response
+        print(f"Response: {chat3.response}")
+        print(f"Context awareness: {chat3.context_awareness}")
+
+    # Check session info
+    session_info = agent.get_session_info(agent_id)
+    print(f"📊 Session after third interaction: {session_info['conversation_length']} messages")
+
+    # Fourth interaction - return to story, should remember everything
+    print(f"\n4️⃣ Fourth interaction - returning to story (should remember all previous context):")
+    prompt4 = "Great! Now let's continue Luna's story from where we left off. What happens next with the artifact?"
+    response4 = agent.agent(prompt4, StoryResponse, ask_questions=False, agent_id=agent_id)
+
+    if response4.is_complete():
+        story4 = response4.final_response
+        print(f"Story Part 4: {story4.story_part[:200]}...")
+        print(f"References to previous: {story4.references_to_previous}")
+        print(f"Conversation context: {story4.conversation_context}")
+
+    # Final session info
+    session_info = agent.get_session_info(agent_id)
+    print(f"📊 Final session state: {session_info['conversation_length']} messages")
+    print(f"Session step: {session_info.get('step', 'unknown')}")
+
+    # Display full conversation history
+    print(f"\n💬 FULL CONVERSATION HISTORY:")
+    session_info = agent.get_session_info(agent_id)
+    history = session_info.get('conversation_history', [])
+    for i, msg in enumerate(history, 1):
+        role = msg['role'].upper()
+        content = msg['content'][:100] + "..." if len(msg['content']) > 100 else msg['content']
+        print(f"{i:2d}. {role}: {content}")
+
+    # Cleanup
+    print(f"\n🗑️ Cleaning up session {agent_id}")
+    agent.delete_session(agent_id)
+
+
+def test_with_questions_and_history():
+    """Test conversation history with questions and answers."""
+    print("\n🧪 TESTING: Questions + Answers + History")
+    print("=" * 60)
+
+    openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
+    agent = AgentClient(openrouter_client, max_iterations=1)
+
+    # Start with questions
+    print("\n1️⃣ Starting conversation with questions:")
+    response1 = agent.agent("I want a personalized workout plan", ChatResponse, ask_questions=True)
+
+    agent_id = response1.agent_id
+    print(f"Agent ID: {agent_id}")
+
+    if response1.has_questions():
+        print(f"📋 Generated {len(response1.questions)} questions:")
+        for q in response1.questions:
+            print(f"  - {q.question}")
+
+        # Answer questions
+        answers = AnswerList(answers=[
+            Answer(key="fitness_level", answer="Beginner"),
+            Answer(key="goals", answer="Weight loss and muscle building"),
+            Answer(key="time_available", answer="30 minutes per day, 4 days a week"),
+            Answer(key="equipment", answer="Home gym with dumbbells and resistance bands")
+        ])
+
+        print(f"\n2️⃣ Providing answers:")
+        response2 = agent.agent("Here are my answers", ChatResponse, ask_questions=False, agent_id=agent_id, answers=answers)
+
+        if response2.is_complete():
+            workout_plan = response2.final_response
+            print(f"Workout plan: {workout_plan.response[:200]}...")
+            print(f"Context awareness: {workout_plan.context_awareness}")
+
+        # Continue conversation
+        print(f"\n3️⃣ Follow-up question (should remember all previous context):")
+        response3 = agent.agent("Can you modify this plan to focus more on cardio?", ChatResponse, ask_questions=False, agent_id=agent_id)
+
+        if response3.is_complete():
+            modified_plan = response3.final_response
+            print(f"Modified plan: {modified_plan.response[:200]}...")
+            print(f"Context awareness: {modified_plan.context_awareness}")
+
+    # Show history
+    session_info = agent.get_session_info(agent_id)
+    print(f"\n💬 Conversation had {session_info['conversation_length']} messages")
+
+    # Cleanup
+    agent.delete_session(agent_id)
+
+
+if __name__ == "__main__":
+    try:
+        test_conversation_history_persistence()
+        print("\n" + "="*80 + "\n")
+        test_with_questions_and_history()
+    except Exception as e:
+        print(f"Error: {e}")
+        import traceback
+        traceback.print_exc()
mbxai/examples/dialog_agent_example.py
ADDED
@@ -0,0 +1,157 @@
+"""
+Example usage of the AgentClient with persistent dialog sessions.
+"""
+
+import os
+from pydantic import BaseModel, Field
+from mbxai import AgentClient, OpenRouterClient, AnswerList, Answer
+
+
+class ChatResponse(BaseModel):
+    """A general chat response."""
+    response: str = Field(description="The response to the user's message")
+    context_awareness: str = Field(description="How this response relates to previous conversation")
+
+
+class BookRecommendation(BaseModel):
+    """A book recommendation response."""
+    title: str = Field(description="The title of the recommended book")
+    author: str = Field(description="The author of the book")
+    genre: str = Field(description="The genre of the book")
+    reason: str = Field(description="Why this book is recommended based on conversation")
+    connection_to_previous: str = Field(description="How this recommendation connects to our previous conversation")
+
+
+def demonstrate_dialog_conversation():
+    """Demonstrate persistent dialog functionality."""
+    print("🔄 DEMO: Persistent Dialog Agent")
+    print("=" * 50)
+
+    # Initialize the clients
+    openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
+    agent = AgentClient(openrouter_client, max_iterations=1)
+
+    # First conversation - start new session
+    print("\n1️⃣ Starting new conversation:")
+    prompt1 = "Hi, I'm looking for a good book to read. I love science fiction."
+    response1 = agent.agent(prompt1, ChatResponse, ask_questions=False)
+
+    if response1.is_complete():
+        agent_id = response1.agent_id
+        chat_resp = response1.final_response
+        print(f"Agent ID: {agent_id}")
+        print(f"Response: {chat_resp.response}")
+        print(f"Context awareness: {chat_resp.context_awareness}")
+
+    # Second conversation - continue same session
+    print(f"\n2️⃣ Continuing conversation with agent {agent_id}:")
+    prompt2 = "Actually, I also enjoy fantasy novels. What would you recommend that combines both genres?"
+    response2 = agent.agent(prompt2, BookRecommendation, ask_questions=False, agent_id=agent_id)
+
+    if response2.is_complete():
+        book_rec = response2.final_response
+        print(f"Book: {book_rec.title} by {book_rec.author}")
+        print(f"Genre: {book_rec.genre}")
+        print(f"Reason: {book_rec.reason}")
+        print(f"Connection to previous: {book_rec.connection_to_previous}")
+
+    # Third conversation - continue same session
+    print(f"\n3️⃣ Continuing conversation with agent {agent_id}:")
+    prompt3 = "That sounds great! Can you recommend something similar but from a different author?"
+    response3 = agent.agent(prompt3, BookRecommendation, ask_questions=False, agent_id=agent_id)
+
+    if response3.is_complete():
+        book_rec2 = response3.final_response
+        print(f"Book: {book_rec2.title} by {book_rec2.author}")
+        print(f"Genre: {book_rec2.genre}")
+        print(f"Reason: {book_rec2.reason}")
+        print(f"Connection to previous: {book_rec2.connection_to_previous}")
+
+    # Show session info
+    print(f"\n📊 Session Information:")
+    try:
+        session_info = agent.get_session_info(agent_id)
+        print(f"Conversation length: {session_info['conversation_length']} messages")
+        print(f"Session step: {session_info.get('step', 'unknown')}")
+    except Exception as e:
+        print(f"Error getting session info: {e}")
+
+    # List all sessions
+    print(f"\n📝 Active sessions: {agent.list_sessions()}")
+
+    # Cleanup - optional
+    print(f"\n🗑️ Cleaning up session...")
+    deleted = agent.delete_session(agent_id)
+    print(f"Session deleted: {deleted}")
+    print(f"Active sessions after cleanup: {agent.list_sessions()}")
+
+
+def demonstrate_dialog_with_questions():
+    """Demonstrate dialog with question-answer flow."""
+    print("\n🔄 DEMO: Dialog Agent with Questions")
+    print("=" * 50)
+
+    # Initialize the clients
+    openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
+    agent = AgentClient(openrouter_client, max_iterations=1)
+
+    # First conversation with questions
+    print("\n1️⃣ Starting conversation with questions:")
+    prompt1 = "I want a personalized book recommendation"
+    response1 = agent.agent(prompt1, BookRecommendation, ask_questions=True)
+
+    agent_id = response1.agent_id
+    print(f"Agent ID: {agent_id}")
+
+    if response1.has_questions():
+        print(f"\n📋 Agent generated {len(response1.questions)} questions:")
+        for i, question in enumerate(response1.questions, 1):
+            print(f"  {i}. {question.question} (key: {question.key})")
+
+        # Simulate answering questions
+        answers = AnswerList(answers=[
+            Answer(key="genre_preference", answer="I love science fiction and fantasy"),
+            Answer(key="reading_level", answer="I prefer complex, adult novels"),
+            Answer(key="recent_books", answer="I recently read and loved Dune and The Name of the Wind")
+        ])
+
+        print(f"\n📝 Providing answers...")
+        final_response = agent.agent("Continue with answers", BookRecommendation, ask_questions=False, agent_id=agent_id, answers=answers)
+
+        if final_response.is_complete():
+            book_rec = final_response.final_response
+            print(f"Book: {book_rec.title} by {book_rec.author}")
+            print(f"Genre: {book_rec.genre}")
+            print(f"Reason: {book_rec.reason}")
+
+    # Continue conversation - this should remember the previous interaction
+    print(f"\n2️⃣ Continuing conversation with agent {agent_id}:")
+    prompt2 = "Thank you! Can you also recommend something by a female author in the same genres?"
+    response2 = agent.agent(prompt2, BookRecommendation, ask_questions=False, agent_id=agent_id)
+
+    if response2.is_complete():
+        book_rec2 = response2.final_response
+        print(f"Book: {book_rec2.title} by {book_rec2.author}")
+        print(f"Genre: {book_rec2.genre}")
+        print(f"Reason: {book_rec2.reason}")
+        print(f"Connection to previous: {book_rec2.connection_to_previous}")
+
+    # Session cleanup
+    print(f"\n🗑️ Session cleanup...")
+    agent.delete_session(agent_id)
+
+
+if __name__ == "__main__":
+    print("=== Dialog Agent Examples ===\n")
+
+    try:
+        demonstrate_dialog_conversation()
+    except Exception as e:
+        print(f"Error in dialog conversation demo: {e}")
+
+    print("\n" + "="*80 + "\n")
+
+    try:
+        demonstrate_dialog_with_questions()
+    except Exception as e:
+        print(f"Error in dialog with questions demo: {e}")
mbxai/examples/unified_interface_example.py
ADDED
@@ -0,0 +1,109 @@
+"""
+Example demonstrating the new unified agent interface.
+"""
+
+import os
+from pydantic import BaseModel, Field
+from mbxai import AgentClient, OpenRouterClient, AnswerList, Answer
+
+
+class SimpleResponse(BaseModel):
+    """A simple response."""
+    response: str = Field(description="The response text")
+    context_used: str = Field(description="How context was used in this response")
+
+
+def demonstrate_unified_interface():
+    """Demonstrate the unified agent interface with and without questions."""
+    print("🔧 DEMO: Unified Agent Interface")
+    print("=" * 50)
+
+    # Initialize the clients
+    openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
+    agent = AgentClient(openrouter_client, max_iterations=1)
+
+    # Example 1: Start conversation with questions
+    print("\n1️⃣ Starting conversation that generates questions:")
+    response1 = agent.agent("I need help planning a trip", SimpleResponse, ask_questions=True)
+
+    agent_id = response1.agent_id
+    print(f"Agent ID: {agent_id}")
+
+    if response1.has_questions():
+        print(f"📋 Generated {len(response1.questions)} questions:")
+        for q in response1.questions:
+            print(f"  - {q.question} (key: {q.key})")
+
+    # Example 2: Provide answers using the unified interface
+    print(f"\n2️⃣ Providing answers using unified interface:")
+    answers = AnswerList(answers=[
+        Answer(key="destination", answer="Japan"),
+        Answer(key="duration", answer="10 days"),
+        Answer(key="budget", answer="$3000"),
+        Answer(key="interests", answer="culture, food, temples")
+    ])
+
+    response2 = agent.agent(
+        "Now help me plan the trip",
+        SimpleResponse,
+        ask_questions=False,
+        agent_id=agent_id,
+        answers=answers
+    )
+
+    if response2.is_complete():
+        trip_plan = response2.final_response
+        print(f"Response: {trip_plan.response}")
+        print(f"Context used: {trip_plan.context_used}")
+
+    # Example 3: Continue the conversation
+    print(f"\n3️⃣ Continuing conversation without questions:")
+    response3 = agent.agent(
+        "What about transportation within Japan?",
+        SimpleResponse,
+        ask_questions=False,
+        agent_id=agent_id
+    )
+
+    if response3.is_complete():
+        transport_info = response3.final_response
+        print(f"Response: {transport_info.response}")
+        print(f"Context used: {transport_info.context_used}")
+
+    # Example 4: Using answers without previous questions (new session)
+    print(f"\n4️⃣ Starting new session with direct answers (no questions):")
+    new_answers = AnswerList(answers=[
+        Answer(key="city", answer="Tokyo"),
+        Answer(key="travel_style", answer="luxury"),
+        Answer(key="group_size", answer="2 people")
+    ])
+
+    response4 = agent.agent(
+        "Recommend restaurants",
+        SimpleResponse,
+        ask_questions=False,
+        answers=new_answers  # New session, no agent_id provided
+    )
+
+    if response4.is_complete():
+        restaurant_info = response4.final_response
+        print(f"New Agent ID: {response4.agent_id}")
+        print(f"Response: {restaurant_info.response}")
+        print(f"Context used: {restaurant_info.context_used}")
+
+    # Show active sessions
+    print(f"\n📊 Active Sessions: {agent.list_sessions()}")
+
+    # Cleanup
+    print(f"\n🗑️ Cleaning up sessions...")
+    agent.delete_session(agent_id)
+    if response4.agent_id != agent_id:
+        agent.delete_session(response4.agent_id)
+    print(f"Active Sessions after cleanup: {agent.list_sessions()}")
+
+
+if __name__ == "__main__":
+    try:
+        demonstrate_unified_interface()
+    except Exception as e:
+        print(f"Error: {e}")
mbxai/mcp/server.py
CHANGED
{mbxai-2.0.5.dist-info → mbxai-2.1.1.dist-info}/RECORD
CHANGED
@@ -1,14 +1,16 @@
-mbxai/__init__.py,sha256=
+mbxai/__init__.py,sha256=e4xVVEhVt0ryj9sh9mHtCKINk2GfFdnSuQndPtTOdHw,407
 mbxai/core.py,sha256=WMvmU9TTa7M_m-qWsUew4xH8Ul6xseCZ2iBCXJTW-Bs,196
 mbxai/agent/__init__.py,sha256=5j3mW2NZtAU1s2w8n833axWBQsxW8U0qKwoQ9JtQZ4k,289
-mbxai/agent/client.py,sha256=
+mbxai/agent/client.py,sha256=zVJVa-7xxQqkWcmAwK69RLP0K7mR4_L1El0tQz_-P_k,34729
 mbxai/agent/models.py,sha256=sjBtaAENDABHl8IqTON1gxFFSZIaQYUCBFHB5804_Fw,5780
-mbxai/examples/agent_example.py,sha256=
+mbxai/examples/agent_example.py,sha256=7gQHcMVWBu2xdxnVNzz4UfW0lkUnw9a5DN2-YoIRxXE,7420
 mbxai/examples/agent_iterations_example.py,sha256=xMqZhBWS67EkRkArjOAY2fCgLkQ32Qn9E4CSfEKW4MU,7905
-mbxai/examples/agent_logging_example.py,sha256=
+mbxai/examples/agent_logging_example.py,sha256=P5LDcoIn0XCYWMPJVTjeXNkY32ELyKEf63Z_1nu5QkA,10232
 mbxai/examples/agent_tool_registration_example.py,sha256=oWm0-d4mdba-VQ3HobiCIR0IHtEDCtJenb8Lnm9QqCw,9108
 mbxai/examples/agent_validation_example.py,sha256=xlEf5Mwq5_Iu8bNU4cuHGZVYvAyZNhO2GMFmOom-CLo,4185
 mbxai/examples/auto_schema_example.py,sha256=ymuJJqqDxYznZT2VN6zVFEM7m_lDuccZ1AKSx-xzLTM,8174
+mbxai/examples/conversation_history_test.py,sha256=TpOh5ruQlXDPTPEu_0qTACAaQPSklKp8RYiOm1UzqPI,7773
+mbxai/examples/dialog_agent_example.py,sha256=k502Y_pq6uddWEcH-5i0MxqyakxHKSy-KvHv1s4G1dw,7015
 mbxai/examples/openrouter_example.py,sha256=-grXHKMmFLoh-yUIEMc31n8Gg1S7uSazBWCIOWxgbyQ,1317
 mbxai/examples/parse_example.py,sha256=eCKMJoOl6qwo8sDP6Trc6ncgjPlgTqi5tPE2kB5_P0k,3821
 mbxai/examples/parse_tool_example.py,sha256=duHN8scI9ZK6XZ5hdiz1Adzyc-_7tH9Ls9qP4S0bf5s,5477
@@ -17,12 +19,13 @@ mbxai/examples/response.json,sha256=4SGJJyQjWWeN__Mrxm6ZtHIo1NUtLEheldd5KaA2mHw,
 mbxai/examples/send_request.py,sha256=O5gCHUHy7RvkEFo9IQATgnSOfOdu8OqKHfjAlLDwWPg,6023
 mbxai/examples/simple_agent_test.py,sha256=joCVszUpRkrxHv2DM9QTAh1r6S8iv16pZ-zSPZSBQiU,6391
 mbxai/examples/tool_client_example.py,sha256=9DNaejXLA85dPbExMiv5y76qlFhzOJF9E5EnMOsy_Dc,3993
+mbxai/examples/unified_interface_example.py,sha256=EQpatD95zHPAbXN93EHA4EB0v-5vMsOA1yfEMFFmF-A,3887
 mbxai/examples/mcp/mcp_client_example.py,sha256=d5-TRHNDdp3nT_NGt0tKpT3VUAJVvqAHSyqkzk9Dd2s,2972
 mbxai/examples/mcp/mcp_server_example.py,sha256=nFfg22Jnc6HMW_ezLO3So1xwDdx2_rItj5CR-y_Nevs,3966
 mbxai/mcp/__init__.py,sha256=_ek9iYdYqW5saKetj4qDci11jxesQDiHPJRpHMKkxgU,175
 mbxai/mcp/client.py,sha256=QRzId6o4_WRWVv3rtm8cfZZGaoY_UlaOO-oqNjY-tmw,5219
 mbxai/mcp/example.py,sha256=oaol7AvvZnX86JWNz64KvPjab5gg1VjVN3G8eFSzuaE,2350
-mbxai/mcp/server.py,sha256=
+mbxai/mcp/server.py,sha256=DN4a7qwoz_YSLpeGhZlez1XIqddYFkn7RC-2VKno3Uc,3332
 mbxai/openrouter/__init__.py,sha256=Ito9Qp_B6q-RLGAQcYyTJVWwR2YAZvNqE-HIYXxhtD8,298
 mbxai/openrouter/client.py,sha256=3LD6WDJ8wjo_nefH5d1NJCsrWPvBc_KBf2NsItUoSt8,18302
 mbxai/openrouter/config.py,sha256=Ia93s-auim9Sq71eunVDbn9ET5xX2zusXpV4JBdHAzs,3251
@@ -32,7 +35,7 @@ mbxai/tools/__init__.py,sha256=ogxrHvgJ7OR62Lmd5x9Eh5d2C0jqWyQis7Zy3yKpZ78,218
 mbxai/tools/client.py,sha256=2wFPD-UN3Y2DSyrnqxt2vvFgTYHzUl14_y0r6fhAWmM,17198
 mbxai/tools/example.py,sha256=1HgKK39zzUuwFbnp3f0ThyWVfA_8P28PZcTwaUw5K78,2232
 mbxai/tools/types.py,sha256=OFfM7scDGTm4FOcJA2ecj-fxL1MEBkqPsT3hqCL1Jto,9505
-mbxai-2.
-mbxai-2.
-mbxai-2.
-mbxai-2.
+mbxai-2.1.1.dist-info/METADATA,sha256=PlNhE1zB42gvsB3HbLJTSVgA0JKmjhGtdX_ThlCD1Hk,10018
+mbxai-2.1.1.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+mbxai-2.1.1.dist-info/licenses/LICENSE,sha256=hEyhc4FxwYo3NQ40yNgZ7STqwVk-1_XcTXOnAPbGJAw,1069
+mbxai-2.1.1.dist-info/RECORD,,
{mbxai-2.0.5.dist-info → mbxai-2.1.1.dist-info}/WHEEL
File without changes
{mbxai-2.0.5.dist-info → mbxai-2.1.1.dist-info}/licenses/LICENSE
File without changes