mbxai 2.1.0__tar.gz → 2.1.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mbxai-2.1.0 → mbxai-2.1.2}/PKG-INFO +1 -1
- {mbxai-2.1.0 → mbxai-2.1.2}/pyproject.toml +1 -1
- {mbxai-2.1.0 → mbxai-2.1.2}/setup.py +1 -1
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/__init__.py +1 -1
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/agent/client.py +132 -61
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/examples/agent_example.py +2 -2
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/examples/agent_logging_example.py +1 -1
- mbxai-2.1.2/src/mbxai/examples/conversation_history_test.py +169 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/examples/dialog_agent_example.py +16 -1
- mbxai-2.1.2/src/mbxai/examples/optional_prompt_example.py +164 -0
- mbxai-2.1.2/src/mbxai/examples/unified_interface_example.py +109 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/mcp/server.py +1 -1
- {mbxai-2.1.0 → mbxai-2.1.2}/.gitignore +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/LICENSE +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/README.md +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/agent/__init__.py +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/agent/models.py +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/core.py +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/examples/agent_iterations_example.py +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/examples/agent_tool_registration_example.py +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/examples/agent_validation_example.py +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/examples/auto_schema_example.py +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/examples/mcp/mcp_client_example.py +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/examples/mcp/mcp_server_example.py +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/examples/openrouter_example.py +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/examples/parse_example.py +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/examples/parse_tool_example.py +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/examples/request.json +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/examples/response.json +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/examples/send_request.py +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/examples/simple_agent_test.py +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/examples/tool_client_example.py +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/mcp/__init__.py +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/mcp/client.py +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/mcp/example.py +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/openrouter/__init__.py +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/openrouter/client.py +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/openrouter/config.py +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/openrouter/models.py +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/openrouter/schema.py +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/tools/__init__.py +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/tools/client.py +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/tools/example.py +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/src/mbxai/tools/types.py +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/tests/test_mcp_tool_registration.py +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/tests/test_real_mcp_schema.py +0 -0
- {mbxai-2.1.0 → mbxai-2.1.2}/tests/test_schema_conversion.py +0 -0
@@ -128,8 +128,10 @@ class AgentClient:
|
|
128
128
|
# Combine conversation history with new messages
|
129
129
|
if conversation_history:
|
130
130
|
full_messages = conversation_history + messages
|
131
|
+
logger.debug(f"🔗 AI call with {len(conversation_history)} history messages + {len(messages)} new messages = {len(full_messages)} total")
|
131
132
|
else:
|
132
133
|
full_messages = messages
|
134
|
+
logger.debug(f"🔗 AI call with {len(messages)} messages (no history)")
|
133
135
|
return self._ai_client.parse(full_messages, response_format)
|
134
136
|
|
135
137
|
def _extract_token_usage(self, response: Any) -> TokenUsage:
|
@@ -182,36 +184,76 @@ class AgentClient:
|
|
182
184
|
|
183
185
|
def agent(
|
184
186
|
self,
|
185
|
-
prompt: str,
|
186
|
-
final_response_structure: Type[BaseModel],
|
187
|
+
prompt: str = None,
|
188
|
+
final_response_structure: Type[BaseModel] = None,
|
187
189
|
ask_questions: bool = True,
|
188
|
-
agent_id: str = None
|
190
|
+
agent_id: str = None,
|
191
|
+
answers: AnswerList = None
|
189
192
|
) -> AgentResponse:
|
190
193
|
"""
|
191
194
|
Process a prompt through the agent's thinking process.
|
192
195
|
|
193
196
|
Args:
|
194
|
-
prompt: The
|
195
|
-
final_response_structure: Pydantic model defining the expected final response format
|
197
|
+
prompt: The prompt from the user (optional if agent_id exists with history)
|
198
|
+
final_response_structure: Pydantic model defining the expected final response format (required for new sessions)
|
196
199
|
ask_questions: Whether to ask clarifying questions (default: True)
|
197
200
|
agent_id: Optional agent session ID to continue an existing conversation
|
201
|
+
answers: Optional answers to questions (when continuing a conversation with questions)
|
198
202
|
|
199
203
|
Returns:
|
200
204
|
AgentResponse containing either questions to ask or the final response
|
205
|
+
|
206
|
+
Raises:
|
207
|
+
ValueError: If neither prompt nor agent_id with history is provided, or if final_response_structure is missing for new sessions
|
201
208
|
"""
|
202
|
-
#
|
203
|
-
|
204
|
-
|
209
|
+
# Validate inputs and determine session type
|
210
|
+
is_existing_session = agent_id is not None and agent_id in self._agent_sessions
|
211
|
+
existing_session = self._agent_sessions.get(agent_id, {}) if agent_id else {}
|
212
|
+
conversation_history = existing_session.get("conversation_history", []).copy()
|
213
|
+
|
214
|
+
# Validation logic
|
215
|
+
if not is_existing_session:
|
216
|
+
# New session - both prompt and final_response_structure are required
|
217
|
+
if not prompt:
|
218
|
+
raise ValueError("Prompt is required when starting a new agent session")
|
219
|
+
if not final_response_structure:
|
220
|
+
raise ValueError("final_response_structure is required when starting a new agent session")
|
221
|
+
|
222
|
+
# Create new agent_id if not provided
|
223
|
+
if agent_id is None:
|
224
|
+
agent_id = str(__import__("uuid").uuid4())
|
205
225
|
logger.info(f"🚀 Starting new agent process (ID: {agent_id}) with prompt: {prompt[:100]}...")
|
206
226
|
else:
|
207
|
-
|
227
|
+
# Existing session - use previous final_response_structure if not provided
|
228
|
+
if not final_response_structure:
|
229
|
+
final_response_structure = existing_session.get("final_response_structure")
|
230
|
+
if not final_response_structure:
|
231
|
+
raise ValueError("final_response_structure not found in existing session and not provided")
|
232
|
+
|
233
|
+
# Handle optional prompt for existing sessions
|
234
|
+
if not prompt:
|
235
|
+
# Use conversation history to continue without explicit prompt
|
236
|
+
prompt = "[Continue conversation based on history]"
|
237
|
+
logger.info(f"🔄 Continuing agent process (ID: {agent_id}) without explicit prompt (using history)")
|
238
|
+
else:
|
239
|
+
logger.info(f"🔄 Continuing agent process (ID: {agent_id}) with prompt: {prompt[:100]}...")
|
208
240
|
|
209
241
|
# Initialize token summary
|
210
242
|
token_summary = TokenSummary()
|
211
243
|
|
212
|
-
|
213
|
-
|
214
|
-
|
244
|
+
if conversation_history:
|
245
|
+
logger.info(f"📜 Agent {agent_id}: Loaded conversation history with {len(conversation_history)} messages")
|
246
|
+
|
247
|
+
# Store conversation history for AI calls (don't include current prompt yet)
|
248
|
+
history_for_ai = conversation_history.copy()
|
249
|
+
|
250
|
+
# Add current prompt to full conversation history for session storage
|
251
|
+
conversation_history.append({"role": "user", "content": prompt})
|
252
|
+
|
253
|
+
# Handle answers provided (skip question generation and process directly)
|
254
|
+
if answers is not None:
|
255
|
+
logger.info(f"📝 Agent {agent_id}: Processing with provided answers, skipping question generation")
|
256
|
+
return self._process_answers_directly(agent_id, prompt, final_response_structure, answers, token_summary, history_for_ai)
|
215
257
|
|
216
258
|
# Step 1: Generate questions (if ask_questions is True)
|
217
259
|
if ask_questions:
|
@@ -236,7 +278,7 @@ IMPORTANT: For each question, provide a technical key identifier that:
|
|
236
278
|
messages = [{"role": "user", "content": questions_prompt}]
|
237
279
|
|
238
280
|
try:
|
239
|
-
response = self._call_ai_parse(messages, QuestionList,
|
281
|
+
response = self._call_ai_parse(messages, QuestionList, history_for_ai)
|
240
282
|
question_list = self._extract_parsed_content(response, QuestionList)
|
241
283
|
|
242
284
|
# Extract token usage for question generation
|
@@ -253,7 +295,8 @@ IMPORTANT: For each question, provide a technical key identifier that:
|
|
253
295
|
"final_response_structure": final_response_structure,
|
254
296
|
"questions": question_list.questions,
|
255
297
|
"step": "waiting_for_answers",
|
256
|
-
"token_summary": token_summary
|
298
|
+
"token_summary": token_summary,
|
299
|
+
"conversation_history": history_for_ai # Include history without current prompt
|
257
300
|
}
|
258
301
|
logger.info(f"📋 Agent {agent_id}: Waiting for user answers to {len(question_list.questions)} questions")
|
259
302
|
return agent_response
|
@@ -263,71 +306,75 @@ IMPORTANT: For each question, provide a technical key identifier that:
|
|
263
306
|
|
264
307
|
# Step 2 & 3: No questions or ask_questions=False - proceed directly
|
265
308
|
logger.info(f"⚡ Agent {agent_id}: No questions needed, proceeding directly to processing")
|
266
|
-
return self._process_with_answers(prompt, final_response_structure, [], agent_id, token_summary,
|
309
|
+
return self._process_with_answers(prompt, final_response_structure, [], agent_id, token_summary, history_for_ai)
|
267
310
|
|
268
|
-
def
|
311
|
+
def _process_answers_directly(
|
312
|
+
self,
|
313
|
+
agent_id: str,
|
314
|
+
prompt: str,
|
315
|
+
final_response_structure: Type[BaseModel],
|
316
|
+
answers: AnswerList,
|
317
|
+
token_summary: TokenSummary,
|
318
|
+
conversation_history: list[dict[str, Any]]
|
319
|
+
) -> AgentResponse:
|
269
320
|
"""
|
270
|
-
|
271
|
-
|
321
|
+
Process answers directly without going through question generation.
|
322
|
+
|
272
323
|
Args:
|
273
324
|
agent_id: The agent session identifier
|
274
|
-
|
275
|
-
|
325
|
+
prompt: The current prompt
|
326
|
+
final_response_structure: Expected response structure
|
327
|
+
answers: Provided answers
|
328
|
+
token_summary: Current token usage summary
|
329
|
+
conversation_history: Conversation history
|
330
|
+
|
276
331
|
Returns:
|
277
332
|
AgentResponse with the final result
|
278
|
-
|
279
|
-
Raises:
|
280
|
-
ValueError: If the agent session is not found or in wrong state
|
281
333
|
"""
|
282
|
-
if
|
283
|
-
|
284
|
-
|
285
|
-
session = self._agent_sessions[agent_id]
|
286
|
-
if session["step"] != "waiting_for_answers":
|
287
|
-
raise ValueError(f"Agent session {agent_id} is not waiting for answers")
|
288
|
-
|
289
|
-
# Convert answers to a more usable format and create question-answer pairs
|
290
|
-
answer_dict = {answer.key: answer.answer for answer in answers.answers}
|
334
|
+
# Check if we have a session with questions to match against
|
335
|
+
session = self._agent_sessions.get(agent_id, {})
|
291
336
|
questions = session.get("questions", [])
|
292
337
|
|
293
|
-
|
294
|
-
|
295
|
-
|
296
|
-
|
297
|
-
qa_pairs
|
298
|
-
|
299
|
-
|
300
|
-
|
301
|
-
|
302
|
-
|
303
|
-
|
304
|
-
|
305
|
-
|
306
|
-
|
307
|
-
|
308
|
-
|
338
|
+
if not questions:
|
339
|
+
# No previous questions - treat as simple additional context
|
340
|
+
logger.info(f"📝 Agent {agent_id}: No previous questions found, treating answers as additional context")
|
341
|
+
answer_dict = {answer.key: answer.answer for answer in answers.answers}
|
342
|
+
qa_pairs = []
|
343
|
+
for answer in answers.answers:
|
344
|
+
qa_pairs.append({
|
345
|
+
"question": f"Information about {answer.key}",
|
346
|
+
"key": answer.key,
|
347
|
+
"answer": answer.answer,
|
348
|
+
"required": True
|
349
|
+
})
|
350
|
+
else:
|
351
|
+
# Match answers with previous questions
|
352
|
+
logger.info(f"📝 Agent {agent_id}: Matching {len(answers.answers)} answers with previous questions")
|
353
|
+
answer_dict = {answer.key: answer.answer for answer in answers.answers}
|
354
|
+
|
355
|
+
# Create question-answer pairs for better context
|
356
|
+
qa_pairs = []
|
357
|
+
for question in questions:
|
358
|
+
answer_text = answer_dict.get(question.key, "No answer provided")
|
359
|
+
qa_pairs.append({
|
360
|
+
"question": question.question,
|
361
|
+
"key": question.key,
|
362
|
+
"answer": answer_text,
|
363
|
+
"required": question.required
|
364
|
+
})
|
309
365
|
|
310
366
|
# Process with the provided answers and question context
|
311
367
|
result = self._process_with_answers(
|
312
|
-
|
313
|
-
|
368
|
+
prompt,
|
369
|
+
final_response_structure,
|
314
370
|
qa_pairs,
|
315
371
|
agent_id,
|
316
372
|
token_summary,
|
317
373
|
conversation_history
|
318
374
|
)
|
319
375
|
|
320
|
-
#
|
321
|
-
|
322
|
-
self._agent_sessions[agent_id]["step"] = "completed"
|
323
|
-
self._agent_sessions[agent_id]["conversation_history"] = self._agent_sessions[agent_id].get("conversation_history", [])
|
324
|
-
|
325
|
-
# Add this interaction to history
|
326
|
-
self._agent_sessions[agent_id]["conversation_history"].extend([
|
327
|
-
{"role": "user", "content": session["original_prompt"]},
|
328
|
-
{"role": "assistant", "content": str(result.final_response) if result.final_response else "No response generated"}
|
329
|
-
])
|
330
|
-
|
376
|
+
# Note: History management is now handled in _process_with_answers
|
377
|
+
# No need to duplicate history management here
|
331
378
|
return result
|
332
379
|
|
333
380
|
def _format_qa_context_for_quality_check(self, answers: Union[list, dict[str, str]]) -> str:
|
@@ -406,6 +453,30 @@ IMPORTANT: For each question, provide a technical key identifier that:
|
|
406
453
|
logger.info(f"📝 Agent {agent_id}: Generating final structured response")
|
407
454
|
final_response = self._generate_final_response(prompt, final_result, final_response_structure, agent_id, token_summary, conversation_history)
|
408
455
|
|
456
|
+
# Update session with the final response in conversation history
|
457
|
+
if agent_id in self._agent_sessions:
|
458
|
+
# Update conversation history with assistant response
|
459
|
+
updated_history = conversation_history.copy()
|
460
|
+
updated_history.append({"role": "assistant", "content": str(final_response)})
|
461
|
+
|
462
|
+
self._agent_sessions[agent_id]["conversation_history"] = updated_history
|
463
|
+
self._agent_sessions[agent_id]["step"] = "completed"
|
464
|
+
self._agent_sessions[agent_id]["token_summary"] = token_summary
|
465
|
+
self._agent_sessions[agent_id]["final_response_structure"] = final_response_structure
|
466
|
+
logger.info(f"💾 Agent {agent_id}: Updated session with conversation history ({len(updated_history)} messages)")
|
467
|
+
else:
|
468
|
+
# Create new session if it doesn't exist
|
469
|
+
updated_history = conversation_history.copy()
|
470
|
+
updated_history.append({"role": "assistant", "content": str(final_response)})
|
471
|
+
|
472
|
+
self._agent_sessions[agent_id] = {
|
473
|
+
"step": "completed",
|
474
|
+
"conversation_history": updated_history,
|
475
|
+
"token_summary": token_summary,
|
476
|
+
"final_response_structure": final_response_structure
|
477
|
+
}
|
478
|
+
logger.info(f"💾 Agent {agent_id}: Created new session with conversation history ({len(updated_history)} messages)")
|
479
|
+
|
409
480
|
# Log final token summary
|
410
481
|
logger.info(f"📊 Agent {agent_id}: Token usage summary - Total: {token_summary.total_tokens} "
|
411
482
|
f"(Prompt: {token_summary.total_prompt_tokens}, Completion: {token_summary.total_completion_tokens})")
|
@@ -48,8 +48,8 @@ def example_with_questions():
|
|
48
48
|
Answer(key="recent_books", answer="I recently read and loved Dune and The Name of the Wind")
|
49
49
|
])
|
50
50
|
|
51
|
-
# Continue the agent process with answers
|
52
|
-
final_response = agent.
|
51
|
+
# Continue the agent process with answers using the unified interface
|
52
|
+
final_response = agent.agent("Continue with previous questions", BookRecommendation, ask_questions=False, agent_id=response.agent_id, answers=answers)
|
53
53
|
|
54
54
|
if final_response.is_complete():
|
55
55
|
book_rec = final_response.final_response
|
@@ -74,7 +74,7 @@ def demonstrate_agent_with_questions():
|
|
74
74
|
])
|
75
75
|
|
76
76
|
print(f"\n📝 Providing answers and continuing...")
|
77
|
-
final_response = agent.
|
77
|
+
final_response = agent.agent("Continue with answers", WeatherResponse, ask_questions=False, agent_id=response.agent_id, answers=answers)
|
78
78
|
|
79
79
|
if final_response.is_complete():
|
80
80
|
print("\n✅ Final response received!")
|
@@ -0,0 +1,169 @@
|
|
1
|
+
"""
|
2
|
+
Test example demonstrating conversation history persistence across multiple interactions.
|
3
|
+
"""
|
4
|
+
|
5
|
+
import os
|
6
|
+
from pydantic import BaseModel, Field
|
7
|
+
from mbxai import AgentClient, OpenRouterClient, AnswerList, Answer
|
8
|
+
|
9
|
+
|
10
|
+
class StoryResponse(BaseModel):
|
11
|
+
"""A story response that should reference previous conversation."""
|
12
|
+
story_part: str = Field(description="The current part of the story")
|
13
|
+
references_to_previous: str = Field(description="How this connects to our previous conversation")
|
14
|
+
conversation_context: str = Field(description="Summary of what was discussed before")
|
15
|
+
|
16
|
+
|
17
|
+
class ChatResponse(BaseModel):
|
18
|
+
"""A general chat response."""
|
19
|
+
response: str = Field(description="The response to the user's message")
|
20
|
+
context_awareness: str = Field(description="What the AI remembers from our conversation")
|
21
|
+
|
22
|
+
|
23
|
+
def test_conversation_history_persistence():
|
24
|
+
"""Test that conversation history persists and is used across multiple interactions."""
|
25
|
+
print("🧪 TESTING: Conversation History Persistence")
|
26
|
+
print("=" * 60)
|
27
|
+
|
28
|
+
# Initialize the clients
|
29
|
+
openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
|
30
|
+
agent = AgentClient(openrouter_client, max_iterations=1)
|
31
|
+
|
32
|
+
# First interaction - establish context
|
33
|
+
print("\n1️⃣ First interaction - setting up story context:")
|
34
|
+
prompt1 = "I want to tell a collaborative story about a space explorer named Luna who discovers an ancient alien artifact on Mars."
|
35
|
+
response1 = agent.agent(prompt1, StoryResponse, ask_questions=False)
|
36
|
+
|
37
|
+
agent_id = response1.agent_id
|
38
|
+
print(f"Agent ID: {agent_id}")
|
39
|
+
|
40
|
+
if response1.is_complete():
|
41
|
+
story1 = response1.final_response
|
42
|
+
print(f"Story Part 1: {story1.story_part[:200]}...")
|
43
|
+
print(f"Context awareness: {story1.context_awareness}")
|
44
|
+
|
45
|
+
# Check session info
|
46
|
+
session_info = agent.get_session_info(agent_id)
|
47
|
+
print(f"📊 Session after first interaction: {session_info['conversation_length']} messages")
|
48
|
+
|
49
|
+
# Second interaction - continue story, should reference Luna and the artifact
|
50
|
+
print(f"\n2️⃣ Second interaction - continuing story (should remember Luna and artifact):")
|
51
|
+
prompt2 = "Luna touches the artifact and something amazing happens. Continue the story."
|
52
|
+
response2 = agent.agent(prompt2, StoryResponse, ask_questions=False, agent_id=agent_id)
|
53
|
+
|
54
|
+
if response2.is_complete():
|
55
|
+
story2 = response2.final_response
|
56
|
+
print(f"Story Part 2: {story2.story_part[:200]}...")
|
57
|
+
print(f"References to previous: {story2.references_to_previous}")
|
58
|
+
print(f"Conversation context: {story2.conversation_context}")
|
59
|
+
|
60
|
+
# Check session info
|
61
|
+
session_info = agent.get_session_info(agent_id)
|
62
|
+
print(f"📊 Session after second interaction: {session_info['conversation_length']} messages")
|
63
|
+
|
64
|
+
# Third interaction - change topic but should still remember story context
|
65
|
+
print(f"\n3️⃣ Third interaction - changing topic (should still remember our story):")
|
66
|
+
prompt3 = "Actually, let's pause the story. What do you think Luna's personality is like based on our story so far?"
|
67
|
+
response3 = agent.agent(prompt3, ChatResponse, ask_questions=False, agent_id=agent_id)
|
68
|
+
|
69
|
+
if response3.is_complete():
|
70
|
+
chat3 = response3.final_response
|
71
|
+
print(f"Response: {chat3.response}")
|
72
|
+
print(f"Context awareness: {chat3.context_awareness}")
|
73
|
+
|
74
|
+
# Check session info
|
75
|
+
session_info = agent.get_session_info(agent_id)
|
76
|
+
print(f"📊 Session after third interaction: {session_info['conversation_length']} messages")
|
77
|
+
|
78
|
+
# Fourth interaction - return to story, should remember everything
|
79
|
+
print(f"\n4️⃣ Fourth interaction - returning to story (should remember all previous context):")
|
80
|
+
prompt4 = "Great! Now let's continue Luna's story from where we left off. What happens next with the artifact?"
|
81
|
+
response4 = agent.agent(prompt4, StoryResponse, ask_questions=False, agent_id=agent_id)
|
82
|
+
|
83
|
+
if response4.is_complete():
|
84
|
+
story4 = response4.final_response
|
85
|
+
print(f"Story Part 4: {story4.story_part[:200]}...")
|
86
|
+
print(f"References to previous: {story4.references_to_previous}")
|
87
|
+
print(f"Conversation context: {story4.conversation_context}")
|
88
|
+
|
89
|
+
# Final session info
|
90
|
+
session_info = agent.get_session_info(agent_id)
|
91
|
+
print(f"📊 Final session state: {session_info['conversation_length']} messages")
|
92
|
+
print(f"Session step: {session_info.get('step', 'unknown')}")
|
93
|
+
|
94
|
+
# Display full conversation history
|
95
|
+
print(f"\n💬 FULL CONVERSATION HISTORY:")
|
96
|
+
session_info = agent.get_session_info(agent_id)
|
97
|
+
history = session_info.get('conversation_history', [])
|
98
|
+
for i, msg in enumerate(history, 1):
|
99
|
+
role = msg['role'].upper()
|
100
|
+
content = msg['content'][:100] + "..." if len(msg['content']) > 100 else msg['content']
|
101
|
+
print(f"{i:2d}. {role}: {content}")
|
102
|
+
|
103
|
+
# Cleanup
|
104
|
+
print(f"\n🗑️ Cleaning up session {agent_id}")
|
105
|
+
agent.delete_session(agent_id)
|
106
|
+
|
107
|
+
|
108
|
+
def test_with_questions_and_history():
|
109
|
+
"""Test conversation history with questions and answers."""
|
110
|
+
print("\n🧪 TESTING: Questions + Answers + History")
|
111
|
+
print("=" * 60)
|
112
|
+
|
113
|
+
openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
|
114
|
+
agent = AgentClient(openrouter_client, max_iterations=1)
|
115
|
+
|
116
|
+
# Start with questions
|
117
|
+
print("\n1️⃣ Starting conversation with questions:")
|
118
|
+
response1 = agent.agent("I want a personalized workout plan", ChatResponse, ask_questions=True)
|
119
|
+
|
120
|
+
agent_id = response1.agent_id
|
121
|
+
print(f"Agent ID: {agent_id}")
|
122
|
+
|
123
|
+
if response1.has_questions():
|
124
|
+
print(f"📋 Generated {len(response1.questions)} questions:")
|
125
|
+
for q in response1.questions:
|
126
|
+
print(f" - {q.question}")
|
127
|
+
|
128
|
+
# Answer questions
|
129
|
+
answers = AnswerList(answers=[
|
130
|
+
Answer(key="fitness_level", answer="Beginner"),
|
131
|
+
Answer(key="goals", answer="Weight loss and muscle building"),
|
132
|
+
Answer(key="time_available", answer="30 minutes per day, 4 days a week"),
|
133
|
+
Answer(key="equipment", answer="Home gym with dumbbells and resistance bands")
|
134
|
+
])
|
135
|
+
|
136
|
+
print(f"\n2️⃣ Providing answers:")
|
137
|
+
response2 = agent.agent("Here are my answers", ChatResponse, ask_questions=False, agent_id=agent_id, answers=answers)
|
138
|
+
|
139
|
+
if response2.is_complete():
|
140
|
+
workout_plan = response2.final_response
|
141
|
+
print(f"Workout plan: {workout_plan.response[:200]}...")
|
142
|
+
print(f"Context awareness: {workout_plan.context_awareness}")
|
143
|
+
|
144
|
+
# Continue conversation
|
145
|
+
print(f"\n3️⃣ Follow-up question (should remember all previous context):")
|
146
|
+
response3 = agent.agent("Can you modify this plan to focus more on cardio?", ChatResponse, ask_questions=False, agent_id=agent_id)
|
147
|
+
|
148
|
+
if response3.is_complete():
|
149
|
+
modified_plan = response3.final_response
|
150
|
+
print(f"Modified plan: {modified_plan.response[:200]}...")
|
151
|
+
print(f"Context awareness: {modified_plan.context_awareness}")
|
152
|
+
|
153
|
+
# Show history
|
154
|
+
session_info = agent.get_session_info(agent_id)
|
155
|
+
print(f"\n💬 Conversation had {session_info['conversation_length']} messages")
|
156
|
+
|
157
|
+
# Cleanup
|
158
|
+
agent.delete_session(agent_id)
|
159
|
+
|
160
|
+
|
161
|
+
if __name__ == "__main__":
|
162
|
+
try:
|
163
|
+
test_conversation_history_persistence()
|
164
|
+
print("\n" + "="*80 + "\n")
|
165
|
+
test_with_questions_and_history()
|
166
|
+
except Exception as e:
|
167
|
+
print(f"Error: {e}")
|
168
|
+
import traceback
|
169
|
+
traceback.print_exc()
|
@@ -116,7 +116,7 @@ def demonstrate_dialog_with_questions():
|
|
116
116
|
])
|
117
117
|
|
118
118
|
print(f"\n📝 Providing answers...")
|
119
|
-
final_response = agent.
|
119
|
+
final_response = agent.agent("Continue with answers", BookRecommendation, ask_questions=False, agent_id=agent_id, answers=answers)
|
120
120
|
|
121
121
|
if final_response.is_complete():
|
122
122
|
book_rec = final_response.final_response
|
@@ -135,6 +135,21 @@ def demonstrate_dialog_with_questions():
|
|
135
135
|
print(f"Genre: {book_rec2.genre}")
|
136
136
|
print(f"Reason: {book_rec2.reason}")
|
137
137
|
print(f"Connection to previous: {book_rec2.connection_to_previous}")
|
138
|
+
|
139
|
+
# Continue WITHOUT explicit prompt - using only conversation history
|
140
|
+
print(f"\n3️⃣ Continuing conversation WITHOUT explicit prompt (history-based):")
|
141
|
+
try:
|
142
|
+
response3 = agent.agent(agent_id=agent_id, ask_questions=False) # No prompt provided
|
143
|
+
|
144
|
+
if response3.is_complete():
|
145
|
+
book_rec3 = response3.final_response
|
146
|
+
print(f"History-based recommendation:")
|
147
|
+
print(f"Book: {book_rec3.title} by {book_rec3.author}")
|
148
|
+
print(f"Genre: {book_rec3.genre}")
|
149
|
+
print(f"Reason: {book_rec3.reason}")
|
150
|
+
print(f"Connection to previous: {book_rec3.connection_to_previous}")
|
151
|
+
except Exception as e:
|
152
|
+
print(f"Error with history-based continuation: {e}")
|
138
153
|
|
139
154
|
# Session cleanup
|
140
155
|
print(f"\n🗑️ Session cleanup...")
|
@@ -0,0 +1,164 @@
|
|
1
|
+
"""
|
2
|
+
Example demonstrating optional prompt functionality with existing agent sessions.
|
3
|
+
"""
|
4
|
+
|
5
|
+
import os
|
6
|
+
from pydantic import BaseModel, Field
|
7
|
+
from mbxai import AgentClient, OpenRouterClient, AnswerList, Answer
|
8
|
+
|
9
|
+
|
10
|
+
class ConversationResponse(BaseModel):
|
11
|
+
"""A general conversation response."""
|
12
|
+
response: str = Field(description="The response to the conversation")
|
13
|
+
context_used: str = Field(description="How previous conversation history was used")
|
14
|
+
continuation_type: str = Field(description="Type of continuation (new topic, follow-up, etc.)")
|
15
|
+
|
16
|
+
|
17
|
+
class StoryResponse(BaseModel):
|
18
|
+
"""A story response."""
|
19
|
+
story_continuation: str = Field(description="The next part of the story")
|
20
|
+
character_development: str = Field(description="How characters developed in this part")
|
21
|
+
plot_advancement: str = Field(description="How the plot advanced")
|
22
|
+
|
23
|
+
|
24
|
+
def demonstrate_optional_prompt():
|
25
|
+
"""Demonstrate using agent with optional prompts for existing sessions."""
|
26
|
+
print("🎭 DEMO: Optional Prompt with Existing Agent Sessions")
|
27
|
+
print("=" * 60)
|
28
|
+
|
29
|
+
# Initialize the clients
|
30
|
+
openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
|
31
|
+
agent = AgentClient(openrouter_client, max_iterations=1)
|
32
|
+
|
33
|
+
# First interaction - establish a story context
|
34
|
+
print("\n1️⃣ First interaction - establishing story context:")
|
35
|
+
response1 = agent.agent(
|
36
|
+
prompt="Let's create a story about a detective solving a mystery in a small town",
|
37
|
+
final_response_structure=StoryResponse,
|
38
|
+
ask_questions=False
|
39
|
+
)
|
40
|
+
|
41
|
+
agent_id = response1.agent_id
|
42
|
+
print(f"Agent ID: {agent_id}")
|
43
|
+
|
44
|
+
if response1.is_complete():
|
45
|
+
story1 = response1.final_response
|
46
|
+
print(f"Story start: {story1.story_continuation[:150]}...")
|
47
|
+
print(f"Characters: {story1.character_development}")
|
48
|
+
|
49
|
+
# Second interaction - continue with explicit prompt
|
50
|
+
print(f"\n2️⃣ Second interaction - continue with explicit prompt:")
|
51
|
+
response2 = agent.agent(
|
52
|
+
prompt="The detective finds a mysterious letter. What does it say?",
|
53
|
+
agent_id=agent_id,
|
54
|
+
ask_questions=False
|
55
|
+
)
|
56
|
+
|
57
|
+
if response2.is_complete():
|
58
|
+
story2 = response2.final_response
|
59
|
+
print(f"Story continuation: {story2.story_continuation[:150]}...")
|
60
|
+
print(f"Plot advancement: {story2.plot_advancement}")
|
61
|
+
|
62
|
+
# Third interaction - continue WITHOUT explicit prompt (using only history)
|
63
|
+
print(f"\n3️⃣ Third interaction - continue WITHOUT explicit prompt (history-based):")
|
64
|
+
try:
|
65
|
+
response3 = agent.agent(
|
66
|
+
agent_id=agent_id, # Only provide agent_id, no prompt
|
67
|
+
ask_questions=False
|
68
|
+
)
|
69
|
+
|
70
|
+
if response3.is_complete():
|
71
|
+
story3 = response3.final_response
|
72
|
+
print(f"History-based continuation: {story3.story_continuation[:150]}...")
|
73
|
+
print(f"Character development: {story3.character_development}")
|
74
|
+
print(f"Plot advancement: {story3.plot_advancement}")
|
75
|
+
|
76
|
+
except Exception as e:
|
77
|
+
print(f"Error with history-based continuation: {e}")
|
78
|
+
|
79
|
+
# Fourth interaction - switch response format but use same session
|
80
|
+
print(f"\n4️⃣ Fourth interaction - switch to conversation format:")
|
81
|
+
response4 = agent.agent(
|
82
|
+
prompt="What do you think about this story so far?",
|
83
|
+
final_response_structure=ConversationResponse,
|
84
|
+
agent_id=agent_id,
|
85
|
+
ask_questions=False
|
86
|
+
)
|
87
|
+
|
88
|
+
if response4.is_complete():
|
89
|
+
conv4 = response4.final_response
|
90
|
+
print(f"Analysis: {conv4.response[:150]}...")
|
91
|
+
print(f"Context used: {conv4.context_used}")
|
92
|
+
print(f"Continuation type: {conv4.continuation_type}")
|
93
|
+
|
94
|
+
# Fifth interaction - continue conversation without prompt
|
95
|
+
print(f"\n5️⃣ Fifth interaction - continue conversation analysis without prompt:")
|
96
|
+
try:
|
97
|
+
response5 = agent.agent(
|
98
|
+
agent_id=agent_id,
|
99
|
+
ask_questions=False
|
100
|
+
)
|
101
|
+
|
102
|
+
if response5.is_complete():
|
103
|
+
conv5 = response5.final_response
|
104
|
+
print(f"Continued analysis: {conv5.response[:150]}...")
|
105
|
+
print(f"Context used: {conv5.context_used}")
|
106
|
+
print(f"Continuation type: {conv5.continuation_type}")
|
107
|
+
|
108
|
+
except Exception as e:
|
109
|
+
print(f"Error with continued conversation: {e}")
|
110
|
+
|
111
|
+
# Show final session state
|
112
|
+
session_info = agent.get_session_info(agent_id)
|
113
|
+
print(f"\n📊 Final session state:")
|
114
|
+
print(f" - Total messages: {session_info['conversation_length']}")
|
115
|
+
print(f" - Session step: {session_info.get('step', 'unknown')}")
|
116
|
+
print(f" - Has final_response_structure: {'final_response_structure' in session_info}")
|
117
|
+
|
118
|
+
# Cleanup
|
119
|
+
agent.delete_session(agent_id)
|
120
|
+
|
121
|
+
|
122
|
+
def demonstrate_error_cases():
    """Demonstrate error cases with optional prompt."""
    print("\n🚨 DEMO: Error Cases with Optional Prompt")
    print("=" * 50)

    openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
    agent = AgentClient(openrouter_client, max_iterations=1)

    def run_case(header, invoke):
        # Print the case header, run the call that is expected to fail,
        # and report the ValueError the agent should raise.
        print(header)
        try:
            invoke()
            print("This should have failed!")
        except ValueError as e:
            print(f"Expected error: {e}")

    # Error case 1: No prompt and no existing session
    run_case(
        "\n❌ Error case 1: No prompt, no existing session",
        lambda: agent.agent(ask_questions=False),
    )

    # Error case 2: No prompt and no final_response_structure for new session
    run_case(
        "\n❌ Error case 2: No final_response_structure for new session",
        lambda: agent.agent(prompt="Test", ask_questions=False),
    )

    # Error case 3: Unknown agent_id without prompt
    run_case(
        "\n❌ Error case 3: Unknown agent_id without prompt",
        lambda: agent.agent(agent_id="unknown-id", ask_questions=False),
    )

    print("\n✅ All error cases handled correctly!")
|
155
|
+
|
156
|
+
|
157
|
+
if __name__ == "__main__":
    try:
        # Run the happy-path demo first, then the deliberate error demos.
        for demo in (demonstrate_optional_prompt, demonstrate_error_cases):
            demo()
    except Exception as e:
        print(f"Unexpected error: {e}")
        import traceback
        traceback.print_exc()
|
@@ -0,0 +1,109 @@
|
|
1
|
+
"""
|
2
|
+
Example demonstrating the new unified agent interface.
|
3
|
+
"""
|
4
|
+
|
5
|
+
import os
|
6
|
+
from pydantic import BaseModel, Field
|
7
|
+
from mbxai import AgentClient, OpenRouterClient, AnswerList, Answer
|
8
|
+
|
9
|
+
|
10
|
+
class SimpleResponse(BaseModel):
    """A simple response."""
    # The model's answer text for the user's request.
    response: str = Field(description="The response text")
    # Free-text note on how prior conversation context shaped this answer.
    context_used: str = Field(description="How context was used in this response")
|
14
|
+
|
15
|
+
|
16
|
+
def demonstrate_unified_interface():
    """Demonstrate the unified agent interface with and without questions.

    Walks through four interactions against a single AgentClient:
    starting a session that asks clarifying questions, answering them,
    continuing the same session, and starting a fresh session with
    direct answers. Cleans up all created sessions at the end.
    """
    print("🔧 DEMO: Unified Agent Interface")
    print("=" * 50)

    # Initialize the clients
    # NOTE(review): token falls back to a placeholder when OPENROUTER_API_KEY
    # is unset — real API calls will fail in that case.
    openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
    agent = AgentClient(openrouter_client, max_iterations=1)

    # Example 1: Start conversation with questions
    print("\n1️⃣ Starting conversation that generates questions:")
    response1 = agent.agent("I need help planning a trip", SimpleResponse, ask_questions=True)

    # The returned agent_id identifies the session for follow-up calls.
    agent_id = response1.agent_id
    print(f"Agent ID: {agent_id}")

    if response1.has_questions():
        print(f"📋 Generated {len(response1.questions)} questions:")
        for q in response1.questions:
            print(f"  - {q.question} (key: {q.key})")

    # Example 2: Provide answers using the unified interface
    # Answers are keyed; presumably keys should match the generated
    # questions' keys — TODO confirm against AgentClient.agent().
    print(f"\n2️⃣ Providing answers using unified interface:")
    answers = AnswerList(answers=[
        Answer(key="destination", answer="Japan"),
        Answer(key="duration", answer="10 days"),
        Answer(key="budget", answer="$3000"),
        Answer(key="interests", answer="culture, food, temples")
    ])

    response2 = agent.agent(
        "Now help me plan the trip",
        SimpleResponse,
        ask_questions=False,
        agent_id=agent_id,
        answers=answers
    )

    if response2.is_complete():
        trip_plan = response2.final_response
        print(f"Response: {trip_plan.response}")
        print(f"Context used: {trip_plan.context_used}")

    # Example 3: Continue the conversation
    # Same agent_id, no answers — relies on accumulated session context.
    print(f"\n3️⃣ Continuing conversation without questions:")
    response3 = agent.agent(
        "What about transportation within Japan?",
        SimpleResponse,
        ask_questions=False,
        agent_id=agent_id
    )

    if response3.is_complete():
        transport_info = response3.final_response
        print(f"Response: {transport_info.response}")
        print(f"Context used: {transport_info.context_used}")

    # Example 4: Using answers without previous questions (new session)
    print(f"\n4️⃣ Starting new session with direct answers (no questions):")
    new_answers = AnswerList(answers=[
        Answer(key="city", answer="Tokyo"),
        Answer(key="travel_style", answer="luxury"),
        Answer(key="group_size", answer="2 people")
    ])

    response4 = agent.agent(
        "Recommend restaurants",
        SimpleResponse,
        ask_questions=False,
        answers=new_answers  # New session, no agent_id provided
    )

    if response4.is_complete():
        restaurant_info = response4.final_response
        print(f"New Agent ID: {response4.agent_id}")
        print(f"Response: {restaurant_info.response}")
        print(f"Context used: {restaurant_info.context_used}")

    # Show active sessions
    print(f"\n📊 Active Sessions: {agent.list_sessions()}")

    # Cleanup — delete both sessions; response4 created a second one
    # when no agent_id was supplied.
    print(f"\n🗑️ Cleaning up sessions...")
    agent.delete_session(agent_id)
    if response4.agent_id != agent_id:
        agent.delete_session(response4.agent_id)
    print(f"Active Sessions after cleanup: {agent.list_sessions()}")
|
103
|
+
|
104
|
+
|
105
|
+
if __name__ == "__main__":
    # Entry point: run the demo and report (rather than crash on) any failure.
    try:
        demonstrate_unified_interface()
    except Exception as err:
        print(f"Error: {err}")
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|
File without changes
|