mbxai 2.0.4.tar.gz → 2.1.0.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {mbxai-2.0.4 → mbxai-2.1.0}/PKG-INFO +1 -1
- {mbxai-2.0.4 → mbxai-2.1.0}/pyproject.toml +3 -2
- {mbxai-2.0.4 → mbxai-2.1.0}/setup.py +1 -1
- {mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/__init__.py +1 -1
- {mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/agent/client.py +289 -44
- {mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/agent/models.py +53 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/examples/agent_example.py +35 -0
- mbxai-2.1.0/src/mbxai/examples/agent_logging_example.py +212 -0
- mbxai-2.1.0/src/mbxai/examples/dialog_agent_example.py +157 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/mcp/server.py +1 -1
- {mbxai-2.0.4 → mbxai-2.1.0}/.gitignore +0 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/LICENSE +0 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/README.md +0 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/agent/__init__.py +0 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/core.py +0 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/examples/agent_iterations_example.py +0 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/examples/agent_tool_registration_example.py +0 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/examples/agent_validation_example.py +0 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/examples/auto_schema_example.py +0 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/examples/mcp/mcp_client_example.py +0 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/examples/mcp/mcp_server_example.py +0 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/examples/openrouter_example.py +0 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/examples/parse_example.py +0 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/examples/parse_tool_example.py +0 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/examples/request.json +0 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/examples/response.json +0 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/examples/send_request.py +0 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/examples/simple_agent_test.py +0 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/examples/tool_client_example.py +0 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/mcp/__init__.py +0 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/mcp/client.py +0 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/mcp/example.py +0 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/openrouter/__init__.py +0 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/openrouter/client.py +0 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/openrouter/config.py +0 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/openrouter/models.py +0 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/openrouter/schema.py +0 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/tools/__init__.py +0 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/tools/client.py +0 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/tools/example.py +0 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/tools/types.py +0 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/tests/test_mcp_tool_registration.py +0 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/tests/test_real_mcp_schema.py +0 -0
- {mbxai-2.0.4 → mbxai-2.1.0}/tests/test_schema_conversion.py +0 -0

{mbxai-2.0.4 → mbxai-2.1.0}/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "mbxai"
-version = "2.0.4"
+version = "2.1.0"
 authors = [
     { name = "MBX AI" }
 ]
@@ -82,6 +82,7 @@ strict_equality = true
 
 [dependency-groups]
 dev = [
-    "
+    "pytest>=8.3.5",
+    "pytest-asyncio>=0.26.0",
     "twine>=6.1.0",
 ]

{mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/agent/client.py

@@ -10,7 +10,7 @@ from pydantic import BaseModel
 from ..openrouter import OpenRouterClient
 from ..tools import ToolClient
 from ..mcp import MCPClient
-from .models import AgentResponse, Question, QuestionList, AnswerList, Result, QualityCheck
+from .models import AgentResponse, Question, QuestionList, AnswerList, Result, QualityCheck, TokenUsage, TokenSummary
 
 logger = logging.getLogger(__name__)
 
@@ -123,9 +123,29 @@ class AgentClient:
                 f"Use MCPClient to register MCP servers."
             )
 
-    def _call_ai_parse(self, messages: list[dict[str, Any]], response_format: Type[BaseModel]) -> Any:
-        """Call the parse method on the AI client."""
-        return self._ai_client.parse(messages, response_format)
+    def _call_ai_parse(self, messages: list[dict[str, Any]], response_format: Type[BaseModel], conversation_history: list[dict[str, Any]] = None) -> Any:
+        """Call the parse method on the AI client with optional conversation history."""
+        # Combine conversation history with new messages
+        if conversation_history:
+            full_messages = conversation_history + messages
+        else:
+            full_messages = messages
+        return self._ai_client.parse(full_messages, response_format)
+
+    def _extract_token_usage(self, response: Any) -> TokenUsage:
+        """Extract token usage information from an AI response."""
+        try:
+            if hasattr(response, 'usage') and response.usage:
+                usage = response.usage
+                return TokenUsage(
+                    prompt_tokens=getattr(usage, 'prompt_tokens', 0),
+                    completion_tokens=getattr(usage, 'completion_tokens', 0),
+                    total_tokens=getattr(usage, 'total_tokens', 0)
+                )
+        except (AttributeError, TypeError) as e:
+            logger.debug(f"Could not extract token usage: {e}")
+
+        return TokenUsage()  # Return empty usage if extraction fails
 
     def _extract_parsed_content(self, response: Any, response_format: Type[BaseModel]) -> BaseModel:
         """Extract the parsed content from the AI response."""
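
A note on the `_extract_token_usage` helper added above: it reads `usage` off the response defensively, so token tracking degrades to zeroed counters instead of raising when a provider response carries no usage data. A minimal standalone sketch of the same pattern (the `SimpleNamespace` objects below are hypothetical stand-ins for real OpenRouter responses, not part of mbxai):

```python
from types import SimpleNamespace

# Hypothetical stand-ins for provider responses (real ones come from OpenRouter).
with_usage = SimpleNamespace(
    usage=SimpleNamespace(prompt_tokens=120, completion_tokens=48, total_tokens=168)
)
without_usage = SimpleNamespace(usage=None)

def extract(response) -> dict[str, int]:
    """Mirror of the _extract_token_usage pattern: read counts if present,
    otherwise fall back to zeroed counters instead of raising."""
    usage = getattr(response, "usage", None)
    if usage:
        return {
            "prompt_tokens": getattr(usage, "prompt_tokens", 0),
            "completion_tokens": getattr(usage, "completion_tokens", 0),
            "total_tokens": getattr(usage, "total_tokens", 0),
        }
    return {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}

print(extract(with_usage))     # {'prompt_tokens': 120, ..., 'total_tokens': 168}
print(extract(without_usage))  # all zeros, never raises
```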
@@ -164,7 +184,8 @@ class AgentClient:
         self,
         prompt: str,
         final_response_structure: Type[BaseModel],
-        ask_questions: bool = True
+        ask_questions: bool = True,
+        agent_id: str = None
     ) -> AgentResponse:
         """
         Process a prompt through the agent's thinking process.
@@ -173,14 +194,28 @@ class AgentClient:
             prompt: The initial prompt from the user
             final_response_structure: Pydantic model defining the expected final response format
             ask_questions: Whether to ask clarifying questions (default: True)
+            agent_id: Optional agent session ID to continue an existing conversation
 
         Returns:
             AgentResponse containing either questions to ask or the final response
         """
-
+        # Use provided agent_id or create a new one
+        if agent_id is None:
+            agent_id = str(__import__("uuid").uuid4())
+            logger.info(f"🚀 Starting new agent process (ID: {agent_id}) with prompt: {prompt[:100]}...")
+        else:
+            logger.info(f"🔄 Continuing agent process (ID: {agent_id}) with prompt: {prompt[:100]}...")
+
+        # Initialize token summary
+        token_summary = TokenSummary()
+
+        # Check if this is a continuing conversation
+        existing_session = self._agent_sessions.get(agent_id, {})
+        conversation_history = existing_session.get("conversation_history", [])
 
         # Step 1: Generate questions (if ask_questions is True)
         if ask_questions:
+            logger.info(f"❓ Agent {agent_id}: Analyzing prompt and generating clarifying questions")
             questions_prompt = f"""
 Understand this prompt and what the user wants to achieve by it:
 ==========
@@ -201,28 +236,34 @@ IMPORTANT: For each question, provide a technical key identifier that:
             messages = [{"role": "user", "content": questions_prompt}]
 
             try:
-                response = self._call_ai_parse(messages, QuestionList)
+                response = self._call_ai_parse(messages, QuestionList, conversation_history)
                 question_list = self._extract_parsed_content(response, QuestionList)
 
-
+                # Extract token usage for question generation
+                token_summary.question_generation = self._extract_token_usage(response)
+
+                logger.info(f"❓ Agent {agent_id}: Generated {len(question_list.questions)} questions (tokens: {token_summary.question_generation.total_tokens})")
 
                 # If we have questions, return them to the user
                 if question_list.questions:
-                    agent_response = AgentResponse(questions=question_list.questions)
+                    agent_response = AgentResponse(agent_id=agent_id, questions=question_list.questions, token_summary=token_summary)
                     # Store the session for continuation
                     self._agent_sessions[agent_response.agent_id] = {
                         "original_prompt": prompt,
                         "final_response_structure": final_response_structure,
                         "questions": question_list.questions,
-                        "step": "waiting_for_answers"
+                        "step": "waiting_for_answers",
+                        "token_summary": token_summary
                     }
+                    logger.info(f"📋 Agent {agent_id}: Waiting for user answers to {len(question_list.questions)} questions")
                     return agent_response
 
             except Exception as e:
                 logger.warning(f"Failed to generate questions: {e}. Proceeding without questions.")
 
         # Step 2 & 3: No questions or ask_questions=False - proceed directly
-        return self._process_with_answers(prompt, final_response_structure, [])
+        logger.info(f"⚡ Agent {agent_id}: No questions needed, proceeding directly to processing")
+        return self._process_with_answers(prompt, final_response_structure, [], agent_id, token_summary, conversation_history)
 
     def answer_to_agent(self, agent_id: str, answers: AnswerList) -> AgentResponse:
         """
@@ -245,26 +286,97 @@ IMPORTANT: For each question, provide a technical key identifier that:
         if session["step"] != "waiting_for_answers":
             raise ValueError(f"Agent session {agent_id} is not waiting for answers")
 
-        # Convert answers to a more usable format
+        # Convert answers to a more usable format and create question-answer pairs
         answer_dict = {answer.key: answer.answer for answer in answers.answers}
+        questions = session.get("questions", [])
+
+        # Create question-answer pairs for better context
+        qa_pairs = []
+        for question in questions:
+            answer_text = answer_dict.get(question.key, "No answer provided")
+            qa_pairs.append({
+                "question": question.question,
+                "key": question.key,
+                "answer": answer_text,
+                "required": question.required
+            })
+
+        logger.info(f"📝 Agent {agent_id}: Received {len(answers.answers)} answers, continuing processing")
 
-        # Process with the provided answers
+        # Get token summary from session
+        token_summary = session.get("token_summary", TokenSummary())
+        conversation_history = session.get("conversation_history", [])
+
+        # Process with the provided answers and question context
         result = self._process_with_answers(
             session["original_prompt"],
             session["final_response_structure"],
-            answer_dict
+            qa_pairs,
+            agent_id,
+            token_summary,
+            conversation_history
         )
 
-        # Clean up the session
-        del self._agent_sessions[agent_id]
+        # Update session with conversation history but don't delete it
+        if agent_id in self._agent_sessions:
+            self._agent_sessions[agent_id]["step"] = "completed"
+            self._agent_sessions[agent_id]["conversation_history"] = self._agent_sessions[agent_id].get("conversation_history", [])
+
+            # Add this interaction to history
+            self._agent_sessions[agent_id]["conversation_history"].extend([
+                {"role": "user", "content": session["original_prompt"]},
+                {"role": "assistant", "content": str(result.final_response) if result.final_response else "No response generated"}
+            ])
 
         return result
 
+    def _format_qa_context_for_quality_check(self, answers: Union[list, dict[str, str]]) -> str:
+        """
+        Format question-answer context for quality check and improvement prompts.
+
+        Args:
+            answers: Question-answer pairs or simple answers
+
+        Returns:
+            Formatted context text
+        """
+        if not answers:
+            return ""
+
+        if isinstance(answers, list) and answers:
+            # Check if it's a list of question-answer pairs (enhanced format)
+            if isinstance(answers[0], dict) and "question" in answers[0]:
+                context_text = "\nContext Information (Questions & Answers):\n"
+                context_text += "The response was generated with the following additional context:\n\n"
+                for i, qa_pair in enumerate(answers, 1):
+                    question = qa_pair.get("question", "Unknown question")
+                    answer = qa_pair.get("answer", "No answer provided")
+                    required = qa_pair.get("required", True)
+
+                    status_marker = "🔴 REQUIRED" if required else "🟡 OPTIONAL"
+                    context_text += f"{i}. {status_marker} Q: {question}\n"
+                    context_text += f"   A: {answer}\n\n"
+                return context_text
+            else:
+                # Legacy format - simple list
+                return f"\nAdditional context: {', '.join(str(a) for a in answers)}\n\n"
+        elif isinstance(answers, dict) and answers:
+            # Legacy format - simple dict
+            context_text = "\nAdditional context provided:\n"
+            for key, answer in answers.items():
+                context_text += f"- {key}: {answer}\n"
+            return context_text + "\n"
+
+        return ""
+
     def _process_with_answers(
         self,
         prompt: str,
         final_response_structure: Type[BaseModel],
-        answers: Union[list, dict[str, str]]
+        answers: Union[list, dict[str, str]],
+        agent_id: str,
+        token_summary: TokenSummary,
+        conversation_history: list[dict[str, Any]] = None
     ) -> AgentResponse:
         """
         Process the prompt with answers through the thinking pipeline.
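
The hunk above is the heart of the question flow: `answer_to_agent` joins the caller's answers to the stored questions by each `Question.key`, substitutes "No answer provided" for unanswered keys, then resumes processing with the session's token summary and conversation history. A rough sketch of the round trip, assuming a valid `OPENROUTER_API_KEY`; the `TravelPlan` model is illustrative, not part of mbxai:

```python
import os
from pydantic import BaseModel, Field
from mbxai import AgentClient, OpenRouterClient, AnswerList, Answer

class TravelPlan(BaseModel):
    # Illustrative response structure, not part of mbxai
    destination: str = Field(description="Where to go")
    itinerary: str = Field(description="Day-by-day plan")

client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
agent = AgentClient(client, max_iterations=1)

response = agent.agent("Plan a weekend trip for me", TravelPlan)
if response.has_questions():
    # Answer.key must match Question.key; unmatched questions fall back
    # to "No answer provided" when the qa_pairs are built.
    answers = AnswerList(answers=[
        Answer(key=q.key, answer="your answer here") for q in response.questions
    ])
    response = agent.answer_to_agent(response.agent_id, answers)

if response.is_complete():
    print(response.final_response.itinerary)
```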
@@ -272,41 +384,77 @@ IMPORTANT: For each question, provide a technical key identifier that:
         Args:
             prompt: The original prompt
             final_response_structure: Expected final response structure
-            answers:
+            answers: Question-answer pairs or simple answers (empty if no questions were asked)
+            agent_id: The agent session identifier
+            token_summary: Current token usage summary
+            conversation_history: Optional conversation history for dialog context
 
         Returns:
             AgentResponse with the final result
         """
+        if conversation_history is None:
+            conversation_history = []
+
         # Step 3: Process the prompt with thinking
-        result = self._think_and_process(prompt, answers)
+        logger.info(f"🧠 Agent {agent_id}: Processing prompt and generating initial response")
+        result = self._think_and_process(prompt, answers, agent_id, token_summary, conversation_history)
 
         # Step 4: Quality check and iteration
-        final_result = self._quality_check_and_iterate(prompt, result, answers)
+        final_result = self._quality_check_and_iterate(prompt, result, answers, agent_id, token_summary, conversation_history)
 
         # Step 5: Generate final answer in requested format
-        final_response = self._generate_final_response(prompt, final_result, final_response_structure)
+        logger.info(f"📝 Agent {agent_id}: Generating final structured response")
+        final_response = self._generate_final_response(prompt, final_result, final_response_structure, agent_id, token_summary, conversation_history)
+
+        # Log final token summary
+        logger.info(f"📊 Agent {agent_id}: Token usage summary - Total: {token_summary.total_tokens} "
+                    f"(Prompt: {token_summary.total_prompt_tokens}, Completion: {token_summary.total_completion_tokens})")
 
-        return AgentResponse(final_response=final_response)
+        return AgentResponse(agent_id=agent_id, final_response=final_response, token_summary=token_summary)
 
-    def _think_and_process(self, prompt: str, answers: Union[list, dict[str, str]]) -> str:
+    def _think_and_process(self, prompt: str, answers: Union[list, dict[str, str]], agent_id: str, token_summary: TokenSummary, conversation_history: list[dict[str, Any]] = None) -> str:
         """
         Process the prompt with thinking.
 
         Args:
             prompt: The original prompt
-            answers:
+            answers: Question-answer pairs or simple answers
+            agent_id: The agent session identifier
+            token_summary: Current token usage summary
+            conversation_history: Optional conversation history for dialog context
 
         Returns:
             The AI's result
         """
-        # Format answers for the prompt
+        if conversation_history is None:
+            conversation_history = []
+        # Format answers for the prompt with enhanced context
         answers_text = ""
-        if isinstance(answers, dict) and answers:
+        if isinstance(answers, list) and answers:
+            # Check if it's a list of question-answer pairs (enhanced format)
+            if answers and isinstance(answers[0], dict) and "question" in answers[0]:
+                answers_text = "\n\nQuestion-Answer Context:\n"
+                answers_text += "The following questions were asked to gather more information, along with the answers provided:\n\n"
+                for i, qa_pair in enumerate(answers, 1):
+                    question = qa_pair.get("question", "Unknown question")
+                    answer = qa_pair.get("answer", "No answer provided")
+                    key = qa_pair.get("key", "")
+                    required = qa_pair.get("required", True)
+
+                    status_marker = "🔴 REQUIRED" if required else "🟡 OPTIONAL"
+                    answers_text += f"{i}. {status_marker} Question: {question}\n"
+                    answers_text += f"   Answer: {answer}\n"
+                    if key:
+                        answers_text += f"   (Key: {key})\n"
+                    answers_text += "\n"
+            else:
+                # Legacy format - simple list
+                answers_text = f"\n\nAdditional information: {', '.join(str(a) for a in answers)}\n"
+        elif isinstance(answers, dict) and answers:
+            # Legacy format - simple dict
             answers_text = "\n\nAdditional information provided:\n"
             for key, answer in answers.items():
                 answers_text += f"- {key}: {answer}\n"
-        elif isinstance(answers, list) and answers:
-            answers_text = f"\n\nAdditional information: {', '.join(answers)}\n"
 
         thinking_prompt = f"""
 Think about this prompt, the goal and the steps required to fulfill it:
@@ -315,42 +463,66 @@ Think about this prompt, the goal and the steps required to fulfill it:
 ==========
 {answers_text}
 
-Consider the prompt carefully, analyze what the user wants to achieve, and think through the best approach to provide a comprehensive and helpful response.
+Consider the prompt carefully, analyze what the user wants to achieve, and think through the best approach to provide a comprehensive and helpful response.
 
-Provide your best result for the given prompt.
+IMPORTANT: When formulating your response, take into account both the original prompt AND the specific questions that were asked along with their answers. The questions reveal what additional information was deemed necessary, and the answers provide crucial context that should inform your response.
+
+Use any available tools to gather information or perform actions that would improve your response.
+
+Provide your best result for the given prompt, incorporating all the context from the question-answer pairs.
 """
 
         messages = [{"role": "user", "content": thinking_prompt}]
 
         try:
-            response = self._call_ai_parse(messages, Result)
+            response = self._call_ai_parse(messages, Result, conversation_history)
             result_obj = self._extract_parsed_content(response, Result)
+
+            # Track token usage for thinking process
+            token_summary.thinking_process = self._extract_token_usage(response)
+            logger.info(f"🧠 Agent {agent_id}: Thinking completed (tokens: {token_summary.thinking_process.total_tokens})")
+
             return result_obj.result
         except Exception as e:
             logger.error(f"Error in thinking process: {e}")
             raise RuntimeError(f"Failed to process prompt with AI client: {e}") from e
 
-    def _quality_check_and_iterate(self, prompt: str, result: str, answers: Union[list, dict[str, str]]) -> str:
+    def _quality_check_and_iterate(self, prompt: str, result: str, answers: Union[list, dict[str, str]], agent_id: str, token_summary: TokenSummary, conversation_history: list[dict[str, Any]] = None) -> str:
         """
         Check the quality of the result and iterate if needed.
 
         Args:
             prompt: The original prompt
             result: The current result
-            answers:
+            answers: Question-answer pairs or simple answers
+            agent_id: The agent session identifier
+            token_summary: Current token usage summary
+            conversation_history: Optional conversation history for dialog context
 
         Returns:
             The final improved result
         """
+        if conversation_history is None:
+            conversation_history = []
+
         current_result = result
 
+        if self._max_iterations == 0:
+            logger.info(f"✅ Agent {agent_id}: Skipping quality check (max_iterations=0)")
+            return current_result
+
+        logger.info(f"🔍 Agent {agent_id}: Starting quality check and improvement process (max iterations: {self._max_iterations})")
+
+        # Format context information for quality checks
+        context_text = self._format_qa_context_for_quality_check(answers)
+
         for iteration in range(self._max_iterations):
             quality_prompt = f"""
 Given this original prompt:
 ==========
 {prompt}
 ==========
-
+{context_text}
 And this result:
 ==========
 {current_result}
@@ -358,20 +530,26 @@ And this result:
 
 Is this result good and comprehensive, or does it need to be improved? Consider if the response fully addresses the prompt, provides sufficient detail, and would be helpful to the user.
 
+IMPORTANT: Also evaluate whether the result properly incorporates and addresses the information provided through the question-answer pairs above. The response should demonstrate that it has taken this additional context into account.
+
 Evaluate the quality and provide feedback if improvements are needed.
 """
 
             messages = [{"role": "user", "content": quality_prompt}]
 
             try:
-                response = self._call_ai_parse(messages, QualityCheck)
+                response = self._call_ai_parse(messages, QualityCheck, conversation_history)
                 quality_check = self._extract_parsed_content(response, QualityCheck)
 
+                # Track token usage for quality check
+                quality_check_tokens = self._extract_token_usage(response)
+                token_summary.quality_checks.append(quality_check_tokens)
+
                 if quality_check.is_good:
-                    logger.
+                    logger.info(f"✅ Agent {agent_id}: Quality check passed on iteration {iteration + 1} (tokens: {quality_check_tokens.total_tokens})")
                     break
 
-                logger.
+                logger.info(f"🔄 Agent {agent_id}: Quality check iteration {iteration + 1} - Improvements needed: {quality_check.feedback[:100]}... (tokens: {quality_check_tokens.total_tokens})")
 
                 # Improve the result
                 improvement_prompt = f"""
@@ -379,7 +557,7 @@ The original prompt was:
 ==========
 {prompt}
 ==========
-
+{context_text}
 The current result is:
 ==========
 {current_result}
@@ -390,21 +568,31 @@ Feedback for improvement:
 {quality_check.feedback}
 ==========
 
-Please provide an improved version that addresses the feedback while maintaining the strengths of the current result.
+Please provide an improved version that addresses the feedback while maintaining the strengths of the current result. Make sure to incorporate all the context from the question-answer pairs above.
 """
 
                 messages = [{"role": "user", "content": improvement_prompt}]
-                response = self._call_ai_parse(messages, Result)
-                result_obj = self._extract_parsed_content(response, Result)
+                improvement_response = self._call_ai_parse(messages, Result, conversation_history)
+                result_obj = self._extract_parsed_content(improvement_response, Result)
                 current_result = result_obj.result
 
+                # Track token usage for improvement
+                improvement_tokens = self._extract_token_usage(improvement_response)
+                token_summary.improvements.append(improvement_tokens)
+
+                logger.info(f"⚡ Agent {agent_id}: Improvement iteration {iteration + 1} completed (tokens: {improvement_tokens.total_tokens})")
+
             except Exception as e:
                 logger.warning(f"Error in quality check iteration {iteration}: {e}")
                 break
 
+        total_quality_tokens = sum(usage.total_tokens for usage in token_summary.quality_checks)
+        total_improvement_tokens = sum(usage.total_tokens for usage in token_summary.improvements)
+        logger.info(f"🏁 Agent {agent_id}: Quality check completed - {len(token_summary.quality_checks)} checks, {len(token_summary.improvements)} improvements (Quality tokens: {total_quality_tokens}, Improvement tokens: {total_improvement_tokens})")
+
         return current_result
 
-    def _generate_final_response(self, prompt: str, result: str, final_response_structure: Type[BaseModel]) -> BaseModel:
+    def _generate_final_response(self, prompt: str, result: str, final_response_structure: Type[BaseModel], agent_id: str, token_summary: TokenSummary, conversation_history: list[dict[str, Any]] = None) -> BaseModel:
         """
         Generate the final response in the requested format.
 
@@ -412,10 +600,15 @@ Please provide an improved version that addresses the feedback while maintaining
             prompt: The original prompt
             result: The processed result
             final_response_structure: The expected response structure
+            agent_id: The agent session identifier
+            token_summary: Current token usage summary
+            conversation_history: Optional conversation history for dialog context
 
         Returns:
             The final response in the requested format
         """
+        if conversation_history is None:
+            conversation_history = []
         final_prompt = f"""
 Given this original prompt:
 ==========
@@ -433,8 +626,14 @@ Generate the final answer in the exact format requested. Make sure the response
         messages = [{"role": "user", "content": final_prompt}]
 
         try:
-            response = self._call_ai_parse(messages, final_response_structure)
-            return self._extract_parsed_content(response, final_response_structure)
+            response = self._call_ai_parse(messages, final_response_structure, conversation_history)
+            final_response = self._extract_parsed_content(response, final_response_structure)
+
+            # Track token usage for final response generation
+            token_summary.final_response = self._extract_token_usage(response)
+            logger.info(f"📝 Agent {agent_id}: Final structured response generated (tokens: {token_summary.final_response.total_tokens})")
+
+            return final_response
         except Exception as e:
             logger.error(f"Error generating final response: {e}")
             # Fallback - try to create a basic response
@@ -454,3 +653,49 @@ Generate the final answer in the exact format requested. Make sure the response
                 logger.error(f"Fallback response creation failed: {fallback_error}")
                 # Last resort - return the structure with default values
                 return final_response_structure()
+
+    def get_session_info(self, agent_id: str) -> dict[str, Any]:
+        """
+        Get information about an agent session.
+
+        Args:
+            agent_id: The agent session identifier
+
+        Returns:
+            Session information dictionary
+
+        Raises:
+            ValueError: If the agent session is not found
+        """
+        if agent_id not in self._agent_sessions:
+            raise ValueError(f"Agent session {agent_id} not found")
+
+        session = self._agent_sessions[agent_id].copy()
+        # Remove sensitive information and add summary
+        session["conversation_length"] = len(session.get("conversation_history", []))
+        return session
+
+    def delete_session(self, agent_id: str) -> bool:
+        """
+        Delete an agent session.
+
+        Args:
+            agent_id: The agent session identifier
+
+        Returns:
+            True if session was deleted, False if it didn't exist
+        """
+        if agent_id in self._agent_sessions:
+            del self._agent_sessions[agent_id]
+            logger.info(f"🗑️ Deleted agent session {agent_id}")
+            return True
+        return False
+
+    def list_sessions(self) -> list[str]:
+        """
+        List all active agent session IDs.
+
+        Returns:
+            List of agent session IDs
+        """
+        return list(self._agent_sessions.keys())
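
Together with the `answer_to_agent` change earlier in this file (sessions are marked "completed" rather than deleted), these three methods shift session cleanup to the caller. A short sketch of the lifecycle, reusing `agent` from the sketch above:

```python
# Assumes `agent` from the previous sketch, after one or more agent() calls.
for sid in agent.list_sessions():
    info = agent.get_session_info(sid)   # a copy of the stored session state
    print(sid, info.get("step"), info["conversation_length"])

# Sessions now outlive answer_to_agent, so cleanup is explicit:
for sid in agent.list_sessions():
    assert agent.delete_session(sid)     # True on first delete, False afterwards

print(agent.list_sessions())             # []
```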

{mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/agent/models.py

@@ -46,6 +46,7 @@ class AgentResponse(BaseModel):
     agent_id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Unique identifier for this agent session")
     questions: list[Question] = Field(default_factory=list, description="List of questions for the user")
     final_response: Optional[Any] = Field(default=None, description="The final response if processing is complete")
+    token_summary: Optional["TokenSummary"] = Field(default=None, description="Summary of token usage for this agent process")
 
     def has_questions(self) -> bool:
         """Check if this response has questions that need to be answered."""
@@ -76,3 +77,55 @@ class QualityCheck(BaseModel):
     """Result of quality checking the AI response."""
     is_good: bool = Field(description="Whether the result is good enough")
     feedback: str = Field(description="Feedback on what could be improved if not good")
+
+
+class TokenUsage(BaseModel):
+    """Token usage information for a single API call."""
+    prompt_tokens: int = Field(default=0, description="Number of tokens in the prompt")
+    completion_tokens: int = Field(default=0, description="Number of tokens in the completion")
+    total_tokens: int = Field(default=0, description="Total number of tokens used")
+
+
+class TokenSummary(BaseModel):
+    """Summary of token usage across all API calls in an agent process."""
+    question_generation: TokenUsage = Field(default_factory=TokenUsage, description="Tokens used for question generation")
+    thinking_process: TokenUsage = Field(default_factory=TokenUsage, description="Tokens used for thinking/processing")
+    quality_checks: list[TokenUsage] = Field(default_factory=list, description="Tokens used for each quality check iteration")
+    improvements: list[TokenUsage] = Field(default_factory=list, description="Tokens used for each improvement iteration")
+    final_response: TokenUsage = Field(default_factory=TokenUsage, description="Tokens used for final response generation")
+
+    @property
+    def total_tokens(self) -> int:
+        """Calculate total tokens used across all operations."""
+        total = (
+            self.question_generation.total_tokens +
+            self.thinking_process.total_tokens +
+            sum(usage.total_tokens for usage in self.quality_checks) +
+            sum(usage.total_tokens for usage in self.improvements) +
+            self.final_response.total_tokens
+        )
+        return total
+
+    @property
+    def total_prompt_tokens(self) -> int:
+        """Calculate total prompt tokens used across all operations."""
+        total = (
+            self.question_generation.prompt_tokens +
+            self.thinking_process.prompt_tokens +
+            sum(usage.prompt_tokens for usage in self.quality_checks) +
+            sum(usage.prompt_tokens for usage in self.improvements) +
+            self.final_response.prompt_tokens
+        )
+        return total
+
+    @property
+    def total_completion_tokens(self) -> int:
+        """Calculate total completion tokens used across all operations."""
+        total = (
+            self.question_generation.completion_tokens +
+            self.thinking_process.completion_tokens +
+            sum(usage.completion_tokens for usage in self.quality_checks) +
+            sum(usage.completion_tokens for usage in self.improvements) +
+            self.final_response.completion_tokens
+        )
+        return total
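
The three `total_*` properties fold every per-call `TokenUsage` record together, with one list entry per quality-check and per improvement iteration. A quick standalone check of that arithmetic:

```python
from mbxai.agent.models import TokenUsage, TokenSummary

summary = TokenSummary(
    question_generation=TokenUsage(prompt_tokens=50, completion_tokens=20, total_tokens=70),
    thinking_process=TokenUsage(prompt_tokens=200, completion_tokens=150, total_tokens=350),
    quality_checks=[TokenUsage(prompt_tokens=80, completion_tokens=10, total_tokens=90)],
    improvements=[TokenUsage(prompt_tokens=120, completion_tokens=90, total_tokens=210)],
    final_response=TokenUsage(prompt_tokens=60, completion_tokens=40, total_tokens=100),
)

# Each total is the sum over the five stages, lists contributing one term per entry.
assert summary.total_tokens == 70 + 350 + 90 + 210 + 100            # 820
assert summary.total_prompt_tokens == 50 + 200 + 80 + 120 + 60      # 510
assert summary.total_completion_tokens == 20 + 150 + 10 + 90 + 40   # 310
```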

{mbxai-2.0.4 → mbxai-2.1.0}/src/mbxai/examples/agent_example.py

@@ -126,6 +126,33 @@ def example_with_tool_client():
         print(f"- {rec}")
 
 
+def example_dialog_conversation():
+    """Example demonstrating persistent dialog functionality."""
+    print("Example of persistent dialog conversation:")
+
+    openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
+    agent = AgentClient(openrouter_client, max_iterations=1)
+
+    # Start conversation
+    response1 = agent.agent("I want a book recommendation for science fiction", BookRecommendation, ask_questions=False)
+    agent_id = response1.agent_id
+
+    if response1.is_complete():
+        book1 = response1.final_response
+        print(f"First recommendation: {book1.title} by {book1.author}")
+
+    # Continue conversation with same agent_id
+    response2 = agent.agent("Can you recommend something by a different author in the same genre?", BookRecommendation, ask_questions=False, agent_id=agent_id)
+
+    if response2.is_complete():
+        book2 = response2.final_response
+        print(f"Second recommendation: {book2.title} by {book2.author}")
+        print(f"Context: {book2.reason}")
+
+    # Clean up session when done
+    agent.delete_session(agent_id)
+
+
 if __name__ == "__main__":
     print("=== Agent Client Examples ===\n")
 
@@ -150,3 +177,11 @@ if __name__ == "__main__":
         example_with_tool_client()
     except Exception as e:
         print(f"Error: {e}")
+
+    print("\n" + "="*50 + "\n")
+
+    print("4. Example with persistent dialog:")
+    try:
+        example_dialog_conversation()
+    except Exception as e:
+        print(f"Error: {e}")

mbxai-2.1.0/src/mbxai/examples/agent_logging_example.py (new file)

@@ -0,0 +1,212 @@
+"""
+Example demonstrating the enhanced logging and token tracking features of the AgentClient.
+"""
+
+import os
+import logging
+from pydantic import BaseModel, Field
+
+from mbxai.openrouter import OpenRouterClient
+from mbxai.agent import AgentClient
+
+# Configure logging to see all the agent information
+logging.basicConfig(
+    level=logging.INFO,
+    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+)
+
+class WeatherResponse(BaseModel):
+    """Response format for weather information."""
+    location: str = Field(description="The location requested")
+    current_conditions: str = Field(description="Current weather conditions")
+    temperature: str = Field(description="Current temperature")
+    forecast: str = Field(description="Weather forecast")
+    recommendation: str = Field(description="Clothing or activity recommendation based on weather")
+
+class AnalysisResponse(BaseModel):
+    """Response format for complex analysis."""
+    summary: str = Field(description="Executive summary of the analysis")
+    key_findings: list[str] = Field(description="List of key findings")
+    methodology: str = Field(description="How the analysis was conducted")
+    recommendations: list[str] = Field(description="Actionable recommendations")
+    confidence_level: str = Field(description="Confidence level in the analysis")
+
+def demonstrate_agent_with_questions():
+    """Demonstrate agent process with question generation."""
+    print("\n" + "="*60)
+    print("🔍 DEMO: Agent with Question Generation")
+    print("="*60)
+
+    try:
+        # Note: This requires a real OpenRouter API key
+        api_key = os.getenv("OPENROUTER_API_KEY")
+        if not api_key:
+            print("❌ OPENROUTER_API_KEY not found. Using mock example.")
+            print("Set OPENROUTER_API_KEY environment variable to run with real API calls.")
+            return
+
+        openrouter_client = OpenRouterClient(token=api_key)
+        agent = AgentClient(openrouter_client, max_iterations=2)
+
+        prompt = "I need weather information for planning my outdoor activities this weekend."
+
+        print(f"📤 Sending prompt: {prompt}")
+        response = agent.agent(prompt, WeatherResponse, ask_questions=True)
+
+        if response.has_questions():
+            print(f"\n📋 Agent generated {len(response.questions)} questions:")
+            for i, question in enumerate(response.questions, 1):
+                print(f"   {i}. {question.question} (key: {question.key})")
+
+            if response.token_summary:
+                print(f"\n📊 Token usage for question generation:")
+                print(f"   - Prompt tokens: {response.token_summary.question_generation.prompt_tokens}")
+                print(f"   - Completion tokens: {response.token_summary.question_generation.completion_tokens}")
+                print(f"   - Total tokens: {response.token_summary.question_generation.total_tokens}")
+
+            # Simulate user providing answers
+            from mbxai.agent.models import AnswerList, Answer
+
+            answers = AnswerList(answers=[
+                Answer(key="location", answer="San Francisco, CA"),
+                Answer(key="activity_type", answer="hiking and outdoor photography"),
+                Answer(key="time_frame", answer="Saturday and Sunday morning")
+            ])
+
+            print(f"\n📝 Providing answers and continuing...")
+            final_response = agent.answer_to_agent(response.agent_id, answers)
+
+            if final_response.is_complete():
+                print("\n✅ Final response received!")
+                print(f"📊 Complete token summary:")
+                if final_response.token_summary:
+                    ts = final_response.token_summary
+                    print(f"   - Question generation: {ts.question_generation.total_tokens} tokens")
+                    print(f"   - Thinking process: {ts.thinking_process.total_tokens} tokens")
+                    print(f"   - Quality checks: {sum(q.total_tokens for q in ts.quality_checks)} tokens ({len(ts.quality_checks)} checks)")
+                    print(f"   - Improvements: {sum(i.total_tokens for i in ts.improvements)} tokens ({len(ts.improvements)} iterations)")
+                    print(f"   - Final response: {ts.final_response.total_tokens} tokens")
+                    print(f"   - TOTAL: {ts.total_tokens} tokens")
+
+                # Access the structured response
+                weather_data = final_response.final_response
+                print(f"\n🌤️ Weather for {weather_data.location}:")
+                print(f"   Current: {weather_data.current_conditions}")
+                print(f"   Temperature: {weather_data.temperature}")
+                print(f"   Recommendation: {weather_data.recommendation}")
+
+    except Exception as e:
+        print(f"❌ Error: {e}")
+
+def demonstrate_agent_without_questions():
+    """Demonstrate agent process without question generation."""
+    print("\n" + "="*60)
+    print("⚡ DEMO: Agent without Question Generation (Direct Processing)")
+    print("="*60)
+
+    try:
+        # Note: This requires a real OpenRouter API key
+        api_key = os.getenv("OPENROUTER_API_KEY")
+        if not api_key:
+            print("❌ OPENROUTER_API_KEY not found. Using mock example.")
+            print("Set OPENROUTER_API_KEY environment variable to run with real API calls.")
+            return
+
+        openrouter_client = OpenRouterClient(token=api_key)
+        agent = AgentClient(openrouter_client, max_iterations=1)
+
+        prompt = """
+        Analyze the current state of renewable energy adoption in Europe.
+        Focus on solar and wind power, include recent statistics, challenges,
+        and future outlook for the next 5 years.
+        """
+
+        print(f"📤 Sending prompt: {prompt[:100]}...")
+        response = agent.agent(prompt, AnalysisResponse, ask_questions=False)
+
+        if response.is_complete():
+            print("\n✅ Analysis completed!")
+
+            if response.token_summary:
+                ts = response.token_summary
+                print(f"\n📊 Token usage breakdown:")
+                print(f"   - Thinking process: {ts.thinking_process.total_tokens} tokens")
+                print(f"   - Quality checks: {sum(q.total_tokens for q in ts.quality_checks)} tokens ({len(ts.quality_checks)} checks)")
+                print(f"   - Improvements: {sum(i.total_tokens for i in ts.improvements)} tokens ({len(ts.improvements)} iterations)")
+                print(f"   - Final response: {ts.final_response.total_tokens} tokens")
+                print(f"   - TOTAL: {ts.total_tokens} tokens")
+
+            # Access the structured response
+            analysis = response.final_response
+            print(f"\n📊 Analysis Results:")
+            print(f"   Summary: {analysis.summary[:150]}...")
+            print(f"   Key Findings: {len(analysis.key_findings)} items")
+            print(f"   Recommendations: {len(analysis.recommendations)} items")
+            print(f"   Confidence: {analysis.confidence_level}")
+
+    except Exception as e:
+        print(f"❌ Error: {e}")
+
+def demonstrate_different_iteration_settings():
+    """Demonstrate different max_iterations settings and their effect on token usage."""
+    print("\n" + "="*60)
+    print("🔄 DEMO: Different Iteration Settings")
+    print("="*60)
+
+    iteration_configs = [
+        {"iterations": 0, "description": "No quality checks"},
+        {"iterations": 1, "description": "Basic quality check"},
+        {"iterations": 3, "description": "Thorough quality improvement"}
+    ]
+
+    prompt = "Explain quantum computing in simple terms for a business audience."
+
+    for config in iteration_configs:
+        print(f"\n📋 Testing with {config['iterations']} max iterations ({config['description']})")
+        print("-" * 40)
+
+        try:
+            api_key = os.getenv("OPENROUTER_API_KEY")
+            if not api_key:
+                print(f"   ❌ Skipping - OPENROUTER_API_KEY not found")
+                continue
+
+            openrouter_client = OpenRouterClient(token=api_key)
+            agent = AgentClient(openrouter_client, max_iterations=config["iterations"])
+
+            print(f"   🚀 Processing with max_iterations={config['iterations']}")
+            print(f"   - Description: {config['description']}")
+            print(f"   - Expected processing time: {'Low' if config['iterations'] <= 1 else 'Medium' if config['iterations'] <= 2 else 'High'}")
+            print(f"   - Expected response quality: {'Basic' if config['iterations'] == 0 else 'Good' if config['iterations'] <= 2 else 'Excellent'}")
+
+            # In real usage, you would call:
+            # response = agent.agent(prompt, AnalysisResponse, ask_questions=False)
+
+        except Exception as e:
+            print(f"   ❌ Error: {e}")
+
+if __name__ == "__main__":
+    print("🤖 Agent Client Logging and Token Tracking Demo")
+    print("This example demonstrates the enhanced logging and token usage tracking features.")
+
+    # Check for API key
+    api_key = os.getenv("OPENROUTER_API_KEY")
+    if not api_key:
+        print("\n⚠️ Note: To run with real API calls, set the OPENROUTER_API_KEY environment variable.")
+        print("The examples will show the logging structure but won't make actual API calls.")
+
+    # Run demonstrations
+    demonstrate_agent_with_questions()
+    demonstrate_agent_without_questions()
+    demonstrate_different_iteration_settings()
+
+    print("\n✅ Demo completed!")
+    print("\nTo see the logging in action, run this script with a valid OPENROUTER_API_KEY.")
+    print("You'll see detailed logs showing:")
+    print("   - 🚀 Agent process start")
+    print("   - ❓ Question generation")
+    print("   - 🧠 Thinking process")
+    print("   - 🔍 Quality checks")
+    print("   - ⚡ Improvements")
+    print("   - 📝 Final response generation")
+    print("   - 📊 Complete token usage summary")

mbxai-2.1.0/src/mbxai/examples/dialog_agent_example.py (new file)

@@ -0,0 +1,157 @@
+"""
+Example usage of the AgentClient with persistent dialog sessions.
+"""
+
+import os
+from pydantic import BaseModel, Field
+from mbxai import AgentClient, OpenRouterClient, AnswerList, Answer
+
+
+class ChatResponse(BaseModel):
+    """A general chat response."""
+    response: str = Field(description="The response to the user's message")
+    context_awareness: str = Field(description="How this response relates to previous conversation")
+
+
+class BookRecommendation(BaseModel):
+    """A book recommendation response."""
+    title: str = Field(description="The title of the recommended book")
+    author: str = Field(description="The author of the book")
+    genre: str = Field(description="The genre of the book")
+    reason: str = Field(description="Why this book is recommended based on conversation")
+    connection_to_previous: str = Field(description="How this recommendation connects to our previous conversation")
+
+
+def demonstrate_dialog_conversation():
+    """Demonstrate persistent dialog functionality."""
+    print("🔄 DEMO: Persistent Dialog Agent")
+    print("=" * 50)
+
+    # Initialize the clients
+    openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
+    agent = AgentClient(openrouter_client, max_iterations=1)
+
+    # First conversation - start new session
+    print("\n1️⃣ Starting new conversation:")
+    prompt1 = "Hi, I'm looking for a good book to read. I love science fiction."
+    response1 = agent.agent(prompt1, ChatResponse, ask_questions=False)
+
+    if response1.is_complete():
+        agent_id = response1.agent_id
+        chat_resp = response1.final_response
+        print(f"Agent ID: {agent_id}")
+        print(f"Response: {chat_resp.response}")
+        print(f"Context awareness: {chat_resp.context_awareness}")
+
+        # Second conversation - continue same session
+        print(f"\n2️⃣ Continuing conversation with agent {agent_id}:")
+        prompt2 = "Actually, I also enjoy fantasy novels. What would you recommend that combines both genres?"
+        response2 = agent.agent(prompt2, BookRecommendation, ask_questions=False, agent_id=agent_id)
+
+        if response2.is_complete():
+            book_rec = response2.final_response
+            print(f"Book: {book_rec.title} by {book_rec.author}")
+            print(f"Genre: {book_rec.genre}")
+            print(f"Reason: {book_rec.reason}")
+            print(f"Connection to previous: {book_rec.connection_to_previous}")
+
+        # Third conversation - continue same session
+        print(f"\n3️⃣ Continuing conversation with agent {agent_id}:")
+        prompt3 = "That sounds great! Can you recommend something similar but from a different author?"
+        response3 = agent.agent(prompt3, BookRecommendation, ask_questions=False, agent_id=agent_id)
+
+        if response3.is_complete():
+            book_rec2 = response3.final_response
+            print(f"Book: {book_rec2.title} by {book_rec2.author}")
+            print(f"Genre: {book_rec2.genre}")
+            print(f"Reason: {book_rec2.reason}")
+            print(f"Connection to previous: {book_rec2.connection_to_previous}")
+
+        # Show session info
+        print(f"\n📊 Session Information:")
+        try:
+            session_info = agent.get_session_info(agent_id)
+            print(f"Conversation length: {session_info['conversation_length']} messages")
+            print(f"Session step: {session_info.get('step', 'unknown')}")
+        except Exception as e:
+            print(f"Error getting session info: {e}")
+
+        # List all sessions
+        print(f"\n📝 Active sessions: {agent.list_sessions()}")
+
+        # Cleanup - optional
+        print(f"\n🗑️ Cleaning up session...")
+        deleted = agent.delete_session(agent_id)
+        print(f"Session deleted: {deleted}")
+        print(f"Active sessions after cleanup: {agent.list_sessions()}")
+
+
+def demonstrate_dialog_with_questions():
+    """Demonstrate dialog with question-answer flow."""
+    print("\n🔄 DEMO: Dialog Agent with Questions")
+    print("=" * 50)
+
+    # Initialize the clients
+    openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
+    agent = AgentClient(openrouter_client, max_iterations=1)
+
+    # First conversation with questions
+    print("\n1️⃣ Starting conversation with questions:")
+    prompt1 = "I want a personalized book recommendation"
+    response1 = agent.agent(prompt1, BookRecommendation, ask_questions=True)
+
+    agent_id = response1.agent_id
+    print(f"Agent ID: {agent_id}")
+
+    if response1.has_questions():
+        print(f"\n📋 Agent generated {len(response1.questions)} questions:")
+        for i, question in enumerate(response1.questions, 1):
+            print(f"   {i}. {question.question} (key: {question.key})")
+
+        # Simulate answering questions
+        answers = AnswerList(answers=[
+            Answer(key="genre_preference", answer="I love science fiction and fantasy"),
+            Answer(key="reading_level", answer="I prefer complex, adult novels"),
+            Answer(key="recent_books", answer="I recently read and loved Dune and The Name of the Wind")
+        ])
+
+        print(f"\n📝 Providing answers...")
+        final_response = agent.answer_to_agent(agent_id, answers)
+
+        if final_response.is_complete():
+            book_rec = final_response.final_response
+            print(f"Book: {book_rec.title} by {book_rec.author}")
+            print(f"Genre: {book_rec.genre}")
+            print(f"Reason: {book_rec.reason}")
+
+            # Continue conversation - this should remember the previous interaction
+            print(f"\n2️⃣ Continuing conversation with agent {agent_id}:")
+            prompt2 = "Thank you! Can you also recommend something by a female author in the same genres?"
+            response2 = agent.agent(prompt2, BookRecommendation, ask_questions=False, agent_id=agent_id)
+
+            if response2.is_complete():
+                book_rec2 = response2.final_response
+                print(f"Book: {book_rec2.title} by {book_rec2.author}")
+                print(f"Genre: {book_rec2.genre}")
+                print(f"Reason: {book_rec2.reason}")
+                print(f"Connection to previous: {book_rec2.connection_to_previous}")
+
+    # Session cleanup
+    print(f"\n🗑️ Session cleanup...")
+    agent.delete_session(agent_id)
+
+
+if __name__ == "__main__":
+    print("=== Dialog Agent Examples ===\n")
+
+    try:
+        demonstrate_dialog_conversation()
+    except Exception as e:
+        print(f"Error in dialog conversation demo: {e}")
+
+    print("\n" + "="*80 + "\n")
+
+    try:
+        demonstrate_dialog_with_questions()
+    except Exception as e:
+        print(f"Error in dialog with questions demo: {e}")