mbxai 2.0.4__py3-none-any.whl → 2.0.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
mbxai/__init__.py CHANGED
@@ -7,7 +7,7 @@ from .openrouter import OpenRouterClient
  from .tools import ToolClient
  from .mcp import MCPClient

- __version__ = "2.0.4"
+ __version__ = "2.0.5"

  __all__ = [
      "AgentClient",
mbxai/agent/client.py CHANGED
@@ -10,7 +10,7 @@ from pydantic import BaseModel
  from ..openrouter import OpenRouterClient
  from ..tools import ToolClient
  from ..mcp import MCPClient
- from .models import AgentResponse, Question, QuestionList, AnswerList, Result, QualityCheck
+ from .models import AgentResponse, Question, QuestionList, AnswerList, Result, QualityCheck, TokenUsage, TokenSummary

  logger = logging.getLogger(__name__)

@@ -127,6 +127,21 @@ class AgentClient:
          """Call the parse method on the AI client."""
          return self._ai_client.parse(messages, response_format)

+     def _extract_token_usage(self, response: Any) -> TokenUsage:
+         """Extract token usage information from an AI response."""
+         try:
+             if hasattr(response, 'usage') and response.usage:
+                 usage = response.usage
+                 return TokenUsage(
+                     prompt_tokens=getattr(usage, 'prompt_tokens', 0),
+                     completion_tokens=getattr(usage, 'completion_tokens', 0),
+                     total_tokens=getattr(usage, 'total_tokens', 0)
+                 )
+         except (AttributeError, TypeError) as e:
+             logger.debug(f"Could not extract token usage: {e}")
+
+         return TokenUsage() # Return empty usage if extraction fails
+
      def _extract_parsed_content(self, response: Any, response_format: Type[BaseModel]) -> BaseModel:
          """Extract the parsed content from the AI response."""
          if hasattr(response, 'choices') and len(response.choices) > 0:
@@ -177,10 +192,15 @@ class AgentClient:
          Returns:
              AgentResponse containing either questions to ask or the final response
          """
-         logger.debug(f"Starting agent process with prompt: {prompt[:100]}...")
+         agent_id = str(__import__("uuid").uuid4())
+         logger.info(f"🚀 Starting agent process (ID: {agent_id}) with prompt: {prompt[:100]}...")
+
+         # Initialize token summary
+         token_summary = TokenSummary()

          # Step 1: Generate questions (if ask_questions is True)
          if ask_questions:
+             logger.info(f"❓ Agent {agent_id}: Analyzing prompt and generating clarifying questions")
              questions_prompt = f"""
  Understand this prompt and what the user wants to achieve by it:
  ==========
@@ -204,25 +224,31 @@ IMPORTANT: For each question, provide a technical key identifier that:
                  response = self._call_ai_parse(messages, QuestionList)
                  question_list = self._extract_parsed_content(response, QuestionList)

-                 logger.debug(f"Generated {len(question_list.questions)} questions")
+                 # Extract token usage for question generation
+                 token_summary.question_generation = self._extract_token_usage(response)
+
+                 logger.info(f"❓ Agent {agent_id}: Generated {len(question_list.questions)} questions (tokens: {token_summary.question_generation.total_tokens})")

                  # If we have questions, return them to the user
                  if question_list.questions:
-                     agent_response = AgentResponse(questions=question_list.questions)
+                     agent_response = AgentResponse(agent_id=agent_id, questions=question_list.questions, token_summary=token_summary)
                      # Store the session for continuation
                      self._agent_sessions[agent_response.agent_id] = {
                          "original_prompt": prompt,
                          "final_response_structure": final_response_structure,
                          "questions": question_list.questions,
-                         "step": "waiting_for_answers"
+                         "step": "waiting_for_answers",
+                         "token_summary": token_summary
                      }
+                     logger.info(f"📋 Agent {agent_id}: Waiting for user answers to {len(question_list.questions)} questions")
                      return agent_response

              except Exception as e:
                  logger.warning(f"Failed to generate questions: {e}. Proceeding without questions.")

          # Step 2 & 3: No questions or ask_questions=False - proceed directly
-         return self._process_with_answers(prompt, final_response_structure, [])
+         logger.info(f"⚡ Agent {agent_id}: No questions needed, proceeding directly to processing")
+         return self._process_with_answers(prompt, final_response_structure, [], agent_id, token_summary)

      def answer_to_agent(self, agent_id: str, answers: AnswerList) -> AgentResponse:
          """
@@ -248,11 +274,18 @@ IMPORTANT: For each question, provide a technical key identifier that:
          # Convert answers to a more usable format
          answer_dict = {answer.key: answer.answer for answer in answers.answers}

+         logger.info(f"📝 Agent {agent_id}: Received {len(answers.answers)} answers, continuing processing")
+
+         # Get token summary from session
+         token_summary = session.get("token_summary", TokenSummary())
+
          # Process with the provided answers
          result = self._process_with_answers(
              session["original_prompt"],
              session["final_response_structure"],
-             answer_dict
+             answer_dict,
+             agent_id,
+             token_summary
          )

          # Clean up the session
@@ -264,7 +297,9 @@ IMPORTANT: For each question, provide a technical key identifier that:
          self,
          prompt: str,
          final_response_structure: Type[BaseModel],
-         answers: Union[list, dict[str, str]]
+         answers: Union[list, dict[str, str]],
+         agent_id: str,
+         token_summary: TokenSummary
      ) -> AgentResponse:
          """
          Process the prompt with answers through the thinking pipeline.
@@ -273,28 +308,38 @@ IMPORTANT: For each question, provide a technical key identifier that:
              prompt: The original prompt
              final_response_structure: Expected final response structure
              answers: Answers to questions (empty if no questions were asked)
+             agent_id: The agent session identifier
+             token_summary: Current token usage summary

          Returns:
              AgentResponse with the final result
          """
          # Step 3: Process the prompt with thinking
-         result = self._think_and_process(prompt, answers)
+         logger.info(f"🧠 Agent {agent_id}: Processing prompt and generating initial response")
+         result = self._think_and_process(prompt, answers, agent_id, token_summary)

          # Step 4: Quality check and iteration
-         final_result = self._quality_check_and_iterate(prompt, result, answers)
+         final_result = self._quality_check_and_iterate(prompt, result, answers, agent_id, token_summary)

          # Step 5: Generate final answer in requested format
-         final_response = self._generate_final_response(prompt, final_result, final_response_structure)
+         logger.info(f"📝 Agent {agent_id}: Generating final structured response")
+         final_response = self._generate_final_response(prompt, final_result, final_response_structure, agent_id, token_summary)
+
+         # Log final token summary
+         logger.info(f"📊 Agent {agent_id}: Token usage summary - Total: {token_summary.total_tokens} "
+                     f"(Prompt: {token_summary.total_prompt_tokens}, Completion: {token_summary.total_completion_tokens})")

-         return AgentResponse(final_response=final_response)
+         return AgentResponse(agent_id=agent_id, final_response=final_response, token_summary=token_summary)

-     def _think_and_process(self, prompt: str, answers: Union[list, dict[str, str]]) -> str:
+     def _think_and_process(self, prompt: str, answers: Union[list, dict[str, str]], agent_id: str, token_summary: TokenSummary) -> str:
          """
          Process the prompt with thinking.

          Args:
              prompt: The original prompt
              answers: Answers to questions
+             agent_id: The agent session identifier
+             token_summary: Current token usage summary

          Returns:
              The AI's result
@@ -325,12 +370,17 @@ Provide your best result for the given prompt.
          try:
              response = self._call_ai_parse(messages, Result)
              result_obj = self._extract_parsed_content(response, Result)
+
+             # Track token usage for thinking process
+             token_summary.thinking_process = self._extract_token_usage(response)
+             logger.info(f"🧠 Agent {agent_id}: Thinking completed (tokens: {token_summary.thinking_process.total_tokens})")
+
              return result_obj.result
          except Exception as e:
              logger.error(f"Error in thinking process: {e}")
              raise RuntimeError(f"Failed to process prompt with AI client: {e}") from e

-     def _quality_check_and_iterate(self, prompt: str, result: str, answers: Union[list, dict[str, str]]) -> str:
+     def _quality_check_and_iterate(self, prompt: str, result: str, answers: Union[list, dict[str, str]], agent_id: str, token_summary: TokenSummary) -> str:
          """
          Check the quality of the result and iterate if needed.

@@ -338,12 +388,20 @@ Provide your best result for the given prompt.
              prompt: The original prompt
              result: The current result
              answers: The answers provided
+             agent_id: The agent session identifier
+             token_summary: Current token usage summary

          Returns:
              The final improved result
          """
          current_result = result

+         if self._max_iterations == 0:
+             logger.info(f"✅ Agent {agent_id}: Skipping quality check (max_iterations=0)")
+             return current_result
+
+         logger.info(f"🔍 Agent {agent_id}: Starting quality check and improvement process (max iterations: {self._max_iterations})")
+
          for iteration in range(self._max_iterations):
              quality_prompt = f"""
  Given this original prompt:
@@ -367,11 +425,15 @@ Evaluate the quality and provide feedback if improvements are needed.
                  response = self._call_ai_parse(messages, QualityCheck)
                  quality_check = self._extract_parsed_content(response, QualityCheck)

+                 # Track token usage for quality check
+                 quality_check_tokens = self._extract_token_usage(response)
+                 token_summary.quality_checks.append(quality_check_tokens)
+
                  if quality_check.is_good:
-                     logger.debug(f"Quality check passed on iteration {iteration}")
+                     logger.info(f"✅ Agent {agent_id}: Quality check passed on iteration {iteration + 1} (tokens: {quality_check_tokens.total_tokens})")
                      break

-                 logger.debug(f"Quality check failed on iteration {iteration}: {quality_check.feedback}")
+                 logger.info(f"🔄 Agent {agent_id}: Quality check iteration {iteration + 1} - Improvements needed: {quality_check.feedback[:100]}... (tokens: {quality_check_tokens.total_tokens})")

                  # Improve the result
                  improvement_prompt = f"""
@@ -394,17 +456,27 @@ Please provide an improved version that addresses the feedback while maintaining
  """

                  messages = [{"role": "user", "content": improvement_prompt}]
-                 response = self._call_ai_parse(messages, Result)
-                 result_obj = self._extract_parsed_content(response, Result)
+                 improvement_response = self._call_ai_parse(messages, Result)
+                 result_obj = self._extract_parsed_content(improvement_response, Result)
                  current_result = result_obj.result

+                 # Track token usage for improvement
+                 improvement_tokens = self._extract_token_usage(improvement_response)
+                 token_summary.improvements.append(improvement_tokens)
+
+                 logger.info(f"⚡ Agent {agent_id}: Improvement iteration {iteration + 1} completed (tokens: {improvement_tokens.total_tokens})")
+
              except Exception as e:
                  logger.warning(f"Error in quality check iteration {iteration}: {e}")
                  break

+         total_quality_tokens = sum(usage.total_tokens for usage in token_summary.quality_checks)
+         total_improvement_tokens = sum(usage.total_tokens for usage in token_summary.improvements)
+         logger.info(f"🏁 Agent {agent_id}: Quality check completed - {len(token_summary.quality_checks)} checks, {len(token_summary.improvements)} improvements (Quality tokens: {total_quality_tokens}, Improvement tokens: {total_improvement_tokens})")
+
          return current_result

-     def _generate_final_response(self, prompt: str, result: str, final_response_structure: Type[BaseModel]) -> BaseModel:
+     def _generate_final_response(self, prompt: str, result: str, final_response_structure: Type[BaseModel], agent_id: str, token_summary: TokenSummary) -> BaseModel:
          """
          Generate the final response in the requested format.

@@ -412,6 +484,8 @@ Please provide an improved version that addresses the feedback while maintaining
              prompt: The original prompt
              result: The processed result
              final_response_structure: The expected response structure
+             agent_id: The agent session identifier
+             token_summary: Current token usage summary

          Returns:
              The final response in the requested format
@@ -434,7 +508,13 @@ Generate the final answer in the exact format requested. Make sure the response

          try:
              response = self._call_ai_parse(messages, final_response_structure)
-             return self._extract_parsed_content(response, final_response_structure)
+             final_response = self._extract_parsed_content(response, final_response_structure)
+
+             # Track token usage for final response generation
+             token_summary.final_response = self._extract_token_usage(response)
+             logger.info(f"📝 Agent {agent_id}: Final structured response generated (tokens: {token_summary.final_response.total_tokens})")
+
+             return final_response
          except Exception as e:
              logger.error(f"Error generating final response: {e}")
              # Fallback - try to create a basic response
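
Taken together, the client.py changes above thread a shared agent_id and TokenSummary through every pipeline step and expose both on the returned AgentResponse. The following is a minimal sketch of how a caller might read the new accounting; the OneLiner model and prompt are illustrative assumptions, while AgentClient, agent(), and token_summary come from the diff itself:

    # Sketch only: response model and prompt are illustrative assumptions.
    import os
    from pydantic import BaseModel, Field
    from mbxai.openrouter import OpenRouterClient
    from mbxai.agent import AgentClient

    class OneLiner(BaseModel):
        text: str = Field(description="A one-sentence answer")

    client = OpenRouterClient(token=os.environ["OPENROUTER_API_KEY"])
    agent = AgentClient(client, max_iterations=1)
    response = agent.agent("Summarize what an MCP server does.", OneLiner, ask_questions=False)

    if response.token_summary:  # populated by the 2.0.5 changes above
        ts = response.token_summary
        print(f"total={ts.total_tokens} prompt={ts.total_prompt_tokens} completion={ts.total_completion_tokens}")
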
mbxai/agent/models.py CHANGED
@@ -46,6 +46,7 @@ class AgentResponse(BaseModel):
      agent_id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Unique identifier for this agent session")
      questions: list[Question] = Field(default_factory=list, description="List of questions for the user")
      final_response: Optional[Any] = Field(default=None, description="The final response if processing is complete")
+     token_summary: Optional["TokenSummary"] = Field(default=None, description="Summary of token usage for this agent process")

      def has_questions(self) -> bool:
          """Check if this response has questions that need to be answered."""
@@ -76,3 +77,55 @@ class QualityCheck(BaseModel):
      """Result of quality checking the AI response."""
      is_good: bool = Field(description="Whether the result is good enough")
      feedback: str = Field(description="Feedback on what could be improved if not good")
+
+
+ class TokenUsage(BaseModel):
+     """Token usage information for a single API call."""
+     prompt_tokens: int = Field(default=0, description="Number of tokens in the prompt")
+     completion_tokens: int = Field(default=0, description="Number of tokens in the completion")
+     total_tokens: int = Field(default=0, description="Total number of tokens used")
+
+
+ class TokenSummary(BaseModel):
+     """Summary of token usage across all API calls in an agent process."""
+     question_generation: TokenUsage = Field(default_factory=TokenUsage, description="Tokens used for question generation")
+     thinking_process: TokenUsage = Field(default_factory=TokenUsage, description="Tokens used for thinking/processing")
+     quality_checks: list[TokenUsage] = Field(default_factory=list, description="Tokens used for each quality check iteration")
+     improvements: list[TokenUsage] = Field(default_factory=list, description="Tokens used for each improvement iteration")
+     final_response: TokenUsage = Field(default_factory=TokenUsage, description="Tokens used for final response generation")
+
+     @property
+     def total_tokens(self) -> int:
+         """Calculate total tokens used across all operations."""
+         total = (
+             self.question_generation.total_tokens +
+             self.thinking_process.total_tokens +
+             sum(usage.total_tokens for usage in self.quality_checks) +
+             sum(usage.total_tokens for usage in self.improvements) +
+             self.final_response.total_tokens
+         )
+         return total
+
+     @property
+     def total_prompt_tokens(self) -> int:
+         """Calculate total prompt tokens used across all operations."""
+         total = (
+             self.question_generation.prompt_tokens +
+             self.thinking_process.prompt_tokens +
+             sum(usage.prompt_tokens for usage in self.quality_checks) +
+             sum(usage.prompt_tokens for usage in self.improvements) +
+             self.final_response.prompt_tokens
+         )
+         return total
+
+     @property
+     def total_completion_tokens(self) -> int:
+         """Calculate total completion tokens used across all operations."""
+         total = (
+             self.question_generation.completion_tokens +
+             self.thinking_process.completion_tokens +
+             sum(usage.completion_tokens for usage in self.quality_checks) +
+             sum(usage.completion_tokens for usage in self.improvements) +
+             self.final_response.completion_tokens
+         )
+         return total
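
TokenSummary keeps one TokenUsage per fixed stage and a list per repeatable stage, so the totals above are straight sums. A small sketch of the aggregation semantics, using only the models defined in this hunk; the token counts are made-up values:

    # Sketch of TokenSummary aggregation; the numbers are illustrative.
    from mbxai.agent.models import TokenUsage, TokenSummary

    summary = TokenSummary(
        question_generation=TokenUsage(prompt_tokens=120, completion_tokens=40, total_tokens=160),
        thinking_process=TokenUsage(prompt_tokens=300, completion_tokens=200, total_tokens=500),
    )
    # Each quality-check or improvement iteration appends one TokenUsage record.
    summary.quality_checks.append(TokenUsage(prompt_tokens=80, completion_tokens=20, total_tokens=100))

    assert summary.total_tokens == 160 + 500 + 100
    assert summary.total_prompt_tokens == 120 + 300 + 80
    assert summary.total_completion_tokens == 40 + 200 + 20
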
mbxai/examples/agent_logging_example.py ADDED
@@ -0,0 +1,212 @@
+ """
+ Example demonstrating the enhanced logging and token tracking features of the AgentClient.
+ """
+
+ import os
+ import logging
+ from pydantic import BaseModel, Field
+
+ from mbxai.openrouter import OpenRouterClient
+ from mbxai.agent import AgentClient
+
+ # Configure logging to see all the agent information
+ logging.basicConfig(
+     level=logging.INFO,
+     format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+ )
+
+ class WeatherResponse(BaseModel):
+     """Response format for weather information."""
+     location: str = Field(description="The location requested")
+     current_conditions: str = Field(description="Current weather conditions")
+     temperature: str = Field(description="Current temperature")
+     forecast: str = Field(description="Weather forecast")
+     recommendation: str = Field(description="Clothing or activity recommendation based on weather")
+
+ class AnalysisResponse(BaseModel):
+     """Response format for complex analysis."""
+     summary: str = Field(description="Executive summary of the analysis")
+     key_findings: list[str] = Field(description="List of key findings")
+     methodology: str = Field(description="How the analysis was conducted")
+     recommendations: list[str] = Field(description="Actionable recommendations")
+     confidence_level: str = Field(description="Confidence level in the analysis")
+
+ def demonstrate_agent_with_questions():
+     """Demonstrate agent process with question generation."""
+     print("\n" + "="*60)
+     print("🔍 DEMO: Agent with Question Generation")
+     print("="*60)
+
+     try:
+         # Note: This requires a real OpenRouter API key
+         api_key = os.getenv("OPENROUTER_API_KEY")
+         if not api_key:
+             print("❌ OPENROUTER_API_KEY not found. Using mock example.")
+             print("Set OPENROUTER_API_KEY environment variable to run with real API calls.")
+             return
+
+         openrouter_client = OpenRouterClient(token=api_key)
+         agent = AgentClient(openrouter_client, max_iterations=2)
+
+         prompt = "I need weather information for planning my outdoor activities this weekend."
+
+         print(f"📤 Sending prompt: {prompt}")
+         response = agent.agent(prompt, WeatherResponse, ask_questions=True)
+
+         if response.has_questions():
+             print(f"\n📋 Agent generated {len(response.questions)} questions:")
+             for i, question in enumerate(response.questions, 1):
+                 print(f" {i}. {question.question} (key: {question.key})")
+
+             if response.token_summary:
+                 print(f"\n📊 Token usage for question generation:")
+                 print(f" - Prompt tokens: {response.token_summary.question_generation.prompt_tokens}")
+                 print(f" - Completion tokens: {response.token_summary.question_generation.completion_tokens}")
+                 print(f" - Total tokens: {response.token_summary.question_generation.total_tokens}")
+
+             # Simulate user providing answers
+             from mbxai.agent.models import AnswerList, Answer
+
+             answers = AnswerList(answers=[
+                 Answer(key="location", answer="San Francisco, CA"),
+                 Answer(key="activity_type", answer="hiking and outdoor photography"),
+                 Answer(key="time_frame", answer="Saturday and Sunday morning")
+             ])
+
+             print(f"\n📝 Providing answers and continuing...")
+             final_response = agent.answer_to_agent(response.agent_id, answers)
+
+             if final_response.is_complete():
+                 print("\n✅ Final response received!")
+                 print(f"📊 Complete token summary:")
+                 if final_response.token_summary:
+                     ts = final_response.token_summary
+                     print(f" - Question generation: {ts.question_generation.total_tokens} tokens")
+                     print(f" - Thinking process: {ts.thinking_process.total_tokens} tokens")
+                     print(f" - Quality checks: {sum(q.total_tokens for q in ts.quality_checks)} tokens ({len(ts.quality_checks)} checks)")
+                     print(f" - Improvements: {sum(i.total_tokens for i in ts.improvements)} tokens ({len(ts.improvements)} iterations)")
+                     print(f" - Final response: {ts.final_response.total_tokens} tokens")
+                     print(f" - TOTAL: {ts.total_tokens} tokens")
+
+                 # Access the structured response
+                 weather_data = final_response.final_response
+                 print(f"\n🌤️ Weather for {weather_data.location}:")
+                 print(f" Current: {weather_data.current_conditions}")
+                 print(f" Temperature: {weather_data.temperature}")
+                 print(f" Recommendation: {weather_data.recommendation}")
+
+     except Exception as e:
+         print(f"❌ Error: {e}")
+
+ def demonstrate_agent_without_questions():
+     """Demonstrate agent process without question generation."""
+     print("\n" + "="*60)
+     print("⚡ DEMO: Agent without Question Generation (Direct Processing)")
+     print("="*60)
+
+     try:
+         # Note: This requires a real OpenRouter API key
+         api_key = os.getenv("OPENROUTER_API_KEY")
+         if not api_key:
+             print("❌ OPENROUTER_API_KEY not found. Using mock example.")
+             print("Set OPENROUTER_API_KEY environment variable to run with real API calls.")
+             return
+
+         openrouter_client = OpenRouterClient(token=api_key)
+         agent = AgentClient(openrouter_client, max_iterations=1)
+
+         prompt = """
+         Analyze the current state of renewable energy adoption in Europe.
+         Focus on solar and wind power, include recent statistics, challenges,
+         and future outlook for the next 5 years.
+         """
+
+         print(f"📤 Sending prompt: {prompt[:100]}...")
+         response = agent.agent(prompt, AnalysisResponse, ask_questions=False)
+
+         if response.is_complete():
+             print("\n✅ Analysis completed!")
+
+             if response.token_summary:
+                 ts = response.token_summary
+                 print(f"\n📊 Token usage breakdown:")
+                 print(f" - Thinking process: {ts.thinking_process.total_tokens} tokens")
+                 print(f" - Quality checks: {sum(q.total_tokens for q in ts.quality_checks)} tokens ({len(ts.quality_checks)} checks)")
+                 print(f" - Improvements: {sum(i.total_tokens for i in ts.improvements)} tokens ({len(ts.improvements)} iterations)")
+                 print(f" - Final response: {ts.final_response.total_tokens} tokens")
+                 print(f" - TOTAL: {ts.total_tokens} tokens")
+
+             # Access the structured response
+             analysis = response.final_response
+             print(f"\n📊 Analysis Results:")
+             print(f" Summary: {analysis.summary[:150]}...")
+             print(f" Key Findings: {len(analysis.key_findings)} items")
+             print(f" Recommendations: {len(analysis.recommendations)} items")
+             print(f" Confidence: {analysis.confidence_level}")
+
+     except Exception as e:
+         print(f"❌ Error: {e}")
+
+ def demonstrate_different_iteration_settings():
+     """Demonstrate different max_iterations settings and their effect on token usage."""
+     print("\n" + "="*60)
+     print("🔄 DEMO: Different Iteration Settings")
+     print("="*60)
+
+     iteration_configs = [
+         {"iterations": 0, "description": "No quality checks"},
+         {"iterations": 1, "description": "Basic quality check"},
+         {"iterations": 3, "description": "Thorough quality improvement"}
+     ]
+
+     prompt = "Explain quantum computing in simple terms for a business audience."
+
+     for config in iteration_configs:
+         print(f"\n📋 Testing with {config['iterations']} max iterations ({config['description']})")
+         print("-" * 40)
+
+         try:
+             api_key = os.getenv("OPENROUTER_API_KEY")
+             if not api_key:
+                 print(f" ❌ Skipping - OPENROUTER_API_KEY not found")
+                 continue
+
+             openrouter_client = OpenRouterClient(token=api_key)
+             agent = AgentClient(openrouter_client, max_iterations=config["iterations"])
+
+             print(f" 🚀 Processing with max_iterations={config['iterations']}")
+             print(f" - Description: {config['description']}")
+             print(f" - Expected processing time: {'Low' if config['iterations'] <= 1 else 'Medium' if config['iterations'] <= 2 else 'High'}")
+             print(f" - Expected response quality: {'Basic' if config['iterations'] == 0 else 'Good' if config['iterations'] <= 2 else 'Excellent'}")
+
+             # In real usage, you would call:
+             # response = agent.agent(prompt, AnalysisResponse, ask_questions=False)
+
+         except Exception as e:
+             print(f" ❌ Error: {e}")
+
+ if __name__ == "__main__":
+     print("🤖 Agent Client Logging and Token Tracking Demo")
+     print("This example demonstrates the enhanced logging and token usage tracking features.")
+
+     # Check for API key
+     api_key = os.getenv("OPENROUTER_API_KEY")
+     if not api_key:
+         print("\n⚠️ Note: To run with real API calls, set the OPENROUTER_API_KEY environment variable.")
+         print("The examples will show the logging structure but won't make actual API calls.")
+
+     # Run demonstrations
+     demonstrate_agent_with_questions()
+     demonstrate_agent_without_questions()
+     demonstrate_different_iteration_settings()
+
+     print("\n✅ Demo completed!")
+     print("\nTo see the logging in action, run this script with a valid OPENROUTER_API_KEY.")
+     print("You'll see detailed logs showing:")
+     print(" - 🚀 Agent process start")
+     print(" - ❓ Question generation")
+     print(" - 🧠 Thinking process")
+     print(" - 🔍 Quality checks")
+     print(" - ⚡ Improvements")
+     print(" - 📝 Final response generation")
+     print(" - 📊 Complete token usage summary")
mbxai/mcp/server.py CHANGED
@@ -31,7 +31,7 @@ class MCPServer:
          self.app = FastAPI(
              title=self.name,
              description=self.description,
-             version="2.0.4",
+             version="2.0.5",
          )

          # Initialize MCP server
{mbxai-2.0.4.dist-info → mbxai-2.0.5.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: mbxai
- Version: 2.0.4
+ Version: 2.0.5
  Summary: MBX AI SDK
  Project-URL: Homepage, https://www.mibexx.de
  Project-URL: Documentation, https://www.mibexx.de
{mbxai-2.0.4.dist-info → mbxai-2.0.5.dist-info}/RECORD RENAMED
@@ -1,10 +1,11 @@
- mbxai/__init__.py,sha256=tQvBc7DtUU3Y8uBqWgt8a89YWskLrhWMzhuEpFHbtEs,407
+ mbxai/__init__.py,sha256=LBQ9uRdY1u9DERFu9g7Q-EN6BbL2Q0jQmovQBZN2E6U,407
  mbxai/core.py,sha256=WMvmU9TTa7M_m-qWsUew4xH8Ul6xseCZ2iBCXJTW-Bs,196
  mbxai/agent/__init__.py,sha256=5j3mW2NZtAU1s2w8n833axWBQsxW8U0qKwoQ9JtQZ4k,289
- mbxai/agent/client.py,sha256=Gpu3B8NeBSuZeSx-pjvhvCv-yntzvbwFaYpVVwHASFc,18367
- mbxai/agent/models.py,sha256=0IYhO2jO40ydpFwuZewZa11Pbd5NXqdofCKmMKGmKc8,3144
+ mbxai/agent/client.py,sha256=Rt8S2a7WYa674xgK7T_ZralxCZ-WqppeOlcLCljUClk,23648
+ mbxai/agent/models.py,sha256=sjBtaAENDABHl8IqTON1gxFFSZIaQYUCBFHB5804_Fw,5780
  mbxai/examples/agent_example.py,sha256=uECWy8QX1IhJMVcdw6EJy6sLcvO8vKgEF_YHJOhZO6Y,5947
  mbxai/examples/agent_iterations_example.py,sha256=xMqZhBWS67EkRkArjOAY2fCgLkQ32Qn9E4CSfEKW4MU,7905
+ mbxai/examples/agent_logging_example.py,sha256=su2Ccdfp8aYGCQkZhnNRGbaBd6DZEsYpPoxm7dI2g_o,10162
  mbxai/examples/agent_tool_registration_example.py,sha256=oWm0-d4mdba-VQ3HobiCIR0IHtEDCtJenb8Lnm9QqCw,9108
  mbxai/examples/agent_validation_example.py,sha256=xlEf5Mwq5_Iu8bNU4cuHGZVYvAyZNhO2GMFmOom-CLo,4185
  mbxai/examples/auto_schema_example.py,sha256=ymuJJqqDxYznZT2VN6zVFEM7m_lDuccZ1AKSx-xzLTM,8174
@@ -21,7 +22,7 @@ mbxai/examples/mcp/mcp_server_example.py,sha256=nFfg22Jnc6HMW_ezLO3So1xwDdx2_rIt
  mbxai/mcp/__init__.py,sha256=_ek9iYdYqW5saKetj4qDci11jxesQDiHPJRpHMKkxgU,175
  mbxai/mcp/client.py,sha256=QRzId6o4_WRWVv3rtm8cfZZGaoY_UlaOO-oqNjY-tmw,5219
  mbxai/mcp/example.py,sha256=oaol7AvvZnX86JWNz64KvPjab5gg1VjVN3G8eFSzuaE,2350
- mbxai/mcp/server.py,sha256=dDU425A2ufXsjMbYuOH-KZVjCRUyNR716DRREGRfGwc,3332
+ mbxai/mcp/server.py,sha256=vJeVQpC616KNAwPzZP8FVfHmYnfIYbGq-L8h1MRVIC0,3332
  mbxai/openrouter/__init__.py,sha256=Ito9Qp_B6q-RLGAQcYyTJVWwR2YAZvNqE-HIYXxhtD8,298
  mbxai/openrouter/client.py,sha256=3LD6WDJ8wjo_nefH5d1NJCsrWPvBc_KBf2NsItUoSt8,18302
  mbxai/openrouter/config.py,sha256=Ia93s-auim9Sq71eunVDbn9ET5xX2zusXpV4JBdHAzs,3251
@@ -31,7 +32,7 @@ mbxai/tools/__init__.py,sha256=ogxrHvgJ7OR62Lmd5x9Eh5d2C0jqWyQis7Zy3yKpZ78,218
  mbxai/tools/client.py,sha256=2wFPD-UN3Y2DSyrnqxt2vvFgTYHzUl14_y0r6fhAWmM,17198
  mbxai/tools/example.py,sha256=1HgKK39zzUuwFbnp3f0ThyWVfA_8P28PZcTwaUw5K78,2232
  mbxai/tools/types.py,sha256=OFfM7scDGTm4FOcJA2ecj-fxL1MEBkqPsT3hqCL1Jto,9505
- mbxai-2.0.4.dist-info/METADATA,sha256=0EqHKwLFaFtmBBXTZRWFEsxQJJmu2TS_32UpSVQm7J8,10018
- mbxai-2.0.4.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
- mbxai-2.0.4.dist-info/licenses/LICENSE,sha256=hEyhc4FxwYo3NQ40yNgZ7STqwVk-1_XcTXOnAPbGJAw,1069
- mbxai-2.0.4.dist-info/RECORD,,
+ mbxai-2.0.5.dist-info/METADATA,sha256=Wa_VavJcCfmnybtF5oRyECfrYVnjr-5Hjw81Uq-LOZc,10018
+ mbxai-2.0.5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ mbxai-2.0.5.dist-info/licenses/LICENSE,sha256=hEyhc4FxwYo3NQ40yNgZ7STqwVk-1_XcTXOnAPbGJAw,1069
+ mbxai-2.0.5.dist-info/RECORD,,