mbxai 2.0.3__py3-none-any.whl → 2.0.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
mbxai/__init__.py CHANGED
@@ -7,7 +7,7 @@ from .openrouter import OpenRouterClient
 from .tools import ToolClient
 from .mcp import MCPClient
 
-__version__ = "2.0.3"
+__version__ = "2.0.5"
 
 __all__ = [
     "AgentClient",
mbxai/agent/client.py CHANGED
@@ -10,7 +10,7 @@ from pydantic import BaseModel
 from ..openrouter import OpenRouterClient
 from ..tools import ToolClient
 from ..mcp import MCPClient
-from .models import AgentResponse, Question, QuestionList, AnswerList, Result, QualityCheck
+from .models import AgentResponse, Question, QuestionList, AnswerList, Result, QualityCheck, TokenUsage, TokenSummary
 
 logger = logging.getLogger(__name__)
 
@@ -127,6 +127,21 @@ class AgentClient:
         """Call the parse method on the AI client."""
         return self._ai_client.parse(messages, response_format)
 
+    def _extract_token_usage(self, response: Any) -> TokenUsage:
+        """Extract token usage information from an AI response."""
+        try:
+            if hasattr(response, 'usage') and response.usage:
+                usage = response.usage
+                return TokenUsage(
+                    prompt_tokens=getattr(usage, 'prompt_tokens', 0),
+                    completion_tokens=getattr(usage, 'completion_tokens', 0),
+                    total_tokens=getattr(usage, 'total_tokens', 0)
+                )
+        except (AttributeError, TypeError) as e:
+            logger.debug(f"Could not extract token usage: {e}")
+
+        return TokenUsage()  # Return empty usage if extraction fails
+
     def _extract_parsed_content(self, response: Any, response_format: Type[BaseModel]) -> BaseModel:
         """Extract the parsed content from the AI response."""
         if hasattr(response, 'choices') and len(response.choices) > 0:
@@ -177,10 +192,15 @@ class AgentClient:
         Returns:
             AgentResponse containing either questions to ask or the final response
         """
-        logger.debug(f"Starting agent process with prompt: {prompt[:100]}...")
+        agent_id = str(__import__("uuid").uuid4())
+        logger.info(f"🚀 Starting agent process (ID: {agent_id}) with prompt: {prompt[:100]}...")
+
+        # Initialize token summary
+        token_summary = TokenSummary()
 
         # Step 1: Generate questions (if ask_questions is True)
         if ask_questions:
+            logger.info(f"❓ Agent {agent_id}: Analyzing prompt and generating clarifying questions")
             questions_prompt = f"""
Understand this prompt and what the user wants to achieve by it:
==========
@@ -190,6 +210,12 @@ Understand this prompt and what the user wants to achieve by it:
 Think about useful steps and which information are required for it. First ask for required information and details to improve that process, when that is useful for the given case. When it's not useful, return an empty list of questions.
 Use available tools to gather information or perform actions that would improve your response.
 Analyze the prompt carefully and determine if additional information would significantly improve the quality of the response. Only ask questions that are truly necessary and would materially impact the outcome.
+
+IMPORTANT: For each question, provide a technical key identifier that:
+- Uses only alphanumeric characters and underscores
+- Starts with a letter
+- Is descriptive but concise (e.g., "user_name", "email_address", "preferred_genre", "budget_range")
+- Contains no spaces, hyphens, or special characters like ?, !, @, etc.
 """
 
             messages = [{"role": "user", "content": questions_prompt}]
@@ -198,25 +224,31 @@ Analyze the prompt carefully and determine if additional information would signi
                 response = self._call_ai_parse(messages, QuestionList)
                 question_list = self._extract_parsed_content(response, QuestionList)
 
-                logger.debug(f"Generated {len(question_list.questions)} questions")
+                # Extract token usage for question generation
+                token_summary.question_generation = self._extract_token_usage(response)
+
+                logger.info(f"❓ Agent {agent_id}: Generated {len(question_list.questions)} questions (tokens: {token_summary.question_generation.total_tokens})")
 
                 # If we have questions, return them to the user
                 if question_list.questions:
-                    agent_response = AgentResponse(questions=question_list.questions)
+                    agent_response = AgentResponse(agent_id=agent_id, questions=question_list.questions, token_summary=token_summary)
                     # Store the session for continuation
                     self._agent_sessions[agent_response.agent_id] = {
                         "original_prompt": prompt,
                         "final_response_structure": final_response_structure,
                         "questions": question_list.questions,
-                        "step": "waiting_for_answers"
+                        "step": "waiting_for_answers",
+                        "token_summary": token_summary
                     }
+                    logger.info(f"📋 Agent {agent_id}: Waiting for user answers to {len(question_list.questions)} questions")
                     return agent_response
 
             except Exception as e:
                 logger.warning(f"Failed to generate questions: {e}. Proceeding without questions.")
 
         # Step 2 & 3: No questions or ask_questions=False - proceed directly
-        return self._process_with_answers(prompt, final_response_structure, [])
+        logger.info(f"⚡ Agent {agent_id}: No questions needed, proceeding directly to processing")
+        return self._process_with_answers(prompt, final_response_structure, [], agent_id, token_summary)
 
     def answer_to_agent(self, agent_id: str, answers: AnswerList) -> AgentResponse:
         """
@@ -242,11 +274,18 @@ Analyze the prompt carefully and determine if additional information would signi
         # Convert answers to a more usable format
         answer_dict = {answer.key: answer.answer for answer in answers.answers}
 
+        logger.info(f"📝 Agent {agent_id}: Received {len(answers.answers)} answers, continuing processing")
+
+        # Get token summary from session
+        token_summary = session.get("token_summary", TokenSummary())
+
         # Process with the provided answers
         result = self._process_with_answers(
             session["original_prompt"],
             session["final_response_structure"],
-            answer_dict
+            answer_dict,
+            agent_id,
+            token_summary
         )
 
         # Clean up the session
@@ -258,7 +297,9 @@ Analyze the prompt carefully and determine if additional information would signi
         self,
         prompt: str,
         final_response_structure: Type[BaseModel],
-        answers: Union[list, dict[str, str]]
+        answers: Union[list, dict[str, str]],
+        agent_id: str,
+        token_summary: TokenSummary
     ) -> AgentResponse:
         """
         Process the prompt with answers through the thinking pipeline.
@@ -267,28 +308,38 @@ Analyze the prompt carefully and determine if additional information would signi
             prompt: The original prompt
             final_response_structure: Expected final response structure
             answers: Answers to questions (empty if no questions were asked)
+            agent_id: The agent session identifier
+            token_summary: Current token usage summary
 
         Returns:
             AgentResponse with the final result
         """
         # Step 3: Process the prompt with thinking
-        result = self._think_and_process(prompt, answers)
+        logger.info(f"🧠 Agent {agent_id}: Processing prompt and generating initial response")
+        result = self._think_and_process(prompt, answers, agent_id, token_summary)
 
         # Step 4: Quality check and iteration
-        final_result = self._quality_check_and_iterate(prompt, result, answers)
+        final_result = self._quality_check_and_iterate(prompt, result, answers, agent_id, token_summary)
 
         # Step 5: Generate final answer in requested format
-        final_response = self._generate_final_response(prompt, final_result, final_response_structure)
+        logger.info(f"📝 Agent {agent_id}: Generating final structured response")
+        final_response = self._generate_final_response(prompt, final_result, final_response_structure, agent_id, token_summary)
 
-        return AgentResponse(final_response=final_response)
+        # Log final token summary
+        logger.info(f"📊 Agent {agent_id}: Token usage summary - Total: {token_summary.total_tokens} "
+                    f"(Prompt: {token_summary.total_prompt_tokens}, Completion: {token_summary.total_completion_tokens})")
+
+        return AgentResponse(agent_id=agent_id, final_response=final_response, token_summary=token_summary)
 
-    def _think_and_process(self, prompt: str, answers: Union[list, dict[str, str]]) -> str:
+    def _think_and_process(self, prompt: str, answers: Union[list, dict[str, str]], agent_id: str, token_summary: TokenSummary) -> str:
         """
         Process the prompt with thinking.
 
         Args:
             prompt: The original prompt
             answers: Answers to questions
+            agent_id: The agent session identifier
+            token_summary: Current token usage summary
 
         Returns:
             The AI's result
@@ -319,12 +370,17 @@ Provide your best result for the given prompt.
         try:
             response = self._call_ai_parse(messages, Result)
             result_obj = self._extract_parsed_content(response, Result)
+
+            # Track token usage for thinking process
+            token_summary.thinking_process = self._extract_token_usage(response)
+            logger.info(f"🧠 Agent {agent_id}: Thinking completed (tokens: {token_summary.thinking_process.total_tokens})")
+
             return result_obj.result
         except Exception as e:
             logger.error(f"Error in thinking process: {e}")
             raise RuntimeError(f"Failed to process prompt with AI client: {e}") from e
 
-    def _quality_check_and_iterate(self, prompt: str, result: str, answers: Union[list, dict[str, str]]) -> str:
+    def _quality_check_and_iterate(self, prompt: str, result: str, answers: Union[list, dict[str, str]], agent_id: str, token_summary: TokenSummary) -> str:
         """
         Check the quality of the result and iterate if needed.
 
@@ -332,12 +388,20 @@ Provide your best result for the given prompt.
             prompt: The original prompt
             result: The current result
             answers: The answers provided
+            agent_id: The agent session identifier
+            token_summary: Current token usage summary
 
         Returns:
             The final improved result
         """
         current_result = result
 
+        if self._max_iterations == 0:
+            logger.info(f"✅ Agent {agent_id}: Skipping quality check (max_iterations=0)")
+            return current_result
+
+        logger.info(f"🔍 Agent {agent_id}: Starting quality check and improvement process (max iterations: {self._max_iterations})")
+
         for iteration in range(self._max_iterations):
             quality_prompt = f"""
Given this original prompt:
@@ -361,11 +425,15 @@ Evaluate the quality and provide feedback if improvements are needed.
                 response = self._call_ai_parse(messages, QualityCheck)
                 quality_check = self._extract_parsed_content(response, QualityCheck)
 
+                # Track token usage for quality check
+                quality_check_tokens = self._extract_token_usage(response)
+                token_summary.quality_checks.append(quality_check_tokens)
+
                 if quality_check.is_good:
-                    logger.debug(f"Quality check passed on iteration {iteration}")
+                    logger.info(f"✅ Agent {agent_id}: Quality check passed on iteration {iteration + 1} (tokens: {quality_check_tokens.total_tokens})")
                     break
 
-                logger.debug(f"Quality check failed on iteration {iteration}: {quality_check.feedback}")
+                logger.info(f"🔄 Agent {agent_id}: Quality check iteration {iteration + 1} - Improvements needed: {quality_check.feedback[:100]}... (tokens: {quality_check_tokens.total_tokens})")
 
                 # Improve the result
                 improvement_prompt = f"""
@@ -388,17 +456,27 @@ Please provide an improved version that addresses the feedback while maintaining
 """
 
                 messages = [{"role": "user", "content": improvement_prompt}]
-                response = self._call_ai_parse(messages, Result)
-                result_obj = self._extract_parsed_content(response, Result)
+                improvement_response = self._call_ai_parse(messages, Result)
+                result_obj = self._extract_parsed_content(improvement_response, Result)
                 current_result = result_obj.result
 
+                # Track token usage for improvement
+                improvement_tokens = self._extract_token_usage(improvement_response)
+                token_summary.improvements.append(improvement_tokens)
+
+                logger.info(f"⚡ Agent {agent_id}: Improvement iteration {iteration + 1} completed (tokens: {improvement_tokens.total_tokens})")
+
             except Exception as e:
                 logger.warning(f"Error in quality check iteration {iteration}: {e}")
                 break
 
+        total_quality_tokens = sum(usage.total_tokens for usage in token_summary.quality_checks)
+        total_improvement_tokens = sum(usage.total_tokens for usage in token_summary.improvements)
+        logger.info(f"🏁 Agent {agent_id}: Quality check completed - {len(token_summary.quality_checks)} checks, {len(token_summary.improvements)} improvements (Quality tokens: {total_quality_tokens}, Improvement tokens: {total_improvement_tokens})")
+
         return current_result
 
-    def _generate_final_response(self, prompt: str, result: str, final_response_structure: Type[BaseModel]) -> BaseModel:
+    def _generate_final_response(self, prompt: str, result: str, final_response_structure: Type[BaseModel], agent_id: str, token_summary: TokenSummary) -> BaseModel:
         """
         Generate the final response in the requested format.
 
@@ -406,6 +484,8 @@ Please provide an improved version that addresses the feedback while maintaining
             prompt: The original prompt
             result: The processed result
             final_response_structure: The expected response structure
+            agent_id: The agent session identifier
+            token_summary: Current token usage summary
 
         Returns:
             The final response in the requested format
@@ -428,7 +508,13 @@ Generate the final answer in the exact format requested. Make sure the response
 
         try:
             response = self._call_ai_parse(messages, final_response_structure)
-            return self._extract_parsed_content(response, final_response_structure)
+            final_response = self._extract_parsed_content(response, final_response_structure)
+
+            # Track token usage for final response generation
+            token_summary.final_response = self._extract_token_usage(response)
+            logger.info(f"📝 Agent {agent_id}: Final structured response generated (tokens: {token_summary.final_response.total_tokens})")
+
+            return final_response
         except Exception as e:
             logger.error(f"Error generating final response: {e}")
             # Fallback - try to create a basic response
mbxai/agent/models.py CHANGED
@@ -3,15 +3,37 @@ Pydantic models for the agent client.
 """
 
 from typing import Any, Optional
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, field_validator
 import uuid
+import re
 
 
 class Question(BaseModel):
     """A question for the user to provide more information."""
     question: str = Field(description="The question to ask the user")
-    key: str = Field(description="A unique and short technical key identifier to identify this question don't use spaces or special characters")
+    key: str = Field(description="A unique and short technical key identifier using only alphanumeric characters and underscores (e.g., user_name, email_address, age)")
     required: bool = Field(default=True, description="Whether this question is required")
+
+    @field_validator('key')
+    @classmethod
+    def validate_key(cls, v: str) -> str:
+        """Ensure the key contains only alphanumeric characters and underscores."""
+        if not re.match(r'^[a-zA-Z][a-zA-Z0-9_]*$', v):
+            # Convert invalid key to valid format
+            # Remove special characters and replace spaces with underscores
+            cleaned = re.sub(r'[^a-zA-Z0-9_]', '_', v)
+            # Ensure it starts with a letter
+            if not cleaned or not cleaned[0].isalpha():
+                cleaned = 'key_' + cleaned
+            # Remove consecutive underscores
+            cleaned = re.sub(r'_+', '_', cleaned)
+            # Remove trailing underscores
+            cleaned = cleaned.rstrip('_')
+            # Ensure it's not empty
+            if not cleaned:
+                cleaned = 'key'
+            return cleaned
+        return v
 
 
 class Result(BaseModel):
@@ -24,6 +46,7 @@ class AgentResponse(BaseModel):
     agent_id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Unique identifier for this agent session")
     questions: list[Question] = Field(default_factory=list, description="List of questions for the user")
    final_response: Optional[Any] = Field(default=None, description="The final response if processing is complete")
+    token_summary: Optional["TokenSummary"] = Field(default=None, description="Summary of token usage for this agent process")
 
    def has_questions(self) -> bool:
        """Check if this response has questions that need to be answered."""
@@ -54,3 +77,55 @@ class QualityCheck(BaseModel):
     """Result of quality checking the AI response."""
     is_good: bool = Field(description="Whether the result is good enough")
     feedback: str = Field(description="Feedback on what could be improved if not good")
+
+
+class TokenUsage(BaseModel):
+    """Token usage information for a single API call."""
+    prompt_tokens: int = Field(default=0, description="Number of tokens in the prompt")
+    completion_tokens: int = Field(default=0, description="Number of tokens in the completion")
+    total_tokens: int = Field(default=0, description="Total number of tokens used")
+
+
+class TokenSummary(BaseModel):
+    """Summary of token usage across all API calls in an agent process."""
+    question_generation: TokenUsage = Field(default_factory=TokenUsage, description="Tokens used for question generation")
+    thinking_process: TokenUsage = Field(default_factory=TokenUsage, description="Tokens used for thinking/processing")
+    quality_checks: list[TokenUsage] = Field(default_factory=list, description="Tokens used for each quality check iteration")
+    improvements: list[TokenUsage] = Field(default_factory=list, description="Tokens used for each improvement iteration")
+    final_response: TokenUsage = Field(default_factory=TokenUsage, description="Tokens used for final response generation")
+
+    @property
+    def total_tokens(self) -> int:
+        """Calculate total tokens used across all operations."""
+        total = (
+            self.question_generation.total_tokens +
+            self.thinking_process.total_tokens +
+            sum(usage.total_tokens for usage in self.quality_checks) +
+            sum(usage.total_tokens for usage in self.improvements) +
+            self.final_response.total_tokens
+        )
+        return total
+
+    @property
+    def total_prompt_tokens(self) -> int:
+        """Calculate total prompt tokens used across all operations."""
+        total = (
+            self.question_generation.prompt_tokens +
+            self.thinking_process.prompt_tokens +
+            sum(usage.prompt_tokens for usage in self.quality_checks) +
+            sum(usage.prompt_tokens for usage in self.improvements) +
+            self.final_response.prompt_tokens
+        )
+        return total
+
+    @property
+    def total_completion_tokens(self) -> int:
+        """Calculate total completion tokens used across all operations."""
+        total = (
+            self.question_generation.completion_tokens +
+            self.thinking_process.completion_tokens +
+            sum(usage.completion_tokens for usage in self.quality_checks) +
+            sum(usage.completion_tokens for usage in self.improvements) +
+            self.final_response.completion_tokens
+        )
+        return total
mbxai/examples/agent_logging_example.py ADDED
@@ -0,0 +1,212 @@
+"""
+Example demonstrating the enhanced logging and token tracking features of the AgentClient.
+"""
+
+import os
+import logging
+from pydantic import BaseModel, Field
+
+from mbxai.openrouter import OpenRouterClient
+from mbxai.agent import AgentClient
+
+# Configure logging to see all the agent information
+logging.basicConfig(
+    level=logging.INFO,
+    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+)
+
+class WeatherResponse(BaseModel):
+    """Response format for weather information."""
+    location: str = Field(description="The location requested")
+    current_conditions: str = Field(description="Current weather conditions")
+    temperature: str = Field(description="Current temperature")
+    forecast: str = Field(description="Weather forecast")
+    recommendation: str = Field(description="Clothing or activity recommendation based on weather")
+
+class AnalysisResponse(BaseModel):
+    """Response format for complex analysis."""
+    summary: str = Field(description="Executive summary of the analysis")
+    key_findings: list[str] = Field(description="List of key findings")
+    methodology: str = Field(description="How the analysis was conducted")
+    recommendations: list[str] = Field(description="Actionable recommendations")
+    confidence_level: str = Field(description="Confidence level in the analysis")
+
+def demonstrate_agent_with_questions():
+    """Demonstrate agent process with question generation."""
+    print("\n" + "="*60)
+    print("🔍 DEMO: Agent with Question Generation")
+    print("="*60)
+
+    try:
+        # Note: This requires a real OpenRouter API key
+        api_key = os.getenv("OPENROUTER_API_KEY")
+        if not api_key:
+            print("❌ OPENROUTER_API_KEY not found. Using mock example.")
+            print("Set OPENROUTER_API_KEY environment variable to run with real API calls.")
+            return
+
+        openrouter_client = OpenRouterClient(token=api_key)
+        agent = AgentClient(openrouter_client, max_iterations=2)
+
+        prompt = "I need weather information for planning my outdoor activities this weekend."
+
+        print(f"📤 Sending prompt: {prompt}")
+        response = agent.agent(prompt, WeatherResponse, ask_questions=True)
+
+        if response.has_questions():
+            print(f"\n📋 Agent generated {len(response.questions)} questions:")
+            for i, question in enumerate(response.questions, 1):
+                print(f" {i}. {question.question} (key: {question.key})")
+
+            if response.token_summary:
+                print(f"\n📊 Token usage for question generation:")
+                print(f" - Prompt tokens: {response.token_summary.question_generation.prompt_tokens}")
+                print(f" - Completion tokens: {response.token_summary.question_generation.completion_tokens}")
+                print(f" - Total tokens: {response.token_summary.question_generation.total_tokens}")
+
+            # Simulate user providing answers
+            from mbxai.agent.models import AnswerList, Answer
+
+            answers = AnswerList(answers=[
+                Answer(key="location", answer="San Francisco, CA"),
+                Answer(key="activity_type", answer="hiking and outdoor photography"),
+                Answer(key="time_frame", answer="Saturday and Sunday morning")
+            ])
+
+            print(f"\n📝 Providing answers and continuing...")
+            final_response = agent.answer_to_agent(response.agent_id, answers)
+
+            if final_response.is_complete():
+                print("\n✅ Final response received!")
+                print(f"📊 Complete token summary:")
+                if final_response.token_summary:
+                    ts = final_response.token_summary
+                    print(f" - Question generation: {ts.question_generation.total_tokens} tokens")
+                    print(f" - Thinking process: {ts.thinking_process.total_tokens} tokens")
+                    print(f" - Quality checks: {sum(q.total_tokens for q in ts.quality_checks)} tokens ({len(ts.quality_checks)} checks)")
+                    print(f" - Improvements: {sum(i.total_tokens for i in ts.improvements)} tokens ({len(ts.improvements)} iterations)")
+                    print(f" - Final response: {ts.final_response.total_tokens} tokens")
+                    print(f" - TOTAL: {ts.total_tokens} tokens")
+
+                # Access the structured response
+                weather_data = final_response.final_response
+                print(f"\n🌤️ Weather for {weather_data.location}:")
+                print(f" Current: {weather_data.current_conditions}")
+                print(f" Temperature: {weather_data.temperature}")
+                print(f" Recommendation: {weather_data.recommendation}")
+
+    except Exception as e:
+        print(f"❌ Error: {e}")
+
+def demonstrate_agent_without_questions():
+    """Demonstrate agent process without question generation."""
+    print("\n" + "="*60)
+    print("⚡ DEMO: Agent without Question Generation (Direct Processing)")
+    print("="*60)
+
+    try:
+        # Note: This requires a real OpenRouter API key
+        api_key = os.getenv("OPENROUTER_API_KEY")
+        if not api_key:
+            print("❌ OPENROUTER_API_KEY not found. Using mock example.")
+            print("Set OPENROUTER_API_KEY environment variable to run with real API calls.")
+            return
+
+        openrouter_client = OpenRouterClient(token=api_key)
+        agent = AgentClient(openrouter_client, max_iterations=1)
+
+        prompt = """
+        Analyze the current state of renewable energy adoption in Europe.
+        Focus on solar and wind power, include recent statistics, challenges,
+        and future outlook for the next 5 years.
+        """
+
+        print(f"📤 Sending prompt: {prompt[:100]}...")
+        response = agent.agent(prompt, AnalysisResponse, ask_questions=False)
+
+        if response.is_complete():
+            print("\n✅ Analysis completed!")
+
+            if response.token_summary:
+                ts = response.token_summary
+                print(f"\n📊 Token usage breakdown:")
+                print(f" - Thinking process: {ts.thinking_process.total_tokens} tokens")
+                print(f" - Quality checks: {sum(q.total_tokens for q in ts.quality_checks)} tokens ({len(ts.quality_checks)} checks)")
+                print(f" - Improvements: {sum(i.total_tokens for i in ts.improvements)} tokens ({len(ts.improvements)} iterations)")
+                print(f" - Final response: {ts.final_response.total_tokens} tokens")
+                print(f" - TOTAL: {ts.total_tokens} tokens")
+
+            # Access the structured response
+            analysis = response.final_response
+            print(f"\n📊 Analysis Results:")
+            print(f" Summary: {analysis.summary[:150]}...")
+            print(f" Key Findings: {len(analysis.key_findings)} items")
+            print(f" Recommendations: {len(analysis.recommendations)} items")
+            print(f" Confidence: {analysis.confidence_level}")
+
+    except Exception as e:
+        print(f"❌ Error: {e}")
+
+def demonstrate_different_iteration_settings():
+    """Demonstrate different max_iterations settings and their effect on token usage."""
+    print("\n" + "="*60)
+    print("🔄 DEMO: Different Iteration Settings")
+    print("="*60)
+
+    iteration_configs = [
+        {"iterations": 0, "description": "No quality checks"},
+        {"iterations": 1, "description": "Basic quality check"},
+        {"iterations": 3, "description": "Thorough quality improvement"}
+    ]
+
+    prompt = "Explain quantum computing in simple terms for a business audience."
+
+    for config in iteration_configs:
+        print(f"\n📋 Testing with {config['iterations']} max iterations ({config['description']})")
+        print("-" * 40)
+
+        try:
+            api_key = os.getenv("OPENROUTER_API_KEY")
+            if not api_key:
+                print(f" ❌ Skipping - OPENROUTER_API_KEY not found")
+                continue
+
+            openrouter_client = OpenRouterClient(token=api_key)
+            agent = AgentClient(openrouter_client, max_iterations=config["iterations"])
+
+            print(f" 🚀 Processing with max_iterations={config['iterations']}")
+            print(f" - Description: {config['description']}")
+            print(f" - Expected processing time: {'Low' if config['iterations'] <= 1 else 'Medium' if config['iterations'] <= 2 else 'High'}")
+            print(f" - Expected response quality: {'Basic' if config['iterations'] == 0 else 'Good' if config['iterations'] <= 2 else 'Excellent'}")
+
+            # In real usage, you would call:
+            # response = agent.agent(prompt, AnalysisResponse, ask_questions=False)
+
+        except Exception as e:
+            print(f" ❌ Error: {e}")
+
+if __name__ == "__main__":
+    print("🤖 Agent Client Logging and Token Tracking Demo")
+    print("This example demonstrates the enhanced logging and token usage tracking features.")
+
+    # Check for API key
+    api_key = os.getenv("OPENROUTER_API_KEY")
+    if not api_key:
+        print("\n⚠️ Note: To run with real API calls, set the OPENROUTER_API_KEY environment variable.")
+        print("The examples will show the logging structure but won't make actual API calls.")
+
+    # Run demonstrations
+    demonstrate_agent_with_questions()
+    demonstrate_agent_without_questions()
+    demonstrate_different_iteration_settings()
+
+    print("\n✅ Demo completed!")
+    print("\nTo see the logging in action, run this script with a valid OPENROUTER_API_KEY.")
+    print("You'll see detailed logs showing:")
+    print(" - 🚀 Agent process start")
+    print(" - ❓ Question generation")
+    print(" - 🧠 Thinking process")
+    print(" - 🔍 Quality checks")
+    print(" - ⚡ Improvements")
+    print(" - 📝 Final response generation")
+    print(" - 📊 Complete token usage summary")
mbxai/mcp/server.py CHANGED
@@ -31,7 +31,7 @@ class MCPServer:
         self.app = FastAPI(
             title=self.name,
             description=self.description,
-            version="2.0.3",
+            version="2.0.5",
         )
 
         # Initialize MCP server
mbxai-2.0.3.dist-info/METADATA → mbxai-2.0.5.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: mbxai
-Version: 2.0.3
+Version: 2.0.5
 Summary: MBX AI SDK
 Project-URL: Homepage, https://www.mibexx.de
 Project-URL: Documentation, https://www.mibexx.de
mbxai-2.0.3.dist-info/RECORD → mbxai-2.0.5.dist-info/RECORD RENAMED
@@ -1,10 +1,11 @@
-mbxai/__init__.py,sha256=MdEVRprKcXX-BOYFC6PqE2VHCJoZwSIxMvdu8hSTqyM,407
+mbxai/__init__.py,sha256=LBQ9uRdY1u9DERFu9g7Q-EN6BbL2Q0jQmovQBZN2E6U,407
 mbxai/core.py,sha256=WMvmU9TTa7M_m-qWsUew4xH8Ul6xseCZ2iBCXJTW-Bs,196
 mbxai/agent/__init__.py,sha256=5j3mW2NZtAU1s2w8n833axWBQsxW8U0qKwoQ9JtQZ4k,289
-mbxai/agent/client.py,sha256=wMgFu_sSkMDx9THemsNmvGZZ3dViJkvOS0p0XrcvKn8,18047
-mbxai/agent/models.py,sha256=Mo-bfH6eLVmzws60SYgSqbvnEBkb51T9eG-Fj2HBR18,2217
+mbxai/agent/client.py,sha256=Rt8S2a7WYa674xgK7T_ZralxCZ-WqppeOlcLCljUClk,23648
+mbxai/agent/models.py,sha256=sjBtaAENDABHl8IqTON1gxFFSZIaQYUCBFHB5804_Fw,5780
 mbxai/examples/agent_example.py,sha256=uECWy8QX1IhJMVcdw6EJy6sLcvO8vKgEF_YHJOhZO6Y,5947
 mbxai/examples/agent_iterations_example.py,sha256=xMqZhBWS67EkRkArjOAY2fCgLkQ32Qn9E4CSfEKW4MU,7905
+mbxai/examples/agent_logging_example.py,sha256=su2Ccdfp8aYGCQkZhnNRGbaBd6DZEsYpPoxm7dI2g_o,10162
 mbxai/examples/agent_tool_registration_example.py,sha256=oWm0-d4mdba-VQ3HobiCIR0IHtEDCtJenb8Lnm9QqCw,9108
 mbxai/examples/agent_validation_example.py,sha256=xlEf5Mwq5_Iu8bNU4cuHGZVYvAyZNhO2GMFmOom-CLo,4185
 mbxai/examples/auto_schema_example.py,sha256=ymuJJqqDxYznZT2VN6zVFEM7m_lDuccZ1AKSx-xzLTM,8174
@@ -21,7 +22,7 @@ mbxai/examples/mcp/mcp_server_example.py,sha256=nFfg22Jnc6HMW_ezLO3So1xwDdx2_rIt
 mbxai/mcp/__init__.py,sha256=_ek9iYdYqW5saKetj4qDci11jxesQDiHPJRpHMKkxgU,175
 mbxai/mcp/client.py,sha256=QRzId6o4_WRWVv3rtm8cfZZGaoY_UlaOO-oqNjY-tmw,5219
 mbxai/mcp/example.py,sha256=oaol7AvvZnX86JWNz64KvPjab5gg1VjVN3G8eFSzuaE,2350
-mbxai/mcp/server.py,sha256=eZZQXo4xJSzDbhOVWyUHi9BJi3weldlxjvQptel7J3A,3332
+mbxai/mcp/server.py,sha256=vJeVQpC616KNAwPzZP8FVfHmYnfIYbGq-L8h1MRVIC0,3332
 mbxai/openrouter/__init__.py,sha256=Ito9Qp_B6q-RLGAQcYyTJVWwR2YAZvNqE-HIYXxhtD8,298
 mbxai/openrouter/client.py,sha256=3LD6WDJ8wjo_nefH5d1NJCsrWPvBc_KBf2NsItUoSt8,18302
 mbxai/openrouter/config.py,sha256=Ia93s-auim9Sq71eunVDbn9ET5xX2zusXpV4JBdHAzs,3251
@@ -31,7 +32,7 @@ mbxai/tools/__init__.py,sha256=ogxrHvgJ7OR62Lmd5x9Eh5d2C0jqWyQis7Zy3yKpZ78,218
 mbxai/tools/client.py,sha256=2wFPD-UN3Y2DSyrnqxt2vvFgTYHzUl14_y0r6fhAWmM,17198
 mbxai/tools/example.py,sha256=1HgKK39zzUuwFbnp3f0ThyWVfA_8P28PZcTwaUw5K78,2232
 mbxai/tools/types.py,sha256=OFfM7scDGTm4FOcJA2ecj-fxL1MEBkqPsT3hqCL1Jto,9505
-mbxai-2.0.3.dist-info/METADATA,sha256=9Azd_uWgRny0XZ9EY21rO9GiJJNzVp8aUQ-xn6QBN2w,10018
-mbxai-2.0.3.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-mbxai-2.0.3.dist-info/licenses/LICENSE,sha256=hEyhc4FxwYo3NQ40yNgZ7STqwVk-1_XcTXOnAPbGJAw,1069
-mbxai-2.0.3.dist-info/RECORD,,
+mbxai-2.0.5.dist-info/METADATA,sha256=Wa_VavJcCfmnybtF5oRyECfrYVnjr-5Hjw81Uq-LOZc,10018
+mbxai-2.0.5.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+mbxai-2.0.5.dist-info/licenses/LICENSE,sha256=hEyhc4FxwYo3NQ40yNgZ7STqwVk-1_XcTXOnAPbGJAw,1069
+mbxai-2.0.5.dist-info/RECORD,,
File without changes
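
A note for reading the RECORD entries above: each sha256= value is the urlsafe-base64 encoding of the file's SHA-256 digest with the trailing = padding stripped, per the wheel RECORD format. A short sketch to recompute one locally (the path is illustrative):

    import base64
    import hashlib

    def record_hash(path: str) -> str:
        """Recompute a wheel RECORD hash: urlsafe base64 SHA-256, padding stripped."""
        with open(path, "rb") as f:
            digest = hashlib.sha256(f.read()).digest()
        return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode()

    # For the 2.0.5 wheel this should reproduce, e.g.,
    # record_hash("mbxai/agent/models.py") == "sha256=sjBtaAENDABHl8IqTON1gxFFSZIaQYUCBFHB5804_Fw"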