mbxai 1.6.0__py3-none-any.whl → 2.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
mbxai/__init__.py CHANGED
@@ -2,4 +2,21 @@
  MBX AI package.
  """
 
- __version__ = "1.6.0"
+ from .agent import AgentClient, AgentResponse, Question, Result, AnswerList, Answer
+ from .openrouter import OpenRouterClient
+ from .tools import ToolClient
+ from .mcp import MCPClient
+
+ __version__ = "2.0.1"
+
+ __all__ = [
+     "AgentClient",
+     "AgentResponse",
+     "Question",
+     "Result",
+     "AnswerList",
+     "Answer",
+     "OpenRouterClient",
+     "ToolClient",
+     "MCPClient"
+ ]
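The practical effect of this hunk is that 2.0.1 turns the package root into the public API: in 1.6.0 only __version__ was importable from mbxai. A minimal sketch of the new import surface; sourcing the token from an OPENROUTER_API_KEY environment variable is an assumption borrowed from the bundled example file further down:

import os
from mbxai import AgentClient, OpenRouterClient

# Hypothetical setup: how the token is supplied is up to the caller.
client = OpenRouterClient(token=os.environ["OPENROUTER_API_KEY"])
agent = AgentClient(client)  # wraps any client exposing parse()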
mbxai/agent/__init__.py ADDED
@@ -0,0 +1,8 @@
+ """
+ Agent package for MBX AI.
+ """
+
+ from .client import AgentClient
+ from .models import AgentResponse, Question, Result, AnswerList, Answer, QuestionList, QualityCheck
+
+ __all__ = ["AgentClient", "AgentResponse", "Question", "Result", "AnswerList", "Answer", "QuestionList", "QualityCheck"]
mbxai/agent/client.py ADDED
@@ -0,0 +1,450 @@
+ """
+ Agent client implementation for MBX AI.
+ """
+
+ from typing import Any, Union, Type, Callable
+ import logging
+ import json
+ from pydantic import BaseModel
+
+ from ..openrouter import OpenRouterClient
+ from ..tools import ToolClient
+ from ..mcp import MCPClient
+ from .models import AgentResponse, Question, QuestionList, AnswerList, Result, QualityCheck
+
+ logger = logging.getLogger(__name__)
+
+
+ class AgentClient:
+     """
+     Agent client that wraps other AI clients with a dialog-based thinking process.
+
+     The agent follows a multi-step process:
+     1. Analyze the prompt and generate clarifying questions (if ask_questions=True)
+     2. Wait for user answers or auto-answer questions
+     3. Process the prompt with the available information
+     4. Quality check the result and iterate if needed
+     5. Generate the final response in the requested format
+
+     Requirements:
+     - The wrapped AI client MUST have a 'parse' method for structured responses
+     - All AI interactions use structured Pydantic models for reliable parsing
+     - Supports OpenRouterClient, ToolClient, and MCPClient (all have parse methods)
+
+     Tool Registration:
+     - Provides proxy methods for tool registration when supported by the underlying client
+     - register_tool(): Available with ToolClient and MCPClient
+     - register_mcp_server(): Available with MCPClient only
+     - Raises AttributeError for unsupported clients (e.g., OpenRouterClient)
+
+     Configuration:
+     - max_iterations: Controls how many times the agent will iterate to improve results (default: 2)
+     - Set to 0 to disable quality improvement iterations
+     """
+
+     def __init__(
+         self,
+         ai_client: Union[OpenRouterClient, ToolClient, MCPClient],
+         max_iterations: int = 2
+     ) -> None:
+         """
+         Initialize the AgentClient.
+
+         Args:
+             ai_client: The underlying AI client (OpenRouterClient, ToolClient, or MCPClient)
+             max_iterations: Maximum number of quality improvement iterations (default: 2)
+
+         Raises:
+             ValueError: If the client doesn't support structured responses (no parse method)
+         """
+         if not hasattr(ai_client, 'parse'):
+             raise ValueError(
+                 f"AgentClient requires a client with structured response support (parse method). "
+                 f"The provided client {type(ai_client).__name__} does not have a parse method."
+             )
+
+         if max_iterations < 0:
+             raise ValueError("max_iterations must be non-negative")
+
+         self._ai_client = ai_client
+         self._max_iterations = max_iterations
+         self._agent_sessions: dict[str, dict[str, Any]] = {}
+
+     def register_tool(
+         self,
+         name: str,
+         description: str,
+         function: Callable[..., Any],
+         schema: dict[str, Any] | None = None,
+     ) -> None:
+         """
+         Register a new tool with the underlying AI client.
+
+         This method proxies to the register_tool method of ToolClient or MCPClient.
+
+         Args:
+             name: The name of the tool
+             description: A description of what the tool does
+             function: The function to call when the tool is used
+             schema: The JSON schema for the tool's parameters. If None or empty,
+                 it will be automatically generated from the function signature.
+
+         Raises:
+             AttributeError: If the underlying client doesn't support tool registration (e.g., OpenRouterClient)
+         """
+         if hasattr(self._ai_client, 'register_tool'):
+             self._ai_client.register_tool(name, description, function, schema)
+             logger.debug(f"Registered tool '{name}' with {type(self._ai_client).__name__}")
+         else:
+             raise AttributeError(
+                 f"Tool registration is not supported by {type(self._ai_client).__name__}. "
+                 f"Use ToolClient or MCPClient to register tools."
+             )
+
+     def register_mcp_server(self, name: str, base_url: str) -> None:
+         """
+         Register an MCP server and load its tools.
+
+         This method proxies to the register_mcp_server method of MCPClient.
+
+         Args:
+             name: The name of the MCP server
+             base_url: The base URL of the MCP server
+
+         Raises:
+             AttributeError: If the underlying client doesn't support MCP server registration (e.g., OpenRouterClient, ToolClient)
+         """
+         if hasattr(self._ai_client, 'register_mcp_server'):
+             self._ai_client.register_mcp_server(name, base_url)
+             logger.debug(f"Registered MCP server '{name}' at {base_url} with {type(self._ai_client).__name__}")
+         else:
+             raise AttributeError(
+                 f"MCP server registration is not supported by {type(self._ai_client).__name__}. "
+                 f"Use MCPClient to register MCP servers."
+             )
+
+     def _call_ai_parse(self, messages: list[dict[str, Any]], response_format: Type[BaseModel]) -> Any:
+         """Call the parse method on the AI client."""
+         return self._ai_client.parse(messages, response_format)
+
+     def _extract_parsed_content(self, response: Any, response_format: Type[BaseModel]) -> BaseModel:
+         """Extract the parsed content from the AI response."""
+         if hasattr(response, 'choices') and len(response.choices) > 0:
+             choice = response.choices[0]
+             if hasattr(choice.message, 'parsed') and choice.message.parsed:
+                 return choice.message.parsed
+             elif hasattr(choice.message, 'content'):
+                 # Try to parse the content as JSON
+                 try:
+                     content_dict = json.loads(choice.message.content)
+                     return response_format(**content_dict)
+                 except (json.JSONDecodeError, TypeError):
+                     # If parsing fails, create a default response
+                     if response_format == QuestionList:
+                         return QuestionList(questions=[])
+                     elif response_format == Result:
+                         return Result(result=choice.message.content)
+                     elif response_format == QualityCheck:
+                         return QualityCheck(is_good=True, feedback="")
+                     else:
+                         # For other formats, try to create with content
+                         return response_format(result=choice.message.content)
+
+         # Fallback - create an empty/default response
+         if response_format == QuestionList:
+             return QuestionList(questions=[])
+         elif response_format == Result:
+             return Result(result="No response generated")
+         elif response_format == QualityCheck:
+             return QualityCheck(is_good=True, feedback="")
+         else:
+             return response_format()
+
+     def agent(
+         self,
+         prompt: str,
+         final_response_structure: Type[BaseModel],
+         ask_questions: bool = True
+     ) -> AgentResponse:
+         """
+         Process a prompt through the agent's thinking process.
+
+         Args:
+             prompt: The initial prompt from the user
+             final_response_structure: Pydantic model defining the expected final response format
+             ask_questions: Whether to ask clarifying questions (default: True)
+
+         Returns:
+             AgentResponse containing either questions to ask or the final response
+         """
+         logger.debug(f"Starting agent process with prompt: {prompt[:100]}...")
+
+         # Step 1: Generate questions (if ask_questions is True)
+         if ask_questions:
+             questions_prompt = f"""
+             Understand this prompt and what the user wants to achieve with it:
+             ==========
+             {prompt}
+             ==========
+
+             Think about the useful steps and what information is required for them. First ask for the required information and details that would improve that process, when that is useful for the given case. When it is not useful, return an empty list of questions.
+             Use available tools to gather information or perform actions that would improve your response.
+             Analyze the prompt carefully and determine whether additional information would significantly improve the quality of the response. Only ask questions that are truly necessary and would materially impact the outcome.
+             """
+
+             messages = [{"role": "user", "content": questions_prompt}]
+
+             try:
+                 response = self._call_ai_parse(messages, QuestionList)
+                 question_list = self._extract_parsed_content(response, QuestionList)
+
+                 logger.debug(f"Generated {len(question_list.questions)} questions")
+
+                 # If we have questions, return them to the user
+                 if question_list.questions:
+                     agent_response = AgentResponse(questions=question_list.questions)
+                     # Store the session for continuation
+                     self._agent_sessions[agent_response.agent_id] = {
+                         "original_prompt": prompt,
+                         "final_response_structure": final_response_structure,
+                         "questions": question_list.questions,
+                         "step": "waiting_for_answers"
+                     }
+                     return agent_response
+
+             except Exception as e:
+                 logger.warning(f"Failed to generate questions: {e}. Proceeding without questions.")
+
+         # Steps 2 & 3: No questions or ask_questions=False - proceed directly
+         return self._process_with_answers(prompt, final_response_structure, [])
+
+     def answer_to_agent(self, agent_id: str, answers: AnswerList) -> AgentResponse:
+         """
+         Continue an agent session by providing answers to its questions.
+
+         Args:
+             agent_id: The agent session identifier
+             answers: List of answers to the questions
+
+         Returns:
+             AgentResponse with the final result
+
+         Raises:
+             ValueError: If the agent session is not found or in the wrong state
+         """
+         if agent_id not in self._agent_sessions:
+             raise ValueError(f"Agent session {agent_id} not found")
+
+         session = self._agent_sessions[agent_id]
+         if session["step"] != "waiting_for_answers":
+             raise ValueError(f"Agent session {agent_id} is not waiting for answers")
+
+         # Convert answers to a more usable format
+         answer_dict = {answer.key: answer.answer for answer in answers.answers}
+
+         # Process with the provided answers
+         result = self._process_with_answers(
+             session["original_prompt"],
+             session["final_response_structure"],
+             answer_dict
+         )
+
+         # Clean up the session
+         del self._agent_sessions[agent_id]
+
+         return result
+
+     def _process_with_answers(
+         self,
+         prompt: str,
+         final_response_structure: Type[BaseModel],
+         answers: Union[list, dict[str, str]]
+     ) -> AgentResponse:
+         """
+         Process the prompt with answers through the thinking pipeline.
+
+         Args:
+             prompt: The original prompt
+             final_response_structure: Expected final response structure
+             answers: Answers to questions (empty if no questions were asked)
+
+         Returns:
+             AgentResponse with the final result
+         """
+         # Step 3: Process the prompt with thinking
+         result = self._think_and_process(prompt, answers)
+
+         # Step 4: Quality check and iteration
+         final_result = self._quality_check_and_iterate(prompt, result, answers)
+
+         # Step 5: Generate the final answer in the requested format
+         final_response = self._generate_final_response(prompt, final_result, final_response_structure)
+
+         return AgentResponse(final_response=final_response)
+
+     def _think_and_process(self, prompt: str, answers: Union[list, dict[str, str]]) -> str:
+         """
+         Process the prompt with thinking.
+
+         Args:
+             prompt: The original prompt
+             answers: Answers to questions
+
+         Returns:
+             The AI's result
+         """
+         # Format answers for the prompt
+         answers_text = ""
+         if isinstance(answers, dict) and answers:
+             answers_text = "\n\nAdditional information provided:\n"
+             for key, answer in answers.items():
+                 answers_text += f"- {key}: {answer}\n"
+         elif isinstance(answers, list) and answers:
+             answers_text = f"\n\nAdditional information: {', '.join(answers)}\n"
+
+         thinking_prompt = f"""
+         Think about this prompt, the goal and the steps required to fulfill it:
+         ==========
+         {prompt}
+         ==========
+         {answers_text}
+
+         Consider the prompt carefully, analyze what the user wants to achieve, and think through the best approach to provide a comprehensive and helpful response. Use any available tools to gather information or perform actions that would improve your response.
+
+         Provide your best result for the given prompt.
+         """
+
+         messages = [{"role": "user", "content": thinking_prompt}]
+
+         try:
+             response = self._call_ai_parse(messages, Result)
+             result_obj = self._extract_parsed_content(response, Result)
+             return result_obj.result
+         except Exception as e:
+             logger.error(f"Error in thinking process: {e}")
+             raise RuntimeError(f"Failed to process prompt with AI client: {e}") from e
+
+     def _quality_check_and_iterate(self, prompt: str, result: str, answers: Union[list, dict[str, str]]) -> str:
+         """
+         Check the quality of the result and iterate if needed.
+
+         Args:
+             prompt: The original prompt
+             result: The current result
+             answers: The answers provided
+
+         Returns:
+             The final improved result
+         """
+         current_result = result
+
+         for iteration in range(self._max_iterations):
+             quality_prompt = f"""
+             Given this original prompt:
+             ==========
+             {prompt}
+             ==========
+
+             And this result:
+             ==========
+             {current_result}
+             ==========
+
+             Is this result good and comprehensive, or does it need to be improved? Consider if the response fully addresses the prompt, provides sufficient detail, and would be helpful to the user.
+
+             Evaluate the quality and provide feedback if improvements are needed.
+             """
+
+             messages = [{"role": "user", "content": quality_prompt}]
+
+             try:
+                 response = self._call_ai_parse(messages, QualityCheck)
+                 quality_check = self._extract_parsed_content(response, QualityCheck)
+
+                 if quality_check.is_good:
+                     logger.debug(f"Quality check passed on iteration {iteration}")
+                     break
+
+                 logger.debug(f"Quality check failed on iteration {iteration}: {quality_check.feedback}")
+
+                 # Improve the result
+                 improvement_prompt = f"""
+                 The original prompt was:
+                 ==========
+                 {prompt}
+                 ==========
+
+                 The current result is:
+                 ==========
+                 {current_result}
+                 ==========
+
+                 Feedback for improvement:
+                 ==========
+                 {quality_check.feedback}
+                 ==========
+
+                 Please provide an improved version that addresses the feedback while maintaining the strengths of the current result.
+                 """
+
+                 messages = [{"role": "user", "content": improvement_prompt}]
+                 response = self._call_ai_parse(messages, Result)
+                 result_obj = self._extract_parsed_content(response, Result)
+                 current_result = result_obj.result
+
+             except Exception as e:
+                 logger.warning(f"Error in quality check iteration {iteration}: {e}")
+                 break
+
+         return current_result
+
+     def _generate_final_response(self, prompt: str, result: str, final_response_structure: Type[BaseModel]) -> BaseModel:
+         """
+         Generate the final response in the requested format.
+
+         Args:
+             prompt: The original prompt
+             result: The processed result
+             final_response_structure: The expected response structure
+
+         Returns:
+             The final response in the requested format
+         """
+         final_prompt = f"""
+         Given this original prompt:
+         ==========
+         {prompt}
+         ==========
+
+         And this processed result:
+         ==========
+         {result}
+         ==========
+
+         Generate the final answer in the exact format requested. Make sure the response is well-structured and addresses all aspects of the original prompt.
+         """
+
+         messages = [{"role": "user", "content": final_prompt}]
+
+         try:
+             response = self._call_ai_parse(messages, final_response_structure)
+             return self._extract_parsed_content(response, final_response_structure)
+         except Exception as e:
+             logger.error(f"Error generating final response: {e}")
+             # Fallback - try to create a basic response
+             try:
+                 # If the structure has a 'result' field, use that
+                 if hasattr(final_response_structure, 'model_fields') and 'result' in final_response_structure.model_fields:
+                     return final_response_structure(result=result)
+                 else:
+                     # Try to create with the first field
+                     fields = final_response_structure.model_fields
+                     if fields:
+                         first_field = next(iter(fields.keys()))
+                         return final_response_structure(**{first_field: result})
+                     else:
+                         return final_response_structure()
+             except Exception as fallback_error:
+                 logger.error(f"Fallback response creation failed: {fallback_error}")
+                 # Last resort - return the structure with default values
+                 return final_response_structure()
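Note that AgentClient only duck-types its dependency: the constructor checks for a parse attribute, and _extract_parsed_content reads response.choices[0].message.parsed, falling back to message.content. A minimal sketch of a stub client that satisfies this contract for offline testing; StubClient and its SimpleNamespace response shape are illustrative, not part of mbxai:

from types import SimpleNamespace
from typing import Any, Type
from pydantic import BaseModel

class StubClient:
    """Hypothetical stand-in exposing the parse() contract AgentClient expects."""
    def parse(self, messages: list[dict[str, Any]], response_format: Type[BaseModel]) -> Any:
        # parsed=None steers _extract_parsed_content to the content branch;
        # non-JSON content then yields the documented defaults (an empty
        # QuestionList, a Result wrapping the text, a passing QualityCheck).
        message = SimpleNamespace(parsed=None, content="stub answer")
        return SimpleNamespace(choices=[SimpleNamespace(message=message)])

Driving the full pipeline with it, using the library's own Result model as the final structure:

from mbxai import AgentClient, Result

response = AgentClient(StubClient()).agent("ping", Result, ask_questions=False)
assert response.final_response.result == "stub answer"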
mbxai/agent/models.py ADDED
@@ -0,0 +1,56 @@
+ """
+ Pydantic models for the agent client.
+ """
+
+ from typing import Any, Optional
+ from pydantic import BaseModel, Field
+ import uuid
+
+
+ class Question(BaseModel):
+     """A question for the user to provide more information."""
+     question: str = Field(description="The question to ask the user")
+     key: str = Field(description="A unique key to identify this question")
+     required: bool = Field(default=True, description="Whether this question is required")
+
+
+ class Result(BaseModel):
+     """A simple result wrapper containing just text."""
+     result: str = Field(description="The result text from the AI")
+
+
+ class AgentResponse(BaseModel):
+     """Response from the agent that can contain questions or a final result."""
+     agent_id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Unique identifier for this agent session")
+     questions: list[Question] = Field(default_factory=list, description="List of questions for the user")
+     final_response: Optional[Any] = Field(default=None, description="The final response if processing is complete")
+
+     def has_questions(self) -> bool:
+         """Check if this response has questions that need to be answered."""
+         return len(self.questions) > 0
+
+     def is_complete(self) -> bool:
+         """Check if this response contains a final result."""
+         return self.final_response is not None
+
+
+ class QuestionList(BaseModel):
+     """A list of questions to ask the user."""
+     questions: list[Question] = Field(description="List of questions to ask the user")
+
+
+ class Answer(BaseModel):
+     """An answer to a question."""
+     key: str = Field(description="The key of the question being answered")
+     answer: str = Field(description="The answer to the question")
+
+
+ class AnswerList(BaseModel):
+     """A list of answers from the user."""
+     answers: list[Answer] = Field(description="List of answers to questions")
+
+
+ class QualityCheck(BaseModel):
+     """Result of quality checking the AI response."""
+     is_good: bool = Field(description="Whether the result is good enough")
+     feedback: str = Field(description="Feedback on what could be improved if not good")
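The key field is the joint between these models: agent() stores the generated Question list in the session, and answer_to_agent() collapses the AnswerList into {answer.key: answer.answer}, so each Answer.key must echo the Question.key it responds to. A short sketch of that round trip; the keys are illustrative, matching the ones used in the bundled example below:

from mbxai import Question, Answer, AnswerList

questions = [
    Question(question="Which genre do you prefer?", key="genre_preference"),
    Question(question="Any recent favorites?", key="recent_books", required=False),
]

# One Answer per question, reusing each question's key verbatim.
answers = AnswerList(answers=[
    Answer(key="genre_preference", answer="science fiction and fantasy"),
    Answer(key="recent_books", answer="Dune"),
])
# agent.answer_to_agent(response.agent_id, answers) matches these keys
# against the questions stored in the session.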
@@ -0,0 +1,152 @@
+ """
+ Example usage of the AgentClient.
+ """
+
+ import os
+ from pydantic import BaseModel, Field
+ from mbxai import AgentClient, OpenRouterClient, AnswerList, Answer
+
+
+ class BookRecommendation(BaseModel):
+     """A book recommendation response."""
+     title: str = Field(description="The title of the recommended book")
+     author: str = Field(description="The author of the book")
+     genre: str = Field(description="The genre of the book")
+     reason: str = Field(description="Why this book is recommended")
+     summary: str = Field(description="A brief summary of the book")
+
+
+ class TravelPlan(BaseModel):
+     """A travel plan response."""
+     destination: str = Field(description="The travel destination")
+     duration: str = Field(description="Duration of the trip")
+     activities: list[str] = Field(description="List of recommended activities")
+     budget_estimate: str = Field(description="Estimated budget for the trip")
+     best_time_to_visit: str = Field(description="Best time of year to visit")
+
+
+ def example_with_questions():
+     """Example that demonstrates the agent asking questions."""
+     # Initialize the clients
+     openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
+     # Create agent with custom max_iterations (default is 2)
+     agent = AgentClient(openrouter_client, max_iterations=3)
+
+     # Example 1: Book recommendation with questions
+     prompt = "I want a book recommendation"
+     response = agent.agent(prompt, BookRecommendation, ask_questions=True)
+
+     if response.has_questions():
+         print("The agent has questions for you:")
+         for question in response.questions:
+             print(f"- {question.question}")
+
+         # Simulate answering the questions
+         answers = AnswerList(answers=[
+             Answer(key="genre_preference", answer="I love science fiction and fantasy"),
+             Answer(key="reading_level", answer="I prefer complex, adult novels"),
+             Answer(key="recent_books", answer="I recently read and loved Dune and The Name of the Wind")
+         ])
+
+         # Continue the agent process with the answers
+         final_response = agent.answer_to_agent(response.agent_id, answers)
+
+         if final_response.is_complete():
+             book_rec = final_response.final_response
+             print(f"\nRecommended Book: {book_rec.title} by {book_rec.author}")
+             print(f"Genre: {book_rec.genre}")
+             print(f"Reason: {book_rec.reason}")
+             print(f"Summary: {book_rec.summary}")
+     else:
+         # No questions, direct response
+         book_rec = response.final_response
+         print(f"Recommended Book: {book_rec.title} by {book_rec.author}")
+
+
+ def example_without_questions():
+     """Example that demonstrates the agent without asking questions."""
+     # Initialize the clients
+     openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
+     # Create agent with no quality iterations for faster responses
+     agent = AgentClient(openrouter_client, max_iterations=0)
+
+     # Example 2: Travel plan without questions
+     prompt = "Plan a 5-day trip to Japan for someone interested in culture and food"
+     response = agent.agent(prompt, TravelPlan, ask_questions=False)
+
+     if response.is_complete():
+         travel_plan = response.final_response
+         print(f"\nTravel Plan for {travel_plan.destination}")
+         print(f"Duration: {travel_plan.duration}")
+         print(f"Best time to visit: {travel_plan.best_time_to_visit}")
+         print(f"Budget estimate: {travel_plan.budget_estimate}")
+         print("Recommended activities:")
+         for activity in travel_plan.activities:
+             print(f"- {activity}")
+
+
+ def example_with_tool_client():
+     """Example using AgentClient with ToolClient."""
+     from mbxai import ToolClient
+
+     # Initialize the clients
+     openrouter_client = OpenRouterClient(token=os.getenv("OPENROUTER_API_KEY", "your-token-here"))
+     tool_client = ToolClient(openrouter_client)
+
+     # Create agent with tool client
+     agent = AgentClient(tool_client)
+
+     # Register a simple tool via the agent (proxy method)
+     def get_weather(location: str) -> str:
+         """Get weather information for a location."""
+         # This is a mock implementation
+         return f"The weather in {location} is sunny and 72°F"
+
+     # Use the agent's register_tool proxy method (schema auto-generated!)
+     agent.register_tool(
+         name="get_weather",
+         description="Get current weather for a location",
+         function=get_weather
+         # No schema needed - automatically generated from the function signature!
+     )
+
+     class WeatherResponse(BaseModel):
+         location: str = Field(description="The location")
+         weather: str = Field(description="Weather description")
+         recommendations: list[str] = Field(description="Recommendations based on weather")
+
+     prompt = "What's the weather like in San Francisco and what should I wear?"
+     response = agent.agent(prompt, WeatherResponse, ask_questions=False)
+
+     if response.is_complete():
+         weather_info = response.final_response
+         print(f"\nWeather in {weather_info.location}: {weather_info.weather}")
+         print("Recommendations:")
+         for rec in weather_info.recommendations:
+             print(f"- {rec}")
+
+
+ if __name__ == "__main__":
+     print("=== Agent Client Examples ===\n")
+
+     print("1. Example with questions:")
+     try:
+         example_with_questions()
+     except Exception as e:
+         print(f"Error: {e}")
+
+     print("\n" + "="*50 + "\n")
+
+     print("2. Example without questions:")
+     try:
+         example_without_questions()
+     except Exception as e:
+         print(f"Error: {e}")
+
+     print("\n" + "="*50 + "\n")
+
+     print("3. Example with ToolClient:")
+     try:
+         example_with_tool_client()
+     except Exception as e:
+         print(f"Error: {e}")