hanzo-mcp 0.7.2__py3-none-any.whl → 0.7.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of hanzo-mcp might be problematic.

Files changed (35)
  1. hanzo_mcp/cli.py +10 -0
  2. hanzo_mcp/prompts/__init__.py +43 -0
  3. hanzo_mcp/prompts/example_custom_prompt.py +40 -0
  4. hanzo_mcp/prompts/tool_explorer.py +603 -0
  5. hanzo_mcp/tools/__init__.py +52 -51
  6. hanzo_mcp/tools/agent/__init__.py +3 -16
  7. hanzo_mcp/tools/agent/agent_tool.py +365 -525
  8. hanzo_mcp/tools/agent/agent_tool_v1_deprecated.py +641 -0
  9. hanzo_mcp/tools/agent/network_tool.py +3 -5
  10. hanzo_mcp/tools/agent/swarm_tool.py +447 -349
  11. hanzo_mcp/tools/agent/swarm_tool_v1_deprecated.py +535 -0
  12. hanzo_mcp/tools/agent/tool_adapter.py +21 -2
  13. hanzo_mcp/tools/common/forgiving_edit.py +24 -14
  14. hanzo_mcp/tools/common/permissions.py +8 -0
  15. hanzo_mcp/tools/filesystem/__init__.py +5 -5
  16. hanzo_mcp/tools/filesystem/{symbols.py → ast_tool.py} +8 -8
  17. hanzo_mcp/tools/filesystem/batch_search.py +2 -2
  18. hanzo_mcp/tools/filesystem/directory_tree.py +8 -1
  19. hanzo_mcp/tools/filesystem/find.py +1 -0
  20. hanzo_mcp/tools/filesystem/grep.py +11 -2
  21. hanzo_mcp/tools/filesystem/read.py +8 -1
  22. hanzo_mcp/tools/filesystem/search_tool.py +1 -1
  23. hanzo_mcp/tools/jupyter/__init__.py +5 -1
  24. hanzo_mcp/tools/jupyter/base.py +2 -2
  25. hanzo_mcp/tools/jupyter/jupyter.py +89 -18
  26. hanzo_mcp/tools/search/find_tool.py +49 -8
  27. hanzo_mcp/tools/shell/base_process.py +7 -1
  28. hanzo_mcp/tools/shell/streaming_command.py +34 -1
  29. {hanzo_mcp-0.7.2.dist-info → hanzo_mcp-0.7.6.dist-info}/METADATA +8 -1
  30. {hanzo_mcp-0.7.2.dist-info → hanzo_mcp-0.7.6.dist-info}/RECORD +33 -31
  31. hanzo_mcp/tools/agent/agent_tool_v2.py +0 -492
  32. hanzo_mcp/tools/agent/swarm_tool_v2.py +0 -654
  33. {hanzo_mcp-0.7.2.dist-info → hanzo_mcp-0.7.6.dist-info}/WHEEL +0 -0
  34. {hanzo_mcp-0.7.2.dist-info → hanzo_mcp-0.7.6.dist-info}/entry_points.txt +0 -0
  35. {hanzo_mcp-0.7.2.dist-info → hanzo_mcp-0.7.6.dist-info}/top_level.txt +0 -0
--- /dev/null
+++ b/hanzo_mcp/tools/agent/agent_tool_v1_deprecated.py
@@ -0,0 +1,641 @@
+"""Agent tool implementation for Hanzo AI.
+
+This module implements the AgentTool that allows Claude to delegate tasks to sub-agents,
+enabling concurrent execution of multiple operations and specialized processing.
+"""
+
+import asyncio
+import json
+import re
+import time
+from collections.abc import Iterable
+from typing import Annotated, TypedDict, Unpack, final, override
+
+# Import litellm with warnings suppressed
+import warnings
+with warnings.catch_warnings():
+    warnings.simplefilter("ignore", DeprecationWarning)
+    import litellm
+from mcp.server.fastmcp import Context as MCPContext
+from mcp.server import FastMCP
+from openai.types.chat import ChatCompletionMessageParam, ChatCompletionToolParam
+from pydantic import Field
+
+from hanzo_mcp.tools.agent.prompt import (
+    get_allowed_agent_tools,
+    get_default_model,
+    get_model_parameters,
+    get_system_prompt,
+)
+from hanzo_mcp.tools.agent.tool_adapter import (
+    convert_tools_to_openai_functions,
+)
+from hanzo_mcp.tools.agent.clarification_protocol import (
+    AgentClarificationMixin,
+    ClarificationType,
+)
+from hanzo_mcp.tools.agent.clarification_tool import ClarificationTool
+from hanzo_mcp.tools.agent.critic_tool import CriticTool, CriticProtocol
+from hanzo_mcp.tools.agent.review_tool import ReviewTool, ReviewProtocol
+from hanzo_mcp.tools.agent.iching_tool import IChingTool
+from hanzo_mcp.tools.common.base import BaseTool
+from hanzo_mcp.tools.common.batch_tool import BatchTool
+from hanzo_mcp.tools.common.context import (
+    ToolContext,
+    create_tool_context,
+)
+from hanzo_mcp.tools.common.permissions import PermissionManager
+from hanzo_mcp.tools.filesystem import get_read_only_filesystem_tools, Edit, MultiEdit
+from hanzo_mcp.tools.jupyter import get_read_only_jupyter_tools
+
+Prompt = Annotated[
+    str,
+    Field(
+        description="Task for the agent to perform (must include absolute paths starting with /)",
+        min_length=1,
+    ),
+]
+
+
+class AgentToolParams(TypedDict, total=False):
+    """Parameters for the AgentTool.
+
+    Attributes:
+        prompts: Task(s) for the agent to perform (must include absolute paths starting with /)
+    """
+
+    prompts: str | list[str]
+
+
+@final
+class AgentTool(AgentClarificationMixin, BaseTool):
+    """Tool for delegating tasks to sub-agents.
+
+    The AgentTool allows Claude to create and manage sub-agents for performing
+    specialized tasks concurrently, such as code search, analysis, and more.
+
+    Agents can request clarification from the main loop up to once per task.
+    """
+
+    @property
+    @override
+    def name(self) -> str:
+        """Get the tool name.
+
+        Returns:
+            Tool name
+        """
+        return "agent"
+
+    @property
+    @override
+    def description(self) -> str:
+        """Get the tool description.
+
+        Returns:
+            Tool description
+        """
+        # TODO: Add glob when it is implemented
+        at = [t.name for t in self.available_tools]
+
+        return f"""Launch a new agent that has access to the following tools: {at}. When you are searching for a keyword or file and are not confident that you will find the right match in the first few tries, use the Agent tool to perform the search for you.
+
+When to use the Agent tool:
+- If you are searching for a keyword like \"config\" or \"logger\", or for questions like \"which file does X?\", the Agent tool is strongly recommended
+- When you need to perform edits across multiple files based on search results
+- When you need to delegate complex file modification tasks
+
+When NOT to use the Agent tool:
+- If you want to read a specific file path, use the read or glob tool instead of the Agent tool, to find the match more quickly
+- If you are searching for a specific class definition like \"class Foo\", use the glob tool instead, to find the match more quickly
+- If you are searching for code within a specific file or set of 2-3 files, use the read tool instead of the Agent tool, to find the match more quickly
+- Writing code and running bash commands (use other tools for that)
+- Other tasks that are not related to searching for a keyword or file
+
+Usage notes:
+1. Launch multiple agents concurrently whenever possible, to maximize performance; to do that, use a single message with multiple tool uses
+2. When the agent is done, it will return a single message back to you. The result returned by the agent is not visible to the user. To show the user the result, you should send a text message back to the user with a concise summary of the result.
+3. Each agent invocation is stateless. You will not be able to send additional messages to the agent, nor will the agent be able to communicate with you outside of its final report. Therefore, your prompt should contain a highly detailed task description for the agent to perform autonomously and you should specify exactly what information the agent should return back to you in its final and only message to you.
+4. The agent's outputs should generally be trusted
+5. Clearly tell the agent whether you expect it to write code or just to do research (search, file reads, web fetches, etc.), since it is not aware of the user's intent"""
+
+    def __init__(
+        self,
+        permission_manager: PermissionManager,
+        model: str | None = None,
+        api_key: str | None = None,
+        base_url: str | None = None,
+        max_tokens: int | None = None,
+        max_iterations: int = 10,
+        max_tool_uses: int = 30,
+    ) -> None:
+        """Initialize the agent tool.
+
+        Args:
+
+            permission_manager: Permission manager for access control
+            model: Optional model name override in LiteLLM format (e.g., "openai/gpt-4o")
+            api_key: Optional API key for the model provider
+            base_url: Optional base URL for the model provider API endpoint
+            max_tokens: Optional maximum tokens for model responses
+            max_iterations: Maximum number of iterations for agent (default: 10)
+            max_tool_uses: Maximum number of total tool uses for agent (default: 30)
+        """
+
+        self.permission_manager = permission_manager
+        self.model_override = model
+        self.api_key_override = api_key
+        self.base_url_override = base_url
+        self.max_tokens_override = max_tokens
+        self.max_iterations = max_iterations
+        self.max_tool_uses = max_tool_uses
+        self.available_tools: list[BaseTool] = []
+        self.available_tools.extend(
+            get_read_only_filesystem_tools(self.permission_manager)
+        )
+        self.available_tools.extend(
+            get_read_only_jupyter_tools(self.permission_manager)
+        )
+
+        # Always add edit tools - agents should have edit access
+        self.available_tools.append(Edit(self.permission_manager))
+        self.available_tools.append(MultiEdit(self.permission_manager))
+
+        # Add clarification tool for agents
+        self.available_tools.append(ClarificationTool())
+
+        # Add critic tool for agents (devil's advocate)
+        self.available_tools.append(CriticTool())
+
+        # Add review tool for agents (balanced review)
+        self.available_tools.append(ReviewTool())
+
+        # Add I Ching tool for creative guidance
+        self.available_tools.append(IChingTool())
+
+        self.available_tools.append(
+            BatchTool({t.name: t for t in self.available_tools})
+        )
+
+        # Initialize protocols
+        self.critic_protocol = CriticProtocol()
+        self.review_protocol = ReviewProtocol()
+
+    @override
+    async def call(
+        self,
+        ctx: MCPContext,
+        **params: Unpack[AgentToolParams],
+    ) -> str:
+        """Execute the tool with the given parameters.
+
+        Args:
+            ctx: MCP context
+            **params: Tool parameters
+
+        Returns:
+            Tool execution result
+        """
+        start_time = time.time()
+
+        # Create tool context
+        tool_ctx = create_tool_context(ctx)
+        await tool_ctx.set_tool_info(self.name)
+
+        # Extract and validate parameters
+        prompts = params.get("prompts")
+
+        if prompts is None:
+            await tool_ctx.error("No prompts provided")
+            return """Error: At least one prompt must be provided.
+
+IMPORTANT REMINDER FOR CLAUDE:
+The dispatch_agent tool requires prompts parameter. Please provide either:
+- A single prompt as a string
+- Multiple prompts as an array of strings
+
+Each prompt must contain absolute paths starting with /.
+Example of correct usage:
+- prompts: "Search for all instances of the 'config' variable in /Users/bytedance/project/hanzo-mcp"
+- prompts: ["Find files in /path/to/project", "Search code in /path/to/src"]"""
+
+        # Handle both string and list inputs
+        if isinstance(prompts, str):
+            prompt_list = [prompts]
+        elif isinstance(prompts, list):
+            if not prompts:
+                await tool_ctx.error("Empty prompts list provided")
+                return "Error: At least one prompt must be provided when using a list."
+            if not all(isinstance(p, str) for p in prompts):
+                await tool_ctx.error("All prompts must be strings")
+                return "Error: All prompts in the list must be strings."
+            prompt_list = prompts
+        else:
+            await tool_ctx.error("Invalid prompts parameter type")
+            return "Error: Parameter 'prompts' must be a string or an array of strings."
+
+        # Validate absolute paths in all prompts
+        absolute_path_pattern = r"/(?:[^/\s]+/)*[^/\s]+"
+        for prompt in prompt_list:
+            if not re.search(absolute_path_pattern, prompt):
+                await tool_ctx.error(f"Prompt does not contain absolute path: {prompt[:50]}...")
+                return """Error: All prompts must contain at least one absolute path.
+
+IMPORTANT REMINDER FOR CLAUDE:
+When using the dispatch_agent tool, always include absolute paths in your prompts.
+Example of correct usage:
+- "Search for all instances of the 'config' variable in /Users/bytedance/project/hanzo-mcp"
+- "Find files that import the database module in /Users/bytedance/project/hanzo-mcp/src"
+
+The agent cannot access files without knowing their absolute locations."""
+
+        # Execute agent(s) - always use _execute_multiple_agents for list inputs
+        if len(prompt_list) == 1:
+            await tool_ctx.info("Launching agent")
+            result = await self._execute_multiple_agents(prompt_list, tool_ctx)
+            execution_time = time.time() - start_time
+            formatted_result = f"""Agent execution completed in {execution_time:.2f} seconds.
+
+AGENT RESPONSE:
+{result}"""
+            await tool_ctx.info(f"Agent execution completed in {execution_time:.2f}s")
+            return formatted_result
+        else:
+            await tool_ctx.info(f"Launching {len(prompt_list)} agents in parallel")
+            result = await self._execute_multiple_agents(prompt_list, tool_ctx)
+            execution_time = time.time() - start_time
+            formatted_result = f"""Multi-agent execution completed in {execution_time:.2f} seconds ({len(prompt_list)} agents).
+
+AGENT RESPONSES:
+{result}"""
+            await tool_ctx.info(f"Multi-agent execution completed in {execution_time:.2f}s")
+            return formatted_result
+
+    async def _execute_agent(self, prompt: str, tool_ctx: ToolContext) -> str:
+        """Execute a single agent with the given prompt.
+
+        Args:
+            prompt: The task prompt for the agent
+            tool_ctx: Tool context for logging
+
+        Returns:
+            Agent execution result
+        """
+        # Get available tools for the agent
+        agent_tools = get_allowed_agent_tools(
+            self.available_tools,
+            self.permission_manager,
+        )
+
+        # Convert tools to OpenAI format
+        openai_tools = convert_tools_to_openai_functions(agent_tools)
+
+        # Log execution start
+        await tool_ctx.info("Starting agent execution")
+
+        # Create a result container
+        result = ""
+
+        try:
+            # Create system prompt for this agent
+            system_prompt = get_system_prompt(
+                agent_tools,
+                self.permission_manager,
+            )
+
+            # Execute agent
+            await tool_ctx.info(f"Executing agent task: {prompt[:50]}...")
+            result = await self._execute_agent_with_tools(
+                system_prompt, prompt, agent_tools, openai_tools, tool_ctx
+            )
+        except Exception as e:
+            # Log and return error result
+            error_message = f"Error executing agent: {str(e)}"
+            await tool_ctx.error(error_message)
+            return f"Error: {error_message}"
+
+        return result if result else "No results returned from agent"
+
+    async def _execute_multiple_agents(self, prompts: list[str], tool_ctx: ToolContext) -> str:
+        """Execute multiple agents concurrently.
+
+        Args:
+            prompts: List of prompts for the agents
+            tool_ctx: Tool context for logging
+
+        Returns:
+            Combined results from all agents
+        """
+        # Get available tools for the agents
+        agent_tools = get_allowed_agent_tools(
+            self.available_tools,
+            self.permission_manager,
+        )
+
+        # Convert tools to OpenAI format
+        openai_tools = convert_tools_to_openai_functions(agent_tools)
+
+        # Create system prompt for the agents
+        system_prompt = get_system_prompt(
+            agent_tools,
+            self.permission_manager,
+        )
+
+        # Create tasks for parallel execution
+        tasks = []
+        for i, prompt in enumerate(prompts):
+            await tool_ctx.info(f"Creating agent task {i+1}: {prompt[:50]}...")
+            task = self._execute_agent_with_tools(
+                system_prompt, prompt, agent_tools, openai_tools, tool_ctx
+            )
+            tasks.append(task)
+
+        # Execute all agents concurrently
+        await tool_ctx.info(f"Executing {len(tasks)} agents in parallel")
+        results = await asyncio.gather(*tasks, return_exceptions=True)
+
+        # Handle single agent case
+        if len(results) == 1:
+            if isinstance(results[0], Exception):
+                await tool_ctx.error(f"Agent execution failed: {str(results[0])}")
+                return f"Error: {str(results[0])}"
+            return results[0]
+
+        # Format results for multiple agents
+        formatted_results = []
+        for i, result in enumerate(results):
+            if isinstance(result, Exception):
+                formatted_results.append(f"Agent {i+1} Error:\n{str(result)}")
+                await tool_ctx.error(f"Agent {i+1} failed: {str(result)}")
+            else:
+                formatted_results.append(f"Agent {i+1} Result:\n{result}")
+
+        return "\n\n---\n\n".join(formatted_results)
+
+    async def _execute_agent_with_tools(
+        self,
+        system_prompt: str,
+        user_prompt: str,
+        available_tools: list[BaseTool],
+        openai_tools: list[ChatCompletionToolParam],
+        tool_ctx: ToolContext,
+    ) -> str:
+        """Execute agent with tool handling.
+
+        Args:
+            system_prompt: System prompt for the agent
+            user_prompt: User prompt for the agent
+            available_tools: List of available tools
+            openai_tools: List of tools in OpenAI format
+            tool_ctx: Tool context for logging
+
+        Returns:
+            Agent execution result
+        """
+        # Get model parameters and name
+        model = get_default_model(self.model_override)
+        params = get_model_parameters(max_tokens=self.max_tokens_override)
+
+        # Initialize messages
+        messages: Iterable[ChatCompletionMessageParam] = []
+        messages.append({"role": "system", "content": system_prompt})
+        messages.append({"role": "user", "content": user_prompt})
+
+        # Track tool usage for metrics
+        tool_usage = {}
+        total_tool_use_count = 0
+        iteration_count = 0
+        max_tool_uses = self.max_tool_uses  # Safety limit to prevent infinite loops
+        max_iterations = (
+            self.max_iterations
+        )  # Add a maximum number of iterations for safety
+
+        # Execute until the agent completes or reaches the limit
+        while total_tool_use_count < max_tool_uses and iteration_count < max_iterations:
+            iteration_count += 1
+            await tool_ctx.info(f"Calling model (iteration {iteration_count})...")
+
+            try:
+                # Configure model parameters based on capabilities
+                completion_params = {
+                    "model": model,
+                    "messages": messages,
+                    "tools": openai_tools,
+                    "tool_choice": "auto",
+                    "temperature": params["temperature"],
+                    "timeout": params["timeout"],
+                }
+
+                if self.api_key_override:
+                    completion_params["api_key"] = self.api_key_override
+
+                # Add max_tokens if provided
+                if params.get("max_tokens"):
+                    completion_params["max_tokens"] = params.get("max_tokens")
+
+                # Add base_url if provided
+                if self.base_url_override:
+                    completion_params["base_url"] = self.base_url_override
+
+                # Make the model call
+                response = litellm.completion(
+                    **completion_params  # pyright: ignore
+                )
+
+                if len(response.choices) == 0:  # pyright: ignore
+                    raise ValueError("No response choices returned")
+
+                message = response.choices[0].message  # pyright: ignore
+
+                # Add message to conversation history
+                messages.append(message)  # pyright: ignore
+
+                # If no tool calls, we're done
+                if not message.tool_calls:
+                    return message.content or "Agent completed with no response."
+
+                # Process tool calls
+                tool_call_count = len(message.tool_calls)
+                await tool_ctx.info(f"Processing {tool_call_count} tool calls")
+
+                for tool_call in message.tool_calls:
+                    total_tool_use_count += 1
+                    function_name = tool_call.function.name
+
+                    # Track usage
+                    tool_usage[function_name] = tool_usage.get(function_name, 0) + 1
+
+                    # Log tool usage
+                    await tool_ctx.info(f"Agent using tool: {function_name}")
+
+                    # Parse the arguments
+                    try:
+                        function_args = json.loads(tool_call.function.arguments)
+                    except json.JSONDecodeError:
+                        function_args = {}
+
+                    # Find the matching tool
+                    tool = next(
+                        (t for t in available_tools if t.name == function_name), None
+                    )
+                    if not tool:
+                        tool_result = f"Error: Tool '{function_name}' not found"
+                    # Special handling for clarification requests
+                    elif function_name == "request_clarification":
+                        try:
+                            # Extract clarification parameters
+                            request_type = function_args.get("type", "ADDITIONAL_INFO")
+                            question = function_args.get("question", "")
+                            context = function_args.get("context", {})
+                            options = function_args.get("options", None)
+
+                            # Convert string type to enum
+                            clarification_type = ClarificationType[request_type]
+
+                            # Request clarification
+                            answer = await self.request_clarification(
+                                request_type=clarification_type,
+                                question=question,
+                                context=context,
+                                options=options
+                            )
+
+                            tool_result = self.format_clarification_in_output(question, answer)
+                        except Exception as e:
+                            tool_result = f"Error processing clarification: {str(e)}"
+                    # Special handling for critic requests
+                    elif function_name == "critic":
+                        try:
+                            # Extract critic parameters
+                            review_type = function_args.get("review_type", "GENERAL")
+                            work_description = function_args.get("work_description", "")
+                            code_snippets = function_args.get("code_snippets", None)
+                            file_paths = function_args.get("file_paths", None)
+                            specific_concerns = function_args.get("specific_concerns", None)
+
+                            # Request critical review
+                            tool_result = self.critic_protocol.request_review(
+                                review_type=review_type,
+                                work_description=work_description,
+                                code_snippets=code_snippets,
+                                file_paths=file_paths,
+                                specific_concerns=specific_concerns
+                            )
+                        except Exception as e:
+                            tool_result = f"Error processing critic review: {str(e)}"
+                    # Special handling for review requests
+                    elif function_name == "review":
+                        try:
+                            # Extract review parameters
+                            focus = function_args.get("focus", "GENERAL")
+                            work_description = function_args.get("work_description", "")
+                            code_snippets = function_args.get("code_snippets", None)
+                            file_paths = function_args.get("file_paths", None)
+                            context = function_args.get("context", None)
+
+                            # Request balanced review
+                            tool_result = self.review_protocol.request_review(
+                                focus=focus,
+                                work_description=work_description,
+                                code_snippets=code_snippets,
+                                file_paths=file_paths,
+                                context=context
+                            )
+                        except Exception as e:
+                            tool_result = f"Error processing review: {str(e)}"
+                    else:
+                        try:
+                            tool_result = await tool.call(
+                                ctx=tool_ctx.mcp_context, **function_args
+                            )
+                        except Exception as e:
+                            tool_result = f"Error executing {function_name}: {str(e)}"
+
+                    await tool_ctx.info(
+                        f"tool {function_name} run with args {function_args} and return {tool_result[: min(100, len(tool_result))]}"
+                    )
+                    # Add the tool result to messages
+                    messages.append(
+                        {
+                            "role": "tool",
+                            "tool_call_id": tool_call.id,
+                            "name": function_name,
+                            "content": tool_result,
+                        }
+                    )
+
+                # Log progress
+                await tool_ctx.info(
+                    f"Processed {len(message.tool_calls)} tool calls. Total: {total_tool_use_count}"
+                )
+
+            except Exception as e:
+                await tool_ctx.error(f"Error in model call: {str(e)}")
+                # Avoid trying to JSON serialize message objects
+                await tool_ctx.error(f"Message count: {len(messages)}")
+                return f"Error in agent execution: {str(e)}"
+
+        # If we've reached the limit, add a warning and get final response
+        if total_tool_use_count >= max_tool_uses or iteration_count >= max_iterations:
+            messages.append(
+                {
+                    "role": "system",
+                    "content": "You have reached the maximum iteration. Please provide your final response.",
+                }
+            )
+
+            try:
+                # Make a final call to get the result
+                final_response = litellm.completion(
+                    model=model,
+                    messages=messages,
+                    temperature=params["temperature"],
+                    timeout=params["timeout"],
+                    max_tokens=params.get("max_tokens"),
+                )
+
+                return (
+                    final_response.choices[0].message.content
+                    or "Agent reached max iteration limit without a response."
+                )  # pyright: ignore
+            except Exception as e:
+                await tool_ctx.error(f"Error in final model call: {str(e)}")
+                return f"Error in final response: {str(e)}"
+
+        # Should not reach here but just in case
+        return "Agent execution completed after maximum iterations."
+
+    def _format_result(self, result: str, execution_time: float) -> str:
+        """Format agent result with metrics.
+
+        Args:
+            result: Raw result from agent
+            execution_time: Execution time in seconds
+
+        Returns:
+            Formatted result with metrics
+        """
+        return f"""Agent execution completed in {execution_time:.2f} seconds.
+
+AGENT RESPONSE:
+{result}
+"""
+
+    @override
+    def register(self, mcp_server: FastMCP) -> None:
+        """Register this agent tool with the MCP server.
+
+        Creates a wrapper function with explicitly defined parameters that match
+        the tool's parameter schema and registers it with the MCP server.
+
+        Args:
+            mcp_server: The FastMCP server instance
+        """
+        tool_self = self  # Create a reference to self for use in the closure
+
+        @mcp_server.tool(name=self.name, description=self.description)
+        async def dispatch_agent(
+            prompts: str | list[str],
+            ctx: MCPContext
+        ) -> str:
+            return await tool_self.call(ctx, prompts=prompts)
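
For orientation, the following is a minimal, hypothetical sketch (not part of this diff) of how the AgentTool added above might be wired into a FastMCP server. It assumes PermissionManager can be constructed without arguments and that LiteLLM can reach a model provider via environment variables; only the names visible in the diff and file list are used.

# Hypothetical usage sketch for the deprecated AgentTool added in this release.
from mcp.server import FastMCP

from hanzo_mcp.tools.agent.agent_tool_v1_deprecated import AgentTool
from hanzo_mcp.tools.common.permissions import PermissionManager

server = FastMCP("hanzo-agent-demo")

# Assumption: PermissionManager() accepts no required arguments here.
permissions = PermissionManager()

agent_tool = AgentTool(
    permission_manager=permissions,
    model="openai/gpt-4o",  # LiteLLM-format override, per the __init__ docstring
    max_iterations=10,      # defaults shown in the diff
    max_tool_uses=30,
)

# register() wraps call() in a dispatch_agent(prompts, ctx) tool on the server.
agent_tool.register(server)

if __name__ == "__main__":
    server.run()
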
--- a/hanzo_mcp/tools/agent/network_tool.py
+++ b/hanzo_mcp/tools/agent/network_tool.py
@@ -68,7 +68,7 @@ class NetworkTool(BaseTool):
             default_mode: Default execution mode
             cluster_endpoint: Optional cluster endpoint
         """
-        super().__init__(permission_manager)
+        self.permission_manager = permission_manager
         self.default_mode = default_mode
         self.cluster_endpoint = cluster_endpoint or os.environ.get(
             "HANZO_CLUSTER_ENDPOINT",
@@ -207,15 +207,13 @@ class NetworkTool(BaseTool):
 
         return json.dumps(results, indent=2)
 
-    @classmethod
-    def register(cls, server: FastMCP, permission_manager: PermissionManager):
+    def register(self, server: FastMCP):
         """Register the network tool with the server.
 
         Args:
             server: FastMCP server instance
-            permission_manager: Permission manager
         """
-        tool = cls(permission_manager=permission_manager)
+        tool = self
 
         @server.tool(
             name=tool.name,