hanzo-mcp 0.1.25__py3-none-any.whl → 0.1.30__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of hanzo-mcp might be problematic.
- hanzo_mcp/__init__.py +2 -2
- hanzo_mcp/cli.py +80 -9
- hanzo_mcp/server.py +41 -10
- hanzo_mcp/tools/__init__.py +51 -32
- hanzo_mcp/tools/agent/__init__.py +59 -0
- hanzo_mcp/tools/agent/agent_tool.py +474 -0
- hanzo_mcp/tools/agent/prompt.py +137 -0
- hanzo_mcp/tools/agent/tool_adapter.py +75 -0
- hanzo_mcp/tools/common/__init__.py +17 -0
- hanzo_mcp/tools/common/base.py +216 -0
- hanzo_mcp/tools/common/context.py +7 -3
- hanzo_mcp/tools/common/permissions.py +63 -119
- hanzo_mcp/tools/common/session.py +91 -0
- hanzo_mcp/tools/common/thinking_tool.py +123 -0
- hanzo_mcp/tools/filesystem/__init__.py +85 -5
- hanzo_mcp/tools/filesystem/base.py +113 -0
- hanzo_mcp/tools/filesystem/content_replace.py +287 -0
- hanzo_mcp/tools/filesystem/directory_tree.py +286 -0
- hanzo_mcp/tools/filesystem/edit_file.py +287 -0
- hanzo_mcp/tools/filesystem/get_file_info.py +170 -0
- hanzo_mcp/tools/filesystem/read_files.py +198 -0
- hanzo_mcp/tools/filesystem/search_content.py +275 -0
- hanzo_mcp/tools/filesystem/write_file.py +162 -0
- hanzo_mcp/tools/jupyter/__init__.py +67 -4
- hanzo_mcp/tools/jupyter/base.py +284 -0
- hanzo_mcp/tools/jupyter/edit_notebook.py +295 -0
- hanzo_mcp/tools/jupyter/notebook_operations.py +72 -112
- hanzo_mcp/tools/jupyter/read_notebook.py +165 -0
- hanzo_mcp/tools/project/__init__.py +64 -1
- hanzo_mcp/tools/project/analysis.py +9 -6
- hanzo_mcp/tools/project/base.py +66 -0
- hanzo_mcp/tools/project/project_analyze.py +173 -0
- hanzo_mcp/tools/shell/__init__.py +58 -1
- hanzo_mcp/tools/shell/base.py +148 -0
- hanzo_mcp/tools/shell/command_executor.py +203 -322
- hanzo_mcp/tools/shell/run_command.py +204 -0
- hanzo_mcp/tools/shell/run_script.py +215 -0
- hanzo_mcp/tools/shell/script_tool.py +244 -0
- {hanzo_mcp-0.1.25.dist-info → hanzo_mcp-0.1.30.dist-info}/METADATA +72 -77
- hanzo_mcp-0.1.30.dist-info/RECORD +45 -0
- {hanzo_mcp-0.1.25.dist-info → hanzo_mcp-0.1.30.dist-info}/licenses/LICENSE +2 -2
- hanzo_mcp/tools/common/thinking.py +0 -65
- hanzo_mcp/tools/filesystem/file_operations.py +0 -1050
- hanzo_mcp-0.1.25.dist-info/RECORD +0 -24
- hanzo_mcp-0.1.25.dist-info/zip-safe +0 -1
- {hanzo_mcp-0.1.25.dist-info → hanzo_mcp-0.1.30.dist-info}/WHEEL +0 -0
- {hanzo_mcp-0.1.25.dist-info → hanzo_mcp-0.1.30.dist-info}/entry_points.txt +0 -0
- {hanzo_mcp-0.1.25.dist-info → hanzo_mcp-0.1.30.dist-info}/top_level.txt +0 -0
hanzo_mcp/tools/agent/agent_tool.py
@@ -0,0 +1,474 @@
"""Agent tool implementation for Hanzo MCP.

This module implements the AgentTool that allows Claude to delegate tasks to sub-agents,
enabling concurrent execution of multiple operations and specialized processing.
"""

import json
import time
from typing import Any, final, override

import litellm
from mcp.server.fastmcp import Context as MCPContext
from mcp.server.fastmcp import FastMCP
from openai.types.chat import ChatCompletionMessageParam, ChatCompletionToolParam

from hanzo_mcp.tools.agent.prompt import (
    get_allowed_agent_tools,
    get_default_model,
    get_model_parameters,
    get_system_prompt,
)
from hanzo_mcp.tools.agent.tool_adapter import (
    convert_tools_to_openai_functions,
)
from hanzo_mcp.tools.common.base import BaseTool
from hanzo_mcp.tools.common.context import DocumentContext, ToolContext, create_tool_context
from hanzo_mcp.tools.common.permissions import PermissionManager
from hanzo_mcp.tools.filesystem import get_read_only_filesystem_tools
from hanzo_mcp.tools.jupyter import get_read_only_jupyter_tools
from hanzo_mcp.tools.project import get_project_tools
from hanzo_mcp.tools.shell.command_executor import CommandExecutor


@final
class AgentTool(BaseTool):
    """Tool for delegating tasks to sub-agents.

    The AgentTool allows Claude to create and manage sub-agents for performing
    specialized tasks concurrently, such as code search, analysis, and more.
    """

    @property
    @override
    def name(self) -> str:
        """Get the tool name.

        Returns:
            Tool name
        """
        return "dispatch_agent"

    @property
    @override
    def description(self) -> str:
        """Get the tool description.

        Returns:
            Tool description
        """
        return """Launch one or more agents that can perform tasks using read-only tools.

This tool creates agents for delegating tasks such as multi-step searches, complex analyses,
or other operations that benefit from focused processing. Multiple agents can work concurrently
on independent tasks, improving performance for complex operations.

Each agent works with its own context and provides a response containing the results of its work.
Results from all agents are combined in the final response.

Args:
    prompts: A list of task descriptions, where each item launches an independent agent.

Returns:
    Combined results from all agent executions
"""

    @property
    @override
    def parameters(self) -> dict[str, Any]:
        """Get the parameter specifications for the tool.

        Returns:
            Parameter specifications
        """
        return {
            "properties": {
                "prompts": {
                    "anyOf": [
                        {
                            "type": "array",
                            "items": {"type": "string"},
                            "description": "List of tasks for agents to perform concurrently",
                        },
                        {
                            "type": "string",
                            "description": "Single task for the agent to perform",
                        },
                    ],
                    "description": "Task(s) for agent(s) to perform",
                }
            },
            "required": ["prompts"],
            "type": "object",
        }
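    # Example (illustrative, not part of the release): the `anyOf` schema above
    # accepts either a single string or a list of strings, and `call` below
    # normalizes the string form into a one-element list:
    #   {"prompts": "Find every usage of PermissionManager in hanzo_mcp/tools"}
    #   {"prompts": ["Summarize permissions.py", "List tools in tools/__init__.py"]}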
    @property
    @override
    def required(self) -> list[str]:
        """Get the list of required parameter names.

        Returns:
            List of required parameter names
        """
        return ["prompts"]

    def __init__(
        self,
        document_context: DocumentContext,
        permission_manager: PermissionManager,
        command_executor: CommandExecutor,
        model: str | None = None,
        api_key: str | None = None,
        max_tokens: int | None = None,
        max_iterations: int = 10,
        max_tool_uses: int = 30,
    ) -> None:
        """Initialize the agent tool.

        Args:
            document_context: Document context for tracking file contents
            permission_manager: Permission manager for access control
            command_executor: Command executor for running shell commands
            model: Optional model name override in LiteLLM format (e.g., "openai/gpt-4o")
            api_key: Optional API key for the model provider
            max_tokens: Optional maximum tokens for model responses
            max_iterations: Maximum number of iterations for agent (default: 10)
            max_tool_uses: Maximum number of total tool uses for agent (default: 30)
        """
        self.document_context = document_context
        self.permission_manager = permission_manager
        self.command_executor = command_executor
        self.model_override = model
        self.api_key_override = api_key
        self.max_tokens_override = max_tokens
        self.max_iterations = max_iterations
        self.max_tool_uses = max_tool_uses
        self.available_tools: list[BaseTool] = []
        self.available_tools.extend(get_read_only_filesystem_tools(self.document_context, self.permission_manager))
        self.available_tools.extend(get_read_only_jupyter_tools(self.document_context, self.permission_manager))
        self.available_tools.extend(get_project_tools(self.document_context, self.permission_manager, self.command_executor))

    @override
    async def call(self, ctx: MCPContext, **params: Any) -> str:
        """Execute the tool with the given parameters.

        Args:
            ctx: MCP context
            **params: Tool parameters

        Returns:
            Tool execution result
        """
        start_time = time.time()

        # Create tool context
        tool_ctx = create_tool_context(ctx)
        tool_ctx.set_tool_info(self.name)

        # Extract parameters
        prompts = params.get("prompts")
        if prompts is None:
            await tool_ctx.error("Parameter 'prompts' is required but was not provided")
            return "Error: Parameter 'prompts' is required but was not provided"

        if not isinstance(prompts, (list, str)):
            await tool_ctx.error("Parameter 'prompts' must be a string or an array of strings")
            return "Error: Parameter 'prompts' must be a string or an array of strings"

        if isinstance(prompts, str):
            prompts = [prompts]

        if not prompts:  # Empty list
            await tool_ctx.error("At least one prompt must be provided in the array")
            return "Error: At least one prompt must be provided in the array"

        # Check for empty strings in the list
        if any(not isinstance(p, str) or not p.strip() for p in prompts):
            await tool_ctx.error("All prompts must be non-empty strings")
            return "Error: All prompts must be non-empty strings"

        # Always use _execute_multiple_agents, treating a single agent as a special case
        await tool_ctx.info(f"Launching {len(prompts)} agent{'s' if len(prompts) > 1 else ''}")
        result = await self._execute_multiple_agents(prompts, tool_ctx)

        # Calculate execution time
        execution_time = time.time() - start_time

        # Format the result
        formatted_result = self._format_result(result, execution_time, len(prompts))

        # Log completion
        await tool_ctx.info(f"Agent execution completed in {execution_time:.2f}s")

        return formatted_result

    async def _execute_multiple_agents(self, prompts: list[str], tool_ctx: ToolContext) -> str:
        """Execute multiple agents concurrently with the given prompts.

        Args:
            prompts: List of prompts for the agents
            tool_ctx: Tool context for logging

        Returns:
            Combined agent execution results
        """
        # Get available tools for the agents (do this once to avoid redundant work)
        agent_tools = get_allowed_agent_tools(
            self.available_tools,
            self.permission_manager,
        )

        # Convert tools to OpenAI format (do this once to avoid redundant work)
        openai_tools = convert_tools_to_openai_functions(agent_tools)

        # Log execution start
        await tool_ctx.info(f"Starting execution of {len(prompts)} agent{'s' if len(prompts) > 1 else ''}")

        # Create lists to store the tasks and results
        tasks = []
        results = []

        # Handle exceptions for individual agent preparations
        for i, prompt in enumerate(prompts):
            try:
                # Create the system prompt for this agent
                system_prompt = get_system_prompt(
                    agent_tools,
                    self.permission_manager,
                )

                # Create the agent coroutine and collect it for concurrent execution
                await tool_ctx.info(f"Launching agent {i+1}/{len(prompts)}: {prompt[:50]}...")
                task = self._execute_agent_with_tools(
                    system_prompt,
                    prompt,
                    agent_tools,
                    openai_tools,
                    tool_ctx,
                )
                tasks.append(task)
            except Exception as e:
                # Log and add an error result
                error_message = f"Error preparing agent {i+1}: {str(e)}"
                await tool_ctx.error(error_message)
                results.append(f"Agent {i+1} Error: {error_message}")

        # Execute all pending tasks concurrently
        if tasks:
            import asyncio

            try:
                # Wait for all tasks to complete
                completed_results = await asyncio.gather(*tasks, return_exceptions=True)

                # Process results, handling any exceptions
                for i, result in enumerate(completed_results):
                    if isinstance(result, Exception):
                        results.append(f"Agent {i+1} Error: {str(result)}")
                    else:
                        # For the multi-agent case, add an agent number prefix
                        if len(prompts) > 1:
                            results.append(f"Agent {i+1} Result:\n{result}")
                        else:
                            # For the single-agent case, just add the result
                            results.append(result)
            except Exception as e:
                # Handle any unexpected exceptions during gathering
                error_message = f"Error executing agents concurrently: {str(e)}"
                await tool_ctx.error(error_message)
                results.append(f"Error: {error_message}")

        # Combine results - different handling for single vs multi-agent
        if len(prompts) > 1:
            # Multi-agent: add a separator between results
            combined_result = "\n\n" + "\n\n---\n\n".join(results)
        else:
            # Single agent: just return the result
            combined_result = results[0] if results else "No results returned from agent"

        return combined_result
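    # Note (illustrative, not part of the release): gather(*tasks,
    # return_exceptions=True) above keeps one failing agent from cancelling its
    # siblings; failures come back in result order as exception objects, e.g.:
    #   await asyncio.gather(ok(), boom(), return_exceptions=True)
    #   -> ["done", RuntimeError("agent failed")]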
    async def _execute_agent_with_tools(
        self,
        system_prompt: str,
        user_prompt: str,
        available_tools: list[BaseTool],
        openai_tools: list[ChatCompletionToolParam],
        tool_ctx: ToolContext,
    ) -> str:
        """Execute an agent with tool handling.

        Args:
            system_prompt: System prompt for the agent
            user_prompt: User prompt for the agent
            available_tools: List of available tools
            openai_tools: List of tools in OpenAI format
            tool_ctx: Tool context for logging

        Returns:
            Agent execution result
        """
        # Get the model name and parameters
        model = get_default_model(self.model_override)
        params = get_model_parameters(max_tokens=self.max_tokens_override)

        # Initialize messages
        messages: list[ChatCompletionMessageParam] = []
        messages.append({"role": "system", "content": system_prompt})
        messages.append({"role": "user", "content": user_prompt})

        # Track tool usage for metrics
        tool_usage: dict[str, int] = {}
        total_tool_use_count = 0
        iteration_count = 0
        max_tool_uses = self.max_tool_uses  # Safety limit to prevent infinite loops
        max_iterations = self.max_iterations  # Maximum number of iterations for safety

        # Execute until the agent completes or reaches a limit
        while total_tool_use_count < max_tool_uses and iteration_count < max_iterations:
            iteration_count += 1
            await tool_ctx.info(f"Calling model (iteration {iteration_count})...")

            try:
                # Configure model parameters based on capabilities
                completion_params = {
                    "model": model,
                    "messages": messages,
                    "tools": openai_tools,
                    "tool_choice": "auto",
                    "temperature": params["temperature"],
                    "timeout": params["timeout"],
                }

                if self.api_key_override:
                    completion_params["api_key"] = self.api_key_override

                # Add max_tokens if provided
                if params.get("max_tokens"):
                    completion_params["max_tokens"] = params.get("max_tokens")

                # Make the model call
                response = litellm.completion(
                    **completion_params  # pyright: ignore
                )

                if len(response.choices) == 0:  # pyright: ignore
                    raise ValueError("No response choices returned")

                message = response.choices[0].message  # pyright: ignore

                # Add the message to the conversation history
                messages.append(message)  # pyright: ignore

                # If there are no tool calls, we're done
                if not message.tool_calls:
                    return message.content or "Agent completed with no response."

                # Process tool calls
                tool_call_count = len(message.tool_calls)
                await tool_ctx.info(f"Processing {tool_call_count} tool calls")

                for tool_call in message.tool_calls:
                    total_tool_use_count += 1
                    function_name = tool_call.function.name

                    # Track usage
                    tool_usage[function_name] = tool_usage.get(function_name, 0) + 1

                    # Log tool usage
                    await tool_ctx.info(f"Agent using tool: {function_name}")

                    # Parse the arguments
                    try:
                        function_args = json.loads(tool_call.function.arguments)
                    except json.JSONDecodeError:
                        function_args = {}

                    # Find the matching tool
                    tool = next((t for t in available_tools if t.name == function_name), None)
                    if not tool:
                        tool_result = f"Error: Tool '{function_name}' not found"
                    else:
                        try:
                            tool_result = await tool.call(ctx=tool_ctx.mcp_context, **function_args)
                        except Exception as e:
                            tool_result = f"Error executing {function_name}: {str(e)}"

                    # Add the tool result to messages
                    messages.append(
                        {
                            "role": "tool",
                            "tool_call_id": tool_call.id,
                            "content": tool_result,
                        }
                    )

                # Log progress
                await tool_ctx.info(f"Processed {len(message.tool_calls)} tool calls. Total: {total_tool_use_count}")

            except Exception as e:
                await tool_ctx.error(f"Error in model call: {str(e)}")
                # Avoid trying to JSON serialize message objects
                await tool_ctx.error(f"Message count: {len(messages)}")
                return f"Error in agent execution: {str(e)}"

        # If we've reached a limit, add a warning and get a final response
        if total_tool_use_count >= max_tool_uses or iteration_count >= max_iterations:
            limit_type = "tool usage" if total_tool_use_count >= max_tool_uses else "iterations"
            await tool_ctx.info(f"Reached maximum {limit_type} limit. Getting final response.")

            messages.append(
                {
                    "role": "system",
                    "content": f"You have reached the maximum number of {limit_type}. Please provide your final response.",
                }
            )

            try:
                # Make a final call to get the result
                final_response = litellm.completion(
                    model=model,
                    messages=messages,
                    temperature=params["temperature"],
                    timeout=params["timeout"],
                    max_tokens=params.get("max_tokens"),
                )

                return final_response.choices[0].message.content or f"Agent reached {limit_type} limit without a response."  # pyright: ignore
            except Exception as e:
                await tool_ctx.error(f"Error in final model call: {str(e)}")
                return f"Error in final response: {str(e)}"

        # Should not reach here, but just in case
        return "Agent execution completed after maximum iterations."

    def _format_result(self, result: str, execution_time: float, agent_count: int) -> str:
        """Format the agent result with metrics.

        Args:
            result: Raw result from agent(s)
            execution_time: Execution time in seconds
            agent_count: Number of agents used

        Returns:
            Formatted result with metrics
        """
        # Different format based on agent count
        if agent_count > 1:
            # Multi-agent response
            return f"""Multi-agent execution completed in {execution_time:.2f} seconds ({agent_count} agents).

{result}
"""
        else:
            # Single-agent response
            return f"""Agent execution completed in {execution_time:.2f} seconds.

AGENT RESPONSE:
{result}
"""

    @override
    def register(self, mcp_server: FastMCP) -> None:
        """Register the tool with the MCP server."""
        tool_self = self  # Create a reference to self for use in the closure

        @mcp_server.tool(name=self.name, description=self.mcp_description)
        async def dispatch_agent(ctx: MCPContext, prompts: list[str] | str) -> str:
            return await tool_self.call(ctx, prompts=prompts)
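For context, a minimal sketch of wiring AgentTool into a FastMCP server. The AgentTool arguments follow the signature above; the constructors shown for DocumentContext, PermissionManager, and CommandExecutor are assumptions, since their definitions live in other files of this diff.

from mcp.server.fastmcp import FastMCP

from hanzo_mcp.tools.agent.agent_tool import AgentTool
from hanzo_mcp.tools.common.context import DocumentContext
from hanzo_mcp.tools.common.permissions import PermissionManager
from hanzo_mcp.tools.shell.command_executor import CommandExecutor

server = FastMCP("hanzo-mcp")
permissions = PermissionManager()  # assumed zero-argument constructor

agent_tool = AgentTool(
    document_context=DocumentContext(),  # assumed zero-argument constructor
    permission_manager=permissions,
    command_executor=CommandExecutor(permissions),  # assumed signature
    model="openai/gpt-4o",
    max_iterations=10,
    max_tool_uses=30,
)
agent_tool.register(server)  # exposes the tool to clients as `dispatch_agent`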
hanzo_mcp/tools/agent/prompt.py
@@ -0,0 +1,137 @@
"""Prompt generation utilities for agent tool.

This module provides functions for generating effective prompts for sub-agents,
including filtering tools based on permissions and formatting system instructions.
"""

import os
from typing import Any

from hanzo_mcp.tools.common.base import BaseTool
from hanzo_mcp.tools.common.permissions import PermissionManager


def get_allowed_agent_tools(
    tools: list[BaseTool],
    permission_manager: PermissionManager,
) -> list[BaseTool]:
    """Filter tools available to the agent based on permissions.

    Args:
        tools: List of available tools
        permission_manager: Permission manager for checking tool access

    Returns:
        Filtered list of tools available to the agent
    """
    # Keep all tools except the agent tool itself (avoid recursion)
    filtered_tools = [tool for tool in tools if tool.name != "agent"]

    return filtered_tools


def get_system_prompt(
    tools: list[BaseTool],
    permission_manager: PermissionManager,
) -> str:
    """Generate the system prompt for a sub-agent.

    Args:
        tools: List of available tools
        permission_manager: Permission manager for checking tool access

    Returns:
        System prompt for the sub-agent
    """
    # Get filtered tools
    filtered_tools = get_allowed_agent_tools(tools, permission_manager)

    # Extract tool names for display
    tool_names = ", ".join(f"`{tool.name}`" for tool in filtered_tools)

    # Base system prompt
    system_prompt = f"""You are a sub-agent assistant with access to these tools: {tool_names}.

GUIDELINES:
1. You work autonomously - you cannot ask follow-up questions
2. You have access to read-only tools - you cannot modify files or execute commands
3. Your response is returned directly to the main assistant, not the user
4. Be concise and focus on the specific task assigned
5. When relevant, share file names and code snippets relevant to the query
6. Any file paths you return in your final response MUST be absolute. DO NOT use relative paths.

RESPONSE FORMAT:
- Begin with a summary of findings
- Include relevant details and context
- Organize information logically
- End with clear conclusions
"""

    return system_prompt


def get_default_model(model_override: str | None = None) -> str:
    """Get the default model for agent execution.

    Args:
        model_override: Optional model override string in LiteLLM format (e.g., "openai/gpt-4o")

    Returns:
        Model identifier string with provider prefix
    """
    # Use the model override if provided
    if model_override:
        # In testing mode, or when using a test model, return it as-is
        if model_override.startswith("test-model") or "TEST_MODE" in os.environ:
            return model_override

        # If the model already has a provider prefix, return it as-is
        if "/" in model_override:
            return model_override

        # Otherwise, add the default provider prefix
        provider = os.environ.get("AGENT_PROVIDER", "openai")
        return f"{provider}/{model_override}"

    # Fall back to environment variables
    model = os.environ.get("AGENT_MODEL", "gpt-4o")

    # Special cases for tests
    if model.startswith("test-model") or (model == "gpt-4o" and "TEST_MODE" in os.environ):
        return model

    provider = os.environ.get("AGENT_PROVIDER", "openai")

    # Only add a provider prefix if it's not already in the model name
    if "/" not in model and provider != "openai":
        return f"{provider}/{model}"
    elif "/" not in model:
        return f"openai/{model}"
    else:
        # Model already has a provider prefix
        return model
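# Resolution examples (illustrative; assumes TEST_MODE is unset and no other
# AGENT_* variables are set):
#   get_default_model("anthropic/claude-3-opus")  -> "anthropic/claude-3-opus"
#   get_default_model("gpt-4o-mini")              -> "openai/gpt-4o-mini"
#   get_default_model()                           -> "openai/gpt-4o"
#   get_default_model() with AGENT_MODEL=mistral-large, AGENT_PROVIDER=mistral
#                                                 -> "mistral/mistral-large"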
def get_model_parameters(max_tokens: int | None = None) -> dict[str, Any]:
    """Get model parameters from environment variables.

    Args:
        max_tokens: Optional maximum tokens parameter override

    Returns:
        Dictionary of model parameters
    """
    params = {
        "temperature": float(os.environ.get("AGENT_TEMPERATURE", "0.7")),
        "timeout": int(os.environ.get("AGENT_API_TIMEOUT", "60")),
    }

    # Add max_tokens if provided, or if set via environment variable
    if max_tokens is not None:
        params["max_tokens"] = max_tokens
    elif os.environ.get("AGENT_MAX_TOKENS"):
        params["max_tokens"] = int(os.environ.get("AGENT_MAX_TOKENS", "1000"))

    return params
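Taken together, get_default_model and get_model_parameters make the agent configurable purely through environment variables. A small sketch exercising get_model_parameters (values are illustrative; the defaults are temperature 0.7 and a 60-second timeout):

import os

from hanzo_mcp.tools.agent.prompt import get_model_parameters

os.environ["AGENT_TEMPERATURE"] = "0.2"
os.environ["AGENT_API_TIMEOUT"] = "120"
os.environ["AGENT_MAX_TOKENS"] = "2048"

print(get_model_parameters())
# {'temperature': 0.2, 'timeout': 120, 'max_tokens': 2048}

# An explicit argument takes precedence over AGENT_MAX_TOKENS:
print(get_model_parameters(max_tokens=512)["max_tokens"])  # 512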
hanzo_mcp/tools/agent/tool_adapter.py
@@ -0,0 +1,75 @@
"""Tool adapters for converting between MCP tools and OpenAI tools.

This module handles conversion between MCP tool formats and OpenAI function
formats, making MCP tools available to the OpenAI API, and processing tool inputs
and outputs for agent execution.
"""

import litellm
from openai.types import FunctionParameters
from openai.types.chat import ChatCompletionToolParam

from hanzo_mcp.tools.common.base import BaseTool


def convert_tools_to_openai_functions(tools: list[BaseTool]) -> list[ChatCompletionToolParam]:
    """Convert MCP tools to OpenAI function format.

    Args:
        tools: List of MCP tools

    Returns:
        List of tools formatted for the OpenAI API
    """
    openai_tools: list[ChatCompletionToolParam] = []
    for tool in tools:
        openai_tool: ChatCompletionToolParam = {
            "type": "function",
            "function": {
                "name": tool.name,
                "description": tool.description,
                "parameters": convert_tool_parameters(tool),
            },
        }
        openai_tools.append(openai_tool)
    return openai_tools


def convert_tool_parameters(tool: BaseTool) -> FunctionParameters:
    """Convert tool parameters to OpenAI format.

    Args:
        tool: MCP tool

    Returns:
        Parameter schema in OpenAI format
    """
    # Start with a copy of the parameters
    params = tool.parameters.copy()

    # Ensure the schema has the right shape for OpenAI
    if "properties" not in params:
        params["properties"] = {}

    if "type" not in params:
        params["type"] = "object"

    if "required" not in params:
        params["required"] = tool.required

    return params


def supports_parallel_function_calling(model: str) -> bool:
    """Check if a model supports parallel function calling.

    Args:
        model: Model identifier in LiteLLM format (e.g., "openai/gpt-4-turbo-preview")

    Returns:
        True if the model supports parallel function calling, False otherwise
    """
    # Use litellm's built-in parallel function calling support check
    return litellm.supports_parallel_function_calling(model=model)
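For context, a sketch of what the adapter produces for the AgentTool defined earlier. The output shape is inferred from convert_tools_to_openai_functions above, and the model in the final check is the one named in the docstring's example.

from hanzo_mcp.tools.agent.tool_adapter import (
    convert_tools_to_openai_functions,
    supports_parallel_function_calling,
)

openai_tools = convert_tools_to_openai_functions([agent_tool])  # agent_tool from the earlier sketch
# Each entry has the standard OpenAI tools shape:
# {
#     "type": "function",
#     "function": {
#         "name": "dispatch_agent",
#         "description": "Launch one or more agents ...",
#         "parameters": {"type": "object", "properties": {...}, "required": ["prompts"]},
#     },
# }

print(supports_parallel_function_calling("openai/gpt-4-turbo-preview"))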