hanzo-mcp 0.6.12__py3-none-any.whl → 0.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of hanzo-mcp might be problematic.
- hanzo_mcp/__init__.py +2 -2
- hanzo_mcp/analytics/__init__.py +5 -0
- hanzo_mcp/analytics/posthog_analytics.py +364 -0
- hanzo_mcp/cli.py +5 -5
- hanzo_mcp/cli_enhanced.py +7 -7
- hanzo_mcp/cli_plugin.py +91 -0
- hanzo_mcp/config/__init__.py +1 -1
- hanzo_mcp/config/settings.py +70 -7
- hanzo_mcp/config/tool_config.py +20 -6
- hanzo_mcp/dev_server.py +3 -3
- hanzo_mcp/prompts/project_system.py +1 -1
- hanzo_mcp/server.py +40 -3
- hanzo_mcp/server_enhanced.py +69 -0
- hanzo_mcp/tools/__init__.py +140 -31
- hanzo_mcp/tools/agent/__init__.py +85 -4
- hanzo_mcp/tools/agent/agent_tool.py +104 -6
- hanzo_mcp/tools/agent/agent_tool_v2.py +459 -0
- hanzo_mcp/tools/agent/clarification_protocol.py +220 -0
- hanzo_mcp/tools/agent/clarification_tool.py +68 -0
- hanzo_mcp/tools/agent/claude_cli_tool.py +125 -0
- hanzo_mcp/tools/agent/claude_desktop_auth.py +508 -0
- hanzo_mcp/tools/agent/cli_agent_base.py +191 -0
- hanzo_mcp/tools/agent/code_auth.py +436 -0
- hanzo_mcp/tools/agent/code_auth_tool.py +194 -0
- hanzo_mcp/tools/agent/codex_cli_tool.py +123 -0
- hanzo_mcp/tools/agent/critic_tool.py +376 -0
- hanzo_mcp/tools/agent/gemini_cli_tool.py +128 -0
- hanzo_mcp/tools/agent/grok_cli_tool.py +128 -0
- hanzo_mcp/tools/agent/iching_tool.py +380 -0
- hanzo_mcp/tools/agent/network_tool.py +273 -0
- hanzo_mcp/tools/agent/prompt.py +62 -20
- hanzo_mcp/tools/agent/review_tool.py +433 -0
- hanzo_mcp/tools/agent/swarm_tool.py +535 -0
- hanzo_mcp/tools/agent/swarm_tool_v2.py +594 -0
- hanzo_mcp/tools/common/__init__.py +15 -1
- hanzo_mcp/tools/common/base.py +5 -4
- hanzo_mcp/tools/common/batch_tool.py +103 -11
- hanzo_mcp/tools/common/config_tool.py +2 -2
- hanzo_mcp/tools/common/context.py +2 -2
- hanzo_mcp/tools/common/context_fix.py +26 -0
- hanzo_mcp/tools/common/critic_tool.py +196 -0
- hanzo_mcp/tools/common/decorators.py +208 -0
- hanzo_mcp/tools/common/enhanced_base.py +106 -0
- hanzo_mcp/tools/common/fastmcp_pagination.py +369 -0
- hanzo_mcp/tools/common/forgiving_edit.py +243 -0
- hanzo_mcp/tools/common/mode.py +116 -0
- hanzo_mcp/tools/common/mode_loader.py +105 -0
- hanzo_mcp/tools/common/paginated_base.py +230 -0
- hanzo_mcp/tools/common/paginated_response.py +307 -0
- hanzo_mcp/tools/common/pagination.py +226 -0
- hanzo_mcp/tools/common/permissions.py +1 -1
- hanzo_mcp/tools/common/personality.py +936 -0
- hanzo_mcp/tools/common/plugin_loader.py +287 -0
- hanzo_mcp/tools/common/stats.py +4 -4
- hanzo_mcp/tools/common/tool_list.py +4 -1
- hanzo_mcp/tools/common/truncate.py +101 -0
- hanzo_mcp/tools/common/validation.py +1 -1
- hanzo_mcp/tools/config/__init__.py +3 -1
- hanzo_mcp/tools/config/config_tool.py +1 -1
- hanzo_mcp/tools/config/mode_tool.py +209 -0
- hanzo_mcp/tools/database/__init__.py +1 -1
- hanzo_mcp/tools/editor/__init__.py +1 -1
- hanzo_mcp/tools/filesystem/__init__.py +48 -14
- hanzo_mcp/tools/filesystem/ast_multi_edit.py +562 -0
- hanzo_mcp/tools/filesystem/batch_search.py +3 -3
- hanzo_mcp/tools/filesystem/diff.py +2 -2
- hanzo_mcp/tools/filesystem/directory_tree_paginated.py +338 -0
- hanzo_mcp/tools/filesystem/rules_tool.py +235 -0
- hanzo_mcp/tools/filesystem/{unified_search.py → search_tool.py} +12 -12
- hanzo_mcp/tools/filesystem/{symbols_unified.py → symbols_tool.py} +104 -5
- hanzo_mcp/tools/filesystem/watch.py +3 -2
- hanzo_mcp/tools/jupyter/__init__.py +2 -2
- hanzo_mcp/tools/jupyter/jupyter.py +1 -1
- hanzo_mcp/tools/llm/__init__.py +3 -3
- hanzo_mcp/tools/llm/llm_tool.py +648 -143
- hanzo_mcp/tools/lsp/__init__.py +5 -0
- hanzo_mcp/tools/lsp/lsp_tool.py +512 -0
- hanzo_mcp/tools/mcp/__init__.py +2 -2
- hanzo_mcp/tools/mcp/{mcp_unified.py → mcp_tool.py} +3 -3
- hanzo_mcp/tools/memory/__init__.py +76 -0
- hanzo_mcp/tools/memory/knowledge_tools.py +518 -0
- hanzo_mcp/tools/memory/memory_tools.py +456 -0
- hanzo_mcp/tools/search/__init__.py +6 -0
- hanzo_mcp/tools/search/find_tool.py +581 -0
- hanzo_mcp/tools/search/unified_search.py +953 -0
- hanzo_mcp/tools/shell/__init__.py +11 -6
- hanzo_mcp/tools/shell/auto_background.py +203 -0
- hanzo_mcp/tools/shell/base_process.py +57 -29
- hanzo_mcp/tools/shell/bash_session_executor.py +1 -1
- hanzo_mcp/tools/shell/{bash_unified.py → bash_tool.py} +18 -34
- hanzo_mcp/tools/shell/command_executor.py +2 -2
- hanzo_mcp/tools/shell/{npx_unified.py → npx_tool.py} +16 -33
- hanzo_mcp/tools/shell/open.py +2 -2
- hanzo_mcp/tools/shell/{process_unified.py → process_tool.py} +1 -1
- hanzo_mcp/tools/shell/run_command_windows.py +1 -1
- hanzo_mcp/tools/shell/streaming_command.py +594 -0
- hanzo_mcp/tools/shell/uvx.py +47 -2
- hanzo_mcp/tools/shell/uvx_background.py +47 -2
- hanzo_mcp/tools/shell/{uvx_unified.py → uvx_tool.py} +16 -33
- hanzo_mcp/tools/todo/__init__.py +14 -19
- hanzo_mcp/tools/todo/todo.py +22 -1
- hanzo_mcp/tools/vector/__init__.py +1 -1
- hanzo_mcp/tools/vector/infinity_store.py +2 -2
- hanzo_mcp/tools/vector/project_manager.py +1 -1
- hanzo_mcp/types.py +23 -0
- hanzo_mcp-0.7.0.dist-info/METADATA +516 -0
- hanzo_mcp-0.7.0.dist-info/RECORD +180 -0
- {hanzo_mcp-0.6.12.dist-info → hanzo_mcp-0.7.0.dist-info}/entry_points.txt +1 -0
- hanzo_mcp/tools/common/palette.py +0 -344
- hanzo_mcp/tools/common/palette_loader.py +0 -108
- hanzo_mcp/tools/config/palette_tool.py +0 -179
- hanzo_mcp/tools/llm/llm_unified.py +0 -851
- hanzo_mcp-0.6.12.dist-info/METADATA +0 -339
- hanzo_mcp-0.6.12.dist-info/RECORD +0 -135
- hanzo_mcp-0.6.12.dist-info/licenses/LICENSE +0 -21
- {hanzo_mcp-0.6.12.dist-info → hanzo_mcp-0.7.0.dist-info}/WHEEL +0 -0
- {hanzo_mcp-0.6.12.dist-info → hanzo_mcp-0.7.0.dist-info}/top_level.txt +0 -0
hanzo_mcp/tools/shell/streaming_command.py
ADDED
@@ -0,0 +1,594 @@
"""Streaming command execution with disk-based logging and session management."""

import asyncio
import json
import os
import re
import shutil
import subprocess
import tempfile
import time
import uuid
from datetime import datetime, timedelta
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union

from hanzo_mcp.tools.common.base import BaseTool
from hanzo_mcp.tools.shell.base_process import BaseProcessTool


class StreamingCommandTool(BaseProcessTool):
    """Execute commands with disk-based streaming and session persistence.

    Features:
    - All output streamed directly to disk (no memory usage)
    - Session-based organization of logs
    - Easy continuation/resumption of output
    - Forgiving parameter handling for AI usage
    - Automatic session detection from MCP context
    """

    name = "streaming_command"
    description = "Run commands with disk-based output streaming and easy resumption"

    # Base directory for all session data
    SESSION_BASE_DIR = Path.home() / ".hanzo" / "sessions"

    # Chunk size for streaming (25k tokens ≈ 100KB)
    STREAM_CHUNK_SIZE = 100_000

    # Session retention
    SESSION_RETENTION_DAYS = 30

    def __init__(self):
        """Initialize the streaming command tool."""
        super().__init__()
        self.session_id = self._get_or_create_session()
        self.session_dir = self.SESSION_BASE_DIR / self.session_id
        self.session_dir.mkdir(parents=True, exist_ok=True)

        # Create subdirectories
        self.commands_dir = self.session_dir / "commands"
        self.commands_dir.mkdir(exist_ok=True)

        # Session metadata file
        self.session_meta_file = self.session_dir / "session.json"
        self._update_session_metadata()

        # Cleanup old sessions on init
        self._cleanup_old_sessions()

    def _get_or_create_session(self) -> str:
        """Get session ID from MCP context or create a new one.

        Returns:
            Session ID string
        """
        # Try to get from environment (MCP might set this)
        session_id = os.environ.get("MCP_SESSION_ID")

        if not session_id:
            # Try to get from Claude Desktop session marker
            claude_session = os.environ.get("CLAUDE_SESSION_ID")
            if claude_session:
                session_id = f"claude_{claude_session}"
            else:
                # Generate new session ID with timestamp
                timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
                session_id = f"session_{timestamp}_{uuid.uuid4().hex[:8]}"

        return session_id

    def _update_session_metadata(self) -> None:
        """Update session metadata file."""
        metadata = {
            "session_id": self.session_id,
            "created": datetime.now().isoformat(),
            "last_accessed": datetime.now().isoformat(),
            "mcp_context": {
                "session_id": os.environ.get("MCP_SESSION_ID"),
                "claude_session": os.environ.get("CLAUDE_SESSION_ID"),
                "user": os.environ.get("USER"),
            }
        }

        # Merge with existing metadata if present
        if self.session_meta_file.exists():
            try:
                with open(self.session_meta_file, "r") as f:
                    existing = json.load(f)
                    metadata["created"] = existing.get("created", metadata["created"])
            except Exception:
                pass

        with open(self.session_meta_file, "w") as f:
            json.dump(metadata, f, indent=2)

    def _cleanup_old_sessions(self) -> None:
        """Remove sessions older than retention period."""
        if not self.SESSION_BASE_DIR.exists():
            return

        cutoff = datetime.now() - timedelta(days=self.SESSION_RETENTION_DAYS)

        for session_dir in self.SESSION_BASE_DIR.iterdir():
            if not session_dir.is_dir():
                continue

            meta_file = session_dir / "session.json"
            if meta_file.exists():
                try:
                    with open(meta_file, "r") as f:
                        meta = json.load(f)
                    last_accessed = datetime.fromisoformat(meta.get("last_accessed", ""))
                    if last_accessed < cutoff:
                        shutil.rmtree(session_dir)
                except Exception:
                    # If we can't read metadata, check directory mtime
                    if datetime.fromtimestamp(session_dir.stat().st_mtime) < cutoff:
                        shutil.rmtree(session_dir)

    def _normalize_command_ref(self, ref: Union[str, int, None]) -> Optional[str]:
        """Normalize various command reference formats.

        Args:
            ref: Command reference - can be:
                - Full command ID (UUID)
                - Short ID (first 8 chars)
                - Index number (1, 2, 3...)
                - "last" or "latest"
                - None

        Returns:
            Full command ID or None
        """
        if not ref:
            return None

        ref_str = str(ref).strip().lower()

        # Handle special cases
        if ref_str in ["last", "latest", "recent"]:
            # Get most recent command
            commands = list(self.commands_dir.glob("*/metadata.json"))
            if not commands:
                return None
            latest = max(commands, key=lambda p: p.stat().st_mtime)
            return latest.parent.name

        # Handle numeric index (1-based for user friendliness)
        if ref_str.isdigit():
            index = int(ref_str) - 1
            commands = sorted(self.commands_dir.glob("*/metadata.json"),
                              key=lambda p: p.stat().st_mtime)
            if 0 <= index < len(commands):
                return commands[index].parent.name
            return None

        # Handle short ID (first 8 chars)
        if len(ref_str) >= 8:
            # Could be short or full ID
            for cmd_dir in self.commands_dir.iterdir():
                if cmd_dir.name.startswith(ref_str):
                    return cmd_dir.name

        return None

    async def run(
        self,
        command: Optional[str] = None,
        cmd: Optional[str] = None,  # Alias for command
        working_dir: Optional[str] = None,
        cwd: Optional[str] = None,  # Alias for working_dir
        timeout: Optional[Union[int, str]] = None,
        continue_from: Optional[Union[str, int]] = None,
        resume: Optional[Union[str, int]] = None,  # Alias for continue_from
        from_byte: Optional[Union[int, str]] = None,
        chunk_size: Optional[Union[int, str]] = None,
    ) -> Dict[str, Any]:
        """Execute or continue reading a command with maximum forgiveness.

        Args:
            command/cmd: The command to execute (either works)
            working_dir/cwd: Directory to run in (either works)
            timeout: Timeout in seconds (accepts int or string)
            continue_from/resume: Continue reading output from a command
            from_byte: Specific byte position to read from
            chunk_size: Custom chunk size for this read

        Returns:
            Command output with metadata for easy continuation
        """
        # Normalize parameters for maximum forgiveness
        command = command or cmd
        working_dir = working_dir or cwd
        continue_from = continue_from or resume

        # Convert string numbers to int
        if isinstance(timeout, str) and timeout.isdigit():
            timeout = int(timeout)
        if isinstance(from_byte, str) and from_byte.isdigit():
            from_byte = int(from_byte)
        if isinstance(chunk_size, str) and chunk_size.isdigit():
            chunk_size = int(chunk_size)

        chunk_size = chunk_size or self.STREAM_CHUNK_SIZE

        # Handle continuation
        if continue_from:
            return await self._continue_reading(continue_from, from_byte, chunk_size)

        # Need a command for new execution
        if not command:
            return {
                "error": "No command provided. Use 'command' or 'cmd' parameter.",
                "hint": "To continue a previous command, use 'continue_from' with command ID or number.",
                "recent_commands": await self._get_recent_commands(),
            }

        # Execute new command
        return await self._execute_new_command(command, working_dir, timeout, chunk_size)

    async def _execute_new_command(
        self,
        command: str,
        working_dir: Optional[str],
        timeout: Optional[int],
        chunk_size: int,
    ) -> Dict[str, Any]:
        """Execute a new command with disk-based streaming."""
        # Create command directory
        cmd_id = str(uuid.uuid4())
        cmd_dir = self.commands_dir / cmd_id
        cmd_dir.mkdir()

        # File paths
        output_file = cmd_dir / "output.log"
        error_file = cmd_dir / "error.log"
        metadata_file = cmd_dir / "metadata.json"

        # Save metadata
        metadata = {
            "command_id": cmd_id,
            "command": command,
            "working_dir": working_dir or os.getcwd(),
            "start_time": datetime.now().isoformat(),
            "timeout": timeout,
            "status": "running",
        }

        with open(metadata_file, "w") as f:
            json.dump(metadata, f, indent=2)

        # Start process with output redirection
        try:
            process = await asyncio.create_subprocess_shell(
                command,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
                cwd=working_dir,
            )

            # Create tasks for streaming stdout and stderr to files
            async def stream_to_file(stream, file_path):
                """Stream from async pipe to file."""
                with open(file_path, "wb") as f:
                    while True:
                        chunk = await stream.read(8192)
                        if not chunk:
                            break
                        f.write(chunk)
                        f.flush()  # Ensure immediate write

            # Start streaming tasks
            stdout_task = asyncio.create_task(stream_to_file(process.stdout, output_file))
            stderr_task = asyncio.create_task(stream_to_file(process.stderr, error_file))

            # Wait for initial output or timeout
            start_time = time.time()
            initial_timeout = min(timeout or 5, 5)  # Wait max 5 seconds for initial output

            while time.time() - start_time < initial_timeout:
                if output_file.stat().st_size > 0 or error_file.stat().st_size > 0:
                    break
                await asyncio.sleep(0.1)

            # Read initial chunk
            output_content = ""
            error_content = ""

            if output_file.exists() and output_file.stat().st_size > 0:
                with open(output_file, "r", errors="replace") as f:
                    output_content = f.read(chunk_size)

            if error_file.exists() and error_file.stat().st_size > 0:
                with open(error_file, "r", errors="replace") as f:
                    error_content = f.read(1000)  # Just first 1KB of errors

            # Check if process completed quickly
            try:
                await asyncio.wait_for(process.wait(), timeout=0.1)
                exit_code = process.returncode
                status = "completed"
            except asyncio.TimeoutError:
                exit_code = None
                status = "running"

            # Update metadata
            metadata["status"] = status
            if exit_code is not None:
                metadata["exit_code"] = exit_code
            metadata["end_time"] = datetime.now().isoformat()

            with open(metadata_file, "w") as f:
                json.dump(metadata, f, indent=2)

            # Build response
            result = {
                "command_id": cmd_id,
                "short_id": cmd_id[:8],
                "command": command,
                "output": output_content,
                "status": status,
                "bytes_read": len(output_content),
                "session_path": str(cmd_dir),
            }

            if error_content:
                result["stderr"] = error_content

            if exit_code is not None:
                result["exit_code"] = exit_code

            # Add continuation info if more output available
            total_size = output_file.stat().st_size
            if total_size > len(output_content) or status == "running":
                result["has_more"] = True
                result["total_bytes"] = total_size
                result["continue_hints"] = [
                    f"continue_from='{cmd_id[:8]}'",
                    f"resume='last'",
                    f"continue_from={cmd_id}",
                ]
                result["message"] = (
                    f"Command {'is still running' if status == 'running' else 'has more output'}. "
                    f"Use any of: {', '.join(result['continue_hints'])}"
                )

            # Ensure tasks complete
            if status == "completed":
                await stdout_task
                await stderr_task

            return result

        except Exception as e:
            # Update metadata with error
            metadata["status"] = "error"
            metadata["error"] = str(e)
            metadata["end_time"] = datetime.now().isoformat()

            with open(metadata_file, "w") as f:
                json.dump(metadata, f, indent=2)

            return {
                "error": str(e),
                "command_id": cmd_id,
                "short_id": cmd_id[:8],
                "command": command,
            }

    async def _continue_reading(
        self,
        ref: Union[str, int],
        from_byte: Optional[int],
        chunk_size: int,
    ) -> Dict[str, Any]:
        """Continue reading output from a previous command."""
        # Normalize reference
        cmd_id = self._normalize_command_ref(ref)

        if not cmd_id:
            return {
                "error": f"Command not found: {ref}",
                "hint": "Use 'list' to see available commands",
                "recent_commands": await self._get_recent_commands(),
            }

        cmd_dir = self.commands_dir / cmd_id
        if not cmd_dir.exists():
            return {"error": f"Command directory not found: {cmd_id}"}

        # Load metadata
        metadata_file = cmd_dir / "metadata.json"
        with open(metadata_file, "r") as f:
            metadata = json.load(f)

        # Determine start position
        output_file = cmd_dir / "output.log"
        if not output_file.exists():
            return {"error": "No output file found"}

        # If no from_byte specified, read from where we left off
        if from_byte is None:
            # Try to determine from previous reads (could track this)
            from_byte = 0  # For now, start from beginning if not specified

        # Read chunk
        try:
            with open(output_file, "r", errors="replace") as f:
                f.seek(from_byte)
                content = f.read(chunk_size)
                new_position = f.tell()
                file_size = output_file.stat().st_size

            # Check if process is still running
            status = metadata.get("status", "unknown")

            # Build response
            result = {
                "command_id": cmd_id,
                "short_id": cmd_id[:8],
                "command": metadata["command"],
                "output": content,
                "status": status,
                "bytes_read": len(content),
                "read_from": from_byte,
                "read_to": new_position,
                "total_bytes": file_size,
            }

            # Add stderr if needed
            error_file = cmd_dir / "error.log"
            if error_file.exists() and error_file.stat().st_size > 0:
                with open(error_file, "r", errors="replace") as f:
                    result["stderr"] = f.read(1000)

            # Add continuation info
            if new_position < file_size or status == "running":
                result["has_more"] = True
                result["continue_hints"] = [
                    f"continue_from='{cmd_id[:8]}' from_byte={new_position}",
                    f"resume='last' from_byte={new_position}",
                ]
                result["message"] = (
                    f"Read {len(content)} bytes. "
                    f"{file_size - new_position} bytes remaining. "
                    f"Use: {result['continue_hints'][0]}"
                )

            return result

        except Exception as e:
            return {"error": f"Error reading output: {str(e)}"}

    async def _get_recent_commands(self, limit: int = 5) -> List[Dict[str, Any]]:
        """Get list of recent commands for hints."""
        commands = []

        for cmd_dir in sorted(self.commands_dir.iterdir(),
                              key=lambda p: p.stat().st_mtime,
                              reverse=True)[:limit]:
            try:
                with open(cmd_dir / "metadata.json", "r") as f:
                    meta = json.load(f)

                output_size = 0
                output_file = cmd_dir / "output.log"
                if output_file.exists():
                    output_size = output_file.stat().st_size

                commands.append({
                    "id": meta["command_id"][:8],
                    "command": meta["command"][:50] + "..." if len(meta["command"]) > 50 else meta["command"],
                    "status": meta.get("status", "unknown"),
                    "output_size": output_size,
                    "time": meta.get("start_time", ""),
                })
            except Exception:
                continue

        return commands

    async def list(self, limit: Optional[int] = 10) -> Dict[str, Any]:
        """List recent commands in this session.

        Args:
            limit: Maximum number of commands to show

        Returns:
            List of recent commands with details
        """
        commands = await self._get_recent_commands(limit or 10)

        return {
            "session_id": self.session_id,
            "session_path": str(self.session_dir),
            "commands": commands,
            "hint": "Use continue_from='<id>' or resume='last' to read output",
        }

    async def tail(
        self,
        ref: Optional[Union[str, int]] = None,
        lines: Optional[int] = 20,
    ) -> Dict[str, Any]:
        """Get the tail of a command's output (like 'tail -f').

        Args:
            ref: Command reference (defaults to 'last')
            lines: Number of lines to show

        Returns:
            Last N lines of output
        """
        ref = ref or "last"
        cmd_id = self._normalize_command_ref(ref)

        if not cmd_id:
            return {"error": f"Command not found: {ref}"}

        output_file = self.commands_dir / cmd_id / "output.log"
        if not output_file.exists():
            return {"error": "No output file found"}

        try:
            # Use tail command for efficiency
            result = subprocess.run(
                ["tail", "-n", str(lines or 20), str(output_file)],
                capture_output=True,
                text=True,
            )

            return {
                "command_id": cmd_id[:8],
                "output": result.stdout,
                "lines": lines,
            }
        except Exception as e:
            return {"error": f"Error tailing output: {str(e)}"}

    def get_params_schema(self) -> Dict[str, Any]:
        """Get parameter schema - very forgiving."""
        return {
            "type": "object",
            "properties": {
                "command": {
                    "type": "string",
                    "description": "Command to execute (alias: 'cmd')",
                },
                "cmd": {
                    "type": "string",
                    "description": "Alias for 'command'",
                },
                "working_dir": {
                    "type": "string",
                    "description": "Working directory (alias: 'cwd')",
                },
                "cwd": {
                    "type": "string",
                    "description": "Alias for 'working_dir'",
                },
                "timeout": {
                    "type": ["integer", "string"],
                    "description": "Timeout in seconds",
                },
                "continue_from": {
                    "type": ["string", "integer"],
                    "description": "Continue reading from command (ID, number, or 'last')",
                },
                "resume": {
                    "type": ["string", "integer"],
                    "description": "Alias for 'continue_from'",
                },
                "from_byte": {
                    "type": ["integer", "string"],
                    "description": "Byte position to read from",
                },
                "chunk_size": {
                    "type": ["integer", "string"],
                    "description": "Custom chunk size for reading",
                },
            },
            "required": [],  # No required fields for maximum forgiveness
        }
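The execute/continue protocol above is easier to see end to end with a small usage sketch. The snippet below is not part of the diff: it assumes StreamingCommandTool can be instantiated directly with no constructor arguments (BaseProcessTool may require more in the released package, where the tool is normally registered through the MCP server), and it only calls the run() and tail() methods and parameters shown above.

import asyncio

from hanzo_mcp.tools.shell.streaming_command import StreamingCommandTool


async def demo():
    # Assumption: standalone construction works; in the package the tool is
    # usually invoked through the MCP server rather than directly.
    tool = StreamingCommandTool()

    # Start a command; output streams to
    # ~/.hanzo/sessions/<session_id>/commands/<command_id>/output.log
    first = await tool.run(command="ls -la /tmp")
    print(first["short_id"], first["status"])

    # Page through remaining output using the forgiving aliases.
    more = await tool.run(resume="last", from_byte=first.get("bytes_read", 0))
    print(more.get("output", ""))

    # Or just grab the last few lines, tail-style.
    tail = await tool.tail(ref="last", lines=20)
    print(tail.get("output", ""))


asyncio.run(demo())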
hanzo_mcp/tools/shell/uvx.py
CHANGED
@@ -129,11 +129,56 @@ For long-running servers, use uvx_background instead.
 
         # Check if uvx is available
         if not shutil.which("uvx"):
-
+            await tool_ctx.info("uvx not found, attempting to install...")
+
+            # Try to auto-install uvx
+            install_cmd = "curl -LsSf https://astral.sh/uv/install.sh | sh"
+
+            try:
+                # Run installation
+                install_result = subprocess.run(
+                    install_cmd,
+                    shell=True,
+                    capture_output=True,
+                    text=True,
+                    timeout=60
+                )
+
+                if install_result.returncode == 0:
+                    await tool_ctx.info("uvx installed successfully!")
+
+                    # Add to PATH for current session
+                    import os
+                    home = os.path.expanduser("~")
+                    os.environ["PATH"] = f"{home}/.cargo/bin:{os.environ.get('PATH', '')}"
+
+                    # Check again
+                    if not shutil.which("uvx"):
+                        return """Error: uvx installed but not found in PATH.
+Please add ~/.cargo/bin to your PATH and restart your shell.
+
+Add to ~/.zshrc or ~/.bashrc:
+    export PATH="$HOME/.cargo/bin:$PATH"
+"""
+                else:
+                    return f"""Error: Failed to install uvx automatically.
+
+Install manually with:
     curl -LsSf https://astral.sh/uv/install.sh | sh
 
 Or on macOS:
-brew install uv
+    brew install uv
+
+Error details: {install_result.stderr}"""
+
+            except subprocess.TimeoutExpired:
+                return """Error: Installation timed out. Install uvx manually with:
+    curl -LsSf https://astral.sh/uv/install.sh | sh"""
+            except Exception as e:
+                return f"""Error: Failed to auto-install uvx: {str(e)}
+
+Install manually with:
+    curl -LsSf https://astral.sh/uv/install.sh | sh"""
 
         # Build command
         cmd = ["uvx"]