hanzo-mcp 0.6.13__py3-none-any.whl → 0.7.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of hanzo-mcp might be problematic. Click here for more details.

Files changed (62) hide show
  1. hanzo_mcp/analytics/__init__.py +5 -0
  2. hanzo_mcp/analytics/posthog_analytics.py +364 -0
  3. hanzo_mcp/cli.py +3 -3
  4. hanzo_mcp/cli_enhanced.py +3 -3
  5. hanzo_mcp/config/settings.py +1 -1
  6. hanzo_mcp/config/tool_config.py +18 -4
  7. hanzo_mcp/server.py +34 -1
  8. hanzo_mcp/tools/__init__.py +65 -2
  9. hanzo_mcp/tools/agent/__init__.py +84 -3
  10. hanzo_mcp/tools/agent/agent_tool.py +102 -4
  11. hanzo_mcp/tools/agent/agent_tool_v2.py +492 -0
  12. hanzo_mcp/tools/agent/clarification_protocol.py +220 -0
  13. hanzo_mcp/tools/agent/clarification_tool.py +68 -0
  14. hanzo_mcp/tools/agent/claude_cli_tool.py +125 -0
  15. hanzo_mcp/tools/agent/claude_desktop_auth.py +508 -0
  16. hanzo_mcp/tools/agent/cli_agent_base.py +191 -0
  17. hanzo_mcp/tools/agent/code_auth.py +436 -0
  18. hanzo_mcp/tools/agent/code_auth_tool.py +194 -0
  19. hanzo_mcp/tools/agent/codex_cli_tool.py +123 -0
  20. hanzo_mcp/tools/agent/critic_tool.py +376 -0
  21. hanzo_mcp/tools/agent/gemini_cli_tool.py +128 -0
  22. hanzo_mcp/tools/agent/grok_cli_tool.py +128 -0
  23. hanzo_mcp/tools/agent/iching_tool.py +380 -0
  24. hanzo_mcp/tools/agent/network_tool.py +273 -0
  25. hanzo_mcp/tools/agent/prompt.py +62 -20
  26. hanzo_mcp/tools/agent/review_tool.py +433 -0
  27. hanzo_mcp/tools/agent/swarm_tool.py +535 -0
  28. hanzo_mcp/tools/agent/swarm_tool_v2.py +654 -0
  29. hanzo_mcp/tools/common/base.py +1 -0
  30. hanzo_mcp/tools/common/batch_tool.py +102 -10
  31. hanzo_mcp/tools/common/fastmcp_pagination.py +369 -0
  32. hanzo_mcp/tools/common/forgiving_edit.py +243 -0
  33. hanzo_mcp/tools/common/paginated_base.py +230 -0
  34. hanzo_mcp/tools/common/paginated_response.py +307 -0
  35. hanzo_mcp/tools/common/pagination.py +226 -0
  36. hanzo_mcp/tools/common/tool_list.py +3 -0
  37. hanzo_mcp/tools/common/truncate.py +101 -0
  38. hanzo_mcp/tools/filesystem/__init__.py +29 -0
  39. hanzo_mcp/tools/filesystem/ast_multi_edit.py +562 -0
  40. hanzo_mcp/tools/filesystem/directory_tree_paginated.py +338 -0
  41. hanzo_mcp/tools/lsp/__init__.py +5 -0
  42. hanzo_mcp/tools/lsp/lsp_tool.py +512 -0
  43. hanzo_mcp/tools/memory/__init__.py +76 -0
  44. hanzo_mcp/tools/memory/knowledge_tools.py +518 -0
  45. hanzo_mcp/tools/memory/memory_tools.py +456 -0
  46. hanzo_mcp/tools/search/__init__.py +6 -0
  47. hanzo_mcp/tools/search/find_tool.py +581 -0
  48. hanzo_mcp/tools/search/unified_search.py +953 -0
  49. hanzo_mcp/tools/shell/__init__.py +5 -0
  50. hanzo_mcp/tools/shell/auto_background.py +203 -0
  51. hanzo_mcp/tools/shell/base_process.py +53 -27
  52. hanzo_mcp/tools/shell/bash_tool.py +17 -33
  53. hanzo_mcp/tools/shell/npx_tool.py +15 -32
  54. hanzo_mcp/tools/shell/streaming_command.py +594 -0
  55. hanzo_mcp/tools/shell/uvx_tool.py +15 -32
  56. hanzo_mcp/types.py +23 -0
  57. {hanzo_mcp-0.6.13.dist-info → hanzo_mcp-0.7.1.dist-info}/METADATA +229 -71
  58. {hanzo_mcp-0.6.13.dist-info → hanzo_mcp-0.7.1.dist-info}/RECORD +61 -24
  59. hanzo_mcp-0.6.13.dist-info/licenses/LICENSE +0 -21
  60. {hanzo_mcp-0.6.13.dist-info → hanzo_mcp-0.7.1.dist-info}/WHEEL +0 -0
  61. {hanzo_mcp-0.6.13.dist-info → hanzo_mcp-0.7.1.dist-info}/entry_points.txt +0 -0
  62. {hanzo_mcp-0.6.13.dist-info → hanzo_mcp-0.7.1.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,594 @@
1
+ """Streaming command execution with disk-based logging and session management."""
2
+
3
+ import asyncio
4
+ import json
5
+ import os
6
+ import re
7
+ import shutil
8
+ import subprocess
9
+ import tempfile
10
+ import time
11
+ import uuid
12
+ from datetime import datetime, timedelta
13
+ from pathlib import Path
14
+ from typing import Any, Dict, List, Optional, Tuple, Union
15
+
16
+ from hanzo_mcp.tools.common.base import BaseTool
17
+ from hanzo_mcp.tools.shell.base_process import BaseProcessTool
18
+
19
+
20
class StreamingCommandTool(BaseProcessTool):
    """Execute commands with disk-based streaming and session persistence.

    Features:
    - All output streamed directly to disk (no memory usage)
    - Session-based organization of logs
    - Easy continuation/resumption of output
    - Forgiving parameter handling for AI usage
    - Automatic session detection from MCP context
    """

    name = "streaming_command"
    description = "Run commands with disk-based output streaming and easy resumption"

    # Base directory for all session data
    SESSION_BASE_DIR = Path.home() / ".hanzo" / "sessions"

    # Default chunk size for streaming reads (25k tokens ~ 100KB)
    STREAM_CHUNK_SIZE = 100_000

    # Sessions not accessed within this many days are purged on init
    SESSION_RETENTION_DAYS = 30

    def __init__(self):
        """Initialize the tool: resolve the session, create its directories,
        write session metadata, and purge expired sessions."""
        super().__init__()
        self.session_id = self._get_or_create_session()
        self.session_dir = self.SESSION_BASE_DIR / self.session_id
        self.session_dir.mkdir(parents=True, exist_ok=True)

        # Each executed command gets its own subdirectory under here
        self.commands_dir = self.session_dir / "commands"
        self.commands_dir.mkdir(exist_ok=True)

        # Session-level metadata (creation time, MCP context, ...)
        self.session_meta_file = self.session_dir / "session.json"
        self._update_session_metadata()

        # Opportunistic cleanup of expired sessions
        self._cleanup_old_sessions()

    def _get_or_create_session(self) -> str:
        """Get session ID from MCP context or create a new one.

        Returns:
            Session ID string.
        """
        # MCP may export its own session identifier
        session_id = os.environ.get("MCP_SESSION_ID")

        if not session_id:
            # Fall back to a Claude Desktop session marker if present
            claude_session = os.environ.get("CLAUDE_SESSION_ID")
            if claude_session:
                session_id = f"claude_{claude_session}"
            else:
                # Otherwise mint a fresh, timestamped session ID
                timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
                session_id = f"session_{timestamp}_{uuid.uuid4().hex[:8]}"

        return session_id

    def _update_session_metadata(self) -> None:
        """Write (or refresh) this session's metadata file, preserving the
        original creation timestamp if the file already exists."""
        metadata = {
            "session_id": self.session_id,
            "created": datetime.now().isoformat(),
            "last_accessed": datetime.now().isoformat(),
            "mcp_context": {
                "session_id": os.environ.get("MCP_SESSION_ID"),
                "claude_session": os.environ.get("CLAUDE_SESSION_ID"),
                "user": os.environ.get("USER"),
            }
        }

        # Merge with existing metadata if present: keep the original "created"
        if self.session_meta_file.exists():
            try:
                with open(self.session_meta_file, "r") as f:
                    existing = json.load(f)
                metadata["created"] = existing.get("created", metadata["created"])
            except Exception:
                # Corrupt metadata: fall back to fresh values
                pass

        with open(self.session_meta_file, "w") as f:
            json.dump(metadata, f, indent=2)

    def _cleanup_old_sessions(self) -> None:
        """Remove session directories not accessed within the retention window."""
        if not self.SESSION_BASE_DIR.exists():
            return

        cutoff = datetime.now() - timedelta(days=self.SESSION_RETENTION_DAYS)

        for session_dir in self.SESSION_BASE_DIR.iterdir():
            if not session_dir.is_dir():
                continue

            meta_file = session_dir / "session.json"
            if meta_file.exists():
                try:
                    with open(meta_file, "r") as f:
                        meta = json.load(f)
                    last_accessed = datetime.fromisoformat(meta.get("last_accessed", ""))
                    if last_accessed < cutoff:
                        shutil.rmtree(session_dir)
                except Exception:
                    # Unreadable metadata: fall back to the directory mtime
                    if datetime.fromtimestamp(session_dir.stat().st_mtime) < cutoff:
                        shutil.rmtree(session_dir)

    def _normalize_command_ref(self, ref: Union[str, int, None]) -> Optional[str]:
        """Normalize various command reference formats.

        Args:
            ref: Command reference - can be:
                - Full command ID (UUID)
                - Short ID (first 8 chars)
                - Index number (1, 2, 3...)
                - "last" or "latest"
                - None

        Returns:
            Full command ID or None.
        """
        if not ref:
            return None

        ref_str = str(ref).strip().lower()

        # Special aliases for the most recent command
        if ref_str in ["last", "latest", "recent"]:
            commands = list(self.commands_dir.glob("*/metadata.json"))
            if not commands:
                return None
            latest = max(commands, key=lambda p: p.stat().st_mtime)
            return latest.parent.name

        # Numeric index (1-based, in chronological order, for user friendliness)
        if ref_str.isdigit():
            index = int(ref_str) - 1
            commands = sorted(self.commands_dir.glob("*/metadata.json"),
                              key=lambda p: p.stat().st_mtime)
            if 0 <= index < len(commands):
                return commands[index].parent.name
            return None

        # Short ID (first 8 chars) or full UUID: prefix match on directory names
        if len(ref_str) >= 8:
            for cmd_dir in self.commands_dir.iterdir():
                if cmd_dir.name.startswith(ref_str):
                    return cmd_dir.name

        return None

    async def run(
        self,
        command: Optional[str] = None,
        cmd: Optional[str] = None,  # Alias for command
        working_dir: Optional[str] = None,
        cwd: Optional[str] = None,  # Alias for working_dir
        timeout: Optional[Union[int, str]] = None,
        continue_from: Optional[Union[str, int]] = None,
        resume: Optional[Union[str, int]] = None,  # Alias for continue_from
        from_byte: Optional[Union[int, str]] = None,
        chunk_size: Optional[Union[int, str]] = None,
    ) -> Dict[str, Any]:
        """Execute or continue reading a command with maximum forgiveness.

        Args:
            command/cmd: The command to execute (either works)
            working_dir/cwd: Directory to run in (either works)
            timeout: Timeout in seconds (accepts int or string)
            continue_from/resume: Continue reading output from a command
            from_byte: Specific byte position to read from
            chunk_size: Custom chunk size for this read

        Returns:
            Command output with metadata for easy continuation.
        """
        # Normalize aliased parameters for maximum forgiveness
        command = command or cmd
        working_dir = working_dir or cwd
        continue_from = continue_from or resume

        # Coerce stringly-typed numbers to int
        if isinstance(timeout, str) and timeout.isdigit():
            timeout = int(timeout)
        if isinstance(from_byte, str) and from_byte.isdigit():
            from_byte = int(from_byte)
        if isinstance(chunk_size, str) and chunk_size.isdigit():
            chunk_size = int(chunk_size)

        chunk_size = chunk_size or self.STREAM_CHUNK_SIZE

        # Continuation of a previous command's output
        if continue_from:
            return await self._continue_reading(continue_from, from_byte, chunk_size)

        # A new execution needs a command
        if not command:
            return {
                "error": "No command provided. Use 'command' or 'cmd' parameter.",
                "hint": "To continue a previous command, use 'continue_from' with command ID or number.",
                "recent_commands": await self._get_recent_commands(),
            }

        return await self._execute_new_command(command, working_dir, timeout, chunk_size)

    async def _execute_new_command(
        self,
        command: str,
        working_dir: Optional[str],
        timeout: Optional[int],
        chunk_size: int,
    ) -> Dict[str, Any]:
        """Execute a new command with disk-based streaming.

        Args:
            command: Shell command to run.
            working_dir: Directory to run it in (None = current directory).
            timeout: Requested timeout in seconds (only bounds the initial wait).
            chunk_size: Maximum number of bytes returned in the first chunk.

        Returns:
            Result dict with output, status, and continuation hints.
        """
        cmd_id = str(uuid.uuid4())
        cmd_dir = self.commands_dir / cmd_id
        cmd_dir.mkdir()

        output_file = cmd_dir / "output.log"
        error_file = cmd_dir / "error.log"
        metadata_file = cmd_dir / "metadata.json"

        # BUG FIX: pre-create the log files so the polling loop below can
        # stat() them without racing the streaming tasks (previously stat()
        # could raise FileNotFoundError before the first chunk was written).
        output_file.touch()
        error_file.touch()

        metadata = {
            "command_id": cmd_id,
            "command": command,
            "working_dir": working_dir or os.getcwd(),
            "start_time": datetime.now().isoformat(),
            "timeout": timeout,
            "status": "running",
        }

        with open(metadata_file, "w") as f:
            json.dump(metadata, f, indent=2)

        try:
            process = await asyncio.create_subprocess_shell(
                command,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
                cwd=working_dir,
            )

            async def stream_to_file(stream, file_path):
                """Stream from an async pipe to a file in 8KB chunks."""
                with open(file_path, "wb") as f:
                    while True:
                        chunk = await stream.read(8192)
                        if not chunk:
                            break
                        f.write(chunk)
                        f.flush()  # Make output visible to readers immediately

            stdout_task = asyncio.create_task(stream_to_file(process.stdout, output_file))
            stderr_task = asyncio.create_task(stream_to_file(process.stderr, error_file))

            # Wait briefly for initial output (at most 5 seconds)
            start_time = time.time()
            initial_timeout = min(timeout or 5, 5)

            while time.time() - start_time < initial_timeout:
                if output_file.stat().st_size > 0 or error_file.stat().st_size > 0:
                    break
                await asyncio.sleep(0.1)

            # BUG FIX: read the initial chunk in binary so byte counts and
            # offsets are exact (text-mode read(n) counts characters, which
            # made bytes_read/total_bytes inconsistent for multibyte output).
            raw_out = b""
            if output_file.stat().st_size > 0:
                with open(output_file, "rb") as f:
                    raw_out = f.read(chunk_size)
            output_content = raw_out.decode("utf-8", errors="replace")

            error_content = ""
            if error_file.stat().st_size > 0:
                with open(error_file, "rb") as f:
                    # Just first 1KB of errors
                    error_content = f.read(1000).decode("utf-8", errors="replace")

            # Did the process complete quickly?
            try:
                await asyncio.wait_for(process.wait(), timeout=0.1)
                exit_code = process.returncode
                status = "completed"
            except asyncio.TimeoutError:
                exit_code = None
                status = "running"

            # Persist the observed status
            metadata["status"] = status
            if exit_code is not None:
                metadata["exit_code"] = exit_code
                metadata["end_time"] = datetime.now().isoformat()

            with open(metadata_file, "w") as f:
                json.dump(metadata, f, indent=2)

            result = {
                "command_id": cmd_id,
                "short_id": cmd_id[:8],
                "command": command,
                "output": output_content,
                "status": status,
                "bytes_read": len(raw_out),
                "session_path": str(cmd_dir),
            }

            if error_content:
                result["stderr"] = error_content

            if exit_code is not None:
                result["exit_code"] = exit_code

            # Add continuation info if more output is (or may become) available
            total_size = output_file.stat().st_size
            if total_size > len(raw_out) or status == "running":
                result["has_more"] = True
                result["total_bytes"] = total_size
                result["continue_hints"] = [
                    f"continue_from='{cmd_id[:8]}'",
                    f"resume='last'",
                    f"continue_from={cmd_id}",
                ]
                result["message"] = (
                    f"Command {'is still running' if status == 'running' else 'has more output'}. "
                    f"Use any of: {', '.join(result['continue_hints'])}"
                )

            # For completed commands, make sure both streams are fully drained.
            # NOTE(review): for still-running commands the tasks keep streaming
            # in the background for as long as the event loop lives.
            if status == "completed":
                await stdout_task
                await stderr_task

            return result

        except Exception as e:
            # Record the failure in the command metadata
            metadata["status"] = "error"
            metadata["error"] = str(e)
            metadata["end_time"] = datetime.now().isoformat()

            with open(metadata_file, "w") as f:
                json.dump(metadata, f, indent=2)

            return {
                "error": str(e),
                "command_id": cmd_id,
                "short_id": cmd_id[:8],
                "command": command,
            }

    async def _continue_reading(
        self,
        ref: Union[str, int],
        from_byte: Optional[int],
        chunk_size: int,
    ) -> Dict[str, Any]:
        """Continue reading output from a previous command.

        Args:
            ref: Command reference (ID, short ID, index, or 'last').
            from_byte: Byte offset to read from (None = start of file).
            chunk_size: Maximum number of bytes to read.

        Returns:
            Result dict with the next chunk and continuation hints.
        """
        cmd_id = self._normalize_command_ref(ref)

        if not cmd_id:
            return {
                "error": f"Command not found: {ref}",
                "hint": "Use 'list' to see available commands",
                "recent_commands": await self._get_recent_commands(),
            }

        cmd_dir = self.commands_dir / cmd_id
        if not cmd_dir.exists():
            return {"error": f"Command directory not found: {cmd_id}"}

        metadata_file = cmd_dir / "metadata.json"
        with open(metadata_file, "r") as f:
            metadata = json.load(f)

        output_file = cmd_dir / "output.log"
        if not output_file.exists():
            return {"error": "No output file found"}

        # If no from_byte specified, read from the beginning.
        # TODO(review): track per-reader positions so an unspecified from_byte
        # resumes from the previous read instead of restarting.
        if from_byte is None:
            from_byte = 0

        try:
            # BUG FIX: read in binary mode. Seeking a text-mode file to an
            # arbitrary byte offset can land mid-character, and text-mode
            # read(n) counts characters, which made read_to/total_bytes
            # inconsistent. Binary reads keep all offsets in real bytes.
            with open(output_file, "rb") as f:
                f.seek(from_byte)
                raw = f.read(chunk_size)
                new_position = f.tell()
            content = raw.decode("utf-8", errors="replace")
            file_size = output_file.stat().st_size

            # Status as last persisted by the executor
            status = metadata.get("status", "unknown")

            result = {
                "command_id": cmd_id,
                "short_id": cmd_id[:8],
                "command": metadata["command"],
                "output": content,
                "status": status,
                "bytes_read": len(raw),
                "read_from": from_byte,
                "read_to": new_position,
                "total_bytes": file_size,
            }

            # Include a snippet of stderr if any was captured
            error_file = cmd_dir / "error.log"
            if error_file.exists() and error_file.stat().st_size > 0:
                with open(error_file, "rb") as f:
                    result["stderr"] = f.read(1000).decode("utf-8", errors="replace")

            # Continuation hints when more output remains (or may appear)
            if new_position < file_size or status == "running":
                result["has_more"] = True
                result["continue_hints"] = [
                    f"continue_from='{cmd_id[:8]}' from_byte={new_position}",
                    f"resume='last' from_byte={new_position}",
                ]
                result["message"] = (
                    f"Read {len(raw)} bytes. "
                    f"{file_size - new_position} bytes remaining. "
                    f"Use: {result['continue_hints'][0]}"
                )

            return result

        except Exception as e:
            return {"error": f"Error reading output: {str(e)}"}

    async def _get_recent_commands(self, limit: int = 5) -> List[Dict[str, Any]]:
        """Get a list of recent commands (newest first) for hints.

        Args:
            limit: Maximum number of commands to return.

        Returns:
            List of dicts with short id, truncated command, status, size, time.
        """
        commands = []

        for cmd_dir in sorted(self.commands_dir.iterdir(),
                              key=lambda p: p.stat().st_mtime,
                              reverse=True)[:limit]:
            try:
                with open(cmd_dir / "metadata.json", "r") as f:
                    meta = json.load(f)

                output_size = 0
                output_file = cmd_dir / "output.log"
                if output_file.exists():
                    output_size = output_file.stat().st_size

                commands.append({
                    "id": meta["command_id"][:8],
                    "command": meta["command"][:50] + "..." if len(meta["command"]) > 50 else meta["command"],
                    "status": meta.get("status", "unknown"),
                    "output_size": output_size,
                    "time": meta.get("start_time", ""),
                })
            except Exception:
                # Skip entries with missing/corrupt metadata
                continue

        return commands

    async def list(self, limit: Optional[int] = 10) -> Dict[str, Any]:
        """List recent commands in this session.

        Args:
            limit: Maximum number of commands to show

        Returns:
            List of recent commands with details
        """
        commands = await self._get_recent_commands(limit or 10)

        return {
            "session_id": self.session_id,
            "session_path": str(self.session_dir),
            "commands": commands,
            "hint": "Use continue_from='<id>' or resume='last' to read output",
        }

    async def tail(
        self,
        ref: Optional[Union[str, int]] = None,
        lines: Optional[int] = 20,
    ) -> Dict[str, Any]:
        """Get the tail of a command's output (like 'tail -f').

        Args:
            ref: Command reference (defaults to 'last')
            lines: Number of lines to show

        Returns:
            Last N lines of output
        """
        ref = ref or "last"
        cmd_id = self._normalize_command_ref(ref)

        if not cmd_id:
            return {"error": f"Command not found: {ref}"}

        output_file = self.commands_dir / cmd_id / "output.log"
        if not output_file.exists():
            return {"error": "No output file found"}

        try:
            n = lines or 20
            if shutil.which("tail"):
                # Use the tail binary for efficiency on large logs
                proc = subprocess.run(
                    ["tail", "-n", str(n), str(output_file)],
                    capture_output=True,
                    text=True,
                )
                tail_text = proc.stdout
            else:
                # BUG FIX: portable fallback (e.g. Windows has no 'tail');
                # reads the whole file, acceptable for a fallback path.
                with open(output_file, "r", errors="replace") as f:
                    tail_text = "\n".join(f.read().splitlines()[-n:])

            return {
                "command_id": cmd_id[:8],
                "output": tail_text,
                "lines": lines,
            }
        except Exception as e:
            return {"error": f"Error tailing output: {str(e)}"}

    def get_params_schema(self) -> Dict[str, Any]:
        """Get parameter schema - very forgiving."""
        return {
            "type": "object",
            "properties": {
                "command": {
                    "type": "string",
                    "description": "Command to execute (alias: 'cmd')",
                },
                "cmd": {
                    "type": "string",
                    "description": "Alias for 'command'",
                },
                "working_dir": {
                    "type": "string",
                    "description": "Working directory (alias: 'cwd')",
                },
                "cwd": {
                    "type": "string",
                    "description": "Alias for 'working_dir'",
                },
                "timeout": {
                    "type": ["integer", "string"],
                    "description": "Timeout in seconds",
                },
                "continue_from": {
                    "type": ["string", "integer"],
                    "description": "Continue reading from command (ID, number, or 'last')",
                },
                "resume": {
                    "type": ["string", "integer"],
                    "description": "Alias for 'continue_from'",
                },
                "from_byte": {
                    "type": ["integer", "string"],
                    "description": "Byte position to read from",
                },
                "chunk_size": {
                    "type": ["integer", "string"],
                    "description": "Custom chunk size for reading",
                },
            },
            "required": [],  # No required fields for maximum forgiveness
        }
@@ -18,13 +18,15 @@ class UvxTool(BaseBinaryTool):
18
18
  @override
19
19
  def description(self) -> str:
20
20
  """Get the tool description."""
21
- return """Run Python packages with uvx. Actions: run (default), background.
21
+ return """Run Python packages with uvx with automatic backgrounding for long-running processes.
22
+
23
+ Commands that run for more than 2 minutes will automatically continue in the background.
22
24
 
23
25
  Usage:
24
26
  uvx ruff check .
25
- uvx --action background mkdocs serve
27
+ uvx mkdocs serve # Auto-backgrounds after 2 minutes
26
28
  uvx black --check src/
27
- uvx --action background jupyter lab --port 8888"""
29
+ uvx jupyter lab --port 8888 # Auto-backgrounds if needed"""
28
30
 
29
31
  @override
30
32
  def get_binary_name(self) -> str:
@@ -37,22 +39,20 @@ uvx --action background jupyter lab --port 8888"""
37
39
  ctx: MCPContext,
38
40
  package: str,
39
41
  args: str = "",
40
- action: str = "run",
41
42
  cwd: Optional[str] = None,
42
43
  python: Optional[str] = None,
43
44
  ) -> str:
44
- """Run a uvx command.
45
+ """Run a uvx command with auto-backgrounding.
45
46
 
46
47
  Args:
47
48
  ctx: MCP context
48
49
  package: Python package to run
49
50
  args: Additional arguments
50
- action: Action to perform (run, background)
51
51
  cwd: Working directory
52
52
  python: Python version constraint
53
53
 
54
54
  Returns:
55
- Command output or process info
55
+ Command output or background status
56
56
  """
57
57
  # Prepare working directory
58
58
  work_dir = Path(cwd).resolve() if cwd else Path.cwd()
@@ -65,28 +65,14 @@ uvx --action background jupyter lab --port 8888"""
65
65
  # Build full command
66
66
  full_args = args.split() if args else []
67
67
 
68
- if action == "background":
69
- result = await self.execute_background(
70
- package,
71
- cwd=work_dir,
72
- flags=flags,
73
- args=full_args
74
- )
75
- return (
76
- f"Started uvx process in background\n"
77
- f"Process ID: {result['process_id']}\n"
78
- f"PID: {result['pid']}\n"
79
- f"Log file: {result['log_file']}"
80
- )
81
- else:
82
- # Default to sync execution
83
- return await self.execute_sync(
84
- package,
85
- cwd=work_dir,
86
- flags=flags,
87
- args=full_args,
88
- timeout=300 # 5 minute timeout for uvx
89
- )
68
+ # Always use execute_sync which now has auto-backgrounding
69
+ return await self.execute_sync(
70
+ package,
71
+ cwd=work_dir,
72
+ flags=flags,
73
+ args=full_args,
74
+ timeout=None # Let auto-backgrounding handle timeout
75
+ )
90
76
 
91
77
  def register(self, server: FastMCP) -> None:
92
78
  """Register the tool with the MCP server."""
@@ -97,7 +83,6 @@ uvx --action background jupyter lab --port 8888"""
97
83
  ctx: MCPContext,
98
84
  package: str,
99
85
  args: str = "",
100
- action: str = "run",
101
86
  cwd: Optional[str] = None,
102
87
  python: Optional[str] = None
103
88
  ) -> str:
@@ -105,7 +90,6 @@ uvx --action background jupyter lab --port 8888"""
105
90
  ctx,
106
91
  package=package,
107
92
  args=args,
108
- action=action,
109
93
  cwd=cwd,
110
94
  python=python
111
95
  )
@@ -116,7 +100,6 @@ uvx --action background jupyter lab --port 8888"""
116
100
  ctx,
117
101
  package=params["package"],
118
102
  args=params.get("args", ""),
119
- action=params.get("action", "run"),
120
103
  cwd=params.get("cwd"),
121
104
  python=params.get("python")
122
105
  )