hanzo-mcp 0.8.11__py3-none-any.whl → 0.8.14__py3-none-any.whl
This diff shows the changes between package versions as published to their respective public registries. It is provided for informational purposes only.
Potentially problematic release.
This version of hanzo-mcp might be problematic.
- hanzo_mcp/__init__.py +2 -4
- hanzo_mcp/analytics/posthog_analytics.py +3 -9
- hanzo_mcp/bridge.py +9 -25
- hanzo_mcp/cli.py +6 -15
- hanzo_mcp/cli_enhanced.py +5 -14
- hanzo_mcp/cli_plugin.py +3 -9
- hanzo_mcp/config/settings.py +6 -20
- hanzo_mcp/config/tool_config.py +1 -3
- hanzo_mcp/core/base_agent.py +88 -88
- hanzo_mcp/core/model_registry.py +238 -210
- hanzo_mcp/dev_server.py +5 -15
- hanzo_mcp/prompts/__init__.py +2 -6
- hanzo_mcp/prompts/project_todo_reminder.py +3 -9
- hanzo_mcp/prompts/tool_explorer.py +1 -3
- hanzo_mcp/prompts/utils.py +7 -21
- hanzo_mcp/server.py +13 -6
- hanzo_mcp/tools/__init__.py +10 -24
- hanzo_mcp/tools/agent/__init__.py +2 -1
- hanzo_mcp/tools/agent/agent.py +10 -30
- hanzo_mcp/tools/agent/agent_tool.py +5 -15
- hanzo_mcp/tools/agent/agent_tool_v1_deprecated.py +14 -41
- hanzo_mcp/tools/agent/claude_desktop_auth.py +3 -9
- hanzo_mcp/tools/agent/cli_agent_base.py +7 -24
- hanzo_mcp/tools/agent/cli_tools.py +75 -74
- hanzo_mcp/tools/agent/code_auth.py +1 -3
- hanzo_mcp/tools/agent/code_auth_tool.py +2 -6
- hanzo_mcp/tools/agent/critic_tool.py +8 -24
- hanzo_mcp/tools/agent/iching_tool.py +12 -36
- hanzo_mcp/tools/agent/network_tool.py +7 -18
- hanzo_mcp/tools/agent/prompt.py +1 -5
- hanzo_mcp/tools/agent/review_tool.py +10 -25
- hanzo_mcp/tools/agent/swarm_alias.py +1 -3
- hanzo_mcp/tools/agent/swarm_tool.py +9 -29
- hanzo_mcp/tools/agent/swarm_tool_v1_deprecated.py +11 -39
- hanzo_mcp/tools/agent/unified_cli_tools.py +38 -38
- hanzo_mcp/tools/common/batch_tool.py +15 -45
- hanzo_mcp/tools/common/config_tool.py +9 -28
- hanzo_mcp/tools/common/context.py +1 -3
- hanzo_mcp/tools/common/critic_tool.py +1 -3
- hanzo_mcp/tools/common/decorators.py +2 -6
- hanzo_mcp/tools/common/enhanced_base.py +2 -6
- hanzo_mcp/tools/common/fastmcp_pagination.py +4 -12
- hanzo_mcp/tools/common/forgiving_edit.py +9 -28
- hanzo_mcp/tools/common/mode.py +1 -5
- hanzo_mcp/tools/common/paginated_base.py +3 -11
- hanzo_mcp/tools/common/paginated_response.py +10 -30
- hanzo_mcp/tools/common/pagination.py +3 -9
- hanzo_mcp/tools/common/permissions.py +38 -11
- hanzo_mcp/tools/common/personality.py +9 -34
- hanzo_mcp/tools/common/plugin_loader.py +3 -15
- hanzo_mcp/tools/common/stats.py +6 -18
- hanzo_mcp/tools/common/thinking_tool.py +1 -3
- hanzo_mcp/tools/common/tool_disable.py +2 -6
- hanzo_mcp/tools/common/tool_list.py +2 -6
- hanzo_mcp/tools/common/validation.py +1 -3
- hanzo_mcp/tools/config/config_tool.py +7 -13
- hanzo_mcp/tools/config/index_config.py +1 -3
- hanzo_mcp/tools/config/mode_tool.py +5 -15
- hanzo_mcp/tools/database/database_manager.py +3 -9
- hanzo_mcp/tools/database/graph.py +1 -3
- hanzo_mcp/tools/database/graph_add.py +3 -9
- hanzo_mcp/tools/database/graph_query.py +11 -34
- hanzo_mcp/tools/database/graph_remove.py +3 -9
- hanzo_mcp/tools/database/graph_search.py +6 -20
- hanzo_mcp/tools/database/graph_stats.py +11 -33
- hanzo_mcp/tools/database/sql.py +4 -12
- hanzo_mcp/tools/database/sql_query.py +6 -10
- hanzo_mcp/tools/database/sql_search.py +2 -6
- hanzo_mcp/tools/database/sql_stats.py +5 -15
- hanzo_mcp/tools/editor/neovim_command.py +1 -3
- hanzo_mcp/tools/editor/neovim_edit.py +2 -2
- hanzo_mcp/tools/editor/neovim_session.py +7 -13
- hanzo_mcp/tools/filesystem/__init__.py +2 -3
- hanzo_mcp/tools/filesystem/ast_multi_edit.py +14 -43
- hanzo_mcp/tools/filesystem/base.py +4 -12
- hanzo_mcp/tools/filesystem/batch_search.py +35 -115
- hanzo_mcp/tools/filesystem/content_replace.py +4 -12
- hanzo_mcp/tools/filesystem/diff.py +2 -10
- hanzo_mcp/tools/filesystem/directory_tree.py +9 -27
- hanzo_mcp/tools/filesystem/directory_tree_paginated.py +5 -15
- hanzo_mcp/tools/filesystem/edit.py +6 -18
- hanzo_mcp/tools/filesystem/find.py +3 -9
- hanzo_mcp/tools/filesystem/find_files.py +2 -6
- hanzo_mcp/tools/filesystem/git_search.py +9 -24
- hanzo_mcp/tools/filesystem/grep.py +9 -27
- hanzo_mcp/tools/filesystem/multi_edit.py +6 -18
- hanzo_mcp/tools/filesystem/read.py +8 -26
- hanzo_mcp/tools/filesystem/rules_tool.py +6 -17
- hanzo_mcp/tools/filesystem/search_tool.py +18 -62
- hanzo_mcp/tools/filesystem/symbols_tool.py +5 -15
- hanzo_mcp/tools/filesystem/tree.py +1 -3
- hanzo_mcp/tools/filesystem/watch.py +1 -3
- hanzo_mcp/tools/filesystem/write.py +1 -3
- hanzo_mcp/tools/jupyter/base.py +6 -20
- hanzo_mcp/tools/jupyter/jupyter.py +4 -12
- hanzo_mcp/tools/jupyter/notebook_edit.py +11 -35
- hanzo_mcp/tools/jupyter/notebook_read.py +2 -6
- hanzo_mcp/tools/llm/consensus_tool.py +8 -24
- hanzo_mcp/tools/llm/llm_manage.py +2 -6
- hanzo_mcp/tools/llm/llm_tool.py +17 -58
- hanzo_mcp/tools/llm/llm_unified.py +18 -59
- hanzo_mcp/tools/llm/provider_tools.py +1 -3
- hanzo_mcp/tools/lsp/lsp_tool.py +5 -17
- hanzo_mcp/tools/mcp/mcp_add.py +1 -3
- hanzo_mcp/tools/mcp/mcp_stats.py +1 -3
- hanzo_mcp/tools/mcp/mcp_tool.py +9 -23
- hanzo_mcp/tools/memory/__init__.py +10 -27
- hanzo_mcp/tools/memory/knowledge_tools.py +7 -25
- hanzo_mcp/tools/memory/memory_tools.py +6 -18
- hanzo_mcp/tools/search/find_tool.py +10 -32
- hanzo_mcp/tools/search/unified_search.py +24 -78
- hanzo_mcp/tools/shell/__init__.py +2 -2
- hanzo_mcp/tools/shell/auto_background.py +2 -6
- hanzo_mcp/tools/shell/base.py +1 -5
- hanzo_mcp/tools/shell/base_process.py +5 -7
- hanzo_mcp/tools/shell/bash_session.py +7 -24
- hanzo_mcp/tools/shell/bash_session_executor.py +5 -15
- hanzo_mcp/tools/shell/bash_tool.py +3 -7
- hanzo_mcp/tools/shell/command_executor.py +33 -86
- hanzo_mcp/tools/shell/logs.py +4 -16
- hanzo_mcp/tools/shell/npx.py +2 -8
- hanzo_mcp/tools/shell/npx_tool.py +1 -3
- hanzo_mcp/tools/shell/pkill.py +4 -12
- hanzo_mcp/tools/shell/process_tool.py +2 -8
- hanzo_mcp/tools/shell/processes.py +5 -17
- hanzo_mcp/tools/shell/run_background.py +1 -3
- hanzo_mcp/tools/shell/run_command.py +1 -3
- hanzo_mcp/tools/shell/run_command_windows.py +1 -3
- hanzo_mcp/tools/shell/session_manager.py +2 -6
- hanzo_mcp/tools/shell/session_storage.py +2 -6
- hanzo_mcp/tools/shell/streaming_command.py +7 -23
- hanzo_mcp/tools/shell/uvx.py +4 -14
- hanzo_mcp/tools/shell/uvx_background.py +2 -6
- hanzo_mcp/tools/shell/uvx_tool.py +1 -3
- hanzo_mcp/tools/shell/zsh_tool.py +12 -20
- hanzo_mcp/tools/todo/todo.py +1 -3
- hanzo_mcp/tools/todo/todo_read.py +3 -9
- hanzo_mcp/tools/todo/todo_write.py +6 -18
- hanzo_mcp/tools/vector/__init__.py +3 -9
- hanzo_mcp/tools/vector/ast_analyzer.py +6 -20
- hanzo_mcp/tools/vector/git_ingester.py +10 -30
- hanzo_mcp/tools/vector/index_tool.py +3 -9
- hanzo_mcp/tools/vector/infinity_store.py +7 -27
- hanzo_mcp/tools/vector/mock_infinity.py +1 -3
- hanzo_mcp/tools/vector/project_manager.py +4 -12
- hanzo_mcp/tools/vector/vector.py +2 -6
- hanzo_mcp/tools/vector/vector_index.py +8 -8
- hanzo_mcp/tools/vector/vector_search.py +7 -21
- {hanzo_mcp-0.8.11.dist-info → hanzo_mcp-0.8.14.dist-info}/METADATA +2 -2
- hanzo_mcp-0.8.14.dist-info/RECORD +193 -0
- hanzo_mcp-0.8.11.dist-info/RECORD +0 -193
- {hanzo_mcp-0.8.11.dist-info → hanzo_mcp-0.8.14.dist-info}/WHEEL +0 -0
- {hanzo_mcp-0.8.11.dist-info → hanzo_mcp-0.8.14.dist-info}/entry_points.txt +0 -0
- {hanzo_mcp-0.8.11.dist-info → hanzo_mcp-0.8.14.dist-info}/top_level.txt +0 -0
@@ -248,9 +248,7 @@ This is the recommended search tool for comprehensive results."""
     ) -> List[SearchResult]:
         """Run grep search and parse results."""
         try:
-            result = await self.grep_tool.call(
-                tool_ctx.mcp_context, pattern=pattern, path=path, include=include
-            )
+            result = await self.grep_tool.call(tool_ctx.mcp_context, pattern=pattern, path=path, include=include)
 
             results = []
             if "Found" in result and "matches" in result:
@@ -281,9 +279,7 @@ This is the recommended search tool for comprehensive results."""
             await tool_ctx.error(f"Grep search failed: {e}")
             return []
 
-    async def _run_grep_ast_search(
-        self, pattern: str, path: str, tool_ctx, max_results: int
-    ) -> List[SearchResult]:
+    async def _run_grep_ast_search(self, pattern: str, path: str, tool_ctx, max_results: int) -> List[SearchResult]:
         """Run AST-aware search and parse results."""
         try:
             result = await self.grep_ast_tool.call(
@@ -317,11 +313,7 @@ This is the recommended search tool for comprehensive results."""
                         content=content,
                         search_type=SearchType.GREP_AST,
                         score=0.95,  # High score for AST matches
-                        context=(
-                            " > ".join(current_context)
-                            if current_context
-                            else None
-                        ),
+                        context=(" > ".join(current_context) if current_context else None),
                     )
                 )
 
@@ -339,9 +331,7 @@ This is the recommended search tool for comprehensive results."""
             await tool_ctx.error(f"AST search failed: {e}")
             return []
 
-    async def _run_vector_search(
-        self, pattern: str, path: str, tool_ctx, max_results: int
-    ) -> List[SearchResult]:
+    async def _run_vector_search(self, pattern: str, path: str, tool_ctx, max_results: int) -> List[SearchResult]:
         """Run semantic vector search."""
         if not self.vector_tool:
             return []
@@ -399,9 +389,7 @@ This is the recommended search tool for comprehensive results."""
             await tool_ctx.error(f"Vector search failed: {e}")
             return []
 
-    async def _run_git_search(
-        self, pattern: str, path: str, tool_ctx, max_results: int
-    ) -> List[SearchResult]:
+    async def _run_git_search(self, pattern: str, path: str, tool_ctx, max_results: int) -> List[SearchResult]:
         """Run git history search."""
         try:
             # Search in both content and commits
@@ -440,11 +428,7 @@ This is the recommended search tool for comprehensive results."""
                         SearchResult(
                             file_path=parts[0].strip(),
                             line_number=None,
-                            content=(
-                                parts[-1].strip()
-                                if len(parts) > 2
-                                else line
-                            ),
+                            content=(parts[-1].strip() if len(parts) > 2 else line),
                             search_type=SearchType.GIT,
                             score=0.8,  # Good score for git matches
                         )
@@ -460,9 +444,7 @@ This is the recommended search tool for comprehensive results."""
             await tool_ctx.error(f"Git search failed: {e}")
             return []
 
-    async def _run_symbol_search(
-        self, pattern: str, path: str, tool_ctx, max_results: int
-    ) -> List[SearchResult]:
+    async def _run_symbol_search(self, pattern: str, path: str, tool_ctx, max_results: int) -> List[SearchResult]:
         """Search for symbol definitions using grep with specific patterns."""
         try:
             # Create patterns for common symbol definitions
@@ -477,11 +459,7 @@ This is the recommended search tool for comprehensive results."""
             # Run grep searches in parallel for each pattern
             tasks = []
             for sp in symbol_patterns:
-                tasks.append(
-                    self.grep_tool.call(
-                        tool_ctx.mcp_context, pattern=sp, path=path, include="*"
-                    )
-                )
+                tasks.append(self.grep_tool.call(tool_ctx.mcp_context, pattern=sp, path=path, include="*"))
 
             grep_results = await asyncio.gather(*tasks, return_exceptions=True)
 
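The hunk above appends one grep call per symbol pattern and gathers them concurrently. The diff only references `# Create patterns for common symbol definitions` without showing the patterns themselves; a minimal sketch of what such definition regexes could look like (every pattern below is an illustrative assumption, not the package's actual list):

```python
# Hypothetical symbol-definition patterns; the real list built by
# _run_symbol_search is not visible in this diff.
def build_symbol_patterns(name: str) -> list[str]:
    """Return regexes that match common definition sites for `name`."""
    return [
        rf"def\s+{name}\s*\(",          # Python function definition
        rf"class\s+{name}\b",            # Python class definition
        rf"(const|let|var)\s+{name}\b",  # JS/TS variable binding
        rf"function\s+{name}\s*\(",     # JS function definition
    ]
```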
@@ -518,9 +496,7 @@ This is the recommended search tool for comprehensive results."""
             await tool_ctx.error(f"Symbol search failed: {e}")
             return []
 
-    def _deduplicate_results(
-        self, all_results: List[SearchResult]
-    ) -> List[SearchResult]:
+    def _deduplicate_results(self, all_results: List[SearchResult]) -> List[SearchResult]:
         """Deduplicate results, keeping the highest scoring version."""
         seen = {}
 
@@ -547,9 +523,7 @@ This is the recommended search tool for comprehensive results."""
         }
 
         # Sort by score (descending) and then by type priority
-        results.sort(
-            key=lambda r: (r.score, type_priority.get(r.search_type, 0)), reverse=True
-        )
+        results.sort(key=lambda r: (r.score, type_priority.get(r.search_type, 0)), reverse=True)
 
         return results
 
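These two hunks reflow `_deduplicate_results` and the final sort. The underlying pattern, keeping the best-scoring result per key and then ranking by score with a type-priority tiebreak, can be sketched standalone (the key choice and priority values here are assumptions; field names follow the `SearchResult` usage visible in the diff):

```python
from dataclasses import dataclass
from typing import Optional

@dataclass
class Result:  # stand-in for the package's SearchResult
    file_path: str
    line_number: Optional[int]
    content: str
    search_type: str
    score: float

def deduplicate(results: list[Result]) -> list[Result]:
    seen: dict[tuple, Result] = {}
    for r in results:
        key = (r.file_path, r.line_number)  # assumed dedup key
        # Keep the highest-scoring version of each result
        if key not in seen or r.score > seen[key].score:
            seen[key] = r
    type_priority = {"grep_ast": 3, "vector": 2, "grep": 1}  # assumed values
    deduped = list(seen.values())
    deduped.sort(key=lambda r: (r.score, type_priority.get(r.search_type, 0)), reverse=True)
    return deduped
```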
@@ -599,42 +573,26 @@ This is the recommended search tool for comprehensive results."""
         search_names = []
 
         if params.get("enable_grep", True) and pattern_analysis["use_grep"]:
-            search_tasks.append(
-                self._run_grep_search(pattern, path, include, tool_ctx, max_results)
-            )
+            search_tasks.append(self._run_grep_search(pattern, path, include, tool_ctx, max_results))
             search_names.append("grep")
 
         if params.get("enable_grep_ast", True) and pattern_analysis["use_grep_ast"]:
-            search_tasks.append(
-                self._run_grep_ast_search(pattern, path, tool_ctx, max_results)
-            )
+            search_tasks.append(self._run_grep_ast_search(pattern, path, tool_ctx, max_results))
             search_names.append("grep_ast")
 
-        if (
-            params.get("enable_vector", True)
-            and self.vector_tool
-            and pattern_analysis["use_vector"]
-        ):
-            search_tasks.append(
-                self._run_vector_search(pattern, path, tool_ctx, max_results)
-            )
+        if params.get("enable_vector", True) and self.vector_tool and pattern_analysis["use_vector"]:
+            search_tasks.append(self._run_vector_search(pattern, path, tool_ctx, max_results))
             search_names.append("vector")
 
         if params.get("enable_git", True) and pattern_analysis["use_git"]:
-            search_tasks.append(
-                self._run_git_search(pattern, path, tool_ctx, max_results)
-            )
+            search_tasks.append(self._run_git_search(pattern, path, tool_ctx, max_results))
             search_names.append("git")
 
         if params.get("enable_symbol", True) and pattern_analysis["use_symbol"]:
-            search_tasks.append(
-                self._run_symbol_search(pattern, path, tool_ctx, max_results)
-            )
+            search_tasks.append(self._run_symbol_search(pattern, path, tool_ctx, max_results))
             search_names.append("symbol")
 
-        await tool_ctx.info(
-            f"Running {len(search_tasks)} search types in parallel: {', '.join(search_names)}"
-        )
+        await tool_ctx.info(f"Running {len(search_tasks)} search types in parallel: {', '.join(search_names)}")
 
         # Run all searches in parallel
         search_results = await asyncio.gather(*search_tasks, return_exceptions=True)
@@ -719,9 +677,7 @@ This is the recommended search tool for comprehensive results."""
             score_str = f"[{result.search_type.value} {result.score:.2f}]"
 
             if result.line_number:
-                output.append(
-                    f"  {result.line_number:>4}: {score_str} {result.content}"
-                )
+                output.append(f"  {result.line_number:>4}: {score_str} {result.content}")
             else:
                 output.append(f"  {score_str} {result.content}")
 
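The dispatch hunk conditionally queues one coroutine per enabled backend and awaits them together. A minimal sketch of that fan-out, assuming placeholder backends; `return_exceptions=True` is the detail that keeps one failing backend from cancelling the rest:

```python
import asyncio

async def grep_backend(pattern: str) -> str:
    return f"grep hit for {pattern}"

async def vector_backend(pattern: str) -> str:
    raise RuntimeError("index unavailable")  # simulated failure

async def run_enabled_searches(pattern: str, backends: dict) -> dict:
    """Fan out one coroutine per backend; collect results or exceptions."""
    tasks = [fn(pattern) for fn in backends.values()]
    # return_exceptions=True: a failing backend becomes an entry, not an abort
    results = await asyncio.gather(*tasks, return_exceptions=True)
    return dict(zip(backends.keys(), results))

print(asyncio.run(run_enabled_searches("needle", {
    "grep": grep_backend,
    "vector": vector_backend,
})))
```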
@@ -141,9 +141,7 @@ Finds code structures (functions, classes, methods) with full context."""
         # Route to appropriate handler
         if action == "search":
             return await self._handle_search(params, tool_ctx)
-        elif (
-            action == "ast" or action == "grep_ast"
-        ):  # Support both for backward compatibility
+        elif action == "ast" or action == "grep_ast":  # Support both for backward compatibility
             return await self._handle_ast(params, tool_ctx)
         elif action == "index":
             return await self._handle_index(params, tool_ctx)
@@ -218,9 +216,7 @@ Finds code structures (functions, classes, methods) with full context."""
                     output = tc.format()
                 else:
                     # Just show matching lines
-                    output = "\n".join(
-                        [f"{line}: {code.splitlines()[line - 1]}" for line in loi]
-                    )
+                    output = "\n".join([f"{line}: {code.splitlines()[line - 1]}" for line in loi])
 
                 results.append(f"\n{file_path}:\n{output}\n")
                 match_count += len(loi)
@@ -325,9 +321,7 @@ Finds code structures (functions, classes, methods) with full context."""
             return f"No matches found for '{pattern}' in {path}"
 
         output = [f"=== AST-aware Grep Results for '{pattern}' ==="]
-        output.append(
-            f"Total matches: {match_count} in {len([r for r in results if '===' in str(r)]) // 4} files\n"
-        )
+        output.append(f"Total matches: {match_count} in {len([r for r in results if '===' in str(r)]) // 4} files\n")
         output.extend(results)
 
         if match_count >= limit:
@@ -490,16 +484,12 @@ Finds code structures (functions, classes, methods) with full context."""
         for root, _, files in os.walk(path_obj):
             for file in files:
                 file_path = Path(root) / file
-                if file_path.suffix in extensions and self.is_path_allowed(
-                    str(file_path)
-                ):
+                if file_path.suffix in extensions and self.is_path_allowed(str(file_path)):
                     files_to_process.append(str(file_path))
 
         return files_to_process
 
-    def _extract_symbols(
-        self, tc: TreeContext, file_path: str
-    ) -> Dict[str, List[Dict[str, Any]]]:
+    def _extract_symbols(self, tc: TreeContext, file_path: str) -> Dict[str, List[Dict[str, Any]]]:
         """Extract symbols from a TreeContext (placeholder implementation)."""
         # This would need proper tree-sitter queries to extract symbols
         # For now, return empty structure
@@ -188,9 +188,7 @@ tree --pattern "*.py" --show-size"""
         if pattern:
             import fnmatch
 
-            entries = [
-                e for e in entries if fnmatch.fnmatch(e.name, pattern) or e.is_dir()
-            ]
+            entries = [e for e in entries if fnmatch.fnmatch(e.name, pattern) or e.is_dir()]
 
         # Filter dirs only
         if dirs_only:
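The `tree` hunk filters entries with `fnmatch` while letting directories through unconditionally so traversal can continue into them. A self-contained sketch of that filter using `os.scandir` (a simplification of the tool's own entry handling):

```python
import fnmatch
import os

def filter_entries(path: str, pattern: str) -> list[os.DirEntry]:
    # Directories always pass the filter so the tree can still recurse into them
    with os.scandir(path) as it:
        return [e for e in it if e.is_dir() or fnmatch.fnmatch(e.name, pattern)]

for entry in filter_entries(".", "*.py"):
    print(entry.name)
```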
@@ -191,9 +191,7 @@ watch . --recursive --exclude "__pycache__"
         output.append("\nWatch cancelled")
 
         # Summary
-        output.append(
-            f"\nWatch completed after {int(time.time() - start_time)} seconds"
-        )
+        output.append(f"\nWatch completed after {int(time.time() - start_time)} seconds")
         output.append(f"Total changes detected: {len(changes)}")
 
         return "\n".join(output)
@@ -125,9 +125,7 @@ Usage:
             with open(path_obj, "w", encoding="utf-8") as f:
                 f.write(content)
 
-            await tool_ctx.info(
-                f"Successfully wrote file: {file_path} ({len(content)} bytes)"
-            )
+            await tool_ctx.info(f"Successfully wrote file: {file_path} ({len(content)} bytes)")
             return f"Successfully wrote file: {file_path} ({len(content)} bytes)"
         except Exception as e:
             await tool_ctx.error(f"Error writing file: {str(e)}")
hanzo_mcp/tools/jupyter/base.py CHANGED
@@ -127,9 +127,7 @@ class JupyterBaseTool(FilesystemBaseTool, ABC):
         """
         tool_ctx.set_tool_info(self.name)
 
-    async def parse_notebook(
-        self, file_path: Path
-    ) -> tuple[dict[str, Any], list[NotebookCellSource]]:
+    async def parse_notebook(self, file_path: Path) -> tuple[dict[str, Any], list[NotebookCellSource]]:
         """Parse a Jupyter notebook file.
 
         Args:
@@ -143,9 +141,7 @@ class JupyterBaseTool(FilesystemBaseTool, ABC):
         notebook = json.loads(content)
 
         # Get notebook language
-        language = (
-            notebook.get("metadata", {}).get("language_info", {}).get("name", "python")
-        )
+        language = notebook.get("metadata", {}).get("language_info", {}).get("name", "python")
         cells = notebook.get("cells", [])
         processed_cells = []
 
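`parse_notebook` loads the `.ipynb` JSON and pulls the kernel language from nested metadata with chained `.get()` calls, so any missing level falls back to `"python"`. A standalone sketch of that parse step, omitting the cell processing that follows in the real method:

```python
import json
from pathlib import Path

def parse_notebook(file_path: Path) -> tuple[str, list[dict]]:
    """Return (language, cells) for a Jupyter notebook file."""
    notebook = json.loads(file_path.read_text(encoding="utf-8"))
    # Chained .get() calls: any missing key falls back to the default
    language = notebook.get("metadata", {}).get("language_info", {}).get("name", "python")
    cells = notebook.get("cells", [])
    return language, cells
```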
@@ -177,9 +173,7 @@ class JupyterBaseTool(FilesystemBaseTool, ABC):
                     text = output.get("text", "")
                     if isinstance(text, list):
                         text = "".join(text)
-                    outputs.append(
-                        NotebookCellOutput(output_type="stream", text=text)
-                    )
+                    outputs.append(NotebookCellOutput(output_type="stream", text=text))
 
                 elif output_type in ["execute_result", "display_data"]:
                     # Process text output
@@ -205,11 +199,7 @@ class JupyterBaseTool(FilesystemBaseTool, ABC):
                             media_type="image/jpeg",
                         )
 
-                    outputs.append(
-                        NotebookCellOutput(
-                            output_type=output_type, text=text, image=image
-                        )
-                    )
+                    outputs.append(NotebookCellOutput(output_type=output_type, text=text, image=image))
 
                 elif output_type == "error":
                     # Format error traceback
@@ -220,17 +210,13 @@ class JupyterBaseTool(FilesystemBaseTool, ABC):
                     # Handle raw text strings and lists of strings
                     if isinstance(traceback, list):
                         # Clean ANSI escape codes and join the list but preserve the formatting
-                        clean_traceback = [
-                            clean_ansi_escapes(line) for line in traceback
-                        ]
+                        clean_traceback = [clean_ansi_escapes(line) for line in traceback]
                         traceback_text = "\n".join(clean_traceback)
                     else:
                         traceback_text = clean_ansi_escapes(str(traceback))
 
                     error_text = f"{ename}: {evalue}\n{traceback_text}"
-                    outputs.append(
-                        NotebookCellOutput(output_type="error", text=error_text)
-                    )
+                    outputs.append(NotebookCellOutput(output_type="error", text=error_text))
 
                 # Create cell object
                 processed_cell = NotebookCellSource(
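The error-output hunk maps `clean_ansi_escapes` over each traceback line before joining. That helper's implementation is not part of this diff; a typical regex-based version might look like this (the escape pattern is an assumption):

```python
import re

# Matches CSI sequences such as "\x1b[31m"; an assumed implementation of
# the clean_ansi_escapes helper referenced in the diff.
_ANSI_RE = re.compile(r"\x1b\[[0-9;]*[A-Za-z]")

def clean_ansi_escapes(text: str) -> str:
    return _ANSI_RE.sub("", text)

traceback_lines = ["\x1b[31mValueError\x1b[0m: bad input"]
print("\n".join(clean_ansi_escapes(line) for line in traceback_lines))
# ValueError: bad input
```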
@@ -151,9 +151,7 @@ jupyter --action create "new.ipynb"
         else:
             return f"Error: Unknown action '{action}'. Valid actions: read, edit, create, delete, execute"
 
-    async def _handle_read(
-        self, notebook_path: str, params: Dict[str, Any], tool_ctx
-    ) -> str:
+    async def _handle_read(self, notebook_path: str, params: Dict[str, Any], tool_ctx) -> str:
         """Read notebook or specific cell."""
         exists, error_msg = await self.check_path_exists(notebook_path, tool_ctx)
         if not exists:
@@ -188,9 +186,7 @@ jupyter --action create "new.ipynb"
             await tool_ctx.error(f"Failed to read notebook: {str(e)}")
             return f"Error reading notebook: {str(e)}"
 
-    async def _handle_edit(
-        self, notebook_path: str, params: Dict[str, Any], tool_ctx
-    ) -> str:
+    async def _handle_edit(self, notebook_path: str, params: Dict[str, Any], tool_ctx) -> str:
         """Edit notebook cell."""
         exists, error_msg = await self.check_path_exists(notebook_path, tool_ctx)
         if not exists:
@@ -295,9 +291,7 @@ jupyter --action create "new.ipynb"
             await tool_ctx.error(f"Failed to create notebook: {str(e)}")
             return f"Error creating notebook: {str(e)}"
 
-    async def _handle_delete(
-        self, notebook_path: str, params: Dict[str, Any], tool_ctx
-    ) -> str:
+    async def _handle_delete(self, notebook_path: str, params: Dict[str, Any], tool_ctx) -> str:
         """Delete notebook or cell."""
         # If cell specified, delegate to edit with delete mode
         if params.get("cell_id") or params.get("cell_index") is not None:
@@ -316,9 +310,7 @@ jupyter --action create "new.ipynb"
             await tool_ctx.error(f"Failed to delete notebook: {str(e)}")
             return f"Error deleting notebook: {str(e)}"
 
-    async def _handle_execute(
-        self, notebook_path: str, params: Dict[str, Any], tool_ctx
-    ) -> str:
+    async def _handle_execute(self, notebook_path: str, params: Dict[str, Any], tool_ctx) -> str:
         """Execute notebook cells (placeholder for future implementation)."""
         return "Error: Cell execution not yet implemented. Use a Jupyter kernel or server for execution."
 
@@ -137,20 +137,14 @@ class NoteBookEditTool(JupyterBaseTool):
 
         # Don't validate new_source for delete mode
         if edit_mode != "delete" and not new_source:
-            await tool_ctx.error(
-                "New source is required for replace or insert operations"
-            )
+            await tool_ctx.error("New source is required for replace or insert operations")
             return "Error: New source is required for replace or insert operations"
 
-        await tool_ctx.info(
-            f"Editing notebook: {notebook_path} (cell: {cell_number}, mode: {edit_mode})"
-        )
+        await tool_ctx.info(f"Editing notebook: {notebook_path} (cell: {cell_number}, mode: {edit_mode})")
 
         # Check if path is allowed
         if not self.is_path_allowed(notebook_path):
-            await tool_ctx.error(
-                f"Access denied - path outside allowed directories: {notebook_path}"
-            )
+            await tool_ctx.error(f"Access denied - path outside allowed directories: {notebook_path}")
             return f"Error: Access denied - path outside allowed directories: {notebook_path}"
 
         try:
@@ -186,23 +180,15 @@ class NoteBookEditTool(JupyterBaseTool):
 
             if edit_mode == "insert":
                 if cell_number > len(cells):
-                    await tool_ctx.error(
-                        f"Cell number {cell_number} is out of bounds for insert (max: {len(cells)})"
-                    )
+                    await tool_ctx.error(f"Cell number {cell_number} is out of bounds for insert (max: {len(cells)})")
                     return f"Error: Cell number {cell_number} is out of bounds for insert (max: {len(cells)})"
             else:  # replace or delete
                 if cell_number >= len(cells):
-                    await tool_ctx.error(
-                        f"Cell number {cell_number} is out of bounds (max: {len(cells) - 1})"
-                    )
+                    await tool_ctx.error(f"Cell number {cell_number} is out of bounds (max: {len(cells) - 1})")
                     return f"Error: Cell number {cell_number} is out of bounds (max: {len(cells) - 1})"
 
             # Get notebook language (needed for context but not directly used in this block)
-            _ = (
-                notebook.get("metadata", {})
-                .get("language_info", {})
-                .get("name", "python")
-            )
+            _ = notebook.get("metadata", {}).get("language_info", {}).get("name", "python")
 
             # Perform the requested operation
             if edit_mode == "replace":
@@ -238,9 +224,7 @@ class NoteBookEditTool(JupyterBaseTool):
 
                 change_description = f"Replaced cell {cell_number}"
                 if cell_type is not None and cell_type != old_type:
-                    change_description += (
-                        f" (changed type from {old_type} to {cell_type})"
-                    )
+                    change_description += f" (changed type from {old_type} to {cell_type})"
 
             elif edit_mode == "insert":
                 # Create new cell
@@ -257,9 +241,7 @@ class NoteBookEditTool(JupyterBaseTool):
 
                 # Insert the cell
                 cells.insert(cell_number, new_cell)
-                change_description = (
-                    f"Inserted new {cell_type} cell at position {cell_number}"
-                )
+                change_description = f"Inserted new {cell_type} cell at position {cell_number}"
 
             else:  # delete
                 # Store deleted cell info for reporting
@@ -268,20 +250,14 @@ class NoteBookEditTool(JupyterBaseTool):
 
                 # Remove the cell
                 del cells[cell_number]
-                change_description = (
-                    f"Deleted {deleted_type} cell at position {cell_number}"
-                )
+                change_description = f"Deleted {deleted_type} cell at position {cell_number}"
 
             # Write the updated notebook back to file
             with open(file_path, "w", encoding="utf-8") as f:
                 json.dump(notebook, f, indent=1)
 
-            await tool_ctx.info(
-                f"Successfully edited notebook: {notebook_path} - {change_description}"
-            )
-            return (
-                f"Successfully edited notebook: {notebook_path} - {change_description}"
-            )
+            await tool_ctx.info(f"Successfully edited notebook: {notebook_path} - {change_description}")
+            return f"Successfully edited notebook: {notebook_path} - {change_description}"
         except Exception as e:
             await tool_ctx.error(f"Error editing notebook: {str(e)}")
             return f"Error editing notebook: {str(e)}"
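The `notebook_edit` hunks all collapse error calls around the same bounds rules: `insert` accepts an index equal to `len(cells)` (append), while `replace` and `delete` require an existing index. A minimal sketch of that logic over a plain cell list:

```python
def edit_cells(cells: list[dict], index: int, mode: str, new_cell: dict | None = None) -> str:
    if mode == "insert":
        if index > len(cells):  # index == len(cells) appends at the end
            return f"Error: Cell number {index} is out of bounds for insert (max: {len(cells)})"
        cells.insert(index, new_cell)
        return f"Inserted new cell at position {index}"
    if index >= len(cells):  # replace/delete need an existing cell
        return f"Error: Cell number {index} is out of bounds (max: {len(cells) - 1})"
    if mode == "replace":
        cells[index] = new_cell
        return f"Replaced cell {index}"
    del cells[index]
    return f"Deleted cell at position {index}"

cells = [{"cell_type": "code", "source": "print('hi')"}]
print(edit_cells(cells, 1, "insert", {"cell_type": "markdown", "source": "# notes"}))
```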
@@ -86,9 +86,7 @@ class NotebookReadTool(JupyterBaseTool):
 
         # Check if path is allowed
         if not self.is_path_allowed(notebook_path):
-            await tool_ctx.error(
-                f"Access denied - path outside allowed directories: {notebook_path}"
-            )
+            await tool_ctx.error(f"Access denied - path outside allowed directories: {notebook_path}")
             return f"Error: Access denied - path outside allowed directories: {notebook_path}"
 
         try:
@@ -115,9 +113,7 @@ class NotebookReadTool(JupyterBaseTool):
             # Format the notebook content as a readable string
             result = self.format_notebook_cells(processed_cells)
 
-            await tool_ctx.info(
-                f"Successfully read notebook: {notebook_path} ({len(processed_cells)} cells)"
-            )
+            await tool_ctx.info(f"Successfully read notebook: {notebook_path} ({len(processed_cells)} cells)")
             return result
         except json.JSONDecodeError:
             await tool_ctx.error(f"Invalid notebook format: {notebook_path}")
@@ -211,22 +211,14 @@ The tool will:
         )
 
         # Prepare summary of results
-        successful_responses = [
-            (m, r) for m, r in results.items() if not r.startswith("Error:")
-        ]
-        failed_responses = [
-            (m, r) for m, r in results.items() if r.startswith("Error:")
-        ]
+        successful_responses = [(m, r) for m, r in results.items() if not r.startswith("Error:")]
+        failed_responses = [(m, r) for m, r in results.items() if r.startswith("Error:")]
 
         if not successful_responses:
-            return "Error: All model queries failed:\n\n" + "\n".join(
-                [f"{m}: {r}" for m, r in failed_responses]
-            )
+            return "Error: All model queries failed:\n\n" + "\n".join([f"{m}: {r}" for m, r in failed_responses])
 
         # Use aggregation model to synthesize responses
-        consensus = await self._aggregate_responses(
-            successful_responses, prompt, aggregation_model
-        )
+        consensus = await self._aggregate_responses(successful_responses, prompt, aggregation_model)
 
         # Format output
         output = ["=== LLM Consensus Analysis ==="]
@@ -245,9 +237,7 @@ The tool will:
         output.append("\n=== Individual Responses ===")
         for model, response in successful_responses:
             output.append(f"\n--- {model} ---")
-            output.append(
-                response[:500] + "..." if len(response) > 500 else response
-            )
+            output.append(response[:500] + "..." if len(response) > 500 else response)
 
         if failed_responses:
             output.append("\n=== Failed Queries ===")
@@ -282,9 +272,7 @@ The tool will:
             # Create a mock context for the LLM tool
             mock_ctx = type("MockContext", (), {"client": None})()
 
-            result = await asyncio.wait_for(
-                self.llm_tool.call(mock_ctx, **params), timeout=timeout
-            )
+            result = await asyncio.wait_for(self.llm_tool.call(mock_ctx, **params), timeout=timeout)
             return (model, result)
         except asyncio.TimeoutError:
             return (model, f"Error: Timeout after {timeout} seconds")
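The consensus hunk wraps each model call in `asyncio.wait_for`, so a slow model degrades into a per-model error string instead of stalling the whole batch. A sketch of the pattern with a placeholder query function:

```python
import asyncio

async def query_model(model: str, prompt: str) -> str:
    await asyncio.sleep(0.1)  # stand-in for a real LLM call
    return f"{model}: answer to {prompt!r}"

async def query_with_timeout(model: str, prompt: str, timeout: float) -> tuple[str, str]:
    try:
        result = await asyncio.wait_for(query_model(model, prompt), timeout=timeout)
        return (model, result)
    except asyncio.TimeoutError:
        # A slow model becomes an error entry instead of blocking the batch
        return (model, f"Error: Timeout after {timeout} seconds")

print(asyncio.run(query_with_timeout("gpt-4o", "2+2?", timeout=5.0)))
```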
@@ -305,9 +293,7 @@ The tool will:
     ) -> str:
         """Use an LLM to aggregate and analyze responses."""
         # Prepare the aggregation prompt
-        response_summary = "\n\n".join(
-            [f"Model: {model}\nResponse: {response}" for model, response in responses]
-        )
+        response_summary = "\n\n".join([f"Model: {model}\nResponse: {response}" for model, response in responses])
 
         aggregation_prompt = f"""You are analyzing responses from multiple AI models to the following prompt:
 
@@ -360,9 +346,7 @@ Be concise but thorough. Focus on providing actionable insights."""
         for model, response in responses:
             output.append(f"- {model}: {len(response)} characters")
 
-        output.append(
-            "\nNote: Advanced consensus analysis unavailable. Showing basic summary only."
-        )
+        output.append("\nNote: Advanced consensus analysis unavailable. Showing basic summary only.")
 
         return "\n".join(output)
 
@@ -192,9 +192,7 @@ Providers are automatically detected based on environment variables:
         for provider in available_but_disabled:
             env_vars = available_providers.get(provider, [])
             output.append(f"  - {provider}: {', '.join(env_vars)}")
-            output.append(
-                f"    Use: llm_manage --action enable --provider {provider}"
-            )
+            output.append(f"    Use: llm_manage --action enable --provider {provider}")
         output.append("")
 
         # Show providers without API keys
@@ -356,9 +354,7 @@ Providers are automatically detected based on environment variables:
         except Exception as e:
             return f"Error listing models: {str(e)}"
 
-    async def _test_model(
-        self, ctx: MCPContext, provider: Optional[str], model: Optional[str]
-    ) -> str:
+    async def _test_model(self, ctx: MCPContext, provider: Optional[str], model: Optional[str]) -> str:
         """Test a model to verify it works."""
         if not model and not provider:
             return "Error: Either model or provider is required for test action"