hanzo-mcp 0.3.4 → 0.5.0 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of hanzo-mcp might be problematic.

Files changed (87)
  1. hanzo_mcp/__init__.py +1 -1
  2. hanzo_mcp/cli.py +123 -160
  3. hanzo_mcp/cli_enhanced.py +438 -0
  4. hanzo_mcp/config/__init__.py +19 -0
  5. hanzo_mcp/config/settings.py +388 -0
  6. hanzo_mcp/config/tool_config.py +197 -0
  7. hanzo_mcp/prompts/__init__.py +117 -0
  8. hanzo_mcp/prompts/compact_conversation.py +77 -0
  9. hanzo_mcp/prompts/create_release.py +38 -0
  10. hanzo_mcp/prompts/project_system.py +120 -0
  11. hanzo_mcp/prompts/project_todo_reminder.py +111 -0
  12. hanzo_mcp/prompts/utils.py +286 -0
  13. hanzo_mcp/server.py +120 -98
  14. hanzo_mcp/tools/__init__.py +107 -31
  15. hanzo_mcp/tools/agent/__init__.py +8 -11
  16. hanzo_mcp/tools/agent/agent_tool.py +290 -224
  17. hanzo_mcp/tools/agent/prompt.py +16 -13
  18. hanzo_mcp/tools/agent/tool_adapter.py +9 -9
  19. hanzo_mcp/tools/common/__init__.py +17 -16
  20. hanzo_mcp/tools/common/base.py +79 -110
  21. hanzo_mcp/tools/common/batch_tool.py +330 -0
  22. hanzo_mcp/tools/common/context.py +26 -292
  23. hanzo_mcp/tools/common/permissions.py +12 -12
  24. hanzo_mcp/tools/common/thinking_tool.py +153 -0
  25. hanzo_mcp/tools/common/validation.py +1 -63
  26. hanzo_mcp/tools/filesystem/__init__.py +88 -41
  27. hanzo_mcp/tools/filesystem/base.py +32 -24
  28. hanzo_mcp/tools/filesystem/content_replace.py +114 -107
  29. hanzo_mcp/tools/filesystem/directory_tree.py +129 -105
  30. hanzo_mcp/tools/filesystem/edit.py +279 -0
  31. hanzo_mcp/tools/filesystem/grep.py +458 -0
  32. hanzo_mcp/tools/filesystem/grep_ast_tool.py +250 -0
  33. hanzo_mcp/tools/filesystem/multi_edit.py +362 -0
  34. hanzo_mcp/tools/filesystem/read.py +255 -0
  35. hanzo_mcp/tools/filesystem/write.py +156 -0
  36. hanzo_mcp/tools/jupyter/__init__.py +41 -29
  37. hanzo_mcp/tools/jupyter/base.py +66 -57
  38. hanzo_mcp/tools/jupyter/{edit_notebook.py → notebook_edit.py} +162 -139
  39. hanzo_mcp/tools/jupyter/notebook_read.py +152 -0
  40. hanzo_mcp/tools/shell/__init__.py +29 -20
  41. hanzo_mcp/tools/shell/base.py +87 -45
  42. hanzo_mcp/tools/shell/bash_session.py +731 -0
  43. hanzo_mcp/tools/shell/bash_session_executor.py +295 -0
  44. hanzo_mcp/tools/shell/command_executor.py +435 -384
  45. hanzo_mcp/tools/shell/run_command.py +284 -131
  46. hanzo_mcp/tools/shell/run_command_windows.py +328 -0
  47. hanzo_mcp/tools/shell/session_manager.py +196 -0
  48. hanzo_mcp/tools/shell/session_storage.py +325 -0
  49. hanzo_mcp/tools/todo/__init__.py +66 -0
  50. hanzo_mcp/tools/todo/base.py +319 -0
  51. hanzo_mcp/tools/todo/todo_read.py +148 -0
  52. hanzo_mcp/tools/todo/todo_write.py +378 -0
  53. hanzo_mcp/tools/vector/__init__.py +95 -0
  54. hanzo_mcp/tools/vector/infinity_store.py +365 -0
  55. hanzo_mcp/tools/vector/project_manager.py +361 -0
  56. hanzo_mcp/tools/vector/vector_index.py +115 -0
  57. hanzo_mcp/tools/vector/vector_search.py +215 -0
  58. {hanzo_mcp-0.3.4.dist-info → hanzo_mcp-0.5.0.dist-info}/METADATA +35 -3
  59. hanzo_mcp-0.5.0.dist-info/RECORD +63 -0
  60. {hanzo_mcp-0.3.4.dist-info → hanzo_mcp-0.5.0.dist-info}/WHEEL +1 -1
  61. hanzo_mcp/tools/agent/base_provider.py +0 -73
  62. hanzo_mcp/tools/agent/litellm_provider.py +0 -45
  63. hanzo_mcp/tools/agent/lmstudio_agent.py +0 -385
  64. hanzo_mcp/tools/agent/lmstudio_provider.py +0 -219
  65. hanzo_mcp/tools/agent/provider_registry.py +0 -120
  66. hanzo_mcp/tools/common/error_handling.py +0 -86
  67. hanzo_mcp/tools/common/logging_config.py +0 -115
  68. hanzo_mcp/tools/common/session.py +0 -91
  69. hanzo_mcp/tools/common/think_tool.py +0 -123
  70. hanzo_mcp/tools/common/version_tool.py +0 -120
  71. hanzo_mcp/tools/filesystem/edit_file.py +0 -287
  72. hanzo_mcp/tools/filesystem/get_file_info.py +0 -170
  73. hanzo_mcp/tools/filesystem/read_files.py +0 -198
  74. hanzo_mcp/tools/filesystem/search_content.py +0 -275
  75. hanzo_mcp/tools/filesystem/write_file.py +0 -162
  76. hanzo_mcp/tools/jupyter/notebook_operations.py +0 -514
  77. hanzo_mcp/tools/jupyter/read_notebook.py +0 -165
  78. hanzo_mcp/tools/project/__init__.py +0 -64
  79. hanzo_mcp/tools/project/analysis.py +0 -882
  80. hanzo_mcp/tools/project/base.py +0 -66
  81. hanzo_mcp/tools/project/project_analyze.py +0 -173
  82. hanzo_mcp/tools/shell/run_script.py +0 -215
  83. hanzo_mcp/tools/shell/script_tool.py +0 -244
  84. hanzo_mcp-0.3.4.dist-info/RECORD +0 -53
  85. {hanzo_mcp-0.3.4.dist-info → hanzo_mcp-0.5.0.dist-info}/entry_points.txt +0 -0
  86. {hanzo_mcp-0.3.4.dist-info → hanzo_mcp-0.5.0.dist-info}/licenses/LICENSE +0 -0
  87. {hanzo_mcp-0.3.4.dist-info → hanzo_mcp-0.5.0.dist-info}/top_level.txt +0 -0
{hanzo_mcp-0.3.4.dist-info → hanzo_mcp-0.5.0.dist-info}/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: hanzo-mcp
- Version: 0.3.4
+ Version: 0.5.0
  Summary: MCP implementation of Hanzo capabilities
  Author-email: Hanzo Industries Inc <dev@hanzo.ai>
  License: MIT
@@ -20,6 +20,7 @@ Requires-Dist: uvicorn>=0.23.1
  Requires-Dist: openai>=1.50.0
  Requires-Dist: python-dotenv>=1.0.0
  Requires-Dist: litellm>=1.40.14
+ Requires-Dist: grep-ast>=0.8.1
  Provides-Extra: dev
  Requires-Dist: pytest>=7.0.0; extra == "dev"
  Requires-Dist: pytest-cov>=4.1.0; extra == "dev"
@@ -50,7 +51,7 @@ An implementation of Hanzo capabilities using the Model Context Protocol (MCP).

  This project provides an MCP server that implements Hanzo-like functionality, allowing Claude to directly execute instructions for modifying and improving project files. By leveraging the Model Context Protocol, this implementation enables seamless integration with various MCP clients including Claude Desktop.

- ![example](./doc/example.gif)
+ ![example](./docs/example.gif)

  ## Features

@@ -96,9 +97,40 @@ uv pip install hanzo-mcp
  pip install hanzo-mcp
  ```

+ ### Claude Desktop Integration
+
+ To install and configure hanzo-mcp for use with Claude Desktop:
+
+ ```bash
+ # Install the package globally
+ uv pip install hanzo-mcp
+
+ # Install configuration to Claude Desktop with default settings
+ hanzo-mcp --install
+ ```
+
+ For development, if you want to install your local version to Claude Desktop:
+
+ ```bash
+ # Clone and navigate to the repository
+ git clone https://github.com/hanzoai/mcp.git
+ cd mcp
+
+ # Install and configure for Claude Desktop
+ make install-desktop
+
+ # With custom paths and server name
+ make install-desktop ALLOWED_PATHS="/path/to/projects,/another/path" SERVER_NAME="hanzo-dev"
+
+ # Disable write tools (useful if you prefer using your IDE for edits)
+ make install-desktop DISABLE_WRITE=1
+ ```
+
+ After installation, restart Claude Desktop. You'll see "hanzo" (or your custom server name) available in the MCP server dropdown.
+
  For detailed installation and configuration instructions, please refer to the [documentation](./docs/).

- Of course, you can also read [USEFUL_PROMPTS](./doc/USEFUL_PROMPTS.md) for some inspiration on how to use hanzo-mcp.
+ Of course, you can also read [USEFUL_PROMPTS](./docs/USEFUL_PROMPTS.md) for some inspiration on how to use hanzo-mcp.

  ## Security

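For reference, the `hanzo-mcp --install` step shown above registers the server in Claude Desktop's `claude_desktop_config.json`. A minimal sketch of the kind of entry such an installer writes, assuming the standard `mcpServers` layout and a hypothetical `--allow-path` flag (the exact keys and flags hanzo-mcp writes are not shown in this diff):

```python
import json
from pathlib import Path

# macOS location of the Claude Desktop config; other platforms differ.
config_path = Path.home() / "Library" / "Application Support" / "Claude" / "claude_desktop_config.json"

config = json.loads(config_path.read_text()) if config_path.exists() else {}
config.setdefault("mcpServers", {})["hanzo"] = {
    "command": "hanzo-mcp",
    # "--allow-path" is a hypothetical flag standing in for whatever the
    # installer actually writes for ALLOWED_PATHS.
    "args": ["--allow-path", str(Path.home() / "projects")],
}

config_path.parent.mkdir(parents=True, exist_ok=True)
config_path.write_text(json.dumps(config, indent=2))
```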
hanzo_mcp-0.5.0.dist-info/RECORD (new file)
@@ -0,0 +1,63 @@
+ hanzo_mcp/__init__.py,sha256=_HhgLXRNztyXYLjqCEuPOeVQ7tZ0pRxqkZziKkpSUrE,89
+ hanzo_mcp/cli.py,sha256=fX8HtsD44elZhYre7Dn6RmH5lUt2AFRWz6ehGeoCkUY,9784
+ hanzo_mcp/cli_enhanced.py,sha256=rqh9gqyjMuUznIlPTC5pcIGYZTKAScacMsDb1e68ReE,15819
+ hanzo_mcp/server.py,sha256=mYiIcsAtQO2c_MGExYbzk5tj2U-MjcDWfTU5T22KuwQ,8107
+ hanzo_mcp/config/__init__.py,sha256=iZYGSJMsC1c97gRFqgyowfP4XW480BBVRAQq1r-Dp7g,506
+ hanzo_mcp/config/settings.py,sha256=ibMmec9Zm5wHu-Y8e3o1F0_rgzP9V7pVOZjcW35l3KI,14058
+ hanzo_mcp/config/tool_config.py,sha256=AT5eJRZAL8VTLu5DCdoC_MkDxtufVE_QOj7Yp_Fyi8k,6317
+ hanzo_mcp/prompts/__init__.py,sha256=L3eolRTyTohIp5JA0xv50TSFU4YSf_ycEEaODta7Ve0,3989
+ hanzo_mcp/prompts/compact_conversation.py,sha256=nvD068KEesiMcevxxMBeIJh6AqT7YHOqyH6RepRFFfA,4206
+ hanzo_mcp/prompts/create_release.py,sha256=1Z8xSTtz5vAm0rWFnERpFu7wIYExT4iXhM6nGmQaM-s,1374
+ hanzo_mcp/prompts/project_system.py,sha256=fQhOM6AGb6VIZQE_fSPDeS9slBGVkz_f_UbNNhxPRdw,7031
+ hanzo_mcp/prompts/project_todo_reminder.py,sha256=otiBdmzxssBSb3MZZSQsjYDGLBqi1bM0HgraELP_Nf4,3645
+ hanzo_mcp/prompts/utils.py,sha256=IwxIhzZfYJ2anToPulbrpcc07u4Dozo9ok6VE3BC_4A,9963
+ hanzo_mcp/tools/__init__.py,sha256=q8bVRv11mHTRSZL9PPHlTl3kr4GUAygPk78QjCYavT4,6728
+ hanzo_mcp/tools/agent/__init__.py,sha256=MZ-LMIYptodQn1JpAEyNMbqRlioS4R8scgzNgsU189E,1897
+ hanzo_mcp/tools/agent/agent_tool.py,sha256=w-Oy2wPPTz79SCzmi7NsI8RU4eLbFKMTXDi-sFKrrbo,21268
+ hanzo_mcp/tools/agent/prompt.py,sha256=Wi9Z45hmQ92eUNZbOWzj9ZVCCr-fM1K9iyaRvTCAgrQ,4529
+ hanzo_mcp/tools/agent/tool_adapter.py,sha256=Od7VtD9qqDbgxhDHj0L-rohX4wOSMtYjZnU2BRuWSqI,2151
+ hanzo_mcp/tools/common/__init__.py,sha256=6LOEE9anSTsiPofgGNcD8CVHdU4SiaHjoQcRzNT2xos,921
+ hanzo_mcp/tools/common/base.py,sha256=HB7glx3O9eq2B8nHQu1FbRjtlQZM77CKB1lwMGb-CuE,5631
+ hanzo_mcp/tools/common/batch_tool.py,sha256=-FaZtH1cqd3xSHUrMaIvB664WEK0rKtTvzpUeEl0DhY,12073
+ hanzo_mcp/tools/common/context.py,sha256=XrgzJwPQP8ooKoReveezVgRyOSJe-zfD5-knhusBgbg,5175
+ hanzo_mcp/tools/common/permissions.py,sha256=LR1tuQAPMoaKvqNtHPRaiB0ZUb0Tbsg3e9L6vvd4FLU,7562
+ hanzo_mcp/tools/common/thinking_tool.py,sha256=pEBSymlJZJIS2X0pc-2VX2dUAPi4ho2un-wa69yYTD8,5142
+ hanzo_mcp/tools/common/validation.py,sha256=VV3VbDvYlAYl2Bi98xE7gFo0xnmqHHUGJGNPswm97qo,1694
+ hanzo_mcp/tools/filesystem/__init__.py,sha256=M_Q8Z-w8UyXCKUXLUwPYMrKsOPybZoUs888THA6xqPY,4561
+ hanzo_mcp/tools/filesystem/base.py,sha256=qwxer1jHgPIfyaUeC4QLzR9pjGWJCLP2L3qggUAulFY,3807
+ hanzo_mcp/tools/filesystem/content_replace.py,sha256=hCiw9oQXS2_b6CjgC7XHOrRo5NH6H8zOFaSDS6Uwfgw,10015
+ hanzo_mcp/tools/filesystem/directory_tree.py,sha256=LZTJRmrDdSFpq9EpcTmVytimCp_glpCVKDxf7UCyq20,10755
+ hanzo_mcp/tools/filesystem/edit.py,sha256=PIlFsMjBG9WQw9IWC6dzLZly6UIBUcAUrohRkqyKFZY,10699
+ hanzo_mcp/tools/filesystem/grep.py,sha256=-JKrBUk04tmObvwPh8UvBpLOc27NNndNt6eR5qSkCLs,16818
+ hanzo_mcp/tools/filesystem/grep_ast_tool.py,sha256=F-HacdAISZI_jDGJrxIcZ-dyj3OG919JUVimpvgAZNA,8142
+ hanzo_mcp/tools/filesystem/multi_edit.py,sha256=j8ytsFVsdQqJ9AWCJMQa8kWHyH4UpbBdHRIc7XepEJc,14313
+ hanzo_mcp/tools/filesystem/read.py,sha256=uF1KdIAsKL8-oQiwOfL9-dkTzKOqQK0nKLVe6hW-5KE,8892
+ hanzo_mcp/tools/filesystem/write.py,sha256=dkbZ61kYGRTzKPVtMG8ETYw8YHyo6YXb1cLI70ePYcQ,4833
+ hanzo_mcp/tools/jupyter/__init__.py,sha256=IJnkx6vwxP2ZJOGvUxG25fhstlny-uFnNBLjGlUt5hs,2515
+ hanzo_mcp/tools/jupyter/base.py,sha256=oxTz_exSsYni2cQJvL4gHZtC4EG5EU_1-nWyEdc-ZQ8,10090
+ hanzo_mcp/tools/jupyter/notebook_edit.py,sha256=wKEEQJ36pfgB0JHQi2nV_X7ApXqy6HXZY9XO4lZ9Efg,11848
+ hanzo_mcp/tools/jupyter/notebook_read.py,sha256=t2fkP5wAp8SBBaWHrty-uWsnn6l5WO2zIqISVSHnQus,5293
+ hanzo_mcp/tools/shell/__init__.py,sha256=CcVnsAqSd8FLtVpkuHQK4cbKHWrac6o9enEIqNlxz4k,1951
+ hanzo_mcp/tools/shell/base.py,sha256=twbz3EuX64cwvNlcHraZ5CcEhDpUvMI5mLTZvMADtbQ,5821
+ hanzo_mcp/tools/shell/bash_session.py,sha256=YPtdtC0pc6Q04RJqKUy0u0RPTbiT2IGtsvFqejK5Hu4,27271
+ hanzo_mcp/tools/shell/bash_session_executor.py,sha256=zRnrzj4sdQOxO22XXBENT6k2dXt3LDk5fxjWjUYyU_Q,10723
+ hanzo_mcp/tools/shell/command_executor.py,sha256=IuoRY48PMmpKHL5CFIExebjoiRRS5ZEl73UDzYTR3kU,36406
+ hanzo_mcp/tools/shell/run_command.py,sha256=Io6LyLm8XWZKZ-Zjhx3L-H5vmdNGoqbkU9jJzwL7zLs,16137
+ hanzo_mcp/tools/shell/run_command_windows.py,sha256=MGXC76b0uYKhxg1-d9CijPP36ufRusgyq9Zurpo1vSc,15363
+ hanzo_mcp/tools/shell/session_manager.py,sha256=o8iS4PFCnq28vPqYtdtH9M8lfGyzyhtNL0hmNI13Uuc,6509
+ hanzo_mcp/tools/shell/session_storage.py,sha256=elnyFgn0FwsmVvoWAoJFAqiEeNaK4_yByT8-zXa6r-o,10141
+ hanzo_mcp/tools/todo/__init__.py,sha256=Ai-rlVWcy-CkJf1H2zIsbyx0wkxzWNLR3WAbGszbXKg,1720
+ hanzo_mcp/tools/todo/base.py,sha256=8sYZYAsFE5SjHRqynZCmCIKEobWB3aZwwSApg26keDo,10655
+ hanzo_mcp/tools/todo/todo_read.py,sha256=zXI9jn-kWXGSj88tI63yoAv-EWPDpkX1E6m0QfMUQHE,4759
+ hanzo_mcp/tools/todo/todo_write.py,sha256=fTAvrxrzkpdYwi7nYcJky2wjukChYsdXu5axqIUJg_c,15465
+ hanzo_mcp/tools/vector/__init__.py,sha256=vNz7GM1P98TW-v6Y3KTwvaW7cO5P80mXAf6uKS0Y29M,3390
+ hanzo_mcp/tools/vector/infinity_store.py,sha256=hYaSuRxqukwVDMgzrxs_Qdia7Gx-FFEGuHE-4KIACo0,12002
+ hanzo_mcp/tools/vector/project_manager.py,sha256=JZ6c0m4RWKbV4JjkxAI6ZgyOy2Ymk8-o4ficTLZrIo0,12500
+ hanzo_mcp/tools/vector/vector_index.py,sha256=FePsCTeVkA3Uth3PFIZykXprCPfef7cqCCV_vH7z0Eo,4316
+ hanzo_mcp/tools/vector/vector_search.py,sha256=3dWS_jueMt07DSTZOKtz9El4uSwU9KgHUjRAiF14CxQ,9182
+ hanzo_mcp-0.5.0.dist-info/licenses/LICENSE,sha256=mf1qZGFsPGskoPgytp9B-RsahfKvXsBpmaAbTLGTt8Y,1063
+ hanzo_mcp-0.5.0.dist-info/METADATA,sha256=RRh07Zv5iMZRJZb2-nv92groJ5ql1VG2tS51Rj-fzhE,8930
+ hanzo_mcp-0.5.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ hanzo_mcp-0.5.0.dist-info/entry_points.txt,sha256=aRKOKXtuQr-idSr-yH4efnRl2v8te94AcgN3ysqqSYs,49
+ hanzo_mcp-0.5.0.dist-info/top_level.txt,sha256=eGFANatA0MHWiVlpS56fTYRIShtibrSom1uXI6XU0GU,10
+ hanzo_mcp-0.5.0.dist-info/RECORD,,
{hanzo_mcp-0.3.4.dist-info → hanzo_mcp-0.5.0.dist-info}/WHEEL
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (78.1.0)
+ Generator: setuptools (80.9.0)
  Root-Is-Purelib: true
  Tag: py3-none-any

hanzo_mcp/tools/agent/base_provider.py (removed in 0.5.0)
@@ -1,73 +0,0 @@
- """Base model provider for agent delegation.
-
- Defines the interface for model providers.
- """
-
- import logging
- from abc import ABC, abstractmethod
- from typing import Any, Dict, List, Optional, Tuple
-
- logger = logging.getLogger(__name__)
-
-
- class BaseModelProvider(ABC):
-     """Base class for model providers."""
-
-     @abstractmethod
-     async def initialize(self) -> None:
-         """Initialize the provider."""
-         pass
-
-     @abstractmethod
-     async def load_model(self, model_name: str, identifier: Optional[str] = None) -> str:
-         """Load a model.
-
-         Args:
-             model_name: The name of the model to load
-             identifier: Optional identifier for the model instance
-
-         Returns:
-             The identifier for the loaded model
-         """
-         pass
-
-     @abstractmethod
-     async def generate(
-         self,
-         model_id: str,
-         prompt: str,
-         system_prompt: Optional[str] = None,
-         max_tokens: int = 4096,
-         temperature: float = 0.7,
-         top_p: float = 0.95,
-         stop_sequences: Optional[List[str]] = None,
-     ) -> Tuple[str, Dict[str, Any]]:
-         """Generate a response from the model.
-
-         Args:
-             model_id: The identifier of the model to use
-             prompt: The prompt to send to the model
-             system_prompt: Optional system prompt to send to the model
-             max_tokens: Maximum number of tokens to generate
-             temperature: Sampling temperature
-             top_p: Top-p sampling parameter
-             stop_sequences: Optional list of strings that will stop generation
-
-         Returns:
-             A tuple of (generated text, metadata)
-         """
-         pass
-
-     @abstractmethod
-     async def unload_model(self, model_id: str) -> None:
-         """Unload a model.
-
-         Args:
-             model_id: The identifier of the model to unload
-         """
-         pass
-
-     @abstractmethod
-     async def shutdown(self) -> None:
-         """Shutdown the provider."""
-         pass
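The removed `base_provider.py` above defined the abstract contract that agent model providers implemented before 0.5.0. A minimal sketch of a concrete subclass against that interface; `EchoProvider` and its stub behavior are purely illustrative and not part of the package:

```python
from typing import Any, Dict, List, Optional, Tuple

# Pre-0.5.0 import path; this module is removed in this release.
from hanzo_mcp.tools.agent.base_provider import BaseModelProvider


class EchoProvider(BaseModelProvider):
    """Toy provider that echoes prompts back; illustrates the contract only."""

    async def initialize(self) -> None:
        self.models: Dict[str, str] = {}

    async def load_model(self, model_name: str, identifier: Optional[str] = None) -> str:
        model_id = identifier or model_name
        self.models[model_id] = model_name
        return model_id

    async def generate(
        self,
        model_id: str,
        prompt: str,
        system_prompt: Optional[str] = None,
        max_tokens: int = 4096,
        temperature: float = 0.7,
        top_p: float = 0.95,
        stop_sequences: Optional[List[str]] = None,
    ) -> Tuple[str, Dict[str, Any]]:
        # A real provider would call an actual model here.
        return prompt, {"model": self.models[model_id], "max_tokens": max_tokens}

    async def unload_model(self, model_id: str) -> None:
        self.models.pop(model_id, None)

    async def shutdown(self) -> None:
        self.models.clear()
```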
hanzo_mcp/tools/agent/litellm_provider.py (removed in 0.5.0; the listing below is truncated in the source diff)
@@ -1,45 +0,0 @@
- """LiteLLM provider for agent delegation.
-
- Enables the use of various cloud LLM providers via LiteLLM.
- """
-
- import asyncio
- import logging
- import json
- from typing import Any, Dict, List, Optional, Tuple
-
- from hanzo_mcp.tools.agent.base_provider import BaseModelProvider
-
- logger = logging.getLogger(__name__)
-
- # Define model capabilities
- DEFAULT_MAX_TOKENS = 4096
- DEFAULT_CONTEXT_WINDOW = 8192
-
-
- class LiteLLMProvider(BaseModelProvider):
-     """Provider for cloud models via LiteLLM."""
-
-     def __init__(self):
-         """Initialize the LiteLLM provider."""
-         self.models = {}
-         self.initialized = False
-
-     async def initialize(self) -> None:
-         """Initialize the LiteLLM provider."""
-         if self.initialized:
-             return
-
-         try:
-             # Import LiteLLM
-             import litellm
-             self.litellm = litellm
-             self.initialized = True
-             logger.info("LiteLLM provider initialized successfully")
-         except ImportError:
-             logger.error("Failed to import LiteLLM")
-             logger.error("Install LiteLLM with 'pip install litellm'")
-         except Exception as e:
-             logger.error(f"Failed to initialize LiteLLM provider: {str(e)}")
-
-     async def load_model(self, model_name: str, identifier: Optional[str] = None
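The listing above cuts off before `load_model` and `generate` are shown. For context, a LiteLLM-backed generate step typically delegates to LiteLLM's async completion API roughly as follows; this is an illustrative sketch, not the removed implementation:

```python
from typing import Optional

import litellm


async def generate_via_litellm(
    model: str,
    prompt: str,
    system_prompt: Optional[str] = None,
    max_tokens: int = 4096,
    temperature: float = 0.7,
) -> str:
    """Send a single prompt to a cloud model routed through LiteLLM."""
    messages = []
    if system_prompt:
        messages.append({"role": "system", "content": system_prompt})
    messages.append({"role": "user", "content": prompt})

    # litellm.acompletion returns an OpenAI-style response object.
    response = await litellm.acompletion(
        model=model,
        messages=messages,
        max_tokens=max_tokens,
        temperature=temperature,
    )
    return response.choices[0].message.content
```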
hanzo_mcp/tools/agent/lmstudio_agent.py (removed in 0.5.0)
@@ -1,385 +0,0 @@
- """LM Studio agent tool for parallel model execution.
-
- This module provides a tool for running tasks in parallel across multiple LM Studio models.
- """
-
- import asyncio
- import json
- import logging
- import time
- from typing import Any, Dict, List, Optional, final, override
-
- from mcp.server.fastmcp import Context as MCPContext
- from mcp.server.fastmcp import FastMCP
-
- from hanzo_mcp.tools.common.base import BaseTool
- from hanzo_mcp.tools.common.context import DocumentContext, create_tool_context
- from hanzo_mcp.tools.common.permissions import PermissionManager
- from hanzo_mcp.tools.agent.lmstudio_provider import LMStudioProvider
-
- logger = logging.getLogger(__name__)
-
-
- @final
- class LMStudioAgentTool(BaseTool):
-     """Tool for parallel execution of tasks across multiple LM Studio models."""
-
-     @property
-     @override
-     def name(self) -> str:
-         """Get the tool name.
-
-         Returns:
-             Tool name
-         """
-         return "lmstudio_dispatch"
-
-     @property
-     @override
-     def description(self) -> str:
-         """Get the tool description.
-
-         Returns:
-             Tool description
-         """
-         return """Run tasks in parallel across multiple LM Studio models.
-
- This tool allows you to dispatch the same task or different tasks to multiple locally available
- LM Studio models and execute them in parallel. This is useful for comparing model responses,
- leveraging different model strengths, or simply speeding up processing by distributing tasks.
-
- The task prompts can be the same for all models or different per model.
-
- Args:
- model_tasks: A list of configurations, each with a 'model' name, and a 'prompt'.
- Optionally can include 'system_prompt', 'temperature', 'max_tokens' and 'top_p'
-
- Returns:
- Combined results from all model executions with performance metrics
- """
-
-     @property
-     @override
-     def parameters(self) -> dict[str, Any]:
-         """Get the parameter specifications for the tool.
-
-         Returns:
-             Parameter specifications
-         """
-         return {
-             "properties": {
-                 "model_tasks": {
-                     "type": "array",
-                     "items": {
-                         "type": "object",
-                         "properties": {
-                             "model": {
-                                 "type": "string",
-                                 "description": "Name of the LM Studio model to use"
-                             },
-                             "identifier": {
-                                 "type": "string",
-                                 "description": "Optional identifier for the model instance"
-                             },
-                             "prompt": {
-                                 "type": "string",
-                                 "description": "Task prompt for the model"
-                             },
-                             "system_prompt": {
-                                 "type": "string",
-                                 "description": "Optional system prompt for the model"
-                             },
-                             "temperature": {
-                                 "type": "number",
-                                 "description": "Sampling temperature (defaults to 0.7)"
-                             },
-                             "max_tokens": {
-                                 "type": "integer",
-                                 "description": "Maximum tokens to generate (defaults to 2048)"
-                             },
-                             "top_p": {
-                                 "type": "number",
-                                 "description": "Top-p sampling parameter (defaults to 0.95)"
-                             }
-                         },
-                         "required": ["model", "prompt"]
-                     },
-                     "description": "List of model task configurations to execute in parallel"
-                 }
-             },
-             "required": ["model_tasks"],
-             "type": "object"
-         }
-
-     @property
-     @override
-     def required(self) -> list[str]:
-         """Get the list of required parameter names.
-
-         Returns:
-             List of required parameter names
-         """
-         return ["model_tasks"]
-
-     def __init__(self, document_context: DocumentContext, permission_manager: PermissionManager) -> None:
-         """Initialize the LM Studio agent tool.
-
-         Args:
-             document_context: Document context for tracking file contents
-             permission_manager: Permission manager for access control
-         """
-         self.document_context = document_context
-         self.permission_manager = permission_manager
-         self.provider = LMStudioProvider()
-
-     @override
-     async def call(self, ctx: MCPContext, **params: Any) -> str:
-         """Execute the tool with the given parameters.
-
-         Args:
-             ctx: MCP context
-             **params: Tool parameters
-
-         Returns:
-             Tool execution result
-         """
-         start_time = time.time()
-
-         # Create tool context
-         tool_ctx = create_tool_context(ctx)
-         tool_ctx.set_tool_info(self.name)
-
-         # Extract parameters
-         model_tasks = params.get("model_tasks")
-         if not model_tasks:
-             await tool_ctx.error("Parameter 'model_tasks' is required but was not provided")
-             return "Error: Parameter 'model_tasks' is required but was not provided"
-
-         if not isinstance(model_tasks, list):
-             await tool_ctx.error("Parameter 'model_tasks' must be an array")
-             return "Error: Parameter 'model_tasks' must be an array"
-
-         if not model_tasks:
-             await tool_ctx.error("At least one model task must be provided")
-             return "Error: At least one model task must be provided"
-
-         # Validate each model task
-         for i, task in enumerate(model_tasks):
-             if not isinstance(task, dict):
-                 await tool_ctx.error(f"Model task at index {i} must be an object")
-                 return f"Error: Model task at index {i} must be an object"
-
-             if "model" not in task:
-                 await tool_ctx.error(f"Model task at index {i} must have a 'model' property")
-                 return f"Error: Model task at index {i} must have a 'model' property"
-
-             if "prompt" not in task:
-                 await tool_ctx.error(f"Model task at index {i} must have a 'prompt' property")
-                 return f"Error: Model task at index {i} must have a 'prompt' property"
-
-         # Initialize the provider if needed
-         await self.provider.initialize()
-
-         # Execute the tasks in parallel
-         await tool_ctx.info(f"Executing {len(model_tasks)} tasks across LM Studio models")
-         result = await self._execute_parallel_tasks(model_tasks, tool_ctx)
-
-         # Calculate execution time
-         execution_time = time.time() - start_time
-
-         # Format the result
-         formatted_result = self._format_result(result, execution_time)
-
-         # Log completion
-         await tool_ctx.info(f"LM Studio model execution completed in {execution_time:.2f}s")
-
-         return formatted_result
-
-     async def _execute_parallel_tasks(self, model_tasks: List[Dict[str, Any]], tool_ctx: Any) -> List[Dict[str, Any]]:
-         """Execute multiple model tasks in parallel.
-
-         Args:
-             model_tasks: List of model task configurations
-             tool_ctx: Tool context for logging
-
-         Returns:
-             List of task results
-         """
-         # Create tasks for loading models
-         load_tasks = []
-
-         for task in model_tasks:
-             model_name = task["model"]
-             identifier = task.get("identifier")
-
-             await tool_ctx.info(f"Loading model: {model_name}" + (f" as {identifier}" if identifier else ""))
-             load_tasks.append(self.provider.load_model(model_name, identifier))
-
-         # Wait for all models to load
-         try:
-             model_ids = await asyncio.gather(*load_tasks)
-         except Exception as e:
-             await tool_ctx.error(f"Failed to load models: {str(e)}")
-             return [{"error": f"Failed to load models: {str(e)}"}]
-
-         # Create tasks for generating responses
-         generation_tasks = []
-
-         for i, (task, model_id) in enumerate(zip(model_tasks, model_ids)):
-             prompt = task["prompt"]
-             system_prompt = task.get("system_prompt")
-             max_tokens = task.get("max_tokens", 2048)
-             temperature = task.get("temperature", 0.7)
-             top_p = task.get("top_p", 0.95)
-
-             await tool_ctx.info(f"Generating with model {model_id}")
-             generation_tasks.append(
-                 self._execute_single_task(
-                     model_id=model_id,
-                     prompt=prompt,
-                     system_prompt=system_prompt,
-                     max_tokens=max_tokens,
-                     temperature=temperature,
-                     top_p=top_p,
-                     task_index=i,
-                     tool_ctx=tool_ctx,
-                     original_task=task
-                 )
-             )
-
-         # Wait for all generation tasks to complete
-         results = await asyncio.gather(*generation_tasks, return_exceptions=True)
-
-         # Process results, handling any exceptions
-         processed_results = []
-         for i, result in enumerate(results):
-             if isinstance(result, Exception):
-                 processed_results.append({
-                     "model": model_tasks[i]["model"],
-                     "error": str(result),
-                     "success": False
-                 })
-             else:
-                 processed_results.append(result)
-
-         return processed_results
-
-     async def _execute_single_task(
-         self,
-         model_id: str,
-         prompt: str,
-         system_prompt: Optional[str],
-         max_tokens: int,
-         temperature: float,
-         top_p: float,
-         task_index: int,
-         tool_ctx: Any,
-         original_task: Dict[str, Any]
-     ) -> Dict[str, Any]:
-         """Execute a single model task.
-
-         Args:
-             model_id: Model identifier
-             prompt: Prompt for the model
-             system_prompt: Optional system prompt
-             max_tokens: Maximum tokens to generate
-             temperature: Sampling temperature
-             top_p: Top-p sampling parameter
-             task_index: Index of the task
-             tool_ctx: Tool context for logging
-             original_task: Original task configuration
-
-         Returns:
-             Task result
-         """
-         task_start_time = time.time()
-
-         try:
-             # Generate response
-             generated_text, metadata = await self.provider.generate(
-                 model_id=model_id,
-                 prompt=prompt,
-                 system_prompt=system_prompt,
-                 max_tokens=max_tokens,
-                 temperature=temperature,
-                 top_p=top_p
-             )
-
-             # Calculate execution time
-             task_execution_time = time.time() - task_start_time
-
-             await tool_ctx.info(f"Task {task_index} completed in {task_execution_time:.2f}s")
-
-             # Return result
-             return {
-                 "model": original_task["model"],
-                 "identifier": model_id,
-                 "result": generated_text,
-                 "execution_time": task_execution_time,
-                 "success": True,
-                 "metadata": metadata
-             }
-         except Exception as e:
-             await tool_ctx.error(f"Error executing task {task_index}: {str(e)}")
-             return {
-                 "model": original_task["model"],
-                 "identifier": model_id,
-                 "error": str(e),
-                 "execution_time": time.time() - task_start_time,
-                 "success": False
-             }
-
-     def _format_result(self, results: List[Dict[str, Any]], total_execution_time: float) -> str:
-         """Format the task results.
-
-         Args:
-             results: List of task results
-             total_execution_time: Total execution time
-
-         Returns:
-             Formatted results
-         """
-         # Calculate summary statistics
-         successful = [r for r in results if r.get("success", False)]
-         failed = [r for r in results if not r.get("success", False)]
-
-         # Create the result string
-         output = [f"### LM Studio Dispatch Results\n"]
-         output.append(f"**Total execution time:** {total_execution_time:.2f}s")
-         output.append(f"**Models used:** {len(results)}")
-         output.append(f"**Successful:** {len(successful)}")
-         output.append(f"**Failed:** {len(failed)}\n")
-
-         # Add the results for each model
-         for i, result in enumerate(results):
-             model_name = result.get("model", "Unknown model")
-             model_id = result.get("identifier", model_name)
-
-             output.append(f"## Model {i+1}: {model_name}")
-
-             if result.get("success", False):
-                 exec_time = result.get("execution_time", 0)
-                 output.append(f"**Execution time:** {exec_time:.2f}s")
-
-                 # Add the result
-                 output.append("\n**Result:**\n")
-                 output.append(result.get("result", "No result"))
-             else:
-                 output.append(f"**Error:** {result.get('error', 'Unknown error')}")
-
-             output.append("\n" + "-" * 40 + "\n")
-
-         return "\n".join(output)
-
-     @override
-     def register(self, mcp_server: FastMCP) -> None:
-         """Register this tool with the MCP server.
-
-         Args:
-             mcp_server: The FastMCP server instance
-         """
-         tool_self = self  # Create a reference to self for use in the closure
-
-         @mcp_server.tool(name=self.name, description=self.mcp_description)
-         async def lmstudio_dispatch(ctx: MCPContext, model_tasks: List[Dict[str, Any]]) -> str:
-             return await tool_self.call(ctx, model_tasks=model_tasks)
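For context, the removed `lmstudio_dispatch` tool accepted a `model_tasks` array matching the parameter schema above. An illustrative payload (model names and prompts are placeholders, not values from this diff):

```python
# Each entry requires "model" and "prompt"; the remaining keys are optional.
example_model_tasks = [
    {
        "model": "llama-3-8b-instruct",
        "prompt": "Summarize the changes in this release.",
        "temperature": 0.2,
    },
    {
        "model": "mistral-7b-instruct",
        "identifier": "reviewer",
        "prompt": "Review this diff for risky changes.",
        "system_prompt": "You are a strict code reviewer.",
        "max_tokens": 1024,
    },
]
```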