hanzo-mcp 0.5.0__py3-none-any.whl → 0.5.2__py3-none-any.whl

This diff compares publicly available package versions released to one of the supported registries. The information is provided for informational purposes only and reflects the changes between versions as they appear in their respective public registries.

Potentially problematic release. This version of hanzo-mcp might be problematic.
Files changed (60)
  1. hanzo_mcp/__init__.py +1 -1
  2. hanzo_mcp/config/settings.py +61 -0
  3. hanzo_mcp/tools/__init__.py +158 -12
  4. hanzo_mcp/tools/common/base.py +7 -2
  5. hanzo_mcp/tools/common/config_tool.py +396 -0
  6. hanzo_mcp/tools/common/stats.py +261 -0
  7. hanzo_mcp/tools/common/tool_disable.py +144 -0
  8. hanzo_mcp/tools/common/tool_enable.py +182 -0
  9. hanzo_mcp/tools/common/tool_list.py +263 -0
  10. hanzo_mcp/tools/database/__init__.py +71 -0
  11. hanzo_mcp/tools/database/database_manager.py +246 -0
  12. hanzo_mcp/tools/database/graph_add.py +257 -0
  13. hanzo_mcp/tools/database/graph_query.py +536 -0
  14. hanzo_mcp/tools/database/graph_remove.py +267 -0
  15. hanzo_mcp/tools/database/graph_search.py +348 -0
  16. hanzo_mcp/tools/database/graph_stats.py +345 -0
  17. hanzo_mcp/tools/database/sql_query.py +229 -0
  18. hanzo_mcp/tools/database/sql_search.py +296 -0
  19. hanzo_mcp/tools/database/sql_stats.py +254 -0
  20. hanzo_mcp/tools/editor/__init__.py +11 -0
  21. hanzo_mcp/tools/editor/neovim_command.py +272 -0
  22. hanzo_mcp/tools/editor/neovim_edit.py +290 -0
  23. hanzo_mcp/tools/editor/neovim_session.py +356 -0
  24. hanzo_mcp/tools/filesystem/__init__.py +20 -1
  25. hanzo_mcp/tools/filesystem/batch_search.py +812 -0
  26. hanzo_mcp/tools/filesystem/find_files.py +348 -0
  27. hanzo_mcp/tools/filesystem/git_search.py +505 -0
  28. hanzo_mcp/tools/llm/__init__.py +27 -0
  29. hanzo_mcp/tools/llm/consensus_tool.py +351 -0
  30. hanzo_mcp/tools/llm/llm_manage.py +413 -0
  31. hanzo_mcp/tools/llm/llm_tool.py +346 -0
  32. hanzo_mcp/tools/llm/provider_tools.py +412 -0
  33. hanzo_mcp/tools/mcp/__init__.py +11 -0
  34. hanzo_mcp/tools/mcp/mcp_add.py +263 -0
  35. hanzo_mcp/tools/mcp/mcp_remove.py +127 -0
  36. hanzo_mcp/tools/mcp/mcp_stats.py +165 -0
  37. hanzo_mcp/tools/shell/__init__.py +27 -7
  38. hanzo_mcp/tools/shell/logs.py +265 -0
  39. hanzo_mcp/tools/shell/npx.py +194 -0
  40. hanzo_mcp/tools/shell/npx_background.py +254 -0
  41. hanzo_mcp/tools/shell/pkill.py +262 -0
  42. hanzo_mcp/tools/shell/processes.py +279 -0
  43. hanzo_mcp/tools/shell/run_background.py +326 -0
  44. hanzo_mcp/tools/shell/uvx.py +187 -0
  45. hanzo_mcp/tools/shell/uvx_background.py +249 -0
  46. hanzo_mcp/tools/vector/__init__.py +21 -12
  47. hanzo_mcp/tools/vector/ast_analyzer.py +459 -0
  48. hanzo_mcp/tools/vector/git_ingester.py +485 -0
  49. hanzo_mcp/tools/vector/index_tool.py +358 -0
  50. hanzo_mcp/tools/vector/infinity_store.py +465 -1
  51. hanzo_mcp/tools/vector/mock_infinity.py +162 -0
  52. hanzo_mcp/tools/vector/vector_index.py +7 -6
  53. hanzo_mcp/tools/vector/vector_search.py +22 -7
  54. {hanzo_mcp-0.5.0.dist-info → hanzo_mcp-0.5.2.dist-info}/METADATA +68 -20
  55. hanzo_mcp-0.5.2.dist-info/RECORD +106 -0
  56. hanzo_mcp-0.5.0.dist-info/RECORD +0 -63
  57. {hanzo_mcp-0.5.0.dist-info → hanzo_mcp-0.5.2.dist-info}/WHEEL +0 -0
  58. {hanzo_mcp-0.5.0.dist-info → hanzo_mcp-0.5.2.dist-info}/entry_points.txt +0 -0
  59. {hanzo_mcp-0.5.0.dist-info → hanzo_mcp-0.5.2.dist-info}/licenses/LICENSE +0 -0
  60. {hanzo_mcp-0.5.0.dist-info → hanzo_mcp-0.5.2.dist-info}/top_level.txt +0 -0
hanzo_mcp/tools/llm/llm_tool.py
@@ -0,0 +1,346 @@
+"""Universal LLM tool using LiteLLM."""
+
+import os
+import json
+from typing import Annotated, Optional, TypedDict, Unpack, final, override, List, Dict, Any
+import asyncio
+
+from fastmcp import Context as MCPContext
+from pydantic import Field
+
+from hanzo_mcp.tools.common.base import BaseTool
+from hanzo_mcp.tools.common.context import create_tool_context
+
+try:
+    import litellm
+    from litellm import completion, acompletion
+    LITELLM_AVAILABLE = True
+except ImportError:
+    LITELLM_AVAILABLE = False
+
+
+Model = Annotated[
+    str,
+    Field(
+        description="Model name in LiteLLM format (e.g., 'gpt-4', 'claude-3-opus-20240229', 'gemini/gemini-pro')",
+        min_length=1,
+    ),
+]
+
+Prompt = Annotated[
+    str,
+    Field(
+        description="The prompt or question to send to the model",
+        min_length=1,
+    ),
+]
+
+SystemPrompt = Annotated[
+    Optional[str],
+    Field(
+        description="System prompt to set context",
+        default=None,
+    ),
+]
+
+Temperature = Annotated[
+    float,
+    Field(
+        description="Temperature for response randomness (0.0-2.0)",
+        default=0.7,
+    ),
+]
+
+MaxTokens = Annotated[
+    Optional[int],
+    Field(
+        description="Maximum tokens in response",
+        default=None,
+    ),
+]
+
+JsonMode = Annotated[
+    bool,
+    Field(
+        description="Request JSON formatted response",
+        default=False,
+    ),
+]
+
+Stream = Annotated[
+    bool,
+    Field(
+        description="Stream the response",
+        default=False,
+    ),
+]
+
+
+class LLMToolParams(TypedDict, total=False):
+    """Parameters for LLM tool."""
+
+    model: str
+    prompt: str
+    system_prompt: Optional[str]
+    temperature: float
+    max_tokens: Optional[int]
+    json_mode: bool
+    stream: bool
+
+
+@final
+class LLMTool(BaseTool):
+    """Universal LLM tool using LiteLLM."""
+
+    # Common environment variables for API keys
+    API_KEY_ENV_VARS = {
+        "openai": ["OPENAI_API_KEY"],
+        "anthropic": ["ANTHROPIC_API_KEY", "CLAUDE_API_KEY"],
+        "google": ["GOOGLE_API_KEY", "GEMINI_API_KEY", "GOOGLE_APPLICATION_CREDENTIALS"],
+        "groq": ["GROQ_API_KEY"],
+        "cohere": ["COHERE_API_KEY"],
+        "replicate": ["REPLICATE_API_KEY"],
+        "huggingface": ["HUGGINGFACE_API_KEY", "HF_TOKEN"],
+        "together": ["TOGETHER_API_KEY", "TOGETHERAI_API_KEY"],
+        "mistral": ["MISTRAL_API_KEY"],
+        "perplexity": ["PERPLEXITY_API_KEY"],
+        "anyscale": ["ANYSCALE_API_KEY"],
+        "deepinfra": ["DEEPINFRA_API_KEY"],
+        "ai21": ["AI21_API_KEY"],
+        "nvidia": ["NVIDIA_API_KEY"],
+        "voyage": ["VOYAGE_API_KEY"],
+        "aws": ["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"],  # For Bedrock
+        "azure": ["AZURE_API_KEY", "AZURE_OPENAI_API_KEY"],
+    }
+
+    # Model prefixes for each provider
+    PROVIDER_MODELS = {
+        "openai": ["gpt-4", "gpt-3.5", "o1", "davinci", "curie", "babbage", "ada"],
+        "anthropic": ["claude-3", "claude-2", "claude-instant"],
+        "google": ["gemini", "palm", "bison", "gecko"],
+        "groq": ["mixtral", "llama2", "llama3"],
+        "cohere": ["command", "command-light"],
+        "mistral": ["mistral-tiny", "mistral-small", "mistral-medium", "mistral-large"],
+        "perplexity": ["pplx", "sonar"],
+        "together": ["together"],
+        "bedrock": ["bedrock/"],
+        "azure": ["azure/"],
+    }
+
+    def __init__(self):
+        """Initialize the LLM tool."""
+        self.available_providers = self._detect_available_providers()
+
+        # Configure LiteLLM settings
+        if LITELLM_AVAILABLE:
+            # Enable verbose logging for debugging
+            litellm.set_verbose = False
+            # Set default timeout
+            litellm.request_timeout = 120
+
+    def _detect_available_providers(self) -> Dict[str, List[str]]:
+        """Detect which LLM providers have API keys configured."""
+        available = {}
+
+        for provider, env_vars in self.API_KEY_ENV_VARS.items():
+            for var in env_vars:
+                if os.getenv(var):
+                    if provider not in available:
+                        available[provider] = []
+                    available[provider].append(var)
+                    break
+
+        return available
+
+    @property
+    @override
+    def name(self) -> str:
+        """Get the tool name."""
+        return "llm"
+
+    @property
+    @override
+    def description(self) -> str:
+        """Get the tool description."""
+        providers_list = ", ".join(sorted(self.available_providers.keys())) if self.available_providers else "None"
+
+        return f"""Query any LLM using LiteLLM's unified interface.
+
+Supports 100+ models from various providers through a single interface.
+Automatically uses API keys from environment variables.
+
+Detected providers: {providers_list}
+
+Common models:
+- OpenAI: gpt-4o, gpt-4, gpt-3.5-turbo, o1-preview, o1-mini
+- Anthropic: claude-3-opus-20240229, claude-3-sonnet-20240229, claude-3-haiku-20240307
+- Google: gemini/gemini-pro, gemini/gemini-1.5-pro, gemini/gemini-1.5-flash
+- Groq: groq/mixtral-8x7b-32768, groq/llama3-70b-8192
+- Mistral: mistral/mistral-large-latest, mistral/mistral-medium
+- Perplexity: perplexity/sonar-medium-online
+- Together: together/mixtral-8x22b
+
+Examples:
+- llm --model "gpt-4" --prompt "Explain quantum computing"
+- llm --model "claude-3-opus-20240229" --prompt "Write a haiku about coding"
+- llm --model "gemini/gemini-pro" --prompt "What is the meaning of life?" --temperature 0.9
+- llm --model "groq/mixtral-8x7b-32768" --prompt "Generate a JSON schema" --json-mode
+
+For provider-specific tools, use: openai, anthropic, gemini, groq, etc.
+For consensus across models, use: consensus
+"""
+
+    @override
+    async def call(
+        self,
+        ctx: MCPContext,
+        **params: Unpack[LLMToolParams],
+    ) -> str:
+        """Query an LLM.
+
+        Args:
+            ctx: MCP context
+            **params: Tool parameters
+
+        Returns:
+            LLM response
+        """
+        tool_ctx = create_tool_context(ctx)
+        await tool_ctx.set_tool_info(self.name)
+
+        if not LITELLM_AVAILABLE:
+            return "Error: LiteLLM is not installed. Install it with: pip install litellm"
+
+        # Extract parameters
+        model = params.get("model")
+        if not model:
+            return "Error: model is required"
+
+        prompt = params.get("prompt")
+        if not prompt:
+            return "Error: prompt is required"
+
+        system_prompt = params.get("system_prompt")
+        temperature = params.get("temperature", 0.7)
+        max_tokens = params.get("max_tokens")
+        json_mode = params.get("json_mode", False)
+        stream = params.get("stream", False)
+
+        # Check if we have API key for this model
+        provider = self._get_provider_for_model(model)
+        if provider and provider not in self.available_providers:
+            env_vars = self.API_KEY_ENV_VARS.get(provider, [])
+            return f"Error: No API key found for {provider}. Set one of: {', '.join(env_vars)}"
+
+        # Build messages
+        messages = []
+        if system_prompt:
+            messages.append({"role": "system", "content": system_prompt})
+        messages.append({"role": "user", "content": prompt})
+
+        # Build kwargs
+        kwargs = {
+            "model": model,
+            "messages": messages,
+            "temperature": temperature,
+        }
+
+        if max_tokens:
+            kwargs["max_tokens"] = max_tokens
+
+        if json_mode:
+            kwargs["response_format"] = {"type": "json_object"}
+
+        await tool_ctx.info(f"Querying {model}...")
+
+        try:
+            if stream:
+                # Streaming response
+                response_text = ""
+                async for chunk in await acompletion(**kwargs, stream=True):
+                    if chunk.choices[0].delta.content:
+                        response_text += chunk.choices[0].delta.content
+                        # Could emit progress here if needed
+
+                return response_text
+            else:
+                # Non-streaming response
+                response = await acompletion(**kwargs)
+                return response.choices[0].message.content
+
+        except Exception as e:
+            error_msg = str(e)
+
+            # Provide helpful error messages
+            if "api_key" in error_msg.lower():
+                provider = self._get_provider_for_model(model)
+                env_vars = self.API_KEY_ENV_VARS.get(provider, [])
+                return f"Error: API key issue for {provider}. Make sure one of these is set: {', '.join(env_vars)}\n\nOriginal error: {error_msg}"
+            elif "model" in error_msg.lower() and "not found" in error_msg.lower():
+                return f"Error: Model '{model}' not found or not accessible. Check the model name and your API permissions.\n\nOriginal error: {error_msg}"
+            else:
+                return f"Error calling LLM: {error_msg}"
+
+    def _get_provider_for_model(self, model: str) -> Optional[str]:
+        """Determine the provider for a given model."""
+        model_lower = model.lower()
+
+        # Check explicit provider prefix (e.g., "groq/mixtral")
+        if "/" in model:
+            provider = model.split("/")[0]
+            return provider
+
+        # Check model prefixes
+        for provider, prefixes in self.PROVIDER_MODELS.items():
+            for prefix in prefixes:
+                if model_lower.startswith(prefix.lower()):
+                    return provider
+
+        # Default to OpenAI for unknown models
+        return "openai"
+
+    @classmethod
+    def get_all_models(cls) -> Dict[str, List[str]]:
+        """Get all available models from LiteLLM organized by provider."""
+        if not LITELLM_AVAILABLE:
+            return {}
+
+        try:
+            import litellm
+
+            # Get all models
+            all_models = litellm.model_list
+
+            # Organize by provider
+            providers = {}
+
+            for model in all_models:
+                # Extract provider
+                if "/" in model:
+                    provider = model.split("/")[0]
+                elif model.startswith("gpt"):
+                    provider = "openai"
+                elif model.startswith("claude"):
+                    provider = "anthropic"
+                elif model.startswith("gemini"):
+                    provider = "google"
+                elif model.startswith("command"):
+                    provider = "cohere"
+                else:
+                    provider = "other"
+
+                if provider not in providers:
+                    providers[provider] = []
+                providers[provider].append(model)
+
+            # Sort models within each provider
+            for provider in providers:
+                providers[provider] = sorted(providers[provider])
+
+            return providers
+        except Exception:
+            return {}
+
+    def register(self, mcp_server) -> None:
+        """Register this tool with the MCP server."""
+        pass
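
The heart of the new llm tool is the acompletion call above. For reference, the snippet below is a minimal, self-contained sketch of the same LiteLLM pattern used directly, outside the MCP tool: an optional system message plus a user prompt, sent once non-streaming and once streaming. It is not part of the package diff; the model name and prompts are placeholder values, and it assumes litellm is installed and an OPENAI_API_KEY is set in the environment.

    import asyncio
    from litellm import acompletion

    async def main() -> None:
        # Same message structure LLMTool.call builds: optional system prompt,
        # then the user prompt.
        messages = [
            {"role": "system", "content": "You are a concise assistant."},
            {"role": "user", "content": "Explain quantum computing in one sentence."},
        ]

        # Non-streaming: acompletion returns an OpenAI-style response object.
        response = await acompletion(model="gpt-4o-mini", messages=messages, temperature=0.7)
        print(response.choices[0].message.content)

        # Streaming: awaiting acompletion(stream=True) yields chunks whose
        # delta carries incremental content (may be None on the final chunk).
        stream = await acompletion(model="gpt-4o-mini", messages=messages, stream=True)
        async for chunk in stream:
            if chunk.choices[0].delta.content:
                print(chunk.choices[0].delta.content, end="", flush=True)

    asyncio.run(main())

Because LiteLLM normalizes every provider to this OpenAI-style response shape, the tool can treat gpt-4, claude-3-opus-20240229, and gemini/gemini-pro identically and only needs provider-specific logic for API-key detection.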