hanzo-mcp 0.7.7__py3-none-any.whl → 0.8.0__py3-none-any.whl

This diff shows the content of publicly available package versions as published to a supported public registry. It is provided for informational purposes only and reflects the changes between the two released versions.

This version of hanzo-mcp has been flagged as a potentially problematic release.

Files changed (178)
  1. hanzo_mcp/__init__.py +6 -0
  2. hanzo_mcp/__main__.py +1 -1
  3. hanzo_mcp/analytics/__init__.py +2 -2
  4. hanzo_mcp/analytics/posthog_analytics.py +76 -82
  5. hanzo_mcp/cli.py +31 -36
  6. hanzo_mcp/cli_enhanced.py +94 -72
  7. hanzo_mcp/cli_plugin.py +27 -17
  8. hanzo_mcp/config/__init__.py +2 -2
  9. hanzo_mcp/config/settings.py +112 -88
  10. hanzo_mcp/config/tool_config.py +32 -34
  11. hanzo_mcp/dev_server.py +66 -67
  12. hanzo_mcp/prompts/__init__.py +94 -12
  13. hanzo_mcp/prompts/enhanced_prompts.py +809 -0
  14. hanzo_mcp/prompts/example_custom_prompt.py +6 -5
  15. hanzo_mcp/prompts/project_todo_reminder.py +0 -1
  16. hanzo_mcp/prompts/tool_explorer.py +10 -7
  17. hanzo_mcp/server.py +17 -21
  18. hanzo_mcp/server_enhanced.py +15 -22
  19. hanzo_mcp/tools/__init__.py +56 -28
  20. hanzo_mcp/tools/agent/__init__.py +16 -19
  21. hanzo_mcp/tools/agent/agent.py +82 -65
  22. hanzo_mcp/tools/agent/agent_tool.py +152 -122
  23. hanzo_mcp/tools/agent/agent_tool_v1_deprecated.py +66 -62
  24. hanzo_mcp/tools/agent/clarification_protocol.py +55 -50
  25. hanzo_mcp/tools/agent/clarification_tool.py +11 -10
  26. hanzo_mcp/tools/agent/claude_cli_tool.py +21 -20
  27. hanzo_mcp/tools/agent/claude_desktop_auth.py +130 -144
  28. hanzo_mcp/tools/agent/cli_agent_base.py +59 -53
  29. hanzo_mcp/tools/agent/code_auth.py +102 -107
  30. hanzo_mcp/tools/agent/code_auth_tool.py +28 -27
  31. hanzo_mcp/tools/agent/codex_cli_tool.py +20 -19
  32. hanzo_mcp/tools/agent/critic_tool.py +86 -73
  33. hanzo_mcp/tools/agent/gemini_cli_tool.py +21 -20
  34. hanzo_mcp/tools/agent/grok_cli_tool.py +21 -20
  35. hanzo_mcp/tools/agent/iching_tool.py +404 -139
  36. hanzo_mcp/tools/agent/network_tool.py +89 -73
  37. hanzo_mcp/tools/agent/prompt.py +2 -1
  38. hanzo_mcp/tools/agent/review_tool.py +101 -98
  39. hanzo_mcp/tools/agent/swarm_alias.py +87 -0
  40. hanzo_mcp/tools/agent/swarm_tool.py +246 -161
  41. hanzo_mcp/tools/agent/swarm_tool_v1_deprecated.py +134 -92
  42. hanzo_mcp/tools/agent/tool_adapter.py +21 -11
  43. hanzo_mcp/tools/common/__init__.py +1 -1
  44. hanzo_mcp/tools/common/base.py +3 -5
  45. hanzo_mcp/tools/common/batch_tool.py +46 -39
  46. hanzo_mcp/tools/common/config_tool.py +120 -84
  47. hanzo_mcp/tools/common/context.py +1 -5
  48. hanzo_mcp/tools/common/context_fix.py +5 -3
  49. hanzo_mcp/tools/common/critic_tool.py +4 -8
  50. hanzo_mcp/tools/common/decorators.py +58 -56
  51. hanzo_mcp/tools/common/enhanced_base.py +29 -32
  52. hanzo_mcp/tools/common/fastmcp_pagination.py +91 -94
  53. hanzo_mcp/tools/common/forgiving_edit.py +91 -87
  54. hanzo_mcp/tools/common/mode.py +15 -17
  55. hanzo_mcp/tools/common/mode_loader.py +27 -24
  56. hanzo_mcp/tools/common/paginated_base.py +61 -53
  57. hanzo_mcp/tools/common/paginated_response.py +72 -79
  58. hanzo_mcp/tools/common/pagination.py +50 -53
  59. hanzo_mcp/tools/common/permissions.py +4 -4
  60. hanzo_mcp/tools/common/personality.py +186 -138
  61. hanzo_mcp/tools/common/plugin_loader.py +54 -54
  62. hanzo_mcp/tools/common/stats.py +65 -47
  63. hanzo_mcp/tools/common/test_helpers.py +31 -0
  64. hanzo_mcp/tools/common/thinking_tool.py +4 -8
  65. hanzo_mcp/tools/common/tool_disable.py +17 -12
  66. hanzo_mcp/tools/common/tool_enable.py +13 -14
  67. hanzo_mcp/tools/common/tool_list.py +36 -28
  68. hanzo_mcp/tools/common/truncate.py +23 -23
  69. hanzo_mcp/tools/config/__init__.py +4 -4
  70. hanzo_mcp/tools/config/config_tool.py +42 -29
  71. hanzo_mcp/tools/config/index_config.py +37 -34
  72. hanzo_mcp/tools/config/mode_tool.py +175 -55
  73. hanzo_mcp/tools/database/__init__.py +15 -12
  74. hanzo_mcp/tools/database/database_manager.py +77 -75
  75. hanzo_mcp/tools/database/graph.py +137 -91
  76. hanzo_mcp/tools/database/graph_add.py +30 -18
  77. hanzo_mcp/tools/database/graph_query.py +178 -102
  78. hanzo_mcp/tools/database/graph_remove.py +33 -28
  79. hanzo_mcp/tools/database/graph_search.py +97 -75
  80. hanzo_mcp/tools/database/graph_stats.py +91 -59
  81. hanzo_mcp/tools/database/sql.py +107 -79
  82. hanzo_mcp/tools/database/sql_query.py +30 -24
  83. hanzo_mcp/tools/database/sql_search.py +29 -25
  84. hanzo_mcp/tools/database/sql_stats.py +47 -35
  85. hanzo_mcp/tools/editor/neovim_command.py +25 -28
  86. hanzo_mcp/tools/editor/neovim_edit.py +21 -23
  87. hanzo_mcp/tools/editor/neovim_session.py +60 -54
  88. hanzo_mcp/tools/filesystem/__init__.py +31 -30
  89. hanzo_mcp/tools/filesystem/ast_multi_edit.py +329 -249
  90. hanzo_mcp/tools/filesystem/ast_tool.py +4 -4
  91. hanzo_mcp/tools/filesystem/base.py +1 -1
  92. hanzo_mcp/tools/filesystem/batch_search.py +316 -224
  93. hanzo_mcp/tools/filesystem/content_replace.py +4 -4
  94. hanzo_mcp/tools/filesystem/diff.py +71 -59
  95. hanzo_mcp/tools/filesystem/directory_tree.py +7 -7
  96. hanzo_mcp/tools/filesystem/directory_tree_paginated.py +49 -37
  97. hanzo_mcp/tools/filesystem/edit.py +4 -4
  98. hanzo_mcp/tools/filesystem/find.py +173 -80
  99. hanzo_mcp/tools/filesystem/find_files.py +73 -52
  100. hanzo_mcp/tools/filesystem/git_search.py +157 -104
  101. hanzo_mcp/tools/filesystem/grep.py +8 -8
  102. hanzo_mcp/tools/filesystem/multi_edit.py +4 -8
  103. hanzo_mcp/tools/filesystem/read.py +12 -10
  104. hanzo_mcp/tools/filesystem/rules_tool.py +59 -43
  105. hanzo_mcp/tools/filesystem/search_tool.py +263 -207
  106. hanzo_mcp/tools/filesystem/symbols_tool.py +94 -54
  107. hanzo_mcp/tools/filesystem/tree.py +35 -33
  108. hanzo_mcp/tools/filesystem/unix_aliases.py +13 -18
  109. hanzo_mcp/tools/filesystem/watch.py +37 -36
  110. hanzo_mcp/tools/filesystem/write.py +4 -8
  111. hanzo_mcp/tools/jupyter/__init__.py +4 -4
  112. hanzo_mcp/tools/jupyter/base.py +4 -5
  113. hanzo_mcp/tools/jupyter/jupyter.py +67 -47
  114. hanzo_mcp/tools/jupyter/notebook_edit.py +4 -4
  115. hanzo_mcp/tools/jupyter/notebook_read.py +4 -7
  116. hanzo_mcp/tools/llm/__init__.py +5 -7
  117. hanzo_mcp/tools/llm/consensus_tool.py +72 -52
  118. hanzo_mcp/tools/llm/llm_manage.py +101 -60
  119. hanzo_mcp/tools/llm/llm_tool.py +226 -166
  120. hanzo_mcp/tools/llm/provider_tools.py +25 -26
  121. hanzo_mcp/tools/lsp/__init__.py +1 -1
  122. hanzo_mcp/tools/lsp/lsp_tool.py +228 -143
  123. hanzo_mcp/tools/mcp/__init__.py +2 -3
  124. hanzo_mcp/tools/mcp/mcp_add.py +27 -25
  125. hanzo_mcp/tools/mcp/mcp_remove.py +7 -8
  126. hanzo_mcp/tools/mcp/mcp_stats.py +23 -22
  127. hanzo_mcp/tools/mcp/mcp_tool.py +129 -98
  128. hanzo_mcp/tools/memory/__init__.py +39 -21
  129. hanzo_mcp/tools/memory/knowledge_tools.py +124 -99
  130. hanzo_mcp/tools/memory/memory_tools.py +90 -108
  131. hanzo_mcp/tools/search/__init__.py +7 -2
  132. hanzo_mcp/tools/search/find_tool.py +297 -212
  133. hanzo_mcp/tools/search/unified_search.py +366 -314
  134. hanzo_mcp/tools/shell/__init__.py +8 -7
  135. hanzo_mcp/tools/shell/auto_background.py +56 -49
  136. hanzo_mcp/tools/shell/base.py +1 -1
  137. hanzo_mcp/tools/shell/base_process.py +75 -75
  138. hanzo_mcp/tools/shell/bash_session.py +2 -2
  139. hanzo_mcp/tools/shell/bash_session_executor.py +4 -4
  140. hanzo_mcp/tools/shell/bash_tool.py +24 -31
  141. hanzo_mcp/tools/shell/command_executor.py +12 -12
  142. hanzo_mcp/tools/shell/logs.py +43 -33
  143. hanzo_mcp/tools/shell/npx.py +13 -13
  144. hanzo_mcp/tools/shell/npx_background.py +24 -21
  145. hanzo_mcp/tools/shell/npx_tool.py +18 -22
  146. hanzo_mcp/tools/shell/open.py +19 -21
  147. hanzo_mcp/tools/shell/pkill.py +31 -26
  148. hanzo_mcp/tools/shell/process_tool.py +32 -32
  149. hanzo_mcp/tools/shell/processes.py +57 -58
  150. hanzo_mcp/tools/shell/run_background.py +24 -25
  151. hanzo_mcp/tools/shell/run_command.py +5 -5
  152. hanzo_mcp/tools/shell/run_command_windows.py +5 -5
  153. hanzo_mcp/tools/shell/session_storage.py +3 -3
  154. hanzo_mcp/tools/shell/streaming_command.py +141 -126
  155. hanzo_mcp/tools/shell/uvx.py +24 -25
  156. hanzo_mcp/tools/shell/uvx_background.py +35 -33
  157. hanzo_mcp/tools/shell/uvx_tool.py +18 -22
  158. hanzo_mcp/tools/todo/__init__.py +6 -2
  159. hanzo_mcp/tools/todo/todo.py +50 -37
  160. hanzo_mcp/tools/todo/todo_read.py +5 -8
  161. hanzo_mcp/tools/todo/todo_write.py +5 -7
  162. hanzo_mcp/tools/vector/__init__.py +40 -28
  163. hanzo_mcp/tools/vector/ast_analyzer.py +176 -143
  164. hanzo_mcp/tools/vector/git_ingester.py +170 -179
  165. hanzo_mcp/tools/vector/index_tool.py +96 -44
  166. hanzo_mcp/tools/vector/infinity_store.py +283 -228
  167. hanzo_mcp/tools/vector/mock_infinity.py +39 -40
  168. hanzo_mcp/tools/vector/project_manager.py +88 -78
  169. hanzo_mcp/tools/vector/vector.py +59 -42
  170. hanzo_mcp/tools/vector/vector_index.py +30 -27
  171. hanzo_mcp/tools/vector/vector_search.py +64 -45
  172. hanzo_mcp/types.py +6 -4
  173. {hanzo_mcp-0.7.7.dist-info → hanzo_mcp-0.8.0.dist-info}/METADATA +1 -1
  174. hanzo_mcp-0.8.0.dist-info/RECORD +185 -0
  175. hanzo_mcp-0.7.7.dist-info/RECORD +0 -182
  176. {hanzo_mcp-0.7.7.dist-info → hanzo_mcp-0.8.0.dist-info}/WHEEL +0 -0
  177. {hanzo_mcp-0.7.7.dist-info → hanzo_mcp-0.8.0.dist-info}/entry_points.txt +0 -0
  178. {hanzo_mcp-0.7.7.dist-info → hanzo_mcp-0.8.0.dist-info}/top_level.txt +0 -0
hanzo_mcp/tools/llm/consensus_tool.py

@@ -1,17 +1,23 @@
 """Consensus tool for querying multiple LLMs in parallel."""

 import asyncio
-import json
-from typing import Annotated, Optional, TypedDict, Unpack, final, override, List, Dict, Any
-from datetime import datetime
+from typing import (
+    Dict,
+    List,
+    Unpack,
+    Optional,
+    Annotated,
+    TypedDict,
+    final,
+    override,
+)

-from mcp.server.fastmcp import Context as MCPContext
 from pydantic import Field
+from mcp.server.fastmcp import Context as MCPContext

 from hanzo_mcp.tools.common.base import BaseTool
-from hanzo_mcp.tools.common.context import create_tool_context
 from hanzo_mcp.tools.llm.llm_tool import LLMTool
-
+from hanzo_mcp.tools.common.context import create_tool_context

 Prompt = Annotated[
     str,
@@ -94,7 +100,7 @@ class ConsensusToolParams(TypedDict, total=False):
 @final
 class ConsensusTool(BaseTool):
     """Tool for getting consensus from multiple LLMs."""
-
+
     # Default models to use if none specified - mix of fast and powerful models
     DEFAULT_MODELS = [
         "gpt-4o-mini",  # OpenAI's fast model
@@ -119,13 +125,13 @@ class ConsensusTool(BaseTool):
     def description(self) -> str:
         """Get the tool description."""
         available_providers = list(self.llm_tool.available_providers.keys())
-
+
         return f"""Query multiple LLMs in parallel and get a consensus response.

 Queries multiple models simultaneously, then uses another model to
 synthesize and analyze the responses for consensus, disagreements, and insights.

-Available providers: {', '.join(available_providers)}
+Available providers: {", ".join(available_providers)}

 Default models (if available):
 - GPT-4 (OpenAI)
@@ -182,35 +188,40 @@ The tool will:
         # Filter models to only those with available API keys
         available_models = []
         skipped_models = []
-
+
         for model in models:
             provider = self.llm_tool._get_provider_for_model(model)
             if provider in self.llm_tool.available_providers:
                 available_models.append(model)
             else:
                 skipped_models.append((model, provider))
-
+
         if not available_models:
             return "Error: No models available with configured API keys. Please set API keys for at least one provider."
-
+
         await tool_ctx.info(f"Querying {len(available_models)} models in parallel...")
-
+
         if skipped_models:
             skipped_info = ", ".join([f"{m[0]} ({m[1]})" for m in skipped_models])
             await tool_ctx.info(f"Skipping models without API keys: {skipped_info}")

         # Query all models in parallel
         results = await self._query_models_parallel(
-            available_models, prompt, system_prompt,
-            temperature, max_tokens, timeout
+            available_models, prompt, system_prompt, temperature, max_tokens, timeout
         )

         # Prepare summary of results
-        successful_responses = [(m, r) for m, r in results.items() if not r.startswith("Error:")]
-        failed_responses = [(m, r) for m, r in results.items() if r.startswith("Error:")]
+        successful_responses = [
+            (m, r) for m, r in results.items() if not r.startswith("Error:")
+        ]
+        failed_responses = [
+            (m, r) for m, r in results.items() if r.startswith("Error:")
+        ]

         if not successful_responses:
-            return "Error: All model queries failed:\n\n" + "\n".join([f"{m}: {r}" for m, r in failed_responses])
+            return "Error: All model queries failed:\n\n" + "\n".join(
+                [f"{m}: {r}" for m, r in failed_responses]
+            )

         # Use aggregation model to synthesize responses
         consensus = await self._aggregate_responses(
@@ -222,33 +233,40 @@ The tool will:
         output.append(f"Query: {prompt}")
         output.append(f"Models queried: {len(available_models)}")
         output.append(f"Successful responses: {len(successful_responses)}")
-
+
         if failed_responses:
             output.append(f"Failed responses: {len(failed_responses)}")
-
+
         output.append("")
         output.append("=== Consensus Summary ===")
         output.append(consensus)
-
+
         if include_raw:
             output.append("\n=== Individual Responses ===")
             for model, response in successful_responses:
                 output.append(f"\n--- {model} ---")
-                output.append(response[:500] + "..." if len(response) > 500 else response)
-
+                output.append(
+                    response[:500] + "..." if len(response) > 500 else response
+                )
+
         if failed_responses:
             output.append("\n=== Failed Queries ===")
             for model, error in failed_responses:
                 output.append(f"{model}: {error}")
-
+
         return "\n".join(output)

     async def _query_models_parallel(
-        self, models: List[str], prompt: str,
-        system_prompt: Optional[str], temperature: float,
-        max_tokens: Optional[int], timeout: int
+        self,
+        models: List[str],
+        prompt: str,
+        system_prompt: Optional[str],
+        temperature: float,
+        max_tokens: Optional[int],
+        timeout: int,
     ) -> Dict[str, str]:
         """Query multiple models in parallel."""
+
         async def query_with_timeout(model: str) -> tuple[str, str]:
             try:
                 params = {
@@ -260,37 +278,37 @@ The tool will:
                     params["system_prompt"] = system_prompt
                 if max_tokens:
                     params["max_tokens"] = max_tokens
-
+
                 # Create a mock context for the LLM tool
-                mock_ctx = type('MockContext', (), {'client': None})()
-
+                mock_ctx = type("MockContext", (), {"client": None})()
+
                 result = await asyncio.wait_for(
-                    self.llm_tool.call(mock_ctx, **params),
-                    timeout=timeout
+                    self.llm_tool.call(mock_ctx, **params), timeout=timeout
                 )
                 return (model, result)
             except asyncio.TimeoutError:
                 return (model, f"Error: Timeout after {timeout} seconds")
             except Exception as e:
                 return (model, f"Error: {str(e)}")
-
+
         # Run all queries in parallel
         tasks = [query_with_timeout(model) for model in models]
         results = await asyncio.gather(*tasks)
-
+
         return dict(results)

     async def _aggregate_responses(
-        self, responses: List[tuple[str, str]],
-        original_prompt: str, aggregation_model: str
+        self,
+        responses: List[tuple[str, str]],
+        original_prompt: str,
+        aggregation_model: str,
     ) -> str:
         """Use an LLM to aggregate and analyze responses."""
         # Prepare the aggregation prompt
-        response_summary = "\n\n".join([
-            f"Model: {model}\nResponse: {response}"
-            for model, response in responses
-        ])
-
+        response_summary = "\n\n".join(
+            [f"Model: {model}\nResponse: {response}" for model, response in responses]
+        )
+
         aggregation_prompt = f"""You are analyzing responses from multiple AI models to the following prompt:

 <original_prompt>
@@ -313,19 +331,19 @@ Be concise but thorough. Focus on providing actionable insights."""

         try:
             # Use the LLM tool to get the aggregation
-            mock_ctx = type('MockContext', (), {'client': None})()
-
+            mock_ctx = type("MockContext", (), {"client": None})()
+
             aggregation_params = {
                 "model": aggregation_model,
                 "prompt": aggregation_prompt,
                 "temperature": 0.3,  # Lower temperature for more consistent analysis
-                "system_prompt": "You are an expert at analyzing and synthesizing multiple AI responses to provide balanced, insightful consensus."
+                "system_prompt": "You are an expert at analyzing and synthesizing multiple AI responses to provide balanced, insightful consensus.",
             }
-
+
             result = await self.llm_tool.call(mock_ctx, **aggregation_params)
             return result
-
-        except Exception as e:
+
+        except Exception:
             # Fallback to simple aggregation if LLM fails
             return self._simple_aggregate(responses)

@@ -334,16 +352,18 @@
         output = []
         output.append("Summary of responses:")
         output.append("")
-
+
         # Find common themes (very basic)
         all_text = " ".join([r[1] for r in responses]).lower()
-
+
         output.append("Response lengths:")
         for model, response in responses:
             output.append(f"- {model}: {len(response)} characters")
-
-        output.append("\nNote: Advanced consensus analysis unavailable. Showing basic summary only.")
-
+
+        output.append(
+            "\nNote: Advanced consensus analysis unavailable. Showing basic summary only."
+        )
+
         return "\n".join(output)

     def register(self, mcp_server) -> None:
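
Most of the consensus_tool.py changes above are Black-style reformatting; the one pattern worth calling out is the fan-out in _query_models_parallel: all models are queried concurrently, each query carries its own timeout, and failures are folded into the result set as "Error:" strings instead of raising. Below is a minimal, self-contained sketch of that pattern; fake_query is a hypothetical stand-in for LLMTool.call, which takes more parameters in the real code.

import asyncio

async def fake_query(model: str, prompt: str) -> str:
    # Stand-in for a real LLM call; just simulates latency and echoes.
    await asyncio.sleep(0.1)
    return f"{model}: received {prompt!r}"

async def query_models(models: list[str], prompt: str, timeout: float) -> dict[str, str]:
    async def one(model: str) -> tuple[str, str]:
        try:
            result = await asyncio.wait_for(fake_query(model, prompt), timeout=timeout)
            return (model, result)
        except asyncio.TimeoutError:
            return (model, f"Error: Timeout after {timeout} seconds")
        except Exception as e:
            return (model, f"Error: {e}")

    # gather() preserves task order, so dict() pairs each model with its response.
    return dict(await asyncio.gather(*(one(m) for m in models)))

if __name__ == "__main__":
    answers = asyncio.run(query_models(["model-a", "model-b"], "ping", timeout=5.0))
    for model, response in answers.items():
        print(model, "->", response)

Because each task catches its own exceptions, asyncio.gather never sees a failure, which is why the real tool can report per-model errors alongside successful responses.
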
hanzo_mcp/tools/llm/llm_manage.py

@@ -1,17 +1,15 @@
 """LLM management tool for enabling/disabling LLM providers."""

-import os
 import json
-from typing import Annotated, Optional, TypedDict, Unpack, final, override
+from typing import Unpack, Optional, Annotated, TypedDict, final, override
 from pathlib import Path

-from mcp.server.fastmcp import Context as MCPContext
 from pydantic import Field
+from mcp.server.fastmcp import Context as MCPContext

 from hanzo_mcp.tools.common.base import BaseTool
-from hanzo_mcp.tools.common.context import create_tool_context
 from hanzo_mcp.tools.llm.llm_tool import LLMTool
-
+from hanzo_mcp.tools.common.context import create_tool_context

 Action = Annotated[
     str,
@@ -49,7 +47,7 @@ class LLMManageParams(TypedDict, total=False):
 @final
 class LLMManageTool(BaseTool):
     """Tool for managing LLM providers."""
-
+
     def __init__(self):
         """Initialize the LLM management tool."""
         self.llm_tool = LLMTool()
@@ -61,16 +59,16 @@ class LLMManageTool(BaseTool):
         """Load provider configuration."""
         if self.config_file.exists():
             try:
-                with open(self.config_file, 'r') as f:
+                with open(self.config_file, "r") as f:
                     self.config = json.load(f)
-            except:
+            except Exception:
                 self.config = {"disabled_providers": []}
         else:
             self.config = {"disabled_providers": []}

     def _save_config(self):
         """Save provider configuration."""
-        with open(self.config_file, 'w') as f:
+        with open(self.config_file, "w") as f:
             json.dump(self.config, f, indent=2)

     @property
@@ -155,17 +153,17 @@ Providers are automatically detected based on environment variables:
         """List all providers and their status."""
         output = ["=== LLM Providers ==="]
         output.append("")
-
+
         # Get all possible providers
         all_providers = sorted(LLMTool.API_KEY_ENV_VARS.keys())
         available_providers = self.llm_tool.available_providers
         disabled_providers = self.config.get("disabled_providers", [])
-
+
         # Categorize providers
         active = []
         available_but_disabled = []
         no_api_key = []
-
+
         for provider in all_providers:
             if provider in available_providers:
                 if provider in disabled_providers:
@@ -174,29 +172,31 @@ Providers are automatically detected based on environment variables:
                     active.append(provider)
             else:
                 no_api_key.append(provider)
-
+
         # Show active providers
         if active:
             output.append("✅ Active Providers (API key found, enabled):")
             for provider in active:
                 env_vars = available_providers.get(provider, [])
                 output.append(f"  - {provider}: {', '.join(env_vars)}")
-
+
                 # Show example models
                 examples = self._get_example_models(provider)
                 if examples:
                     output.append(f"    Models: {', '.join(examples[:3])}")
             output.append("")
-
+
         # Show disabled providers
         if available_but_disabled:
             output.append("⚠️ Available but Disabled (API key found, disabled):")
             for provider in available_but_disabled:
                 env_vars = available_providers.get(provider, [])
                 output.append(f"  - {provider}: {', '.join(env_vars)}")
-                output.append(f"    Use: llm_manage --action enable --provider {provider}")
+                output.append(
+                    f"    Use: llm_manage --action enable --provider {provider}"
+                )
             output.append("")
-
+
         # Show providers without API keys
         if no_api_key:
             output.append("❌ No API Key Found:")
@@ -206,43 +206,50 @@ Providers are automatically detected based on environment variables:
             if len(no_api_key) > 10:
                 output.append(f"  ... and {len(no_api_key) - 10} more")
             output.append("")
-
+
         # Summary
         output.append("=== Summary ===")
         output.append(f"Total providers: {len(all_providers)}")
         output.append(f"Active: {len(active)}")
         output.append(f"Disabled: {len(available_but_disabled)}")
         output.append(f"No API key: {len(no_api_key)}")
-
+
         # Show available tools
         if active:
             output.append("\n=== Available LLM Tools ===")
             output.append("- llm: Universal LLM tool (all providers)")
             output.append("- consensus: Query multiple models in parallel")
-
+
             provider_tools = []
             for provider in active:
-                if provider in ["openai", "anthropic", "google", "groq", "mistral", "perplexity"]:
+                if provider in [
+                    "openai",
+                    "anthropic",
+                    "google",
+                    "groq",
+                    "mistral",
+                    "perplexity",
+                ]:
                     tool_name = "gemini" if provider == "google" else provider
                     provider_tools.append(tool_name)
-
+
             if provider_tools:
                 output.append(f"- Provider tools: {', '.join(provider_tools)}")
-
+
         return "\n".join(output)

     def _enable_provider(self, provider: Optional[str]) -> str:
         """Enable a provider."""
         if not provider:
             return "Error: provider is required for enable action"
-
+
         if provider not in self.llm_tool.available_providers:
             env_vars = LLMTool.API_KEY_ENV_VARS.get(provider, [])
             if env_vars:
                 return f"Error: No API key found for {provider}. Set one of: {', '.join(env_vars)}"
             else:
                 return f"Error: Unknown provider '{provider}'"
-
+
         disabled = self.config.get("disabled_providers", [])
         if provider in disabled:
             disabled.remove(provider)
@@ -256,7 +263,7 @@ Providers are automatically detected based on environment variables:
         """Disable a provider."""
         if not provider:
             return "Error: provider is required for disable action"
-
+
         disabled = self.config.get("disabled_providers", [])
         if provider not in disabled:
             disabled.append(provider)
@@ -270,29 +277,29 @@ Providers are automatically detected based on environment variables:
         """List all available models from LiteLLM."""
         try:
             from hanzo_mcp.tools.llm.llm_tool import LLMTool
-
+
             all_models = LLMTool.get_all_models()
-
+
             if not all_models:
                 return "No models available or LiteLLM not installed"
-
+
             output = ["=== Available LLM Models ==="]
-
+
             if provider:
                 # Show models for specific provider
                 provider_lower = provider.lower()
                 models = all_models.get(provider_lower, [])
-
+
                 if not models:
                     return f"No models found for provider '{provider}'"
-
+
                 output.append(f"\n{provider.upper()} ({len(models)} models):")
                 output.append("-" * 40)
-
+
                 # Show first 50 models
                 for model in models[:50]:
                     output.append(f"  {model}")
-
+
                 if len(models) > 50:
                     output.append(f"  ... and {len(models) - 50} more")
             else:
@@ -300,28 +307,41 @@ Providers are automatically detected based on environment variables:
                 total_models = sum(len(models) for models in all_models.values())
                 output.append(f"Total models available: {total_models}")
                 output.append("")
-
+
                 # Show providers with counts
                 for provider_name, models in sorted(all_models.items()):
                     if models:
                         output.append(f"{provider_name}: {len(models)} models")
-
-                output.append("\nUse 'llm_manage --action models --provider <name>' to see models for a specific provider")
-
+
+                output.append(
+                    "\nUse 'llm_manage --action models --provider <name>' to see models for a specific provider"
+                )
+
             # Show recommended models
             output.append("\n=== Recommended Models ===")
             recommended = {
                 "OpenAI": ["gpt-4o", "gpt-4o-mini", "gpt-3.5-turbo"],
-                "Anthropic": ["claude-3-opus-20240229", "claude-3-5-sonnet-20241022", "claude-3-haiku-20240307"],
+                "Anthropic": [
+                    "claude-3-opus-20240229",
+                    "claude-3-5-sonnet-20241022",
+                    "claude-3-haiku-20240307",
+                ],
                 "Google": ["gemini/gemini-1.5-pro", "gemini/gemini-1.5-flash"],
-                "Groq": ["groq/llama3-70b-8192", "groq/llama3-8b-8192", "groq/gemma2-9b-it"],
-                "Mistral": ["mistral/mistral-large-latest", "mistral/mistral-medium"],
+                "Groq": [
+                    "groq/llama3-70b-8192",
+                    "groq/llama3-8b-8192",
+                    "groq/gemma2-9b-it",
+                ],
+                "Mistral": [
+                    "mistral/mistral-large-latest",
+                    "mistral/mistral-medium",
+                ],
             }
-
+
             for provider_name, models in recommended.items():
                 available = LLMTool().available_providers
                 provider_key = provider_name.lower()
-
+
                 if provider_key in available:
                     output.append(f"\n{provider_name} (✅ API key found):")
                     for model in models:
@@ -330,17 +350,19 @@ Providers are automatically detected based on environment variables:
                     output.append(f"\n{provider_name} (❌ No API key):")
                     for model in models:
                         output.append(f"  - {model}")
-
+
             return "\n".join(output)
-
+
         except Exception as e:
             return f"Error listing models: {str(e)}"

-    async def _test_model(self, ctx: MCPContext, provider: Optional[str], model: Optional[str]) -> str:
+    async def _test_model(
+        self, ctx: MCPContext, provider: Optional[str], model: Optional[str]
+    ) -> str:
         """Test a model to verify it works."""
         if not model and not provider:
             return "Error: Either model or provider is required for test action"
-
+
         # Determine model to test
         if model:
             test_model = model
@@ -357,14 +379,14 @@ Providers are automatically detected based on environment variables:
             test_model = default_models.get(provider)
             if not test_model:
                 return f"Error: No default model for provider '{provider}'. Please specify a model."
-
+
         # Test the model
         test_prompt = "Hello! Please respond with 'OK' if you can hear me."
-
+
         output = [f"Testing model: {test_model}"]
         output.append(f"Prompt: {test_prompt}")
         output.append("")
-
+
         try:
             # Call the LLM
             params = {
@@ -373,9 +395,9 @@ Providers are automatically detected based on environment variables:
                 "max_tokens": 10,
                 "temperature": 0,
             }
-
+
             response = await self.llm_tool.call(ctx, **params)
-
+
             if response.startswith("Error:"):
                 output.append("❌ Test failed:")
                 output.append(response)
@@ -384,27 +406,46 @@ Providers are automatically detected based on environment variables:
                 output.append(f"Response: {response}")
                 output.append("")
                 output.append(f"Model '{test_model}' is working correctly.")
-
+
                 # Show provider info
                 detected_provider = self.llm_tool._get_provider_for_model(test_model)
                 if detected_provider:
                     output.append(f"Provider: {detected_provider}")
-
+
         except Exception as e:
             output.append("❌ Test failed with exception:")
             output.append(str(e))
-
+
         return "\n".join(output)

     def _get_example_models(self, provider: str) -> list[str]:
         """Get example models for a provider."""
         examples = {
             "openai": ["gpt-4o", "gpt-4", "gpt-3.5-turbo", "o1-preview"],
-            "anthropic": ["claude-3-opus-20240229", "claude-3-sonnet-20240229", "claude-3-haiku-20240307"],
-            "google": ["gemini/gemini-pro", "gemini/gemini-1.5-pro", "gemini/gemini-1.5-flash"],
-            "groq": ["groq/mixtral-8x7b-32768", "groq/llama3-70b-8192", "groq/llama3-8b-8192"],
-            "mistral": ["mistral/mistral-large-latest", "mistral/mistral-medium", "mistral/mistral-small"],
-            "perplexity": ["perplexity/sonar-medium-online", "perplexity/sonar-small-online"],
+            "anthropic": [
+                "claude-3-opus-20240229",
+                "claude-3-sonnet-20240229",
+                "claude-3-haiku-20240307",
+            ],
+            "google": [
+                "gemini/gemini-pro",
+                "gemini/gemini-1.5-pro",
+                "gemini/gemini-1.5-flash",
+            ],
+            "groq": [
+                "groq/mixtral-8x7b-32768",
+                "groq/llama3-70b-8192",
+                "groq/llama3-8b-8192",
+            ],
+            "mistral": [
+                "mistral/mistral-large-latest",
+                "mistral/mistral-medium",
+                "mistral/mistral-small",
+            ],
+            "perplexity": [
+                "perplexity/sonar-medium-online",
+                "perplexity/sonar-small-online",
+            ],
         }
         return examples.get(provider, [])
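
Beyond the reformatting, llm_manage.py carries one behavioral fix: _load_config previously used a bare `except:`, which also traps KeyboardInterrupt and SystemExit; it now catches Exception. A minimal sketch of the corrected load-with-fallback pattern follows; the path and function name here are hypothetical, not the module's actual API.

import json
from pathlib import Path

def load_config(path: Path) -> dict:
    """Return the stored config, or a safe default if the file is missing or unreadable."""
    default = {"disabled_providers": []}
    if not path.exists():
        return default
    try:
        with open(path, "r") as f:
            return json.load(f)
    except Exception:
        # Still swallows JSONDecodeError and OSError, but unlike a bare
        # `except:` it no longer intercepts KeyboardInterrupt or SystemExit.
        return default

print(load_config(Path.home() / ".hanzo" / "llm_config.json"))  # hypothetical location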