hanzo-mcp 0.8.11__py3-none-any.whl → 0.8.14__py3-none-any.whl

This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of hanzo-mcp might be problematic.

Files changed (154)
  1. hanzo_mcp/__init__.py +2 -4
  2. hanzo_mcp/analytics/posthog_analytics.py +3 -9
  3. hanzo_mcp/bridge.py +9 -25
  4. hanzo_mcp/cli.py +6 -15
  5. hanzo_mcp/cli_enhanced.py +5 -14
  6. hanzo_mcp/cli_plugin.py +3 -9
  7. hanzo_mcp/config/settings.py +6 -20
  8. hanzo_mcp/config/tool_config.py +1 -3
  9. hanzo_mcp/core/base_agent.py +88 -88
  10. hanzo_mcp/core/model_registry.py +238 -210
  11. hanzo_mcp/dev_server.py +5 -15
  12. hanzo_mcp/prompts/__init__.py +2 -6
  13. hanzo_mcp/prompts/project_todo_reminder.py +3 -9
  14. hanzo_mcp/prompts/tool_explorer.py +1 -3
  15. hanzo_mcp/prompts/utils.py +7 -21
  16. hanzo_mcp/server.py +13 -6
  17. hanzo_mcp/tools/__init__.py +10 -24
  18. hanzo_mcp/tools/agent/__init__.py +2 -1
  19. hanzo_mcp/tools/agent/agent.py +10 -30
  20. hanzo_mcp/tools/agent/agent_tool.py +5 -15
  21. hanzo_mcp/tools/agent/agent_tool_v1_deprecated.py +14 -41
  22. hanzo_mcp/tools/agent/claude_desktop_auth.py +3 -9
  23. hanzo_mcp/tools/agent/cli_agent_base.py +7 -24
  24. hanzo_mcp/tools/agent/cli_tools.py +75 -74
  25. hanzo_mcp/tools/agent/code_auth.py +1 -3
  26. hanzo_mcp/tools/agent/code_auth_tool.py +2 -6
  27. hanzo_mcp/tools/agent/critic_tool.py +8 -24
  28. hanzo_mcp/tools/agent/iching_tool.py +12 -36
  29. hanzo_mcp/tools/agent/network_tool.py +7 -18
  30. hanzo_mcp/tools/agent/prompt.py +1 -5
  31. hanzo_mcp/tools/agent/review_tool.py +10 -25
  32. hanzo_mcp/tools/agent/swarm_alias.py +1 -3
  33. hanzo_mcp/tools/agent/swarm_tool.py +9 -29
  34. hanzo_mcp/tools/agent/swarm_tool_v1_deprecated.py +11 -39
  35. hanzo_mcp/tools/agent/unified_cli_tools.py +38 -38
  36. hanzo_mcp/tools/common/batch_tool.py +15 -45
  37. hanzo_mcp/tools/common/config_tool.py +9 -28
  38. hanzo_mcp/tools/common/context.py +1 -3
  39. hanzo_mcp/tools/common/critic_tool.py +1 -3
  40. hanzo_mcp/tools/common/decorators.py +2 -6
  41. hanzo_mcp/tools/common/enhanced_base.py +2 -6
  42. hanzo_mcp/tools/common/fastmcp_pagination.py +4 -12
  43. hanzo_mcp/tools/common/forgiving_edit.py +9 -28
  44. hanzo_mcp/tools/common/mode.py +1 -5
  45. hanzo_mcp/tools/common/paginated_base.py +3 -11
  46. hanzo_mcp/tools/common/paginated_response.py +10 -30
  47. hanzo_mcp/tools/common/pagination.py +3 -9
  48. hanzo_mcp/tools/common/permissions.py +38 -11
  49. hanzo_mcp/tools/common/personality.py +9 -34
  50. hanzo_mcp/tools/common/plugin_loader.py +3 -15
  51. hanzo_mcp/tools/common/stats.py +6 -18
  52. hanzo_mcp/tools/common/thinking_tool.py +1 -3
  53. hanzo_mcp/tools/common/tool_disable.py +2 -6
  54. hanzo_mcp/tools/common/tool_list.py +2 -6
  55. hanzo_mcp/tools/common/validation.py +1 -3
  56. hanzo_mcp/tools/config/config_tool.py +7 -13
  57. hanzo_mcp/tools/config/index_config.py +1 -3
  58. hanzo_mcp/tools/config/mode_tool.py +5 -15
  59. hanzo_mcp/tools/database/database_manager.py +3 -9
  60. hanzo_mcp/tools/database/graph.py +1 -3
  61. hanzo_mcp/tools/database/graph_add.py +3 -9
  62. hanzo_mcp/tools/database/graph_query.py +11 -34
  63. hanzo_mcp/tools/database/graph_remove.py +3 -9
  64. hanzo_mcp/tools/database/graph_search.py +6 -20
  65. hanzo_mcp/tools/database/graph_stats.py +11 -33
  66. hanzo_mcp/tools/database/sql.py +4 -12
  67. hanzo_mcp/tools/database/sql_query.py +6 -10
  68. hanzo_mcp/tools/database/sql_search.py +2 -6
  69. hanzo_mcp/tools/database/sql_stats.py +5 -15
  70. hanzo_mcp/tools/editor/neovim_command.py +1 -3
  71. hanzo_mcp/tools/editor/neovim_edit.py +2 -2
  72. hanzo_mcp/tools/editor/neovim_session.py +7 -13
  73. hanzo_mcp/tools/filesystem/__init__.py +2 -3
  74. hanzo_mcp/tools/filesystem/ast_multi_edit.py +14 -43
  75. hanzo_mcp/tools/filesystem/base.py +4 -12
  76. hanzo_mcp/tools/filesystem/batch_search.py +35 -115
  77. hanzo_mcp/tools/filesystem/content_replace.py +4 -12
  78. hanzo_mcp/tools/filesystem/diff.py +2 -10
  79. hanzo_mcp/tools/filesystem/directory_tree.py +9 -27
  80. hanzo_mcp/tools/filesystem/directory_tree_paginated.py +5 -15
  81. hanzo_mcp/tools/filesystem/edit.py +6 -18
  82. hanzo_mcp/tools/filesystem/find.py +3 -9
  83. hanzo_mcp/tools/filesystem/find_files.py +2 -6
  84. hanzo_mcp/tools/filesystem/git_search.py +9 -24
  85. hanzo_mcp/tools/filesystem/grep.py +9 -27
  86. hanzo_mcp/tools/filesystem/multi_edit.py +6 -18
  87. hanzo_mcp/tools/filesystem/read.py +8 -26
  88. hanzo_mcp/tools/filesystem/rules_tool.py +6 -17
  89. hanzo_mcp/tools/filesystem/search_tool.py +18 -62
  90. hanzo_mcp/tools/filesystem/symbols_tool.py +5 -15
  91. hanzo_mcp/tools/filesystem/tree.py +1 -3
  92. hanzo_mcp/tools/filesystem/watch.py +1 -3
  93. hanzo_mcp/tools/filesystem/write.py +1 -3
  94. hanzo_mcp/tools/jupyter/base.py +6 -20
  95. hanzo_mcp/tools/jupyter/jupyter.py +4 -12
  96. hanzo_mcp/tools/jupyter/notebook_edit.py +11 -35
  97. hanzo_mcp/tools/jupyter/notebook_read.py +2 -6
  98. hanzo_mcp/tools/llm/consensus_tool.py +8 -24
  99. hanzo_mcp/tools/llm/llm_manage.py +2 -6
  100. hanzo_mcp/tools/llm/llm_tool.py +17 -58
  101. hanzo_mcp/tools/llm/llm_unified.py +18 -59
  102. hanzo_mcp/tools/llm/provider_tools.py +1 -3
  103. hanzo_mcp/tools/lsp/lsp_tool.py +5 -17
  104. hanzo_mcp/tools/mcp/mcp_add.py +1 -3
  105. hanzo_mcp/tools/mcp/mcp_stats.py +1 -3
  106. hanzo_mcp/tools/mcp/mcp_tool.py +9 -23
  107. hanzo_mcp/tools/memory/__init__.py +10 -27
  108. hanzo_mcp/tools/memory/knowledge_tools.py +7 -25
  109. hanzo_mcp/tools/memory/memory_tools.py +6 -18
  110. hanzo_mcp/tools/search/find_tool.py +10 -32
  111. hanzo_mcp/tools/search/unified_search.py +24 -78
  112. hanzo_mcp/tools/shell/__init__.py +2 -2
  113. hanzo_mcp/tools/shell/auto_background.py +2 -6
  114. hanzo_mcp/tools/shell/base.py +1 -5
  115. hanzo_mcp/tools/shell/base_process.py +5 -7
  116. hanzo_mcp/tools/shell/bash_session.py +7 -24
  117. hanzo_mcp/tools/shell/bash_session_executor.py +5 -15
  118. hanzo_mcp/tools/shell/bash_tool.py +3 -7
  119. hanzo_mcp/tools/shell/command_executor.py +33 -86
  120. hanzo_mcp/tools/shell/logs.py +4 -16
  121. hanzo_mcp/tools/shell/npx.py +2 -8
  122. hanzo_mcp/tools/shell/npx_tool.py +1 -3
  123. hanzo_mcp/tools/shell/pkill.py +4 -12
  124. hanzo_mcp/tools/shell/process_tool.py +2 -8
  125. hanzo_mcp/tools/shell/processes.py +5 -17
  126. hanzo_mcp/tools/shell/run_background.py +1 -3
  127. hanzo_mcp/tools/shell/run_command.py +1 -3
  128. hanzo_mcp/tools/shell/run_command_windows.py +1 -3
  129. hanzo_mcp/tools/shell/session_manager.py +2 -6
  130. hanzo_mcp/tools/shell/session_storage.py +2 -6
  131. hanzo_mcp/tools/shell/streaming_command.py +7 -23
  132. hanzo_mcp/tools/shell/uvx.py +4 -14
  133. hanzo_mcp/tools/shell/uvx_background.py +2 -6
  134. hanzo_mcp/tools/shell/uvx_tool.py +1 -3
  135. hanzo_mcp/tools/shell/zsh_tool.py +12 -20
  136. hanzo_mcp/tools/todo/todo.py +1 -3
  137. hanzo_mcp/tools/todo/todo_read.py +3 -9
  138. hanzo_mcp/tools/todo/todo_write.py +6 -18
  139. hanzo_mcp/tools/vector/__init__.py +3 -9
  140. hanzo_mcp/tools/vector/ast_analyzer.py +6 -20
  141. hanzo_mcp/tools/vector/git_ingester.py +10 -30
  142. hanzo_mcp/tools/vector/index_tool.py +3 -9
  143. hanzo_mcp/tools/vector/infinity_store.py +7 -27
  144. hanzo_mcp/tools/vector/mock_infinity.py +1 -3
  145. hanzo_mcp/tools/vector/project_manager.py +4 -12
  146. hanzo_mcp/tools/vector/vector.py +2 -6
  147. hanzo_mcp/tools/vector/vector_index.py +8 -8
  148. hanzo_mcp/tools/vector/vector_search.py +7 -21
  149. {hanzo_mcp-0.8.11.dist-info → hanzo_mcp-0.8.14.dist-info}/METADATA +2 -2
  150. hanzo_mcp-0.8.14.dist-info/RECORD +193 -0
  151. hanzo_mcp-0.8.11.dist-info/RECORD +0 -193
  152. {hanzo_mcp-0.8.11.dist-info → hanzo_mcp-0.8.14.dist-info}/WHEEL +0 -0
  153. {hanzo_mcp-0.8.11.dist-info → hanzo_mcp-0.8.14.dist-info}/entry_points.txt +0 -0
  154. {hanzo_mcp-0.8.11.dist-info → hanzo_mcp-0.8.14.dist-info}/top_level.txt +0 -0
@@ -294,9 +294,7 @@ Available: {", ".join(available) if available else "None"}"""
  # Other actions can fall through to regular path if available

  if not LITELLM_AVAILABLE:
- return (
- "Error: LiteLLM is not installed. Install it with: pip install litellm"
- )
+ return "Error: LiteLLM is not installed. Install it with: pip install litellm"

  # Extract action
  action = params.get("action", "query")
@@ -315,9 +313,7 @@ Available: {", ".join(available) if available else "None"}"""
  elif action == "disable":
  return self._handle_disable(params.get("provider"))
  elif action == "test":
- return await self._handle_test(
- tool_ctx, params.get("model"), params.get("provider")
- )
+ return await self._handle_test(tool_ctx, params.get("model"), params.get("provider"))
  else:
  return f"Error: Unknown action '{action}'. Valid actions: query, consensus, list, models, enable, disable, test"

@@ -407,9 +403,7 @@ Available: {", ".join(available) if available else "None"}"""
  models = params.get("models")
  if not models:
  # Use configured or default models
- consensus_size = params.get("consensus_size") or self.config.get(
- "consensus_size", 3
- )
+ consensus_size = params.get("consensus_size") or self.config.get("consensus_size", 3)
  models = self._get_consensus_models(consensus_size)

  if not models:
@@ -449,11 +443,7 @@ Available: {", ".join(available) if available else "None"}"""
  if devil_model:
  # Create devil's advocate prompt
  responses_text = "\n\n".join(
- [
- f"Model {i + 1}: {resp['response']}"
- for i, resp in enumerate(responses)
- if resp["response"]
- ]
+ [f"Model {i + 1}: {resp['response']}" for i, resp in enumerate(responses) if resp["response"]]
  )

  devil_prompt = f"""You are a critical analyst. Review these responses to the question below and provide a devil's advocate perspective. Challenge assumptions, point out weaknesses, and suggest alternative viewpoints.
@@ -477,14 +467,10 @@ Provide your critical analysis:"""
  }

  # Aggregate responses
- judge_model = params.get("judge_model") or self.config.get(
- "default_judge_model", "gpt-4o"
- )
+ judge_model = params.get("judge_model") or self.config.get("default_judge_model", "gpt-4o")
  include_raw = params.get("include_raw", False)

- return await self._aggregate_consensus(
- responses, prompt, judge_model, include_raw, devil_response, tool_ctx
- )
+ return await self._aggregate_consensus(responses, prompt, judge_model, include_raw, devil_response, tool_ctx)

  def _handle_list(self) -> str:
  """List available providers."""
@@ -518,9 +504,7 @@ Provide your critical analysis:"""
  output.append(f"{provider}: {status}")
  output.append(f" Environment variables: {', '.join(env_vars)}")

- output.append(
- "\nUse 'llm --action enable/disable --provider <name>' to manage providers"
- )
+ output.append("\nUse 'llm --action enable/disable --provider <name>' to manage providers")

  return "\n".join(output)

@@ -560,16 +544,10 @@ Provide your critical analysis:"""
  # Show providers with counts
  for provider_name, models in sorted(all_models.items()):
  if models:
- available = (
- "✅" if provider_name in self.available_providers else "❌"
- )
- output.append(
- f"{available} {provider_name}: {len(models)} models"
- )
+ available = "✅" if provider_name in self.available_providers else "❌"
+ output.append(f"{available} {provider_name}: {len(models)} models")

- output.append(
- "\nUse 'llm --action models --provider <name>' to see specific models"
- )
+ output.append("\nUse 'llm --action models --provider <name>' to see specific models")

  return "\n".join(output)

@@ -608,9 +586,7 @@ Provide your critical analysis:"""
  else:
  return f"{provider} is already disabled"

- async def _handle_test(
- self, tool_ctx, model: Optional[str], provider: Optional[str]
- ) -> str:
+ async def _handle_test(self, tool_ctx, model: Optional[str], provider: Optional[str]) -> str:
  """Test a model or provider."""
  if not model and not provider:
  return "Error: Either model or provider is required for test action"
@@ -666,11 +642,7 @@ Provide your critical analysis:"""
  break

  provider = self._get_provider_for_model(model)
- if (
- provider
- and provider in self.available_providers
- and provider not in disabled
- ):
+ if provider and provider in self.available_providers and provider not in disabled:
  models.append(model)

  # If still need more, add from available providers
@@ -703,9 +675,7 @@ Provide your critical analysis:"""
  """Query multiple models in parallel."""

  async def query_with_info(model: str) -> Dict[str, Any]:
- result = await self._query_single_model(
- model, prompt, system_prompt, temperature, max_tokens
- )
+ result = await self._query_single_model(model, prompt, system_prompt, temperature, max_tokens)
  return {
  "model": model,
  "response": result.get("response"),
@@ -784,12 +754,7 @@ Provide your critical analysis:"""
  return "Error: All models failed to respond"

  # Format responses for aggregation
- responses_text = "\n\n".join(
- [
- f"Model: {r['model']}\nResponse: {r['response']}"
- for r in successful_responses
- ]
- )
+ responses_text = "\n\n".join([f"Model: {r['model']}\nResponse: {r['response']}" for r in successful_responses])

  if devil_response:
  responses_text += f"\n\nDevil's Advocate ({devil_response['model']}):\n{devil_response['response']}"
@@ -818,23 +783,17 @@ Be concise and highlight the most important findings."""
  if tool_ctx:
  await tool_ctx.info(f"Aggregating responses with {judge_model}...")

- judge_result = await self._query_single_model(
- judge_model, aggregation_prompt, None, 0.3, None
- )
+ judge_result = await self._query_single_model(judge_model, aggregation_prompt, None, 0.3, None)

  if not judge_result["success"]:
  return f"Error: Judge model failed: {judge_result.get('error', 'Unknown error')}"

  # Format output
- output = [
- f"=== Consensus Analysis ({len(successful_responses)} models) ===\n"
- ]
+ output = [f"=== Consensus Analysis ({len(successful_responses)} models) ===\n"]
  output.append(judge_result["response"])

  # Add model list
- output.append(
- f"\nModels consulted: {', '.join([r['model'] for r in successful_responses])}"
- )
+ output.append(f"\nModels consulted: {', '.join([r['model'] for r in successful_responses])}")
  if devil_response:
  output.append(f"Devil's Advocate: {devil_response['model']}")

@@ -260,7 +260,7 @@ llm "Explain this code" --model gpt-4o
  llm --action consensus "Is this approach correct?" --devils-advocate
  llm --action models --provider openai

- Available: {', '.join(available) if available else 'None'}"""
+ Available: {", ".join(available) if available else "None"}"""

  @override
  async def call(
@@ -281,9 +281,7 @@ Available: {', '.join(available) if available else 'None'}"""
  pass

  if not LITELLM_AVAILABLE:
- return (
- "Error: LiteLLM is not installed. Install it with: pip install litellm"
- )
+ return "Error: LiteLLM is not installed. Install it with: pip install litellm"

  # Extract action
  action = params.get("action", "query")
@@ -302,9 +300,7 @@ Available: {', '.join(available) if available else 'None'}"""
  elif action == "disable":
  return self._handle_disable(params.get("provider"))
  elif action == "test":
- return await self._handle_test(
- tool_ctx, params.get("model"), params.get("provider")
- )
+ return await self._handle_test(tool_ctx, params.get("model"), params.get("provider"))
  else:
  return f"Error: Unknown action '{action}'. Valid actions: query, consensus, list, models, enable, disable, test"

@@ -394,9 +390,7 @@ Available: {', '.join(available) if available else 'None'}"""
  models = params.get("models")
  if not models:
  # Use configured or default models
- consensus_size = params.get("consensus_size") or self.config.get(
- "consensus_size", 3
- )
+ consensus_size = params.get("consensus_size") or self.config.get("consensus_size", 3)
  models = self._get_consensus_models(consensus_size)

  if not models:
@@ -436,11 +430,7 @@ Available: {', '.join(available) if available else 'None'}"""
  if devil_model:
  # Create devil's advocate prompt
  responses_text = "\n\n".join(
- [
- f"Model {i+1}: {resp['response']}"
- for i, resp in enumerate(responses)
- if resp["response"]
- ]
+ [f"Model {i + 1}: {resp['response']}" for i, resp in enumerate(responses) if resp["response"]]
  )

  devil_prompt = f"""You are a critical analyst. Review these responses to the question below and provide a devil's advocate perspective. Challenge assumptions, point out weaknesses, and suggest alternative viewpoints.
@@ -464,14 +454,10 @@ Provide your critical analysis:"""
  }

  # Aggregate responses
- judge_model = params.get("judge_model") or self.config.get(
- "default_judge_model", "gpt-4o"
- )
+ judge_model = params.get("judge_model") or self.config.get("default_judge_model", "gpt-4o")
  include_raw = params.get("include_raw", False)

- return await self._aggregate_consensus(
- responses, prompt, judge_model, include_raw, devil_response, tool_ctx
- )
+ return await self._aggregate_consensus(responses, prompt, judge_model, include_raw, devil_response, tool_ctx)

  def _handle_list(self) -> str:
  """List available providers."""
@@ -505,9 +491,7 @@ Provide your critical analysis:"""
  output.append(f"{provider}: {status}")
  output.append(f" Environment variables: {', '.join(env_vars)}")

- output.append(
- "\nUse 'llm --action enable/disable --provider <name>' to manage providers"
- )
+ output.append("\nUse 'llm --action enable/disable --provider <name>' to manage providers")

  return "\n".join(output)

@@ -547,16 +531,10 @@ Provide your critical analysis:"""
  # Show providers with counts
  for provider_name, models in sorted(all_models.items()):
  if models:
- available = (
- "✅" if provider_name in self.available_providers else "❌"
- )
- output.append(
- f"{available} {provider_name}: {len(models)} models"
- )
+ available = "✅" if provider_name in self.available_providers else "❌"
+ output.append(f"{available} {provider_name}: {len(models)} models")

- output.append(
- "\nUse 'llm --action models --provider <name>' to see specific models"
- )
+ output.append("\nUse 'llm --action models --provider <name>' to see specific models")

  return "\n".join(output)

@@ -595,9 +573,7 @@ Provide your critical analysis:"""
  else:
  return f"{provider} is already disabled"

- async def _handle_test(
- self, tool_ctx, model: Optional[str], provider: Optional[str]
- ) -> str:
+ async def _handle_test(self, tool_ctx, model: Optional[str], provider: Optional[str]) -> str:
  """Test a model or provider."""
  if not model and not provider:
  return "Error: Either model or provider is required for test action"
@@ -653,11 +629,7 @@ Provide your critical analysis:"""
  break

  provider = self._get_provider_for_model(model)
- if (
- provider
- and provider in self.available_providers
- and provider not in disabled
- ):
+ if provider and provider in self.available_providers and provider not in disabled:
  models.append(model)

  # If still need more, add from available providers
@@ -690,9 +662,7 @@ Provide your critical analysis:"""
  """Query multiple models in parallel."""

  async def query_with_info(model: str) -> Dict[str, Any]:
- result = await self._query_single_model(
- model, prompt, system_prompt, temperature, max_tokens
- )
+ result = await self._query_single_model(model, prompt, system_prompt, temperature, max_tokens)
  return {
  "model": model,
  "response": result.get("response"),
@@ -771,12 +741,7 @@ Provide your critical analysis:"""
  return "Error: All models failed to respond"

  # Format responses for aggregation
- responses_text = "\n\n".join(
- [
- f"Model: {r['model']}\nResponse: {r['response']}"
- for r in successful_responses
- ]
- )
+ responses_text = "\n\n".join([f"Model: {r['model']}\nResponse: {r['response']}" for r in successful_responses])

  if devil_response:
  responses_text += f"\n\nDevil's Advocate ({devil_response['model']}):\n{devil_response['response']}"
@@ -805,23 +770,17 @@ Be concise and highlight the most important findings."""
  if tool_ctx:
  await tool_ctx.info(f"Aggregating responses with {judge_model}...")

- judge_result = await self._query_single_model(
- judge_model, aggregation_prompt, None, 0.3, None
- )
+ judge_result = await self._query_single_model(judge_model, aggregation_prompt, None, 0.3, None)

  if not judge_result["success"]:
  return f"Error: Judge model failed: {judge_result.get('error', 'Unknown error')}"

  # Format output
- output = [
- f"=== Consensus Analysis ({len(successful_responses)} models) ===\n"
- ]
+ output = [f"=== Consensus Analysis ({len(successful_responses)} models) ===\n"]
  output.append(judge_result["response"])

  # Add model list
- output.append(
- f"\nModels consulted: {', '.join([r['model'] for r in successful_responses])}"
- )
+ output.append(f"\nModels consulted: {', '.join([r['model'] for r in successful_responses])}")
  if devil_response:
  output.append(f"Devil's Advocate: {devil_response['model']}")

@@ -71,9 +71,7 @@ class ProviderToolParams(TypedDict, total=False):
  class BaseProviderTool(BaseTool):
  """Base class for provider-specific LLM tools."""

- def __init__(
- self, provider: str, default_model: str, model_variants: Dict[str, str]
- ):
+ def __init__(self, provider: str, default_model: str, model_variants: Dict[str, str]):
  """Initialize provider tool.

  Args:
@@ -297,9 +297,7 @@ class LSPTool(BaseTool):
  self.logger.error(f"Installation error: {e}")
  return False

- async def _ensure_lsp_running(
- self, language: str, root_uri: str
- ) -> Optional[LSPServer]:
+ async def _ensure_lsp_running(self, language: str, root_uri: str) -> Optional[LSPServer]:
  """Ensure LSP server is running for language."""
  # Check if already running
  server_key = f"{language}:{root_uri}"
@@ -326,9 +324,7 @@ class LSPTool(BaseTool):
  cwd=root_uri,
  )

- server = LSPServer(
- language=language, process=process, config=config, root_uri=root_uri
- )
+ server = LSPServer(language=language, process=process, config=config, root_uri=root_uri)

  # Initialize LSP
  await self._initialize_lsp(server)
@@ -367,9 +363,7 @@ class LSPTool(BaseTool):
  await self._send_request(server, request)
  server.initialized = True

- async def _send_request(
- self, server: LSPServer, request: Dict[str, Any]
- ) -> Optional[Dict[str, Any]]:
+ async def _send_request(self, server: LSPServer, request: Dict[str, Any]) -> Optional[Dict[str, Any]]:
  """Send JSON-RPC request to LSP server."""
  if not server.process or server.process.returncode is not None:
  return None
@@ -426,11 +420,7 @@ class LSPTool(BaseTool):
  "status",
  ]
  if action not in valid_actions:
- return MCPResourceDocument(
- data={
- "error": f"Invalid action. Must be one of: {', '.join(valid_actions)}"
- }
- )
+ return MCPResourceDocument(data={"error": f"Invalid action. Must be one of: {', '.join(valid_actions)}"})

  # Get language from file
  language = self._get_language_from_file(file)
@@ -478,9 +468,7 @@ class LSPTool(BaseTool):
  )

  # Execute action
- result = await self._execute_lsp_action(
- server, action, file, line, character, new_name
- )
+ result = await self._execute_lsp_action(server, action, file, line, character, new_name)

  return MCPResourceDocument(data=result)

@@ -189,9 +189,7 @@ Use 'mcp_stats' to see all added servers and their status.

  full_command = shlex.split(command)

- await tool_ctx.info(
- f"Adding MCP server '{name}' with command: {' '.join(full_command)}"
- )
+ await tool_ctx.info(f"Adding MCP server '{name}' with command: {' '.join(full_command)}")

  # Create server configuration
  server_config = {
@@ -68,9 +68,7 @@ Example:
  servers = McpAddTool.get_servers()

  if not servers:
- return (
- "No MCP servers have been added yet.\n\nUse 'mcp_add' to add servers."
- )
+ return "No MCP servers have been added yet.\n\nUse 'mcp_add' to add servers."

  output = []
  output.append("=== MCP Server Statistics ===")
@@ -173,9 +173,7 @@ class MCPTool(BaseTool):
  def _auto_start_servers(self):
  """Auto-start servers configured for auto-start."""
  for name, server_config in self.config.get("servers", {}).items():
- if server_config.get("enabled", False) and server_config.get(
- "auto_start", False
- ):
+ if server_config.get("enabled", False) and server_config.get("auto_start", False):
  self._start_server(name, server_config)

  def _start_server(self, name: str, config: Dict[str, Any]) -> bool:
@@ -202,9 +200,7 @@ class MCPTool(BaseTool):
  cmd = [config["command"]] + config.get("args", [])

  # Create log directory
- log_dir = Path(
- self.config.get("log_dir", str(Path.home() / ".hanzo" / "mcp" / "logs"))
- )
+ log_dir = Path(self.config.get("log_dir", str(Path.home() / ".hanzo" / "mcp" / "logs")))
  log_dir.mkdir(parents=True, exist_ok=True)

  # Start process
@@ -307,11 +303,11 @@ Status: {enabled} enabled, {running} running"""
  elif action == "restart":
  return self._handle_restart(params.get("name"))
  elif action == "config":
- return self._handle_config(
- params.get("config_key"), params.get("config_value")
- )
+ return self._handle_config(params.get("config_key"), params.get("config_value"))
  else:
- return f"Error: Unknown action '{action}'. Valid actions: list, add, remove, enable, disable, restart, config"
+ return (
+ f"Error: Unknown action '{action}'. Valid actions: list, add, remove, enable, disable, restart, config"
+ )

  def _handle_list(self) -> str:
  """List all MCP servers."""
@@ -355,9 +351,7 @@ Status: {enabled} enabled, {running} running"""
  output.append(f"{name}: {status}")
  if config.get("description"):
  output.append(f" Description: {config['description']}")
- output.append(
- f" Command: {config['command']} {' '.join(config.get('args', []))}"
- )
+ output.append(f" Command: {config['command']} {' '.join(config.get('args', []))}")

  if config.get("env"):
  env_str = ", ".join([f"{k}={v}" for k, v in config["env"].items()])
@@ -498,11 +492,7 @@ Status: {enabled} enabled, {running} running"""
  else:
  return f"Configuration key '{key}' not found"

- return (
- json.dumps(current, indent=2)
- if isinstance(current, (dict, list))
- else str(current)
- )
+ return json.dumps(current, indent=2) if isinstance(current, (dict, list)) else str(current)
  else:
  # Set value
  # Navigate to parent
@@ -513,11 +503,7 @@ Status: {enabled} enabled, {running} running"""
  current = current[k]

  # Parse value if it looks like JSON
- if (
- isinstance(value, str)
- and value.startswith("{")
- or value.startswith("[")
- ):
+ if isinstance(value, str) and value.startswith("{") or value.startswith("["):
  try:
  value = json.loads(value)
  except Exception:
@@ -20,6 +20,7 @@ try:
  SummarizeToMemoryTool,
  ManageKnowledgeBasesTool,
  )
+
  MEMORY_TOOLS_AVAILABLE = True
  except ImportError:
  MEMORY_TOOLS_AVAILABLE = False
@@ -49,35 +50,17 @@ def register_memory_tools(
  return []

  # Create memory tools
- recall_tool = RecallMemoriesTool(
- user_id=user_id, project_id=project_id, **memory_config
- )
- create_tool = CreateMemoriesTool(
- user_id=user_id, project_id=project_id, **memory_config
- )
- update_tool = UpdateMemoriesTool(
- user_id=user_id, project_id=project_id, **memory_config
- )
- delete_tool = DeleteMemoriesTool(
- user_id=user_id, project_id=project_id, **memory_config
- )
- manage_tool = ManageMemoriesTool(
- user_id=user_id, project_id=project_id, **memory_config
- )
+ recall_tool = RecallMemoriesTool(user_id=user_id, project_id=project_id, **memory_config)
+ create_tool = CreateMemoriesTool(user_id=user_id, project_id=project_id, **memory_config)
+ update_tool = UpdateMemoriesTool(user_id=user_id, project_id=project_id, **memory_config)
+ delete_tool = DeleteMemoriesTool(user_id=user_id, project_id=project_id, **memory_config)
+ manage_tool = ManageMemoriesTool(user_id=user_id, project_id=project_id, **memory_config)

  # Create knowledge tools
- recall_facts_tool = RecallFactsTool(
- user_id=user_id, project_id=project_id, **memory_config
- )
- store_facts_tool = StoreFactsTool(
- user_id=user_id, project_id=project_id, **memory_config
- )
- summarize_tool = SummarizeToMemoryTool(
- user_id=user_id, project_id=project_id, **memory_config
- )
- manage_kb_tool = ManageKnowledgeBasesTool(
- user_id=user_id, project_id=project_id, **memory_config
- )
+ recall_facts_tool = RecallFactsTool(user_id=user_id, project_id=project_id, **memory_config)
+ store_facts_tool = StoreFactsTool(user_id=user_id, project_id=project_id, **memory_config)
+ summarize_tool = SummarizeToMemoryTool(user_id=user_id, project_id=project_id, **memory_config)
+ manage_kb_tool = ManageKnowledgeBasesTool(user_id=user_id, project_id=project_id, **memory_config)

  # Register tools
  ToolRegistry.register_tool(mcp_server, recall_tool)
@@ -140,15 +140,9 @@ recall_facts(queries=["company policies"], scope="global", limit=5)
  if fact.metadata and fact.metadata.get("kb_name"):
  kb_info = f" (KB: {fact.metadata['kb_name']})"
  formatted.append(f"{i}. {fact.content}{kb_info}")
- if (
- fact.metadata and len(fact.metadata) > 2
- ): # More than just type and kb_name
+ if fact.metadata and len(fact.metadata) > 2: # More than just type and kb_name
  # Show other metadata
- other_meta = {
- k: v
- for k, v in fact.metadata.items()
- if k not in ["type", "kb_name"]
- }
+ other_meta = {k: v for k, v in fact.metadata.items() if k not in ["type", "kb_name"]}
  if other_meta:
  formatted.append(f" Metadata: {other_meta}")

@@ -167,9 +161,7 @@ recall_facts(queries=["company policies"], scope="global", limit=5)
  scope: str = "project",
  limit: int = 10,
  ) -> str:
- return await tool_self.call(
- ctx, queries=queries, kb_name=kb_name, scope=scope, limit=limit
- )
+ return await tool_self.call(ctx, queries=queries, kb_name=kb_name, scope=scope, limit=limit)


  @final
@@ -265,9 +257,7 @@ store_facts(facts=["Company founded in 2020"], scope="global", kb_name="company_
  scope: str = "project",
  metadata: Optional[Dict[str, Any]] = None,
  ) -> str:
- return await tool_self.call(
- ctx, facts=facts, kb_name=kb_name, scope=scope, metadata=metadata
- )
+ return await tool_self.call(ctx, facts=facts, kb_name=kb_name, scope=scope, metadata=metadata)


  @final
@@ -323,11 +313,7 @@ summarize_to_memory(content="Company guidelines...", topic="Guidelines", scope="

  # Use the memory service to create a summary
  # This would typically use an LLM to summarize, but for now we'll store as-is
- summary = (
- f"Summary of {topic}:\n{content[:500]}..."
- if len(content) > 500
- else content
- )
+ summary = f"Summary of {topic}:\n{content[:500]}..." if len(content) > 500 else content

  # Store the summary as a memory
  from hanzo_memory.services.memory import get_memory_service
@@ -358,9 +344,7 @@ summarize_to_memory(content="Company guidelines...", topic="Guidelines", scope="
  if auto_facts:
  # In a real implementation, this would use LLM to extract key facts
  # For now, we'll just note it
- result += (
- "\n(Auto-fact extraction would extract key facts from the summary)"
- )
+ result += "\n(Auto-fact extraction would extract key facts from the summary)"

  return result

@@ -377,9 +361,7 @@ summarize_to_memory(content="Company guidelines...", topic="Guidelines", scope="
  scope: str = "project",
  auto_facts: bool = True,
  ) -> str:
- return await tool_self.call(
- ctx, content=content, topic=topic, scope=scope, auto_facts=auto_facts
- )
+ return await tool_self.call(ctx, content=content, topic=topic, scope=scope, auto_facts=auto_facts)

  @final