mcp-vector-search 1.0.3__py3-none-any.whl → 1.1.22__py3-none-any.whl
This diff compares publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- mcp_vector_search/__init__.py +3 -3
- mcp_vector_search/analysis/__init__.py +48 -1
- mcp_vector_search/analysis/baseline/__init__.py +68 -0
- mcp_vector_search/analysis/baseline/comparator.py +462 -0
- mcp_vector_search/analysis/baseline/manager.py +621 -0
- mcp_vector_search/analysis/collectors/__init__.py +35 -0
- mcp_vector_search/analysis/collectors/cohesion.py +463 -0
- mcp_vector_search/analysis/collectors/coupling.py +1162 -0
- mcp_vector_search/analysis/collectors/halstead.py +514 -0
- mcp_vector_search/analysis/collectors/smells.py +325 -0
- mcp_vector_search/analysis/debt.py +516 -0
- mcp_vector_search/analysis/interpretation.py +685 -0
- mcp_vector_search/analysis/metrics.py +74 -1
- mcp_vector_search/analysis/reporters/__init__.py +3 -1
- mcp_vector_search/analysis/reporters/console.py +424 -0
- mcp_vector_search/analysis/reporters/markdown.py +480 -0
- mcp_vector_search/analysis/reporters/sarif.py +377 -0
- mcp_vector_search/analysis/storage/__init__.py +93 -0
- mcp_vector_search/analysis/storage/metrics_store.py +762 -0
- mcp_vector_search/analysis/storage/schema.py +245 -0
- mcp_vector_search/analysis/storage/trend_tracker.py +560 -0
- mcp_vector_search/analysis/trends.py +308 -0
- mcp_vector_search/analysis/visualizer/__init__.py +90 -0
- mcp_vector_search/analysis/visualizer/d3_data.py +534 -0
- mcp_vector_search/analysis/visualizer/exporter.py +484 -0
- mcp_vector_search/analysis/visualizer/html_report.py +2895 -0
- mcp_vector_search/analysis/visualizer/schemas.py +525 -0
- mcp_vector_search/cli/commands/analyze.py +665 -11
- mcp_vector_search/cli/commands/chat.py +193 -0
- mcp_vector_search/cli/commands/index.py +600 -2
- mcp_vector_search/cli/commands/index_background.py +467 -0
- mcp_vector_search/cli/commands/search.py +194 -1
- mcp_vector_search/cli/commands/setup.py +64 -13
- mcp_vector_search/cli/commands/status.py +302 -3
- mcp_vector_search/cli/commands/visualize/cli.py +26 -10
- mcp_vector_search/cli/commands/visualize/exporters/json_exporter.py +8 -4
- mcp_vector_search/cli/commands/visualize/graph_builder.py +167 -234
- mcp_vector_search/cli/commands/visualize/server.py +304 -15
- mcp_vector_search/cli/commands/visualize/templates/base.py +60 -6
- mcp_vector_search/cli/commands/visualize/templates/scripts.py +2100 -65
- mcp_vector_search/cli/commands/visualize/templates/styles.py +1297 -88
- mcp_vector_search/cli/didyoumean.py +5 -0
- mcp_vector_search/cli/main.py +16 -5
- mcp_vector_search/cli/output.py +134 -5
- mcp_vector_search/config/thresholds.py +89 -1
- mcp_vector_search/core/__init__.py +16 -0
- mcp_vector_search/core/database.py +39 -2
- mcp_vector_search/core/embeddings.py +24 -0
- mcp_vector_search/core/git.py +380 -0
- mcp_vector_search/core/indexer.py +445 -84
- mcp_vector_search/core/llm_client.py +9 -4
- mcp_vector_search/core/models.py +88 -1
- mcp_vector_search/core/relationships.py +473 -0
- mcp_vector_search/core/search.py +1 -1
- mcp_vector_search/mcp/server.py +795 -4
- mcp_vector_search/parsers/python.py +285 -5
- mcp_vector_search/utils/gitignore.py +0 -3
- {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/METADATA +3 -2
- {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/RECORD +62 -39
- mcp_vector_search/cli/commands/visualize.py.original +0 -2536
- {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/WHEEL +0 -0
- {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/entry_points.txt +0 -0
- {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/licenses/LICENSE +0 -0
Both hunks below touch `mcp_vector_search/cli/commands/chat.py`: together they add 193 lines and remove none, matching the `+193 -0` entry in the file list above. The first hunk adds an "analyze" branch to the intent dispatcher in `run_chat_with_intent`:

```diff
@@ -401,6 +401,19 @@ async def run_chat_with_intent(
             files=files,
             think=think,
         )
+    elif intent == "analyze":
+        # Analysis mode - analyze code quality and metrics
+        console.print(
+            "\n[cyan]📊 Intent: Analyze[/cyan] - Analyzing code quality\n"
+        )
+        await run_chat_analyze(
+            project_root=project_root,
+            query=query,
+            model=model,
+            provider=provider,
+            timeout=timeout,
+            think=think,
+        )
     else:
         # Answer mode - force think mode and enter interactive session
         console.print(
```
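The new branch delegates to a coroutine defined in the second hunk, so the analysis path can also be driven programmatically. A minimal sketch, assuming `run_chat_analyze` is importable from `mcp_vector_search.cli.commands.chat` and using an illustrative query; the signature is taken from the hunk below:

```python
# Sketch only: drive the new analysis mode directly, outside the chat loop.
# The import path and query string are assumptions for illustration.
import asyncio
from pathlib import Path

from mcp_vector_search.cli.commands.chat import run_chat_analyze

asyncio.run(
    run_chat_analyze(
        project_root=Path("."),
        query="Which modules have the highest cognitive complexity?",
        timeout=60.0,  # generous timeout; analysis always uses the advanced model
    )
)
```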
The second hunk adds the `run_chat_analyze` coroutine itself, ending at the unchanged `_process_answer_query` context:

```diff
@@ -562,6 +575,186 @@ Guidelines:
         print_error(f"Error: {e}")


+async def run_chat_analyze(
+    project_root: Path,
+    query: str,
+    model: str | None = None,
+    provider: str | None = None,
+    timeout: float = 30.0,
+    think: bool = False,
+) -> None:
+    """Run analysis mode with streaming interpretation.
+
+    This function:
+    1. Parses the user's analysis question
+    2. Determines which metrics/tools to invoke
+    3. Calls appropriate analysis tools
+    4. Passes results to LLM with specialized analysis prompt
+    5. Returns interpreted insights with streaming output
+
+    Args:
+        project_root: Project root directory
+        query: User's analysis question
+        model: Model to use (optional)
+        provider: LLM provider
+        timeout: API timeout
+        think: Use advanced model for complex analysis
+    """
+    import json
+
+    from ...analysis import ProjectMetrics
+    from ...analysis.interpretation import AnalysisInterpreter, EnhancedJSONExporter
+    from ...core.config_utils import get_openai_api_key, get_openrouter_api_key
+    from ...parsers.registry import ParserRegistry
+
+    config_dir = project_root / ".mcp-vector-search"
+    openai_key = get_openai_api_key(config_dir)
+    openrouter_key = get_openrouter_api_key(config_dir)
+
+    # Load project configuration
+    project_manager = ProjectManager(project_root)
+    if not project_manager.is_initialized():
+        raise ProjectNotFoundError(
+            f"Project not initialized at {project_root}. Run 'mcp-vector-search init' first."
+        )
+
+    config = project_manager.load_config()
+
+    # Initialize LLM client (use advanced model for analysis)
+    try:
+        llm_client = LLMClient(
+            openai_api_key=openai_key,
+            openrouter_api_key=openrouter_key,
+            model=model,
+            provider=provider,
+            timeout=timeout,
+            think=True,  # Always use advanced model for analysis
+        )
+        provider_display = llm_client.provider.capitalize()
+        model_info = f"{llm_client.model} [bold magenta](analysis mode)[/bold magenta]"
+        print_success(f"Connected to {provider_display}: {model_info}")
+    except ValueError as e:
+        print_error(str(e))
+        raise typer.Exit(1)
+
+    # Determine query type and run appropriate analysis
+    console.print(f"\n[cyan]🔍 Analyzing:[/cyan] [white]{query}[/white]\n")
+
+    # Initialize parser registry and collect metrics
+    console.print("[cyan]📊 Collecting metrics...[/cyan]")
+    parser_registry = ParserRegistry()
+    project_metrics = ProjectMetrics(root_path=project_root)
+
+    # Parse all files
+    for file_ext in config.file_extensions:
+        parser = parser_registry.get_parser(file_ext)
+        if parser:
+            # Find all files with this extension
+            for file_path in project_root.rglob(f"*{file_ext}"):
+                # Skip ignored directories
+                should_skip = False
+                for ignore_pattern in config.ignore_patterns:
+                    if ignore_pattern in str(file_path):
+                        should_skip = True
+                        break
+
+                if should_skip:
+                    continue
+
+                try:
+                    chunks = parser.parse_file(file_path)
+                    project_metrics.add_file(file_path, chunks)
+                except Exception as e:
+                    logger.warning(f"Failed to parse {file_path}: {e}")
+
+    # Generate enhanced export with LLM context
+    console.print("[cyan]🧮 Computing analysis context...[/cyan]")
+    exporter = EnhancedJSONExporter(project_root=project_root)
+    enhanced_export = exporter.export_with_context(
+        project_metrics,
+        include_smells=True,
+    )
+
+    # Create analysis prompt based on query type
+    analysis_context = json.dumps(enhanced_export.model_dump(), indent=2)
+
+    # Analysis system prompt with grading rubric and code smell interpretation
+    analysis_system_prompt = """You are a code quality expert analyzing a codebase. You have access to comprehensive metrics and code smell analysis.
+
+**Metric Definitions:**
+- **Cognitive Complexity**: Measures how difficult code is to understand (control flow, nesting, operators)
+  - Grade A: 0-5 (simple), B: 6-10 (moderate), C: 11-15 (complex), D: 16-20 (very complex), F: 21+ (extremely complex)
+- **Cyclomatic Complexity**: Counts independent paths through code (branches, loops)
+  - Low: 1-5, Moderate: 6-10, High: 11-20, Very High: 21+
+- **Instability**: Ratio of outgoing to total dependencies (I = Ce / (Ca + Ce))
+  - 0.0 = Stable (hard to change), 1.0 = Unstable (easy to change)
+- **LCOM4**: Lack of Cohesion - number of connected components in class
+  - 1 = Highly cohesive (single responsibility), 2+ = Low cohesion (multiple responsibilities)
+
+**Code Smell Severity:**
+- **Error**: Critical issues blocking maintainability (God Classes, Extreme Complexity)
+- **Warning**: Moderate issues needing attention (Long Methods, Deep Nesting)
+- **Info**: Minor issues, cosmetic improvements (Long Parameter Lists)
+
+**Threshold Context:**
+- **Well Below**: <50% of threshold (healthy)
+- **Below**: 50-100% of threshold (acceptable)
+- **At Threshold**: 100-110% (monitor closely)
+- **Above**: 110-150% (needs attention)
+- **Well Above**: >150% (urgent action required)
+
+**Output Format:**
+Provide structured insights with:
+1. **Executive Summary**: Overall quality grade and key findings
+2. **Priority Issues**: Most critical problems to address (if any)
+3. **Specific Metrics**: Answer the user's specific question with data
+4. **Recommendations**: Actionable next steps prioritized by impact
+
+Use markdown formatting. Be concise but thorough. Reference specific files, functions, or classes when relevant."""
+
+    # Build messages for analysis
+    messages = [
+        {"role": "system", "content": analysis_system_prompt},
+        {
+            "role": "user",
+            "content": f"""Analysis Data:
+{analysis_context}
+
+User Question: {query}
+
+Please analyze the codebase and answer the user's question based on the metrics and code smell data provided.""",
+        },
+    ]
+
+    # Stream the response
+    console.print("\n[bold cyan]🤖 Analysis:[/bold cyan]\n")
+
+    try:
+        # Use Rich Live for rendering streamed markdown
+        accumulated_response = ""
+        with Live(
+            "", console=console, auto_refresh=True, vertical_overflow="visible"
+        ) as live:
+            async for chunk in llm_client.stream_chat_completion(messages):
+                accumulated_response += chunk
+                # Update live display with accumulated markdown
+                live.update(Markdown(accumulated_response))
+
+        console.print()  # Blank line after completion
+
+    except Exception as e:
+        logger.error(f"Analysis streaming failed: {e}")
+        print_error(f"Failed to stream analysis: {e}")
+
+        # Fallback: Use interpreter for summary
+        console.print("\n[yellow]⚠ Falling back to summary interpretation[/yellow]\n")
+        interpreter = AnalysisInterpreter()
+        summary = interpreter.interpret(
+            enhanced_export, focus="summary", verbosity="normal"
+        )
+        console.print(Markdown(summary))
+
+
 async def _process_answer_query(
     query: str,
     llm_client: LLMClient,
```
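The system prompt embedded in the hunk defines instability as I = Ce / (Ca + Ce): efferent couplings Ce (what a module depends on) over total couplings. A worked example with made-up counts shows why 0.0 reads as stable and 1.0 as unstable:

```python
# Instability I = Ce / (Ca + Ce): efferent (outgoing) couplings over total.
# The coupling counts below are illustrative, not measured from this package.
def instability(ca: int, ce: int) -> float:
    """Return I in [0.0, 1.0]: 0.0 = maximally stable, 1.0 = maximally unstable."""
    total = ca + ce
    return ce / total if total else 0.0

assert instability(ca=9, ce=1) == 0.1   # many dependents, few dependencies: stable
assert instability(ca=1, ce=3) == 0.75  # depends outward more than it is depended on: unstable
```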
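The streaming block near the end of the hunk accumulates chunks and re-renders the entire response as Markdown on each update via Rich's `Live`. A self-contained sketch of that pattern, with a stubbed async generator standing in for `llm_client.stream_chat_completion`:

```python
# Minimal sketch of the accumulate-and-rerender streaming pattern used above.
# fake_stream is a stand-in for the real LLM token stream.
import asyncio

from rich.console import Console
from rich.live import Live
from rich.markdown import Markdown

async def fake_stream():
    for piece in ["# Analysis\n\n", "- finding one\n", "- finding two\n"]:
        await asyncio.sleep(0.1)  # simulate network latency
        yield piece

async def main() -> None:
    console = Console()
    accumulated = ""
    with Live("", console=console, auto_refresh=True, vertical_overflow="visible") as live:
        async for chunk in fake_stream():
            accumulated += chunk
            live.update(Markdown(accumulated))  # re-render everything received so far

asyncio.run(main())
```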
|