mcp-vector-search 1.0.3__py3-none-any.whl → 1.1.22__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package as published to its public registry. It is provided for informational purposes only.
- mcp_vector_search/__init__.py +3 -3
- mcp_vector_search/analysis/__init__.py +48 -1
- mcp_vector_search/analysis/baseline/__init__.py +68 -0
- mcp_vector_search/analysis/baseline/comparator.py +462 -0
- mcp_vector_search/analysis/baseline/manager.py +621 -0
- mcp_vector_search/analysis/collectors/__init__.py +35 -0
- mcp_vector_search/analysis/collectors/cohesion.py +463 -0
- mcp_vector_search/analysis/collectors/coupling.py +1162 -0
- mcp_vector_search/analysis/collectors/halstead.py +514 -0
- mcp_vector_search/analysis/collectors/smells.py +325 -0
- mcp_vector_search/analysis/debt.py +516 -0
- mcp_vector_search/analysis/interpretation.py +685 -0
- mcp_vector_search/analysis/metrics.py +74 -1
- mcp_vector_search/analysis/reporters/__init__.py +3 -1
- mcp_vector_search/analysis/reporters/console.py +424 -0
- mcp_vector_search/analysis/reporters/markdown.py +480 -0
- mcp_vector_search/analysis/reporters/sarif.py +377 -0
- mcp_vector_search/analysis/storage/__init__.py +93 -0
- mcp_vector_search/analysis/storage/metrics_store.py +762 -0
- mcp_vector_search/analysis/storage/schema.py +245 -0
- mcp_vector_search/analysis/storage/trend_tracker.py +560 -0
- mcp_vector_search/analysis/trends.py +308 -0
- mcp_vector_search/analysis/visualizer/__init__.py +90 -0
- mcp_vector_search/analysis/visualizer/d3_data.py +534 -0
- mcp_vector_search/analysis/visualizer/exporter.py +484 -0
- mcp_vector_search/analysis/visualizer/html_report.py +2895 -0
- mcp_vector_search/analysis/visualizer/schemas.py +525 -0
- mcp_vector_search/cli/commands/analyze.py +665 -11
- mcp_vector_search/cli/commands/chat.py +193 -0
- mcp_vector_search/cli/commands/index.py +600 -2
- mcp_vector_search/cli/commands/index_background.py +467 -0
- mcp_vector_search/cli/commands/search.py +194 -1
- mcp_vector_search/cli/commands/setup.py +64 -13
- mcp_vector_search/cli/commands/status.py +302 -3
- mcp_vector_search/cli/commands/visualize/cli.py +26 -10
- mcp_vector_search/cli/commands/visualize/exporters/json_exporter.py +8 -4
- mcp_vector_search/cli/commands/visualize/graph_builder.py +167 -234
- mcp_vector_search/cli/commands/visualize/server.py +304 -15
- mcp_vector_search/cli/commands/visualize/templates/base.py +60 -6
- mcp_vector_search/cli/commands/visualize/templates/scripts.py +2100 -65
- mcp_vector_search/cli/commands/visualize/templates/styles.py +1297 -88
- mcp_vector_search/cli/didyoumean.py +5 -0
- mcp_vector_search/cli/main.py +16 -5
- mcp_vector_search/cli/output.py +134 -5
- mcp_vector_search/config/thresholds.py +89 -1
- mcp_vector_search/core/__init__.py +16 -0
- mcp_vector_search/core/database.py +39 -2
- mcp_vector_search/core/embeddings.py +24 -0
- mcp_vector_search/core/git.py +380 -0
- mcp_vector_search/core/indexer.py +445 -84
- mcp_vector_search/core/llm_client.py +9 -4
- mcp_vector_search/core/models.py +88 -1
- mcp_vector_search/core/relationships.py +473 -0
- mcp_vector_search/core/search.py +1 -1
- mcp_vector_search/mcp/server.py +795 -4
- mcp_vector_search/parsers/python.py +285 -5
- mcp_vector_search/utils/gitignore.py +0 -3
- {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/METADATA +3 -2
- {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/RECORD +62 -39
- mcp_vector_search/cli/commands/visualize.py.original +0 -2536
- {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/WHEEL +0 -0
- {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/entry_points.txt +0 -0
- {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/licenses/LICENSE +0 -0
--- a/mcp_vector_search/cli/commands/status.py
+++ b/mcp_vector_search/cli/commands/status.py
@@ -8,8 +8,11 @@ from typing import Any
 
 import typer
 from loguru import logger
+from rich.panel import Panel
+from rich.table import Table
 
 from ... import __version__
+from ...analysis.storage.metrics_store import MetricsStore, MetricsStoreError
 from ...core.database import ChromaVectorDatabase
 from ...core.embeddings import create_embedding_function
 from ...core.exceptions import ProjectNotFoundError
@@ -60,6 +63,13 @@ def main(
         help="Check Claude Code MCP integration status",
         rich_help_panel="🔍 Diagnostics",
     ),
+    metrics: bool = typer.Option(
+        False,
+        "--metrics",
+        "-m",
+        help="Show project metrics summary from latest analysis",
+        rich_help_panel="📊 Display Options",
+    ),
     json_output: bool = typer.Option(
         False,
         "--json",
@@ -78,6 +88,9 @@ def main(
     [green]Quick status check:[/green]
         $ mcp-vector-search status
 
+    [green]Show code metrics summary:[/green]
+        $ mcp-vector-search status --metrics
+
     [green]Detailed status with all information:[/green]
         $ mcp-vector-search status --verbose
 
@@ -89,13 +102,13 @@ def main(
     [green]Full health check:[/green]
         $ mcp-vector-search status --health-check
 
-    [green]Export
-        $ mcp-vector-search status --json >
+    [green]Export metrics to JSON:[/green]
+        $ mcp-vector-search status --metrics --json > metrics.json
 
     [green]Combined diagnostics:[/green]
         $ mcp-vector-search status --verbose --health-check --mcp
 
-    [dim]💡 Tip: Use --
+    [dim]💡 Tip: Use --metrics to see code quality analysis from 'mcp-vector-search analyze'[/dim]
     """
     try:
         # Use provided project_root or current working directory
@@ -111,6 +124,7 @@ def main(
                 verbose=verbose,
                 health_check=health_check,
                 mcp=mcp,
+                metrics=metrics,
                 json_output=json_output,
             ),
             timeout=30.0,  # 30 second timeout
@@ -136,12 +150,20 @@ async def show_status(
     verbose: bool = False,
     health_check: bool = False,
     mcp: bool = False,
+    metrics: bool = False,
     json_output: bool = False,
 ) -> None:
     """Show comprehensive project status."""
     status_data = {}
 
     try:
+        # If metrics flag is set, show metrics summary and return
+        if metrics:
+            await show_metrics_summary(
+                project_root=project_root,
+                json_output=json_output,
+            )
+            return
         # Check if project is initialized - use the specified project root
         project_manager = ProjectManager(project_root)
 
@@ -518,6 +540,283 @@ async def check_mcp_integration(
     return mcp_status
 
 
+async def show_metrics_summary(
+    project_root: Path,
+    json_output: bool = False,
+) -> None:
+    """Show code metrics summary from latest analysis.
+
+    Args:
+        project_root: Project root directory
+        json_output: Output as JSON instead of formatted console
+
+    Raises:
+        typer.Exit: If no metrics found or error occurs
+    """
+    try:
+        # Get metrics storage location
+        storage_dir = project_root / ".mcp-vector-search"
+        db_path = storage_dir / "metrics.db"
+
+        # Check if metrics database exists
+        if not db_path.exists():
+            if json_output:
+                print_json(
+                    {
+                        "status": "error",
+                        "error": "No metrics found",
+                        "message": "Run 'mcp-vector-search analyze' first",
+                    }
+                )
+            else:
+                console.print(
+                    "[yellow]No metrics found. Run 'mcp-vector-search analyze' first.[/yellow]"
+                )
+            raise typer.Exit(1)
+
+        # Load metrics store
+        store = MetricsStore(db_path)
+
+        # Get latest snapshot for this project
+        snapshots = store.get_project_history(str(project_root), limit=1)
+
+        if not snapshots:
+            if json_output:
+                print_json(
+                    {
+                        "status": "error",
+                        "error": "No metrics found for this project",
+                        "message": "Run 'mcp-vector-search analyze' first",
+                    }
+                )
+            else:
+                console.print(
+                    "[yellow]No metrics found for this project. "
+                    "Run 'mcp-vector-search analyze' first.[/yellow]"
+                )
+            raise typer.Exit(1)
+
+        latest = snapshots[0]
+
+        # Output JSON or formatted
+        if json_output:
+            _output_metrics_json(latest)
+        else:
+            _print_metrics_summary(latest)
+
+    except MetricsStoreError as e:
+        logger.error(f"Failed to load metrics: {e}")
+        if json_output:
+            print_json({"status": "error", "error": str(e)})
+        else:
+            print_error(f"Failed to load metrics: {e}")
+        raise typer.Exit(1)
+    except Exception as e:
+        logger.error(f"Unexpected error loading metrics: {e}")
+        if json_output:
+            print_json({"status": "error", "error": str(e)})
+        else:
+            print_error(f"Unexpected error: {e}")
+        raise typer.Exit(1)
+
+
+def _output_metrics_json(snapshot) -> None:
+    """Output metrics snapshot as JSON.
+
+    Args:
+        snapshot: ProjectSnapshot from metrics store
+    """
+    output = {
+        "status": "success",
+        "snapshot_id": snapshot.snapshot_id,
+        "project_path": snapshot.project_path,
+        "timestamp": snapshot.timestamp.isoformat(),
+        "metrics": {
+            "files": {
+                "total": snapshot.total_files,
+                "needing_attention": sum(
+                    1
+                    for grade, count in snapshot.grade_distribution.items()
+                    if grade in ["D", "F"]
+                    for _ in range(count)
+                ),
+            },
+            "lines": {
+                "total": snapshot.total_lines,
+            },
+            "functions": {
+                "total": snapshot.total_functions,
+            },
+            "classes": {
+                "total": snapshot.total_classes,
+            },
+            "complexity": {
+                "average": round(snapshot.avg_complexity, 2),
+                "maximum": snapshot.max_complexity,
+                "total": snapshot.total_complexity,
+                "grade_distribution": snapshot.grade_distribution,
+            },
+            "code_smells": {
+                "total": snapshot.total_smells,
+            },
+            "health": {
+                "average_score": round(snapshot.avg_health_score, 2),
+            },
+        },
+        "metadata": {
+            "git_commit": snapshot.git_commit,
+            "git_branch": snapshot.git_branch,
+            "tool_version": snapshot.tool_version,
+        },
+    }
+
+    print_json(output)
+
+
+def _print_metrics_summary(snapshot) -> None:
+    """Print formatted metrics summary using Rich.
+
+    Args:
+        snapshot: ProjectSnapshot from metrics store
+    """
+    # Header panel with overall stats
+    console.print(
+        Panel.fit(
+            f"[bold]Project Metrics Summary[/bold]\n"
+            f"Files: {snapshot.total_files} | "
+            f"Functions: {snapshot.total_functions} | "
+            f"Classes: {snapshot.total_classes} | "
+            f"Lines: {snapshot.total_lines:,}\n"
+            f"Analyzed: {snapshot.timestamp.strftime('%Y-%m-%d %H:%M:%S')}",
+            title="📊 mcp-vector-search",
+            border_style="blue",
+        )
+    )
+    console.print()
+
+    # Complexity metrics table
+    complexity_table = Table(title="Complexity Metrics", show_header=True)
+    complexity_table.add_column("Metric", style="cyan", no_wrap=True)
+    complexity_table.add_column("Average", justify="right")
+    complexity_table.add_column("Maximum", justify="right")
+    complexity_table.add_column("Total", justify="right")
+    complexity_table.add_column("Status", justify="center")
+
+    complexity_table.add_row(
+        "Cognitive Complexity",
+        f"{snapshot.avg_complexity:.1f}",
+        f"{snapshot.max_complexity}",
+        f"{snapshot.total_complexity}",
+        _status_indicator(snapshot.avg_complexity, 10, 20),
+    )
+
+    console.print(complexity_table)
+    console.print()
+
+    # Grade distribution table
+    grade_table = Table(title="Complexity Grade Distribution", show_header=True)
+    grade_table.add_column("Grade", style="cyan", no_wrap=True)
+    grade_table.add_column("Count", justify="right")
+    grade_table.add_column("Percentage", justify="right")
+    grade_table.add_column("Description")
+
+    total_chunks = sum(snapshot.grade_distribution.values())
+    grade_descriptions = {
+        "A": "Excellent (0-5)",
+        "B": "Good (6-10)",
+        "C": "Acceptable (11-20)",
+        "D": "Needs Improvement (21-30)",
+        "F": "Refactor Recommended (31+)",
+    }
+
+    for grade in ["A", "B", "C", "D", "F"]:
+        count = snapshot.grade_distribution.get(grade, 0)
+        percentage = (count / total_chunks * 100) if total_chunks > 0 else 0
+
+        # Color code the grade
+        grade_color = {
+            "A": "green",
+            "B": "blue",
+            "C": "yellow",
+            "D": "orange1",
+            "F": "red",
+        }.get(grade, "white")
+
+        grade_table.add_row(
+            f"[{grade_color}]{grade}[/{grade_color}]",
+            str(count),
+            f"{percentage:.1f}%",
+            grade_descriptions[grade],
+        )
+
+    console.print(grade_table)
+    console.print()
+
+    # Code smells summary
+    if snapshot.total_smells > 0:
+        console.print(
+            f"[yellow]Code Smells:[/yellow] {snapshot.total_smells} issues detected"
+        )
+        console.print()
+
+    # Health score
+    health_color = (
+        "green"
+        if snapshot.avg_health_score >= 0.8
+        else "yellow"
+        if snapshot.avg_health_score >= 0.6
+        else "red"
+    )
+    console.print(
+        f"[bold]Health Score:[/bold] [{health_color}]{snapshot.avg_health_score:.2f}[/{health_color}] / 1.00"
+    )
+    console.print()
+
+    # Git metadata (if available)
+    if snapshot.git_commit or snapshot.git_branch:
+        metadata_parts = []
+        if snapshot.git_branch:
+            metadata_parts.append(f"Branch: {snapshot.git_branch}")
+        if snapshot.git_commit:
+            metadata_parts.append(f"Commit: {snapshot.git_commit[:8]}")
+        if snapshot.tool_version:
+            metadata_parts.append(f"Version: {snapshot.tool_version}")
+
+        console.print(f"[dim]{' | '.join(metadata_parts)}[/dim]")
+        console.print()
+
+    # Files needing attention
+    files_needing_attention = snapshot.grade_distribution.get(
+        "D", 0
+    ) + snapshot.grade_distribution.get("F", 0)
+    if files_needing_attention > 0:
+        console.print(
+            f"[yellow]⚠️ {files_needing_attention} code chunks need attention (grades D or F)[/yellow]"
+        )
+        console.print()
+
+
+def _status_indicator(
+    value: float, warning_threshold: float, error_threshold: float
+) -> str:
+    """Return colored status indicator based on thresholds.
+
+    Args:
+        value: Value to check
+        warning_threshold: Warning threshold (yellow)
+        error_threshold: Error threshold (red)
+
+    Returns:
+        Colored status indicator (green, yellow, or red dot)
+    """
+    if value < warning_threshold:
+        return "[green]●[/green]"
+    elif value < error_threshold:
+        return "[yellow]●[/yellow]"
+    else:
+        return "[red]●[/red]"
+
+
 def check_dependencies() -> bool:
     """Check if all required dependencies are available.
 
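The new `--metrics --json` path in status.py emits the fixed JSON shape built by `_output_metrics_json` above. As an illustrative sketch (not part of the package), a downstream script could consume that output along these lines, assuming it was produced with `mcp-vector-search status --metrics --json > metrics.json`:

```python
# Illustrative consumer of the JSON emitted by _output_metrics_json above.
# Field names mirror the diff; the script itself is not part of the package.
import json
from pathlib import Path

data = json.loads(Path("metrics.json").read_text())

if data.get("status") == "success":
    complexity = data["metrics"]["complexity"]
    grades = complexity["grade_distribution"]
    total = sum(grades.values()) or 1
    needs_work = grades.get("D", 0) + grades.get("F", 0)
    print(f"Average cognitive complexity: {complexity['average']}")
    print(f"Chunks graded D/F: {needs_work} ({needs_work / total:.1%})")
    print(f"Health score: {data['metrics']['health']['average_score']:.2f}")
else:
    print(f"No metrics available: {data.get('message', data.get('error'))}")
```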
--- a/mcp_vector_search/cli/commands/visualize/cli.py
+++ b/mcp_vector_search/cli/commands/visualize/cli.py
@@ -24,21 +24,22 @@ from .graph_builder import build_graph_data
 from .server import find_free_port, start_visualization_server
 
 app = typer.Typer(
-    help="Visualize code chunk relationships",
+    help="📊 Visualize code chunk relationships",
+    invoke_without_command=True,
 )
 console = Console()
 
 
-@app.callback(
+@app.callback()
 def visualize_callback(ctx: typer.Context) -> None:
     """Visualize code chunk relationships.
 
-
+    If no subcommand is provided, defaults to starting the visualization server.
     """
-    # If no subcommand was invoked, run serve with defaults
     if ctx.invoked_subcommand is None:
-        #
-
+        # Default to serve when no subcommand given
+        # Must pass explicit defaults since typer.Option doesn't work when called directly
+        serve(port=8501, graph_file=Path("chunk-graph.json"), code_only=False)
 
 
 @app.command()
@@ -185,7 +186,7 @@ async def _export_chunks(
 @app.command()
 def serve(
     port: int = typer.Option(
-
+        8501, "--port", "-p", help="Port for visualization server"
     ),
     graph_file: Path = typer.Option(
         Path("chunk-graph.json"),
@@ -202,7 +203,7 @@ def serve(
     """Start local HTTP server for D3.js visualization.
 
     Examples:
-        # Start server on default port
+        # Start server on default port 8501
        mcp-vector-search visualize serve
 
        # Custom port
@@ -215,9 +216,9 @@ def serve(
        mcp-vector-search visualize serve --code-only
     """
     # Use specified port or find free one
-    if port ==
+    if port == 8501:  # Default port, try to find free one
         try:
-            port = find_free_port(
+            port = find_free_port(8501, 8599)
         except OSError as e:
             console.print(f"[red]✗ {e}[/red]")
             raise typer.Exit(1)
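The `find_free_port(8501, 8599)` helper called above is defined in `visualize/server.py`, which is not shown in this diff. A minimal sketch of what such a range-scanning helper typically looks like (an assumption for illustration, not the package's actual implementation):

```python
# Hypothetical sketch of a find_free_port(start, end) helper; the real one
# lives in mcp_vector_search/cli/commands/visualize/server.py and may differ.
import socket


def find_free_port(start: int, end: int) -> int:
    """Return the first port in [start, end] that can be bound on localhost."""
    for port in range(start, end + 1):
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            try:
                sock.bind(("127.0.0.1", port))
            except OSError:
                continue  # Port in use; try the next one
            return port
    raise OSError(f"No free port available in range {start}-{end}")
```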
@@ -245,8 +246,23 @@ def serve(
     export_to_html(html_file)
 
     # Check if we need to regenerate the graph file
+    # Regenerate if: graph doesn't exist, code_only filter, or index is newer than graph
     needs_regeneration = not graph_file.exists() or code_only
 
+    # Check if index database is newer than graph (stale graph detection)
+    if graph_file.exists() and not needs_regeneration:
+        index_db = (
+            project_manager.project_root / ".mcp-vector-search" / "chroma.sqlite3"
+        )
+        if index_db.exists():
+            graph_mtime = graph_file.stat().st_mtime
+            index_mtime = index_db.stat().st_mtime
+            if index_mtime > graph_mtime:
+                console.print(
+                    "[yellow]Index has changed since graph was generated. Regenerating...[/yellow]"
+                )
+                needs_regeneration = True
+
     if graph_file.exists() and not needs_regeneration:
         # Use existing unfiltered file
         dest = viz_dir / "chunk-graph.json"
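The block above adds stale-graph detection by comparing file modification times: the cached `chunk-graph.json` is regenerated whenever the Chroma index database is newer. The same pattern, factored into a standalone helper purely for illustration (not code from the package):

```python
# Illustrative helper showing the mtime-based staleness check used above.
from pathlib import Path


def is_stale(artifact: Path, source: Path) -> bool:
    """Return True if `artifact` is missing or older than `source`."""
    if not artifact.exists():
        return True
    if not source.exists():
        return False
    return source.stat().st_mtime > artifact.stat().st_mtime


# Example: is_stale(Path("chunk-graph.json"),
#                   Path(".mcp-vector-search/chroma.sqlite3"))
```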
--- a/mcp_vector_search/cli/commands/visualize/exporters/json_exporter.py
+++ b/mcp_vector_search/cli/commands/visualize/exporters/json_exporter.py
@@ -1,12 +1,13 @@
 """JSON export functionality for graph data.
 
 This module handles exporting graph data to JSON format.
+Uses orjson for 5-10x faster serialization performance.
 """
 
-import json
 from pathlib import Path
 from typing import Any
 
+import orjson
 from rich.console import Console
 
 console = Console()
@@ -15,6 +16,8 @@ console = Console()
 def export_to_json(graph_data: dict[str, Any], output_path: Path) -> None:
     """Export graph data to JSON file.
 
+    Uses orjson for fast serialization (5-10x faster than stdlib json).
+
     Args:
         graph_data: Graph data dictionary containing nodes, links, and metadata
         output_path: Path to output JSON file
@@ -22,8 +25,9 @@ def export_to_json(graph_data: dict[str, Any], output_path: Path) -> None:
     # Ensure output directory exists
     output_path.parent.mkdir(parents=True, exist_ok=True)
 
-    # Write to file
-
-
+    # Write to file using orjson for fast serialization
+    # OPT_INDENT_2 gives readable output, OPT_SORT_KEYS for consistency
+    with open(output_path, "wb") as f:
+        f.write(orjson.dumps(graph_data, option=orjson.OPT_INDENT_2))
 
     console.print(f"[green]✓[/green] Exported graph data to [cyan]{output_path}[/cyan]")