mcp-vector-search 0.12.6__py3-none-any.whl → 1.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (65)
  1. mcp_vector_search/__init__.py +2 -2
  2. mcp_vector_search/analysis/__init__.py +64 -0
  3. mcp_vector_search/analysis/collectors/__init__.py +39 -0
  4. mcp_vector_search/analysis/collectors/base.py +164 -0
  5. mcp_vector_search/analysis/collectors/complexity.py +743 -0
  6. mcp_vector_search/analysis/metrics.py +341 -0
  7. mcp_vector_search/analysis/reporters/__init__.py +5 -0
  8. mcp_vector_search/analysis/reporters/console.py +222 -0
  9. mcp_vector_search/cli/commands/analyze.py +408 -0
  10. mcp_vector_search/cli/commands/chat.py +1262 -0
  11. mcp_vector_search/cli/commands/index.py +21 -3
  12. mcp_vector_search/cli/commands/init.py +13 -0
  13. mcp_vector_search/cli/commands/install.py +597 -335
  14. mcp_vector_search/cli/commands/install_old.py +8 -4
  15. mcp_vector_search/cli/commands/mcp.py +78 -6
  16. mcp_vector_search/cli/commands/reset.py +68 -26
  17. mcp_vector_search/cli/commands/search.py +30 -7
  18. mcp_vector_search/cli/commands/setup.py +1133 -0
  19. mcp_vector_search/cli/commands/status.py +37 -2
  20. mcp_vector_search/cli/commands/uninstall.py +276 -357
  21. mcp_vector_search/cli/commands/visualize/__init__.py +39 -0
  22. mcp_vector_search/cli/commands/visualize/cli.py +276 -0
  23. mcp_vector_search/cli/commands/visualize/exporters/__init__.py +12 -0
  24. mcp_vector_search/cli/commands/visualize/exporters/html_exporter.py +33 -0
  25. mcp_vector_search/cli/commands/visualize/exporters/json_exporter.py +29 -0
  26. mcp_vector_search/cli/commands/visualize/graph_builder.py +714 -0
  27. mcp_vector_search/cli/commands/visualize/layout_engine.py +469 -0
  28. mcp_vector_search/cli/commands/visualize/server.py +311 -0
  29. mcp_vector_search/cli/commands/visualize/state_manager.py +428 -0
  30. mcp_vector_search/cli/commands/visualize/templates/__init__.py +16 -0
  31. mcp_vector_search/cli/commands/visualize/templates/base.py +180 -0
  32. mcp_vector_search/cli/commands/visualize/templates/scripts.py +2507 -0
  33. mcp_vector_search/cli/commands/visualize/templates/styles.py +1313 -0
  34. mcp_vector_search/cli/commands/visualize.py.original +2536 -0
  35. mcp_vector_search/cli/didyoumean.py +22 -2
  36. mcp_vector_search/cli/main.py +115 -159
  37. mcp_vector_search/cli/output.py +24 -8
  38. mcp_vector_search/config/__init__.py +4 -0
  39. mcp_vector_search/config/default_thresholds.yaml +52 -0
  40. mcp_vector_search/config/settings.py +12 -0
  41. mcp_vector_search/config/thresholds.py +185 -0
  42. mcp_vector_search/core/auto_indexer.py +3 -3
  43. mcp_vector_search/core/boilerplate.py +186 -0
  44. mcp_vector_search/core/config_utils.py +394 -0
  45. mcp_vector_search/core/database.py +369 -94
  46. mcp_vector_search/core/exceptions.py +11 -0
  47. mcp_vector_search/core/git_hooks.py +4 -4
  48. mcp_vector_search/core/indexer.py +221 -4
  49. mcp_vector_search/core/llm_client.py +751 -0
  50. mcp_vector_search/core/models.py +3 -0
  51. mcp_vector_search/core/project.py +17 -0
  52. mcp_vector_search/core/scheduler.py +11 -11
  53. mcp_vector_search/core/search.py +179 -29
  54. mcp_vector_search/mcp/server.py +24 -5
  55. mcp_vector_search/utils/__init__.py +2 -0
  56. mcp_vector_search/utils/gitignore_updater.py +212 -0
  57. mcp_vector_search/utils/monorepo.py +66 -4
  58. mcp_vector_search/utils/timing.py +10 -6
  59. {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.0.3.dist-info}/METADATA +182 -52
  60. mcp_vector_search-1.0.3.dist-info/RECORD +97 -0
  61. {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.0.3.dist-info}/WHEEL +1 -1
  62. {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.0.3.dist-info}/entry_points.txt +1 -0
  63. mcp_vector_search/cli/commands/visualize.py +0 -1467
  64. mcp_vector_search-0.12.6.dist-info/RECORD +0 -68
  65. {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.0.3.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,408 @@
1
+ """Analyze command for MCP Vector Search CLI."""
2
+
3
+ import asyncio
4
+ from pathlib import Path
5
+
6
+ import typer
7
+ from loguru import logger
8
+
9
+ from ...analysis import (
10
+ CognitiveComplexityCollector,
11
+ CyclomaticComplexityCollector,
12
+ FileMetrics,
13
+ ProjectMetrics,
14
+ )
15
+ from ...core.exceptions import ProjectNotFoundError
16
+ from ...core.project import ProjectManager
17
+ from ...parsers.registry import ParserRegistry
18
+ from ..output import console, print_error, print_info, print_json
19
+
20
# Typer sub-application providing the `analyze` command group.
analyze_app = typer.Typer(help="📈 Analyze code complexity and quality")
22
+
23
+
24
@analyze_app.callback(invoke_without_command=True)
def main(
    ctx: typer.Context,
    project_root: Path | None = typer.Option(
        None,
        "--project-root",
        "-p",
        help="Project root directory (auto-detected if not specified)",
        exists=True,
        file_okay=False,
        dir_okay=True,
        readable=True,
        rich_help_panel="🔧 Global Options",
    ),
    quick: bool = typer.Option(
        False,
        "--quick",
        help="Quick mode (cognitive + cyclomatic complexity only)",
        rich_help_panel="⚡ Performance Options",
    ),
    language: str | None = typer.Option(
        None,
        "--language",
        help="Filter by programming language (python, javascript, typescript)",
        rich_help_panel="🔍 Filters",
    ),
    path: Path | None = typer.Option(
        None,
        "--path",
        help="Analyze specific file or directory",
        rich_help_panel="🔍 Filters",
    ),
    top: int = typer.Option(
        10,
        "--top",
        help="Number of top complexity hotspots to show",
        min=1,
        max=100,
        rich_help_panel="📊 Display Options",
    ),
    json_output: bool = typer.Option(
        False,
        "--json",
        help="Output results in JSON format",
        rich_help_panel="📊 Display Options",
    ),
) -> None:
    """📈 Analyze code complexity and quality.

    Performs structural code analysis to identify complexity hotspots,
    code smells, and quality metrics across your codebase.

    [bold cyan]Basic Examples:[/bold cyan]

    [green]Quick analysis (cognitive + cyclomatic complexity):[/green]
    $ mcp-vector-search analyze --quick

    [green]Full analysis (all collectors):[/green]
    $ mcp-vector-search analyze

    [green]Filter by language:[/green]
    $ mcp-vector-search analyze --language python

    [green]Analyze specific directory:[/green]
    $ mcp-vector-search analyze --path src/core

    [bold cyan]Output Options:[/bold cyan]

    [green]Show top 5 hotspots:[/green]
    $ mcp-vector-search analyze --top 5

    [green]Export to JSON:[/green]
    $ mcp-vector-search analyze --json > analysis.json

    [dim]💡 Tip: Use --quick for faster analysis on large projects.[/dim]
    """
    if ctx.invoked_subcommand is not None:
        # A subcommand was invoked - let it handle the request
        return

    # Use provided project_root or current working directory.
    if project_root is None:
        project_root = Path.cwd()

    try:
        asyncio.run(
            run_analysis(
                project_root=project_root,
                quick_mode=quick,
                language_filter=language,
                path_filter=path,
                top_n=top,
                json_output=json_output,
            )
        )

    except typer.Exit:
        # run_analysis already reported the error and chose the exit code;
        # re-raise untouched instead of logging it as a generic failure.
        raise
    except Exception as e:
        logger.error(f"Analysis failed: {e}")
        print_error(f"Analysis failed: {e}")
        # Chain the cause so tracebacks preserve the original error (B904).
        raise typer.Exit(1) from e
124
+
125
+
126
async def run_analysis(
    project_root: Path,
    quick_mode: bool = False,
    language_filter: str | None = None,
    path_filter: Path | None = None,
    top_n: int = 10,
    json_output: bool = False,
) -> None:
    """Run code complexity analysis.

    Args:
        project_root: Root directory of the project
        quick_mode: Use only cognitive + cyclomatic complexity (faster)
        language_filter: Filter files by language
        path_filter: Analyze specific file or directory
        top_n: Number of top hotspots to show
        json_output: Output results as JSON

    Raises:
        typer.Exit: When the project cannot be found (exit code 1).
    """
    try:
        # Initialization is optional — any directory can be analyzed; we only
        # surface a note when the target is not an initialized MCP project.
        project_manager = ProjectManager(project_root)
        initialized = project_manager.is_initialized()

        if not initialized and not json_output:
            print_info(
                f"Analyzing directory: {project_root} (not initialized as MCP project)"
            )

        # Initialize parser registry
        parser_registry = ParserRegistry()

        # Determine which collectors to use
        if quick_mode:
            collectors = [
                CognitiveComplexityCollector(),
                CyclomaticComplexityCollector(),
            ]
            mode_label = "Quick Mode (2 collectors)"
        else:
            # Import all collectors for full mode (deferred to keep quick
            # mode's import cost low).
            from ...analysis import (
                MethodCountCollector,
                NestingDepthCollector,
                ParameterCountCollector,
            )

            collectors = [
                CognitiveComplexityCollector(),
                CyclomaticComplexityCollector(),
                NestingDepthCollector(),
                ParameterCountCollector(),
                MethodCountCollector(),
            ]
            mode_label = "Full Mode (5 collectors)"

        # Find files to analyze
        files_to_analyze = _find_analyzable_files(
            project_root, language_filter, path_filter, parser_registry
        )

        if not files_to_analyze:
            if json_output:
                print_json({"error": "No files found to analyze"})
            else:
                print_error("No files found to analyze")
            return

        if not json_output:
            console.print(
                f"\n[bold blue]Starting Code Analysis[/bold blue] - {mode_label}"
            )
            console.print(f"Files to analyze: {len(files_to_analyze)}\n")

        # Analyze files; a failure on one file is logged and skipped so the
        # rest of the project is still analyzed.
        project_metrics = ProjectMetrics(project_root=str(project_root))

        for file_path in files_to_analyze:
            try:
                file_metrics = await _analyze_file(
                    file_path, parser_registry, collectors
                )
                if file_metrics and file_metrics.chunks:
                    project_metrics.files[str(file_path)] = file_metrics
            except Exception as e:
                logger.debug(f"Failed to analyze {file_path}: {e}")
                continue

        # Compute aggregates
        project_metrics.compute_aggregates()

        # Output results
        if json_output:
            output = project_metrics.to_summary()
            print_json(output)
        else:
            # Import console reporter (only needed for human-readable output)
            from ...analysis.reporters.console import ConsoleReporter

            reporter = ConsoleReporter()
            reporter.print_summary(project_metrics)
            reporter.print_distribution(project_metrics)
            reporter.print_hotspots(project_metrics, top=top_n)
            reporter.print_recommendations(project_metrics)

    except ProjectNotFoundError as e:
        if json_output:
            print_json({"error": str(e)})
        else:
            print_error(str(e))
        # Chain the cause so tracebacks preserve the original error (B904).
        raise typer.Exit(1) from e
    except Exception as e:
        logger.error(f"Analysis failed: {e}", exc_info=True)
        if json_output:
            print_json({"error": str(e)})
        else:
            print_error(f"Analysis failed: {e}")
        raise
243
+
244
+
245
def _find_analyzable_files(
    project_root: Path,
    language_filter: str | None,
    path_filter: Path | None,
    parser_registry: ParserRegistry,
) -> list[Path]:
    """Find files that can be analyzed.

    Args:
        project_root: Root directory
        language_filter: Optional language filter
        path_filter: Optional path filter
        parser_registry: Parser registry for checking supported files

    Returns:
        Sorted list of file paths to analyze
    """
    import fnmatch

    # Determine base path to search
    base_path = path_filter if path_filter and path_filter.exists() else project_root

    supported_extensions = parser_registry.get_supported_extensions()

    # If path_filter is a file, return just that file (when supported)
    if base_path.is_file():
        if base_path.suffix.lower() in supported_extensions:
            return [base_path]
        return []

    # Find all supported files
    files: list[Path] = []

    # Directory names to skip anywhere along a file's path.
    ignore_patterns = {
        ".git",
        ".venv",
        "venv",
        "node_modules",
        "__pycache__",
        ".pytest_cache",
        "dist",
        "build",
        ".tox",
        ".eggs",
    }
    # Filename prefix matching is restricted to dot-prefixed entries
    # (e.g. ".git*" catches ".gitignore"). Prefix-matching bare names like
    # "build*" or "dist*" would wrongly exclude legitimate source files
    # such as builder.py or distance.py.
    dot_prefixes = tuple(p for p in ignore_patterns if p.startswith("."))

    for file_path in base_path.rglob("*"):
        # Skip directories
        if file_path.is_dir():
            continue

        # Skip anything inside an ignored directory
        if any(ignored in file_path.parts for ignored in ignore_patterns):
            continue

        # Skip files named after dot-prefixed ignore entries
        if any(
            fnmatch.fnmatch(file_path.name, f"{prefix}*") for prefix in dot_prefixes
        ):
            continue

        # Check if file extension is supported
        if file_path.suffix.lower() not in supported_extensions:
            continue

        # Apply language filter
        if language_filter:
            parser = parser_registry.get_parser_for_file(file_path)
            # NOTE(review): assumes the registry returns a parser for every
            # supported extension; guard against None to avoid a crash —
            # confirm against ParserRegistry.
            if parser is None or parser.language.lower() != language_filter.lower():
                continue

        files.append(file_path)

    return sorted(files)
317
+
318
+
319
async def _analyze_file(
    file_path: Path, parser_registry: ParserRegistry, collectors: list
) -> FileMetrics | None:
    """Analyze a single file and return metrics.

    Args:
        file_path: Path to file
        parser_registry: Parser registry
        collectors: List of metric collectors

    Returns:
        FileMetrics or None if analysis failed
    """
    try:
        # Resolve the parser for this file and split it into code chunks.
        parser = parser_registry.get_parser_for_file(file_path)
        parsed_chunks = await parser.parse_file(file_path)

        if not parsed_chunks:
            return None

        metrics = FileMetrics(file_path=str(file_path))

        # Best-effort line counting; an unreadable file leaves the
        # counters at their defaults.
        try:
            with open(file_path, encoding="utf-8") as handle:
                stripped = [line.strip() for line in handle.read().splitlines()]
            metrics.total_lines = len(stripped)
            metrics.blank_lines = sum(1 for text in stripped if not text)
            metrics.comment_lines = sum(
                1 for text in stripped if text.startswith("#")
            )
            metrics.code_lines = sum(
                1 for text in stripped if text and not text.startswith("#")
            )
        except Exception:
            pass

        from ...analysis.metrics import ChunkMetrics

        # Single pass over the chunks: tally definition kinds and derive
        # per-chunk metrics from what the parser already computed.
        for chunk in parsed_chunks:
            kind = chunk.chunk_type
            if kind == "function":
                metrics.function_count += 1
            elif kind == "class":
                metrics.class_count += 1
            elif kind == "method":
                metrics.method_count += 1

            # The parser's complexity_score is cyclomatic complexity;
            # fall back to 1 when it reports nothing.
            cyclomatic = (
                int(chunk.complexity_score) if chunk.complexity_score > 0 else 1
            )

            metrics.chunks.append(
                ChunkMetrics(
                    # Rough estimate: cognitive tends to run ~1.3x cyclomatic.
                    cognitive_complexity=int(cyclomatic * 1.3),
                    cyclomatic_complexity=cyclomatic,
                    max_nesting_depth=0,  # Not available without collectors
                    parameter_count=len(chunk.parameters) if chunk.parameters else 0,
                    lines_of_code=chunk.end_line - chunk.start_line + 1,
                )
            )

        # Roll per-chunk numbers up into file-level aggregates.
        metrics.compute_aggregates()

        return metrics

    except Exception as e:
        logger.debug(f"Failed to analyze file {file_path}: {e}")
        return None
405
+
406
+
407
+ if __name__ == "__main__":
408
+ analyze_app()