mcp-vector-search 1.0.3__py3-none-any.whl → 1.1.22__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as published to a supported public registry. It is provided for informational purposes only.
- mcp_vector_search/__init__.py +3 -3
- mcp_vector_search/analysis/__init__.py +48 -1
- mcp_vector_search/analysis/baseline/__init__.py +68 -0
- mcp_vector_search/analysis/baseline/comparator.py +462 -0
- mcp_vector_search/analysis/baseline/manager.py +621 -0
- mcp_vector_search/analysis/collectors/__init__.py +35 -0
- mcp_vector_search/analysis/collectors/cohesion.py +463 -0
- mcp_vector_search/analysis/collectors/coupling.py +1162 -0
- mcp_vector_search/analysis/collectors/halstead.py +514 -0
- mcp_vector_search/analysis/collectors/smells.py +325 -0
- mcp_vector_search/analysis/debt.py +516 -0
- mcp_vector_search/analysis/interpretation.py +685 -0
- mcp_vector_search/analysis/metrics.py +74 -1
- mcp_vector_search/analysis/reporters/__init__.py +3 -1
- mcp_vector_search/analysis/reporters/console.py +424 -0
- mcp_vector_search/analysis/reporters/markdown.py +480 -0
- mcp_vector_search/analysis/reporters/sarif.py +377 -0
- mcp_vector_search/analysis/storage/__init__.py +93 -0
- mcp_vector_search/analysis/storage/metrics_store.py +762 -0
- mcp_vector_search/analysis/storage/schema.py +245 -0
- mcp_vector_search/analysis/storage/trend_tracker.py +560 -0
- mcp_vector_search/analysis/trends.py +308 -0
- mcp_vector_search/analysis/visualizer/__init__.py +90 -0
- mcp_vector_search/analysis/visualizer/d3_data.py +534 -0
- mcp_vector_search/analysis/visualizer/exporter.py +484 -0
- mcp_vector_search/analysis/visualizer/html_report.py +2895 -0
- mcp_vector_search/analysis/visualizer/schemas.py +525 -0
- mcp_vector_search/cli/commands/analyze.py +665 -11
- mcp_vector_search/cli/commands/chat.py +193 -0
- mcp_vector_search/cli/commands/index.py +600 -2
- mcp_vector_search/cli/commands/index_background.py +467 -0
- mcp_vector_search/cli/commands/search.py +194 -1
- mcp_vector_search/cli/commands/setup.py +64 -13
- mcp_vector_search/cli/commands/status.py +302 -3
- mcp_vector_search/cli/commands/visualize/cli.py +26 -10
- mcp_vector_search/cli/commands/visualize/exporters/json_exporter.py +8 -4
- mcp_vector_search/cli/commands/visualize/graph_builder.py +167 -234
- mcp_vector_search/cli/commands/visualize/server.py +304 -15
- mcp_vector_search/cli/commands/visualize/templates/base.py +60 -6
- mcp_vector_search/cli/commands/visualize/templates/scripts.py +2100 -65
- mcp_vector_search/cli/commands/visualize/templates/styles.py +1297 -88
- mcp_vector_search/cli/didyoumean.py +5 -0
- mcp_vector_search/cli/main.py +16 -5
- mcp_vector_search/cli/output.py +134 -5
- mcp_vector_search/config/thresholds.py +89 -1
- mcp_vector_search/core/__init__.py +16 -0
- mcp_vector_search/core/database.py +39 -2
- mcp_vector_search/core/embeddings.py +24 -0
- mcp_vector_search/core/git.py +380 -0
- mcp_vector_search/core/indexer.py +445 -84
- mcp_vector_search/core/llm_client.py +9 -4
- mcp_vector_search/core/models.py +88 -1
- mcp_vector_search/core/relationships.py +473 -0
- mcp_vector_search/core/search.py +1 -1
- mcp_vector_search/mcp/server.py +795 -4
- mcp_vector_search/parsers/python.py +285 -5
- mcp_vector_search/utils/gitignore.py +0 -3
- {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/METADATA +3 -2
- {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/RECORD +62 -39
- mcp_vector_search/cli/commands/visualize.py.original +0 -2536
- {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/WHEEL +0 -0
- {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/entry_points.txt +0 -0
- {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/licenses/LICENSE +0 -0
mcp_vector_search/mcp/server.py
CHANGED
```diff
@@ -18,6 +18,12 @@ from mcp.types import (
     Tool,
 )
 
+from ..analysis import (
+    ProjectMetrics,
+    SmellDetector,
+    SmellSeverity,
+)
+from ..config.thresholds import ThresholdConfig
 from ..core.database import ChromaVectorDatabase
 from ..core.embeddings import create_embedding_function
 from ..core.exceptions import ProjectNotFoundError
@@ -25,6 +31,7 @@ from ..core.indexer import SemanticIndexer
 from ..core.project import ProjectManager
 from ..core.search import SemanticSearchEngine
 from ..core.watcher import FileWatcher
+from ..parsers.registry import ParserRegistry
 
 
 class MCPVectorSearchServer:
```
```diff
@@ -159,7 +166,7 @@ class MCPVectorSearchServer:
         tools = [
             Tool(
                 name="search_code",
-                description="Search
+                description="Search codebase using natural language queries (text-to-code search). Use when you know what functionality you're looking for but not where it's implemented. Example: 'authentication middleware' or 'database connection pooling' to find relevant code.",
                 inputSchema={
                     "type": "object",
                     "properties": {
@@ -208,7 +215,7 @@ class MCPVectorSearchServer:
             ),
             Tool(
                 name="search_similar",
-                description="Find code similar to a specific file or function",
+                description="Find code snippets similar to a specific file or function (code-to-code similarity). Use when looking for duplicate code, similar patterns, or related implementations. Example: 'Find functions similar to auth_handler.py' to discover related authentication code.",
                 inputSchema={
                     "type": "object",
                     "properties": {
@@ -240,7 +247,7 @@ class MCPVectorSearchServer:
             ),
             Tool(
                 name="search_context",
-                description="Search for code
+                description="Search for code using rich contextual descriptions with optional focus areas. Use when you need broader context around specific concerns. Example: 'code handling user sessions' with focus_areas=['security', 'authentication'] to find session management with security emphasis.",
                 inputSchema={
                     "type": "object",
                     "properties": {
```
```diff
@@ -289,6 +296,116 @@ class MCPVectorSearchServer:
                     "required": [],
                 },
             ),
+            Tool(
+                name="analyze_project",
+                description="Returns project-wide metrics summary",
+                inputSchema={
+                    "type": "object",
+                    "properties": {
+                        "threshold_preset": {
+                            "type": "string",
+                            "description": "Threshold preset: 'strict', 'standard', or 'relaxed'",
+                            "enum": ["strict", "standard", "relaxed"],
+                            "default": "standard",
+                        },
+                        "output_format": {
+                            "type": "string",
+                            "description": "Output format: 'summary' or 'detailed'",
+                            "enum": ["summary", "detailed"],
+                            "default": "summary",
+                        },
+                    },
+                    "required": [],
+                },
+            ),
+            Tool(
+                name="analyze_file",
+                description="Returns file-level metrics",
+                inputSchema={
+                    "type": "object",
+                    "properties": {
+                        "file_path": {
+                            "type": "string",
+                            "description": "Path to the file to analyze (relative or absolute)",
+                        },
+                    },
+                    "required": ["file_path"],
+                },
+            ),
+            Tool(
+                name="find_smells",
+                description="Identify code quality issues, anti-patterns, bad practices, and technical debt. Detects Long Methods, Deep Nesting, Long Parameter Lists, God Classes, and Complex Methods. Use when assessing code quality, finding refactoring opportunities, or identifying maintainability issues.",
+                inputSchema={
+                    "type": "object",
+                    "properties": {
+                        "smell_type": {
+                            "type": "string",
+                            "description": "Filter by smell type: 'Long Method', 'Deep Nesting', 'Long Parameter List', 'God Class', 'Complex Method'",
+                            "enum": [
+                                "Long Method",
+                                "Deep Nesting",
+                                "Long Parameter List",
+                                "God Class",
+                                "Complex Method",
+                            ],
+                        },
+                        "severity": {
+                            "type": "string",
+                            "description": "Filter by severity level",
+                            "enum": ["info", "warning", "error"],
+                        },
+                    },
+                    "required": [],
+                },
+            ),
+            Tool(
+                name="get_complexity_hotspots",
+                description="Returns top N most complex functions",
+                inputSchema={
+                    "type": "object",
+                    "properties": {
+                        "limit": {
+                            "type": "integer",
+                            "description": "Maximum number of hotspots to return",
+                            "default": 10,
+                            "minimum": 1,
+                            "maximum": 50,
+                        },
+                    },
+                    "required": [],
+                },
+            ),
+            Tool(
+                name="check_circular_dependencies",
+                description="Returns circular dependency cycles",
+                inputSchema={"type": "object", "properties": {}, "required": []},
+            ),
+            Tool(
+                name="interpret_analysis",
+                description="Interpret analysis results with natural language explanations and recommendations",
+                inputSchema={
+                    "type": "object",
+                    "properties": {
+                        "analysis_json": {
+                            "type": "string",
+                            "description": "JSON string from analyze command with --include-context",
+                        },
+                        "focus": {
+                            "type": "string",
+                            "description": "Focus area: 'summary', 'recommendations', or 'priorities'",
+                            "enum": ["summary", "recommendations", "priorities"],
+                            "default": "summary",
+                        },
+                        "verbosity": {
+                            "type": "string",
+                            "description": "Verbosity level: 'brief', 'normal', or 'detailed'",
+                            "enum": ["brief", "normal", "detailed"],
+                            "default": "normal",
+                        },
+                    },
+                    "required": ["analysis_json"],
+                },
+            ),
         ]
 
         return tools
```
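For reference, the six new tool schemas above translate into straightforward call arguments. A minimal sketch of payloads that satisfy each declared `inputSchema` (illustrative values only; the `"{...}"` placeholder stands in for real analyzer output):

```python
# Illustrative argument payloads for the new tools, mirroring the
# inputSchema declarations above. Values are examples, not output
# from a real run.
analyze_project_args = {
    "threshold_preset": "strict",  # enum: strict | standard | relaxed
    "output_format": "detailed",   # enum: summary | detailed
}

find_smells_args = {
    "smell_type": "God Class",  # optional filter; see the enum above
    "severity": "error",        # optional: info | warning | error
}

get_complexity_hotspots_args = {
    "limit": 5,  # integer, 1-50, default 10
}

interpret_analysis_args = {
    "analysis_json": "{...}",   # required: JSON from analyze --include-context
    "focus": "recommendations",
    "verbosity": "brief",
}
```

`analyze_file` takes a single required `file_path`, and `check_circular_dependencies` takes no arguments.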
```diff
@@ -299,7 +416,8 @@ class MCPVectorSearchServer:
 
     async def call_tool(self, request: CallToolRequest) -> CallToolResult:
         """Handle tool calls."""
-        if not self._initialized:
+        # Skip initialization for interpret_analysis (doesn't need project config)
+        if request.params.name != "interpret_analysis" and not self._initialized:
             await self.initialize()
 
         try:
```
```diff
@@ -313,6 +431,18 @@ class MCPVectorSearchServer:
             return await self._get_project_status(request.params.arguments)
         elif request.params.name == "index_project":
             return await self._index_project(request.params.arguments)
+        elif request.params.name == "analyze_project":
+            return await self._analyze_project(request.params.arguments)
+        elif request.params.name == "analyze_file":
+            return await self._analyze_file(request.params.arguments)
+        elif request.params.name == "find_smells":
+            return await self._find_smells(request.params.arguments)
+        elif request.params.name == "get_complexity_hotspots":
+            return await self._get_complexity_hotspots(request.params.arguments)
+        elif request.params.name == "check_circular_dependencies":
+            return await self._check_circular_dependencies(request.params.arguments)
+        elif request.params.name == "interpret_analysis":
+            return await self._interpret_analysis(request.params.arguments)
         else:
             return CallToolResult(
                 content=[
```
```diff
@@ -661,6 +791,667 @@ class MCPVectorSearchServer:
                 isError=True,
             )
 
+    async def _analyze_project(self, args: dict[str, Any]) -> CallToolResult:
+        """Handle analyze_project tool call."""
+        threshold_preset = args.get("threshold_preset", "standard")
+        output_format = args.get("output_format", "summary")
+
+        try:
+            # Load threshold configuration based on preset
+            threshold_config = self._get_threshold_config(threshold_preset)
+
+            # Run analysis using CLI analyze logic
+            from ..cli.commands.analyze import _analyze_file, _find_analyzable_files
+
+            parser_registry = ParserRegistry()
+            files_to_analyze = _find_analyzable_files(
+                self.project_root, None, None, parser_registry, None
+            )
+
+            if not files_to_analyze:
+                return CallToolResult(
+                    content=[
+                        TextContent(
+                            type="text",
+                            text="No analyzable files found in project",
+                        )
+                    ],
+                    isError=True,
+                )
+
+            # Analyze files
+            from ..analysis import (
+                CognitiveComplexityCollector,
+                CyclomaticComplexityCollector,
+                MethodCountCollector,
+                NestingDepthCollector,
+                ParameterCountCollector,
+            )
+
+            collectors = [
+                CognitiveComplexityCollector(),
+                CyclomaticComplexityCollector(),
+                NestingDepthCollector(),
+                ParameterCountCollector(),
+                MethodCountCollector(),
+            ]
+
+            project_metrics = ProjectMetrics(project_root=str(self.project_root))
+
+            for file_path in files_to_analyze:
+                try:
+                    file_metrics = await _analyze_file(
+                        file_path, parser_registry, collectors
+                    )
+                    if file_metrics and file_metrics.chunks:
+                        project_metrics.files[str(file_path)] = file_metrics
+                except Exception as e:
+                    logger.debug(f"Failed to analyze {file_path}: {e}")
+                    continue
+
+            project_metrics.compute_aggregates()
+
+            # Detect code smells
+            smell_detector = SmellDetector(thresholds=threshold_config)
+            all_smells = []
+            for file_path, file_metrics in project_metrics.files.items():
+                file_smells = smell_detector.detect_all(file_metrics, file_path)
+                all_smells.extend(file_smells)
+
+            # Format response
+            if output_format == "detailed":
+                # Return full JSON output
+                import json
+
+                output = project_metrics.to_summary()
+                output["smells"] = {
+                    "total": len(all_smells),
+                    "by_severity": {
+                        "error": sum(
+                            1 for s in all_smells if s.severity == SmellSeverity.ERROR
+                        ),
+                        "warning": sum(
+                            1 for s in all_smells if s.severity == SmellSeverity.WARNING
+                        ),
+                        "info": sum(
+                            1 for s in all_smells if s.severity == SmellSeverity.INFO
+                        ),
+                    },
+                }
+                response_text = json.dumps(output, indent=2)
+            else:
+                # Return summary
+                summary = project_metrics.to_summary()
+                response_lines = [
+                    "# Project Analysis Summary\n",
+                    f"**Project Root:** {summary['project_root']}",
+                    f"**Total Files:** {summary['total_files']}",
+                    f"**Total Functions:** {summary['total_functions']}",
+                    f"**Total Classes:** {summary['total_classes']}",
+                    f"**Average File Complexity:** {summary['avg_file_complexity']}\n",
+                    "## Complexity Distribution",
+                ]
+
+                dist = summary["complexity_distribution"]
+                for grade in ["A", "B", "C", "D", "F"]:
+                    response_lines.append(f"- Grade {grade}: {dist[grade]} chunks")
+
+                response_lines.extend(
+                    [
+                        "\n## Health Metrics",
+                        f"- Average Health Score: {summary['health_metrics']['avg_health_score']:.2f}",
+                        f"- Files Needing Attention: {summary['health_metrics']['files_needing_attention']}",
+                        "\n## Code Smells",
+                        f"- Total: {len(all_smells)}",
+                        f"- Errors: {sum(1 for s in all_smells if s.severity == SmellSeverity.ERROR)}",
+                        f"- Warnings: {sum(1 for s in all_smells if s.severity == SmellSeverity.WARNING)}",
+                        f"- Info: {sum(1 for s in all_smells if s.severity == SmellSeverity.INFO)}",
+                    ]
+                )
+
+                response_text = "\n".join(response_lines)
+
+            return CallToolResult(
+                content=[TextContent(type="text", text=response_text)]
+            )
+
+        except Exception as e:
+            logger.error(f"Project analysis failed: {e}")
+            return CallToolResult(
+                content=[
+                    TextContent(type="text", text=f"Project analysis failed: {str(e)}")
+                ],
+                isError=True,
+            )
+
```
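Note that the `detailed` branch serializes `project_metrics.to_summary()` as JSON with a smell rollup appended, while the default `summary` branch renders Markdown. Judging only from the keys the summary formatter reads, the detailed payload plausibly has the shape below (a sketch with placeholder values; the full field set of `to_summary()` is not visible in this diff):

```python
# Hypothetical shape inferred from the keys accessed above; placeholder values.
detailed_output = {
    "project_root": "/path/to/project",
    "total_files": 42,
    "total_functions": 310,
    "total_classes": 55,
    "avg_file_complexity": 4.7,
    "complexity_distribution": {"A": 200, "B": 70, "C": 25, "D": 10, "F": 5},
    "health_metrics": {
        "avg_health_score": 0.82,
        "files_needing_attention": 6,
    },
    # Appended by _analyze_project before serialization:
    "smells": {
        "total": 17,
        "by_severity": {"error": 2, "warning": 9, "info": 6},
    },
}
```

The diff continues with the `_analyze_file` handler.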
````diff
+    async def _analyze_file(self, args: dict[str, Any]) -> CallToolResult:
+        """Handle analyze_file tool call."""
+        file_path_str = args.get("file_path", "")
+
+        if not file_path_str:
+            return CallToolResult(
+                content=[
+                    TextContent(type="text", text="file_path parameter is required")
+                ],
+                isError=True,
+            )
+
+        try:
+            file_path = Path(file_path_str)
+            if not file_path.is_absolute():
+                file_path = self.project_root / file_path
+
+            if not file_path.exists():
+                return CallToolResult(
+                    content=[
+                        TextContent(
+                            type="text", text=f"File not found: {file_path_str}"
+                        )
+                    ],
+                    isError=True,
+                )
+
+            # Analyze single file
+            from ..analysis import (
+                CognitiveComplexityCollector,
+                CyclomaticComplexityCollector,
+                MethodCountCollector,
+                NestingDepthCollector,
+                ParameterCountCollector,
+            )
+            from ..cli.commands.analyze import _analyze_file
+
+            parser_registry = ParserRegistry()
+            collectors = [
+                CognitiveComplexityCollector(),
+                CyclomaticComplexityCollector(),
+                NestingDepthCollector(),
+                ParameterCountCollector(),
+                MethodCountCollector(),
+            ]
+
+            file_metrics = await _analyze_file(file_path, parser_registry, collectors)
+
+            if not file_metrics:
+                return CallToolResult(
+                    content=[
+                        TextContent(
+                            type="text",
+                            text=f"Unable to analyze file: {file_path_str}",
+                        )
+                    ],
+                    isError=True,
+                )
+
+            # Detect smells
+            smell_detector = SmellDetector()
+            smells = smell_detector.detect_all(file_metrics, str(file_path))
+
+            # Format response
+            response_lines = [
+                f"# File Analysis: {file_path.name}\n",
+                f"**Path:** {file_path}",
+                f"**Total Lines:** {file_metrics.total_lines}",
+                f"**Code Lines:** {file_metrics.code_lines}",
+                f"**Comment Lines:** {file_metrics.comment_lines}",
+                f"**Functions:** {file_metrics.function_count}",
+                f"**Classes:** {file_metrics.class_count}",
+                f"**Methods:** {file_metrics.method_count}\n",
+                "## Complexity Metrics",
+                f"- Total Complexity: {file_metrics.total_complexity}",
+                f"- Average Complexity: {file_metrics.avg_complexity:.2f}",
+                f"- Max Complexity: {file_metrics.max_complexity}",
+                f"- Health Score: {file_metrics.health_score:.2f}\n",
+            ]
+
+            if smells:
+                response_lines.append(f"## Code Smells ({len(smells)})\n")
+                for smell in smells[:10]:  # Show top 10
+                    response_lines.append(
+                        f"- [{smell.severity.value.upper()}] {smell.name}: {smell.description}"
+                    )
+                if len(smells) > 10:
+                    response_lines.append(f"\n... and {len(smells) - 10} more")
+            else:
+                response_lines.append("## Code Smells\n- None detected")
+
+            response_text = "\n".join(response_lines)
+
+            return CallToolResult(
+                content=[TextContent(type="text", text=response_text)]
+            )
+
+        except Exception as e:
+            logger.error(f"File analysis failed: {e}")
+            return CallToolResult(
+                content=[
+                    TextContent(type="text", text=f"File analysis failed: {str(e)}")
+                ],
+                isError=True,
+            )
+
+    async def _find_smells(self, args: dict[str, Any]) -> CallToolResult:
+        """Handle find_smells tool call."""
+        smell_type_filter = args.get("smell_type")
+        severity_filter = args.get("severity")
+
+        try:
+            # Run full project analysis
+            from ..analysis import (
+                CognitiveComplexityCollector,
+                CyclomaticComplexityCollector,
+                MethodCountCollector,
+                NestingDepthCollector,
+                ParameterCountCollector,
+            )
+            from ..cli.commands.analyze import _analyze_file, _find_analyzable_files
+
+            parser_registry = ParserRegistry()
+            files_to_analyze = _find_analyzable_files(
+                self.project_root, None, None, parser_registry, None
+            )
+
+            collectors = [
+                CognitiveComplexityCollector(),
+                CyclomaticComplexityCollector(),
+                NestingDepthCollector(),
+                ParameterCountCollector(),
+                MethodCountCollector(),
+            ]
+
+            project_metrics = ProjectMetrics(project_root=str(self.project_root))
+
+            for file_path in files_to_analyze:
+                try:
+                    file_metrics = await _analyze_file(
+                        file_path, parser_registry, collectors
+                    )
+                    if file_metrics and file_metrics.chunks:
+                        project_metrics.files[str(file_path)] = file_metrics
+                except Exception:  # nosec B112 - intentional skip of unparseable files
+                    continue
+
+            # Detect all smells
+            smell_detector = SmellDetector()
+            all_smells = []
+            for file_path, file_metrics in project_metrics.files.items():
+                file_smells = smell_detector.detect_all(file_metrics, file_path)
+                all_smells.extend(file_smells)
+
+            # Apply filters
+            filtered_smells = all_smells
+
+            if smell_type_filter:
+                filtered_smells = [
+                    s for s in filtered_smells if s.name == smell_type_filter
+                ]
+
+            if severity_filter:
+                severity_enum = SmellSeverity(severity_filter)
+                filtered_smells = [
+                    s for s in filtered_smells if s.severity == severity_enum
+                ]
+
+            # Format response
+            if not filtered_smells:
+                filter_desc = []
+                if smell_type_filter:
+                    filter_desc.append(f"type={smell_type_filter}")
+                if severity_filter:
+                    filter_desc.append(f"severity={severity_filter}")
+                filter_str = f" ({', '.join(filter_desc)})" if filter_desc else ""
+                response_text = f"No code smells found{filter_str}"
+            else:
+                response_lines = [f"# Code Smells Found: {len(filtered_smells)}\n"]
+
+                # Group by severity
+                by_severity = {
+                    "error": [
+                        s for s in filtered_smells if s.severity == SmellSeverity.ERROR
+                    ],
+                    "warning": [
+                        s
+                        for s in filtered_smells
+                        if s.severity == SmellSeverity.WARNING
+                    ],
+                    "info": [
+                        s for s in filtered_smells if s.severity == SmellSeverity.INFO
+                    ],
+                }
+
+                for severity_level in ["error", "warning", "info"]:
+                    smells = by_severity[severity_level]
+                    if smells:
+                        response_lines.append(
+                            f"## {severity_level.upper()} ({len(smells)})\n"
+                        )
+                        for smell in smells[:20]:  # Show top 20 per severity
+                            response_lines.append(
+                                f"- **{smell.name}** at `{smell.location}`"
+                            )
+                            response_lines.append(f"  {smell.description}")
+                            if smell.suggestion:
+                                response_lines.append(
+                                    f"  *Suggestion: {smell.suggestion}*"
+                                )
+                            response_lines.append("")
+
+                response_text = "\n".join(response_lines)
+
+            return CallToolResult(
+                content=[TextContent(type="text", text=response_text)]
+            )
+
+        except Exception as e:
+            logger.error(f"Smell detection failed: {e}")
+            return CallToolResult(
+                content=[
+                    TextContent(type="text", text=f"Smell detection failed: {str(e)}")
+                ],
+                isError=True,
+            )
+
+    async def _get_complexity_hotspots(self, args: dict[str, Any]) -> CallToolResult:
+        """Handle get_complexity_hotspots tool call."""
+        limit = args.get("limit", 10)
+
+        try:
+            # Run full project analysis
+            from ..analysis import (
+                CognitiveComplexityCollector,
+                CyclomaticComplexityCollector,
+                MethodCountCollector,
+                NestingDepthCollector,
+                ParameterCountCollector,
+            )
+            from ..cli.commands.analyze import _analyze_file, _find_analyzable_files
+
+            parser_registry = ParserRegistry()
+            files_to_analyze = _find_analyzable_files(
+                self.project_root, None, None, parser_registry, None
+            )
+
+            collectors = [
+                CognitiveComplexityCollector(),
+                CyclomaticComplexityCollector(),
+                NestingDepthCollector(),
+                ParameterCountCollector(),
+                MethodCountCollector(),
+            ]
+
+            project_metrics = ProjectMetrics(project_root=str(self.project_root))
+
+            for file_path in files_to_analyze:
+                try:
+                    file_metrics = await _analyze_file(
+                        file_path, parser_registry, collectors
+                    )
+                    if file_metrics and file_metrics.chunks:
+                        project_metrics.files[str(file_path)] = file_metrics
+                except Exception:  # nosec B112 - intentional skip of unparseable files
+                    continue
+
+            # Get top N complex files
+            hotspots = project_metrics.get_hotspots(limit=limit)
+
+            # Format response
+            if not hotspots:
+                response_text = "No complexity hotspots found"
+            else:
+                response_lines = [f"# Top {len(hotspots)} Complexity Hotspots\n"]
+
+                for i, file_metrics in enumerate(hotspots, 1):
+                    response_lines.extend(
+                        [
+                            f"## {i}. {Path(file_metrics.file_path).name}",
+                            f"**Path:** `{file_metrics.file_path}`",
+                            f"**Average Complexity:** {file_metrics.avg_complexity:.2f}",
+                            f"**Max Complexity:** {file_metrics.max_complexity}",
+                            f"**Total Complexity:** {file_metrics.total_complexity}",
+                            f"**Functions:** {file_metrics.function_count}",
+                            f"**Health Score:** {file_metrics.health_score:.2f}\n",
+                        ]
+                    )
+
+                response_text = "\n".join(response_lines)
+
+            return CallToolResult(
+                content=[TextContent(type="text", text=response_text)]
+            )
+
+        except Exception as e:
+            logger.error(f"Hotspot detection failed: {e}")
+            return CallToolResult(
+                content=[
+                    TextContent(type="text", text=f"Hotspot detection failed: {str(e)}")
+                ],
+                isError=True,
+            )
+
+    async def _check_circular_dependencies(
+        self, args: dict[str, Any]
+    ) -> CallToolResult:
+        """Handle check_circular_dependencies tool call."""
+        try:
+            # Find analyzable files to build import graph
+            from ..cli.commands.analyze import _find_analyzable_files
+
+            parser_registry = ParserRegistry()
+            files_to_analyze = _find_analyzable_files(
+                self.project_root, None, None, parser_registry, None
+            )
+
+            if not files_to_analyze:
+                return CallToolResult(
+                    content=[
+                        TextContent(
+                            type="text",
+                            text="No analyzable files found in project",
+                        )
+                    ],
+                    isError=True,
+                )
+
+            # Import circular dependency detection
+            from ..analysis.collectors.coupling import build_import_graph
+
+            # Build import graph for the project (reverse dependency graph)
+            import_graph = build_import_graph(
+                self.project_root, files_to_analyze, language="python"
+            )
+
+            # Convert to forward dependency graph for cycle detection
+            # import_graph maps: module -> set of files that import it (reverse)
+            # We need: file -> list of files it imports (forward)
+            forward_graph: dict[str, list[str]] = {}
+
+            # Build forward graph by reading imports from files
+            for file_path in files_to_analyze:
+                file_str = str(file_path.relative_to(self.project_root))
+                if file_str not in forward_graph:
+                    forward_graph[file_str] = []
+
+                # For each module in import_graph, if this file imports it, add edge
+                for module, importers in import_graph.items():
+                    for importer in importers:
+                        importer_str = str(
+                            Path(importer).relative_to(self.project_root)
+                            if Path(importer).is_absolute()
+                            else importer
+                        )
+                        if importer_str == file_str:
+                            # This file imports the module, add forward edge
+                            if module not in forward_graph[file_str]:
+                                forward_graph[file_str].append(module)
+
+            # Detect circular dependencies using DFS
+            def find_cycles(graph: dict[str, list[str]]) -> list[list[str]]:
+                """Find all cycles in the import graph using DFS."""
+                cycles = []
+                visited = set()
+                rec_stack = set()
+
+                def dfs(node: str, path: list[str]) -> None:
+                    visited.add(node)
+                    rec_stack.add(node)
+                    path.append(node)
+
+                    for neighbor in graph.get(node, []):
+                        if neighbor not in visited:
+                            dfs(neighbor, path.copy())
+                        elif neighbor in rec_stack:
+                            # Found a cycle
+                            try:
+                                cycle_start = path.index(neighbor)
+                                cycle = path[cycle_start:] + [neighbor]
+                                # Normalize cycle representation to avoid duplicates
+                                cycle_tuple = tuple(sorted(cycle))
+                                if not any(
+                                    tuple(sorted(c)) == cycle_tuple for c in cycles
+                                ):
+                                    cycles.append(cycle)
+                            except ValueError:
+                                pass
+
+                    rec_stack.remove(node)
+
+                for node in graph:
+                    if node not in visited:
+                        dfs(node, [])
+
+                return cycles
+
+            cycles = find_cycles(forward_graph)
+
+            # Format response
+            if not cycles:
+                response_text = "No circular dependencies detected"
+            else:
+                response_lines = [f"# Circular Dependencies Found: {len(cycles)}\n"]
+
+                for i, cycle in enumerate(cycles, 1):
+                    response_lines.append(f"## Cycle {i}")
+                    response_lines.append("```")
+                    for j, node in enumerate(cycle):
+                        if j < len(cycle) - 1:
+                            response_lines.append(f"{node}")
+                            response_lines.append(" ↓")
+                        else:
+                            response_lines.append(f"{node} (back to {cycle[0]})")
+                    response_lines.append("```\n")
+
+                response_text = "\n".join(response_lines)
+
+            return CallToolResult(
+                content=[TextContent(type="text", text=response_text)]
+            )
+
+        except Exception as e:
+            logger.error(f"Circular dependency check failed: {e}")
+            return CallToolResult(
+                content=[
+                    TextContent(
+                        type="text",
+                        text=f"Circular dependency check failed: {str(e)}",
+                    )
+                ],
+                isError=True,
+            )
+
````
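The nested `find_cycles` helper above is a depth-first search with a recursion stack, deduplicating cycles by their sorted node sets. A standalone sketch of the same algorithm, runnable against a toy graph (hypothetical file names, not from this package):

```python
def find_cycles(graph: dict[str, list[str]]) -> list[list[str]]:
    """DFS cycle finder mirroring the helper added above."""
    cycles: list[list[str]] = []
    visited: set[str] = set()
    rec_stack: set[str] = set()

    def dfs(node: str, path: list[str]) -> None:
        visited.add(node)
        rec_stack.add(node)
        path.append(node)
        for neighbor in graph.get(node, []):
            if neighbor not in visited:
                dfs(neighbor, path.copy())
            elif neighbor in rec_stack:
                start = path.index(neighbor)
                cycle = path[start:] + [neighbor]
                # Deduplicate cycles that differ only by starting point
                if not any(sorted(c) == sorted(cycle) for c in cycles):
                    cycles.append(cycle)
        rec_stack.remove(node)

    for node in graph:
        if node not in visited:
            dfs(node, [])
    return cycles

# a.py -> b.py -> c.py -> a.py forms one cycle; d.py only feeds into it.
toy = {"a.py": ["b.py"], "b.py": ["c.py"], "c.py": ["a.py"], "d.py": ["a.py"]}
print(find_cycles(toy))  # [['a.py', 'b.py', 'c.py', 'a.py']]
```

Because `visited` is shared across all DFS roots, the traversal reports the first cycle found through any given node rather than enumerating every cycle; the sketch preserves that behavior from the helper above. The diff continues with `_interpret_analysis`.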
```diff
+    async def _interpret_analysis(self, args: dict[str, Any]) -> CallToolResult:
+        """Handle interpret_analysis tool call."""
+        analysis_json_str = args.get("analysis_json", "")
+        focus = args.get("focus", "summary")
+        verbosity = args.get("verbosity", "normal")
+
+        if not analysis_json_str:
+            return CallToolResult(
+                content=[
+                    TextContent(type="text", text="analysis_json parameter is required")
+                ],
+                isError=True,
+            )
+
+        try:
+            import json
+
+            from ..analysis.interpretation import AnalysisInterpreter, LLMContextExport
+
+            # Parse JSON input
+            analysis_data = json.loads(analysis_json_str)
+
+            # Convert to LLMContextExport
+            export = LLMContextExport(**analysis_data)
+
+            # Create interpreter and generate interpretation
+            interpreter = AnalysisInterpreter()
+            interpretation = interpreter.interpret(
+                export, focus=focus, verbosity=verbosity
+            )
+
+            return CallToolResult(
+                content=[TextContent(type="text", text=interpretation)]
+            )
+
+        except json.JSONDecodeError as e:
+            return CallToolResult(
+                content=[
+                    TextContent(
+                        type="text",
+                        text=f"Invalid JSON input: {str(e)}",
+                    )
+                ],
+                isError=True,
+            )
+        except Exception as e:
+            logger.error(f"Analysis interpretation failed: {e}")
+            return CallToolResult(
+                content=[
+                    TextContent(
+                        type="text",
+                        text=f"Interpretation failed: {str(e)}",
+                    )
+                ],
+                isError=True,
+            )
+
```
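Per the tool schema, `analysis_json` is the output of the analyze command run with `--include-context`. A plausible round-trip, assuming the CLI entry point is `mcp-vector-search` and that the analyze command writes its JSON to stdout (neither is shown in this diff):

```python
import subprocess

# Hypothetical: generate the context-bearing analysis JSON with the CLI.
result = subprocess.run(
    ["mcp-vector-search", "analyze", "--include-context"],
    capture_output=True,
    text=True,
    check=True,
)

# Hand the raw JSON string to the interpret_analysis tool.
interpret_args = {
    "analysis_json": result.stdout,
    "focus": "priorities",    # summary | recommendations | priorities
    "verbosity": "detailed",  # brief | normal | detailed
}
```

Recall that `call_tool` deliberately skips project initialization for this tool, so it can interpret previously exported results without a configured project. The diff ends with the threshold presets.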
```diff
+    def _get_threshold_config(self, preset: str) -> ThresholdConfig:
+        """Get threshold configuration based on preset.
+
+        Args:
+            preset: Threshold preset ('strict', 'standard', or 'relaxed')
+
+        Returns:
+            ThresholdConfig instance
+        """
+        if preset == "strict":
+            # Stricter thresholds
+            config = ThresholdConfig()
+            config.complexity.cognitive_a = 3
+            config.complexity.cognitive_b = 7
+            config.complexity.cognitive_c = 15
+            config.complexity.cognitive_d = 20
+            config.smells.long_method_lines = 30
+            config.smells.high_complexity = 10
+            config.smells.too_many_parameters = 3
+            config.smells.deep_nesting_depth = 3
+            return config
+        elif preset == "relaxed":
+            # More relaxed thresholds
+            config = ThresholdConfig()
+            config.complexity.cognitive_a = 7
+            config.complexity.cognitive_b = 15
+            config.complexity.cognitive_c = 25
+            config.complexity.cognitive_d = 40
+            config.smells.long_method_lines = 75
+            config.smells.high_complexity = 20
+            config.smells.too_many_parameters = 7
+            config.smells.deep_nesting_depth = 5
+            return config
+        else:
+            # Standard (default)
+            return ThresholdConfig()
+
 
 def create_mcp_server(
     project_root: Path | None = None, enable_file_watching: bool | None = None
```
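Taken together, the new analysis tools flow through the same `call_tool` dispatch as the existing search tools. A minimal in-process sketch, assuming the `mcp` package's `CallToolRequest`/`CallToolRequestParams` types and that `create_mcp_server` returns an `MCPVectorSearchServer` (its body is not part of this diff):

```python
import asyncio
from pathlib import Path

from mcp.types import CallToolRequest, CallToolRequestParams

from mcp_vector_search.mcp.server import create_mcp_server

async def main() -> None:
    # Assumption: create_mcp_server returns the server instance directly.
    server = create_mcp_server(project_root=Path("."))
    request = CallToolRequest(
        method="tools/call",
        params=CallToolRequestParams(
            name="get_complexity_hotspots",
            arguments={"limit": 5},
        ),
    )
    result = await server.call_tool(request)
    for item in result.content:
        print(item.text)

asyncio.run(main())
```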