mcp-vector-search 1.0.3__py3-none-any.whl → 1.1.22__py3-none-any.whl

This diff shows the content differences between two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
Files changed (63)
  1. mcp_vector_search/__init__.py +3 -3
  2. mcp_vector_search/analysis/__init__.py +48 -1
  3. mcp_vector_search/analysis/baseline/__init__.py +68 -0
  4. mcp_vector_search/analysis/baseline/comparator.py +462 -0
  5. mcp_vector_search/analysis/baseline/manager.py +621 -0
  6. mcp_vector_search/analysis/collectors/__init__.py +35 -0
  7. mcp_vector_search/analysis/collectors/cohesion.py +463 -0
  8. mcp_vector_search/analysis/collectors/coupling.py +1162 -0
  9. mcp_vector_search/analysis/collectors/halstead.py +514 -0
  10. mcp_vector_search/analysis/collectors/smells.py +325 -0
  11. mcp_vector_search/analysis/debt.py +516 -0
  12. mcp_vector_search/analysis/interpretation.py +685 -0
  13. mcp_vector_search/analysis/metrics.py +74 -1
  14. mcp_vector_search/analysis/reporters/__init__.py +3 -1
  15. mcp_vector_search/analysis/reporters/console.py +424 -0
  16. mcp_vector_search/analysis/reporters/markdown.py +480 -0
  17. mcp_vector_search/analysis/reporters/sarif.py +377 -0
  18. mcp_vector_search/analysis/storage/__init__.py +93 -0
  19. mcp_vector_search/analysis/storage/metrics_store.py +762 -0
  20. mcp_vector_search/analysis/storage/schema.py +245 -0
  21. mcp_vector_search/analysis/storage/trend_tracker.py +560 -0
  22. mcp_vector_search/analysis/trends.py +308 -0
  23. mcp_vector_search/analysis/visualizer/__init__.py +90 -0
  24. mcp_vector_search/analysis/visualizer/d3_data.py +534 -0
  25. mcp_vector_search/analysis/visualizer/exporter.py +484 -0
  26. mcp_vector_search/analysis/visualizer/html_report.py +2895 -0
  27. mcp_vector_search/analysis/visualizer/schemas.py +525 -0
  28. mcp_vector_search/cli/commands/analyze.py +665 -11
  29. mcp_vector_search/cli/commands/chat.py +193 -0
  30. mcp_vector_search/cli/commands/index.py +600 -2
  31. mcp_vector_search/cli/commands/index_background.py +467 -0
  32. mcp_vector_search/cli/commands/search.py +194 -1
  33. mcp_vector_search/cli/commands/setup.py +64 -13
  34. mcp_vector_search/cli/commands/status.py +302 -3
  35. mcp_vector_search/cli/commands/visualize/cli.py +26 -10
  36. mcp_vector_search/cli/commands/visualize/exporters/json_exporter.py +8 -4
  37. mcp_vector_search/cli/commands/visualize/graph_builder.py +167 -234
  38. mcp_vector_search/cli/commands/visualize/server.py +304 -15
  39. mcp_vector_search/cli/commands/visualize/templates/base.py +60 -6
  40. mcp_vector_search/cli/commands/visualize/templates/scripts.py +2100 -65
  41. mcp_vector_search/cli/commands/visualize/templates/styles.py +1297 -88
  42. mcp_vector_search/cli/didyoumean.py +5 -0
  43. mcp_vector_search/cli/main.py +16 -5
  44. mcp_vector_search/cli/output.py +134 -5
  45. mcp_vector_search/config/thresholds.py +89 -1
  46. mcp_vector_search/core/__init__.py +16 -0
  47. mcp_vector_search/core/database.py +39 -2
  48. mcp_vector_search/core/embeddings.py +24 -0
  49. mcp_vector_search/core/git.py +380 -0
  50. mcp_vector_search/core/indexer.py +445 -84
  51. mcp_vector_search/core/llm_client.py +9 -4
  52. mcp_vector_search/core/models.py +88 -1
  53. mcp_vector_search/core/relationships.py +473 -0
  54. mcp_vector_search/core/search.py +1 -1
  55. mcp_vector_search/mcp/server.py +795 -4
  56. mcp_vector_search/parsers/python.py +285 -5
  57. mcp_vector_search/utils/gitignore.py +0 -3
  58. {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/METADATA +3 -2
  59. {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/RECORD +62 -39
  60. mcp_vector_search/cli/commands/visualize.py.original +0 -2536
  61. {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/WHEEL +0 -0
  62. {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/entry_points.txt +0 -0
  63. {mcp_vector_search-1.0.3.dist-info → mcp_vector_search-1.1.22.dist-info}/licenses/LICENSE +0 -0
--- /dev/null
+++ b/mcp_vector_search/analysis/interpretation.py
@@ -0,0 +1,685 @@
+"""LLM-friendly interpretation of code analysis results.
+
+This module provides enhanced JSON export with semantic context and natural
+language interpretation capabilities for LLM consumption.
+
+Key Features:
+- Threshold comparisons with semantic labels (above/below, by how much)
+- Code smell classifications with severity and remediation estimates
+- Semantic context (callers, callees, purpose hints)
+- Natural language interpretation templates
+
+Example:
+    >>> from pathlib import Path
+    >>> from mcp_vector_search.analysis import ProjectMetrics
+    >>> from mcp_vector_search.analysis.interpretation import (
+    ...     EnhancedJSONExporter,
+    ...     AnalysisInterpreter
+    ... )
+    >>>
+    >>> # Enhanced export with LLM context
+    >>> exporter = EnhancedJSONExporter(project_root=Path("/path/to/project"))
+    >>> export = exporter.export_with_context(project_metrics)
+    >>>
+    >>> # Natural language interpretation
+    >>> interpreter = AnalysisInterpreter()
+    >>> summary = interpreter.interpret(export, focus="summary")
+    >>> print(summary)
+"""
+
+from __future__ import annotations
+
+from dataclasses import dataclass
+from enum import Enum
+from pathlib import Path
+from typing import TYPE_CHECKING, Any
+
+from pydantic import BaseModel, Field
+
+from ..config.thresholds import ThresholdConfig
+from .collectors.smells import SmellSeverity
+from .visualizer.exporter import JSONExporter
+from .visualizer.schemas import AnalysisExport, SmellLocation
+
+if TYPE_CHECKING:
+    from .metrics import ProjectMetrics
+
+
+class ThresholdComparison(str, Enum):
+    """Comparison status against thresholds."""
+
+    WELL_BELOW = "well_below"  # <50% of threshold
+    BELOW = "below"  # 50-100% of threshold
+    AT_THRESHOLD = "at_threshold"  # 100-110% of threshold
+    ABOVE = "above"  # 110-150% of threshold
+    WELL_ABOVE = "well_above"  # >150% of threshold
+
+
+class RemediationPriority(str, Enum):
+    """Priority level for remediation."""
+
+    LOW = "low"  # Info-level smells, cosmetic issues
+    MEDIUM = "medium"  # Warning-level, should fix during refactoring
+    HIGH = "high"  # Error-level, needs attention soon
+    CRITICAL = "critical"  # Severe issues blocking maintainability
+
+
+@dataclass
+class ThresholdContext:
+    """Context about metric threshold comparison.
+
+    Attributes:
+        metric_name: Name of the metric being compared
+        value: Actual metric value
+        threshold: Threshold value for this metric
+        comparison: How value compares to threshold
+        percentage_of_threshold: Value as percentage of threshold
+        severity: Severity level based on comparison
+    """
+
+    metric_name: str
+    value: float
+    threshold: float
+    comparison: ThresholdComparison
+    percentage_of_threshold: float
+    severity: SmellSeverity
+
+    def to_dict(self) -> dict[str, Any]:
+        """Convert to dictionary for JSON serialization."""
+        return {
+            "metric_name": self.metric_name,
+            "value": self.value,
+            "threshold": self.threshold,
+            "comparison": self.comparison.value,
+            "percentage_of_threshold": round(self.percentage_of_threshold, 1),
+            "severity": (
+                self.severity.value
+                if isinstance(self.severity, SmellSeverity)
+                else self.severity
+            ),
+            "interpretation": self.get_interpretation(),
+        }
+
+    def get_interpretation(self) -> str:
+        """Get natural language interpretation of threshold comparison."""
+        diff_pct = abs(100 - self.percentage_of_threshold)
+
+        if self.comparison == ThresholdComparison.WELL_BELOW:
+            return f"{self.metric_name} is {diff_pct:.0f}% below threshold (healthy)"
+        elif self.comparison == ThresholdComparison.BELOW:
+            return f"{self.metric_name} is within acceptable range"
+        elif self.comparison == ThresholdComparison.AT_THRESHOLD:
+            return f"{self.metric_name} is at threshold (monitor closely)"
+        elif self.comparison == ThresholdComparison.ABOVE:
+            return f"{self.metric_name} exceeds threshold by {diff_pct:.0f}% (needs attention)"
+        else:  # WELL_ABOVE
+            return f"{self.metric_name} significantly exceeds threshold by {diff_pct:.0f}% (urgent)"
+
+
+class EnhancedSmellLocation(BaseModel):
+    """Enhanced smell location with interpretation context.
+
+    Extends SmellLocation with remediation estimates and priority.
+
+    Attributes:
+        smell_type: Type of code smell
+        severity: Severity level
+        message: Human-readable description
+        line: Starting line number
+        column: Starting column (optional)
+        end_line: Ending line number (optional)
+        function_name: Function where smell occurs (optional)
+        class_name: Class where smell occurs (optional)
+        remediation_minutes: Estimated time to fix
+        priority: Remediation priority level
+        threshold_context: Threshold comparison context
+        suggested_actions: List of specific remediation steps
+    """
+
+    smell_type: str
+    severity: str
+    message: str
+    line: int
+    column: int | None = None
+    end_line: int | None = None
+    function_name: str | None = None
+    class_name: str | None = None
+    remediation_minutes: int | None = None
+    priority: str = Field(default="medium")
+    threshold_context: dict[str, Any] | None = None
+    suggested_actions: list[str] = Field(default_factory=list)
+
+
+class LLMContextExport(BaseModel):
+    """Extended analysis export with LLM-consumable context.
+
+    Attributes:
+        analysis: Base analysis export
+        threshold_comparisons: Threshold context for each metric
+        remediation_summary: Summary of remediation priorities
+        code_quality_grade: Overall quality grade (A-F)
+        interpretation_hints: Natural language hints for LLM
+    """
+
+    analysis: AnalysisExport
+    threshold_comparisons: dict[str, list[dict[str, Any]]] = Field(default_factory=dict)
+    remediation_summary: dict[str, Any] = Field(default_factory=dict)
+    code_quality_grade: str = "C"
+    interpretation_hints: list[str] = Field(default_factory=list)
+
+
+class EnhancedJSONExporter(JSONExporter):
+    """Extended JSON exporter with LLM-consumable context.
+
+    Adds semantic context, threshold comparisons, and remediation estimates
+    to the base JSON export format.
+    """
+
+    def __init__(
+        self,
+        project_root: Path,
+        threshold_config: ThresholdConfig | None = None,
+        **kwargs: Any,
+    ):
+        """Initialize enhanced exporter.
+
+        Args:
+            project_root: Root directory of project
+            threshold_config: Threshold configuration for comparisons
+            **kwargs: Additional arguments passed to JSONExporter
+        """
+        super().__init__(project_root, **kwargs)
+        self.threshold_config = threshold_config or ThresholdConfig()
+
+    def export_with_context(
+        self,
+        project_metrics: ProjectMetrics,
+        include_smells: bool = True,
+        **kwargs: Any,
+    ) -> LLMContextExport:
+        """Export with enhanced LLM context.
+
+        Args:
+            project_metrics: Project metrics to export
+            include_smells: Whether to detect and include code smells
+            **kwargs: Additional arguments passed to export()
+
+        Returns:
+            Enhanced export with LLM-friendly context
+        """
+        # Get base export
+        base_export = self.export(project_metrics, **kwargs)
+
+        # Compute threshold comparisons
+        threshold_comparisons = self._compute_threshold_comparisons(project_metrics)
+
+        # Generate remediation summary
+        remediation_summary = self._generate_remediation_summary(base_export)
+
+        # Calculate quality grade
+        quality_grade = self._calculate_quality_grade(project_metrics)
+
+        # Generate interpretation hints
+        interpretation_hints = self._generate_interpretation_hints(
+            project_metrics, threshold_comparisons
+        )
+
+        return LLMContextExport(
+            analysis=base_export,
+            threshold_comparisons=threshold_comparisons,
+            remediation_summary=remediation_summary,
+            code_quality_grade=quality_grade,
+            interpretation_hints=interpretation_hints,
+        )
+
+    def _compute_threshold_comparisons(
+        self, project_metrics: ProjectMetrics
+    ) -> dict[str, list[dict[str, Any]]]:
+        """Compute threshold comparisons for all metrics.
+
+        Args:
+            project_metrics: Project metrics to analyze
+
+        Returns:
+            Dictionary mapping metric categories to threshold contexts
+        """
+        comparisons: dict[str, list[dict[str, Any]]] = {
+            "complexity": [],
+            "size": [],
+            "coupling": [],
+        }
+
+        # Average complexity comparison
+        all_chunks = [
+            chunk for file in project_metrics.files.values() for chunk in file.chunks
+        ]
+        if all_chunks:
+            avg_cognitive = sum(c.cognitive_complexity for c in all_chunks) / len(
+                all_chunks
+            )
+            avg_cyclomatic = sum(c.cyclomatic_complexity for c in all_chunks) / len(
+                all_chunks
+            )
+
+            comparisons["complexity"].append(
+                self._create_threshold_context(
+                    "avg_cognitive_complexity",
+                    avg_cognitive,
+                    float(self.threshold_config.complexity.cognitive_b),
+                ).to_dict()
+            )
+
+            comparisons["complexity"].append(
+                self._create_threshold_context(
+                    "avg_cyclomatic_complexity",
+                    avg_cyclomatic,
+                    float(self.threshold_config.complexity.cyclomatic_moderate),
+                ).to_dict()
+            )
+
+        # Size metrics
+        all_files = list(project_metrics.files.values())
+        if all_files:
+            avg_file_lines = sum(f.total_lines for f in all_files) / len(all_files)
+            comparisons["size"].append(
+                self._create_threshold_context(
+                    "avg_file_lines",
+                    avg_file_lines,
+                    float(self.threshold_config.smells.god_class_lines),
+                ).to_dict()
+            )
+
+        return comparisons
+
+    def _create_threshold_context(
+        self, metric_name: str, value: float, threshold: float
+    ) -> ThresholdContext:
+        """Create threshold context for a metric.
+
+        Args:
+            metric_name: Name of the metric
+            value: Actual value
+            threshold: Threshold value
+
+        Returns:
+            ThresholdContext with comparison and severity
+        """
+        percentage = (value / threshold * 100) if threshold > 0 else 0
+
+        # Determine comparison level
+        if percentage < 50:
+            comparison = ThresholdComparison.WELL_BELOW
+            severity = SmellSeverity.INFO
+        elif percentage < 100:
+            comparison = ThresholdComparison.BELOW
+            severity = SmellSeverity.INFO
+        elif percentage <= 110:
+            comparison = ThresholdComparison.AT_THRESHOLD
+            severity = SmellSeverity.WARNING
+        elif percentage <= 150:
+            comparison = ThresholdComparison.ABOVE
+            severity = SmellSeverity.WARNING
+        else:
+            comparison = ThresholdComparison.WELL_ABOVE
+            severity = SmellSeverity.ERROR
+
+        return ThresholdContext(
+            metric_name=metric_name,
+            value=value,
+            threshold=threshold,
+            comparison=comparison,
+            percentage_of_threshold=percentage,
+            severity=severity,
+        )
+
+    def _generate_remediation_summary(self, export: AnalysisExport) -> dict[str, Any]:
+        """Generate remediation summary from analysis.
+
+        Args:
+            export: Base analysis export
+
+        Returns:
+            Summary with priorities and estimates
+        """
+        # Count smells by severity
+        smells_by_severity = export.summary.smells_by_severity
+
+        # Estimate total remediation time
+        total_minutes = 0
+        priority_counts = {"low": 0, "medium": 0, "high": 0, "critical": 0}
+
+        # Base estimates (minutes) per smell type
+        remediation_estimates = {
+            "Long Method": 30,
+            "Deep Nesting": 20,
+            "Long Parameter List": 15,
+            "God Class": 120,
+            "Complex Method": 25,
+        }
+
+        for file_detail in export.files:
+            for smell in file_detail.smells:
+                estimate = remediation_estimates.get(smell.smell_type, 20)
+                total_minutes += estimate
+
+                # Map severity to priority
+                if smell.severity == "error":
+                    priority_counts["critical"] += 1
+                elif smell.severity == "warning":
+                    priority_counts["high"] += 1
+                else:
+                    priority_counts["medium"] += 1
+
+        return {
+            "total_smells": export.summary.total_smells,
+            "smells_by_severity": smells_by_severity,
+            "priority_counts": priority_counts,
+            "estimated_remediation_hours": round(total_minutes / 60, 1),
+            "recommended_focus": self._determine_recommended_focus(export),
+        }
+
+    def _determine_recommended_focus(self, export: AnalysisExport) -> list[str]:
+        """Determine recommended focus areas based on analysis.
+
+        Args:
+            export: Analysis export
+
+        Returns:
+            List of recommended focus areas
+        """
+        recommendations = []
+
+        # Check for God Classes
+        god_class_count = sum(
+            1 for f in export.files for s in f.smells if s.smell_type == "God Class"
+        )
+        if god_class_count > 0:
+            recommendations.append(
+                f"Address {god_class_count} God Class smell(s) - highest impact refactoring"
+            )
+
+        # Check complexity
+        if export.summary.avg_complexity > 15:
+            recommendations.append(
+                "Reduce overall complexity - average exceeds recommended threshold"
+            )
+
+        # Check circular dependencies
+        if export.summary.circular_dependencies > 0:
+            recommendations.append(
+                f"Resolve {export.summary.circular_dependencies} circular dependency cycles"
+            )
+
+        # Check instability
+        if export.summary.avg_instability and export.summary.avg_instability > 0.7:
+            recommendations.append(
+                "Improve coupling - high instability indicates fragile architecture"
+            )
+
+        if not recommendations:
+            recommendations.append(
+                "Code quality is good - focus on preventive maintenance"
+            )
+
+        return recommendations
+
+    def _calculate_quality_grade(self, project_metrics: ProjectMetrics) -> str:
+        """Calculate overall quality grade A-F.
+
+        Args:
+            project_metrics: Project metrics
+
+        Returns:
+            Grade letter (A-F)
+        """
+        # Simple scoring based on complexity distribution
+        summary = project_metrics.to_summary()
+        dist = summary.get("complexity_distribution", {})
+
+        # Calculate weighted score (A=100, B=80, C=60, D=40, F=20)
+        total_chunks = sum(dist.values())
+        if total_chunks == 0:
+            return "C"
+
+        score = (
+            dist.get("A", 0) * 100
+            + dist.get("B", 0) * 80
+            + dist.get("C", 0) * 60
+            + dist.get("D", 0) * 40
+            + dist.get("F", 0) * 20
+        ) / total_chunks
+
+        # Map to grade
+        if score >= 90:
+            return "A"
+        elif score >= 80:
+            return "B"
+        elif score >= 70:
+            return "C"
+        elif score >= 60:
+            return "D"
+        else:
+            return "F"
+
+    def _generate_interpretation_hints(
+        self,
+        project_metrics: ProjectMetrics,
+        threshold_comparisons: dict[str, list[dict[str, Any]]],
+    ) -> list[str]:
+        """Generate natural language interpretation hints.
+
+        Args:
+            project_metrics: Project metrics
+            threshold_comparisons: Threshold comparison contexts
+
+        Returns:
+            List of interpretation hints for LLM
+        """
+        hints = []
+
+        # Complexity hints
+        for comparison in threshold_comparisons.get("complexity", []):
+            if comparison["comparison"] in ["above", "well_above"]:
+                hints.append(comparison["interpretation"])
+
+        # File count context
+        total_files = len(project_metrics.files)
+        if total_files < 10:
+            hints.append("Small project - focus on establishing good patterns")
+        elif total_files < 50:
+            hints.append(
+                "Medium project - maintain modularity and separation of concerns"
+            )
+        else:
+            hints.append("Large project - architectural consistency is critical")
+
+        return hints
+
+
+class AnalysisInterpreter:
+    """Natural language interpreter for analysis results.
+
+    Provides LLM-friendly interpretation of analysis exports with
+    configurable focus and verbosity levels.
+    """
+
+    def __init__(self) -> None:
+        """Initialize interpreter."""
+        self.prompt_templates = self._load_prompt_templates()
+
+    def interpret(
+        self,
+        export: LLMContextExport,
+        focus: str = "summary",
+        verbosity: str = "normal",
+    ) -> str:
+        """Generate natural language interpretation.
+
+        Args:
+            export: Enhanced analysis export
+            focus: Focus area - "summary", "recommendations", "priorities"
+            verbosity: Verbosity level - "brief", "normal", "detailed"
+
+        Returns:
+            Natural language interpretation
+        """
+        if focus == "summary":
+            return self._interpret_summary(export, verbosity)
+        elif focus == "recommendations":
+            return self._interpret_recommendations(export, verbosity)
+        elif focus == "priorities":
+            return self._interpret_priorities(export, verbosity)
+        else:
+            return self._interpret_summary(export, verbosity)
+
+    def _interpret_summary(self, export: LLMContextExport, verbosity: str) -> str:
+        """Generate summary interpretation.
+
+        Args:
+            export: Analysis export
+            verbosity: Verbosity level
+
+        Returns:
+            Summary interpretation
+        """
+        summary = export.analysis.summary
+        lines = []
+
+        # Overall assessment
+        lines.append(f"# Code Quality Assessment: Grade {export.code_quality_grade}")
+        lines.append("")
+
+        # High-level metrics
+        lines.append(
+            f"**Project Size**: {summary.total_files} files, {summary.total_functions} functions"
+        )
+        lines.append(
+            f"**Average Complexity**: {summary.avg_complexity:.1f} (cognitive: {summary.avg_cognitive_complexity:.1f})"
+        )
+        lines.append("")
+
+        # Code smells summary
+        if summary.total_smells > 0:
+            lines.append(f"**Code Smells Detected**: {summary.total_smells} total")
+            smells_by_sev = summary.smells_by_severity
+            lines.append(f" - Errors: {smells_by_sev.get('error', 0)}")
+            lines.append(f" - Warnings: {smells_by_sev.get('warning', 0)}")
+            lines.append(f" - Info: {smells_by_sev.get('info', 0)}")
+        else:
+            lines.append("**Code Smells**: None detected ✓")
+
+        lines.append("")
+
+        # Interpretation hints
+        if export.interpretation_hints:
+            lines.append("**Key Insights**:")
+            for hint in export.interpretation_hints:
+                lines.append(f" - {hint}")
+
+        if verbosity == "detailed":
+            # Add threshold comparisons
+            lines.append("")
+            lines.append("**Threshold Comparisons**:")
+            for category, comparisons in export.threshold_comparisons.items():
+                if comparisons:
+                    lines.append(f"\n_{category.title()}_:")
+                    for comp in comparisons:
+                        lines.append(f" - {comp['interpretation']}")
+
+        return "\n".join(lines)
+
+    def _interpret_recommendations(
+        self, export: LLMContextExport, verbosity: str
+    ) -> str:
+        """Generate recommendations interpretation.
+
+        Args:
+            export: Analysis export
+            verbosity: Verbosity level
+
+        Returns:
+            Recommendations interpretation
+        """
+        lines = ["# Recommended Actions", ""]
+
+        remediation = export.remediation_summary
+        focus_areas = remediation.get("recommended_focus", [])
+
+        lines.append(
+            f"**Estimated Effort**: {remediation.get('estimated_remediation_hours', 0)} hours"
+        )
+        lines.append("")
+
+        lines.append("**Priority Focus Areas**:")
+        for i, area in enumerate(focus_areas, 1):
+            lines.append(f"{i}. {area}")
+
+        if verbosity in ["normal", "detailed"]:
+            lines.append("")
+            lines.append("**Quick Wins** (low effort, high impact):")
+            # Find simple refactorings
+            quick_wins = []
+            for file_detail in export.analysis.files:
+                for smell in file_detail.smells:
+                    if smell.smell_type in ["Long Parameter List", "Deep Nesting"]:
+                        quick_wins.append(
+                            f" - Fix {smell.smell_type} in {Path(file_detail.path).name}"
+                        )
+            if quick_wins:
+                lines.extend(quick_wins[:5])  # Top 5
+            else:
+                lines.append(" - No quick wins identified")
+
+        return "\n".join(lines)
+
+    def _interpret_priorities(self, export: LLMContextExport, verbosity: str) -> str:
+        """Generate priorities interpretation.
+
+        Args:
+            export: Analysis export
+            verbosity: Verbosity level
+
+        Returns:
+            Priorities interpretation
+        """
+        lines = ["# Remediation Priorities", ""]
+
+        remediation = export.remediation_summary
+        priority_counts = remediation.get("priority_counts", {})
+
+        lines.append("**By Priority Level**:")
+        lines.append(f" - Critical: {priority_counts.get('critical', 0)} issues")
+        lines.append(f" - High: {priority_counts.get('high', 0)} issues")
+        lines.append(f" - Medium: {priority_counts.get('medium', 0)} issues")
+        lines.append(f" - Low: {priority_counts.get('low', 0)} issues")
+        lines.append("")
+
+        if verbosity in ["normal", "detailed"]:
+            # Group smells by file
+            smells_by_file: dict[str, list[SmellLocation]] = {}
+            for file_detail in export.analysis.files:
+                if file_detail.smells:
+                    smells_by_file[file_detail.path] = file_detail.smells
+
+            # Sort files by smell count
+            sorted_files = sorted(
+                smells_by_file.items(), key=lambda x: len(x[1]), reverse=True
+            )
+
+            lines.append("**Files Needing Most Attention**:")
+            for file_path, smells in sorted_files[:5]:
+                lines.append(f" - {Path(file_path).name}: {len(smells)} smell(s)")
+
+        return "\n".join(lines)
+
+    def _load_prompt_templates(self) -> dict[str, str]:
+        """Load prompt templates for interpretation.
+
+        Returns:
+            Dictionary of prompt templates
+        """
+        return {
+            "summary": "Provide a high-level assessment of code quality",
+            "recommendations": "Suggest actionable improvements prioritized by impact",
+            "priorities": "Prioritize issues by severity and remediation effort",
+        }
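
For orientation, here is a minimal sketch of how the interpretation API added in this release might be driven end to end, following the module docstring above. It assumes a ProjectMetrics instance (the project_metrics variable) has already been produced by the package's analysis pipeline; that variable and the printed values are illustrative, not part of the released code.

    from pathlib import Path

    from mcp_vector_search.analysis.interpretation import (
        AnalysisInterpreter,
        EnhancedJSONExporter,
    )

    # project_metrics is assumed to be a ProjectMetrics object produced by the
    # analysis pipeline (see mcp_vector_search.analysis); building one is out of
    # scope for this sketch.
    exporter = EnhancedJSONExporter(project_root=Path("/path/to/project"))
    export = exporter.export_with_context(project_metrics)

    print(export.code_quality_grade)   # single letter, "A" through "F"
    print(export.remediation_summary)  # priority counts and estimated hours

    interpreter = AnalysisInterpreter()
    print(interpreter.interpret(export, focus="recommendations", verbosity="brief"))

Per the code above, focus accepts "summary", "recommendations", or "priorities" (unknown values fall back to the summary view), and verbosity accepts "brief", "normal", or "detailed".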