mcp-vector-search 0.12.6__py3-none-any.whl → 1.1.22__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92) hide show
  1. mcp_vector_search/__init__.py +3 -3
  2. mcp_vector_search/analysis/__init__.py +111 -0
  3. mcp_vector_search/analysis/baseline/__init__.py +68 -0
  4. mcp_vector_search/analysis/baseline/comparator.py +462 -0
  5. mcp_vector_search/analysis/baseline/manager.py +621 -0
  6. mcp_vector_search/analysis/collectors/__init__.py +74 -0
  7. mcp_vector_search/analysis/collectors/base.py +164 -0
  8. mcp_vector_search/analysis/collectors/cohesion.py +463 -0
  9. mcp_vector_search/analysis/collectors/complexity.py +743 -0
  10. mcp_vector_search/analysis/collectors/coupling.py +1162 -0
  11. mcp_vector_search/analysis/collectors/halstead.py +514 -0
  12. mcp_vector_search/analysis/collectors/smells.py +325 -0
  13. mcp_vector_search/analysis/debt.py +516 -0
  14. mcp_vector_search/analysis/interpretation.py +685 -0
  15. mcp_vector_search/analysis/metrics.py +414 -0
  16. mcp_vector_search/analysis/reporters/__init__.py +7 -0
  17. mcp_vector_search/analysis/reporters/console.py +646 -0
  18. mcp_vector_search/analysis/reporters/markdown.py +480 -0
  19. mcp_vector_search/analysis/reporters/sarif.py +377 -0
  20. mcp_vector_search/analysis/storage/__init__.py +93 -0
  21. mcp_vector_search/analysis/storage/metrics_store.py +762 -0
  22. mcp_vector_search/analysis/storage/schema.py +245 -0
  23. mcp_vector_search/analysis/storage/trend_tracker.py +560 -0
  24. mcp_vector_search/analysis/trends.py +308 -0
  25. mcp_vector_search/analysis/visualizer/__init__.py +90 -0
  26. mcp_vector_search/analysis/visualizer/d3_data.py +534 -0
  27. mcp_vector_search/analysis/visualizer/exporter.py +484 -0
  28. mcp_vector_search/analysis/visualizer/html_report.py +2895 -0
  29. mcp_vector_search/analysis/visualizer/schemas.py +525 -0
  30. mcp_vector_search/cli/commands/analyze.py +1062 -0
  31. mcp_vector_search/cli/commands/chat.py +1455 -0
  32. mcp_vector_search/cli/commands/index.py +621 -5
  33. mcp_vector_search/cli/commands/index_background.py +467 -0
  34. mcp_vector_search/cli/commands/init.py +13 -0
  35. mcp_vector_search/cli/commands/install.py +597 -335
  36. mcp_vector_search/cli/commands/install_old.py +8 -4
  37. mcp_vector_search/cli/commands/mcp.py +78 -6
  38. mcp_vector_search/cli/commands/reset.py +68 -26
  39. mcp_vector_search/cli/commands/search.py +224 -8
  40. mcp_vector_search/cli/commands/setup.py +1184 -0
  41. mcp_vector_search/cli/commands/status.py +339 -5
  42. mcp_vector_search/cli/commands/uninstall.py +276 -357
  43. mcp_vector_search/cli/commands/visualize/__init__.py +39 -0
  44. mcp_vector_search/cli/commands/visualize/cli.py +292 -0
  45. mcp_vector_search/cli/commands/visualize/exporters/__init__.py +12 -0
  46. mcp_vector_search/cli/commands/visualize/exporters/html_exporter.py +33 -0
  47. mcp_vector_search/cli/commands/visualize/exporters/json_exporter.py +33 -0
  48. mcp_vector_search/cli/commands/visualize/graph_builder.py +647 -0
  49. mcp_vector_search/cli/commands/visualize/layout_engine.py +469 -0
  50. mcp_vector_search/cli/commands/visualize/server.py +600 -0
  51. mcp_vector_search/cli/commands/visualize/state_manager.py +428 -0
  52. mcp_vector_search/cli/commands/visualize/templates/__init__.py +16 -0
  53. mcp_vector_search/cli/commands/visualize/templates/base.py +234 -0
  54. mcp_vector_search/cli/commands/visualize/templates/scripts.py +4542 -0
  55. mcp_vector_search/cli/commands/visualize/templates/styles.py +2522 -0
  56. mcp_vector_search/cli/didyoumean.py +27 -2
  57. mcp_vector_search/cli/main.py +127 -160
  58. mcp_vector_search/cli/output.py +158 -13
  59. mcp_vector_search/config/__init__.py +4 -0
  60. mcp_vector_search/config/default_thresholds.yaml +52 -0
  61. mcp_vector_search/config/settings.py +12 -0
  62. mcp_vector_search/config/thresholds.py +273 -0
  63. mcp_vector_search/core/__init__.py +16 -0
  64. mcp_vector_search/core/auto_indexer.py +3 -3
  65. mcp_vector_search/core/boilerplate.py +186 -0
  66. mcp_vector_search/core/config_utils.py +394 -0
  67. mcp_vector_search/core/database.py +406 -94
  68. mcp_vector_search/core/embeddings.py +24 -0
  69. mcp_vector_search/core/exceptions.py +11 -0
  70. mcp_vector_search/core/git.py +380 -0
  71. mcp_vector_search/core/git_hooks.py +4 -4
  72. mcp_vector_search/core/indexer.py +632 -54
  73. mcp_vector_search/core/llm_client.py +756 -0
  74. mcp_vector_search/core/models.py +91 -1
  75. mcp_vector_search/core/project.py +17 -0
  76. mcp_vector_search/core/relationships.py +473 -0
  77. mcp_vector_search/core/scheduler.py +11 -11
  78. mcp_vector_search/core/search.py +179 -29
  79. mcp_vector_search/mcp/server.py +819 -9
  80. mcp_vector_search/parsers/python.py +285 -5
  81. mcp_vector_search/utils/__init__.py +2 -0
  82. mcp_vector_search/utils/gitignore.py +0 -3
  83. mcp_vector_search/utils/gitignore_updater.py +212 -0
  84. mcp_vector_search/utils/monorepo.py +66 -4
  85. mcp_vector_search/utils/timing.py +10 -6
  86. {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.1.22.dist-info}/METADATA +184 -53
  87. mcp_vector_search-1.1.22.dist-info/RECORD +120 -0
  88. {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.1.22.dist-info}/WHEEL +1 -1
  89. {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.1.22.dist-info}/entry_points.txt +1 -0
  90. mcp_vector_search/cli/commands/visualize.py +0 -1467
  91. mcp_vector_search-0.12.6.dist-info/RECORD +0 -68
  92. {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.1.22.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,525 @@
1
+ """JSON export schema for structural code analysis results.
2
+
3
+ This module provides Pydantic v2 models that define the JSON export format
4
+ for comprehensive code analysis results, including metrics, smells, dependencies,
5
+ and trend data.
6
+
7
+ The schema is designed to be:
8
+ - Version-stable: Includes schema version for compatibility
9
+ - Complete: Captures all analysis aspects (metrics, smells, dependencies, trends)
10
+ - Tool-agnostic: Can be consumed by various visualization and analysis tools
11
+ - Git-aware: Tracks commit and branch information for historical analysis
12
+
13
+ Example:
14
+ >>> from datetime import datetime
15
+ >>> from pathlib import Path
16
+ >>> metadata = ExportMetadata(
17
+ ... version="1.0.0",
18
+ ... generated_at=datetime.now(),
19
+ ... tool_version="0.19.0",
20
+ ... project_root="/path/to/project",
21
+ ... git_commit="abc123",
22
+ ... git_branch="main"
23
+ ... )
24
+ >>> export = AnalysisExport(
25
+ ... metadata=metadata,
26
+ ... summary=MetricsSummary(...),
27
+ ... files=[],
28
+ ... dependencies=DependencyGraph(edges=[], circular_dependencies=[])
29
+ ... )
30
+ >>> json_output = export.model_dump_json(indent=2)
31
+ """
32
+
33
+ from __future__ import annotations
34
+
35
+ from datetime import datetime
36
+ from typing import Any
37
+
38
+ from pydantic import BaseModel, Field, field_validator
39
+
40
+
41
class ExportMetadata(BaseModel):
    """Metadata about the export itself.

    Tracks version information, generation timestamp, and git context
    to enable historical comparison and tool compatibility checks.

    Attributes:
        version: Schema version (e.g., "1.0.0") for compatibility tracking
        generated_at: UTC timestamp when export was generated
        tool_version: mcp-vector-search version that generated the export
        project_root: Absolute path to project root directory
        git_commit: Git commit SHA if available (optional)
        git_branch: Git branch name if available (optional)
    """

    # NOTE(review): class docstrings and Field descriptions surface in the
    # generated JSON Schema (model_json_schema) — treat them as part of the
    # export contract, not free-form comments.
    version: str = Field(
        default="1.0.0", description="Schema version for compatibility tracking"
    )
    # Timezone-awareness is not enforced here; the docstring promises UTC but
    # a naive datetime would also validate — TODO confirm at call sites.
    generated_at: datetime = Field(
        description="UTC timestamp when analysis was performed"
    )
    tool_version: str = Field(
        description="Version of mcp-vector-search that generated this export"
    )
    project_root: str = Field(description="Absolute path to project root directory")
    # Git context is optional so exports still work outside a repository.
    git_commit: str | None = Field(
        default=None, description="Git commit SHA (if available)"
    )
    git_branch: str | None = Field(
        default=None, description="Git branch name (if available)"
    )
72
+
73
+
74
class MetricsSummary(BaseModel):
    """Project-level summary statistics.

    Aggregates key metrics across the entire codebase to provide
    a high-level health overview and identify areas needing attention.

    Attributes:
        total_files: Total number of analyzed files
        total_functions: Total number of functions/methods across all files
        total_classes: Total number of classes
        total_lines: Total lines of code (excluding blank lines)
        avg_complexity: Average cyclomatic complexity across all functions
        avg_cognitive_complexity: Average cognitive complexity
        avg_nesting_depth: Average maximum nesting depth
        total_smells: Total number of detected code smells
        smells_by_severity: Distribution of smells by severity level
        avg_instability: Average instability metric (optional, Phase 3)
        circular_dependencies: Count of circular dependency cycles
        avg_halstead_volume: Average Halstead volume (optional, future)
        avg_halstead_difficulty: Average Halstead difficulty (optional, future)
        estimated_debt_minutes: Estimated technical debt in minutes (optional)
    """

    # Counts are constrained to be non-negative; no upper bounds are imposed.
    total_files: int = Field(ge=0, description="Total number of analyzed files")
    total_functions: int = Field(ge=0, description="Total number of functions/methods")
    total_classes: int = Field(ge=0, description="Total number of classes")
    total_lines: int = Field(ge=0, description="Total lines of code")
    avg_complexity: float = Field(ge=0.0, description="Average cyclomatic complexity")
    avg_cognitive_complexity: float = Field(
        ge=0.0, description="Average cognitive complexity"
    )
    avg_nesting_depth: float = Field(
        ge=0.0, description="Average maximum nesting depth"
    )
    total_smells: int = Field(ge=0, description="Total number of code smells detected")
    # Keys are presumably the severity strings used by SmellLocation
    # ("error", "warning", "info"); this model does not validate them —
    # TODO confirm producers only emit those keys.
    smells_by_severity: dict[str, int] = Field(
        default_factory=dict,
        description="Distribution of smells by severity (error, warning, info)",
    )

    # Coupling metrics (Phase 3)
    avg_instability: float | None = Field(
        default=None, ge=0.0, le=1.0, description="Average instability metric (0-1)"
    )
    circular_dependencies: int = Field(
        default=0, ge=0, description="Number of circular dependency cycles"
    )

    # Halstead metrics (future, optional)
    avg_halstead_volume: float | None = Field(
        default=None, ge=0.0, description="Average Halstead volume (optional)"
    )
    avg_halstead_difficulty: float | None = Field(
        default=None, ge=0.0, description="Average Halstead difficulty (optional)"
    )

    # Technical debt estimation
    estimated_debt_minutes: int | None = Field(
        default=None, ge=0, description="Estimated technical debt in minutes (optional)"
    )
134
+
135
+
136
class FunctionMetrics(BaseModel):
    """Metrics for a single function/method.

    Captures complexity and size metrics for individual functions
    to identify refactoring candidates.

    Attributes:
        name: Function/method name
        line_start: Starting line number
        line_end: Ending line number
        cyclomatic_complexity: Cyclomatic complexity score
        cognitive_complexity: Cognitive complexity score
        nesting_depth: Maximum nesting depth
        parameter_count: Number of parameters
        lines_of_code: Total lines in function body
        halstead_volume: Halstead volume (optional, future)
        halstead_difficulty: Halstead difficulty (optional, future)
        halstead_effort: Halstead effort (optional, future)
    """

    name: str = Field(description="Function or method name")

    # Line numbers are 1-based; cross-field ordering is enforced below.
    line_start: int = Field(description="Starting line number", ge=1)
    line_end: int = Field(description="Ending line number", ge=1)

    # Cyclomatic complexity has a floor of 1 (a straight-line function);
    # cognitive complexity and nesting can legitimately be 0.
    cyclomatic_complexity: int = Field(description="Cyclomatic complexity score", ge=1)
    cognitive_complexity: int = Field(description="Cognitive complexity score", ge=0)
    nesting_depth: int = Field(description="Maximum nesting depth", ge=0)
    parameter_count: int = Field(description="Number of parameters", ge=0)
    lines_of_code: int = Field(description="Total lines in function", ge=1)

    # Halstead metrics (optional, future)
    halstead_volume: float | None = Field(
        description="Halstead volume (optional)", default=None, ge=0.0
    )
    halstead_difficulty: float | None = Field(
        description="Halstead difficulty (optional)", default=None, ge=0.0
    )
    halstead_effort: float | None = Field(
        description="Halstead effort (optional)", default=None, ge=0.0
    )

    @field_validator("line_end")
    @classmethod
    def validate_line_range(cls, v: int, info: Any) -> int:
        """Reject ranges where the end line precedes the start line."""
        # line_start is absent from info.data if it failed its own validation;
        # in that case the range check is skipped.
        start = info.data.get("line_start")
        if start is not None and v < start:
            raise ValueError("line_end must be >= line_start")
        return v
183
+
184
+
185
class ClassMetrics(BaseModel):
    """Metrics for a single class.

    Tracks class-level metrics including cohesion and method counts
    to identify classes that violate single responsibility principle.

    Attributes:
        name: Class name
        line_start: Starting line number
        line_end: Ending line number
        method_count: Number of methods in class
        lcom4: Lack of Cohesion of Methods metric (optional, Phase 2)
        methods: List of method metrics
    """

    name: str = Field(description="Class name")

    # 1-based line span; ordering enforced by the validator below.
    line_start: int = Field(description="Starting line number", ge=1)
    line_end: int = Field(description="Ending line number", ge=1)

    method_count: int = Field(description="Number of methods", ge=0)
    # LCOM4 counts connected components of the method/attribute graph,
    # so when present it is at least 1.
    lcom4: int | None = Field(
        description="Lack of Cohesion metric (LCOM4)", default=None, ge=1
    )
    methods: list[FunctionMetrics] = Field(
        description="Metrics for each method", default_factory=list
    )

    @field_validator("line_end")
    @classmethod
    def validate_line_range(cls, v: int, info: Any) -> int:
        """Reject ranges where the end line precedes the start line."""
        # line_start is missing from info.data if its own validation failed;
        # skip the cross-field check in that case.
        start = info.data.get("line_start")
        if start is not None and v < start:
            raise ValueError("line_end must be >= line_start")
        return v
218
+
219
+
220
class SmellLocation(BaseModel):
    """A detected code smell.

    Represents a single code smell instance with location, severity,
    and remediation information for visualization and prioritization.

    Attributes:
        smell_type: Type of smell (e.g., "long_method", "deep_nesting")
        severity: Severity level ("error", "warning", "info")
        message: Human-readable description
        line: Starting line number
        column: Starting column (optional)
        end_line: Ending line number (optional)
        function_name: Function where smell occurs (optional)
        class_name: Class where smell occurs (optional)
        remediation_minutes: Estimated time to fix (optional)
    """

    smell_type: str = Field(description="Type of code smell")
    # Restricted to error/warning/info by the validator below.
    severity: str = Field(description="Severity level (error, warning, info)")
    message: str = Field(description="Human-readable description")

    # Location: line is 1-based and required; column/end_line are optional.
    # NOTE(review): no cross-field check that end_line >= line here.
    line: int = Field(description="Starting line number", ge=1)
    column: int | None = Field(description="Starting column", default=None, ge=0)
    end_line: int | None = Field(description="Ending line number", default=None, ge=1)

    # Optional enclosing scope, for grouping smells in reports.
    function_name: str | None = Field(
        description="Function where smell occurs", default=None
    )
    class_name: str | None = Field(default=None, description="Class where smell occurs")
    remediation_minutes: int | None = Field(
        description="Estimated remediation time in minutes", default=None, ge=0
    )

    @field_validator("severity")
    @classmethod
    def validate_severity(cls, v: str) -> str:
        """Accept only the known severity levels."""
        allowed = {"error", "warning", "info"}
        if v in allowed:
            return v
        raise ValueError(f"severity must be one of {allowed}, got {v}")
260
+
261
+
262
class FileDetail(BaseModel):
    """Complete metrics for a single file.

    Comprehensive file-level metrics including complexity, coupling,
    and detailed function/class breakdowns for drill-down analysis.

    Attributes:
        path: Relative path from project root
        language: Programming language detected
        lines_of_code: Total lines of code (excluding blanks)
        cyclomatic_complexity: Sum of cyclomatic complexity
        cognitive_complexity: Sum of cognitive complexity
        max_nesting_depth: Maximum nesting depth in file
        function_count: Number of top-level functions
        class_count: Number of classes
        efferent_coupling: Outgoing dependencies (files this depends on)
        afferent_coupling: Incoming dependencies (files depending on this)
        instability: Instability metric (Ce / (Ce + Ca))
        functions: Metrics for each function
        classes: Metrics for each class
        smells: Detected code smells
        imports: List of imported modules/files
    """

    # Paths are stored relative to the project root recorded in ExportMetadata.
    path: str = Field(description="Relative path from project root")
    language: str = Field(description="Programming language")
    lines_of_code: int = Field(ge=0, description="Total lines of code")

    # Aggregate complexity metrics
    # NOTE(review): these are file-level sums/maxima, not averages — the
    # per-function values live in `functions` / `classes` below.
    cyclomatic_complexity: int = Field(ge=0, description="Sum of cyclomatic complexity")
    cognitive_complexity: int = Field(ge=0, description="Sum of cognitive complexity")
    max_nesting_depth: int = Field(ge=0, description="Maximum nesting depth")
    function_count: int = Field(ge=0, description="Number of functions")
    class_count: int = Field(ge=0, description="Number of classes")

    # Coupling metrics (Phase 3)
    efferent_coupling: int = Field(ge=0, description="Outgoing dependencies")
    afferent_coupling: int = Field(ge=0, description="Incoming dependencies")
    # instability is presumably Ce / (Ce + Ca); None when undefined (e.g. no
    # couplings) — the ratio itself is computed by the producer, not here.
    instability: float | None = Field(
        default=None, ge=0.0, le=1.0, description="Instability metric (0-1)"
    )

    # Collections
    functions: list[FunctionMetrics] = Field(
        default_factory=list, description="Function-level metrics"
    )
    classes: list[ClassMetrics] = Field(
        default_factory=list, description="Class-level metrics"
    )
    smells: list[SmellLocation] = Field(
        default_factory=list, description="Detected code smells"
    )
    imports: list[str] = Field(
        default_factory=list, description="Imported modules/files"
    )
317
+
318
+
319
class DependencyEdge(BaseModel):
    """An edge in the dependency graph.

    Represents a single import/dependency relationship between files
    to enable dependency analysis and visualization.

    Attributes:
        source: Source file path (relative to project root)
        target: Target file/module path
        import_type: Type of import ("import", "from_import", "dynamic")
    """

    source: str = Field(description="Source file path")
    target: str = Field(description="Target file/module")
    # Restricted to the three known kinds by the validator below.
    import_type: str = Field(description="Import type (import, from_import, dynamic)")

    @field_validator("import_type")
    @classmethod
    def validate_import_type(cls, v: str) -> str:
        """Accept only the known import kinds."""
        allowed = {"import", "from_import", "dynamic"}
        if v in allowed:
            return v
        raise ValueError(f"import_type must be one of {allowed}, got {v}")
343
+
344
+
345
class CyclicDependency(BaseModel):
    """A detected circular dependency.

    Represents a cycle in the dependency graph that should be resolved
    to improve modularity and testability.

    Attributes:
        cycle: Ordered list of file paths forming the cycle
        length: Number of files in the cycle
    """

    # A cycle needs at least two participants; `length` is redundant with
    # len(cycle) but kept explicit for consumers, and cross-checked below.
    cycle: list[str] = Field(
        description="List of files in the cycle (ordered)", min_length=2
    )
    length: int = Field(description="Number of files in cycle", ge=2)

    @field_validator("length")
    @classmethod
    def validate_length_matches_cycle(cls, v: int, info: Any) -> int:
        """Reject a length that disagrees with len(cycle)."""
        # `cycle` is absent from info.data if its own validation failed;
        # the consistency check is skipped in that case.
        cycle = info.data.get("cycle")
        if cycle is not None and v != len(cycle):
            raise ValueError(
                f"length {v} does not match cycle length {len(cycle)}"
            )
        return v
370
+
371
+
372
class DependencyGraph(BaseModel):
    """Project dependency structure.

    Represents the complete dependency graph including edges,
    circular dependencies, and coupling hotspots.

    Attributes:
        edges: All dependency edges in the graph
        circular_dependencies: Detected circular dependency cycles
        most_depended_on: Top files by afferent coupling (file, count)
        most_dependent: Top files by efferent coupling (file, count)
    """

    edges: list[DependencyEdge] = Field(
        default_factory=list, description="All dependency edges"
    )
    circular_dependencies: list[CyclicDependency] = Field(
        default_factory=list, description="Detected circular dependencies"
    )
    # (path, count) pairs; presumably sorted descending by count by the
    # producer — ordering is not enforced here, TODO confirm.
    most_depended_on: list[tuple[str, int]] = Field(
        default_factory=list,
        description="Top files by afferent coupling (incoming dependencies)",
    )
    most_dependent: list[tuple[str, int]] = Field(
        default_factory=list,
        description="Top files by efferent coupling (outgoing dependencies)",
    )
399
+
400
+
401
class TrendDataPoint(BaseModel):
    """A single point in trend history.

    Represents one measurement of a metric at a specific point in time,
    enabling trend analysis and regression detection.

    Attributes:
        timestamp: When the measurement was taken (UTC)
        commit: Git commit SHA (optional)
        value: Metric value at this point
    """

    # UTC by convention; timezone-awareness is not enforced — TODO confirm
    # producers pass aware datetimes.
    timestamp: datetime = Field(description="When measurement was taken (UTC)")
    commit: str | None = Field(default=None, description="Git commit SHA")
    # Unbounded on purpose: trends may track metrics of any sign/scale.
    value: float = Field(description="Metric value at this point")
416
+
417
+
418
class MetricTrend(BaseModel):
    """Trend data for a specific metric.

    Tracks how a metric changes over time with direction indicators
    to highlight improving or worsening trends.

    Attributes:
        metric_name: Name of the metric being tracked
        current_value: Most recent value
        previous_value: Previous measurement value (optional)
        change_percent: Percentage change from previous (optional)
        trend_direction: Direction ("improving", "worsening", "stable")
        history: Time series of historical values
    """

    metric_name: str = Field(description="Name of metric being tracked")
    current_value: float = Field(description="Current metric value")

    # Optional comparison against the prior measurement; both are supplied
    # by the producer — no consistency check between them is performed here.
    previous_value: float | None = Field(
        description="Previous measurement value", default=None
    )
    change_percent: float | None = Field(
        description="Percentage change from previous", default=None
    )

    # Restricted to improving/worsening/stable by the validator below.
    trend_direction: str = Field(
        description="Trend direction (improving, worsening, stable)"
    )
    history: list[TrendDataPoint] = Field(
        description="Historical data points", default_factory=list
    )

    @field_validator("trend_direction")
    @classmethod
    def validate_trend_direction(cls, v: str) -> str:
        """Accept only the known trend directions."""
        allowed = {"improving", "worsening", "stable"}
        if v in allowed:
            return v
        raise ValueError(f"trend_direction must be one of {allowed}, got {v}")
456
+
457
+
458
class TrendData(BaseModel):
    """Historical trend information.

    Aggregates trend data for multiple metrics with baseline tracking
    for regression detection and historical comparison.

    Attributes:
        metrics: List of metric trends
        baseline_name: Name of baseline (e.g., "main", "v1.0.0")
        baseline_date: Date baseline was established (optional)
    """

    metrics: list[MetricTrend] = Field(
        default_factory=list, description="Trend data for each tracked metric"
    )
    # Baseline identification is optional: exports without an established
    # baseline simply omit both fields.
    baseline_name: str | None = Field(
        default=None, description="Name of baseline for comparison"
    )
    baseline_date: datetime | None = Field(
        default=None, description="When baseline was established"
    )
479
+
480
+
481
class AnalysisExport(BaseModel):
    """Root schema for complete analysis export.

    Top-level container for all analysis data including metadata,
    summary statistics, file details, dependencies, and trends.

    This is the primary export format for visualization tools and
    external integrations.

    Attributes:
        metadata: Export metadata and version information
        summary: Project-level summary statistics
        files: Detailed metrics for each analyzed file
        dependencies: Dependency graph and coupling analysis
        trends: Historical trend data (optional)
    """

    # metadata, summary, and dependencies are required; only files (which
    # defaults to empty) and trends (optional) may be omitted by producers.
    metadata: ExportMetadata = Field(description="Export metadata and versioning")
    summary: MetricsSummary = Field(description="Project-level summary statistics")
    files: list[FileDetail] = Field(
        default_factory=list, description="File-level metrics"
    )
    dependencies: DependencyGraph = Field(
        description="Dependency graph and coupling analysis"
    )
    trends: TrendData | None = Field(
        default=None, description="Historical trend data (optional)"
    )
509
+
510
+
511
def generate_json_schema() -> dict[str, Any]:
    """Generate JSON Schema for documentation and validation.

    Creates a JSON Schema document describing the AnalysisExport format
    for use in documentation, validation tools, and IDE autocomplete.

    Returns:
        Dictionary containing JSON Schema for AnalysisExport

    Example:
        >>> schema = generate_json_schema()
        >>> import json
        >>> print(json.dumps(schema, indent=2))
    """
    # Pydantic derives the schema from the model tree rooted at
    # AnalysisExport, so all nested models are included automatically.
    schema = AnalysisExport.model_json_schema()
    return schema