mcp-vector-search 0.12.6__py3-none-any.whl → 1.1.22__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92)
  1. mcp_vector_search/__init__.py +3 -3
  2. mcp_vector_search/analysis/__init__.py +111 -0
  3. mcp_vector_search/analysis/baseline/__init__.py +68 -0
  4. mcp_vector_search/analysis/baseline/comparator.py +462 -0
  5. mcp_vector_search/analysis/baseline/manager.py +621 -0
  6. mcp_vector_search/analysis/collectors/__init__.py +74 -0
  7. mcp_vector_search/analysis/collectors/base.py +164 -0
  8. mcp_vector_search/analysis/collectors/cohesion.py +463 -0
  9. mcp_vector_search/analysis/collectors/complexity.py +743 -0
  10. mcp_vector_search/analysis/collectors/coupling.py +1162 -0
  11. mcp_vector_search/analysis/collectors/halstead.py +514 -0
  12. mcp_vector_search/analysis/collectors/smells.py +325 -0
  13. mcp_vector_search/analysis/debt.py +516 -0
  14. mcp_vector_search/analysis/interpretation.py +685 -0
  15. mcp_vector_search/analysis/metrics.py +414 -0
  16. mcp_vector_search/analysis/reporters/__init__.py +7 -0
  17. mcp_vector_search/analysis/reporters/console.py +646 -0
  18. mcp_vector_search/analysis/reporters/markdown.py +480 -0
  19. mcp_vector_search/analysis/reporters/sarif.py +377 -0
  20. mcp_vector_search/analysis/storage/__init__.py +93 -0
  21. mcp_vector_search/analysis/storage/metrics_store.py +762 -0
  22. mcp_vector_search/analysis/storage/schema.py +245 -0
  23. mcp_vector_search/analysis/storage/trend_tracker.py +560 -0
  24. mcp_vector_search/analysis/trends.py +308 -0
  25. mcp_vector_search/analysis/visualizer/__init__.py +90 -0
  26. mcp_vector_search/analysis/visualizer/d3_data.py +534 -0
  27. mcp_vector_search/analysis/visualizer/exporter.py +484 -0
  28. mcp_vector_search/analysis/visualizer/html_report.py +2895 -0
  29. mcp_vector_search/analysis/visualizer/schemas.py +525 -0
  30. mcp_vector_search/cli/commands/analyze.py +1062 -0
  31. mcp_vector_search/cli/commands/chat.py +1455 -0
  32. mcp_vector_search/cli/commands/index.py +621 -5
  33. mcp_vector_search/cli/commands/index_background.py +467 -0
  34. mcp_vector_search/cli/commands/init.py +13 -0
  35. mcp_vector_search/cli/commands/install.py +597 -335
  36. mcp_vector_search/cli/commands/install_old.py +8 -4
  37. mcp_vector_search/cli/commands/mcp.py +78 -6
  38. mcp_vector_search/cli/commands/reset.py +68 -26
  39. mcp_vector_search/cli/commands/search.py +224 -8
  40. mcp_vector_search/cli/commands/setup.py +1184 -0
  41. mcp_vector_search/cli/commands/status.py +339 -5
  42. mcp_vector_search/cli/commands/uninstall.py +276 -357
  43. mcp_vector_search/cli/commands/visualize/__init__.py +39 -0
  44. mcp_vector_search/cli/commands/visualize/cli.py +292 -0
  45. mcp_vector_search/cli/commands/visualize/exporters/__init__.py +12 -0
  46. mcp_vector_search/cli/commands/visualize/exporters/html_exporter.py +33 -0
  47. mcp_vector_search/cli/commands/visualize/exporters/json_exporter.py +33 -0
  48. mcp_vector_search/cli/commands/visualize/graph_builder.py +647 -0
  49. mcp_vector_search/cli/commands/visualize/layout_engine.py +469 -0
  50. mcp_vector_search/cli/commands/visualize/server.py +600 -0
  51. mcp_vector_search/cli/commands/visualize/state_manager.py +428 -0
  52. mcp_vector_search/cli/commands/visualize/templates/__init__.py +16 -0
  53. mcp_vector_search/cli/commands/visualize/templates/base.py +234 -0
  54. mcp_vector_search/cli/commands/visualize/templates/scripts.py +4542 -0
  55. mcp_vector_search/cli/commands/visualize/templates/styles.py +2522 -0
  56. mcp_vector_search/cli/didyoumean.py +27 -2
  57. mcp_vector_search/cli/main.py +127 -160
  58. mcp_vector_search/cli/output.py +158 -13
  59. mcp_vector_search/config/__init__.py +4 -0
  60. mcp_vector_search/config/default_thresholds.yaml +52 -0
  61. mcp_vector_search/config/settings.py +12 -0
  62. mcp_vector_search/config/thresholds.py +273 -0
  63. mcp_vector_search/core/__init__.py +16 -0
  64. mcp_vector_search/core/auto_indexer.py +3 -3
  65. mcp_vector_search/core/boilerplate.py +186 -0
  66. mcp_vector_search/core/config_utils.py +394 -0
  67. mcp_vector_search/core/database.py +406 -94
  68. mcp_vector_search/core/embeddings.py +24 -0
  69. mcp_vector_search/core/exceptions.py +11 -0
  70. mcp_vector_search/core/git.py +380 -0
  71. mcp_vector_search/core/git_hooks.py +4 -4
  72. mcp_vector_search/core/indexer.py +632 -54
  73. mcp_vector_search/core/llm_client.py +756 -0
  74. mcp_vector_search/core/models.py +91 -1
  75. mcp_vector_search/core/project.py +17 -0
  76. mcp_vector_search/core/relationships.py +473 -0
  77. mcp_vector_search/core/scheduler.py +11 -11
  78. mcp_vector_search/core/search.py +179 -29
  79. mcp_vector_search/mcp/server.py +819 -9
  80. mcp_vector_search/parsers/python.py +285 -5
  81. mcp_vector_search/utils/__init__.py +2 -0
  82. mcp_vector_search/utils/gitignore.py +0 -3
  83. mcp_vector_search/utils/gitignore_updater.py +212 -0
  84. mcp_vector_search/utils/monorepo.py +66 -4
  85. mcp_vector_search/utils/timing.py +10 -6
  86. {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.1.22.dist-info}/METADATA +184 -53
  87. mcp_vector_search-1.1.22.dist-info/RECORD +120 -0
  88. {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.1.22.dist-info}/WHEEL +1 -1
  89. {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.1.22.dist-info}/entry_points.txt +1 -0
  90. mcp_vector_search/cli/commands/visualize.py +0 -1467
  91. mcp_vector_search-0.12.6.dist-info/RECORD +0 -68
  92. {mcp_vector_search-0.12.6.dist-info → mcp_vector_search-1.1.22.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,484 @@
+"""JSON exporter for structural code analysis results.
+
+This module provides the JSONExporter class that converts analysis metrics
+into the standardized JSON export format defined by schemas.py.
+
+The exporter handles:
+- Project-level metric aggregation
+- File and function/class detail conversion
+- Dependency graph construction
+- Historical trend data integration
+- Git-aware metadata generation
+
+Example:
+    >>> from pathlib import Path
+    >>> from mcp_vector_search.analysis import ProjectMetrics
+    >>> from mcp_vector_search.analysis.visualizer import JSONExporter
+    >>>
+    >>> exporter = JSONExporter(project_root=Path("/path/to/project"))
+    >>> export = exporter.export(project_metrics)
+    >>> json_output = export.model_dump_json(indent=2)
+    >>>
+    >>> # Or export directly to file
+    >>> output_path = exporter.export_to_file(
+    ...     project_metrics,
+    ...     Path("analysis-results.json")
+    ... )
+"""
+
+from __future__ import annotations
+
+import subprocess
+from collections import Counter
+from datetime import datetime
+from pathlib import Path
+from typing import TYPE_CHECKING
+
+from loguru import logger
+
+from .schemas import (
+    AnalysisExport,
+    ClassMetrics,
+    CyclicDependency,
+    DependencyEdge,
+    DependencyGraph,
+    ExportMetadata,
+    FileDetail,
+    FunctionMetrics,
+    MetricsSummary,
+    SmellLocation,
+    TrendData,
+)
+
+if TYPE_CHECKING:
+    from ..metrics import ChunkMetrics, FileMetrics, ProjectMetrics
+    from ..storage.metrics_store import MetricsStore
+    from ..storage.trend_tracker import TrendTracker
+
+
+class JSONExporter:
+    """Exports analysis results to JSON format using the defined schema.
+
+    This exporter converts internal metric dataclasses to the standardized
+    Pydantic-based export schema for consumption by visualization tools
+    and external analysis platforms.
+
+    Attributes:
+        project_root: Root directory of the analyzed project
+        metrics_store: Optional metrics store for historical data
+        trend_tracker: Optional trend tracker for regression analysis
+    """
+
+    def __init__(
+        self,
+        project_root: Path,
+        metrics_store: MetricsStore | None = None,
+        trend_tracker: TrendTracker | None = None,
+    ):
+        """Initialize JSON exporter.
+
+        Args:
+            project_root: Root directory of project being analyzed
+            metrics_store: Optional store for historical metrics snapshots
+            trend_tracker: Optional tracker for trend analysis
+        """
+        self.project_root = project_root
+        self.metrics_store = metrics_store
+        self.trend_tracker = trend_tracker
+
+    def export(
+        self,
+        project_metrics: ProjectMetrics,
+        include_trends: bool = True,
+        include_dependencies: bool = True,
+    ) -> AnalysisExport:
+        """Export project metrics to the JSON schema format.
+
+        Args:
+            project_metrics: Project-level metrics to export
+            include_trends: Whether to include historical trend data
+            include_dependencies: Whether to include dependency graph
+
+        Returns:
+            Complete analysis export in schema format
+        """
+        metadata = self._create_metadata()
+        summary = self._create_summary(project_metrics)
+        files = self._create_file_details(project_metrics)
+        dependencies = (
+            self._create_dependency_graph(project_metrics)
+            if include_dependencies
+            else DependencyGraph()
+        )
+        trends = (
+            self._create_trend_data() if include_trends and self.metrics_store else None
+        )
+
+        return AnalysisExport(
+            metadata=metadata,
+            summary=summary,
+            files=files,
+            dependencies=dependencies,
+            trends=trends,
+        )
+
+    def export_to_file(
+        self,
+        project_metrics: ProjectMetrics,
+        output_path: Path,
+        indent: int = 2,
+        **kwargs,
+    ) -> Path:
+        """Export to a JSON file.
+
+        Args:
+            project_metrics: Project metrics to export
+            output_path: Path where JSON file will be written
+            indent: Number of spaces for JSON indentation
+            **kwargs: Additional arguments passed to export()
+
+        Returns:
+            Path to the created JSON file
+        """
+        export = self.export(project_metrics, **kwargs)
+        output_path.parent.mkdir(parents=True, exist_ok=True)
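+        # AnalysisExport is a Pydantic model, so model_dump_json() serializes
+        # the nested models and datetime fields (as ISO 8601) for us.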
145
+ output_path.write_text(export.model_dump_json(indent=indent))
146
+ logger.info(f"Exported analysis results to {output_path}")
147
+ return output_path
148
+
149
+ def _create_metadata(self) -> ExportMetadata:
150
+ """Create export metadata with version and git info.
151
+
152
+ Returns:
153
+ Metadata with tool version and git context
154
+ """
155
+ # Get version from package
156
+ from mcp_vector_search import __version__
157
+
158
+ # Get git info
159
+ git_commit, git_branch = self._get_git_info()
160
+
161
+ return ExportMetadata(
162
+ version="1.0.0", # Schema version
163
+ generated_at=datetime.now(),
164
+ tool_version=__version__,
165
+ project_root=str(self.project_root),
166
+ git_commit=git_commit,
167
+ git_branch=git_branch,
168
+ )
169
+
170
+ def _create_summary(self, project_metrics: ProjectMetrics) -> MetricsSummary:
171
+ """Create project-level summary from metrics.
172
+
173
+ Args:
174
+ project_metrics: Project metrics to summarize
175
+
176
+ Returns:
177
+ Aggregated summary statistics
178
+ """
179
+ # Get all files and chunks
180
+ all_files = list(project_metrics.files.values())
181
+ all_chunks = [chunk for file in all_files for chunk in file.chunks]
182
+
183
+ # Basic counts
184
+ total_files = len(all_files)
185
+ total_functions = sum(file.function_count for file in all_files)
186
+ total_classes = sum(file.class_count for file in all_files)
187
+ total_lines = sum(file.total_lines for file in all_files)
188
+
+        # Calculate complexity averages (cyclomatic and cognitive separately)
+        avg_complexity = (
+            sum(chunk.cyclomatic_complexity for chunk in all_chunks) / len(all_chunks)
+            if all_chunks
+            else 0.0
+        )
+        avg_cognitive_complexity = (
+            sum(chunk.cognitive_complexity for chunk in all_chunks) / len(all_chunks)
+            if all_chunks
+            else 0.0
+        )
+
+        avg_nesting_depth = (
+            sum(chunk.max_nesting_depth for chunk in all_chunks) / len(all_chunks)
+            if all_chunks
+            else 0.0
+        )
+
+        # Count smells by severity
+        smells_by_severity: dict[str, int] = Counter()
+        total_smells = 0
+
+        # Note: Smells are currently stored as string lists in ChunkMetrics.smells
+        # We'll need to parse them or use a smell detector to get severity
+        for chunk in all_chunks:
+            total_smells += len(chunk.smells)
+            # Default to 'warning' for now since we don't have severity info in ChunkMetrics
+            for _ in chunk.smells:
+                smells_by_severity["warning"] += 1
+
+        # Calculate coupling metrics
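+        # Martin's instability metric: I = Ce / (Ce + Ca), where Ce is efferent
+        # (outgoing) and Ca is afferent (incoming) coupling; 0.0 means a file is
+        # only depended upon, 1.0 means it only depends on others. Files with no
+        # coupling at all are skipped so they don't skew the average.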
+        instabilities = [
+            file.coupling.instability
+            for file in all_files
+            if file.coupling.efferent_coupling + file.coupling.afferent_coupling > 0
+        ]
+        avg_instability = (
+            sum(instabilities) / len(instabilities) if instabilities else None
+        )
+
+        # Count circular dependencies
+        circular_dependencies = 0
+        # TODO: Get this from coupling collectors when available
+
+        # Halstead metrics (optional, from Phase 4)
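+        # Halstead volume approximates program size (N * log2(n) over operators
+        # and operands); difficulty estimates how error-prone the code is. Both
+        # averages only cover chunks where the collector produced a value.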
+        halstead_volumes = [
+            chunk.halstead_volume
+            for chunk in all_chunks
+            if chunk.halstead_volume is not None
+        ]
+        avg_halstead_volume = (
+            sum(halstead_volumes) / len(halstead_volumes) if halstead_volumes else None
+        )
+
+        halstead_difficulties = [
+            chunk.halstead_difficulty
+            for chunk in all_chunks
+            if chunk.halstead_difficulty is not None
+        ]
+        avg_halstead_difficulty = (
+            sum(halstead_difficulties) / len(halstead_difficulties)
+            if halstead_difficulties
+            else None
+        )
+
+        # Technical debt estimation (optional)
+        # TODO: Calculate from DebtEstimator when available
+        estimated_debt_minutes = None
+
+        return MetricsSummary(
+            total_files=total_files,
+            total_functions=total_functions,
+            total_classes=total_classes,
+            total_lines=total_lines,
+            avg_complexity=avg_complexity,
+            avg_cognitive_complexity=avg_cognitive_complexity,
+            avg_nesting_depth=avg_nesting_depth,
+            total_smells=total_smells,
+            smells_by_severity=dict(smells_by_severity),
+            avg_instability=avg_instability,
+            circular_dependencies=circular_dependencies,
+            avg_halstead_volume=avg_halstead_volume,
+            avg_halstead_difficulty=avg_halstead_difficulty,
+            estimated_debt_minutes=estimated_debt_minutes,
+        )
+
+    def _create_file_details(self, project_metrics: ProjectMetrics) -> list[FileDetail]:
+        """Convert FileMetrics to FileDetail schema.
+
+        Args:
+            project_metrics: Project metrics containing file data
+
+        Returns:
+            List of file details in schema format
+        """
+        return [
+            self._convert_file(file_metrics)
+            for file_metrics in project_metrics.files.values()
+        ]
+
+    def _convert_file(self, file_metrics: FileMetrics) -> FileDetail:
+        """Convert a single FileMetrics to FileDetail.
+
+        Args:
+            file_metrics: File-level metrics to convert
+
+        Returns:
+            File detail in schema format
+        """
+        # Separate functions and classes (methods inside classes)
+        # For now, we'll treat all chunks as functions since ChunkMetrics doesn't distinguish
+        functions = []
+        classes = []
+
+        for chunk in file_metrics.chunks:
+            # TODO: Need to add chunk_type or similar field to distinguish
+            # For now, assume all are functions
+            func_metrics = self._convert_function(chunk, 0)  # Line numbers TBD
+            functions.append(func_metrics)
+
+        # Convert smells from chunks
+        smells = []
+        for chunk in file_metrics.chunks:
+            for smell_name in chunk.smells:
+                smell = SmellLocation(
+                    smell_type=smell_name,
+                    severity="warning",  # Default severity
+                    message=f"Code smell detected: {smell_name}",
+                    line=1,  # TODO: Get actual line numbers from chunk
+                )
+                smells.append(smell)
+
+        # Calculate aggregated complexity
+        cyclomatic_complexity = sum(
+            chunk.cyclomatic_complexity for chunk in file_metrics.chunks
+        )
+        cognitive_complexity = sum(
+            chunk.cognitive_complexity for chunk in file_metrics.chunks
+        )
+        max_nesting_depth = (
+            max(chunk.max_nesting_depth for chunk in file_metrics.chunks)
+            if file_metrics.chunks
+            else 0
+        )
+
+        return FileDetail(
+            path=file_metrics.file_path,
+            language="python",  # TODO: Detect language from file extension
+            lines_of_code=file_metrics.total_lines,
+            cyclomatic_complexity=cyclomatic_complexity,
+            cognitive_complexity=cognitive_complexity,
+            max_nesting_depth=max_nesting_depth,
+            function_count=file_metrics.function_count,
+            class_count=file_metrics.class_count,
+            efferent_coupling=file_metrics.coupling.efferent_coupling,
+            afferent_coupling=file_metrics.coupling.afferent_coupling,
+            instability=file_metrics.coupling.instability,
+            functions=functions,
+            classes=classes,
+            smells=smells,
+            imports=file_metrics.coupling.imports,
+        )
+
+    def _convert_function(
+        self, chunk_metrics: ChunkMetrics, line_start: int
+    ) -> FunctionMetrics:
+        """Convert a ChunkMetrics to FunctionMetrics.
+
+        Args:
+            chunk_metrics: Chunk-level metrics to convert
+            line_start: Starting line number of function
+
+        Returns:
+            Function metrics in schema format
+        """
+        # Estimate line_end from lines_of_code
+        line_end = line_start + chunk_metrics.lines_of_code
+
+        return FunctionMetrics(
+            name="function",  # TODO: Get actual function name from chunk
+            line_start=max(1, line_start),
+            line_end=max(1, line_end),
+            cyclomatic_complexity=chunk_metrics.cyclomatic_complexity,
+            cognitive_complexity=chunk_metrics.cognitive_complexity,
+            nesting_depth=chunk_metrics.max_nesting_depth,
+            parameter_count=chunk_metrics.parameter_count,
+            lines_of_code=chunk_metrics.lines_of_code,
+            halstead_volume=chunk_metrics.halstead_volume,
+            halstead_difficulty=chunk_metrics.halstead_difficulty,
+            halstead_effort=chunk_metrics.halstead_effort,
+        )
+
+    def _convert_class(self, chunk_metrics: ChunkMetrics) -> ClassMetrics:
+        """Convert a ChunkMetrics to ClassMetrics.
+
+        Args:
+            chunk_metrics: Chunk-level metrics for class
+
+        Returns:
+            Class metrics in schema format
+        """
+        # TODO: Implement when we have proper class detection
+        return ClassMetrics(
+            name="Class",
+            line_start=1,
+            line_end=chunk_metrics.lines_of_code,
+            method_count=0,
+            lcom4=None,
+            methods=[],
+        )
+
+    def _create_dependency_graph(
+        self, project_metrics: ProjectMetrics
+    ) -> DependencyGraph:
+        """Create dependency graph from coupling data.
+
+        Args:
+            project_metrics: Project metrics with coupling information
+
+        Returns:
+            Dependency graph with edges and circular dependencies
+        """
+        edges: list[DependencyEdge] = []
+        all_files = list(project_metrics.files.values())
+
+        # Build edges from coupling data
+        for file_metrics in all_files:
+            source = file_metrics.file_path
+            for target in file_metrics.coupling.imports:
+                # Classify import type (simplified for now)
+                import_type = "import"  # Could be "from_import" or "dynamic"
+                edges.append(
+                    DependencyEdge(
+                        source=source, target=target, import_type=import_type
+                    )
+                )
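+
+        # Edges point from the importing file to the imported module, so a
+        # node's in-degree in this graph corresponds to its afferent coupling.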
+
+        # Calculate most depended on files (afferent coupling)
+        afferent_counts = [
+            (file.file_path, file.coupling.afferent_coupling) for file in all_files
+        ]
+        most_depended_on = sorted(afferent_counts, key=lambda x: x[1], reverse=True)[
+            :10
+        ]
+
+        # Calculate most dependent files (efferent coupling)
+        efferent_counts = [
+            (file.file_path, file.coupling.efferent_coupling) for file in all_files
+        ]
+        most_dependent = sorted(efferent_counts, key=lambda x: x[1], reverse=True)[:10]
+
+        # Circular dependencies (TODO: implement detection)
+        circular_dependencies: list[CyclicDependency] = []
+
+        return DependencyGraph(
+            edges=edges,
+            circular_dependencies=circular_dependencies,
+            most_depended_on=most_depended_on,
+            most_dependent=most_dependent,
+        )
+
+    def _create_trend_data(self) -> TrendData | None:
+        """Create trend data from metrics store.
+
+        Returns:
+            Historical trend data if available, None otherwise
+        """
+        if not self.metrics_store or not self.trend_tracker:
+            return None
+
+        # TODO: Implement trend data extraction from metrics store
+        # This will require querying historical snapshots and formatting as MetricTrends
+
+        return None
+
+    def _get_git_info(self) -> tuple[str | None, str | None]:
+        """Get current git commit and branch.
+
+        Returns:
+            Tuple of (commit_sha, branch_name) or (None, None) if not in git repo
+        """
+        try:
+            commit = subprocess.check_output(
+                ["git", "rev-parse", "HEAD"],  # nosec B607
+                cwd=self.project_root,
+                stderr=subprocess.DEVNULL,
+                text=True,
+            ).strip()
+
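+            # Note: in a detached-HEAD state, "--abbrev-ref HEAD" resolves to
+            # the literal string "HEAD" rather than a branch name.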
+            branch = subprocess.check_output(
+                ["git", "rev-parse", "--abbrev-ref", "HEAD"],  # nosec B607
+                cwd=self.project_root,
+                stderr=subprocess.DEVNULL,
+                text=True,
+            ).strip()
+
+            return commit, branch
+        except (subprocess.CalledProcessError, FileNotFoundError):
+            logger.debug("Git information not available")
+            return None, None
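
Putting the new exporter together, a minimal usage sketch based on the docstrings above; obtaining a populated ProjectMetrics from the analysis pipeline is assumed rather than shown:

    from pathlib import Path

    from mcp_vector_search.analysis.visualizer import JSONExporter

    # Assumption: `project_metrics` is a ProjectMetrics produced by the
    # analysis collectors; its construction is not part of this module.
    exporter = JSONExporter(project_root=Path("."))

    # In-memory export; trends are skipped since no MetricsStore was passed.
    export = exporter.export(project_metrics, include_trends=False)
    print(export.model_dump_json(indent=2))

    # Or write straight to disk; parent directories are created as needed.
    exporter.export_to_file(project_metrics, Path("reports/analysis-results.json"))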