shotgun-sh 0.4.0.dev1__py3-none-any.whl → 0.6.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (135)
  1. shotgun/agents/agent_manager.py +307 -8
  2. shotgun/agents/cancellation.py +103 -0
  3. shotgun/agents/common.py +12 -0
  4. shotgun/agents/config/README.md +0 -1
  5. shotgun/agents/config/manager.py +10 -7
  6. shotgun/agents/config/models.py +5 -27
  7. shotgun/agents/config/provider.py +44 -27
  8. shotgun/agents/conversation/history/token_counting/base.py +51 -9
  9. shotgun/agents/file_read.py +176 -0
  10. shotgun/agents/messages.py +15 -3
  11. shotgun/agents/models.py +24 -1
  12. shotgun/agents/router/models.py +8 -0
  13. shotgun/agents/router/tools/delegation_tools.py +55 -1
  14. shotgun/agents/router/tools/plan_tools.py +88 -7
  15. shotgun/agents/runner.py +17 -2
  16. shotgun/agents/tools/__init__.py +8 -0
  17. shotgun/agents/tools/codebase/directory_lister.py +27 -39
  18. shotgun/agents/tools/codebase/file_read.py +26 -35
  19. shotgun/agents/tools/codebase/query_graph.py +9 -0
  20. shotgun/agents/tools/codebase/retrieve_code.py +9 -0
  21. shotgun/agents/tools/file_management.py +32 -2
  22. shotgun/agents/tools/file_read_tools/__init__.py +7 -0
  23. shotgun/agents/tools/file_read_tools/multimodal_file_read.py +167 -0
  24. shotgun/agents/tools/markdown_tools/__init__.py +62 -0
  25. shotgun/agents/tools/markdown_tools/insert_section.py +148 -0
  26. shotgun/agents/tools/markdown_tools/models.py +86 -0
  27. shotgun/agents/tools/markdown_tools/remove_section.py +114 -0
  28. shotgun/agents/tools/markdown_tools/replace_section.py +119 -0
  29. shotgun/agents/tools/markdown_tools/utils.py +453 -0
  30. shotgun/agents/tools/registry.py +44 -6
  31. shotgun/agents/tools/web_search/openai.py +42 -23
  32. shotgun/attachments/__init__.py +41 -0
  33. shotgun/attachments/errors.py +60 -0
  34. shotgun/attachments/models.py +107 -0
  35. shotgun/attachments/parser.py +257 -0
  36. shotgun/attachments/processor.py +193 -0
  37. shotgun/build_constants.py +4 -7
  38. shotgun/cli/clear.py +2 -2
  39. shotgun/cli/codebase/commands.py +181 -65
  40. shotgun/cli/compact.py +2 -2
  41. shotgun/cli/context.py +2 -2
  42. shotgun/cli/error_handler.py +2 -2
  43. shotgun/cli/run.py +90 -0
  44. shotgun/cli/spec/backup.py +2 -1
  45. shotgun/codebase/__init__.py +2 -0
  46. shotgun/codebase/benchmarks/__init__.py +35 -0
  47. shotgun/codebase/benchmarks/benchmark_runner.py +309 -0
  48. shotgun/codebase/benchmarks/exporters.py +119 -0
  49. shotgun/codebase/benchmarks/formatters/__init__.py +49 -0
  50. shotgun/codebase/benchmarks/formatters/base.py +34 -0
  51. shotgun/codebase/benchmarks/formatters/json_formatter.py +106 -0
  52. shotgun/codebase/benchmarks/formatters/markdown.py +136 -0
  53. shotgun/codebase/benchmarks/models.py +129 -0
  54. shotgun/codebase/core/__init__.py +4 -0
  55. shotgun/codebase/core/call_resolution.py +91 -0
  56. shotgun/codebase/core/change_detector.py +11 -6
  57. shotgun/codebase/core/errors.py +159 -0
  58. shotgun/codebase/core/extractors/__init__.py +23 -0
  59. shotgun/codebase/core/extractors/base.py +138 -0
  60. shotgun/codebase/core/extractors/factory.py +63 -0
  61. shotgun/codebase/core/extractors/go/__init__.py +7 -0
  62. shotgun/codebase/core/extractors/go/extractor.py +122 -0
  63. shotgun/codebase/core/extractors/javascript/__init__.py +7 -0
  64. shotgun/codebase/core/extractors/javascript/extractor.py +132 -0
  65. shotgun/codebase/core/extractors/protocol.py +109 -0
  66. shotgun/codebase/core/extractors/python/__init__.py +7 -0
  67. shotgun/codebase/core/extractors/python/extractor.py +141 -0
  68. shotgun/codebase/core/extractors/rust/__init__.py +7 -0
  69. shotgun/codebase/core/extractors/rust/extractor.py +139 -0
  70. shotgun/codebase/core/extractors/types.py +15 -0
  71. shotgun/codebase/core/extractors/typescript/__init__.py +7 -0
  72. shotgun/codebase/core/extractors/typescript/extractor.py +92 -0
  73. shotgun/codebase/core/gitignore.py +252 -0
  74. shotgun/codebase/core/ingestor.py +644 -354
  75. shotgun/codebase/core/kuzu_compat.py +119 -0
  76. shotgun/codebase/core/language_config.py +239 -0
  77. shotgun/codebase/core/manager.py +256 -46
  78. shotgun/codebase/core/metrics_collector.py +310 -0
  79. shotgun/codebase/core/metrics_types.py +347 -0
  80. shotgun/codebase/core/parallel_executor.py +424 -0
  81. shotgun/codebase/core/work_distributor.py +254 -0
  82. shotgun/codebase/core/worker.py +768 -0
  83. shotgun/codebase/indexing_state.py +86 -0
  84. shotgun/codebase/models.py +94 -0
  85. shotgun/codebase/service.py +13 -0
  86. shotgun/exceptions.py +9 -9
  87. shotgun/main.py +3 -16
  88. shotgun/posthog_telemetry.py +165 -24
  89. shotgun/prompts/agents/file_read.j2 +48 -0
  90. shotgun/prompts/agents/partials/common_agent_system_prompt.j2 +19 -47
  91. shotgun/prompts/agents/partials/content_formatting.j2 +12 -33
  92. shotgun/prompts/agents/partials/interactive_mode.j2 +9 -32
  93. shotgun/prompts/agents/partials/router_delegation_mode.j2 +21 -22
  94. shotgun/prompts/agents/plan.j2 +14 -0
  95. shotgun/prompts/agents/router.j2 +531 -258
  96. shotgun/prompts/agents/specify.j2 +14 -0
  97. shotgun/prompts/agents/state/codebase/codebase_graphs_available.j2 +14 -1
  98. shotgun/prompts/agents/state/system_state.j2 +13 -11
  99. shotgun/prompts/agents/tasks.j2 +14 -0
  100. shotgun/settings.py +49 -10
  101. shotgun/tui/app.py +149 -18
  102. shotgun/tui/commands/__init__.py +9 -1
  103. shotgun/tui/components/attachment_bar.py +87 -0
  104. shotgun/tui/components/prompt_input.py +25 -28
  105. shotgun/tui/components/status_bar.py +14 -7
  106. shotgun/tui/dependencies.py +3 -8
  107. shotgun/tui/protocols.py +18 -0
  108. shotgun/tui/screens/chat/chat.tcss +15 -0
  109. shotgun/tui/screens/chat/chat_screen.py +766 -235
  110. shotgun/tui/screens/chat/codebase_index_prompt_screen.py +8 -4
  111. shotgun/tui/screens/chat_screen/attachment_hint.py +40 -0
  112. shotgun/tui/screens/chat_screen/command_providers.py +0 -10
  113. shotgun/tui/screens/chat_screen/history/chat_history.py +54 -14
  114. shotgun/tui/screens/chat_screen/history/formatters.py +22 -0
  115. shotgun/tui/screens/chat_screen/history/user_question.py +25 -3
  116. shotgun/tui/screens/database_locked_dialog.py +219 -0
  117. shotgun/tui/screens/database_timeout_dialog.py +158 -0
  118. shotgun/tui/screens/kuzu_error_dialog.py +135 -0
  119. shotgun/tui/screens/model_picker.py +1 -3
  120. shotgun/tui/screens/models.py +11 -0
  121. shotgun/tui/state/processing_state.py +19 -0
  122. shotgun/tui/widgets/widget_coordinator.py +18 -0
  123. shotgun/utils/file_system_utils.py +4 -1
  124. {shotgun_sh-0.4.0.dev1.dist-info → shotgun_sh-0.6.2.dist-info}/METADATA +87 -34
  125. {shotgun_sh-0.4.0.dev1.dist-info → shotgun_sh-0.6.2.dist-info}/RECORD +128 -79
  126. shotgun/cli/export.py +0 -81
  127. shotgun/cli/plan.py +0 -73
  128. shotgun/cli/research.py +0 -93
  129. shotgun/cli/specify.py +0 -70
  130. shotgun/cli/tasks.py +0 -78
  131. shotgun/sentry_telemetry.py +0 -232
  132. shotgun/tui/screens/onboarding.py +0 -584
  133. {shotgun_sh-0.4.0.dev1.dist-info → shotgun_sh-0.6.2.dist-info}/WHEEL +0 -0
  134. {shotgun_sh-0.4.0.dev1.dist-info → shotgun_sh-0.6.2.dist-info}/entry_points.txt +0 -0
  135. {shotgun_sh-0.4.0.dev1.dist-info → shotgun_sh-0.6.2.dist-info}/licenses/LICENSE +0 -0
shotgun/codebase/benchmarks/benchmark_runner.py (new file)
@@ -0,0 +1,309 @@
+"""Benchmark runner for codebase indexing performance analysis.
+
+This module provides the BenchmarkRunner class for running benchmark iterations
+and collecting performance statistics.
+"""
+
+from __future__ import annotations
+
+import gc
+import hashlib
+import json
+import shutil
+import time
+from collections.abc import Callable
+from pathlib import Path
+from typing import Any
+
+from shotgun.codebase.benchmarks.models import (
+    BenchmarkConfig,
+    BenchmarkMode,
+    BenchmarkResults,
+    BenchmarkRun,
+)
+from shotgun.codebase.core import Ingestor, SimpleGraphBuilder
+from shotgun.codebase.core.kuzu_compat import get_kuzu
+from shotgun.codebase.core.metrics_collector import MetricsCollector
+from shotgun.codebase.core.parser_loader import load_parsers
+from shotgun.logging_config import get_logger
+from shotgun.sdk.services import get_codebase_service
+from shotgun.utils.file_system_utils import get_shotgun_home
+
+logger = get_logger(__name__)
+
+
+def _compute_graph_id(codebase_path: Path) -> str:
+    """Compute a unique graph ID from the codebase path.
+
+    Args:
+        codebase_path: Path to the codebase
+
+    Returns:
+        A 12-character hex string identifying this codebase
+    """
+    return hashlib.sha256(str(codebase_path).encode()).hexdigest()[:12]
+
+
+class BenchmarkRunner:
+    """Runs benchmark iterations and collects statistics."""
+
+    def __init__(
+        self,
+        codebase_path: Path,
+        codebase_name: str,
+        iterations: int = 1,
+        warmup_iterations: int = 0,
+        parallel: bool = True,
+        worker_count: int | None = None,
+        collect_file_metrics: bool = True,
+        collect_worker_metrics: bool = True,
+        progress_callback: Callable[..., Any] | None = None,
+    ) -> None:
+        """Initialize benchmark runner.
+
+        Args:
+            codebase_path: Path to repository to benchmark
+            codebase_name: Human-readable name for the codebase
+            iterations: Number of measured benchmark runs
+            warmup_iterations: Number of warmup runs (not measured)
+            parallel: Whether to use parallel execution
+            worker_count: Number of workers (None = auto)
+            collect_file_metrics: Whether to collect per-file metrics
+            collect_worker_metrics: Whether to collect per-worker metrics
+            progress_callback: Optional callback for progress updates
+        """
+        self.codebase_path = codebase_path.resolve()
+        self.codebase_name = codebase_name
+        self.iterations = iterations
+        self.warmup_iterations = warmup_iterations
+        self.parallel = parallel
+        self.worker_count = worker_count
+        self.collect_file_metrics = collect_file_metrics
+        self.collect_worker_metrics = collect_worker_metrics
+        self.progress_callback = progress_callback
+
+        # Configuration object
+        self.config = BenchmarkConfig(
+            mode=BenchmarkMode.PARALLEL if parallel else BenchmarkMode.SEQUENTIAL,
+            worker_count=worker_count,
+            iterations=iterations,
+            warmup_iterations=warmup_iterations,
+            collect_file_metrics=collect_file_metrics,
+            collect_worker_metrics=collect_worker_metrics,
+        )
+
+        # Storage for database operations
+        self._storage_dir = get_shotgun_home() / "codebases"
+        self._service = get_codebase_service(self._storage_dir)
+
+    async def run(self) -> BenchmarkResults:
+        """Run all benchmark iterations and return aggregated results.
+
+        Returns:
+            BenchmarkResults with all run data and statistics
+        """
+        results = BenchmarkResults(
+            codebase_name=self.codebase_name,
+            codebase_path=str(self.codebase_path),
+            config=self.config,
+        )
+
+        # Run warmup iterations
+        for i in range(self.warmup_iterations):
+            logger.info(f"Running warmup iteration {i + 1}/{self.warmup_iterations}...")
+            if self.progress_callback:
+                self.progress_callback(
+                    f"Warmup {i + 1}/{self.warmup_iterations}", None, None
+                )
+
+            run = await self._run_single_iteration(
+                run_id=i,
+                is_warmup=True,
+            )
+            results.add_run(run)
+            await self._cleanup_database()
+
+        # Run measured iterations
+        for i in range(self.iterations):
+            logger.info(f"Running benchmark iteration {i + 1}/{self.iterations}...")
+            if self.progress_callback:
+                self.progress_callback(
+                    f"Benchmark {i + 1}/{self.iterations}", None, None
+                )
+
+            run = await self._run_single_iteration(
+                run_id=i,
+                is_warmup=False,
+            )
+            results.add_run(run)
+
+            # Clean up between iterations (but not after the last one)
+            if i < self.iterations - 1:
+                await self._cleanup_database()
+
+        # Register the codebase so it persists after benchmark
+        await self._register_codebase()
+
+        # Calculate statistics
+        results.calculate_statistics()
+
+        logger.info(
+            f"Benchmark complete: {self.iterations} iterations, "
+            f"avg {results.avg_duration_seconds:.2f}s"
+        )
+
+        return results
+
+    async def _run_single_iteration(
+        self,
+        run_id: int,
+        is_warmup: bool,
+    ) -> BenchmarkRun:
+        """Run a single benchmark iteration.
+
+        Args:
+            run_id: Run number
+            is_warmup: Whether this is a warmup run
+
+        Returns:
+            BenchmarkRun with collected metrics
+        """
+        # Create metrics collector
+        metrics_collector = MetricsCollector(
+            codebase_name=self.codebase_name,
+            collect_file_metrics=self.collect_file_metrics,
+            collect_worker_metrics=self.collect_worker_metrics,
+        )
+
+        # Generate unique graph ID for this run
+        graph_id = _compute_graph_id(self.codebase_path)
+
+        # Create database
+        kuzu = get_kuzu()
+        graph_path = self._storage_dir / f"{graph_id}.kuzu"
+
+        # Ensure clean state
+        if graph_path.exists():
+            if graph_path.is_dir():
+                shutil.rmtree(graph_path)
+            else:
+                graph_path.unlink()
+        wal_path = self._storage_dir / f"{graph_id}.kuzu.wal"
+        if wal_path.exists():
+            wal_path.unlink()
+
+        # Create database and connection
+        db = kuzu.Database(str(graph_path))
+        conn = kuzu.Connection(db)
+
+        # Load parsers
+        parsers, queries = load_parsers()
+
+        # Create ingestor and builder
+        ingestor = Ingestor(conn)
+        ingestor.create_schema()
+
+        builder = SimpleGraphBuilder(
+            ingestor=ingestor,
+            repo_path=self.codebase_path,
+            parsers=parsers,
+            queries=queries,
+            metrics_collector=metrics_collector,
+            enable_parallel=self.parallel,
+            progress_callback=None,  # Disable TUI progress in benchmark mode
+        )
+
+        # Run indexing
+        await builder.run()
+
+        # Get metrics
+        metrics = metrics_collector.get_metrics()
+
+        # Close connection
+        del conn
+        del db
+
+        return BenchmarkRun(
+            run_id=run_id,
+            is_warmup=is_warmup,
+            metrics=metrics,
+        )
+
+    async def _cleanup_database(self) -> None:
+        """Delete database files and clear caches between runs."""
+        graph_id = _compute_graph_id(self.codebase_path)
+
+        # Delete database file
+        graph_path = self._storage_dir / f"{graph_id}.kuzu"
+        if graph_path.exists():
+            if graph_path.is_dir():
+                shutil.rmtree(graph_path)
+            else:
+                graph_path.unlink()
+            logger.debug(f"Deleted database: {graph_path}")
+
+        # Delete WAL file
+        wal_path = self._storage_dir / f"{graph_id}.kuzu.wal"
+        if wal_path.exists():
+            wal_path.unlink()
+            logger.debug(f"Deleted WAL: {wal_path}")
+
+        # Force garbage collection
+        gc.collect()
+
+    async def _register_codebase(self) -> None:
+        """Register the codebase so it appears in `shotgun codebase list`.
+
+        This creates a Project node in the database with metadata that
+        identifies the indexed codebase.
+        """
+        graph_id = _compute_graph_id(self.codebase_path)
+        graph_path = self._storage_dir / f"{graph_id}.kuzu"
+
+        if not graph_path.exists():
+            logger.warning("Cannot register codebase: database not found")
+            return
+
+        kuzu = get_kuzu()
+        db = kuzu.Database(str(graph_path))
+        conn = kuzu.Connection(db)
+
+        try:
+            # Check if Project node already exists
+            result = conn.execute("MATCH (p:Project) RETURN p.graph_id LIMIT 1")
+            if result.has_next():
+                logger.debug("Project node already exists, skipping registration")
+                return
+
+            # Create Project node with metadata
+            current_time = int(time.time())
+            conn.execute(
+                """
+                CREATE (p:Project {
+                    name: $name,
+                    repo_path: $repo_path,
+                    graph_id: $graph_id,
+                    created_at: $created_at,
+                    updated_at: $updated_at,
+                    schema_version: $schema_version,
+                    build_options: $build_options,
+                    indexed_from_cwds: $indexed_from_cwds
+                })
+                """,
+                {
+                    "name": self.codebase_name,
+                    "repo_path": str(self.codebase_path),
+                    "graph_id": graph_id,
+                    "created_at": current_time,
+                    "updated_at": current_time,
+                    "schema_version": "1.0.0",
+                    "build_options": json.dumps({}),
+                    "indexed_from_cwds": json.dumps([str(Path.cwd())]),
+                },
+            )
+            logger.info(
+                f"Registered codebase '{self.codebase_name}' with graph_id: {graph_id}"
+            )
+        finally:
+            del conn
+            del db
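For orientation, here is a minimal usage sketch of the BenchmarkRunner added above. It is not part of the diff: the asyncio entry point, the example path, and the printed field are illustrative assumptions based only on the constructor and the async run() signature shown in this hunk.

    # Hypothetical sketch, not from the package: drive BenchmarkRunner end to end.
    import asyncio
    from pathlib import Path

    from shotgun.codebase.benchmarks.benchmark_runner import BenchmarkRunner


    async def main() -> None:
        runner = BenchmarkRunner(
            codebase_path=Path("path/to/repo"),  # illustrative path
            codebase_name="example-repo",        # illustrative name
            iterations=3,
            warmup_iterations=1,
            parallel=True,
        )
        results = await runner.run()
        # avg_duration_seconds is the field referenced in the runner's final log line.
        print(f"avg duration: {results.avg_duration_seconds:.2f}s")


    asyncio.run(main())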
shotgun/codebase/benchmarks/exporters.py (new file)
@@ -0,0 +1,119 @@
+"""Metrics exporters for saving benchmark results to files.
+
+This module provides the MetricsExporter class for exporting benchmark
+results to various file formats.
+"""
+
+from __future__ import annotations
+
+from pathlib import Path
+from typing import TYPE_CHECKING
+
+from shotgun.codebase.benchmarks.formatters import (
+    JsonFormatter,
+    MarkdownFormatter,
+    MetricsDisplayOptions,
+)
+from shotgun.logging_config import get_logger
+
+if TYPE_CHECKING:
+    from shotgun.codebase.benchmarks.models import BenchmarkResults
+
+logger = get_logger(__name__)
+
+
+class MetricsExporter:
+    """Export benchmark metrics to files."""
+
+    def __init__(self) -> None:
+        """Initialize metrics exporter."""
+        self._format_map = {
+            ".json": self._export_json,
+            ".md": self._export_markdown,
+            ".markdown": self._export_markdown,
+        }
+
+    def export(
+        self,
+        results: BenchmarkResults,
+        filepath: Path | str,
+        format: str | None = None,
+        options: MetricsDisplayOptions | None = None,
+    ) -> None:
+        """Export benchmark results to file.
+
+        The format is auto-detected from the file extension if not specified.
+
+        Args:
+            results: Benchmark results to export
+            filepath: Path to export file
+            format: Optional format override ("json", "markdown")
+            options: Display options for controlling what to include
+
+        Raises:
+            ValueError: If format cannot be determined or is unsupported
+            OSError: If file cannot be written
+        """
+        filepath = Path(filepath)
+        options = options or MetricsDisplayOptions()
+
+        # Determine format
+        if format:
+            format_lower = format.lower()
+            if format_lower == "json":
+                export_func = self._export_json
+            elif format_lower in ("markdown", "md"):
+                export_func = self._export_markdown
+            else:
+                raise ValueError(f"Unknown export format: {format}")
+        else:
+            # Auto-detect from extension
+            suffix = filepath.suffix.lower()
+            if suffix not in self._format_map:
+                raise ValueError(
+                    f"Cannot determine format from extension '{suffix}'. "
+                    f"Supported extensions: {', '.join(self._format_map.keys())}. "
+                    f"Or specify format explicitly."
+                )
+            export_func = self._format_map[suffix]
+
+        # Ensure parent directory exists
+        filepath.parent.mkdir(parents=True, exist_ok=True)
+
+        # Export
+        export_func(results, filepath, options)
+        logger.info(f"Exported benchmark results to {filepath}")
+
+    def _export_json(
+        self,
+        results: BenchmarkResults,
+        filepath: Path,
+        options: MetricsDisplayOptions,
+    ) -> None:
+        """Export results to JSON file.
+
+        Args:
+            results: Benchmark results
+            filepath: Output path
+            options: Display options
+        """
+        formatter = JsonFormatter()
+        content = formatter.format_results(results, options)
+        filepath.write_text(content)
+
+    def _export_markdown(
+        self,
+        results: BenchmarkResults,
+        filepath: Path,
+        options: MetricsDisplayOptions,
+    ) -> None:
+        """Export results to Markdown file.
+
+        Args:
+            results: Benchmark results
+            filepath: Output path
+            options: Display options
+        """
+        formatter = MarkdownFormatter()
+        content = formatter.format_results(results, options)
+        filepath.write_text(content)
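A small sketch of how the exporter above could be called; it is not part of the diff. It assumes a `results` object produced by a prior BenchmarkRunner.run(), and the output paths are illustrative.

    # Hypothetical sketch, not from the package: export one BenchmarkResults object.
    from shotgun.codebase.benchmarks.exporters import MetricsExporter
    from shotgun.codebase.benchmarks.formatters import MetricsDisplayOptions

    exporter = MetricsExporter()

    # Format auto-detected from the ".md" extension.
    exporter.export(results, "bench/report.md")

    # Explicit format override with default display options.
    exporter.export(results, "bench/report.out", format="json", options=MetricsDisplayOptions())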
shotgun/codebase/benchmarks/formatters/__init__.py (new file)
@@ -0,0 +1,49 @@
+"""Result formatters for benchmark output.
+
+This package provides formatters for displaying benchmark results in various
+formats: JSON and Markdown.
+"""
+
+from shotgun.codebase.benchmarks.formatters.base import ResultFormatter
+from shotgun.codebase.benchmarks.formatters.json_formatter import JsonFormatter
+from shotgun.codebase.benchmarks.formatters.markdown import MarkdownFormatter
+
+# Re-export MetricsDisplayOptions from models for convenience
+from shotgun.codebase.benchmarks.models import MetricsDisplayOptions
+
+__all__ = [
+    "JsonFormatter",
+    "MarkdownFormatter",
+    "MetricsDisplayOptions",
+    "ResultFormatter",
+    "get_formatter",
+]
+
+
+def get_formatter(
+    output_format: str,
+) -> JsonFormatter | MarkdownFormatter:
+    """Get appropriate formatter for output format.
+
+    Args:
+        output_format: Format name - "json" or "markdown"
+
+    Returns:
+        Formatter instance
+
+    Raises:
+        ValueError: If output format is unknown
+    """
+    formatters: dict[str, type[JsonFormatter | MarkdownFormatter]] = {
+        "json": JsonFormatter,
+        "markdown": MarkdownFormatter,
+    }
+
+    format_lower = output_format.lower()
+    if format_lower not in formatters:
+        raise ValueError(
+            f"Unknown output format: {output_format}. "
+            f"Valid formats: {', '.join(formatters.keys())}"
+        )
+
+    return formatters[format_lower]()
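A usage sketch for the get_formatter helper; not part of the diff, and `results` is again assumed to come from a benchmark run.

    # Hypothetical sketch, not from the package: select a formatter by name.
    from shotgun.codebase.benchmarks.formatters import MetricsDisplayOptions, get_formatter

    formatter = get_formatter("markdown")  # "json" also works; other names raise ValueError
    report = formatter.format_results(results, MetricsDisplayOptions())
    print(report)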
shotgun/codebase/benchmarks/formatters/base.py (new file)
@@ -0,0 +1,34 @@
+"""Base classes and protocols for formatters.
+
+This module provides the Protocol interface that all formatters implement.
+"""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Protocol
+
+if TYPE_CHECKING:
+    from shotgun.codebase.benchmarks.models import (
+        BenchmarkResults,
+        MetricsDisplayOptions,
+    )
+
+
+class ResultFormatter(Protocol):
+    """Protocol for formatting benchmark results."""
+
+    def format_results(
+        self,
+        results: BenchmarkResults,
+        options: MetricsDisplayOptions,
+    ) -> str:
+        """Format benchmark results for display.
+
+        Args:
+            results: Benchmark results to format
+            options: Display options
+
+        Returns:
+            Formatted string ready for output
+        """
+        ...
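Because ResultFormatter is a Protocol, any class exposing a matching format_results signature satisfies it structurally, without inheriting from a base class. A minimal sketch follows; it is not part of the diff, and the attribute reads mirror those used by JsonFormatter in the next hunk.

    # Hypothetical sketch, not from the package: a one-line formatter that
    # satisfies ResultFormatter purely by structural typing.
    from shotgun.codebase.benchmarks.models import BenchmarkResults, MetricsDisplayOptions


    class OneLineFormatter:
        def format_results(
            self,
            results: BenchmarkResults,
            options: MetricsDisplayOptions,
        ) -> str:
            return (
                f"{results.codebase_name}: "
                f"avg {results.avg_duration_seconds:.2f}s over "
                f"{results.config.iterations} iteration(s)"
            )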
shotgun/codebase/benchmarks/formatters/json_formatter.py (new file)
@@ -0,0 +1,106 @@
+"""JSON formatter for benchmark results.
+
+This module provides the JsonFormatter class for displaying benchmark results
+as JSON.
+"""
+
+from __future__ import annotations
+
+import json
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from shotgun.codebase.benchmarks.models import (
+        BenchmarkResults,
+        MetricsDisplayOptions,
+    )
+
+
+class JsonFormatter:
+    """Format benchmark results as JSON."""
+
+    def format_results(
+        self,
+        results: BenchmarkResults,
+        options: MetricsDisplayOptions,
+    ) -> str:
+        """Format benchmark results as JSON.
+
+        Args:
+            results: Benchmark results to format
+            options: Display options
+
+        Returns:
+            JSON string
+        """
+        data = {
+            "codebase_name": results.codebase_name,
+            "codebase_path": results.codebase_path,
+            "config": {
+                "mode": results.config.mode,
+                "worker_count": results.config.worker_count,
+                "iterations": results.config.iterations,
+                "warmup_iterations": results.config.warmup_iterations,
+            },
+            "statistics": {
+                "avg_duration_seconds": results.avg_duration_seconds,
+                "min_duration_seconds": results.min_duration_seconds,
+                "max_duration_seconds": results.max_duration_seconds,
+                "std_dev_seconds": results.std_dev_seconds,
+                "avg_throughput": results.avg_throughput,
+                "avg_memory_mb": results.avg_memory_mb,
+                "speedup_factor": results.speedup_factor,
+                "efficiency": results.efficiency,
+            },
+            "runs": [],
+        }
+
+        # Add run data
+        for run in results.measured_runs:
+            run_data: dict[str, object] = {
+                "run_id": run.run_id,
+                "duration_seconds": run.metrics.total_duration_seconds,
+                "total_files": run.metrics.total_files,
+                "total_nodes": run.metrics.total_nodes,
+                "total_relationships": run.metrics.total_relationships,
+                "throughput": run.metrics.avg_throughput,
+                "peak_memory_mb": run.metrics.peak_memory_mb,
+            }
+
+            # Add phase metrics
+            if options.show_phase_metrics:
+                phase_data: dict[str, dict[str, object]] = {}
+                for name, phase in run.metrics.phase_metrics.items():
+                    phase_data[name] = {
+                        "duration_seconds": phase.duration_seconds,
+                        "items_processed": phase.items_processed,
+                        "throughput": phase.throughput,
+                        "memory_mb": phase.memory_mb,
+                    }
+                run_data["phase_metrics"] = phase_data
+
+            # Add file metrics
+            if options.show_file_metrics and run.metrics.file_metrics:
+                file_metrics_list = run.metrics.file_metrics
+                if options.top_n_files:
+                    file_metrics_list = sorted(
+                        file_metrics_list,
+                        key=lambda f: f.parse_time_ms,
+                        reverse=True,
+                    )[: options.top_n_files]
+
+                run_data["file_metrics"] = [
+                    {
+                        "file_path": f.file_path,
+                        "language": f.language,
+                        "file_size_bytes": f.file_size_bytes,
+                        "parse_time_ms": f.parse_time_ms,
+                        "definitions_extracted": f.definitions_extracted,
+                    }
+                    for f in file_metrics_list
+                ]
+
+            runs_list: list[dict[str, object]] = data["runs"]  # type: ignore[assignment]
+            runs_list.append(run_data)
+
+        return json.dumps(data, indent=2)