codebase-intel 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- codebase_intel/__init__.py +3 -0
- codebase_intel/analytics/__init__.py +1 -0
- codebase_intel/analytics/benchmark.py +406 -0
- codebase_intel/analytics/feedback.py +496 -0
- codebase_intel/analytics/tracker.py +439 -0
- codebase_intel/cli/__init__.py +1 -0
- codebase_intel/cli/main.py +740 -0
- codebase_intel/contracts/__init__.py +1 -0
- codebase_intel/contracts/auto_generator.py +438 -0
- codebase_intel/contracts/evaluator.py +531 -0
- codebase_intel/contracts/models.py +433 -0
- codebase_intel/contracts/registry.py +225 -0
- codebase_intel/core/__init__.py +1 -0
- codebase_intel/core/config.py +248 -0
- codebase_intel/core/exceptions.py +454 -0
- codebase_intel/core/types.py +375 -0
- codebase_intel/decisions/__init__.py +1 -0
- codebase_intel/decisions/miner.py +297 -0
- codebase_intel/decisions/models.py +302 -0
- codebase_intel/decisions/store.py +411 -0
- codebase_intel/drift/__init__.py +1 -0
- codebase_intel/drift/detector.py +443 -0
- codebase_intel/graph/__init__.py +1 -0
- codebase_intel/graph/builder.py +391 -0
- codebase_intel/graph/parser.py +1232 -0
- codebase_intel/graph/query.py +377 -0
- codebase_intel/graph/storage.py +736 -0
- codebase_intel/mcp/__init__.py +1 -0
- codebase_intel/mcp/server.py +710 -0
- codebase_intel/orchestrator/__init__.py +1 -0
- codebase_intel/orchestrator/assembler.py +649 -0
- codebase_intel-0.1.0.dist-info/METADATA +361 -0
- codebase_intel-0.1.0.dist-info/RECORD +36 -0
- codebase_intel-0.1.0.dist-info/WHEEL +4 -0
- codebase_intel-0.1.0.dist-info/entry_points.txt +2 -0
- codebase_intel-0.1.0.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,740 @@
|
|
|
1
|
+
"""CLI entry point — the developer-facing interface.
|
|
2
|
+
|
|
3
|
+
Commands:
|
|
4
|
+
- init: Initialize codebase-intel for a project
|
|
5
|
+
- analyze: Build or update the code graph
|
|
6
|
+
- query: Query the graph, decisions, or contracts
|
|
7
|
+
- drift: Run drift detection
|
|
8
|
+
- mine: Mine git history for decision candidates
|
|
9
|
+
- serve: Start the MCP server
|
|
10
|
+
- status: Show component health
|
|
11
|
+
|
|
12
|
+
Design:
|
|
13
|
+
- Uses Typer for declarative CLI definition
|
|
14
|
+
- Rich for pretty output (tables, panels, progress bars)
|
|
15
|
+
- All heavy operations are async (run via asyncio.run)
|
|
16
|
+
- Commands degrade gracefully if components aren't initialized
|
|
17
|
+
"""
|
|
18
|
+
|
|
19
|
+
from __future__ import annotations
|
|
20
|
+
|
|
21
|
+
import asyncio
|
|
22
|
+
import logging
|
|
23
|
+
import sys
|
|
24
|
+
from pathlib import Path
|
|
25
|
+
from typing import Annotated, Optional
|
|
26
|
+
|
|
27
|
+
import typer
|
|
28
|
+
from rich.console import Console
|
|
29
|
+
from rich.panel import Panel
|
|
30
|
+
from rich.progress import Progress, SpinnerColumn, TextColumn
|
|
31
|
+
from rich.table import Table
|
|
32
|
+
|
|
33
|
+
# Top-level Typer application. `no_args_is_help` makes a bare invocation
# print usage instead of doing nothing.
app = typer.Typer(
    name="codebase-intel",
    help="Codebase Intelligence Platform — structured context for AI coding agents",
    no_args_is_help=True,
)
# Shared Rich console used by every command for styled terminal output.
console = Console()
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def _setup_logging(verbose: bool) -> None:
    """Configure root logging for a CLI run.

    Args:
        verbose: When True, log at DEBUG level; otherwise INFO.
    """
    level = logging.DEBUG if verbose else logging.INFO
    logging.basicConfig(
        level=level,
        format="%(levelname)s %(name)s: %(message)s",
        handlers=[logging.StreamHandler()],
        # Without force=True, basicConfig is a silent no-op when the root
        # logger already has handlers (e.g. a second command in the same
        # process, or a host app that configured logging first), so the
        # requested level would never take effect.
        force=True,
    )
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
# -------------------------------------------------------------------
|
|
51
|
+
# init
|
|
52
|
+
# -------------------------------------------------------------------
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
@app.command()
def init(
    path: Annotated[
        Path,
        typer.Argument(help="Project root directory"),
    ] = Path("."),
    verbose: bool = typer.Option(False, "--verbose", "-v"),
) -> None:
    """Initialize codebase-intel for a project.

    Creates the .codebase-intel directory, builds the initial code graph,
    and generates starter configuration and contract templates.
    """
    _setup_logging(verbose)
    project_root = path.resolve()

    # Guard clause: refuse anything that is not an existing directory.
    if not project_root.is_dir():
        console.print(f"[red]Not a directory: {project_root}[/red]")
        raise typer.Exit(1)

    banner = Panel(
        f"Initializing codebase-intel for [bold]{project_root.name}[/bold]",
        title="codebase-intel init",
    )
    console.print(banner)

    # All heavy lifting happens in the async worker.
    asyncio.run(_init_async(project_root, verbose))
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
async def _init_async(project_root: Path, verbose: bool) -> None:
    """Async worker for `init`: write config, seed a contract template,
    build the initial code graph, and do a best-effort git-history scan.

    Args:
        verbose: Currently unused here (logging is configured by the caller).
    """
    from codebase_intel.core.config import ProjectConfig

    # Create config
    config = ProjectConfig(project_root=project_root)
    config.ensure_dirs()

    # Save default config (only if one does not already exist — re-running
    # `init` must not clobber user edits).
    import yaml
    config_file = project_root / ".codebase-intel" / "config.yaml"
    if not config_file.exists():
        config_file.write_text(
            yaml.dump(config.to_yaml_dict(), default_flow_style=False, sort_keys=False),
            encoding="utf-8",
        )
        console.print(f" Created config: {config_file.relative_to(project_root)}")

    # Create starter contract template
    from codebase_intel.contracts.registry import ContractRegistry
    registry = ContractRegistry(config.contracts, project_root)
    template_path = await registry.create_template(
        "project-rules", f"{project_root.name} Quality Rules"
    )
    console.print(f" Created contract template: {template_path.relative_to(project_root)}")

    # Build initial graph
    console.print()
    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        console=console,
    ) as progress:
        task = progress.add_task("Building code graph...", total=None)

        from codebase_intel.graph.storage import GraphStorage
        from codebase_intel.graph.builder import GraphBuilder

        async with GraphStorage.open(config.graph, project_root) as storage:
            builder = GraphBuilder(config, storage)
            build_progress = await builder.full_build()

        progress.update(task, description="Done!", completed=True)

    # Summary
    table = Table(title="Initialization Summary")
    table.add_column("Metric", style="bold")
    table.add_column("Value", justify="right")
    table.add_row("Files processed", str(build_progress.processed))
    table.add_row("Files skipped", str(build_progress.skipped))
    table.add_row("Nodes created", str(build_progress.nodes_created))
    table.add_row("Edges created", str(build_progress.edges_created))
    table.add_row("Warnings", str(len(build_progress.warnings)))
    console.print(table)

    # Show at most 10 warnings to keep terminal output manageable.
    if build_progress.warnings:
        console.print(f"\n[yellow]Warnings ({len(build_progress.warnings)}):[/yellow]")
        for w in build_progress.warnings[:10]:
            console.print(f" {w}")
        if len(build_progress.warnings) > 10:
            console.print(f" ... and {len(build_progress.warnings) - 10} more")

    # Mine git history for decisions
    try:
        from codebase_intel.decisions.miner import GitMiner
        miner = GitMiner(config.decisions, project_root)
        candidates = await miner.mine_commits(max_commits=200)
        if candidates:
            console.print(
                f"\n[cyan]Found {len(candidates)} decision candidates in git history.[/cyan]"
            )
            console.print(" Run `codebase-intel mine` to review and save them.")
    except Exception:
        pass  # Git mining is best-effort

    console.print(
        "\n[green]Initialization complete![/green] "
        "The MCP server is ready: `codebase-intel serve`"
    )
|
|
161
|
+
|
|
162
|
+
|
|
163
|
+
# -------------------------------------------------------------------
|
|
164
|
+
# analyze
|
|
165
|
+
# -------------------------------------------------------------------
|
|
166
|
+
|
|
167
|
+
|
|
168
|
+
@app.command()
def analyze(
    path: Annotated[Path, typer.Argument(help="Project root")] = Path("."),
    incremental: bool = typer.Option(False, "--incremental", "-i", help="Only re-parse changed files"),
    verbose: bool = typer.Option(False, "--verbose", "-v"),
) -> None:
    """Build or update the code graph."""
    # Configure logging first so the async worker inherits the chosen level.
    _setup_logging(verbose)
    project_root = path.resolve()
    asyncio.run(_analyze_async(project_root, incremental))
|
|
177
|
+
|
|
178
|
+
|
|
179
|
+
async def _analyze_async(project_root: Path, incremental: bool) -> None:
    """Async worker for `analyze`: run a full or incremental graph build.

    Args:
        project_root: Resolved project root directory.
        incremental: When True, only re-parse changed files.
    """
    from codebase_intel.core.config import ProjectConfig
    from codebase_intel.graph.builder import GraphBuilder
    from codebase_intel.graph.storage import GraphStorage

    config = ProjectConfig(project_root=project_root)

    # The graph DB's parent dir is created by `init`; its absence means the
    # project was never initialized.
    if not config.graph.db_path.parent.exists():
        console.print("[red]Not initialized. Run `codebase-intel init` first.[/red]")
        raise typer.Exit(1)

    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        console=console,
    ) as progress:
        mode = "incremental" if incremental else "full"
        task = progress.add_task(f"Running {mode} analysis...", total=None)

        async with GraphStorage.open(config.graph, project_root) as storage:
            builder = GraphBuilder(config, storage)
            if incremental:
                result = await builder.incremental_build()
            else:
                result = await builder.full_build()

        progress.update(task, description="Done!", completed=True)

    console.print(
        f"[green]Analysis complete:[/green] "
        f"{result.processed} files, {result.nodes_created} nodes, "
        f"{result.edges_created} edges"
    )
|
|
212
|
+
|
|
213
|
+
|
|
214
|
+
# -------------------------------------------------------------------
|
|
215
|
+
# drift
|
|
216
|
+
# -------------------------------------------------------------------
|
|
217
|
+
|
|
218
|
+
|
|
219
|
+
@app.command()
def drift(
    path: Annotated[Path, typer.Argument(help="Project root")] = Path("."),
    verbose: bool = typer.Option(False, "--verbose", "-v"),
) -> None:
    """Run drift detection across all components."""
    # Thin sync wrapper: set up logging, then hand off to the async worker.
    _setup_logging(verbose)
    project_root = path.resolve()
    asyncio.run(_drift_async(project_root))
|
|
227
|
+
|
|
228
|
+
|
|
229
|
+
async def _drift_async(project_root: Path) -> None:
    """Async worker for `drift`: run a full drift check and render the report.

    Args:
        project_root: Resolved project root directory.
    """
    from codebase_intel.core.config import ProjectConfig
    from codebase_intel.decisions.store import DecisionStore
    from codebase_intel.drift.detector import DriftDetector

    config = ProjectConfig(project_root=project_root)
    decision_store = DecisionStore(config.decisions, project_root)

    detector = DriftDetector(
        config=config.drift,
        project_root=project_root,
        decision_store=decision_store,
    )

    report = await detector.full_check()

    # Display results
    # Map severity names to Rich colors; unknown levels fall back to white.
    level_colors = {
        "none": "green",
        "low": "cyan",
        "medium": "yellow",
        "high": "red",
        "critical": "bold red",
    }

    color = level_colors.get(report.overall_level.value, "white")
    console.print(Panel(
        f"Overall: [{color}]{report.overall_level.value.upper()}[/{color}]\n{report.summary}",
        title="Drift Report",
    ))

    if report.items:
        table = Table(title="Drift Items")
        table.add_column("Level", style="bold")
        table.add_column("Component")
        table.add_column("Description")
        table.add_column("Fix")

        # Cap the table at 20 rows; description/remediation are truncated
        # to keep the table readable in a terminal.
        for item in report.items[:20]:
            level_color = level_colors.get(item.level.value, "white")
            table.add_row(
                f"[{level_color}]{item.level.value}[/{level_color}]",
                item.component,
                item.description[:80],
                item.remediation[:50] if item.remediation else "",
            )
        console.print(table)

        if len(report.items) > 20:
            console.print(f"\n... and {len(report.items) - 20} more items")

    if report.rot_detected:
        console.print(
            f"\n[bold red]CONTEXT ROT DETECTED:[/bold red] "
            f"{report.rot_percentage:.0%} of records are stale. "
            f"Consider running a team review session."
        )
|
|
286
|
+
|
|
287
|
+
|
|
288
|
+
# -------------------------------------------------------------------
|
|
289
|
+
# mine
|
|
290
|
+
# -------------------------------------------------------------------
|
|
291
|
+
|
|
292
|
+
|
|
293
|
+
@app.command()
def mine(
    path: Annotated[Path, typer.Argument(help="Project root")] = Path("."),
    max_commits: int = typer.Option(200, "--max", "-m", help="Max commits to scan"),
    save: bool = typer.Option(False, "--save", "-s", help="Auto-save candidates as draft decisions"),
    verbose: bool = typer.Option(False, "--verbose", "-v"),
) -> None:
    """Mine git history for decision candidates."""
    # Thin sync wrapper around the async miner.
    _setup_logging(verbose)
    project_root = path.resolve()
    asyncio.run(_mine_async(project_root, max_commits, save))
|
|
303
|
+
|
|
304
|
+
|
|
305
|
+
async def _mine_async(project_root: Path, max_commits: int, save: bool) -> None:
    """Async worker for `mine`: scan recent commits for decision candidates
    and optionally persist them as draft decision records.

    Args:
        project_root: Resolved project root directory.
        max_commits: Upper bound on how many commits to scan.
        save: When True, save every candidate as a draft decision.
    """
    from codebase_intel.core.config import ProjectConfig
    from codebase_intel.decisions.miner import GitMiner
    from codebase_intel.decisions.store import DecisionStore

    config = ProjectConfig(project_root=project_root)
    miner = GitMiner(config.decisions, project_root)

    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        console=console,
    ) as progress:
        task = progress.add_task(f"Mining last {max_commits} commits...", total=None)
        candidates = await miner.mine_commits(max_commits=max_commits)
        progress.update(task, description="Done!", completed=True)

    if not candidates:
        console.print("[yellow]No decision candidates found in recent commits.[/yellow]")
        return

    table = Table(title=f"Decision Candidates ({len(candidates)} found)")
    table.add_column("#", style="dim")
    table.add_column("Title")
    table.add_column("Source")
    table.add_column("Confidence", justify="right")
    table.add_column("Keywords")

    # Display is capped at 20 rows, but --save below persists ALL candidates.
    for i, c in enumerate(candidates[:20], 1):
        table.add_row(
            str(i),
            c.title[:60],
            f"{c.source_type}:{c.source_ref[:8]}",
            f"{c.confidence:.0%}",
            ", ".join(c.keywords_matched[:3]),
        )
    console.print(table)

    if save:
        store = DecisionStore(config.decisions, project_root)
        saved = 0
        for candidate in candidates:
            # IDs are allocated by the store so drafts never collide.
            decision_id = await store.next_id()
            record = candidate.to_decision_record(decision_id)
            await store.save(record)
            saved += 1
        console.print(f"\n[green]Saved {saved} draft decisions.[/green] Review them in .codebase-intel/decisions/")
    else:
        console.print("\nRun with `--save` to save these as draft decisions.")
|
|
354
|
+
|
|
355
|
+
|
|
356
|
+
# -------------------------------------------------------------------
|
|
357
|
+
# detect-patterns
|
|
358
|
+
# -------------------------------------------------------------------
|
|
359
|
+
|
|
360
|
+
|
|
361
|
+
@app.command(name="detect-patterns")
def detect_patterns(
    path: Annotated[Path, typer.Argument(help="Project root")] = Path("."),
    save: bool = typer.Option(False, "--save", "-s", help="Save as draft contract"),
    verbose: bool = typer.Option(False, "--verbose", "-v"),
) -> None:
    """Auto-detect code patterns and generate quality contracts."""
    # Delegate the actual pattern analysis to the async worker.
    _setup_logging(verbose)
    project_root = path.resolve()
    asyncio.run(_detect_patterns_async(project_root, save))
|
|
370
|
+
|
|
371
|
+
|
|
372
|
+
async def _detect_patterns_async(project_root: Path, save: bool) -> None:
    """Async worker for `detect-patterns`: analyze the codebase for consistent
    patterns and optionally emit a draft contract file.

    Args:
        project_root: Resolved project root directory.
        save: When True, write the generated contract to
            .codebase-intel/contracts/auto-detected.yaml.
    """
    from codebase_intel.contracts.auto_generator import AutoContractGenerator

    generator = AutoContractGenerator(project_root)

    with Progress(
        SpinnerColumn(),
        TextColumn("[progress.description]{task.description}"),
        console=console,
    ) as progress:
        task = progress.add_task("Analyzing codebase patterns...", total=None)
        patterns = await generator.analyze()
        progress.update(task, description="Done!", completed=True)

    if not patterns:
        console.print("[yellow]No strong patterns detected (need 70%+ consistency).[/yellow]")
        return

    console.print(Panel(
        f"Found [bold]{len(patterns)}[/bold] consistent patterns in your codebase",
        title="Auto-Detected Patterns",
    ))

    table = Table()
    table.add_column("#", style="dim")
    table.add_column("Pattern", style="bold")
    table.add_column("Confidence", justify="right", style="cyan")
    table.add_column("Follows", justify="right", style="green")
    table.add_column("Violates", justify="right", style="red")
    table.add_column("Type")

    for i, p in enumerate(patterns, 1):
        table.add_row(
            str(i),
            p.name,
            f"{p.confidence:.0%}",
            str(p.occurrences),
            str(p.violations),
            p.kind.value,
        )
    console.print(table)

    if save:
        contract = generator.generate_contract(patterns)
        import yaml
        contracts_dir = project_root / ".codebase-intel" / "contracts"
        contracts_dir.mkdir(parents=True, exist_ok=True)
        out_path = contracts_dir / "auto-detected.yaml"
        # `model_dump(mode="json")` yields plain JSON-safe types so yaml.dump
        # does not embed Python object tags.
        data = contract.model_dump(mode="json")
        out_path.write_text(
            yaml.dump(data, default_flow_style=False, sort_keys=False),
            encoding="utf-8",
        )
        console.print(f"\n[green]Saved contract to {out_path.relative_to(project_root)}[/green]")
        console.print("Review the rules and adjust severity levels as needed.")
    else:
        console.print("\nRun with `--save` to generate a draft contract from these patterns.")
|
|
429
|
+
|
|
430
|
+
|
|
431
|
+
# -------------------------------------------------------------------
|
|
432
|
+
# serve
|
|
433
|
+
# -------------------------------------------------------------------
|
|
434
|
+
|
|
435
|
+
|
|
436
|
+
@app.command()
def serve(
    path: Annotated[Path, typer.Argument(help="Project root")] = Path("."),
    verbose: bool = typer.Option(False, "--verbose", "-v"),
) -> None:
    """Start the MCP server (stdio transport)."""
    _setup_logging(verbose)
    # Status message goes to stderr so stdout stays clean for the stdio
    # MCP transport.
    console.print("[dim]Starting MCP server over stdio...[/dim]", err=True)

    from codebase_intel.mcp.server import run_server

    project_root = path.resolve()
    asyncio.run(run_server(project_root))
|
|
447
|
+
|
|
448
|
+
|
|
449
|
+
# -------------------------------------------------------------------
|
|
450
|
+
# status
|
|
451
|
+
# -------------------------------------------------------------------
|
|
452
|
+
|
|
453
|
+
|
|
454
|
+
@app.command()
def status(
    path: Annotated[Path, typer.Argument(help="Project root")] = Path("."),
) -> None:
    """Show component health status.

    Purely filesystem-based: checks for the config file, graph DB, and the
    decisions/contracts directories under .codebase-intel without loading
    any component, so it works even on a partially initialized project.
    """
    project_root = path.resolve()
    intel_dir = project_root / ".codebase-intel"

    table = Table(title=f"codebase-intel status: {project_root.name}")
    table.add_column("Component", style="bold")
    table.add_column("Status")
    table.add_column("Details")

    # Check config
    config_file = intel_dir / "config.yaml"
    if config_file.exists():
        table.add_row("Config", "[green]OK[/green]", str(config_file.relative_to(project_root)))
    else:
        table.add_row("Config", "[red]Missing[/red]", "Run `codebase-intel init`")

    # Check graph
    graph_db = intel_dir / "graph.db"
    if graph_db.exists():
        size_mb = graph_db.stat().st_size / (1024 * 1024)
        table.add_row("Code Graph", "[green]OK[/green]", f"{size_mb:.1f} MB")
    else:
        table.add_row("Code Graph", "[red]Not built[/red]", "Run `codebase-intel init`")

    # Check decisions
    decisions_dir = intel_dir / "decisions"
    if decisions_dir.exists():
        count = len(list(decisions_dir.glob("*.yaml")))
        table.add_row("Decisions", "[green]OK[/green]", f"{count} records")
    else:
        table.add_row("Decisions", "[yellow]Empty[/yellow]", "Run `codebase-intel mine`")

    # Check contracts
    contracts_dir = intel_dir / "contracts"
    if contracts_dir.exists():
        count = len(list(contracts_dir.glob("*.yaml")))
        table.add_row("Contracts", "[green]OK[/green]", f"{count} files + builtins")
    else:
        table.add_row("Contracts", "[yellow]Builtins only[/yellow]", "")

    console.print(table)
|
|
499
|
+
|
|
500
|
+
|
|
501
|
+
# -------------------------------------------------------------------
|
|
502
|
+
# benchmark
|
|
503
|
+
# -------------------------------------------------------------------
|
|
504
|
+
|
|
505
|
+
|
|
506
|
+
@app.command()
def benchmark(
    path: Annotated[Path, typer.Argument(help="Project root")] = Path("."),
    verbose: bool = typer.Option(False, "--verbose", "-v"),
) -> None:
    """Run benchmarks — measure token efficiency on this project."""
    # Thin sync wrapper; the benchmark itself runs in the async worker.
    _setup_logging(verbose)
    project_root = path.resolve()
    asyncio.run(_benchmark_async(project_root))
|
|
514
|
+
|
|
515
|
+
|
|
516
|
+
async def _benchmark_async(project_root: Path) -> None:
    """Async worker for `benchmark`: run the suite and render the report.

    Args:
        project_root: Resolved project root directory.

    Results are recorded into the analytics DB via ``AnalyticsTracker``;
    the tracker is always closed, even when the run fails.
    """
    from codebase_intel.analytics.benchmark import BenchmarkRunner
    from codebase_intel.analytics.tracker import AnalyticsTracker

    tracker = AnalyticsTracker(project_root / ".codebase-intel" / "analytics.db")
    # try/finally so the DB handle is released even if the run or the
    # rendering below raises (the original leaked it on error).
    try:
        with Progress(
            SpinnerColumn(),
            TextColumn("[progress.description]{task.description}"),
            console=console,
        ) as progress:
            task = progress.add_task("Running benchmarks...", total=None)

            runner = BenchmarkRunner(project_root)
            report = await runner.run(tracker=tracker)

            progress.update(task, description="Done!", completed=True)

        # Display results
        console.print()
        console.print(Panel(
            f"[bold]{report.repo_name}[/bold]\n"
            f"{report.total_files} files | {report.total_nodes} nodes | "
            f"{report.total_edges} edges | Built in {report.build_time_ms:.0f}ms",
            title="Benchmark Results",
        ))

        table = Table(title="Token Efficiency — Before vs After")
        table.add_column("Scenario", style="bold")
        table.add_column("Naive\n(without tool)", justify="right", style="red")
        table.add_column("Graph Only", justify="right", style="yellow")
        table.add_column("Full Pipeline", justify="right", style="green")
        table.add_column("Reduction", justify="right", style="bold cyan")
        table.add_column("Multiplier", justify="right", style="bold cyan")
        table.add_column("Decisions", justify="right")
        table.add_column("Contracts", justify="right")

        for s in report.scenarios:
            table.add_row(
                s.name,
                f"{s.naive_tokens:,}",
                f"{s.graph_tokens:,}",
                f"{s.full_tokens:,}",
                f"{s.naive_vs_full_reduction:.0f}%",
                f"{s.multiplier:.1f}x",
                str(s.decisions_surfaced),
                str(s.contracts_applied),
            )

        # Average row
        table.add_row(
            "[bold]Average[/bold]",
            "", "", "",
            f"[bold]{report.avg_reduction_pct:.0f}%[/bold]",
            f"[bold]{report.avg_multiplier:.1f}x[/bold]",
            f"[bold]{report.total_decisions_surfaced}[/bold]",
            f"[bold]{report.total_contracts_applied}[/bold]",
            style="on grey23",
        )
        console.print(table)

        # Before/After summary. Plain `if` blocks replace the original
        # `console.print(...) if report.scenarios else None` expression
        # statements, which obscured control flow (same behavior).
        console.print()
        console.print("[bold]What this means:[/bold]")
        if report.scenarios:
            first = report.scenarios[0]
            console.print(
                f" Without codebase-intel: agent reads [red]{first.naive_tokens:,}[/red] tokens "
                f"(every file in the directory)"
            )
            console.print(
                f" With codebase-intel: agent reads [green]{first.full_tokens:,}[/green] tokens "
                f"(only what matters + decisions + contracts)"
            )
        if report.total_decisions_surfaced > 0:
            console.print(
                f" [cyan]{report.total_decisions_surfaced}[/cyan] architectural decisions surfaced "
                f"that the agent would have missed"
            )
        if report.total_contracts_applied > 0:
            console.print(
                f" [cyan]{report.total_contracts_applied}[/cyan] quality rules enforced "
                f"before code was generated"
            )
    finally:
        tracker.close()
|
|
600
|
+
|
|
601
|
+
|
|
602
|
+
# -------------------------------------------------------------------
|
|
603
|
+
# dashboard
|
|
604
|
+
# -------------------------------------------------------------------
|
|
605
|
+
|
|
606
|
+
|
|
607
|
+
@app.command()
def dashboard(
    path: Annotated[Path, typer.Argument(help="Project root")] = Path("."),
) -> None:
    """Show live efficiency dashboard — proves value over time.

    Reads the analytics DB under .codebase-intel and renders before/after
    comparisons, lifetime stats, benchmark history, and a daily-savings
    trend. Exits cleanly (code 0) when no analytics data exists yet.
    """
    project_root = path.resolve()
    analytics_db = project_root / ".codebase-intel" / "analytics.db"

    if not analytics_db.exists():
        console.print(
            "[yellow]No analytics data yet.[/yellow] Run `codebase-intel benchmark` "
            "or use the MCP server to start collecting data."
        )
        raise typer.Exit(0)

    from codebase_intel.analytics.tracker import AnalyticsTracker

    tracker = AnalyticsTracker(analytics_db)
    # try/finally so the DB handle is released even if a query or the
    # rendering below raises (the original leaked it on error).
    try:
        stats = tracker.get_lifetime_stats()
        comparison = tracker.get_before_after_comparison()
        benchmarks = tracker.get_benchmark_results()

        # --- Header ---
        console.print()
        console.print(Panel(
            f"[bold]codebase-intel Dashboard[/bold] — {project_root.name}",
            subtitle="Live efficiency tracking",
        ))

        # --- Before/After ---
        if comparison.get("has_data"):
            console.print()
            before_after = Table(title="Before vs After — Token Efficiency")
            before_after.add_column("Metric", style="bold")
            before_after.add_column("Without\ncodebase-intel", justify="right", style="red")
            before_after.add_column("With\ncodebase-intel", justify="right", style="green")
            before_after.add_column("Improvement", justify="right", style="bold cyan")

            b = comparison["before"]
            a = comparison["after"]
            imp = comparison["improvement"]

            before_after.add_row(
                "Avg tokens / request",
                f"{b['tokens_per_request']:,}",
                f"{a['tokens_per_request']:,}",
                f"{imp['token_reduction_pct']:.0f}% fewer ({imp['multiplier']:.1f}x)",
            )
            before_after.add_row(
                "Decisions available",
                "0 (blind)",
                f"{a['decisions_available']}",
                f"{imp['decisions_that_prevented_mistakes']} surfaced",
            )
            before_after.add_row(
                "Contract enforcement",
                "0 (no guardrails)",
                f"{a['contract_checks']}",
                f"{imp['violations_caught_before_generation']} rules enforced",
            )
            before_after.add_row(
                "Drift awareness",
                "None",
                "Active",
                "Stale context detected",
            )
            before_after.add_row(
                "Knows WHY code exists",
                "[red]No[/red]",
                "[green]Yes[/green]",
                "Decision provenance",
            )
            console.print(before_after)

        # --- Lifetime stats ---
        tokens = stats["tokens"]
        quality = stats["context_quality"]

        console.print()
        lifetime = Table(title="Lifetime Statistics")
        lifetime.add_column("Metric", style="bold")
        lifetime.add_column("Value", justify="right")

        lifetime.add_row("Total context requests", f"{stats['total_requests']:,}")
        lifetime.add_row("Total tokens saved", f"[green]{tokens['total_saved']:,}[/green]")
        lifetime.add_row("Token reduction", f"[cyan]{tokens['reduction_pct']:.0f}%[/cyan]")
        lifetime.add_row("Decisions surfaced", f"{quality['decisions_surfaced']:,}")
        lifetime.add_row("Contracts applied", f"{quality['contracts_applied']:,}")
        lifetime.add_row("Drift warnings", f"{quality['drift_warnings']:,}")
        lifetime.add_row("Avg assembly time", f"{stats['performance']['avg_assembly_ms']:.0f}ms")
        console.print(lifetime)

        # --- Benchmark history (most recent 10) ---
        if benchmarks:
            console.print()
            bench_table = Table(title="Benchmark History")
            bench_table.add_column("Date", style="dim")
            bench_table.add_column("Project", style="bold")
            bench_table.add_column("Files", justify="right")
            bench_table.add_column("Nodes", justify="right")
            bench_table.add_column("Reduction", justify="right", style="cyan")
            bench_table.add_column("Build Time", justify="right")

            for b in benchmarks[:10]:
                bench_table.add_row(
                    b["timestamp"][:10],
                    b["repo_name"],
                    str(b["total_files"]),
                    str(b["total_nodes"]),
                    f"{b['avg_token_reduction_pct']:.0f}%",
                    f"{b['build_time_ms']:.0f}ms",
                )
            console.print(bench_table)

        # --- Daily trend (sparkline-style) ---
        daily = tracker.get_daily_trend(14)
        if daily:
            console.print()
            console.print("[bold]Daily Token Savings (last 14 days)[/bold]")
            # `or 1` guards the division below when every day saved 0 tokens.
            max_saved = max((d["total_naive_tokens"] - d["total_full_tokens"]) for d in daily) or 1
            for d in daily:
                saved = d["total_naive_tokens"] - d["total_full_tokens"]
                bar_len = int((saved / max_saved) * 40)
                bar = "█" * bar_len
                console.print(
                    f" {d['date']} [green]{bar}[/green] "
                    f"{saved:,} tokens saved ({d['total_requests']} requests)"
                )
    finally:
        tracker.close()
|
|
737
|
+
|
|
738
|
+
|
|
739
|
+
# Allow running the CLI directly (e.g. `python -m codebase_intel.cli.main`)
# in addition to the installed entry-point script.
if __name__ == "__main__":
    app()
|