codebase-intel 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36) hide show
  1. codebase_intel/__init__.py +3 -0
  2. codebase_intel/analytics/__init__.py +1 -0
  3. codebase_intel/analytics/benchmark.py +406 -0
  4. codebase_intel/analytics/feedback.py +496 -0
  5. codebase_intel/analytics/tracker.py +439 -0
  6. codebase_intel/cli/__init__.py +1 -0
  7. codebase_intel/cli/main.py +740 -0
  8. codebase_intel/contracts/__init__.py +1 -0
  9. codebase_intel/contracts/auto_generator.py +438 -0
  10. codebase_intel/contracts/evaluator.py +531 -0
  11. codebase_intel/contracts/models.py +433 -0
  12. codebase_intel/contracts/registry.py +225 -0
  13. codebase_intel/core/__init__.py +1 -0
  14. codebase_intel/core/config.py +248 -0
  15. codebase_intel/core/exceptions.py +454 -0
  16. codebase_intel/core/types.py +375 -0
  17. codebase_intel/decisions/__init__.py +1 -0
  18. codebase_intel/decisions/miner.py +297 -0
  19. codebase_intel/decisions/models.py +302 -0
  20. codebase_intel/decisions/store.py +411 -0
  21. codebase_intel/drift/__init__.py +1 -0
  22. codebase_intel/drift/detector.py +443 -0
  23. codebase_intel/graph/__init__.py +1 -0
  24. codebase_intel/graph/builder.py +391 -0
  25. codebase_intel/graph/parser.py +1232 -0
  26. codebase_intel/graph/query.py +377 -0
  27. codebase_intel/graph/storage.py +736 -0
  28. codebase_intel/mcp/__init__.py +1 -0
  29. codebase_intel/mcp/server.py +710 -0
  30. codebase_intel/orchestrator/__init__.py +1 -0
  31. codebase_intel/orchestrator/assembler.py +649 -0
  32. codebase_intel-0.1.0.dist-info/METADATA +361 -0
  33. codebase_intel-0.1.0.dist-info/RECORD +36 -0
  34. codebase_intel-0.1.0.dist-info/WHEEL +4 -0
  35. codebase_intel-0.1.0.dist-info/entry_points.txt +2 -0
  36. codebase_intel-0.1.0.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,710 @@
1
+ """MCP Server — the agent-facing interface for codebase intelligence.
2
+
3
+ This is the primary integration point. AI agents connect via MCP and
4
+ call tools to get structured context for their tasks.
5
+
6
+ Design principles:
7
+ - Every tool is self-describing (clear name, description, parameter docs)
8
+ - Tools return structured JSON (not raw text) for reliable parsing
9
+ - Errors are returned as structured objects, never thrown as exceptions
10
+ - Response sizes are bounded (no unbounded result sets)
11
+ - Tools are stateless per-request (state lives in storage layer)
12
+
13
+ Edge cases:
14
+ - MCP client connects before init: tools return "not initialized" error
15
+ with instructions to run `codebase-intel init`
16
+ - Concurrent tool calls: safe because storage uses WAL mode and
17
+ tool handlers are independently scoped
18
+ - Large response payload: capped at configurable max size, truncated
19
+ with continuation token (future)
20
+ - Client disconnects mid-request: handled by MCP framework
21
+ - Invalid parameters: Pydantic validation on inputs, structured error on failure
22
+ """
23
+
24
+ from __future__ import annotations
25
+
26
+ import json
27
+ import logging
28
+ from pathlib import Path
29
+ from typing import Any
30
+
31
+ from mcp.server import Server
32
+ from mcp.server.stdio import stdio_server
33
+ from mcp.types import TextContent, Tool
34
+
35
+ from codebase_intel.core.config import ProjectConfig
36
+ from codebase_intel.core.types import TokenBudget
37
+
38
+ logger = logging.getLogger(__name__)
39
+
40
+
41
def create_server(project_root: Path | None = None) -> Server:
    """Create and configure the MCP server with all tools.

    The server exposes these tools:
    1. get_context — Main tool: assemble context for a task
    2. query_graph — Query the code graph directly
    3. get_decisions — Get decisions relevant to files/tags
    4. get_contracts — Get applicable quality contracts
    5. check_drift — Run drift detection
    6. impact_analysis — What's affected by changes to these files?
    7. get_status — Health check and component status

    Args:
        project_root: Project directory to serve; defaults to the
            current working directory.

    Returns:
        A configured ``mcp.server.Server`` ready to be run over stdio.
    """
    server = Server("codebase-intel")
    _root = project_root or Path.cwd()

    # Lazy initialization — components are created on first use.
    # All per-server state lives in this closure dict so tool handlers
    # stay stateless per-request.
    _state: dict[str, Any] = {"initialized": False}

    async def _ensure_initialized() -> dict[str, Any]:
        """Initialize components lazily on first tool call.

        Edge case: init might fail partially (graph OK, decisions dir missing).
        We track what's available and what's not, and tools degrade gracefully.

        Returns:
            The shared ``_state`` dict; contains an "error" key only when
            config loading itself failed.
        """
        if _state["initialized"]:
            return _state

        try:
            config = ProjectConfig(project_root=_root)
        except Exception as exc:
            # Config failure is cached as terminal: initialized is set True,
            # so every later call reports the same error instead of retrying.
            _state["error"] = f"Configuration error: {exc}"
            _state["initialized"] = True
            return _state

        _state["config"] = config

        # Initialize graph — open persistent connection for the server lifetime
        try:
            from codebase_intel.graph.storage import GraphStorage

            if config.graph.db_path.exists():
                import aiosqlite

                db = await aiosqlite.connect(str(config.graph.db_path))
                # WAL allows concurrent readers while the indexer writes;
                # busy_timeout avoids immediate "database is locked" errors.
                await db.execute("PRAGMA journal_mode=WAL")
                await db.execute("PRAGMA busy_timeout=5000")
                await db.execute("PRAGMA foreign_keys=ON")
                storage = GraphStorage(db, config.project_root)
                # NOTE(review): reaches into a private method of GraphStorage;
                # a public ensure_schema() API would be cleaner — confirm intent.
                await storage._ensure_schema()

                from codebase_intel.graph.query import GraphQueryEngine

                _state["graph_storage"] = storage
                _state["graph_engine"] = GraphQueryEngine(storage)
                # Keep the raw connection around so it can be closed later.
                _state["graph_db"] = db
                _state["graph_available"] = True
            else:
                # No DB file yet — the project was never analyzed.
                _state["graph_available"] = False
        except Exception as exc:
            logger.warning("Graph initialization failed: %s", exc)
            _state["graph_available"] = False

        # Initialize decisions — failure here only disables decision tools.
        try:
            from codebase_intel.decisions.store import DecisionStore

            store = DecisionStore(config.decisions, config.project_root)
            _state["decisions"] = store
            _state["decisions_available"] = True
        except Exception:
            _state["decisions_available"] = False

        # Initialize contracts — failure here only disables contract tools.
        try:
            from codebase_intel.contracts.registry import ContractRegistry

            registry = ContractRegistry(config.contracts, config.project_root)
            registry.load()
            _state["contracts"] = registry
            _state["contracts_available"] = True
        except Exception:
            _state["contracts_available"] = False

        _state["initialized"] = True
        return _state
126
+
127
+ # -------------------------------------------------------------------
128
+ # Tool registration — all tool declarations live in list_tools below
129
+ # -------------------------------------------------------------------
130
+
131
@server.list_tools()
async def list_tools() -> list[Tool]:
    """Declare all MCP tools and their JSON-schema input contracts.

    Descriptions are agent-facing: each says when to use the tool, not
    only what it does. Schemas are plain JSON Schema dicts as required
    by the MCP ``Tool`` type.
    """
    return [
        # Primary tool: budget-bounded context assembly for a task.
        Tool(
            name="get_context",
            description=(
                "Assemble relevant context for a coding task. Gathers related files, "
                "architectural decisions, and quality contracts within a token budget. "
                "This is the main tool — use it before starting work on any task."
            ),
            inputSchema={
                "type": "object",
                "properties": {
                    "task": {
                        "type": "string",
                        "description": "What you're trying to do (e.g., 'add rate limiting to payment endpoint')",
                    },
                    "files": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "File paths you're working on (relative to project root)",
                    },
                    "symbols": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "Specific function/class names you're modifying",
                    },
                    "budget_tokens": {
                        "type": "integer",
                        "description": "Max tokens for context (default: 8000)",
                        "default": 8000,
                    },
                },
                "required": ["task"],
            },
        ),
        # Direct graph access: dependency/dependent/impact/stats queries.
        Tool(
            name="query_graph",
            description=(
                "Query the semantic code graph. Find dependencies, dependents, "
                "or run impact analysis for changed files."
            ),
            inputSchema={
                "type": "object",
                "properties": {
                    "query_type": {
                        "type": "string",
                        "enum": ["dependencies", "dependents", "impact", "stats"],
                        "description": "Type of graph query",
                    },
                    "files": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "File paths to query",
                    },
                    "symbol": {
                        "type": "string",
                        "description": "Symbol name to search for",
                    },
                    "max_depth": {
                        "type": "integer",
                        "description": "Max traversal depth (default: 2)",
                        "default": 2,
                    },
                },
                "required": ["query_type"],
            },
        ),
        # Decision journal lookups by id, files, or tags.
        Tool(
            name="get_decisions",
            description=(
                "Get architectural and business decisions relevant to specific files or tags. "
                "Includes the rationale, constraints, and alternatives considered."
            ),
            inputSchema={
                "type": "object",
                "properties": {
                    "files": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "File paths to find relevant decisions for",
                    },
                    "tags": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "Decision tags to filter by (e.g., 'architecture', 'security')",
                    },
                    "decision_id": {
                        "type": "string",
                        "description": "Get a specific decision by ID",
                    },
                    # NOTE(review): include_stale is advertised here but the
                    # get_decisions handler never reads it — confirm before
                    # clients rely on stale filtering.
                    "include_stale": {
                        "type": "boolean",
                        "description": "Include stale decisions (default: true)",
                        "default": True,
                    },
                },
            },
        ),
        # Quality contracts applicable to given files.
        Tool(
            name="get_contracts",
            description=(
                "Get quality contracts applicable to specific files. Returns rules "
                "the code must follow, including AI-specific anti-pattern checks."
            ),
            inputSchema={
                "type": "object",
                "properties": {
                    "files": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "File paths to get contracts for",
                    },
                    "contract_id": {
                        "type": "string",
                        "description": "Get a specific contract by ID",
                    },
                },
            },
        ),
        # Freshness check for decisions / anchors / graph data.
        Tool(
            name="check_drift",
            description=(
                "Run drift detection to find stale decisions, orphaned code anchors, "
                "and outdated graph data. Use this to verify context freshness."
            ),
            inputSchema={
                "type": "object",
                "properties": {
                    "files": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "Check drift for specific files (faster than full check)",
                    },
                    "full": {
                        "type": "boolean",
                        "description": "Run full drift check across all records",
                        "default": False,
                    },
                },
            },
        ),
        # Reverse-dependency analysis for a proposed change set.
        Tool(
            name="impact_analysis",
            description=(
                "Analyze what's affected by changes to specific files. "
                "Returns a list of files and functions that depend on the changed code."
            ),
            inputSchema={
                "type": "object",
                "properties": {
                    "changed_files": {
                        "type": "array",
                        "items": {"type": "string"},
                        "description": "Files that were changed or will be changed",
                    },
                    "max_depth": {
                        "type": "integer",
                        "description": "Max dependency depth to check (default: 3)",
                        "default": 3,
                    },
                },
                "required": ["changed_files"],
            },
        ),
        # Component health / readiness probe; takes no parameters.
        Tool(
            name="get_status",
            description="Get health status of all codebase-intel components.",
            inputSchema={
                "type": "object",
                "properties": {},
            },
        ),
    ]
305
+
306
@server.call_tool()
async def call_tool(name: str, arguments: dict[str, Any]) -> list[TextContent]:
    """Route a tool call to its handler and serialize the result.

    Every call follows the same pattern:
    1. Ensure components are initialized
    2. Dispatch to the named handler
    3. Convert any exception into a structured error object
    4. Return the result as a single JSON text payload
    """
    state = await _ensure_initialized()

    # Config never loaded — every tool reports the same terminal error.
    if "error" in state:
        payload = json.dumps(
            {"error": state["error"], "suggestion": "Run `codebase-intel init`"}
        )
        return [TextContent(type="text", text=payload)]

    # Dispatch table keeps routing declarative; handlers share a signature.
    dispatch = {
        "get_context": _handle_get_context,
        "query_graph": _handle_query_graph,
        "get_decisions": _handle_get_decisions,
        "get_contracts": _handle_get_contracts,
        "check_drift": _handle_check_drift,
        "impact_analysis": _handle_impact_analysis,
        "get_status": _handle_get_status,
    }
    handler = dispatch.get(name)

    try:
        if handler is None:
            result: dict[str, Any] = {"error": f"Unknown tool: {name}"}
        else:
            result = await handler(state, arguments)
    except Exception as exc:
        # Tools must never raise to the client — always a structured error.
        logger.exception("Tool %s failed", name)
        result = {
            "error": str(exc),
            "tool": name,
            "suggestion": "Check logs for details",
        }

    # default=str covers Paths and other non-JSON-native values.
    return [TextContent(type="text", text=json.dumps(result, default=str, indent=2))]
350
+
351
+ # -------------------------------------------------------------------
352
+ # Tool handlers
353
+ # -------------------------------------------------------------------
354
+
355
async def _handle_get_context(
    state: dict[str, Any], args: dict[str, Any]
) -> dict[str, Any]:
    """Handle the main get_context tool: budget-bounded context assembly."""
    from codebase_intel.orchestrator.assembler import ContextAssembler

    config = state["config"]
    requested_files = [Path(f) for f in args.get("files", [])]
    default_budget = config.orchestrator.default_budget_tokens
    budget = TokenBudget(total=args.get("budget_tokens", default_budget))

    # Wire in whichever components initialized successfully; unavailable
    # sources are passed as None — presumably the assembler tolerates that
    # (matches the lazy-init design above).
    assembler = ContextAssembler(
        config=config.orchestrator,
        graph_engine=state.get("graph_engine"),
        decision_store=state.get("decisions"),
        contract_registry=state.get("contracts"),
    )

    result = await assembler.assemble(
        task_description=args["task"],
        file_paths=requested_files or None,  # empty list means "no file hint"
        symbol_names=args.get("symbols"),
        budget=budget,
    )

    serialized_items = []
    for item in result.items:
        serialized_items.append({
            "source": item.source,
            "type": item.item_type,
            "priority": item.priority.value,
            "content": item.content,
            "tokens": item.estimated_tokens,
            "freshness": item.freshness_score,
        })

    return {
        "items": serialized_items,
        "summary": {
            "total_tokens": result.total_tokens,
            "budget_tokens": result.budget_tokens,
            "items_included": len(result.items),
            "items_dropped": result.dropped_count,
            "truncated": result.truncated,
            "assembly_time_ms": round(result.assembly_time_ms, 1),
        },
        "warnings": result.warnings,
        "conflicts": result.conflicts,
    }
403
+
404
+ async def _handle_query_graph(
405
+ state: dict[str, Any], args: dict[str, Any]
406
+ ) -> dict[str, Any]:
407
+ """Handle graph queries."""
408
+ if not state.get("graph_available"):
409
+ return {
410
+ "error": "Code graph not available",
411
+ "suggestion": "Run `codebase-intel init` then `codebase-intel analyze`",
412
+ }
413
+
414
+ query_type = args["query_type"]
415
+ engine = state["graph_engine"]
416
+ storage = state["graph_storage"]
417
+
418
+ if query_type == "stats":
419
+ stats = await storage.get_stats()
420
+ return {"stats": stats}
421
+
422
+ if query_type == "dependencies":
423
+ if "symbol" in args:
424
+ result = await engine.query_by_symbol(args["symbol"], include_depth=args.get("max_depth", 2))
425
+ elif "files" in args:
426
+ result = await engine.query_by_files(
427
+ [Path(f) for f in args["files"]],
428
+ include_depth=args.get("max_depth", 2),
429
+ )
430
+ else:
431
+ return {"error": "Provide 'files' or 'symbol' for dependency queries"}
432
+
433
+ return {
434
+ "nodes": [
435
+ {
436
+ "name": n.qualified_name,
437
+ "kind": n.kind.value,
438
+ "file": str(n.file_path),
439
+ "lines": f"{n.line_range.start}-{n.line_range.end}" if n.line_range else None,
440
+ "priority": result.priorities.get(n.node_id, "low"),
441
+ "reason": result.explanations.get(n.node_id, ""),
442
+ }
443
+ for n in result.nodes[:50]
444
+ ],
445
+ "total": len(result.nodes),
446
+ "truncated": result.truncated,
447
+ "warnings": result.warnings,
448
+ }
449
+
450
+ if query_type == "dependents":
451
+ if "files" not in args:
452
+ return {"error": "Provide 'files' for dependent queries"}
453
+ result = await engine.query_impact(
454
+ [Path(f) for f in args["files"]],
455
+ max_depth=args.get("max_depth", 2),
456
+ )
457
+ return {
458
+ "nodes": [
459
+ {
460
+ "name": n.qualified_name,
461
+ "kind": n.kind.value,
462
+ "file": str(n.file_path),
463
+ "priority": result.priorities.get(n.node_id, "low"),
464
+ "reason": result.explanations.get(n.node_id, ""),
465
+ }
466
+ for n in result.nodes[:50]
467
+ ],
468
+ "total": len(result.nodes),
469
+ "truncated": result.truncated,
470
+ "warnings": result.warnings,
471
+ }
472
+
473
+ if query_type == "impact":
474
+ if "files" not in args:
475
+ return {"error": "Provide 'files' for impact analysis"}
476
+ impact_map = await storage.impact_analysis(
477
+ [Path(f) for f in args["files"]],
478
+ max_depth=args.get("max_depth", 3),
479
+ )
480
+ return {
481
+ "impact": {
482
+ fp: [
483
+ {"name": n.qualified_name, "kind": n.kind.value, "file": str(n.file_path)}
484
+ for n in nodes[:20]
485
+ ]
486
+ for fp, nodes in impact_map.items()
487
+ },
488
+ "total_affected": sum(len(ns) for ns in impact_map.values()),
489
+ }
490
+
491
+ return {"error": f"Unknown query_type: {query_type}"}
492
+
493
async def _handle_get_decisions(
    state: dict[str, Any], args: dict[str, Any]
) -> dict[str, Any]:
    """Handle decision queries.

    Resolution order: explicit decision_id, then files, then tags, then
    "all". Only the first matching argument is used.

    NOTE(review): the get_decisions tool schema advertises an
    `include_stale` flag, but this handler never reads it — stale
    filtering appears unimplemented; confirm against DecisionStore.
    """
    store = state.get("decisions")
    if not store:
        return {
            "error": "Decision journal not available",
            "suggestion": "Run `codebase-intel init` to create the decisions directory",
        }

    # Specific decision by ID — verbose single-record view.
    if "decision_id" in args:
        record = await store.get(args["decision_id"])
        if record:
            return {"decision": record.to_context_string(verbose=True)}
        return {"error": f"Decision '{args['decision_id']}' not found"}

    # Query by files — relevance-scored; only the 10 best are returned,
    # and low-relevance records (< 0.5) get the compact rendering.
    if "files" in args:
        file_paths = {Path(f) for f in args["files"]}
        scored = await store.query_by_files(file_paths)
        return {
            "decisions": [
                {
                    "id": record.id,
                    "title": record.title,
                    "relevance": round(score, 2),
                    "status": record.status.value,
                    "content": record.to_context_string(verbose=score >= 0.5),
                }
                for record, score in scored[:10]
            ],
            "total_found": len(scored),
        }

    # Query by tags — summary view only, capped at 20 records.
    if "tags" in args:
        records = await store.query_by_tags(args["tags"])
        return {
            "decisions": [
                {"id": r.id, "title": r.title, "tags": r.tags}
                for r in records[:20]
            ],
        }

    # No filter given — return a summary of every decision on record.
    all_records = await store.load_all()
    return {
        "decisions": [
            {"id": r.id, "title": r.title, "status": r.status.value}
            for r in all_records
        ],
        "total": len(all_records),
    }
548
+
549
+ async def _handle_get_contracts(
550
+ state: dict[str, Any], args: dict[str, Any]
551
+ ) -> dict[str, Any]:
552
+ """Handle contract queries."""
553
+ registry = state.get("contracts")
554
+ if not registry:
555
+ return {"error": "Contract registry not available"}
556
+
557
+ if "contract_id" in args:
558
+ contract = registry.get(args["contract_id"])
559
+ if contract:
560
+ return {"contract": contract.to_context_string(verbose=True)}
561
+ return {"error": f"Contract '{args['contract_id']}' not found"}
562
+
563
+ if "files" in args:
564
+ all_contracts = []
565
+ for f in args["files"]:
566
+ applicable = registry.get_for_file(Path(f))
567
+ for c in applicable:
568
+ if c.id not in {ac["id"] for ac in all_contracts}:
569
+ all_contracts.append({
570
+ "id": c.id,
571
+ "name": c.name,
572
+ "priority": c.priority,
573
+ "rule_count": len(c.rules),
574
+ "content": c.to_context_string(verbose=True),
575
+ })
576
+ return {"contracts": all_contracts}
577
+
578
+ # Return all
579
+ all_contracts = registry.get_all()
580
+ return {
581
+ "contracts": [
582
+ {"id": c.id, "name": c.name, "rules": len(c.rules), "builtin": c.is_builtin}
583
+ for c in all_contracts
584
+ ],
585
+ }
586
+
587
async def _handle_check_drift(
    state: dict[str, Any], args: dict[str, Any]
) -> dict[str, Any]:
    """Handle drift detection, scoped to specific files or a full sweep."""
    from codebase_intel.drift.detector import DriftDetector

    config = state["config"]
    detector = DriftDetector(
        config=config.drift,
        project_root=config.project_root,
        graph_storage=state.get("graph_storage"),
        decision_store=state.get("decisions"),
    )

    # A scoped check runs only when explicit files were given and no full
    # sweep was requested; every other combination means a full check.
    run_scoped = "files" in args and not args.get("full")
    if run_scoped:
        report = await detector.check_files([Path(f) for f in args["files"]])
    else:
        report = await detector.full_check()

    # Cap the finding list to keep the response payload bounded.
    findings = [
        {
            "component": finding.component,
            "level": finding.level.value,
            "description": finding.description,
            "remediation": finding.remediation,
        }
        for finding in report.items[:50]
    ]

    return {
        "summary": report.summary,
        "overall_level": report.overall_level.value,
        "items": findings,
        "stats": {
            "graph_stale_files": report.graph_stale_files,
            "decision_stale": report.decision_stale_count,
            "decision_orphaned": report.decision_orphaned_count,
            "rot_detected": report.rot_detected,
            "rot_percentage": round(report.rot_percentage, 2),
        },
    }
628
+
629
+ async def _handle_impact_analysis(
630
+ state: dict[str, Any], args: dict[str, Any]
631
+ ) -> dict[str, Any]:
632
+ """Handle impact analysis."""
633
+ if not state.get("graph_available"):
634
+ return {"error": "Code graph not available", "suggestion": "Run `codebase-intel init`"}
635
+
636
+ storage = state["graph_storage"]
637
+ changed = [Path(f) for f in args["changed_files"]]
638
+ max_depth = args.get("max_depth", 3)
639
+
640
+ impact_map = await storage.impact_analysis(changed, max_depth=max_depth)
641
+
642
+ # Also find relevant decisions for changed files
643
+ decision_context: list[dict[str, Any]] = []
644
+ if state.get("decisions"):
645
+ scored = await state["decisions"].query_by_files(set(changed))
646
+ decision_context = [
647
+ {"id": r.id, "title": r.title, "relevance": round(s, 2)}
648
+ for r, s in scored[:5]
649
+ ]
650
+
651
+ return {
652
+ "changed_files": [str(f) for f in changed],
653
+ "impact": {
654
+ fp: [
655
+ {"name": n.qualified_name, "kind": n.kind.value, "file": str(n.file_path)}
656
+ for n in nodes[:20]
657
+ ]
658
+ for fp, nodes in impact_map.items()
659
+ },
660
+ "total_affected": sum(len(ns) for ns in impact_map.values()),
661
+ "related_decisions": decision_context,
662
+ }
663
+
664
+ async def _handle_get_status(
665
+ state: dict[str, Any], _args: dict[str, Any]
666
+ ) -> dict[str, Any]:
667
+ """Return component health status."""
668
+ config = state.get("config")
669
+ root = str(config.project_root) if config else str(_root)
670
+
671
+ status: dict[str, Any] = {
672
+ "project_root": root,
673
+ "components": {
674
+ "graph": "available" if state.get("graph_available") else "not_initialized",
675
+ "decisions": "available" if state.get("decisions_available") else "not_initialized",
676
+ "contracts": "available" if state.get("contracts_available") else "not_initialized",
677
+ },
678
+ "version": "0.1.0",
679
+ }
680
+
681
+ # Add graph stats if available
682
+ if state.get("graph_available"):
683
+ storage = state["graph_storage"]
684
+ stats = await storage.get_stats()
685
+ status["graph_stats"] = stats
686
+
687
+ # Add decision count if available
688
+ if state.get("decisions"):
689
+ all_decisions = await state["decisions"].load_all()
690
+ status["decision_count"] = len(all_decisions)
691
+
692
+ # Add contract count if available
693
+ if state.get("contracts"):
694
+ all_contracts = state["contracts"].get_all()
695
+ status["contract_count"] = len(all_contracts)
696
+ status["builtin_contracts"] = sum(1 for c in all_contracts if c.is_builtin)
697
+
698
+ return status
699
+
700
+ return server
701
+
702
+
703
async def run_server(project_root: Path | None = None) -> None:
    """Run the MCP server over stdio.

    Entry point for `codebase-intel serve`. Blocks until the client
    disconnects or the stdio streams close.
    """
    mcp_server = create_server(project_root)
    async with stdio_server() as (reader, writer):
        init_options = mcp_server.create_initialization_options()
        await mcp_server.run(reader, writer, init_options)