code-context-mcp 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. code_context/__init__.py +3 -0
  2. code_context/_background.py +93 -0
  3. code_context/_composition.py +425 -0
  4. code_context/_watcher.py +89 -0
  5. code_context/adapters/__init__.py +0 -0
  6. code_context/adapters/driven/__init__.py +0 -0
  7. code_context/adapters/driven/chunker_dispatcher.py +43 -0
  8. code_context/adapters/driven/chunker_line.py +54 -0
  9. code_context/adapters/driven/chunker_treesitter.py +215 -0
  10. code_context/adapters/driven/chunker_treesitter_queries.py +111 -0
  11. code_context/adapters/driven/code_source_fs.py +122 -0
  12. code_context/adapters/driven/embeddings_local.py +111 -0
  13. code_context/adapters/driven/embeddings_openai.py +58 -0
  14. code_context/adapters/driven/git_source_cli.py +211 -0
  15. code_context/adapters/driven/introspector_fs.py +224 -0
  16. code_context/adapters/driven/keyword_index_sqlite.py +206 -0
  17. code_context/adapters/driven/reranker_crossencoder.py +61 -0
  18. code_context/adapters/driven/symbol_index_sqlite.py +264 -0
  19. code_context/adapters/driven/vector_store_numpy.py +119 -0
  20. code_context/adapters/driving/__init__.py +0 -0
  21. code_context/adapters/driving/mcp_server.py +365 -0
  22. code_context/cli.py +161 -0
  23. code_context/config.py +114 -0
  24. code_context/domain/__init__.py +0 -0
  25. code_context/domain/index_bus.py +52 -0
  26. code_context/domain/models.py +140 -0
  27. code_context/domain/ports.py +205 -0
  28. code_context/domain/use_cases/__init__.py +0 -0
  29. code_context/domain/use_cases/explain_diff.py +98 -0
  30. code_context/domain/use_cases/find_definition.py +30 -0
  31. code_context/domain/use_cases/find_references.py +22 -0
  32. code_context/domain/use_cases/get_file_tree.py +36 -0
  33. code_context/domain/use_cases/get_summary.py +24 -0
  34. code_context/domain/use_cases/indexer.py +336 -0
  35. code_context/domain/use_cases/recent_changes.py +36 -0
  36. code_context/domain/use_cases/search_repo.py +131 -0
  37. code_context/server.py +151 -0
  38. code_context_mcp-1.0.0.dist-info/METADATA +181 -0
  39. code_context_mcp-1.0.0.dist-info/RECORD +43 -0
  40. code_context_mcp-1.0.0.dist-info/WHEEL +5 -0
  41. code_context_mcp-1.0.0.dist-info/entry_points.txt +3 -0
  42. code_context_mcp-1.0.0.dist-info/licenses/LICENSE +21 -0
  43. code_context_mcp-1.0.0.dist-info/top_level.txt +1 -0
@@ -0,0 +1,52 @@
1
+ """IndexUpdateBus — minimal threadsafe pub-sub for index-swap events.
2
+
3
+ Sprint 7's background indexer runs reindex on a daemon thread and
4
+ publishes a "swap" notification to this bus when a fresh index dir
5
+ becomes the active one. Search use cases consult `generation` to
6
+ short-circuit the no-op path with an int compare; on detected drift,
7
+ they reload their store handles from the active index dir before
8
+ serving the next query.
9
+
10
+ Pure domain — no I/O. Thread safety: a single `Lock` guards
11
+ `generation` and `subscribers`. Subscriber callbacks fire OUTSIDE the
12
+ lock (so a misbehaving subscriber can't deadlock the publisher); a
13
+ bad subscriber raising an exception is logged-and-swallowed so the
14
+ publisher's contract (monotonic generation, no lost events for
15
+ well-behaved subscribers) holds.
16
+ """
17
+
18
+ from __future__ import annotations
19
+
20
+ import logging
21
+ import threading
22
+ from collections.abc import Callable
23
+
24
+ log = logging.getLogger(__name__)
25
+
26
+
27
class IndexUpdateBus:
    """Threadsafe pub-sub for index-swap notifications.

    `generation` is a monotonic counter bumped on every swap; callables
    registered through `subscribe` are invoked with the new index dir on
    each `publish_swap`. Callbacks fire after the internal lock is
    released, and a raising subscriber is logged and skipped, so one bad
    listener can neither deadlock nor break the publisher.
    """

    def __init__(self) -> None:
        self._lock = threading.Lock()
        self._gen = 0
        self._subs: list[Callable[[str], None]] = []

    @property
    def generation(self) -> int:
        """Current swap counter (0 until the first publish)."""
        with self._lock:
            return self._gen

    def subscribe(self, fn: Callable[[str], None]) -> None:
        """Register `fn` to receive the new index dir on every swap."""
        with self._lock:
            self._subs.append(fn)

    def publish_swap(self, new_index_dir: str) -> None:
        """Bump the generation and notify every current subscriber."""
        with self._lock:
            self._gen += 1
            snapshot = list(self._subs)
        # Run callbacks with the lock released — a slow subscriber must
        # not block other publishers.
        for callback in snapshot:
            try:
                callback(new_index_dir)
            except Exception:  # noqa: BLE001 - subscriber bug must not break publisher
                log.exception("IndexUpdateBus subscriber raised; continuing")
@@ -0,0 +1,140 @@
1
+ """Domain models. Pure data; no I/O.
2
+
3
+ These dataclasses are the boundary types of the application. The 3 contract
4
+ return types (SearchResult, Change, ProjectSummary) match docs/tool-protocol.md
5
+ in context-template byte-for-byte at the field level.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ from dataclasses import dataclass, field
11
+ from datetime import datetime
12
+ from pathlib import Path
13
+ from typing import Any
14
+
15
+ import numpy as np
16
+
17
+
18
@dataclass(frozen=True, slots=True)
class Chunk:
    """A piece of code (text fragment) ready to embed."""

    path: str  # source file path as given to the chunker
    line_start: int  # first line of the snippet (inclusive; see overlap test in explain_diff)
    line_end: int  # last line of the snippet (inclusive)
    content_hash: str  # sha256 of snippet, hex string
    snippet: str  # the chunk's raw text
27
+
28
+
29
@dataclass(frozen=True, slots=True)
class IndexEntry:
    """A chunk plus its embedding vector. Lives in the vector store."""

    chunk: Chunk  # the embedded text fragment
    vector: np.ndarray  # shape: (dimension,), dtype float32
35
+
36
+
37
@dataclass(frozen=True, slots=True)
class SearchResult:
    """Result of search_repo. Matches tool-protocol.md SearchResult."""

    path: str  # file containing the hit
    lines: tuple[int, int]  # (start, end) line range of the snippet
    snippet: str  # matched code excerpt
    score: float  # relevance score; stores return descending order, so higher is better
    why: str  # human-readable explanation of why this matched
46
+
47
+
48
@dataclass(frozen=True, slots=True)
class Change:
    """Result of recent_changes. Matches tool-protocol.md Change."""

    sha: str  # commit hash
    date: datetime  # commit timestamp
    author: str  # commit author
    paths: list[str]  # files touched by the commit
    summary: str  # commit message summary
57
+
58
+
59
@dataclass(frozen=True, slots=True)
class ProjectSummary:
    """Result of get_summary. Matches tool-protocol.md ProjectSummary."""

    name: str  # project name
    purpose: str  # one-line description of what the project does
    stack: list[str]  # detected languages / frameworks
    entry_points: list[str]  # where execution starts (scripts, mains, servers)
    key_modules: list[dict[str, str]] = field(default_factory=list)  # notable modules with descriptions
    stats: dict[str, Any] = field(default_factory=dict)  # free-form counters (schema set by the introspector)
69
+
70
+
71
@dataclass(frozen=True, slots=True)
class SymbolDef:
    """Result of find_definition. Matches tool-protocol.md SymbolDef (v1.1)."""

    name: str  # symbol name as declared
    path: str  # file containing the definition
    lines: tuple[int, int]  # (start, end) line range of the definition
    kind: str  # "function" | "class" | "method" | "type" | "enum" | "interface" | "struct" | ...
    language: str  # "python" | "javascript" | "typescript" | "go" | "rust" | "csharp"
80
+
81
+
82
@dataclass(frozen=True, slots=True)
class SymbolRef:
    """Result of find_references. Matches tool-protocol.md SymbolRef (v1.1)."""

    path: str  # file containing the reference
    line: int  # line number of the mention
    snippet: str  # the referencing line's text
89
+
90
+
91
@dataclass(frozen=True, slots=True)
class FileTreeNode:
    """Result of get_file_tree. Matches tool-protocol.md FileTreeNode (v1.2)."""

    path: str  # node path
    kind: str  # "file" | "dir"
    children: tuple[FileTreeNode, ...] = ()  # immutable child nodes; empty for files
    size: int | None = None  # bytes; None for dirs
99
+
100
+
101
@dataclass(frozen=True, slots=True)
class DiffFile:
    """Per-file diff hunks returned by GitSource.diff_files (v1.2 internal type)."""

    path: str  # repo-relative file path
    hunks: tuple[tuple[int, int], ...]  # (start_line, end_line) ranges in the new file
107
+
108
+
109
@dataclass(frozen=True, slots=True)
class DiffChunk:
    """Result of explain_diff. Matches tool-protocol.md DiffChunk (v1.2)."""

    path: str  # repo-relative file path
    lines: tuple[int, int]  # (start, end) line range of the affected region
    snippet: str  # chunk text; empty when the file couldn't be read (see ExplainDiffUseCase)
    kind: str  # "function" | "class" | "method" | ... | "fragment"
    change: str  # "added" | "modified" | "deleted"
118
+
119
+
120
@dataclass(frozen=True, slots=True)
class StaleSet:
    """Per-file staleness verdict driving incremental reindex (Sprint 6).

    `full_reindex_required` is the authoritative "blow it all away" flag —
    set on first run (no current index), or when a global invalidator
    changed (embeddings model id, chunker version, keyword/symbol index
    versions, metadata schema upgrade). When True, the file lists are
    advisory only; callers should ignore them and run a full reindex.

    Otherwise, `dirty_files` are absolute paths that need re-chunking +
    re-embedding (content hash drift); `deleted_files` are repo-relative
    paths that vanished since last index and whose rows must be purged
    from every store. An all-empty StaleSet with full_reindex_required=
    False is the steady-state "no work" signal.
    """

    full_reindex_required: bool  # True => ignore the file lists, rebuild everything
    reason: str  # human-readable summary for logs / `code-context status`
    dirty_files: tuple[Path, ...] = ()  # absolute paths needing re-chunk + re-embed
    deleted_files: tuple[str, ...] = ()  # repo-relative paths to purge from every store
@@ -0,0 +1,205 @@
1
+ """Driven ports — interfaces that the domain calls.
2
+
3
+ Each port is a Protocol (PEP 544 structural typing). Adapters implement them
4
+ duck-style; no inheritance required. Tests mock by writing a class that has
5
+ the same methods.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ from collections.abc import Iterable
11
+ from datetime import datetime
12
+ from pathlib import Path
13
+ from typing import Protocol
14
+
15
+ import numpy as np
16
+
17
+ from code_context.domain.models import (
18
+ Change,
19
+ Chunk,
20
+ DiffFile,
21
+ FileTreeNode,
22
+ IndexEntry,
23
+ ProjectSummary,
24
+ SymbolDef,
25
+ SymbolRef,
26
+ )
27
+
28
+
29
class EmbeddingsProvider(Protocol):
    """Embeds text. Default: LocalST (sentence-transformers)."""

    # Length of each embedding vector.
    @property
    def dimension(self) -> int: ...

    @property
    def model_id(self) -> str:
        """Identifier including library version, used for staleness detection."""

    def embed(self, texts: list[str]) -> np.ndarray:
        """Returns shape (len(texts), dimension), dtype float32."""
41
+
42
+
43
class VectorStore(Protocol):
    """Persistent vector store. Default: NumPyParquetStore."""

    # Insert entries; durability comes from a separate persist() call.
    def add(self, entries: Iterable[IndexEntry]) -> None: ...

    def search(self, query: np.ndarray, k: int) -> list[tuple[IndexEntry, float]]:
        """Returns top-k entries with cosine similarity scores, descending."""

    def delete_by_path(self, path: str) -> int:
        """Remove every entry whose chunk.path == `path`. Returns the row
        count removed. Used by incremental reindex (Sprint 6) to purge a
        file's chunks before re-adding fresh ones."""

    def persist(self, path: Path) -> None:
        """Writes vectors.npy + chunks.parquet under path/."""

    def load(self, path: Path) -> None:
        """Loads from path/."""
61
+
62
+
63
class Chunker(Protocol):
    """Splits source code text into chunks. Default: LineChunker."""

    @property
    def version(self) -> str:
        """Identifier for staleness detection."""

    # Split `content` (full file text) into Chunks attributed to `path`.
    def chunk(self, content: str, path: str) -> list[Chunk]: ...
71
+
72
+
73
class CodeSource(Protocol):
    """Lists and reads source files. Default: FilesystemSource."""

    # Enumerate indexable files under `root`, filtered by extension and size cap.
    def list_files(self, root: Path, include_exts: list[str], max_bytes: int) -> list[Path]: ...

    # Read a file as text. Callers handle OSError / UnicodeDecodeError
    # (ExplainDiffUseCase treats those as "binary or missing").
    def read(self, path: Path) -> str: ...

    def walk_tree(
        self,
        root: Path,
        max_depth: int = 4,
        include_hidden: bool = False,
        subpath: Path | None = None,
    ) -> FileTreeNode:
        """Walk the filesystem rooted at `root` (or `root/subpath` if given)
        and return a hierarchical FileTreeNode. Honors .gitignore. Skips
        binary files. Caps recursion at `max_depth`."""
90
+
91
+
92
class GitSource(Protocol):
    """Reads git state. Default: GitCliSource."""

    # True when `root` is inside a git work tree.
    def is_repo(self, root: Path) -> bool: ...

    def head_sha(self, root: Path) -> str:
        """Empty string if not a repo."""

    # List commits as Change records, optionally filtered by date and paths.
    # Ordering is the adapter's responsibility (presumably newest-first — confirm there).
    def commits(
        self,
        root: Path,
        since: datetime | None = None,
        paths: list[str] | None = None,
        max_count: int = 20,
    ) -> list[Change]: ...

    def diff_files(self, root: Path, ref: str) -> list[DiffFile]:
        """Return per-file diff hunks for the commit at `ref` (or worktree
        diff against HEAD if ref=='HEAD' is given the current behavior).
        Each DiffFile.hunks is a tuple of (start_line, end_line) ranges in
        the *new* version of the file (post-commit). Empty list if not a
        repo."""
114
+
115
+
116
class ProjectIntrospector(Protocol):
    """Builds a ProjectSummary. Default: FilesystemIntrospector."""

    # `path` narrows the summary to a subtree; GetSummaryUseCase resolves
    # relative paths against the repo root before calling in here.
    def summary(
        self, root: Path, scope: str = "project", path: Path | None = None
    ) -> ProjectSummary: ...
122
+
123
+
124
class KeywordIndex(Protocol):
    """Keyword-based index for exact-identifier search. Default: SqliteFTS5Index."""

    @property
    def version(self) -> str:
        """Identifier for staleness detection."""

    # Bulk-insert chunk rows for full-text matching.
    def add(self, entries: Iterable[IndexEntry]) -> None: ...

    def search(self, query: str, k: int) -> list[tuple[IndexEntry, float]]:
        """Returns top-k entries with BM25-style scores, descending."""

    def delete_by_path(self, path: str) -> int:
        """Remove every row whose path == `path`. Returns the row count
        removed. Used by incremental reindex (Sprint 6)."""

    # Write / read the on-disk index under `path`.
    def persist(self, path: Path) -> None: ...

    def load(self, path: Path) -> None: ...
143
+
144
+
145
class Reranker(Protocol):
    """Re-orders search candidates with a more accurate model. Optional."""

    # Identifier for staleness detection (mirrors the other ports).
    @property
    def version(self) -> str: ...

    # Model identifier, e.g. for logging / cache keys — confirm in adapter.
    @property
    def model_id(self) -> str: ...

    def rerank(
        self,
        query: str,
        candidates: list[tuple[IndexEntry, float]],
        k: int,
    ) -> list[tuple[IndexEntry, float]]:
        """Returns the top-k candidates re-scored by the reranker, descending."""
161
+
162
+
163
class SymbolIndex(Protocol):
    """Index of named symbols (definitions + textual references).

    Definitions come from the chunker's AST extraction (see
    TreeSitterChunker.extract_definitions in v0.5.0). References are derived
    from the keyword index's snippet text — they share an on-disk file in
    the default SQLite-backed adapter to avoid duplicate I/O.
    """

    @property
    def version(self) -> str:
        """Identifier for staleness detection."""

    # Bulk-insert definition rows produced by AST extraction.
    def add_definitions(self, defs: Iterable[SymbolDef]) -> None: ...

    def add_references(self, refs: Iterable[tuple[str, int, str]]) -> None:
        """Bulk-insert reference rows: (path, line, snippet) triples.

        Snippet text is full-text-indexed; path and line are stored verbatim.
        IndexerUseCase feeds chunks here so find_references has rows to match
        against. Adapters that don't track references (e.g., a null adapter)
        may no-op.
        """

    def find_definition(
        self,
        name: str,
        language: str | None = None,
        max_count: int = 5,
    ) -> list[SymbolDef]:
        """Returns symbol definitions matching `name`, optionally filtered by language."""

    def find_references(self, name: str, max_count: int = 50) -> list[SymbolRef]:
        """Returns lines mentioning `name` as a whole-word match (no `log` → `logger`)."""

    def delete_by_path(self, path: str) -> int:
        """Remove every definition AND reference row whose path == `path`.
        Returns the total row count removed across both tables. Used by
        incremental reindex (Sprint 6)."""

    # Write / read the on-disk index under `path`.
    def persist(self, path: Path) -> None: ...

    def load(self, path: Path) -> None: ...
File without changes
@@ -0,0 +1,98 @@
1
+ """ExplainDiffUseCase — combines GitSource.diff_files with the chunker.
2
+
3
+ For each diff hunk in `ref`, find the AST-aligned chunk that contains
4
+ the affected lines. If the chunker produced no chunks for a file (e.g.
5
+ it's a binary file or an unsupported language), emit a "fragment" chunk
6
+ with the raw line range — caller can still see WHAT changed even if
7
+ not at AST granularity.
8
+ """
9
+
10
+ from __future__ import annotations
11
+
12
+ from dataclasses import dataclass
13
+ from pathlib import Path
14
+
15
+ from code_context.domain.models import DiffChunk
16
+ from code_context.domain.ports import Chunker, CodeSource, GitSource
17
+
18
+
19
@dataclass
class ExplainDiffUseCase:
    """Use case for the explain_diff MCP tool.

    For each diff hunk in `ref`, finds the AST-aligned chunk(s) containing
    the affected lines. Falls back to a "fragment" chunk (raw line range)
    when the file can't be read or a hunk doesn't overlap any chunk, so
    the caller still sees WHAT changed even without AST granularity.
    """

    chunker: Chunker
    code_source: CodeSource
    git_source: GitSource
    repo_root: Path  # absolute repository root; diff paths are joined onto it

    def run(self, ref: str, max_chunks: int = 50) -> list[DiffChunk]:
        """Return at most `max_chunks` DiffChunks describing the diff at `ref`."""
        diff_files = self.git_source.diff_files(self.repo_root, ref)
        results: list[DiffChunk] = []
        # Dedup guard: several hunks may map to the same chunk or raw range;
        # each (path, range) is emitted at most once.
        seen: set[tuple[str, int, int]] = set()  # (path, line_start, line_end)

        for diff_file in diff_files:
            file_path = self.repo_root / diff_file.path
            try:
                content = self.code_source.read(file_path)
            except (OSError, UnicodeDecodeError):
                # Likely binary or deleted in HEAD. Emit raw-line fragments.
                # Snippet stays empty — there is no readable new-file content.
                for hunk_start, hunk_end in diff_file.hunks:
                    key = (diff_file.path, hunk_start, hunk_end)
                    if key in seen:
                        continue
                    seen.add(key)
                    results.append(
                        DiffChunk(
                            path=diff_file.path,
                            lines=(hunk_start, hunk_end),
                            snippet="",
                            kind="fragment",
                            change="modified",
                        )
                    )
                    if len(results) >= max_chunks:
                        return results
                continue

            chunks = self.chunker.chunk(content, diff_file.path)
            for hunk_start, hunk_end in diff_file.hunks:
                # Find AST chunks whose line range overlaps the hunk.
                overlapping = [
                    c for c in chunks if c.line_start <= hunk_end and c.line_end >= hunk_start
                ]
                if not overlapping:
                    # Hunk fell between chunks (e.g., top-of-file imports);
                    # emit a fragment with the raw line range.
                    key = (diff_file.path, hunk_start, hunk_end)
                    if key in seen:
                        continue
                    seen.add(key)
                    # Hunk lines are 1-based and inclusive, hence the -1 slice.
                    snippet_lines = content.splitlines()[hunk_start - 1 : hunk_end]
                    results.append(
                        DiffChunk(
                            path=diff_file.path,
                            lines=(hunk_start, hunk_end),
                            snippet="\n".join(snippet_lines),
                            kind="fragment",
                            change="modified",
                        )
                    )
                else:
                    for chunk in overlapping:
                        key = (diff_file.path, chunk.line_start, chunk.line_end)
                        if key in seen:
                            continue
                        seen.add(key)
                        results.append(
                            DiffChunk(
                                path=diff_file.path,
                                lines=(chunk.line_start, chunk.line_end),
                                snippet=chunk.snippet,
                                kind="function",  # Chunker doesn't expose node-level kind;
                                # tree-sitter would give more granularity but
                                # Chunker port doesn't expose it. v0.8 follow-up.
                                change="modified",
                            )
                        )
                # Cap is checked per-hunk, so `results` may briefly exceed
                # max_chunks inside the overlapping loop; the final slice
                # below enforces the contract either way.
                if len(results) >= max_chunks:
                    return results

        return results[:max_chunks]
@@ -0,0 +1,30 @@
1
+ """FindDefinitionUseCase — delegates to SymbolIndex."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from dataclasses import dataclass
6
+
7
+ from code_context.domain.models import SymbolDef
8
+ from code_context.domain.ports import SymbolIndex
9
+
10
+
11
@dataclass
class FindDefinitionUseCase:
    """Use case behind the find_definition MCP tool.

    A pure pass-through: the SymbolIndex adapter owns ranking, language
    filtering, and the max-count cap. This shim exists only so the MCP
    driving adapter never touches a port directly (same shape as
    RecentChangesUseCase and GetSummaryUseCase).
    """

    symbol_index: SymbolIndex

    def run(
        self,
        name: str,
        language: str | None = None,
        max_count: int = 5,
    ) -> list[SymbolDef]:
        # Forward verbatim — all semantics live in the adapter.
        index = self.symbol_index
        return index.find_definition(name, language=language, max_count=max_count)
@@ -0,0 +1,22 @@
1
+ """FindReferencesUseCase — delegates to SymbolIndex."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from dataclasses import dataclass
6
+
7
+ from code_context.domain.models import SymbolRef
8
+ from code_context.domain.ports import SymbolIndex
9
+
10
+
11
@dataclass
class FindReferencesUseCase:
    """Use case behind the find_references MCP tool.

    Pure pass-through to SymbolIndex.find_references; whole-word matching
    and result ordering are entirely the adapter's concern.
    """

    symbol_index: SymbolIndex

    def run(self, name: str, max_count: int = 50) -> list[SymbolRef]:
        # Forward verbatim — no domain logic lives at this layer.
        index = self.symbol_index
        return index.find_references(name, max_count=max_count)
@@ -0,0 +1,36 @@
1
+ """GetFileTreeUseCase — delegates to CodeSource."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from dataclasses import dataclass
6
+ from pathlib import Path
7
+
8
+ from code_context.domain.models import FileTreeNode
9
+ from code_context.domain.ports import CodeSource
10
+
11
+
12
@dataclass
class GetFileTreeUseCase:
    """Use case behind the get_file_tree MCP tool.

    Converts the MCP string `path` argument into a Path and delegates to
    CodeSource.walk_tree; the MCP server flattens the resulting
    FileTreeNode tree into JSON.
    """

    code_source: CodeSource
    repo_root: Path

    def run(
        self,
        path: str | None = None,
        max_depth: int = 4,
        include_hidden: bool = False,
    ) -> FileTreeNode:
        # Empty/None path means "whole repo" — walk from the root.
        sub: Path | None = None
        if path:
            sub = Path(path)
        return self.code_source.walk_tree(
            self.repo_root,
            max_depth=max_depth,
            include_hidden=include_hidden,
            subpath=sub,
        )
@@ -0,0 +1,24 @@
1
+ """GetSummaryUseCase — delegates to ProjectIntrospector."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from dataclasses import dataclass
6
+ from pathlib import Path
7
+
8
+ from code_context.domain.models import ProjectSummary
9
+ from code_context.domain.ports import ProjectIntrospector
10
+
11
+
12
@dataclass
class GetSummaryUseCase:
    """Use case behind the get_summary MCP tool.

    Resolves a repo-relative `path` against `repo_root` before delegating
    to the introspector, so callers from any CWD (smoke harness, CLI,
    MCP) see identical behavior. Absolute paths pass through unchanged.
    """

    introspector: ProjectIntrospector
    repo_root: Path

    def run(self, scope: str = "project", path: Path | None = None) -> ProjectSummary:
        resolved = path
        if resolved is not None and not resolved.is_absolute():
            # Anchor relative paths here so introspectors stay path-agnostic.
            resolved = self.repo_root / resolved
        return self.introspector.summary(self.repo_root, scope=scope, path=resolved)
+ return self.introspector.summary(self.repo_root, scope=scope, path=path)