mcp-kb 0.2.0__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mcp_kb/config.py +7 -5
- {mcp_kb-0.2.0.dist-info → mcp_kb-0.3.0.dist-info}/METADATA +3 -1
- mcp_kb-0.3.0.dist-info/RECORD +7 -0
- mcp_kb/cli/__init__.py +0 -1
- mcp_kb/cli/args.py +0 -153
- mcp_kb/cli/main.py +0 -116
- mcp_kb/cli/reindex.py +0 -91
- mcp_kb/data/KNOWLEDBASE_DOC.md +0 -36
- mcp_kb/data/__init__.py +0 -1
- mcp_kb/ingest/__init__.py +0 -1
- mcp_kb/ingest/chroma.py +0 -588
- mcp_kb/knowledge/__init__.py +0 -1
- mcp_kb/knowledge/bootstrap.py +0 -39
- mcp_kb/knowledge/events.py +0 -100
- mcp_kb/knowledge/search.py +0 -178
- mcp_kb/knowledge/store.py +0 -263
- mcp_kb/security/__init__.py +0 -1
- mcp_kb/security/path_validation.py +0 -105
- mcp_kb/server/__init__.py +0 -1
- mcp_kb/server/app.py +0 -201
- mcp_kb/utils/__init__.py +0 -1
- mcp_kb/utils/filesystem.py +0 -127
- mcp_kb-0.2.0.dist-info/RECORD +0 -26
- {mcp_kb-0.2.0.dist-info → mcp_kb-0.3.0.dist-info}/WHEEL +0 -0
- {mcp_kb-0.2.0.dist-info → mcp_kb-0.3.0.dist-info}/entry_points.txt +0 -0
- {mcp_kb-0.2.0.dist-info → mcp_kb-0.3.0.dist-info}/top_level.txt +0 -0
mcp_kb/ingest/chroma.py
DELETED
@@ -1,588 +0,0 @@
```python
"""Integration layer that mirrors knowledge base updates into ChromaDB."""
from __future__ import annotations

import importlib
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional, Set, Tuple, Type, TYPE_CHECKING
from langchain_text_splitters import TokenTextSplitter
from tqdm import tqdm

from mcp_kb.config import DATA_FOLDER_NAME
from mcp_kb.knowledge.events import (
    FileDeleteEvent,
    FileUpsertEvent,
    KnowledgeBaseListener,
    KnowledgeBaseReindexListener,
)
from mcp_kb.knowledge.search import SearchMatch

if TYPE_CHECKING:  # pragma: no cover - type checking only imports
    from chromadb.api import ClientAPI, GetResult
    from chromadb.api.models.Collection import Collection
    from mcp_kb.knowledge.store import KnowledgeBase

SUPPORTED_CLIENTS: Tuple[str, ...] = ("off", "ephemeral", "persistent", "http", "cloud")
"""Recognised client types exposed to operators enabling Chroma ingestion."""


@dataclass(frozen=True)
class ChromaConfiguration:
    """Runtime configuration controlling how Chroma ingestion behaves.

    Each attribute corresponds to either a CLI flag or an environment variable
    so that deployments can toggle Chroma synchronisation without changing the
    application code. The configuration intentionally stores already-normalised
    values (e.g., resolved paths and lowercase enums) so downstream components
    can rely on consistent semantics regardless of where the data originated.
    """

    client_type: str
    collection_name: str
    embedding: str
    data_directory: Optional[Path]
    host: Optional[str]
    port: Optional[int]
    ssl: bool
    tenant: Optional[str]
    database: Optional[str]
    api_key: Optional[str]
    custom_auth_credentials: Optional[str]
    id_prefix: str

    @property
    def enabled(self) -> bool:
        """Return ``True`` when ingestion should be activated."""

        return self.client_type != "off"

    @classmethod
    def from_options(
        cls,
        *,
        root: Path,
        client_type: str,
        collection_name: str,
        embedding: str,
        data_directory: Optional[str],
        host: Optional[str],
        port: Optional[int],
        ssl: bool,
        tenant: Optional[str],
        database: Optional[str],
        api_key: Optional[str],
        custom_auth_credentials: Optional[str],
        id_prefix: Optional[str],
    ) -> "ChromaConfiguration":
        """Normalise CLI and environment inputs into a configuration object.

        Parameters
        ----------
        root:
            Absolute knowledge base root used to derive default directories.
        client_type:
            One of :data:`SUPPORTED_CLIENTS`. ``"off"`` disables ingestion.
        collection_name:
            Target Chroma collection that will store knowledge base documents.
        embedding:
            Name of the embedding function to instantiate. Values are matched
            case-insensitively to the functions exported by Chroma.
        data_directory:
            Optional directory for the persistent client. When omitted and the
            client type is ``"persistent"`` the function creates a ``chroma``
            sub-directory next to the knowledge base.
        host / port / ssl / tenant / database / api_key / custom_auth_credentials:
            Transport-specific settings passed directly to the Chroma client
            constructors.
        id_prefix:
            Optional prefix prepended to every document ID stored in Chroma.
            Defaults to ``"kb::"`` for readability.
        """

        normalized_type = (client_type or "off").lower()
        if normalized_type not in SUPPORTED_CLIENTS:
            raise ValueError(f"Unsupported Chroma client type: {client_type}")

        resolved_directory: Optional[Path]
        if data_directory:
            resolved_directory = Path(data_directory).expanduser().resolve()
        elif normalized_type == "persistent":
            resolved_directory = (root / DATA_FOLDER_NAME / "chroma").resolve()
        else:
            resolved_directory = None

        if resolved_directory is not None:
            resolved_directory.mkdir(parents=True, exist_ok=True)

        prefix = id_prefix or "kb::"

        normalized_embedding = (embedding or "default").lower()

        config = cls(
            client_type=normalized_type,
            collection_name=collection_name,
            embedding=normalized_embedding,
            data_directory=resolved_directory,
            host=host,
            port=port,
            ssl=ssl,
            tenant=tenant,
            database=database,
            api_key=api_key,
            custom_auth_credentials=custom_auth_credentials,
            id_prefix=prefix,
        )
        config._validate()
        return config

    def _validate(self) -> None:
        """Validate the configuration and raise descriptive errors when invalid."""

        if not self.enabled:
            return

        if self.client_type == "persistent" and self.data_directory is None:
            raise ValueError("Persistent Chroma client requires a data directory")

        if self.client_type == "http" and not self.host:
            raise ValueError("HTTP Chroma client requires --chroma-host or MCP_KB_CHROMA_HOST")

        if self.client_type == "cloud":
            missing = [
                name
                for name, value in (
                    ("tenant", self.tenant),
                    ("database", self.database),
                    ("api_key", self.api_key),
                )
                if not value
            ]
            if missing:
                pretty = ", ".join(missing)
                raise ValueError(f"Cloud Chroma client requires values for: {pretty}")

        if not self.collection_name:
            raise ValueError("Collection name must be provided")

        if not self.embedding:
            raise ValueError("Embedding function name must be provided")


@dataclass(frozen=True)
class _ChromaDependencies:
    """Lazy import bundle containing the pieces needed to talk to ChromaDB."""

    chroma_module: Any
    settings_cls: Type[Any]
    embedding_factories: Mapping[str, Type[Any]]


def _load_dependencies() -> _ChromaDependencies:
    """Import ChromaDB lazily so the base server works without the dependency."""

    try:
        chroma_module = importlib.import_module("chromadb")
    except ModuleNotFoundError as exc:  # pragma: no cover - dependent on environment
        raise RuntimeError(
            "Chroma integration requested but the 'chromadb' package is not installed. "
            "Install chromadb via 'uv add chromadb' or disable ingestion."
        ) from exc

    config_module = importlib.import_module("chromadb.config")
    embedding_module = importlib.import_module("chromadb.utils.embedding_functions")

    factories: Dict[str, Type[Any]] = {}
    fallback_map = {
        "default": "DefaultEmbeddingFunction",
        "cohere": "CohereEmbeddingFunction",
        "openai": "OpenAIEmbeddingFunction",
        "jina": "JinaEmbeddingFunction",
        "voyageai": "VoyageAIEmbeddingFunction",
        "roboflow": "RoboflowEmbeddingFunction",
    }
    for alias, attr in fallback_map.items():
        if hasattr(embedding_module, attr):
            factories[alias] = getattr(embedding_module, attr)
    if not factories:
        raise RuntimeError("No embedding functions were found in chromadb.utils.embedding_functions")

    return _ChromaDependencies(
        chroma_module=chroma_module,
        settings_cls=getattr(config_module, "Settings"),
        embedding_factories=factories,
    )


class ChromaIngestor(KnowledgeBaseListener, KnowledgeBaseReindexListener):
    """Listener that mirrors knowledge base writes into a Chroma collection.

    The listener adheres to the :class:`KnowledgeBaseListener` protocol so it
    can be registered alongside other observers without coupling. Events are
    written synchronously to guarantee that indexing stays consistent with the
    underlying filesystem operations.
    """

    def __init__(self, configuration: ChromaConfiguration) -> None:
        """Create an ingestor bound to ``configuration``.

        Parameters
        ----------
        configuration:
            Sanitised :class:`ChromaConfiguration` describing how to connect to
            Chroma and which collection to mirror.
        """

        self.configuration = configuration
        self._deps = _load_dependencies()
        self._client = self._create_client()
        self._collection = self._ensure_collection()
        self.textsplitter = TokenTextSplitter(
            chunk_size=200,
            chunk_overlap=20,
            add_start_index=True,
        )

    def get_document_chunks(self, document_id: str, include: List[str] = ["metadatas", "documents"]) -> GetResult:
        """Get a document from the Chroma index."""
        return self._collection.get(where={"document_id": document_id}, include=include)

    def handle_upsert(self, event: FileUpsertEvent) -> None:
        """Upsert ``event`` into the configured Chroma collection.

        Every invocation removes any existing Chroma entry before inserting the
        fresh payload so that the embedding engine recomputes vectors using the
        latest markdown. The stored metadata keeps both absolute and relative
        paths, enabling downstream semantic search tools to surface references
        that point straight back into the knowledge base.
        """

        document_id = f"{self.configuration.id_prefix}{event.relative_path}"
        metadata = {
            "relative_path": event.relative_path,
        }
        self._reindex_document(document_id, event.content, metadata)

    def delete_document(self, document_id: str) -> None:
        """Delete a document from the Chroma index."""
        self._collection.delete(ids=self.get_document_chunks(document_id, include=[])["ids"])

    def handle_delete(self, event: FileDeleteEvent) -> None:
        """Remove documents associated with ``event`` from the Chroma index.

        Soft deletions translate to a straight removal because the PRD treats
        files carrying the delete sentinel as hidden from client tooling.
        """

        document_id = f"{self.configuration.id_prefix}{event.relative_path}"
        try:
            self.delete_document(document_id)
        except Exception:  # pragma: no cover - depends on Chroma exceptions
            # Chroma raises a custom error when the ID is missing. Deletion should
            # be idempotent so we swallow those errors silently.
            pass

    @property
    def collection(self) -> "Collection":
        """Return the underlying Chroma collection for diagnostics and tests."""

        return self._collection

    def query(self, query: str, *, n_results: int = 5) -> List[Dict[str, Any]]:
        """Return structured query results from the configured collection.

        Parameters
        ----------
        query:
            Natural language string used to compute the semantic embedding.
        n_results:
            Maximum number of results to return. Defaults to five to mirror the
            behaviour surfaced through the MCP search tool.

        Returns
        -------
        list[dict[str, Any]]
            Each dictionary contains the ``document`` text, associated
            ``metadata`` payload, and a floating-point ``distance`` score if
            provided by Chroma.
        """

        payload = self._collection.query(
            query_texts=[query],
            n_results=n_results,
            include=["metadatas", "documents", "distances"],
        )

        documents = payload.get("documents", [[]])
        metadatas = payload.get("metadatas", [[]])
        distances = payload.get("distances", [[]])

        if not documents or not documents[0]:
            return []

        results: List[Dict[str, Any]] = []
        for index, metadata in enumerate(metadatas[0]):
            document = documents[0][index] if index < len(documents[0]) else ""
            distance = distances[0][index] if distances and distances[0] else None
            results.append(
                {
                    "metadata": metadata or {},
                    "document": document,
                    "distance": distance,
                }
            )

        return results

    # Optional search extension -------------------------------------------------

    def search(
        self,
        kb: "KnowledgeBase",
        query: str,
        *,
        context_lines: int = 2,
        limit: Optional[int] = None,
    ) -> List[SearchMatch]:
        """Translate semantic query results into :class:`SearchMatch` objects."""

        max_results = limit or 5
        records = self.query(query, n_results=max_results)
        matches: List[SearchMatch] = []
        seen_paths: Set[Path] = set()

        for record in records:
            metadata = record.get("metadata") or {}
            candidate = self._resolve_candidate_path(
                kb,
                metadata.get("relative_path"),
            )
            if candidate is None or candidate in seen_paths:
                continue

            seen_paths.add(candidate)
            try:
                text = candidate.read_text(encoding="utf-8")
            except FileNotFoundError:
                continue

            lines = text.splitlines()
            file_matches = self._extract_matches_from_lines(candidate, lines, query, context_lines)
            if file_matches:
                matches.append(file_matches[0])
            elif lines:
                preview_limit = min(len(lines), context_lines * 2 + 1)
                matches.append(
                    SearchMatch(
                        path=candidate,
                        line_number=1,
                        context=lines[:preview_limit],
                    )
                )

            if limit is not None and len(matches) >= limit:
                break

        return matches

    # Internal helpers ----------------------------------------------------------

    def _reindex_document(
        self,
        document_id: str,
        content: str,
        metadata: Mapping[str, Any],
    ) -> None:
        """Replace the stored document so embeddings are recomputed.

        Reindexing involves removing any stale record before inserting the new
        payload. Some Chroma backends keep historical data around when ``add``
        is invoked with an existing ID, so the deletion step ensures the stored
        embedding always reflects the latest markdown contents. ``metadata`` is
        copied to break accidental references held by callers.
        """

        try:
            # filter by document_id in metadata
            self.delete_document(document_id)
        except Exception:  # pragma: no cover - depends on Chroma exception types
            # Missing IDs are not an error; most clients raise when attempting to
            # delete a non-existent record. We swallow those errors to keep the
            # reindexing path idempotent.
            pass

        payload_metadata = dict(metadata)
        payload_metadata['document_id'] = document_id

        # splitting

        split_docs = self.textsplitter.create_documents([content])

        for i, d in enumerate(split_docs):
            d.metadata.update(payload_metadata)
            d.metadata['chunk_number'] = i
            d.metadata['startline'] = len(content[:d.metadata['start_index']].splitlines())
            d.metadata['endline'] = d.metadata['startline'] + len(d.page_content.splitlines()) - 1

        self._collection.add(
            documents=[d.page_content for d in split_docs],
            metadatas=[d.metadata for d in split_docs],
            ids=[f"{d.metadata['document_id']}-{d.metadata['chunk_number']}" for d in split_docs],
        )

    # Optional full reindex -----------------------------------------------------

    def reindex(self, kb: "KnowledgeBase") -> int:
        """Rebuild the Chroma index from the current knowledge base state.

        The method iterates over all active markdown files visible to the
        provided knowledge base instance, computing a deterministic document ID
        for each path using the configured ``id_prefix``. Each file is read from
        disk and upserted into the underlying Chroma collection by delegating to
        :meth:`_reindex_document`, ensuring embeddings are recomputed.

        Parameters
        ----------
        kb:
            The :class:`~mcp_kb.knowledge.store.KnowledgeBase` providing access
            to the validated filesystem and utility methods.

        Returns
        -------
        int
            The number of documents processed during the reindex run.
        """

        count = 0
        root = kb.rules.root
        with tqdm(kb.iter_active_files(include_docs=False), desc="Reindexing Chroma", total=kb.total_active_files(include_docs=False)) as pbar:
            for path in pbar:
                pbar.set_description(f"Reindexing Chroma {path.name}")
                try:
                    content = path.read_text(encoding="utf-8")
                except FileNotFoundError:  # pragma: no cover - race with external edits
                    continue

                relative = str(path.relative_to(root))
                document_id = f"{self.configuration.id_prefix}{relative}"
                metadata = {
                    "relative_path": relative,
                }
                self._reindex_document(document_id, content, metadata)
                count += 1

        return count

    def _extract_matches_from_lines(
        self,
        path: Path,
        lines: List[str],
        query: str,
        context_lines: int,
    ) -> List[SearchMatch]:
        """Return line-based matches for ``query`` within ``lines``."""

        matches: List[SearchMatch] = []
        for index, line in enumerate(lines, start=1):
            if query in line:
                start = max(0, index - context_lines - 1)
                end = min(len(lines), index + context_lines)
                matches.append(
                    SearchMatch(
                        path=path,
                        line_number=index,
                        context=lines[start:end],
                    )
                )
        return matches

    def _resolve_candidate_path(
        self,
        kb: "KnowledgeBase",
        relative: Optional[str],
    ) -> Optional[Path]:
        """Translate metadata hints into a validated path inside ``kb``."""

        path: Optional[Path] = None
        if relative:
            candidate = (kb.rules.root / relative).resolve()
            try:
                candidate.relative_to(kb.rules.root)
            except ValueError:
                path = None
            else:
                if candidate.exists():
                    path = candidate

        return path

    def _create_client(self) -> "ClientAPI":
        """Instantiate the proper Chroma client based on configuration.

        The method supports all transport modes referenced in the user
        requirements. It constructs the minimal set of keyword arguments for the
        chosen backend and lets Chroma's client validate the final configuration.
        """

        chroma = self._deps.chroma_module
        config = self.configuration

        if not config.enabled:
            raise RuntimeError("ChromaIngestor cannot be constructed when ingestion is disabled")

        if config.client_type == "ephemeral":
            return chroma.EphemeralClient()

        if config.client_type == "persistent":
            return chroma.PersistentClient(path=str(config.data_directory))

        if config.client_type in {"http", "cloud"}:
            kwargs: Dict[str, Any] = {
                "ssl": config.ssl if config.client_type == "http" else True,
            }
            if config.client_type == "http":
                kwargs["host"] = config.host
                if config.port is not None:
                    kwargs["port"] = config.port
                if config.custom_auth_credentials:
                    kwargs["settings"] = self._deps.settings_cls(
                        chroma_client_auth_provider="chromadb.auth.basic_authn.BasicAuthClientProvider",
                        chroma_client_auth_credentials=config.custom_auth_credentials,
                    )
            else:  # cloud
                kwargs["host"] = config.host or "api.trychroma.com"
                kwargs["tenant"] = config.tenant
                kwargs["database"] = config.database
                kwargs.setdefault("headers", {})
                kwargs["headers"]["x-chroma-token"] = config.api_key

            return chroma.HttpClient(**kwargs)

        raise ValueError(f"Unsupported client type: {config.client_type}")

    def _ensure_collection(self) -> "Collection":
        """Create or return the configured Chroma collection."""

        factory = self._deps.embedding_factories.get(self.configuration.embedding)
        if factory is None:
            available = ", ".join(sorted(self._deps.embedding_factories))
            raise ValueError(
                f"Unknown embedding function '{self.configuration.embedding}'. "
                f"Available options: {available}"
            )
        embedding_function = factory()

        metadata = {"source": "mcp-knowledge-base"}
        client = self._client
        try:
            return client.get_or_create_collection(
                name=self.configuration.collection_name,
                metadata=metadata,
                embedding_function=embedding_function,
            )
        except TypeError:
            # Older Chroma versions expect CreateCollectionConfiguration. Fall back
            # to create_collection for compatibility.
            return client.get_or_create_collection(
                name=self.configuration.collection_name,
                embedding_function=embedding_function,
            )
```
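For readers tracing what 0.3.0 removed, here is a minimal sketch of how this listener could be wired up in 0.2.0. The keyword arguments match `ChromaConfiguration.from_options` above; the `FileUpsertEvent` construction is an assumption inferred from the fields `handle_upsert` reads (`relative_path`, `content`), and all concrete values are illustrative.

```python
from pathlib import Path

from mcp_kb.ingest.chroma import ChromaConfiguration, ChromaIngestor
from mcp_kb.knowledge.events import FileUpsertEvent

# Persistent local client; data lands in <root>/<DATA_FOLDER_NAME>/chroma
# because data_directory is None. The "./kb" root is a made-up example path.
config = ChromaConfiguration.from_options(
    root=Path("./kb").resolve(),
    client_type="persistent",          # one of SUPPORTED_CLIENTS
    collection_name="knowledge-base",
    embedding="default",
    data_directory=None,
    host=None,
    port=None,
    ssl=False,
    tenant=None,
    database=None,
    api_key=None,
    custom_auth_credentials=None,
    id_prefix=None,                    # falls back to "kb::"
)

ingestor = ChromaIngestor(config)     # requires chromadb to be installed

# Hypothetical event; the real server emits these on knowledge base writes.
ingestor.handle_upsert(FileUpsertEvent(relative_path="notes/demo.md", content="# Demo"))
```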
mcp_kb/knowledge/__init__.py
DELETED
@@ -1 +0,0 @@
```python
"""Knowledge layer that encapsulates content storage and search helpers."""
```
mcp_kb/knowledge/bootstrap.py
DELETED
@@ -1,39 +0,0 @@
```python
"""Bootstrap helpers executed during server startup."""
from __future__ import annotations

import importlib.resources as resources
from pathlib import Path

from mcp_kb.config import DATA_FOLDER_NAME, DOC_FILENAME


def install_default_documentation(root: Path) -> Path:
    """Ensure the default documentation file exists under ``root``.

    The function creates the documentation directory if necessary and copies the
    packaged ``KNOWLEDBASE_DOC.md`` file into place. Existing documentation is
    preserved so that operators can customize the file without losing changes on
    subsequent startups.

    Parameters
    ----------
    root:
        Absolute path representing the knowledge base root directory.

    Returns
    -------
    Path
        Path to the documentation file inside the knowledge base tree.
    """

    docs_dir = root / DATA_FOLDER_NAME
    doc_path = docs_dir / DOC_FILENAME
    if doc_path.exists():
        return doc_path

    docs_dir.mkdir(parents=True, exist_ok=True)

    with resources.files("mcp_kb.data").joinpath("KNOWLEDBASE_DOC.md").open("r", encoding="utf-8") as source:
        doc_path.write_text(source.read(), encoding="utf-8")

    return doc_path
```
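A sketch of how this removed bootstrap helper was typically invoked at startup, assuming the 0.2.0 package; the `./kb` root is illustrative, and the function is idempotent, so an operator-edited doc file survives repeated runs.

```python
from pathlib import Path

from mcp_kb.knowledge.bootstrap import install_default_documentation

# Copies the packaged KNOWLEDBASE_DOC.md into place on first run only;
# subsequent calls return the existing file untouched.
doc_path = install_default_documentation(Path("./kb").resolve())
print(doc_path)  # <root>/<DATA_FOLDER_NAME>/<DOC_FILENAME>
```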