biblicus 0.15.1__py3-none-any.whl → 1.0.0__py3-none-any.whl
This diff shows the changes between package versions that were publicly released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their public registries.
- biblicus/__init__.py +21 -1
- biblicus/analysis/markov.py +35 -3
- biblicus/backends/__init__.py +6 -2
- biblicus/backends/embedding_index_common.py +334 -0
- biblicus/backends/embedding_index_file.py +272 -0
- biblicus/backends/embedding_index_inmemory.py +270 -0
- biblicus/backends/hybrid.py +8 -5
- biblicus/backends/scan.py +1 -0
- biblicus/backends/sqlite_full_text_search.py +1 -1
- biblicus/backends/{vector.py → tf_vector.py} +28 -35
- biblicus/chunking.py +396 -0
- biblicus/cli.py +75 -25
- biblicus/context.py +27 -12
- biblicus/context_engine/__init__.py +53 -0
- biblicus/context_engine/assembler.py +1060 -0
- biblicus/context_engine/compaction.py +110 -0
- biblicus/context_engine/models.py +423 -0
- biblicus/context_engine/retrieval.py +129 -0
- biblicus/corpus.py +117 -16
- biblicus/embedding_providers.py +122 -0
- biblicus/errors.py +24 -0
- biblicus/frontmatter.py +2 -0
- biblicus/knowledge_base.py +1 -1
- biblicus/models.py +15 -3
- biblicus/retrieval.py +7 -2
- biblicus/sources.py +46 -11
- biblicus/text/link.py +6 -0
- biblicus/text/prompts.py +2 -0
- {biblicus-0.15.1.dist-info → biblicus-1.0.0.dist-info}/METADATA +4 -3
- {biblicus-0.15.1.dist-info → biblicus-1.0.0.dist-info}/RECORD +34 -24
- {biblicus-0.15.1.dist-info → biblicus-1.0.0.dist-info}/WHEEL +0 -0
- {biblicus-0.15.1.dist-info → biblicus-1.0.0.dist-info}/entry_points.txt +0 -0
- {biblicus-0.15.1.dist-info → biblicus-1.0.0.dist-info}/licenses/LICENSE +0 -0
- {biblicus-0.15.1.dist-info → biblicus-1.0.0.dist-info}/top_level.txt +0 -0
biblicus/__init__.py
CHANGED

@@ -2,6 +2,17 @@
 Biblicus public package interface.
 """
 
+from .context_engine import (
+    ContextAssembler,
+    ContextBudgetSpec,
+    ContextDeclaration,
+    ContextExpansionSpec,
+    ContextPackBudgetSpec,
+    ContextPackSpec,
+    ContextPolicySpec,
+    ContextRetrieverRequest,
+    retrieve_context_pack,
+)
 from .corpus import Corpus
 from .knowledge_base import KnowledgeBase
 from .models import (
@@ -16,6 +27,15 @@ from .models import (
 
 __all__ = [
     "__version__",
+    "ContextAssembler",
+    "ContextBudgetSpec",
+    "ContextDeclaration",
+    "ContextExpansionSpec",
+    "ContextPackBudgetSpec",
+    "ContextPackSpec",
+    "ContextPolicySpec",
+    "ContextRetrieverRequest",
+    "retrieve_context_pack",
     "Corpus",
     "CorpusConfig",
    "Evidence",
@@ -27,4 +47,4 @@ __all__ = [
     "RetrievalRun",
 ]
 
-__version__ = "0.15.1"
+__version__ = "1.0.0"
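A minimal sketch of the widened 1.0.0 import surface. It uses only names confirmed by the diff above; call signatures for the context-engine API are not shown in this diff.

import biblicus

assert biblicus.__version__ == "1.0.0"

# These names are among the additions to __all__ above.
from biblicus import (
    ContextAssembler,
    ContextPackSpec,
    ContextRetrieverRequest,
    retrieve_context_pack,
)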
biblicus/analysis/markov.py
CHANGED

@@ -686,6 +686,15 @@ def _build_observations(
     llm = config.llm_observations
     assert llm.client is not None and llm.prompt_template is not None
     for index, observation in enumerate(observations):
+        if observation.segment_text in {"START", "END"}:
+            observations[index] = observation.model_copy(
+                update={
+                    "llm_label": observation.segment_text,
+                    "llm_label_confidence": 1.0,
+                    "llm_summary": observation.segment_text,
+                }
+            )
+            continue
         prompt = llm.prompt_template.format(segment=observation.segment_text)
         response_text = generate_completion(
             client=llm.client,
@@ -707,8 +716,12 @@ def _build_observations(
     if config.embeddings.enabled:
         embedding_config = config.embeddings
         assert embedding_config.client is not None
+        embed_indices: List[int] = []
         embed_texts: List[str] = []
-        for observation in observations:
+        for index, observation in enumerate(observations):
+            if observation.segment_text in {"START", "END"}:
+                continue
+            embed_indices.append(index)
             if embedding_config.text_source == "segment_text":
                 embed_texts.append(observation.segment_text)
             else:
@@ -717,10 +730,29 @@ def _build_observations(
                     "embeddings.text_source is 'llm_summary' but llm_summary is missing"
                 )
                 embed_texts.append(observation.llm_summary)
+
+        if not embed_indices:
+            raise ValueError("Embeddings require at least one non-boundary segment")
+
         vectors = generate_embeddings_batch(client=embedding_config.client, texts=embed_texts)
+        if len(vectors) != len(embed_indices):
+            raise ValueError(
+                "Embedding provider returned unexpected vector count: "
+                f"expected {len(embed_indices)} but got {len(vectors)}"
+            )
+
+        vector_by_observation_index: Dict[int, List[float]] = {}
+        for observation_index, vector in zip(embed_indices, vectors):
+            vector_by_observation_index[observation_index] = list(vector)
+
+        embedding_dimension = len(next(iter(vector_by_observation_index.values())))
+        boundary_embedding = [0.0 for _ in range(embedding_dimension)]
         updated: List[MarkovAnalysisObservation] = []
-        for
-
+        for index, observation in enumerate(observations):
+            vector = vector_by_observation_index.get(index)
+            updated.append(
+                observation.model_copy(update={"embedding": vector or boundary_embedding})
+            )
         observations = updated
 
     return observations
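Both hunks above implement one idea: synthetic START/END boundary states are never sent to the LLM or the embedding provider, and the returned vectors are re-aligned to observation indices afterwards, with boundaries receiving an all-zero vector. A standalone sketch of that alignment, using a hypothetical align_boundary_embeddings helper over plain lists instead of the package's observation models:

from typing import Dict, List

BOUNDARY_TOKENS = {"START", "END"}


def align_boundary_embeddings(
    segment_texts: List[str], vectors: List[List[float]]
) -> List[List[float]]:
    # Indices of the segments that were actually sent to the embedding provider.
    embed_indices = [
        index for index, text in enumerate(segment_texts) if text not in BOUNDARY_TOKENS
    ]
    if not embed_indices:
        raise ValueError("Embeddings require at least one non-boundary segment")
    if len(vectors) != len(embed_indices):
        raise ValueError("Embedding provider returned unexpected vector count")
    vector_by_index: Dict[int, List[float]] = dict(zip(embed_indices, vectors))
    # Boundary states get an all-zero vector of the same dimension.
    boundary = [0.0] * len(vectors[0])
    return [vector_by_index.get(index, boundary) for index in range(len(segment_texts))]


# ["START", "greeting", "END"] with one real vector yields
# [[0.0, 0.0], [0.6, 0.8], [0.0, 0.0]].
print(align_boundary_embeddings(["START", "greeting", "END"], [[0.6, 0.8]]))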
biblicus/backends/__init__.py
CHANGED

@@ -7,10 +7,12 @@ from __future__ import annotations
 from typing import Dict, Type
 
 from .base import RetrievalBackend
+from .embedding_index_file import EmbeddingIndexFileBackend
+from .embedding_index_inmemory import EmbeddingIndexInMemoryBackend
 from .hybrid import HybridBackend
 from .scan import ScanBackend
 from .sqlite_full_text_search import SqliteFullTextSearchBackend
-from .
+from .tf_vector import TfVectorBackend
 
 
 def available_backends() -> Dict[str, Type[RetrievalBackend]]:
@@ -21,10 +23,12 @@ def available_backends() -> Dict[str, Type[RetrievalBackend]]:
     :rtype: dict[str, Type[RetrievalBackend]]
     """
     return {
+        EmbeddingIndexFileBackend.backend_id: EmbeddingIndexFileBackend,
+        EmbeddingIndexInMemoryBackend.backend_id: EmbeddingIndexInMemoryBackend,
         HybridBackend.backend_id: HybridBackend,
         ScanBackend.backend_id: ScanBackend,
         SqliteFullTextSearchBackend.backend_id: SqliteFullTextSearchBackend,
-
+        TfVectorBackend.backend_id: TfVectorBackend,
     }
 
 
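A minimal sketch of looking up one of the newly registered backends. The concrete backend_id strings do not appear in this diff, so the lookup goes through the class attribute rather than a hard-coded name:

from biblicus.backends import available_backends
from biblicus.backends.embedding_index_inmemory import EmbeddingIndexInMemoryBackend

# Registry keys are each backend class's backend_id attribute.
backends = available_backends()
backend_cls = backends[EmbeddingIndexInMemoryBackend.backend_id]
assert backend_cls is EmbeddingIndexInMemoryBackend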
biblicus/backends/embedding_index_common.py
ADDED

@@ -0,0 +1,334 @@
+"""
+Shared primitives for embedding-index retrieval backends.
+"""
+
+from __future__ import annotations
+
+import json
+from pathlib import Path
+from typing import Dict, Iterable, Iterator, List, Optional, Tuple
+
+import numpy as np
+from pydantic import BaseModel, ConfigDict, Field, model_validator
+
+from ..chunking import ChunkerConfig, TextChunk, TokenizerConfig
+from ..corpus import CORPUS_DIR_NAME, RUNS_DIR_NAME, Corpus
+from ..embedding_providers import EmbeddingProviderConfig, _l2_normalize_rows
+from ..frontmatter import parse_front_matter
+from ..models import ExtractionRunReference, parse_extraction_run_reference
+
+
+class ChunkRecord(BaseModel):
+    """
+    Minimal persisted representation of a chunk.
+
+    :ivar item_id: Item identifier that produced the chunk.
+    :vartype item_id: str
+    :ivar span_start: Inclusive start character offset.
+    :vartype span_start: int
+    :ivar span_end: Exclusive end character offset.
+    :vartype span_end: int
+    """
+
+    model_config = ConfigDict(extra="forbid")
+
+    item_id: str = Field(min_length=1)
+    span_start: int = Field(ge=0)
+    span_end: int = Field(ge=0)
+
+    @model_validator(mode="after")
+    def _validate_span(self) -> "ChunkRecord":
+        if self.span_end <= self.span_start:
+            raise ValueError("chunk span_end must be greater than span_start")
+        return self
+
+
+class EmbeddingIndexRecipeConfig(BaseModel):
+    """
+    Configuration for embedding-index retrieval backends.
+
+    :ivar extraction_run: Optional extraction run reference in the form extractor_id:run_id.
+    :vartype extraction_run: str or None
+    :ivar chunker: Chunker configuration.
+    :vartype chunker: biblicus.chunking.ChunkerConfig
+    :ivar tokenizer: Optional tokenizer configuration.
+    :vartype tokenizer: biblicus.chunking.TokenizerConfig or None
+    :ivar embedding_provider: Embedding provider configuration.
+    :vartype embedding_provider: biblicus.embedding_providers.EmbeddingProviderConfig
+    :ivar snippet_characters: Optional maximum character count for returned evidence text.
+    :vartype snippet_characters: int or None
+    :ivar maximum_cache_total_items: Optional maximum number of vectors cached per scan batch.
+    :vartype maximum_cache_total_items: int or None
+    :ivar maximum_cache_total_characters: Optional maximum characters cached per scan batch.
+    :vartype maximum_cache_total_characters: int or None
+    """
+
+    model_config = ConfigDict(extra="forbid")
+
+    snippet_characters: Optional[int] = Field(default=None, ge=1)
+    maximum_cache_total_items: Optional[int] = Field(default=None, ge=1)
+    maximum_cache_total_characters: Optional[int] = Field(default=None, ge=1)
+    extraction_run: Optional[str] = None
+    chunker: ChunkerConfig = Field(default_factory=lambda: ChunkerConfig(chunker_id="paragraph"))
+    tokenizer: Optional[TokenizerConfig] = None
+    embedding_provider: EmbeddingProviderConfig
+
+
+def _extract_span_text(text: Optional[str], span: Tuple[int, int]) -> Optional[str]:
+    if not isinstance(text, str):
+        return None
+    span_start, span_end = span
+    if span_start < 0 or span_end <= span_start:
+        return text
+    return text[span_start:span_end]
+
+
+def _build_snippet(
+    text: Optional[str], span: Tuple[int, int], max_chars: Optional[int]
+) -> Optional[str]:
+    if not isinstance(text, str):
+        return None
+    if max_chars is None:
+        return _extract_span_text(text, span)
+    if max_chars <= 0:
+        return ""
+    span_start, span_end = span
+    if span_start < 0 or span_end <= span_start:
+        return text[:max_chars]
+    half_window = max_chars // 2
+    snippet_start = max(span_start - half_window, 0)
+    snippet_end = min(span_end + half_window, len(text))
+    return text[snippet_start:snippet_end]
+
+
+def resolve_extraction_reference(
+    corpus: Corpus, recipe_config: EmbeddingIndexRecipeConfig
+) -> Optional[ExtractionRunReference]:
+    """
+    Resolve an extraction run reference from an embedding-index recipe config.
+
+    :param corpus: Corpus associated with the recipe.
+    :type corpus: Corpus
+    :param recipe_config: Parsed embedding-index recipe configuration.
+    :type recipe_config: EmbeddingIndexRecipeConfig
+    :return: Parsed extraction reference or None.
+    :rtype: ExtractionRunReference or None
+    :raises FileNotFoundError: If an extraction run is referenced but not present.
+    """
+    if not recipe_config.extraction_run:
+        return None
+    extraction_reference = parse_extraction_run_reference(recipe_config.extraction_run)
+    run_dir = corpus.extraction_run_dir(
+        extractor_id=extraction_reference.extractor_id,
+        run_id=extraction_reference.run_id,
+    )
+    if not run_dir.is_dir():
+        raise FileNotFoundError(f"Missing extraction run: {extraction_reference.as_string()}")
+    return extraction_reference
+
+
+def _load_text_from_item(
+    corpus: Corpus,
+    *,
+    item_id: str,
+    relpath: str,
+    media_type: str,
+    extraction_reference: Optional[ExtractionRunReference],
+) -> Optional[str]:
+    if extraction_reference:
+        extracted_text = corpus.read_extracted_text(
+            extractor_id=extraction_reference.extractor_id,
+            run_id=extraction_reference.run_id,
+            item_id=item_id,
+        )
+        if isinstance(extracted_text, str):
+            return extracted_text
+
+    if media_type == "text/markdown":
+        raw = (corpus.root / relpath).read_text(encoding="utf-8")
+        return parse_front_matter(raw).body
+    if media_type.startswith("text/"):
+        return (corpus.root / relpath).read_text(encoding="utf-8")
+    return None
+
+
+def iter_text_payloads(
+    corpus: Corpus, *, extraction_reference: Optional[ExtractionRunReference]
+) -> Iterator[Tuple[object, str]]:
+    """
+    Yield catalog items and their text payloads.
+
+    :param corpus: Corpus containing the items.
+    :type corpus: Corpus
+    :param extraction_reference: Optional extraction reference.
+    :type extraction_reference: ExtractionRunReference or None
+    :yield: (catalog_item, text) pairs.
+    :rtype: Iterator[tuple[object, str]]
+    """
+    catalog = corpus.load_catalog()
+    for catalog_item in catalog.items.values():
+        item_id = str(getattr(catalog_item, "id", ""))
+        relpath = str(getattr(catalog_item, "relpath", ""))
+        media_type = str(getattr(catalog_item, "media_type", ""))
+        if not item_id or not relpath or not media_type:
+            continue
+        text = _load_text_from_item(
+            corpus,
+            item_id=item_id,
+            relpath=relpath,
+            media_type=media_type,
+            extraction_reference=extraction_reference,
+        )
+        if not isinstance(text, str) or not text.strip():
+            continue
+        yield catalog_item, text
+
+
+def collect_chunks(
+    corpus: Corpus, *, recipe_config: EmbeddingIndexRecipeConfig
+) -> Tuple[List[TextChunk], int]:
+    """
+    Collect chunks from text payloads in a corpus.
+
+    :param corpus: Corpus to chunk.
+    :type corpus: Corpus
+    :param recipe_config: Parsed embedding-index recipe configuration.
+    :type recipe_config: EmbeddingIndexRecipeConfig
+    :return: (chunks, text_item_count)
+    :rtype: tuple[list[TextChunk], int]
+    """
+    tokenizer = recipe_config.tokenizer.build_tokenizer() if recipe_config.tokenizer else None
+    chunker = recipe_config.chunker.build_chunker(tokenizer=tokenizer)
+    extraction_reference = resolve_extraction_reference(corpus, recipe_config)
+
+    chunks: List[TextChunk] = []
+    next_chunk_id = 0
+    text_items = 0
+    for catalog_item, text in iter_text_payloads(corpus, extraction_reference=extraction_reference):
+        text_items += 1
+        item_id = str(getattr(catalog_item, "id"))
+        item_chunks = chunker.chunk_text(
+            item_id=item_id, text=text, starting_chunk_id=next_chunk_id
+        )
+        if item_chunks:
+            next_chunk_id = item_chunks[-1].chunk_id + 1
+            chunks.extend(item_chunks)
+    return chunks, text_items
+
+
+def chunks_to_records(chunks: Iterable[TextChunk]) -> List[ChunkRecord]:
+    """
+    Convert chunk objects to persisted chunk records.
+
+    :param chunks: Chunk list.
+    :type chunks: Iterable[TextChunk]
+    :return: Chunk record list.
+    :rtype: list[ChunkRecord]
+    """
+    records: List[ChunkRecord] = []
+    for chunk in chunks:
+        records.append(
+            ChunkRecord(
+                item_id=chunk.item_id,
+                span_start=chunk.span_start,
+                span_end=chunk.span_end,
+            )
+        )
+    return records
+
+
+def write_chunks_jsonl(path: Path, records: Iterable[ChunkRecord]) -> None:
+    """
+    Write chunk records as newline-delimited JSON.
+
+    :param path: Destination path.
+    :type path: pathlib.Path
+    :param records: Chunk records.
+    :type records: Iterable[ChunkRecord]
+    :return: None.
+    :rtype: None
+    """
+    with path.open("w", encoding="utf-8") as handle:
+        for record in records:
+            handle.write(json.dumps(record.model_dump(), separators=(",", ":")) + "\n")
+
+
+def read_chunks_jsonl(path: Path) -> List[ChunkRecord]:
+    """
+    Read chunk records from a JSON Lines file.
+
+    :param path: Source path.
+    :type path: pathlib.Path
+    :return: Chunk record list.
+    :rtype: list[ChunkRecord]
+    """
+    records: List[ChunkRecord] = []
+    for line in path.read_text(encoding="utf-8").splitlines():
+        if not line.strip():
+            continue
+        records.append(ChunkRecord.model_validate(json.loads(line)))
+    return records
+
+
+def write_embeddings(path: Path, embeddings: np.ndarray) -> None:
+    """
+    Write embeddings to disk.
+
+    :param path: Destination path.
+    :type path: pathlib.Path
+    :param embeddings: Embedding matrix.
+    :type embeddings: numpy.ndarray
+    :return: None.
+    :rtype: None
+    """
+    np.save(path, embeddings.astype(np.float32))
+
+
+def read_embeddings(path: Path, *, mmap: bool) -> np.ndarray:
+    """
+    Read embeddings from disk.
+
+    :param path: Source path.
+    :type path: pathlib.Path
+    :param mmap: Whether to memory-map the file.
+    :type mmap: bool
+    :return: Embedding matrix.
+    :rtype: numpy.ndarray
+    """
+    mode = "r" if mmap else None
+    return np.load(path, mmap_mode=mode)
+
+
+def cosine_similarity_scores(embeddings: np.ndarray, query_vector: np.ndarray) -> np.ndarray:
+    """
+    Compute cosine similarity scores for a query vector.
+
+    The embedding matrix must already be L2-normalized.
+
+    :param embeddings: Embedding matrix of shape (n, d).
+    :type embeddings: numpy.ndarray
+    :param query_vector: Query vector of shape (d,).
+    :type query_vector: numpy.ndarray
+    :return: Score vector of shape (n,).
+    :rtype: numpy.ndarray
+    """
+    query_vector = query_vector.astype(np.float32).reshape(-1)
+    query_vector = _l2_normalize_rows(query_vector.reshape(1, -1)).reshape(-1)
+    return embeddings @ query_vector
+
+
+def artifact_paths_for_run(*, run_id: str, backend_id: str) -> Dict[str, str]:
+    """
+    Build deterministic artifact relative paths for an embedding index run.
+
+    :param run_id: Run identifier.
+    :type run_id: str
+    :param backend_id: Backend identifier.
+    :type backend_id: str
+    :return: Mapping with keys embeddings and chunks.
+    :rtype: dict[str, str]
+    """
+    prefix = f"{run_id}.{backend_id}"
+    embeddings_relpath = str(Path(CORPUS_DIR_NAME) / RUNS_DIR_NAME / f"{prefix}.embeddings.npy")
+    chunks_relpath = str(Path(CORPUS_DIR_NAME) / RUNS_DIR_NAME / f"{prefix}.chunks.jsonl")
+    return {"embeddings": embeddings_relpath, "chunks": chunks_relpath}
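A short usage sketch of the contract behind cosine_similarity_scores: because index rows are stored L2-normalized, scoring reduces to a single matrix-vector product. The normalize_rows helper below is a stand-in for the package's private _l2_normalize_rows, whose implementation is not shown in this diff:

import numpy as np


def normalize_rows(matrix: np.ndarray) -> np.ndarray:
    # Stand-in for _l2_normalize_rows: scale each row to unit L2 norm.
    norms = np.linalg.norm(matrix, axis=1, keepdims=True)
    return matrix / np.clip(norms, 1e-12, None)


# Three 2-d chunk embeddings, stored L2-normalized as the index expects.
embeddings = normalize_rows(
    np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]], dtype=np.float32)
)
query = normalize_rows(np.array([[1.0, 0.0]], dtype=np.float32)).reshape(-1)

# With unit-norm rows, the matrix-vector product equals cosine similarity.
scores = embeddings @ query        # approximately [1.0, 0.0, 0.7071]
top_two = np.argsort(-scores)[:2]  # indices of the two best-scoring chunks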