ragit 0.8.1__tar.gz → 0.10.1__tar.gz
- {ragit-0.8.1/ragit.egg-info → ragit-0.10.1}/PKG-INFO +9 -22
- {ragit-0.8.1 → ragit-0.10.1}/README.md +6 -18
- {ragit-0.8.1 → ragit-0.10.1}/pyproject.toml +2 -3
- {ragit-0.8.1 → ragit-0.10.1}/ragit/__init__.py +27 -15
- {ragit-0.8.1 → ragit-0.10.1}/ragit/assistant.py +325 -10
- ragit-0.10.1/ragit/config.py +204 -0
- {ragit-0.8.1 → ragit-0.10.1}/ragit/core/experiment/experiment.py +10 -5
- ragit-0.10.1/ragit/exceptions.py +271 -0
- ragit-0.10.1/ragit/loaders.py +401 -0
- ragit-0.10.1/ragit/logging.py +194 -0
- ragit-0.10.1/ragit/monitor.py +307 -0
- {ragit-0.8.1 → ragit-0.10.1}/ragit/providers/__init__.py +1 -13
- ragit-0.10.1/ragit/providers/ollama.py +670 -0
- {ragit-0.8.1 → ragit-0.10.1}/ragit/version.py +1 -1
- {ragit-0.8.1 → ragit-0.10.1/ragit.egg-info}/PKG-INFO +9 -22
- {ragit-0.8.1 → ragit-0.10.1}/ragit.egg-info/SOURCES.txt +3 -1
- {ragit-0.8.1 → ragit-0.10.1}/ragit.egg-info/requires.txt +1 -3
- ragit-0.8.1/ragit/config.py +0 -60
- ragit-0.8.1/ragit/loaders.py +0 -219
- ragit-0.8.1/ragit/providers/ollama.py +0 -446
- ragit-0.8.1/ragit/providers/sentence_transformers.py +0 -225
- {ragit-0.8.1 → ragit-0.10.1}/LICENSE +0 -0
- {ragit-0.8.1 → ragit-0.10.1}/ragit/core/__init__.py +0 -0
- {ragit-0.8.1 → ragit-0.10.1}/ragit/core/experiment/__init__.py +0 -0
- {ragit-0.8.1 → ragit-0.10.1}/ragit/core/experiment/results.py +0 -0
- {ragit-0.8.1 → ragit-0.10.1}/ragit/providers/base.py +0 -0
- {ragit-0.8.1 → ragit-0.10.1}/ragit/providers/function_adapter.py +0 -0
- {ragit-0.8.1 → ragit-0.10.1}/ragit/utils/__init__.py +0 -0
- {ragit-0.8.1 → ragit-0.10.1}/ragit.egg-info/dependency_links.txt +0 -0
- {ragit-0.8.1 → ragit-0.10.1}/ragit.egg-info/top_level.txt +0 -0
- {ragit-0.8.1 → ragit-0.10.1}/setup.cfg +0 -0
{ragit-0.8.1/ragit.egg-info → ragit-0.10.1}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: ragit
-Version: 0.8.1
+Version: 0.10.1
 Summary: Automatic RAG Pattern Optimization Engine
 Author: RODMENA LIMITED
 Maintainer-email: RODMENA LIMITED <info@rodmena.co.uk>
@@ -16,7 +16,7 @@ Classifier: Programming Language :: Python :: 3.13
 Classifier: Programming Language :: Python :: 3.14
 Classifier: Operating System :: MacOS :: MacOS X
 Classifier: Operating System :: POSIX :: Linux
-Requires-Python:
+Requires-Python: >=3.12
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: requests>=2.31.0
@@ -28,6 +28,7 @@ Requires-Dist: scikit-learn>=1.5.0
 Requires-Dist: tqdm>=4.66.0
 Requires-Dist: trio>=0.24.0
 Requires-Dist: httpx>=0.27.0
+Requires-Dist: resilient-circuit>=0.4.7
 Provides-Extra: dev
 Requires-Dist: ragit[test]; extra == "dev"
 Requires-Dist: pytest; extra == "dev"
@@ -39,8 +40,6 @@ Provides-Extra: test
 Requires-Dist: pytest; extra == "test"
 Requires-Dist: pytest-cov; extra == "test"
 Requires-Dist: pytest-mock; extra == "test"
-Provides-Extra: transformers
-Requires-Dist: sentence-transformers>=2.2.0; extra == "transformers"
 Provides-Extra: docs
 Requires-Dist: sphinx>=7.0; extra == "docs"
 Requires-Dist: sphinx-rtd-theme>=2.0; extra == "docs"
@@ -55,14 +54,11 @@ RAG toolkit for Python. Document loading, chunking, vector search, LLM integrati

 ```bash
 pip install ragit
-
-# For offline embedding
-pip install ragit[transformers]
 ```

 ## Quick Start

-You must provide an embedding source: custom function,
+You must provide an embedding source: custom function, Ollama, or any provider.

 ### Custom Embedding Function

@@ -90,26 +86,17 @@ assistant = RAGAssistant("docs/", embed_fn=my_embed, generate_fn=my_generate)
 answer = assistant.ask("How does authentication work?")
 ```

-###
-
-Models are downloaded automatically on first use (~90MB for default model).
+### With Ollama (nomic-embed-text)

 ```python
 from ragit import RAGAssistant
-from ragit.providers import
+from ragit.providers import OllamaProvider

-# Uses
-assistant = RAGAssistant("docs/", provider=
-
-# Or specify a model
-assistant = RAGAssistant(
-    "docs/",
-    provider=SentenceTransformersProvider(model_name="all-mpnet-base-v2")
-)
+# Uses nomic-embed-text for embeddings (768d)
+assistant = RAGAssistant("docs/", provider=OllamaProvider())
+results = assistant.retrieve("search query")
 ```

-Available models: `all-MiniLM-L6-v2` (384d), `all-mpnet-base-v2` (768d), `paraphrase-MiniLM-L6-v2` (384d)
-
 ## Core API

 ```python
{ragit-0.8.1 → ragit-0.10.1}/README.md

@@ -6,14 +6,11 @@ RAG toolkit for Python. Document loading, chunking, vector search, LLM integrati

 ```bash
 pip install ragit
-
-# For offline embedding
-pip install ragit[transformers]
 ```

 ## Quick Start

-You must provide an embedding source: custom function,
+You must provide an embedding source: custom function, Ollama, or any provider.

 ### Custom Embedding Function

@@ -41,26 +38,17 @@ assistant = RAGAssistant("docs/", embed_fn=my_embed, generate_fn=my_generate)
 answer = assistant.ask("How does authentication work?")
 ```

-###
-
-Models are downloaded automatically on first use (~90MB for default model).
+### With Ollama (nomic-embed-text)

 ```python
 from ragit import RAGAssistant
-from ragit.providers import
+from ragit.providers import OllamaProvider

-# Uses
-assistant = RAGAssistant("docs/", provider=
-
-# Or specify a model
-assistant = RAGAssistant(
-    "docs/",
-    provider=SentenceTransformersProvider(model_name="all-mpnet-base-v2")
-)
+# Uses nomic-embed-text for embeddings (768d)
+assistant = RAGAssistant("docs/", provider=OllamaProvider())
+results = assistant.retrieve("search query")
 ```

-Available models: `all-MiniLM-L6-v2` (384d), `all-mpnet-base-v2` (768d), `paraphrase-MiniLM-L6-v2` (384d)
-
 ## Core API

 ```python
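The Quick Start's custom-function path is worth a concrete illustration. Below is a minimal sketch of what `my_embed` and `my_generate` could look like; the toy character-frequency embedder, the callable signatures, and the stub generator are assumptions for illustration, not code from the package:

```python
# Hypothetical callables for the embed_fn/generate_fn hooks shown above.
# Assumed signatures: embed_fn maps a list of texts to one vector per text;
# generate_fn maps a prompt string to a response string.
from ragit import RAGAssistant


def my_embed(texts: list[str]) -> list[list[float]]:
    # Toy character-frequency embedding; swap in a real model in practice.
    dims = 64
    out = []
    for text in texts:
        vec = [0.0] * dims
        for ch in text:
            vec[ord(ch) % dims] += 1.0
        out.append(vec)
    return out


def my_generate(prompt: str) -> str:
    # Stub generator; a real deployment would call an LLM here.
    return "Based on the provided context: " + prompt[:120]


assistant = RAGAssistant("docs/", embed_fn=my_embed, generate_fn=my_generate)
answer = assistant.ask("How does authentication work?")
```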
{ragit-0.8.1 → ragit-0.10.1}/pyproject.toml

@@ -10,7 +10,7 @@ maintainers = [
     { name = "RODMENA LIMITED", email = "info@rodmena.co.uk" },
 ]
 readme = "README.md"
-requires-python = ">=3.12
+requires-python = ">=3.12"
 classifiers = [
     "Development Status :: 2 - Pre-Alpha",
     "Natural Language :: English",
@@ -40,6 +40,7 @@ dependencies = [
     "tqdm>=4.66.0",
     "trio>=0.24.0",
     "httpx>=0.27.0",
+    "resilient-circuit>=0.4.7",
 ]

 [project.urls]
@@ -59,8 +60,6 @@ dev = [

 test = ["pytest", "pytest-cov", "pytest-mock"]

-transformers = ["sentence-transformers>=2.2.0"]
-
 docs = [
     "sphinx>=7.0",
     "sphinx-rtd-theme>=2.0",
{ragit-0.8.1 → ragit-0.10.1}/ragit/__init__.py

@@ -16,11 +16,7 @@ Quick Start
 >>> assistant = RAGAssistant("docs/", embed_fn=my_embed)
 >>> results = assistant.retrieve("How do I create a REST API?")
 >>>
->>> # With
->>> from ragit.providers import SentenceTransformersProvider
->>> assistant = RAGAssistant("docs/", provider=SentenceTransformersProvider())
->>>
->>> # With Ollama (explicit)
+>>> # With Ollama
 >>> from ragit.providers import OllamaProvider
 >>> assistant = RAGAssistant("docs/", provider=OllamaProvider())
 >>> answer = assistant.ask("How do I create a REST API?")
@@ -63,14 +59,27 @@ from ragit.core.experiment.experiment import (  # noqa: E402
     RagitExperiment,
 )
 from ragit.core.experiment.results import EvaluationResult, ExperimentResults  # noqa: E402
+from ragit.exceptions import (  # noqa: E402
+    ConfigurationError,
+    EvaluationError,
+    ExceptionAggregator,
+    GenerationError,
+    IndexingError,
+    ProviderError,
+    RagitError,
+    RetrievalError,
+)
 from ragit.loaders import (  # noqa: E402
     chunk_by_separator,
     chunk_document,
     chunk_rst_sections,
     chunk_text,
+    deduplicate_documents,
+    generate_document_id,
     load_directory,
     load_text,
 )
+from ragit.monitor import ExecutionMonitor  # noqa: E402
 from ragit.providers import (  # noqa: E402
     BaseEmbeddingProvider,
     BaseLLMProvider,
@@ -89,6 +98,8 @@ __all__ = [
     "chunk_document",
     "chunk_by_separator",
     "chunk_rst_sections",
+    "generate_document_id",
+    "deduplicate_documents",
     # Core classes
     "Document",
     "Chunk",
@@ -97,6 +108,17 @@ __all__ = [
     "FunctionProvider",
     "BaseLLMProvider",
     "BaseEmbeddingProvider",
+    # Exceptions
+    "RagitError",
+    "ConfigurationError",
+    "ProviderError",
+    "IndexingError",
+    "RetrievalError",
+    "GenerationError",
+    "EvaluationError",
+    "ExceptionAggregator",
+    # Monitoring
+    "ExecutionMonitor",
     # Optimization
     "RagitExperiment",
     "BenchmarkQuestion",
@@ -104,13 +126,3 @@ __all__ = [
     "EvaluationResult",
     "ExperimentResults",
 ]
-
-# Conditionally add SentenceTransformersProvider if available
-try:
-    from ragit.providers import (  # noqa: E402
-        SentenceTransformersProvider as SentenceTransformersProvider,
-    )
-
-    __all__ += ["SentenceTransformersProvider"]
-except ImportError:
-    pass
{ragit-0.8.1 → ragit-0.10.1}/ragit/assistant.py

@@ -19,6 +19,7 @@ from numpy.typing import NDArray

 from ragit.core.experiment.experiment import Chunk, Document
 from ragit.loaders import chunk_document, chunk_rst_sections, load_directory, load_text
+from ragit.logging import log_operation
 from ragit.providers.base import BaseEmbeddingProvider, BaseLLMProvider
 from ragit.providers.function_adapter import FunctionProvider

@@ -76,13 +77,9 @@ class RAGAssistant:
     >>> assistant = RAGAssistant(docs, embed_fn=my_embed, generate_fn=my_llm)
     >>> answer = assistant.ask("What is X?")
     >>>
-    >>> # With
+    >>> # With Ollama provider (supports nomic-embed-text)
     >>> from ragit.providers import OllamaProvider
     >>> assistant = RAGAssistant(docs, provider=OllamaProvider())
-    >>>
-    >>> # With SentenceTransformers (offline)
-    >>> from ragit.providers import SentenceTransformersProvider
-    >>> assistant = RAGAssistant(docs, provider=SentenceTransformersProvider())
     """

     def __init__(
@@ -116,8 +113,7 @@ class RAGAssistant:
         # Use explicit provider
         if not isinstance(provider, BaseEmbeddingProvider):
             raise ValueError(
-                "Provider must implement BaseEmbeddingProvider for embeddings. "
-                "Alternatively, provide embed_fn."
+                "Provider must implement BaseEmbeddingProvider for embeddings. Alternatively, provide embed_fn."
             )
         self._embedding_provider = provider
         if isinstance(provider, BaseLLMProvider):
@@ -127,8 +123,7 @@ class RAGAssistant:
                 "Must provide embed_fn or provider for embeddings. "
                 "Examples:\n"
                 "  RAGAssistant(docs, embed_fn=my_embed_function)\n"
-                "  RAGAssistant(docs, provider=OllamaProvider())
-                "  RAGAssistant(docs, provider=SentenceTransformersProvider())"
+                "  RAGAssistant(docs, provider=OllamaProvider())"
             )

         self.embedding_model = embedding_model or "default"
@@ -156,7 +151,20 @@ class RAGAssistant:

         if path.is_dir():
             docs: list[Document] = []
-            for pattern in (
+            for pattern in (
+                "*.txt",
+                "*.md",
+                "*.rst",
+                "*.py",
+                "*.js",
+                "*.ts",
+                "*.go",
+                "*.java",
+                "*.c",
+                "*.cpp",
+                "*.h",
+                "*.hpp",
+            ):
                 docs.extend(load_directory(path, pattern))
             return docs

@@ -194,6 +202,129 @@ class RAGAssistant:
         self._chunks = tuple(all_chunks)
         self._embedding_matrix = embedding_matrix / norms

+    def add_documents(self, documents: list[Document] | str | Path) -> int:
+        """Add documents to the existing index incrementally.
+
+        Args:
+            documents: Documents to add.
+
+        Returns:
+            Number of chunks added.
+        """
+        new_docs = self._load_documents(documents)
+        if not new_docs:
+            return 0
+
+        self.documents.extend(new_docs)
+
+        # Chunk new docs
+        new_chunks: list[Chunk] = []
+        for doc in new_docs:
+            if doc.metadata.get("filename", "").endswith(".rst"):
+                chunks = chunk_rst_sections(doc.content, doc.id)
+            else:
+                chunks = chunk_document(doc, self.chunk_size, self.chunk_overlap)
+            new_chunks.extend(chunks)
+
+        if not new_chunks:
+            return 0
+
+        # Embed new chunks
+        texts = [chunk.content for chunk in new_chunks]
+        responses = self._embedding_provider.embed_batch(texts, self.embedding_model)
+
+        new_matrix = np.array([response.embedding for response in responses], dtype=np.float64)
+
+        # Normalize
+        norms = np.linalg.norm(new_matrix, axis=1, keepdims=True)
+        norms[norms == 0] = 1
+        new_matrix_norm = new_matrix / norms
+
+        # Update state
+        current_chunks = list(self._chunks)
+        current_chunks.extend(new_chunks)
+        self._chunks = tuple(current_chunks)
+
+        if self._embedding_matrix is None:
+            self._embedding_matrix = new_matrix_norm
+        else:
+            self._embedding_matrix = np.vstack((self._embedding_matrix, new_matrix_norm))
+
+        return len(new_chunks)
+
+    def remove_documents(self, source_path_pattern: str) -> int:
+        """Remove documents matching a source path pattern.
+
+        Args:
+            source_path_pattern: Glob pattern to match 'source' metadata.
+
+        Returns:
+            Number of chunks removed.
+        """
+        import fnmatch
+
+        if not self._chunks:
+            return 0
+
+        indices_to_keep = []
+        kept_chunks = []
+        removed_count = 0
+
+        for i, chunk in enumerate(self._chunks):
+            source = chunk.metadata.get("source", "")
+            if not source or not fnmatch.fnmatch(source, source_path_pattern):
+                indices_to_keep.append(i)
+                kept_chunks.append(chunk)
+            else:
+                removed_count += 1
+
+        if removed_count == 0:
+            return 0
+
+        self._chunks = tuple(kept_chunks)
+
+        if self._embedding_matrix is not None:
+            if not kept_chunks:
+                self._embedding_matrix = None
+            else:
+                self._embedding_matrix = self._embedding_matrix[indices_to_keep]
+
+        # Also remove from self.documents
+        self.documents = [
+            doc for doc in self.documents if not fnmatch.fnmatch(doc.metadata.get("source", ""), source_path_pattern)
+        ]
+
+        return removed_count
+
+    def update_documents(self, documents: list[Document] | str | Path) -> int:
+        """Update existing documents (remove old, add new).
+
+        Uses document source path to identify what to remove.
+
+        Args:
+            documents: New versions of documents.
+
+        Returns:
+            Number of chunks added.
+        """
+        new_docs = self._load_documents(documents)
+        if not new_docs:
+            return 0
+
+        # Identify sources to remove
+        sources_to_remove = set()
+        for doc in new_docs:
+            source = doc.metadata.get("source")
+            if source:
+                sources_to_remove.add(source)
+
+        # Remove old versions
+        for source in sources_to_remove:
+            self.remove_documents(source)
+
+        # Add new versions
+        return self.add_documents(new_docs)
+
     def retrieve(self, query: str, top_k: int = 3) -> list[tuple[Chunk, float]]:
         """
         Retrieve relevant chunks for a query.
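The new `add_documents` / `remove_documents` / `update_documents` trio makes the index mutable in place: new chunks are embedded, L2-normalized, and stacked onto the existing matrix rather than triggering a full rebuild. A usage sketch (the file paths are illustrative; per the signatures above, the methods also accept pre-built `Document` lists):

```python
# Sketch: incremental index maintenance with the methods added in 0.10.1.
# The paths below are illustrative placeholders.
from ragit import RAGAssistant
from ragit.providers import OllamaProvider

assistant = RAGAssistant("docs/", provider=OllamaProvider())

# Index one new file; only its chunks are embedded and appended.
added = assistant.add_documents("docs/new_page.md")

# Drop every chunk whose 'source' metadata matches a glob pattern.
removed = assistant.remove_documents("docs/deprecated/*")

# Re-index an edited file: chunks from its source are removed, then re-added.
assistant.update_documents("docs/changed_page.md")
print(f"+{added} chunks, -{removed} chunks")
```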
@@ -243,6 +374,190 @@ class RAGAssistant:

         return [(self._chunks[i], float(similarities[i])) for i in top_indices]

+    def retrieve_with_context(
+        self,
+        query: str,
+        top_k: int = 3,
+        window_size: int = 1,
+        min_score: float = 0.0,
+    ) -> list[tuple[Chunk, float]]:
+        """
+        Retrieve chunks with adjacent context expansion (window search).
+
+        For each retrieved chunk, also includes adjacent chunks from the
+        same document to provide more context. This is useful when relevant
+        information spans multiple chunks.
+
+        Pattern inspired by ai4rag window_search.
+
+        Parameters
+        ----------
+        query : str
+            Search query.
+        top_k : int
+            Number of initial chunks to retrieve (default: 3).
+        window_size : int
+            Number of adjacent chunks to include on each side (default: 1).
+            Set to 0 to disable window expansion.
+        min_score : float
+            Minimum similarity score threshold (default: 0.0).
+
+        Returns
+        -------
+        list[tuple[Chunk, float]]
+            List of (chunk, similarity_score) tuples, sorted by relevance.
+            Adjacent chunks have slightly lower scores.
+
+        Examples
+        --------
+        >>> # Get chunks with 1 adjacent chunk on each side
+        >>> results = assistant.retrieve_with_context("query", window_size=1)
+        >>> for chunk, score in results:
+        ...     print(f"{score:.2f}: {chunk.content[:50]}...")
+        """
+        with log_operation("retrieve_with_context", query_len=len(query), top_k=top_k, window_size=window_size) as ctx:
+            # Get initial results (more than top_k to account for filtering)
+            results = self.retrieve(query, top_k * 2)
+
+            # Apply minimum score threshold
+            if min_score > 0:
+                results = [(chunk, score) for chunk, score in results if score >= min_score]
+
+            if window_size == 0 or not results:
+                ctx["expanded_chunks"] = len(results)
+                return results[:top_k]
+
+            # Build chunk index for fast lookup
+            chunk_to_idx = {id(chunk): i for i, chunk in enumerate(self._chunks)}
+
+            expanded_results: list[tuple[Chunk, float]] = []
+            seen_indices: set[int] = set()
+
+            for chunk, score in results[:top_k]:
+                chunk_idx = chunk_to_idx.get(id(chunk))
+                if chunk_idx is None:
+                    expanded_results.append((chunk, score))
+                    continue
+
+                # Get window of adjacent chunks from same document
+                start_idx = max(0, chunk_idx - window_size)
+                end_idx = min(len(self._chunks), chunk_idx + window_size + 1)
+
+                for idx in range(start_idx, end_idx):
+                    if idx in seen_indices:
+                        continue
+
+                    adjacent_chunk = self._chunks[idx]
+                    # Only include adjacent chunks from same document
+                    if adjacent_chunk.doc_id == chunk.doc_id:
+                        seen_indices.add(idx)
+                        # Original chunk keeps full score, adjacent get 80%
+                        adj_score = score if idx == chunk_idx else score * 0.8
+                        expanded_results.append((adjacent_chunk, adj_score))
+
+            # Sort by score (highest first)
+            expanded_results.sort(key=lambda x: (-x[1], self._chunks.index(x[0]) if x[0] in self._chunks else 0))
+            ctx["expanded_chunks"] = len(expanded_results)
+
+            return expanded_results
+
+    def get_context_with_window(
+        self,
+        query: str,
+        top_k: int = 3,
+        window_size: int = 1,
+        min_score: float = 0.0,
+    ) -> str:
+        """
+        Get formatted context with adjacent chunk expansion.
+
+        Merges overlapping text from adjacent chunks intelligently.
+
+        Parameters
+        ----------
+        query : str
+            Search query.
+        top_k : int
+            Number of initial chunks to retrieve.
+        window_size : int
+            Number of adjacent chunks on each side.
+        min_score : float
+            Minimum similarity score threshold.
+
+        Returns
+        -------
+        str
+            Formatted context string with merged chunks.
+        """
+        results = self.retrieve_with_context(query, top_k, window_size, min_score)
+
+        if not results:
+            return ""
+
+        # Group chunks by document to merge properly
+        doc_chunks: dict[str, list[tuple[Chunk, float]]] = {}
+        for chunk, score in results:
+            doc_id = chunk.doc_id or "unknown"
+            if doc_id not in doc_chunks:
+                doc_chunks[doc_id] = []
+            doc_chunks[doc_id].append((chunk, score))
+
+        merged_sections: list[str] = []
+
+        for _doc_id, chunks in doc_chunks.items():
+            # Sort chunks by their position in the original list
+            chunks.sort(key=lambda x: self._chunks.index(x[0]) if x[0] in self._chunks else 0)
+
+            # Merge overlapping text
+            merged_content = []
+            for chunk, _ in chunks:
+                if merged_content:
+                    # Check for overlap with previous chunk
+                    prev_content = merged_content[-1]
+                    non_overlapping = self._get_non_overlapping_text(prev_content, chunk.content)
+                    if non_overlapping != chunk.content:
+                        # Found overlap, extend previous chunk
+                        merged_content[-1] = prev_content + non_overlapping
+                    else:
+                        # No overlap, add as new section
+                        merged_content.append(chunk.content)
+                else:
+                    merged_content.append(chunk.content)
+
+            merged_sections.append("\n".join(merged_content))
+
+        return "\n\n---\n\n".join(merged_sections)
+
+    def _get_non_overlapping_text(self, str1: str, str2: str) -> str:
+        """
+        Find non-overlapping portion of str2 when appending after str1.
+
+        Detects overlap where the end of str1 matches the beginning of str2,
+        and returns only the non-overlapping portion of str2.
+
+        Pattern from ai4rag vector_store/utils.py.
+
+        Parameters
+        ----------
+        str1 : str
+            First string (previous content).
+        str2 : str
+            Second string (content to potentially append).
+
+        Returns
+        -------
+        str
+            Non-overlapping portion of str2, or full str2 if no overlap.
+        """
+        # Limit overlap search to avoid O(n^2) for large strings
+        max_overlap = min(len(str1), len(str2), 200)
+
+        for i in range(max_overlap, 0, -1):
+            if str1[-i:] == str2[:i]:
+                return str2[i:]
+
+        return str2
+
     def get_context(self, query: str, top_k: int = 3) -> str:
         """
         Get formatted context string from retrieved chunks.