ragit 0.7.5__py3-none-any.whl → 0.8.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ragit/__init__.py +36 -9
- ragit/assistant.py +106 -23
- ragit/config.py +15 -6
- ragit/core/experiment/experiment.py +85 -20
- ragit/providers/__init__.py +30 -3
- ragit/providers/function_adapter.py +237 -0
- ragit/providers/ollama.py +1 -1
- ragit/providers/sentence_transformers.py +225 -0
- ragit/version.py +1 -1
- ragit-0.8.1.dist-info/METADATA +166 -0
- ragit-0.8.1.dist-info/RECORD +20 -0
- ragit-0.7.5.dist-info/METADATA +0 -553
- ragit-0.7.5.dist-info/RECORD +0 -18
- {ragit-0.7.5.dist-info → ragit-0.8.1.dist-info}/WHEEL +0 -0
- {ragit-0.7.5.dist-info → ragit-0.8.1.dist-info}/licenses/LICENSE +0 -0
- {ragit-0.7.5.dist-info → ragit-0.8.1.dist-info}/top_level.txt +0 -0
ragit/providers/function_adapter.py ADDED
@@ -0,0 +1,237 @@
+#
+# Copyright RODMENA LIMITED 2025
+# SPDX-License-Identifier: Apache-2.0
+#
+"""
+Function-based provider adapter for pluggable embedding and LLM functions.
+
+This module provides a simple adapter that wraps user-provided functions
+into the provider interface, enabling easy integration with custom
+embedding and LLM implementations.
+"""
+
+import inspect
+from collections.abc import Callable
+
+from ragit.providers.base import (
+    BaseEmbeddingProvider,
+    BaseLLMProvider,
+    EmbeddingResponse,
+    LLMResponse,
+)
+
+
+class FunctionProvider(BaseLLMProvider, BaseEmbeddingProvider):
+    """
+    Adapter that wraps user-provided embedding and generation functions.
+
+    This provider allows users to bring their own embedding and/or LLM functions
+    without implementing the full provider interface.
+
+    Parameters
+    ----------
+    embed_fn : Callable[[str], list[float]], optional
+        Function that takes text and returns an embedding vector.
+        Example: `lambda text: openai.embeddings.create(input=text).data[0].embedding`
+    generate_fn : Callable, optional
+        Function for text generation. Supports two signatures:
+        - (prompt: str) -> str
+        - (prompt: str, system_prompt: str) -> str
+    embedding_dimensions : int, optional
+        Embedding dimensions. Auto-detected on first call if not provided.
+
+    Examples
+    --------
+    >>> # Simple embedding function
+    >>> def my_embed(text: str) -> list[float]:
+    ...     return openai.embeddings.create(input=text).data[0].embedding
+    >>>
+    >>> # Use with RAGAssistant (retrieval-only)
+    >>> assistant = RAGAssistant(docs, embed_fn=my_embed)
+    >>> results = assistant.retrieve("query")
+    >>>
+    >>> # With LLM for full RAG
+    >>> def my_llm(prompt: str, system_prompt: str = None) -> str:
+    ...     return openai.chat.completions.create(
+    ...         messages=[{"role": "user", "content": prompt}]
+    ...     ).choices[0].message.content
+    >>>
+    >>> assistant = RAGAssistant(docs, embed_fn=my_embed, generate_fn=my_llm)
+    >>> answer = assistant.ask("What is X?")
+    """
+
+    def __init__(
+        self,
+        embed_fn: Callable[[str], list[float]] | None = None,
+        generate_fn: Callable[..., str] | None = None,
+        embedding_dimensions: int | None = None,
+    ) -> None:
+        self._embed_fn = embed_fn
+        self._generate_fn = generate_fn
+        self._embedding_dimensions = embedding_dimensions
+        self._generate_fn_signature: int | None = None  # Number of args (1 or 2)
+
+        # Detect generate_fn signature if provided
+        if generate_fn is not None:
+            self._detect_generate_signature()
+
+    def _detect_generate_signature(self) -> None:
+        """Detect whether generate_fn accepts 1 or 2 arguments."""
+        if self._generate_fn is None:
+            return
+
+        sig = inspect.signature(self._generate_fn)
+        params = [
+            p
+            for p in sig.parameters.values()
+            if p.default is inspect.Parameter.empty and p.kind not in (p.VAR_POSITIONAL, p.VAR_KEYWORD)
+        ]
+        # Count required parameters
+        required_count = len(params)
+
+        if required_count == 1:
+            self._generate_fn_signature = 1
+        else:
+            # Assume 2 args if more than 1 required or if has optional args
+            self._generate_fn_signature = 2
+
+    @property
+    def provider_name(self) -> str:
+        return "function"
+
+    @property
+    def dimensions(self) -> int:
+        if self._embedding_dimensions is None:
+            raise ValueError("Embedding dimensions not yet determined. Call embed() first or provide dimensions.")
+        return self._embedding_dimensions
+
+    @property
+    def has_embedding(self) -> bool:
+        """Check if embedding function is configured."""
+        return self._embed_fn is not None
+
+    @property
+    def has_llm(self) -> bool:
+        """Check if LLM generation function is configured."""
+        return self._generate_fn is not None
+
+    def is_available(self) -> bool:
+        """Check if the provider has at least one function configured."""
+        return self._embed_fn is not None or self._generate_fn is not None
+
+    def embed(self, text: str, model: str = "") -> EmbeddingResponse:
+        """
+        Generate embedding using the provided function.
+
+        Parameters
+        ----------
+        text : str
+            Text to embed.
+        model : str
+            Model identifier (ignored, kept for interface compatibility).
+
+        Returns
+        -------
+        EmbeddingResponse
+            The embedding response.
+
+        Raises
+        ------
+        ValueError
+            If no embedding function was provided.
+        """
+        if self._embed_fn is None:
+            raise ValueError("No embedding function configured. Provide embed_fn to use embeddings.")
+
+        raw_embedding = self._embed_fn(text)
+
+        # Convert to tuple for immutability
+        embedding_tuple: tuple[float, ...] = tuple(raw_embedding)
+
+        # Auto-detect dimensions on first call
+        if self._embedding_dimensions is None:
+            self._embedding_dimensions = len(embedding_tuple)
+
+        return EmbeddingResponse(
+            embedding=embedding_tuple,
+            model=model or "function",
+            provider=self.provider_name,
+            dimensions=len(embedding_tuple),
+        )
+
+    def embed_batch(self, texts: list[str], model: str = "") -> list[EmbeddingResponse]:
+        """
+        Generate embeddings for multiple texts.
+
+        Iterates over embed_fn for each text. For providers with native batch
+        support, users should implement their own BatchEmbeddingProvider.
+
+        Parameters
+        ----------
+        texts : list[str]
+            Texts to embed.
+        model : str
+            Model identifier (ignored).
+
+        Returns
+        -------
+        list[EmbeddingResponse]
+            List of embedding responses.
+        """
+        return [self.embed(text, model) for text in texts]
+
+    def generate(
+        self,
+        prompt: str,
+        model: str = "",
+        system_prompt: str | None = None,
+        temperature: float = 0.7,
+        max_tokens: int | None = None,
+    ) -> LLMResponse:
+        """
+        Generate text using the provided function.
+
+        Parameters
+        ----------
+        prompt : str
+            The user prompt.
+        model : str
+            Model identifier (ignored, kept for interface compatibility).
+        system_prompt : str, optional
+            System prompt for context.
+        temperature : float
+            Sampling temperature (ignored if function doesn't support it).
+        max_tokens : int, optional
+            Maximum tokens (ignored if function doesn't support it).
+
+        Returns
+        -------
+        LLMResponse
+            The generated response.
+
+        Raises
+        ------
+        NotImplementedError
+            If no generation function was provided.
+        """
+        if self._generate_fn is None:
+            raise NotImplementedError(
+                "No LLM configured. Provide generate_fn or a provider with LLM support "
+                "to use ask(), generate(), or generate_code() methods."
+            )
+
+        # Call with appropriate signature
+        if self._generate_fn_signature == 1:
+            # Single argument - prepend system prompt to prompt if provided
+            full_prompt = f"{system_prompt}\n\n{prompt}" if system_prompt else prompt
+            text = self._generate_fn(full_prompt)
+        else:
+            # Two arguments - pass separately
+            text = self._generate_fn(prompt, system_prompt)
+
+        return LLMResponse(
+            text=text,
+            model=model or "function",
+            provider=self.provider_name,
+            usage=None,
+        )
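To make the adapter's behaviour concrete, here is a minimal, hypothetical sketch of driving `FunctionProvider` directly; the toy `my_embed`/`my_generate` callables are placeholders rather than ragit code, and the point is the dimension auto-detection and one-argument signature handling implemented above.

```python
from ragit.providers.function_adapter import FunctionProvider

# Placeholder callables standing in for a real embedding API and LLM.
def my_embed(text: str) -> list[float]:
    return [float(len(text)), 0.0, 1.0]

def my_generate(prompt: str, system_prompt: str | None = None) -> str:
    return f"echo: {prompt}"

provider = FunctionProvider(embed_fn=my_embed, generate_fn=my_generate)

resp = provider.embed("hello")
print(resp.dimensions)      # 3 -- auto-detected from the first embed() call
print(provider.dimensions)  # 3 -- cached on the provider afterwards

# my_generate has only one *required* parameter, so _detect_generate_signature()
# records the 1-argument form and generate() prepends the system prompt itself.
out = provider.generate("What is X?", system_prompt="Answer briefly.")
print(out.text)             # "echo: Answer briefly.\n\nWhat is X?"
```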
ragit/providers/ollama.py CHANGED
@@ -158,7 +158,7 @@ class OllamaProvider(BaseLLMProvider, BaseEmbeddingProvider):
                 f"{self.base_url}/api/tags",
                 timeout=5,
             )
-            return response.status_code == 200
+            return bool(response.status_code == 200)
         except requests.RequestException:
             return False

ragit/providers/sentence_transformers.py ADDED
@@ -0,0 +1,225 @@
+#
+# Copyright RODMENA LIMITED 2025
+# SPDX-License-Identifier: Apache-2.0
+#
+"""
+SentenceTransformers provider for offline embedding.
+
+This module provides embedding capabilities using the sentence-transformers
+library, enabling fully offline RAG pipelines without API dependencies.
+
+Requires: pip install ragit[transformers]
+"""
+
+from typing import TYPE_CHECKING
+
+from ragit.providers.base import (
+    BaseEmbeddingProvider,
+    EmbeddingResponse,
+)
+
+if TYPE_CHECKING:
+    from sentence_transformers import SentenceTransformer
+
+# Lazy import flag
+_sentence_transformers_available: bool | None = None
+_model_cache: dict[str, "SentenceTransformer"] = {}
+
+
+def _check_sentence_transformers() -> bool:
+    """Check if sentence-transformers is available."""
+    global _sentence_transformers_available
+    if _sentence_transformers_available is None:
+        try:
+            from sentence_transformers import SentenceTransformer  # noqa: F401
+
+            _sentence_transformers_available = True
+        except ImportError:
+            _sentence_transformers_available = False
+    return _sentence_transformers_available
+
+
+def _get_model(model_name: str, device: str | None = None) -> "SentenceTransformer":
+    """Get or create a cached SentenceTransformer model."""
+    cache_key = f"{model_name}:{device or 'auto'}"
+    if cache_key not in _model_cache:
+        from sentence_transformers import SentenceTransformer
+
+        _model_cache[cache_key] = SentenceTransformer(model_name, device=device)
+    return _model_cache[cache_key]
+
+
+class SentenceTransformersProvider(BaseEmbeddingProvider):
+    """
+    Embedding provider using sentence-transformers for offline operation.
+
+    This provider uses the sentence-transformers library to generate embeddings
+    locally without requiring any API calls. It's ideal for:
+    - Offline/air-gapped environments
+    - Development and testing
+    - Cost-sensitive applications
+    - Privacy-sensitive use cases
+
+    Parameters
+    ----------
+    model_name : str
+        HuggingFace model name. Default: "all-MiniLM-L6-v2" (fast, 384 dims).
+        Other popular options:
+        - "all-mpnet-base-v2" (768 dims, higher quality)
+        - "paraphrase-MiniLM-L6-v2" (384 dims)
+        - "multi-qa-MiniLM-L6-cos-v1" (384 dims, optimized for QA)
+    device : str, optional
+        Device to run on ("cpu", "cuda", "mps"). Auto-detected if None.
+
+    Examples
+    --------
+    >>> # Basic usage
+    >>> from ragit.providers import SentenceTransformersProvider
+    >>> provider = SentenceTransformersProvider()
+    >>>
+    >>> # With RAGAssistant (retrieval-only)
+    >>> assistant = RAGAssistant(docs, provider=provider)
+    >>> results = assistant.retrieve("query")
+    >>>
+    >>> # Custom model
+    >>> provider = SentenceTransformersProvider(model_name="all-mpnet-base-v2")
+
+    Raises
+    ------
+    ImportError
+        If sentence-transformers is not installed.
+
+    Note
+    ----
+    Install with: pip install ragit[transformers]
+    """
+
+    # Known model dimensions for common models
+    MODEL_DIMENSIONS: dict[str, int] = {
+        "all-MiniLM-L6-v2": 384,
+        "all-mpnet-base-v2": 768,
+        "paraphrase-MiniLM-L6-v2": 384,
+        "multi-qa-MiniLM-L6-cos-v1": 384,
+        "all-distilroberta-v1": 768,
+        "paraphrase-multilingual-MiniLM-L12-v2": 384,
+    }
+
+    def __init__(
+        self,
+        model_name: str = "all-MiniLM-L6-v2",
+        device: str | None = None,
+    ) -> None:
+        if not _check_sentence_transformers():
+            raise ImportError(
+                "sentence-transformers is required for SentenceTransformersProvider. "
+                "Install with: pip install ragit[transformers]"
+            )
+
+        self._model_name = model_name
+        self._device = device
+        self._model: SentenceTransformer | None = None  # Lazy loaded
+        self._dimensions: int | None = self.MODEL_DIMENSIONS.get(model_name)
+
+    def _ensure_model(self) -> "SentenceTransformer":
+        """Ensure model is loaded (lazy loading)."""
+        if self._model is None:
+            model = _get_model(self._model_name, self._device)
+            self._model = model
+            # Update dimensions from actual model
+            self._dimensions = model.get_sentence_embedding_dimension()
+        return self._model
+
+    @property
+    def provider_name(self) -> str:
+        return "sentence_transformers"
+
+    @property
+    def dimensions(self) -> int:
+        if self._dimensions is None:
+            # Load model to get dimensions
+            self._ensure_model()
+        return self._dimensions or 384  # Fallback
+
+    @property
+    def model_name(self) -> str:
+        """Return the model name being used."""
+        return self._model_name
+
+    def is_available(self) -> bool:
+        """Check if sentence-transformers is installed and model can be loaded."""
+        if not _check_sentence_transformers():
+            return False
+        try:
+            self._ensure_model()
+            return True
+        except Exception:
+            return False
+
+    def embed(self, text: str, model: str = "") -> EmbeddingResponse:
+        """
+        Generate embedding for text.
+
+        Parameters
+        ----------
+        text : str
+            Text to embed.
+        model : str
+            Model identifier (ignored, uses model from constructor).
+
+        Returns
+        -------
+        EmbeddingResponse
+            The embedding response.
+        """
+        model_instance = self._ensure_model()
+        embedding = model_instance.encode(text, convert_to_numpy=True)
+
+        # Convert to tuple
+        embedding_tuple = tuple(float(x) for x in embedding)
+
+        return EmbeddingResponse(
+            embedding=embedding_tuple,
+            model=self._model_name,
+            provider=self.provider_name,
+            dimensions=len(embedding_tuple),
+        )
+
+    def embed_batch(self, texts: list[str], model: str = "") -> list[EmbeddingResponse]:
+        """
+        Generate embeddings for multiple texts efficiently.
+
+        Uses batch encoding for better performance.
+
+        Parameters
+        ----------
+        texts : list[str]
+            Texts to embed.
+        model : str
+            Model identifier (ignored).
+
+        Returns
+        -------
+        list[EmbeddingResponse]
+            List of embedding responses.
+        """
+        if not texts:
+            return []
+
+        model_instance = self._ensure_model()
+
+        # Batch encode for efficiency
+        embeddings = model_instance.encode(texts, convert_to_numpy=True, show_progress_bar=False)
+
+        results = []
+        for embedding in embeddings:
+            embedding_tuple = tuple(float(x) for x in embedding)
+            results.append(
+                EmbeddingResponse(
+                    embedding=embedding_tuple,
+                    model=self._model_name,
+                    provider=self.provider_name,
+                    dimensions=len(embedding_tuple),
+                )
+            )
+
+        return results
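As a quick orientation to the new provider, here is a short sketch of calling it directly rather than through RAGAssistant; it assumes sentence-transformers is installed (`pip install ragit[transformers]`) and that the default model can be downloaded on first use.

```python
from ragit.providers import SentenceTransformersProvider

provider = SentenceTransformersProvider()  # all-MiniLM-L6-v2, loaded lazily
print(provider.dimensions)                 # 384, looked up in MODEL_DIMENSIONS without loading the model

single = provider.embed("Retrieval-augmented generation")
print(single.provider, single.dimensions)  # sentence_transformers 384

batch = provider.embed_batch(["first chunk", "second chunk"])
print(len(batch))                          # 2 -- encoded in a single model.encode() call
```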
ragit/version.py CHANGED

ragit-0.8.1.dist-info/METADATA ADDED
@@ -0,0 +1,166 @@
+Metadata-Version: 2.4
+Name: ragit
+Version: 0.8.1
+Summary: Automatic RAG Pattern Optimization Engine
+Author: RODMENA LIMITED
+Maintainer-email: RODMENA LIMITED <info@rodmena.co.uk>
+License-Expression: Apache-2.0
+Project-URL: Homepage, https://github.com/rodmena-limited/ragit
+Project-URL: Repository, https://github.com/rodmena-limited/ragit
+Project-URL: Issues, https://github.com/rodmena-limited/ragit/issues
+Keywords: AI,RAG,LLM,GenAI,Optimization,Ollama
+Classifier: Development Status :: 2 - Pre-Alpha
+Classifier: Natural Language :: English
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Classifier: Programming Language :: Python :: 3.14
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Operating System :: POSIX :: Linux
+Requires-Python: <3.14,>=3.12
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: requests>=2.31.0
+Requires-Dist: numpy>=1.26.0
+Requires-Dist: pandas>=2.2.0
+Requires-Dist: pydantic>=2.0.0
+Requires-Dist: python-dotenv>=1.0.0
+Requires-Dist: scikit-learn>=1.5.0
+Requires-Dist: tqdm>=4.66.0
+Requires-Dist: trio>=0.24.0
+Requires-Dist: httpx>=0.27.0
+Provides-Extra: dev
+Requires-Dist: ragit[test]; extra == "dev"
+Requires-Dist: pytest; extra == "dev"
+Requires-Dist: pytest-cov; extra == "dev"
+Requires-Dist: issuedb[web]; extra == "dev"
+Requires-Dist: ruff; extra == "dev"
+Requires-Dist: mypy; extra == "dev"
+Provides-Extra: test
+Requires-Dist: pytest; extra == "test"
+Requires-Dist: pytest-cov; extra == "test"
+Requires-Dist: pytest-mock; extra == "test"
+Provides-Extra: transformers
+Requires-Dist: sentence-transformers>=2.2.0; extra == "transformers"
+Provides-Extra: docs
+Requires-Dist: sphinx>=7.0; extra == "docs"
+Requires-Dist: sphinx-rtd-theme>=2.0; extra == "docs"
+Requires-Dist: sphinx-copybutton>=0.5; extra == "docs"
+Dynamic: license-file
+
+# ragit
+
+RAG toolkit for Python. Document loading, chunking, vector search, LLM integration.
+
+## Installation
+
+```bash
+pip install ragit
+
+# For offline embedding
+pip install ragit[transformers]
+```
+
+## Quick Start
+
+You must provide an embedding source: custom function, SentenceTransformers, or any provider.
+
+### Custom Embedding Function
+
+```python
+from ragit import RAGAssistant
+
+def my_embed(text: str) -> list[float]:
+    # Use any embedding API: OpenAI, Cohere, HuggingFace, etc.
+    return embedding_vector
+
+assistant = RAGAssistant("docs/", embed_fn=my_embed)
+results = assistant.retrieve("search query")
+```
+
+### With LLM for Q&A
+
+```python
+def my_embed(text: str) -> list[float]:
+    return embedding_vector
+
+def my_generate(prompt: str, system_prompt: str = "") -> str:
+    return llm_response
+
+assistant = RAGAssistant("docs/", embed_fn=my_embed, generate_fn=my_generate)
+answer = assistant.ask("How does authentication work?")
+```
+
+### Offline Embedding (SentenceTransformers)
+
+Models are downloaded automatically on first use (~90MB for default model).
+
+```python
+from ragit import RAGAssistant
+from ragit.providers import SentenceTransformersProvider
+
+# Uses all-MiniLM-L6-v2 by default
+assistant = RAGAssistant("docs/", provider=SentenceTransformersProvider())
+
+# Or specify a model
+assistant = RAGAssistant(
+    "docs/",
+    provider=SentenceTransformersProvider(model_name="all-mpnet-base-v2")
+)
+```
+
+Available models: `all-MiniLM-L6-v2` (384d), `all-mpnet-base-v2` (768d), `paraphrase-MiniLM-L6-v2` (384d)
+
+## Core API
+
+```python
+assistant = RAGAssistant(
+    documents,        # Path, list of Documents, or list of Chunks
+    embed_fn=...,     # Embedding function: (str) -> list[float]
+    generate_fn=...,  # LLM function: (prompt, system_prompt) -> str
+    provider=...,     # Or use a provider instead of functions
+    chunk_size=512,
+    chunk_overlap=50
+)
+
+results = assistant.retrieve(query, top_k=3)      # [(Chunk, score), ...]
+context = assistant.get_context(query, top_k=3)   # Formatted string
+answer = assistant.ask(question, top_k=3)         # Requires generate_fn/LLM
+code = assistant.generate_code(request)           # Requires generate_fn/LLM
+```
+
+## Document Loading
+
+```python
+from ragit import load_text, load_directory, chunk_text
+
+doc = load_text("file.md")
+docs = load_directory("docs/", "*.md")
+chunks = chunk_text(text, chunk_size=512, chunk_overlap=50, doc_id="id")
+```
+
+## Hyperparameter Optimization
+
+```python
+from ragit import RagitExperiment, Document, BenchmarkQuestion
+
+def my_embed(text: str) -> list[float]:
+    return embedding_vector
+
+def my_generate(prompt: str, system_prompt: str = "") -> str:
+    return llm_response
+
+docs = [Document(id="1", content="...")]
+benchmark = [BenchmarkQuestion(question="...", ground_truth="...")]
+
+experiment = RagitExperiment(
+    docs, benchmark,
+    embed_fn=my_embed,
+    generate_fn=my_generate
+)
+results = experiment.run(max_configs=20)
+print(results[0])  # Best config
+```
+
+## License
+
+Apache-2.0 - RODMENA LIMITED

ragit-0.8.1.dist-info/RECORD ADDED
@@ -0,0 +1,20 @@
+ragit/__init__.py,sha256=JUkL7ivgr4o4nZak-96P1C-pzKdNuN3Tl0X0WvpeXBU,3142
+ragit/assistant.py,sha256=FW8LVqEOA1nemTMdTZhb79aONeHsQM8tHADxCQ47p1Y,14705
+ragit/config.py,sha256=7XnueNO4h22ibeWd1akHnfVoGSD8xE5vuOCMYeQOOU4,1898
+ragit/loaders.py,sha256=keusuPzXPBiLDVj4hKfPCcge-rm-cnzNRk50fGXvTJs,5571
+ragit/version.py,sha256=_qpX4vMVMSqb-_4jdv6EZJ3tkvFsyu_Pj00vRC6T2sg,97
+ragit/core/__init__.py,sha256=j53PFfoSMXwSbK1rRHpMbo8mX2i4R1LJ5kvTxBd7-0w,100
+ragit/core/experiment/__init__.py,sha256=4vAPOOYlY5Dcr2gOolyhBSPGIUxZKwEkgQffxS9BodA,452
+ragit/core/experiment/experiment.py,sha256=WQZWRLbLPuGpG0tpCZCEz3sKgSv4CNimmABbOLR_oKs,19314
+ragit/core/experiment/results.py,sha256=KHpN3YSLJ83_JUfIMccRPS-q7LEt0S9p8ehDRawk_4k,3487
+ragit/providers/__init__.py,sha256=tKWjUV31OZprD8k9aUUidtDMg7C_dWBXN7igtxeB8Ec,1339
+ragit/providers/base.py,sha256=MJ8mVeXuGWhkX2XGTbkWIY3cVoTOPr4h5XBXw8rAX2Q,3434
+ragit/providers/function_adapter.py,sha256=A-TQhBgBWbuO_w1sy795Dxep1FOCBpAlWpXCKVQD8rc,7778
+ragit/providers/ollama.py,sha256=YJH5a9nQHnP0NrIK7G9PqjV5A53f9JxmEJDAJ6d297M,15410
+ragit/providers/sentence_transformers.py,sha256=tTkd4HpE1MyfFJAwur-a7w-GlBxe93HlyM_dRffDrdY,6996
+ragit/utils/__init__.py,sha256=-UsE5oJSnmEnBDswl-ph0A09Iu8yKNbPhd1-_7Lcb8Y,3051
+ragit-0.8.1.dist-info/licenses/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
+ragit-0.8.1.dist-info/METADATA,sha256=OaOeM-ujuMlkfjiNcXRUC6JpIApFgkvP536nHsaLW0g,4888
+ragit-0.8.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ragit-0.8.1.dist-info/top_level.txt,sha256=pkPbG7yrw61wt9_y_xcLE2vq2a55fzockASD0yq0g4s,6
+ragit-0.8.1.dist-info/RECORD,,