ragit-0.7-py3-none-any.whl → ragit-0.7.2-py3-none-any.whl
- ragit/__init__.py +89 -2
- ragit/assistant.py +359 -0
- ragit/config.py +51 -0
- ragit/core/__init__.py +5 -0
- ragit/core/experiment/__init__.py +22 -0
- ragit/core/experiment/experiment.py +507 -0
- ragit/core/experiment/results.py +131 -0
- ragit/loaders.py +219 -0
- ragit/providers/__init__.py +20 -0
- ragit/providers/base.py +147 -0
- ragit/providers/ollama.py +284 -0
- ragit/utils/__init__.py +105 -0
- ragit/version.py +5 -0
- ragit-0.7.2.dist-info/METADATA +480 -0
- ragit-0.7.2.dist-info/RECORD +18 -0
- {ragit-0.7.dist-info → ragit-0.7.2.dist-info}/WHEEL +1 -1
- ragit-0.7.2.dist-info/licenses/LICENSE +201 -0
- ragit/main.py +0 -354
- ragit-0.7.dist-info/METADATA +0 -170
- ragit-0.7.dist-info/RECORD +0 -6
- {ragit-0.7.dist-info → ragit-0.7.2.dist-info}/top_level.txt +0 -0
ragit/__init__.py
CHANGED
@@ -1,2 +1,89 @@
-#
-
+#
+# Copyright RODMENA LIMITED 2025
+# SPDX-License-Identifier: Apache-2.0
+#
+"""
+Ragit - RAG toolkit for document Q&A and hyperparameter optimization.
+
+Quick Start
+-----------
+>>> from ragit import RAGAssistant
+>>>
+>>> # Load docs and ask questions
+>>> assistant = RAGAssistant("docs/")
+>>> answer = assistant.ask("How do I create a REST API?")
+>>> print(answer)
+>>>
+>>> # Generate code
+>>> code = assistant.generate_code("create a user authentication API")
+>>> print(code)
+
+Optimization
+------------
+>>> from ragit import RagitExperiment, Document, BenchmarkQuestion
+>>>
+>>> docs = [Document(id="doc1", content="...")]
+>>> benchmark = [BenchmarkQuestion(question="What is X?", ground_truth="...")]
+>>>
+>>> experiment = RagitExperiment(docs, benchmark)
+>>> results = experiment.run()
+>>> print(results[0])  # Best configuration
+"""
+
+import logging
+import os
+
+from ragit.version import __version__
+
+# Set up logging
+logger = logging.getLogger("ragit")
+logger.setLevel(os.getenv("RAGIT_LOG_LEVEL", "INFO"))
+
+if not logger.handlers:
+    formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
+    handler = logging.StreamHandler()
+    handler.setFormatter(formatter)
+    logger.addHandler(handler)
+
+# Public API (imports after logging setup)
+from ragit.assistant import RAGAssistant  # noqa: E402
+from ragit.core.experiment.experiment import (  # noqa: E402
+    BenchmarkQuestion,
+    Chunk,
+    Document,
+    RAGConfig,
+    RagitExperiment,
+)
+from ragit.core.experiment.results import EvaluationResult, ExperimentResults  # noqa: E402
+from ragit.loaders import (  # noqa: E402
+    chunk_by_separator,
+    chunk_document,
+    chunk_rst_sections,
+    chunk_text,
+    load_directory,
+    load_text,
+)
+from ragit.providers import OllamaProvider  # noqa: E402
+
+__all__ = [
+    "__version__",
+    # High-level API
+    "RAGAssistant",
+    # Document loading
+    "load_text",
+    "load_directory",
+    "chunk_text",
+    "chunk_document",
+    "chunk_by_separator",
+    "chunk_rst_sections",
+    # Core classes
+    "Document",
+    "Chunk",
+    "OllamaProvider",
+    # Optimization
+    "RagitExperiment",
+    "BenchmarkQuestion",
+    "RAGConfig",
+    "EvaluationResult",
+    "ExperimentResults",
+]
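The rewritten package root configures the "ragit" logger exactly once (level from RAGIT_LOG_LEVEL, a single stream handler) before re-exporting the public API. A minimal sketch of how a consuming application might retune that logger afterwards, using only the standard library; the level and handler choices below are illustrative, not part of ragit's API:

import logging

import ragit  # importing the package attaches ragit's default stream handler

# Quiet the library and take over formatting in the host application.
ragit_logger = logging.getLogger("ragit")
ragit_logger.setLevel(logging.WARNING)
for handler in list(ragit_logger.handlers):
    ragit_logger.removeHandler(handler)

# Records still propagate to the root logger, so the application's own
# configuration applies from here on.
logging.basicConfig(format="%(levelname)s %(name)s: %(message)s")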
ragit/assistant.py
ADDED
@@ -0,0 +1,359 @@
+#
+# Copyright RODMENA LIMITED 2025
+# SPDX-License-Identifier: Apache-2.0
+#
+"""
+High-level RAG Assistant for document Q&A and code generation.
+
+Provides a simple interface for RAG-based tasks.
+
+Note: This class is NOT thread-safe. Do not share instances across threads.
+"""
+
+from pathlib import Path
+from typing import TYPE_CHECKING
+
+import numpy as np
+from numpy.typing import NDArray
+
+from ragit.config import config
+from ragit.core.experiment.experiment import Chunk, Document
+from ragit.loaders import chunk_document, chunk_rst_sections, load_directory, load_text
+from ragit.providers import OllamaProvider
+
+if TYPE_CHECKING:
+    from numpy.typing import NDArray
+
+
+class RAGAssistant:
+    """
+    High-level RAG assistant for document Q&A and generation.
+
+    Handles document indexing, retrieval, and LLM generation in one simple API.
+
+    Parameters
+    ----------
+    documents : list[Document] or str or Path
+        Documents to index. Can be:
+        - List of Document objects
+        - Path to a single file
+        - Path to a directory (will load all .txt, .md, .rst files)
+    provider : OllamaProvider, optional
+        LLM/embedding provider. Defaults to OllamaProvider().
+    embedding_model : str, optional
+        Embedding model name. Defaults to config.DEFAULT_EMBEDDING_MODEL.
+    llm_model : str, optional
+        LLM model name. Defaults to config.DEFAULT_LLM_MODEL.
+    chunk_size : int, optional
+        Chunk size for splitting documents (default: 512).
+    chunk_overlap : int, optional
+        Overlap between chunks (default: 50).
+
+    Note
+    ----
+    This class is NOT thread-safe. Each thread should have its own instance.
+
+    Examples
+    --------
+    >>> # From documents
+    >>> assistant = RAGAssistant([Document(id="doc1", content="...")])
+    >>> answer = assistant.ask("What is X?")
+
+    >>> # From file
+    >>> assistant = RAGAssistant("docs/tutorial.rst")
+    >>> answer = assistant.ask("How do I do Y?")
+
+    >>> # From directory
+    >>> assistant = RAGAssistant("docs/")
+    >>> answer = assistant.ask("Explain Z")
+    """
+
+    def __init__(
+        self,
+        documents: list[Document] | str | Path,
+        provider: OllamaProvider | None = None,
+        embedding_model: str | None = None,
+        llm_model: str | None = None,
+        chunk_size: int = 512,
+        chunk_overlap: int = 50,
+    ):
+        self.provider = provider or OllamaProvider()
+        self.embedding_model = embedding_model or config.DEFAULT_EMBEDDING_MODEL
+        self.llm_model = llm_model or config.DEFAULT_LLM_MODEL
+        self.chunk_size = chunk_size
+        self.chunk_overlap = chunk_overlap
+
+        # Load documents if path provided
+        self.documents = self._load_documents(documents)
+
+        # Index chunks - embeddings stored as pre-normalized numpy matrix for fast search
+        self._chunks: tuple[Chunk, ...] = ()
+        self._embedding_matrix: NDArray[np.float64] | None = None  # Pre-normalized
+        self._build_index()
+
+    def _load_documents(self, documents: list[Document] | str | Path) -> list[Document]:
+        """Load documents from various sources."""
+        if isinstance(documents, list):
+            return documents
+
+        path = Path(documents)
+
+        if path.is_file():
+            return [load_text(path)]
+
+        if path.is_dir():
+            docs: list[Document] = []
+            for pattern in ("*.txt", "*.md", "*.rst"):
+                docs.extend(load_directory(path, pattern))
+            return docs
+
+        raise ValueError(f"Invalid documents source: {documents}")
+
+    def _build_index(self) -> None:
+        """Build vector index from documents using batch embedding."""
+        all_chunks: list[Chunk] = []
+
+        for doc in self.documents:
+            # Use RST section chunking for .rst files, otherwise regular chunking
+            if doc.metadata.get("filename", "").endswith(".rst"):
+                chunks = chunk_rst_sections(doc.content, doc.id)
+            else:
+                chunks = chunk_document(doc, self.chunk_size, self.chunk_overlap)
+            all_chunks.extend(chunks)
+
+        if not all_chunks:
+            self._chunks = ()
+            self._embedding_matrix = None
+            return
+
+        # Batch embed all chunks at once (single API call)
+        texts = [chunk.content for chunk in all_chunks]
+        responses = self.provider.embed_batch(texts, self.embedding_model)
+
+        # Build embedding matrix directly (skip storing in chunks to avoid duplication)
+        embedding_matrix = np.array([response.embedding for response in responses], dtype=np.float64)
+
+        # Pre-normalize for fast cosine similarity (normalize once, use many times)
+        norms = np.linalg.norm(embedding_matrix, axis=1, keepdims=True)
+        norms[norms == 0] = 1  # Avoid division by zero
+
+        # Store as immutable tuple and pre-normalized numpy matrix
+        self._chunks = tuple(all_chunks)
+        self._embedding_matrix = embedding_matrix / norms
+
+    def retrieve(self, query: str, top_k: int = 3) -> list[tuple[Chunk, float]]:
+        """
+        Retrieve relevant chunks for a query.
+
+        Uses vectorized cosine similarity for fast search over all chunks.
+
+        Parameters
+        ----------
+        query : str
+            Search query.
+        top_k : int
+            Number of chunks to return (default: 3).
+
+        Returns
+        -------
+        list[tuple[Chunk, float]]
+            List of (chunk, similarity_score) tuples, sorted by relevance.
+
+        Examples
+        --------
+        >>> results = assistant.retrieve("how to create a route")
+        >>> for chunk, score in results:
+        ...     print(f"{score:.2f}: {chunk.content[:100]}...")
+        """
+        if not self._chunks or self._embedding_matrix is None:
+            return []
+
+        # Get query embedding and normalize
+        query_response = self.provider.embed(query, self.embedding_model)
+        query_vec = np.array(query_response.embedding, dtype=np.float64)
+        query_norm = np.linalg.norm(query_vec)
+        if query_norm == 0:
+            return []
+        query_normalized = query_vec / query_norm
+
+        # Fast cosine similarity: matrix is pre-normalized, just dot product
+        similarities = self._embedding_matrix @ query_normalized
+
+        # Get top_k indices using argpartition (faster than full sort for large arrays)
+        if len(similarities) <= top_k:
+            top_indices = np.argsort(similarities)[::-1]
+        else:
+            # Partial sort - only find top_k elements
+            top_indices = np.argpartition(similarities, -top_k)[-top_k:]
+            # Sort the top_k by score
+            top_indices = top_indices[np.argsort(similarities[top_indices])[::-1]]
+
+        return [(self._chunks[i], float(similarities[i])) for i in top_indices]
+
+    def get_context(self, query: str, top_k: int = 3) -> str:
+        """
+        Get formatted context string from retrieved chunks.
+
+        Parameters
+        ----------
+        query : str
+            Search query.
+        top_k : int
+            Number of chunks to include.
+
+        Returns
+        -------
+        str
+            Formatted context string.
+        """
+        results = self.retrieve(query, top_k)
+        return "\n\n---\n\n".join(chunk.content for chunk, _ in results)
+
+    def generate(
+        self,
+        prompt: str,
+        system_prompt: str | None = None,
+        temperature: float = 0.7,
+    ) -> str:
+        """
+        Generate text using the LLM (without retrieval).
+
+        Parameters
+        ----------
+        prompt : str
+            User prompt.
+        system_prompt : str, optional
+            System prompt for context.
+        temperature : float
+            Sampling temperature (default: 0.7).
+
+        Returns
+        -------
+        str
+            Generated text.
+        """
+        response = self.provider.generate(
+            prompt=prompt,
+            model=self.llm_model,
+            system_prompt=system_prompt,
+            temperature=temperature,
+        )
+        return response.text
+
+    def ask(
+        self,
+        question: str,
+        system_prompt: str | None = None,
+        top_k: int = 3,
+        temperature: float = 0.7,
+    ) -> str:
+        """
+        Ask a question using RAG (retrieve + generate).
+
+        Parameters
+        ----------
+        question : str
+            Question to answer.
+        system_prompt : str, optional
+            System prompt. Defaults to a helpful assistant prompt.
+        top_k : int
+            Number of context chunks to retrieve (default: 3).
+        temperature : float
+            Sampling temperature (default: 0.7).
+
+        Returns
+        -------
+        str
+            Generated answer.
+
+        Examples
+        --------
+        >>> answer = assistant.ask("How do I create a REST API?")
+        >>> print(answer)
+        """
+        # Retrieve context
+        context = self.get_context(question, top_k)
+
+        # Default system prompt
+        if system_prompt is None:
+            system_prompt = """You are a helpful assistant. Answer questions based on the provided context.
+If the context doesn't contain enough information, say so. Be concise and accurate."""
+
+        # Build prompt with context
+        prompt = f"""Context:
+{context}
+
+Question: {question}
+
+Answer:"""
+
+        return self.generate(prompt, system_prompt, temperature)
+
+    def generate_code(
+        self,
+        request: str,
+        language: str = "python",
+        top_k: int = 3,
+        temperature: float = 0.7,
+    ) -> str:
+        """
+        Generate code based on documentation context.
+
+        Parameters
+        ----------
+        request : str
+            Description of what code to generate.
+        language : str
+            Programming language (default: "python").
+        top_k : int
+            Number of context chunks to retrieve.
+        temperature : float
+            Sampling temperature.
+
+        Returns
+        -------
+        str
+            Generated code (cleaned, without markdown).
+
+        Examples
+        --------
+        >>> code = assistant.generate_code("create a REST API with user endpoints")
+        >>> print(code)
+        """
+        context = self.get_context(request, top_k)
+
+        system_prompt = f"""You are an expert {language} developer. Generate ONLY valid {language} code.
+
+RULES:
+1. Output PURE CODE ONLY - no explanations, no markdown code blocks
+2. Include necessary imports
+3. Write clean, production-ready code
+4. Add brief comments for clarity"""
+
+        prompt = f"""Documentation:
+{context}
+
+Request: {request}
+
+Generate the {language} code:"""
+
+        response = self.generate(prompt, system_prompt, temperature)
+
+        # Clean up response - remove markdown if present
+        code = response
+        if f"```{language}" in code:
+            code = code.split(f"```{language}")[1].split("```")[0]
+        elif "```" in code:
+            code = code.split("```")[1].split("```")[0]
+
+        return code.strip()
+
+    @property
+    def num_chunks(self) -> int:
+        """Return number of indexed chunks."""
+        return len(self._chunks)
+
+    @property
+    def num_documents(self) -> int:
+        """Return number of loaded documents."""
+        return len(self.documents)
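The retrieval path above is the performance-sensitive piece of this release: embeddings are L2-normalized once at index time, so each query costs one matrix-vector product, and np.argpartition selects the top-k in linear time before sorting only those k. A self-contained sketch with synthetic vectors (shapes, seed, and k are arbitrary) showing that this shortcut matches a full cosine-similarity sort:

import numpy as np

# Toy corpus embeddings (one row per chunk) and a query vector.
rng = np.random.default_rng(0)
matrix = rng.normal(size=(1000, 64))
query = rng.normal(size=64)

# Pre-normalize rows once, as _build_index does; normalize the query per call.
matrix /= np.linalg.norm(matrix, axis=1, keepdims=True)
query /= np.linalg.norm(query)

# Cosine similarity collapses to a single matrix-vector product.
sims = matrix @ query

# argpartition finds the top-k set in O(n); only those k entries get sorted.
top_k = 3
idx = np.argpartition(sims, -top_k)[-top_k:]
idx = idx[np.argsort(sims[idx])[::-1]]

# Sanity check: identical to the first k entries of a full descending sort.
assert np.array_equal(idx, np.argsort(sims)[::-1][:top_k])
print(idx, sims[idx])

For a few hundred chunks the difference is negligible, but pre-normalizing means repeated queries never pay the per-row norm again.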
ragit/config.py
ADDED
@@ -0,0 +1,51 @@
+#
+# Copyright RODMENA LIMITED 2025
+# SPDX-License-Identifier: Apache-2.0
+#
+"""
+Ragit configuration management.
+
+Loads configuration from environment variables and .env files.
+"""
+
+import os
+from pathlib import Path
+
+from dotenv import load_dotenv
+
+# Load .env file from current working directory or project root
+_env_path = Path.cwd() / ".env"
+if _env_path.exists():
+    load_dotenv(_env_path)
+else:
+    # Try to find .env in parent directories
+    for parent in Path.cwd().parents:
+        _env_path = parent / ".env"
+        if _env_path.exists():
+            load_dotenv(_env_path)
+            break
+
+
+class Config:
+    """Ragit configuration loaded from environment variables."""
+
+    # Ollama LLM API Configuration (can be cloud)
+    OLLAMA_BASE_URL: str = os.getenv("OLLAMA_BASE_URL", "http://localhost:11434")
+    OLLAMA_API_KEY: str | None = os.getenv("OLLAMA_API_KEY")
+    OLLAMA_TIMEOUT: int = int(os.getenv("OLLAMA_TIMEOUT", "120"))
+
+    # Ollama Embedding API Configuration (cloud doesn't support embeddings, use local)
+    OLLAMA_EMBEDDING_URL: str = os.getenv(
+        "OLLAMA_EMBEDDING_URL", os.getenv("OLLAMA_BASE_URL", "http://localhost:11434")
+    )
+
+    # Default Models
+    DEFAULT_LLM_MODEL: str = os.getenv("RAGIT_DEFAULT_LLM_MODEL", "qwen3-vl:235b-instruct")
+    DEFAULT_EMBEDDING_MODEL: str = os.getenv("RAGIT_DEFAULT_EMBEDDING_MODEL", "mxbai-embed-large")
+
+    # Logging
+    LOG_LEVEL: str = os.getenv("RAGIT_LOG_LEVEL", "INFO")
+
+
+# Singleton instance
+config = Config()
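Config resolves everything at import time: the .env file is looked up in the working directory and then each parent, and load_dotenv() runs with its default override=False, so variables already exported in the environment beat the .env entries. A sketch of that precedence, assuming a hypothetical .env with the commented values:

# Hypothetical .env discovered in the project root:
#   OLLAMA_BASE_URL=http://gpu-box:11434
#   RAGIT_DEFAULT_LLM_MODEL=llama3.2:3b
import os

# An exported variable wins over the .env entry (override=False).
os.environ["OLLAMA_BASE_URL"] = "http://localhost:11434"

from ragit.config import config  # .env is read once, at first import

print(config.OLLAMA_BASE_URL)    # http://localhost:11434 (environment wins)
print(config.DEFAULT_LLM_MODEL)  # llama3.2:3b (taken from the .env sketch)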
ragit/core/experiment/__init__.py
ADDED
@@ -0,0 +1,22 @@
+#
+# Copyright RODMENA LIMITED 2025
+# SPDX-License-Identifier: Apache-2.0
+#
+"""Ragit experiment module."""
+
+from ragit.core.experiment.experiment import (
+    BenchmarkQuestion,
+    Document,
+    RAGConfig,
+    RagitExperiment,
+)
+from ragit.core.experiment.results import EvaluationResult, ExperimentResults
+
+__all__ = [
+    "RagitExperiment",
+    "Document",
+    "BenchmarkQuestion",
+    "RAGConfig",
+    "EvaluationResult",
+    "ExperimentResults",
+]
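Because ragit/__init__.py and this experiment package both re-export the same objects from ragit.core.experiment.experiment, the three import paths below are interchangeable. A quick identity check, assuming only the re-exports shown in this diff:

from ragit import RagitExperiment as from_root
from ragit.core.experiment import RagitExperiment as from_package
from ragit.core.experiment.experiment import RagitExperiment as from_module

# Same class object everywhere; the package root is the documented entry point.
assert from_root is from_package is from_module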