ragit 0.3__py3-none-any.whl → 0.10.1__py3-none-any.whl

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,237 @@
+ #
+ # Copyright RODMENA LIMITED 2025
+ # SPDX-License-Identifier: Apache-2.0
+ #
+ """
+ Function-based provider adapter for pluggable embedding and LLM functions.
+
+ This module provides a simple adapter that wraps user-provided functions
+ into the provider interface, enabling easy integration with custom
+ embedding and LLM implementations.
+ """
+
+ import inspect
+ from collections.abc import Callable
+
+ from ragit.providers.base import (
+     BaseEmbeddingProvider,
+     BaseLLMProvider,
+     EmbeddingResponse,
+     LLMResponse,
+ )
+
+
+ class FunctionProvider(BaseLLMProvider, BaseEmbeddingProvider):
+     """
+     Adapter that wraps user-provided embedding and generation functions.
+
+     This provider allows users to bring their own embedding and/or LLM functions
+     without implementing the full provider interface.
+
+     Parameters
+     ----------
+     embed_fn : Callable[[str], list[float]], optional
+         Function that takes text and returns an embedding vector.
+         Example: `lambda text: openai.embeddings.create(input=text).data[0].embedding`
+     generate_fn : Callable, optional
+         Function for text generation. Supports two signatures:
+         - (prompt: str) -> str
+         - (prompt: str, system_prompt: str) -> str
+     embedding_dimensions : int, optional
+         Embedding dimensions. Auto-detected on first call if not provided.
+
+     Examples
+     --------
+     >>> # Simple embedding function
+     >>> def my_embed(text: str) -> list[float]:
+     ...     return openai.embeddings.create(input=text).data[0].embedding
+     >>>
+     >>> # Use with RAGAssistant (retrieval-only)
+     >>> assistant = RAGAssistant(docs, embed_fn=my_embed)
+     >>> results = assistant.retrieve("query")
+     >>>
+     >>> # With LLM for full RAG
+     >>> def my_llm(prompt: str, system_prompt: str = None) -> str:
+     ...     return openai.chat.completions.create(
+     ...         messages=[{"role": "user", "content": prompt}]
+     ...     ).choices[0].message.content
+     >>>
+     >>> assistant = RAGAssistant(docs, embed_fn=my_embed, generate_fn=my_llm)
+     >>> answer = assistant.ask("What is X?")
+     """
+
+     def __init__(
+         self,
+         embed_fn: Callable[[str], list[float]] | None = None,
+         generate_fn: Callable[..., str] | None = None,
+         embedding_dimensions: int | None = None,
+     ) -> None:
+         self._embed_fn = embed_fn
+         self._generate_fn = generate_fn
+         self._embedding_dimensions = embedding_dimensions
+         self._generate_fn_signature: int | None = None  # Number of args (1 or 2)
+
+         # Detect generate_fn signature if provided
+         if generate_fn is not None:
+             self._detect_generate_signature()
+
+     def _detect_generate_signature(self) -> None:
+         """Detect whether generate_fn accepts 1 or 2 arguments."""
+         if self._generate_fn is None:
+             return
+
+         sig = inspect.signature(self._generate_fn)
+         params = [
+             p
+             for p in sig.parameters.values()
+             if p.default is inspect.Parameter.empty and p.kind not in (p.VAR_POSITIONAL, p.VAR_KEYWORD)
+         ]
+         # Count required parameters
+         required_count = len(params)
+
+         if required_count == 1:
+             self._generate_fn_signature = 1
+         else:
+             # Assume 2 args if more than 1 required or if has optional args
+             self._generate_fn_signature = 2
+
+     @property
+     def provider_name(self) -> str:
+         return "function"
+
+     @property
+     def dimensions(self) -> int:
+         if self._embedding_dimensions is None:
+             raise ValueError("Embedding dimensions not yet determined. Call embed() first or provide dimensions.")
+         return self._embedding_dimensions
+
+     @property
+     def has_embedding(self) -> bool:
+         """Check if embedding function is configured."""
+         return self._embed_fn is not None
+
+     @property
+     def has_llm(self) -> bool:
+         """Check if LLM generation function is configured."""
+         return self._generate_fn is not None
+
+     def is_available(self) -> bool:
+         """Check if the provider has at least one function configured."""
+         return self._embed_fn is not None or self._generate_fn is not None
+
+     def embed(self, text: str, model: str = "") -> EmbeddingResponse:
+         """
+         Generate embedding using the provided function.
+
+         Parameters
+         ----------
+         text : str
+             Text to embed.
+         model : str
+             Model identifier (ignored, kept for interface compatibility).
+
+         Returns
+         -------
+         EmbeddingResponse
+             The embedding response.
+
+         Raises
+         ------
+         ValueError
+             If no embedding function was provided.
+         """
+         if self._embed_fn is None:
+             raise ValueError("No embedding function configured. Provide embed_fn to use embeddings.")
+
+         raw_embedding = self._embed_fn(text)
+
+         # Convert to tuple for immutability
+         embedding_tuple: tuple[float, ...] = tuple(raw_embedding)
+
+         # Auto-detect dimensions on first call
+         if self._embedding_dimensions is None:
+             self._embedding_dimensions = len(embedding_tuple)
+
+         return EmbeddingResponse(
+             embedding=embedding_tuple,
+             model=model or "function",
+             provider=self.provider_name,
+             dimensions=len(embedding_tuple),
+         )
+
+     def embed_batch(self, texts: list[str], model: str = "") -> list[EmbeddingResponse]:
+         """
+         Generate embeddings for multiple texts.
+
+         Iterates over embed_fn for each text. For providers with native batch
+         support, users should implement their own BatchEmbeddingProvider.
+
+         Parameters
+         ----------
+         texts : list[str]
+             Texts to embed.
+         model : str
+             Model identifier (ignored).
+
+         Returns
+         -------
+         list[EmbeddingResponse]
+             List of embedding responses.
+         """
+         return [self.embed(text, model) for text in texts]
+
+     def generate(
+         self,
+         prompt: str,
+         model: str = "",
+         system_prompt: str | None = None,
+         temperature: float = 0.7,
+         max_tokens: int | None = None,
+     ) -> LLMResponse:
+         """
+         Generate text using the provided function.
+
+         Parameters
+         ----------
+         prompt : str
+             The user prompt.
+         model : str
+             Model identifier (ignored, kept for interface compatibility).
+         system_prompt : str, optional
+             System prompt for context.
+         temperature : float
+             Sampling temperature (ignored if function doesn't support it).
+         max_tokens : int, optional
+             Maximum tokens (ignored if function doesn't support it).
+
+         Returns
+         -------
+         LLMResponse
+             The generated response.
+
+         Raises
+         ------
+         NotImplementedError
+             If no generation function was provided.
+         """
+         if self._generate_fn is None:
+             raise NotImplementedError(
+                 "No LLM configured. Provide generate_fn or a provider with LLM support "
+                 "to use ask(), generate(), or generate_code() methods."
+             )
+
+         # Call with appropriate signature
+         if self._generate_fn_signature == 1:
+             # Single argument - prepend system prompt to prompt if provided
+             full_prompt = f"{system_prompt}\n\n{prompt}" if system_prompt else prompt
+             text = self._generate_fn(full_prompt)
+         else:
+             # Two arguments - pass separately
+             text = self._generate_fn(prompt, system_prompt)
+
+         return LLMResponse(
+             text=text,
+             model=model or "function",
+             provider=self.provider_name,
+             usage=None,
+         )
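
For readers skimming the diff, the following is a minimal, self-contained sketch of how the FunctionProvider added here could be exercised on its own. The import path is an assumption (the diff shows the new file's contents but not its filename inside ragit/providers/), and the toy embed/LLM functions are illustrative placeholders, not part of the package; only the FunctionProvider API itself comes from the code above.

    # Sketch only: the module path below is a guess, not confirmed by the diff.
    from ragit.providers.function import FunctionProvider  # hypothetical import path

    def toy_embed(text: str) -> list[float]:
        # Placeholder embedding: fixed-size byte histogram, just to exercise
        # the adapter; a real embed_fn would call an embedding model API.
        vec = [0.0] * 8
        for i, byte in enumerate(text.encode("utf-8")):
            vec[i % 8] += byte / 255.0
        return vec

    def toy_llm(prompt: str, system_prompt: str | None = None) -> str:
        # Placeholder generator that simply echoes its input.
        return f"echo: {prompt}"

    provider = FunctionProvider(embed_fn=toy_embed, generate_fn=toy_llm)

    emb = provider.embed("hello world")
    print(emb.dimensions)       # 8 -- auto-detected on the first embed() call
    print(provider.dimensions)  # the dimensions property is populated afterwards

    # toy_llm declares only one required parameter, so signature detection
    # picks the single-argument form and system_prompt is prepended to prompt.
    reply = provider.generate("What is X?", system_prompt="Answer briefly.")
    print(reply.text)

Because toy_llm has a single required parameter, this also shows the signature-detection path: generate() merges system_prompt into the prompt rather than passing it as a second argument, matching the _detect_generate_signature logic in the diff.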