aisberg 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aisberg/__init__.py +7 -0
- aisberg/abstract/__init__.py +0 -0
- aisberg/abstract/modules.py +57 -0
- aisberg/api/__init__.py +0 -0
- aisberg/api/async_endpoints.py +333 -0
- aisberg/api/endpoints.py +328 -0
- aisberg/async_client.py +107 -0
- aisberg/client.py +108 -0
- aisberg/config.py +17 -0
- aisberg/exceptions.py +22 -0
- aisberg/models/__init__.py +0 -0
- aisberg/models/chat.py +143 -0
- aisberg/models/collections.py +36 -0
- aisberg/models/embeddings.py +92 -0
- aisberg/models/models.py +39 -0
- aisberg/models/requests.py +11 -0
- aisberg/models/token.py +11 -0
- aisberg/models/tools.py +73 -0
- aisberg/models/workflows.py +66 -0
- aisberg/modules/__init__.py +23 -0
- aisberg/modules/chat.py +403 -0
- aisberg/modules/collections.py +117 -0
- aisberg/modules/document.py +117 -0
- aisberg/modules/embeddings.py +309 -0
- aisberg/modules/me.py +77 -0
- aisberg/modules/models.py +108 -0
- aisberg/modules/tools.py +78 -0
- aisberg/modules/workflows.py +140 -0
- aisberg/requests/__init__.py +0 -0
- aisberg/requests/async_requests.py +85 -0
- aisberg/requests/sync_requests.py +85 -0
- aisberg/utils.py +111 -0
- aisberg-0.1.0.dist-info/METADATA +212 -0
- aisberg-0.1.0.dist-info/RECORD +43 -0
- aisberg-0.1.0.dist-info/WHEEL +5 -0
- aisberg-0.1.0.dist-info/licenses/LICENSE +9 -0
- aisberg-0.1.0.dist-info/top_level.txt +3 -0
- tests/integration/test_collections_integration.py +115 -0
- tests/unit/test_collections_sync.py +104 -0
- tmp/test.py +33 -0
- tmp/test_async.py +126 -0
- tmp/test_doc_parse.py +12 -0
- tmp/test_sync.py +146 -0
aisberg/api/endpoints.py
ADDED
@@ -0,0 +1,328 @@
|
|
1
|
+
from io import BytesIO
|
2
|
+
|
3
|
+
import httpx
|
4
|
+
from ..models.chat import (
|
5
|
+
LanguageModelInput,
|
6
|
+
format_messages,
|
7
|
+
ChatCompletionResponse,
|
8
|
+
ChatCompletionChunk,
|
9
|
+
)
|
10
|
+
from typing import Optional, Generator, Union, List, Any, Tuple
|
11
|
+
|
12
|
+
from ..models.collections import GroupCollections, PointDetails
|
13
|
+
from ..models.embeddings import (
|
14
|
+
EncodingFormat,
|
15
|
+
EncodingResponse,
|
16
|
+
ChunksDataList,
|
17
|
+
RerankerResponse,
|
18
|
+
)
|
19
|
+
from ..models.models import Model
|
20
|
+
from ..models.token import TokenInfo
|
21
|
+
from ..models.workflows import WorkflowDetails, Workflow
|
22
|
+
from ..utils import parse_chat_line, WorkflowLineParser
|
23
|
+
from ..requests.sync_requests import req, req_stream
|
24
|
+
from ..models.requests import AnyDict, AnyList
|
25
|
+
|
26
|
+
|
27
|
+
def models(client: httpx.Client) -> List[Model]:
    """Fetch the list of models available on the API.

    Args:
        client: Authenticated synchronous HTTP client.

    Returns:
        One ``Model`` per entry of the ``/v1/models`` listing.

    Raises:
        ValueError: If the endpoint payload is missing or not a list.
    """
    listing = req(client, "GET", "/v1/models", AnyDict).data
    if not listing or not isinstance(listing, list):
        raise ValueError("Invalid response format for models")
    return [Model.model_validate(entry) for entry in listing]
|
35
|
+
|
36
|
+
|
37
|
+
def workflows(client: httpx.Client) -> List[Workflow]:
    """Fetch the list of workflows available to the current user.

    Args:
        client: Authenticated synchronous HTTP client.

    Returns:
        One ``Workflow`` per entry of the ``/workflow/light`` listing.
    """
    listing = req(client, "GET", "/workflow/light", AnyList)
    return [Workflow.model_validate(entry) for entry in listing.root]
|
43
|
+
|
44
|
+
|
45
|
+
def workflow(client: httpx.Client, workflow_id: str) -> WorkflowDetails:
    """Fetch the details of one workflow.

    Args:
        client: Authenticated synchronous HTTP client.
        workflow_id: Identifier of the workflow to look up.

    Returns:
        The parsed ``WorkflowDetails``.

    Raises:
        ValueError: If the API answers 404 for this identifier.
        httpx.HTTPStatusError: For any other HTTP error.
    """
    try:
        return req(client, "GET", f"/workflow/details/{workflow_id}", WorkflowDetails)
    except httpx.HTTPStatusError as e:
        if e.response.status_code == 404:
            raise ValueError(f"Workflow with ID {workflow_id} not found")
        raise e
|
56
|
+
|
57
|
+
|
58
|
+
def collections(client: httpx.Client) -> List[GroupCollections]:
    """Fetch the list of collections, grouped by owning group.

    Args:
        client: Authenticated synchronous HTTP client.

    Returns:
        One ``GroupCollections`` per entry of the ``/collections`` listing.
    """
    listing = req(client, "GET", "/collections", AnyList)
    return [GroupCollections.model_validate(entry) for entry in listing.root]
|
64
|
+
|
65
|
+
|
66
|
+
def collection(
    client: httpx.Client, collection_id: str, group_id: str
) -> List[PointDetails]:
    """Fetch the points of one collection inside one group.

    Args:
        client: Authenticated synchronous HTTP client.
        collection_id: Identifier of the collection.
        group_id: Identifier of the group owning the collection.

    Returns:
        One ``PointDetails`` per point of the collection.

    Raises:
        ValueError: If the API answers 404 for this collection/group pair.
        httpx.HTTPStatusError: For any other HTTP error.
    """
    try:
        listing = req(client, "GET", f"/collections/{collection_id}/{group_id}", AnyList)
    except httpx.HTTPStatusError as e:
        if e.response.status_code == 404:
            raise ValueError(
                f"Collection with ID {collection_id} not found in group {group_id}"
            )
        raise e
    return [PointDetails.model_validate(entry) for entry in listing.root]
|
81
|
+
|
82
|
+
|
83
|
+
def me(client: httpx.Client) -> TokenInfo:
    """Fetch the profile of the user owning the current API key.

    Args:
        client: Authenticated synchronous HTTP client.

    Returns:
        The parsed ``TokenInfo`` for the current user.
    """
    current_user = req(client, "GET", "/users/me", TokenInfo)
    return current_user
|
88
|
+
|
89
|
+
|
90
|
+
def chat(
    client: httpx.Client,
    input: LanguageModelInput,
    model: Optional[str] = None,  # fixed: was annotated plain `str` with a None default
    temperature: float = 0.7,
    tools: Optional[list] = None,
    group: Optional[str] = None,
    **kwargs,
) -> ChatCompletionResponse:
    """Send a chat message and get a response from an LLM endpoint.

    Args:
        client: Authenticated synchronous HTTP client.
        input: Messages in any form accepted by ``format_messages``.
        model: Name of the model to use (required; the default only exists
            so the parameter can stay keyword-friendly).
        temperature: Sampling temperature.
        tools: Optional tool definitions forwarded to the endpoint.
        group: Optional group identifier forwarded to the endpoint.
        **kwargs: Extra fields merged into the request payload.

    Returns:
        The parsed ``ChatCompletionResponse``.

    Raises:
        ValueError: If ``model`` is not provided.
    """
    if model is None:
        raise ValueError("Model must be specified")

    formatted_messages = format_messages(input)

    payload = {
        "model": model,
        "messages": formatted_messages,
        "temperature": temperature,
        "stream": False,
        **kwargs,
    }

    if group is not None:
        payload["group"] = group

    if tools is not None:
        payload["tools"] = tools

    return req(
        client,
        "POST",
        "/v1/chat/completions",
        ChatCompletionResponse,
        json=payload,
    )
|
128
|
+
|
129
|
+
|
130
|
+
def stream(
    client: httpx.Client,
    input: LanguageModelInput,
    model: str,
    temperature: float = 0.7,
    full_chunk: bool = True,
    group: Optional[str] = None,
    **kwargs,
) -> Generator[Union[str, ChatCompletionChunk, ChatCompletionResponse], None, None]:
    """Stream chat completions from an OpenAI-compatible endpoint.

    - If ``full_chunk`` is True (default): each yield is the complete parsed chunk.
    - Otherwise (backward-compatible mode): only the ``delta.content`` text is yielded.

    Args:
        client: Authenticated synchronous HTTP client.
        input: Messages in any form accepted by ``format_messages``.
        model: Name of the model to use.
        temperature: Sampling temperature.
        full_chunk: Yield whole chunks instead of just the delta text.
        group: Optional group identifier forwarded to the endpoint.
        **kwargs: Extra fields merged into the request payload.
    """
    formatted_messages = format_messages(input)

    payload = {
        "model": model,
        "messages": formatted_messages,
        "temperature": temperature,
        "stream": True,
        **kwargs,
    }

    if group is not None:
        payload["group"] = group

    for chunk in req_stream(
        client,
        "POST",
        "/v1/chat/completions",
        parse_line=lambda line: parse_chat_line(line, full_chunk=full_chunk),
        json=payload,
    ):
        # Fixed: skip empty/keep-alive lines BEFORE validation. The old code
        # validated first and then tested `data is None`, a guard that could
        # never fire (model_validate raises instead of returning None), so a
        # None sentinel from the line parser crashed validation.
        if chunk is None:
            continue

        data = ChatCompletionChunk.model_validate(chunk)

        if full_chunk:
            yield data
        else:
            yield data.choices[0].delta.content if data.choices else ""
|
173
|
+
|
174
|
+
|
175
|
+
def embeddings(
    client: httpx.Client,
    input: str,
    model: str,
    encoding_format: EncodingFormat,
    normalize: bool,
    group: Optional[str] = None,
    **kwargs,
) -> EncodingResponse:
    """Compute embeddings for *input* with the requested model.

    Args:
        client: Authenticated synchronous HTTP client.
        input: Text to embed.
        model: Name of the embedding model.
        encoding_format: Output encoding for the vectors.
        normalize: Whether the service should normalize the vectors.
        group: Optional group identifier forwarded to the endpoint.
        **kwargs: Extra fields merged into the request payload.

    Returns:
        The parsed ``EncodingResponse``.
    """
    body = {
        "model": model,
        "input": input,
        "encoding_format": encoding_format,
        "normalize": normalize,
        **kwargs,
    }
    if group is not None:
        body["group"] = group

    return req(client, "POST", "/v1/embeddings", EncodingResponse, json=body)
|
205
|
+
|
206
|
+
|
207
|
+
def retrieve(
    client: httpx.Client,
    query: str,
    collections_names: List[str],
    limit: int,
    score_threshold: float,
    filters: list,
    beta: float,
    group: Optional[str] = None,
    **kwargs,
) -> ChunksDataList:
    """Retrieve the most relevant documents for *query* from the given collections.

    Args:
        client: Authenticated synchronous HTTP client.
        query: Search query text.
        collections_names: Names of the collections to search.
        limit: Maximum number of results.
        score_threshold: Minimum relevance score (sent as the ``"score"`` field).
        filters: Filter clauses forwarded as-is.
        beta: Search weighting parameter forwarded as-is.
        group: Optional group identifier forwarded to the endpoint.
        **kwargs: Extra fields merged into the request payload.

    Returns:
        The parsed ``ChunksDataList``.
    """
    body = {
        "query": query,
        "collections_names": collections_names,
        "limit": limit,
        "score": score_threshold,
        "filters": filters,
        "beta": beta,
        **kwargs,
    }
    if group is not None:
        body["group"] = group

    return req(client, "POST", "/collections/run/search", ChunksDataList, json=body)
|
241
|
+
|
242
|
+
|
243
|
+
def rerank(
    client: httpx.Client,
    query: str,
    documents: List[str],
    model: str,
    top_n: int,
    return_documents: bool,
    group: Optional[str] = None,
    **kwargs,
) -> RerankerResponse:
    """Rerank *documents* by relevance to *query* using the given model.

    Args:
        client: Authenticated synchronous HTTP client.
        query: Query text against which documents are scored.
        documents: Candidate documents to rerank.
        model: Name of the reranker model.
        top_n: Number of top results to return.
        return_documents: Whether the response should echo the documents.
        group: Optional group identifier forwarded to the endpoint.
        **kwargs: Extra fields merged into the request payload.

    Returns:
        The parsed ``RerankerResponse``.
    """
    body = {
        "query": query,
        "documents": documents,
        "model": model,
        "top_n": top_n,
        "return_documents": return_documents,
        **kwargs,
    }
    if group is not None:
        body["group"] = group

    return req(client, "POST", "/v1/rerank", RerankerResponse, json=body)
|
275
|
+
|
276
|
+
|
277
|
+
def run_workflow(
    client: httpx.Client,
    workflow_id: str,
    data: dict,
) -> Generator[Any, None, None]:  # fixed: was `-> Any` although the function is a generator
    """Run a specific workflow and stream its output chunks.

    Args:
        client: Authenticated synchronous HTTP client.
        workflow_id: Identifier of the workflow to run.
        data: JSON payload forwarded to the workflow run endpoint.

    Yields:
        Chunks produced by the workflow, as parsed by ``WorkflowLineParser``.

    Raises:
        ValueError: If the API answers 404 for this identifier.
        httpx.HTTPStatusError: For any other HTTP error.
    """
    try:
        parser = WorkflowLineParser()
        for chunk in req_stream(
            client,
            "POST",
            f"/workflow/run/{workflow_id}",
            parse_line=parser,
            json=data,
        ):
            yield chunk
    except httpx.HTTPStatusError as e:
        if e.response.status_code == 404:
            raise ValueError(f"Workflow with ID {workflow_id} not found")
        # Bare raise keeps the original traceback intact.
        raise
|
299
|
+
|
300
|
+
|
301
|
+
def parse_document(
    client: httpx.Client,
    file: Tuple[bytes, str],
    source: str,
    group: Optional[str] = None,
) -> Any:  # fixed: was annotated `-> str` although the AnyDict response is returned as-is
    """Parse a document through the document-parser service.

    Args:
        client: Authenticated synchronous HTTP client.
        file: ``(content_bytes, filename)`` pair to upload.
        source: Source identifier sent alongside the file.
        group: Optional group identifier forwarded to the endpoint.

    Returns:
        The parsed response payload from the document parser.
    """
    payload = {
        "source": source,
    }

    if group is not None:
        payload["group"] = group

    # Multipart upload: (filename, stream, content-type).
    files = {"file": (file[1], BytesIO(file[0]), "application/octet-stream")}

    # Fixed: removed a leftover debug print() of the raw response.
    return req(
        client,
        "POST",
        "/document-parser/parsing/parse",
        AnyDict,
        files=files,
        data=payload,
    )
|
aisberg/async_client.py
ADDED
@@ -0,0 +1,107 @@
|
|
1
|
+
from .config import settings
|
2
|
+
import httpx
|
3
|
+
from typing import Dict, Callable
|
4
|
+
|
5
|
+
from .modules import (
|
6
|
+
AsyncChatModule,
|
7
|
+
AsyncCollectionsModule,
|
8
|
+
AsyncEmbeddingsModule,
|
9
|
+
AsyncMeModule,
|
10
|
+
AsyncModelsModule,
|
11
|
+
AsyncWorkflowsModule,
|
12
|
+
ToolsModule,
|
13
|
+
)
|
14
|
+
|
15
|
+
|
16
|
+
class AisbergAsyncClient:
    """Asynchronous client for the Aisberg API.

    Wraps an ``httpx.AsyncClient`` and exposes the SDK feature modules
    (tools, chat, models, workflows, me, collections, embeddings).
    """

    def __init__(self, base_url=None, api_key=None, timeout=None):
        """Build the client from explicit arguments or the shared settings.

        Args:
            base_url: API root URL; falls back to ``settings.aisberg_base_url``.
            api_key: Bearer token; falls back to ``settings.aisberg_api_key``.
            timeout: Request timeout; falls back to ``settings.timeout``.

        Raises:
            ValueError: If no base URL or API key can be resolved.
        """
        self.api_key = api_key or settings.aisberg_api_key
        self.base_url = base_url or settings.aisberg_base_url
        self.timeout = timeout or settings.timeout

        # Fixed: the sync client validates its configuration at construction
        # time but the async client did not, so a missing key only surfaced
        # later as an obscure httpx error. Keep both clients consistent.
        if not self.base_url or not self.api_key:
            raise ValueError(
                "L'URL de base et la clé API doivent être définies. "
                "Utilisez les variables d'environnement AISBERG_API_KEY et AISBERG_BASE_URL ou passez-les lors de l'initialisation du client."
            )

        self.tool_registry: Dict[str, Callable] = {}
        self._client = httpx.AsyncClient(
            base_url=self.base_url,
            timeout=self.timeout,
            headers={"Authorization": f"Bearer {self.api_key}"},
        )

        # Feature modules
        self.tools = ToolsModule(self)
        self.chat = AsyncChatModule(self, self._client)
        self.models = AsyncModelsModule(self, self._client)
        self.workflows = AsyncWorkflowsModule(self, self._client)
        self.me = AsyncMeModule(self, self._client)
        self.collections = AsyncCollectionsModule(self, self._client)
        self.embeddings = AsyncEmbeddingsModule(self, self._client)

    async def initialize(self):
        """Validate the API key and return the ready-to-use client."""
        await self._validate_api_key()
        return self

    async def _validate_api_key(self):
        """Validate the API key by performing a request against the API.

        Raises:
            ConnectionError: If the host cannot be reached.
            PermissionError: If access is forbidden (HTTP 403).
            ValueError: If the key is invalid/expired or validation fails.
        """
        try:
            await self.me.info()
        except httpx.ConnectTimeout as e:
            raise ConnectionError(
                f"Le host {self.base_url} n'est pas accessible. Vérifiez votre connexion réseau ou l'URL de l'API."
            ) from e
        except httpx.HTTPStatusError as e:
            if e.response.status_code == 401:
                raise ValueError(
                    "Clé API invalide ou expirée. Veuillez vérifier votre clé API."
                ) from e
            elif e.response.status_code == 403:
                raise PermissionError(
                    "Accès interdit. Vérifiez vos permissions pour utiliser l'API."
                ) from e
            else:
                raise ValueError(
                    f"Erreur lors de la validation de la clé API: {e.response.text}"
                ) from e
        except Exception as e:
            # Fixed: the original discarded the caught exception entirely;
            # include its message and chain it, matching the sync client.
            raise ValueError(
                f"Clé API invalide/expirée ou le host {self.base_url} n'est pas accessible. Erreur: {str(e)}"
            ) from e

    async def close(self):
        """Close the underlying HTTP client."""
        await self._client.aclose()

    async def __aenter__(self):
        """Enter the context manager, returning the client instance.

        Example:
            async with AisbergAsyncClient() as client:
                # Use the client here
                pass

        Returns:
            AisbergAsyncClient: The Aisberg client instance.
        """
        return self

    async def __aexit__(self, exc_type, exc_value, traceback):
        """Exit the context manager, closing the client.

        Example:
            async with AisbergAsyncClient() as client:
                # Use the client here
                pass
        """
        await self.close()

    def __repr__(self):
        return f"<AisbergAsyncClient base_url={self.base_url}>"

    def __str__(self):
        return f"AisbergAsyncClient(base_url={self.base_url})"
|
aisberg/client.py
ADDED
@@ -0,0 +1,108 @@
|
|
1
|
+
from .config import settings
|
2
|
+
import httpx
|
3
|
+
from typing import Dict, Callable
|
4
|
+
from .modules import (
|
5
|
+
SyncChatModule,
|
6
|
+
SyncCollectionsModule,
|
7
|
+
SyncEmbeddingsModule,
|
8
|
+
SyncMeModule,
|
9
|
+
SyncModelsModule,
|
10
|
+
SyncWorkflowsModule,
|
11
|
+
ToolsModule,
|
12
|
+
)
|
13
|
+
|
14
|
+
|
15
|
+
class AisbergClient:
    """Synchronous client for the Aisberg API.

    Wraps an ``httpx.Client``, exposes the SDK feature modules and
    validates the API key with a live request at construction time.
    """

    def __init__(self, base_url=None, api_key=None, timeout=None):
        """Create the client, falling back to the shared settings for any
        argument left as ``None``.

        Raises:
            ValueError: If no base URL/API key can be resolved, or if key
                validation fails.
        """
        self.api_key = api_key or settings.aisberg_api_key
        self.base_url = base_url or settings.aisberg_base_url
        self.timeout = timeout or settings.timeout

        missing_config = not self.base_url or not self.api_key
        if missing_config:
            raise ValueError(
                "L'URL de base et la clé API doivent être définies. "
                "Utilisez les variables d'environnement AISBERG_API_KEY et AISBERG_BASE_URL ou passez-les lors de l'initialisation du client."
            )

        self.tool_registry: Dict[str, Callable] = {}
        http = httpx.Client(
            base_url=self.base_url,
            timeout=self.timeout,
            headers={"Authorization": f"Bearer {self.api_key}"},
        )
        self._client = http

        # Wire up the feature modules.
        self.tools = ToolsModule(self)
        self.chat = SyncChatModule(self, http)
        self.models = SyncModelsModule(self, http)
        self.workflows = SyncWorkflowsModule(self, http)
        self.me = SyncMeModule(self, http)
        self.collections = SyncCollectionsModule(self, http)
        self.embeddings = SyncEmbeddingsModule(self, http)

        # Fail fast on a bad key.
        self._validate_api_key()

    def _validate_api_key(self):
        """Validate the API key by performing a request against the API."""
        try:
            self.me.info()
        except httpx.ConnectTimeout as e:
            raise ConnectionError(
                f"Le host {self.base_url} n'est pas accessible. Vérifiez votre connexion réseau ou l'URL de l'API."
            ) from e
        except httpx.HTTPStatusError as e:
            status = e.response.status_code
            if status == 401:
                raise ValueError(
                    "Clé API invalide ou expirée. Veuillez vérifier votre clé API."
                ) from e
            if status == 403:
                raise PermissionError(
                    "Accès interdit. Vérifiez vos permissions pour utiliser l'API."
                ) from e
            raise ValueError(
                f"Erreur lors de la validation de la clé API: {e.response.text}"
            ) from e
        except Exception as e:
            raise ValueError(
                f"Clé API invalide/expirée ou le host {self.base_url} n'est pas accessible. Erreur: {str(e)}"
            )

    def close(self):
        """Close the underlying HTTP client."""
        self._client.close()

    def __enter__(self):
        """Enter the context manager, returning the client instance.

        Example:
            with AisbergClient() as client:
                # Use the client here
                pass

        Returns:
            AisbergClient: The Aisberg client instance.
        """
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        """Exit the context manager, closing the client.

        Example:
            with AisbergClient() as client:
                # Use the client here
                pass
        """
        self.close()

    def __repr__(self):
        return f"<AisbergClient base_url={self.base_url}>"

    def __str__(self):
        return f"AisbergClient(base_url={self.base_url})"
|
aisberg/config.py
ADDED
@@ -0,0 +1,17 @@
|
|
1
|
+
from typing import Union
|
2
|
+
|
3
|
+
from pydantic_settings import BaseSettings, SettingsConfigDict
|
4
|
+
|
5
|
+
|
6
|
+
class Settings(BaseSettings):
    """SDK configuration loaded by pydantic-settings.

    Values come from the environment (and the ``.env`` file declared in
    ``model_config``); all fields default to unset/30s when absent.
    """

    # Expected configuration values (populated from the environment by
    # pydantic-settings; presumably via AISBERG_API_KEY / AISBERG_BASE_URL /
    # TIMEOUT — TODO confirm against the BaseSettings env-name mapping).
    aisberg_api_key: Union[str, None] = None
    aisberg_base_url: Union[str, None] = None
    timeout: int = 30

    # Points pydantic-settings at the .env file to read, UTF-8 encoded.
    model_config = SettingsConfigDict(env_file=".env", env_file_encoding="utf-8")


# Shared singleton used throughout the SDK.
settings = Settings()
|
aisberg/exceptions.py
ADDED
@@ -0,0 +1,22 @@
|
|
1
|
+
class APIError(Exception):
    """Base class for errors raised by the Aisberg SDK's API layer."""

    pass
|
3
|
+
|
4
|
+
|
5
|
+
class AuthError(APIError):
    """Raised when authentication against the API fails."""

    pass
|
7
|
+
|
8
|
+
|
9
|
+
class ToolExecutionError(Exception):
    """Raised when the execution of a tool fails."""

    pass
|
13
|
+
|
14
|
+
|
15
|
+
class UnspecifiedClassArgumentError(Exception):
    """Raised when a required argument was not specified."""

    def __init__(self, argument_name: str):
        # The user-facing message is in French, like the SDK's other messages.
        message = f"L'argument '{argument_name}' est requis mais n'a pas été spécifié."
        super().__init__(message)
        # Keep the offending argument name accessible to callers.
        self.argument_name = argument_name
|
File without changes
|