aisberg 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43) hide show
  1. aisberg/__init__.py +7 -0
  2. aisberg/abstract/__init__.py +0 -0
  3. aisberg/abstract/modules.py +57 -0
  4. aisberg/api/__init__.py +0 -0
  5. aisberg/api/async_endpoints.py +333 -0
  6. aisberg/api/endpoints.py +328 -0
  7. aisberg/async_client.py +107 -0
  8. aisberg/client.py +108 -0
  9. aisberg/config.py +17 -0
  10. aisberg/exceptions.py +22 -0
  11. aisberg/models/__init__.py +0 -0
  12. aisberg/models/chat.py +143 -0
  13. aisberg/models/collections.py +36 -0
  14. aisberg/models/embeddings.py +92 -0
  15. aisberg/models/models.py +39 -0
  16. aisberg/models/requests.py +11 -0
  17. aisberg/models/token.py +11 -0
  18. aisberg/models/tools.py +73 -0
  19. aisberg/models/workflows.py +66 -0
  20. aisberg/modules/__init__.py +23 -0
  21. aisberg/modules/chat.py +403 -0
  22. aisberg/modules/collections.py +117 -0
  23. aisberg/modules/document.py +117 -0
  24. aisberg/modules/embeddings.py +309 -0
  25. aisberg/modules/me.py +77 -0
  26. aisberg/modules/models.py +108 -0
  27. aisberg/modules/tools.py +78 -0
  28. aisberg/modules/workflows.py +140 -0
  29. aisberg/requests/__init__.py +0 -0
  30. aisberg/requests/async_requests.py +85 -0
  31. aisberg/requests/sync_requests.py +85 -0
  32. aisberg/utils.py +111 -0
  33. aisberg-0.1.0.dist-info/METADATA +212 -0
  34. aisberg-0.1.0.dist-info/RECORD +43 -0
  35. aisberg-0.1.0.dist-info/WHEEL +5 -0
  36. aisberg-0.1.0.dist-info/licenses/LICENSE +9 -0
  37. aisberg-0.1.0.dist-info/top_level.txt +3 -0
  38. tests/integration/test_collections_integration.py +115 -0
  39. tests/unit/test_collections_sync.py +104 -0
  40. tmp/test.py +33 -0
  41. tmp/test_async.py +126 -0
  42. tmp/test_doc_parse.py +12 -0
  43. tmp/test_sync.py +146 -0
@@ -0,0 +1,403 @@
1
+ from abc import ABC, abstractmethod
2
+ from typing import Optional, Union, Generator, AsyncGenerator, List, Awaitable
3
+
4
+ from ..models.chat import (
5
+ LanguageModelInput,
6
+ HumanMessage,
7
+ ChatCompletionResponse,
8
+ AIMessage,
9
+ ToolMessage,
10
+ ChatCompletionChunk,
11
+ )
12
+ from ..models.tools import Tool, tools_to_payload
13
+ from ..exceptions import ToolExecutionError
14
+ import json
15
+ from ..api import async_endpoints, endpoints
16
+ from ..abstract.modules import AsyncModule, SyncModule
17
+
18
+
19
class AbstractChatModule(ABC):
    """
    Abstract base class for chat modules.

    Defines the public `complete`/`stream` contracts shared by the sync and
    async implementations, and implements the common logic for turning a
    model response containing tool calls into a follow-up message list.
    """

    def __init__(self, parent, http_client):
        """
        Initialize the AbstractChatModule.

        Args:
            parent: Parent client instance (provides ``tools.execute``).
            http_client: HTTP client used for API requests.
        """
        self._parent = parent
        self._client = http_client

    @abstractmethod
    def complete(
        self,
        input: LanguageModelInput,
        model: str,
        temperature: float = 0.7,
        tools: Optional[List[Tool]] = None,
        auto_execute_tools: bool = False,
        **kwargs,
    ) -> Union[ChatCompletionResponse, "Awaitable[ChatCompletionResponse]"]:
        """
        Send a chat message (or list of messages) to the model and return a
        structured response. Supports tool usage and optional automatic tool
        execution.

        Args:
            input (LanguageModelInput): Message(s) to send to the model.
            model (str): Identifier of the language model to use.
            temperature (float): Sampling temperature (0.0-1.0, default 0.7).
            tools (Optional[list]): Tools the model may invoke.
            auto_execute_tools (bool): If True, automatically execute tool
                calls produced by the model (default: False).
            **kwargs: Additional arguments passed to the API.

        Returns:
            ChatCompletionResponse: Structured model response.
        """
        pass

    @abstractmethod
    def stream(
        self,
        input: LanguageModelInput,
        model: str,
        temperature: float = 0.7,
        tools: Optional[List[Tool]] = None,
        auto_execute_tools: bool = True,
        full_chunk: bool = True,
        **kwargs,
    ) -> Union[
        Generator[Union[str, ChatCompletionChunk, ChatCompletionResponse], None, None],
        AsyncGenerator[Union[str, ChatCompletionChunk, ChatCompletionResponse], None],
    ]:
        """
        Stream a chat completion from the model. If tools are provided, an
        initial non-streaming call determines tool invocations; when
        ``auto_execute_tools`` is enabled they are executed and the final
        response is then streamed.

        Args:
            input (LanguageModelInput): Message(s) to send to the model.
            model (str): Identifier of the language model to use.
            temperature (float): Sampling temperature (0.0-1.0, default 0.7).
            tools (Optional[list]): Tools the model may invoke.
            auto_execute_tools (bool): If True, automatically execute tool
                calls produced by the model (default: True).
            full_chunk (bool): If True, yield full response objects; if
                False, yield only the content (default: True).
            **kwargs: Additional keyword arguments for the API call.

        Yields:
            Union[str, ChatCompletionChunk, ChatCompletionResponse]:
                Response chunks or final responses as they become available.
        """
        pass

    @abstractmethod
    def _handle_tool_calls(
        self,
        response: ChatCompletionResponse,
        input,
        model,
        temperature,
        stream: bool = False,
        full_chunk: bool = True,
        **kwargs,
    ):
        """
        Execute the tool calls found in *response*, integrate their results
        into the conversation, then continue the chat either streaming or
        blocking.

        Args:
            response (ChatCompletionResponse): Initial response with tool calls.
            input: Original input messages.
            model: Model identifier.
            temperature: Sampling temperature.
            stream: If True, return a streaming generator.
            full_chunk: (Streaming only) If True, yield full chunks.
            **kwargs: Additional arguments for the next API call.

        Returns:
            ChatCompletionResponse or a generator, depending on ``stream``.
        """
        pass

    def _build_tool_messages(
        self,
        response: ChatCompletionResponse,
        input: Union[LanguageModelInput, List[HumanMessage]],
    ):
        """
        Utility: build the message list including the assistant's tool calls
        and one ToolMessage per call (tool result, or an error message when
        the arguments are invalid JSON or execution fails).
        """
        tool_calls = response.choices[0].message.tool_calls
        # Copy the caller's list: the original code aliased `input` directly,
        # so appending below mutated the caller's message list in place.
        messages: list = (
            list(input) if isinstance(input, list) else [HumanMessage(content=input)]
        )

        messages.append(
            AIMessage(
                content=response.choices[0].message.content or "",
                tool_calls=tool_calls,
            )
        )

        for tool_call in tool_calls or []:
            tool_name = tool_call.function.name
            tool_arguments_str = tool_call.function.arguments
            try:
                tool_arguments = (
                    json.loads(tool_arguments_str) if tool_arguments_str else {}
                )
                tool_result = self._parent.tools.execute(tool_name, tool_arguments)
                messages.append(
                    ToolMessage(
                        content=json.dumps(tool_result)
                        if not isinstance(tool_result, str)
                        else tool_result,
                        tool_call_id=tool_call.id,
                    )
                )
            except json.JSONDecodeError:
                messages.append(
                    ToolMessage(
                        content=f"Error: Invalid JSON arguments provided for tool {tool_name}.",
                        tool_call_id=tool_call.id,
                    )
                )
            except ToolExecutionError as e:
                messages.append(
                    ToolMessage(
                        content=f"Error: {str(e)}",
                        tool_call_id=tool_call.id,
                    )
                )
        return messages
177
+
178
+
179
class SyncChatModule(AbstractChatModule, SyncModule):
    """
    Synchronous chat module.

    High-level, blocking interface for conversational LLM calls: standard
    completions, streaming responses, and automatic tool execution. All
    backend communication is abstracted behind `endpoints`.
    """

    def __init__(self, parent, http_client):
        super().__init__(parent, http_client)
        SyncModule.__init__(self, parent, http_client)

    def complete(
        self,
        input: LanguageModelInput,
        model: str,
        temperature: float = 0.7,
        tools: Optional[list[Tool]] = None,
        auto_execute_tools: bool = False,
        **kwargs,
    ) -> ChatCompletionResponse:
        """
        Send message(s) to the model and return the structured response.
        When ``auto_execute_tools`` is True and the model requested tool
        calls, the tools are executed and the conversation continued.
        """
        raw = endpoints.chat(
            self._client, input, model, temperature, tools_to_payload(tools), **kwargs
        )
        # Validate into the typed model; the async module already does this,
        # so the sync path now returns the same type consistently.
        response = ChatCompletionResponse.model_validate(raw)

        if (
            auto_execute_tools
            and response.choices
            and response.choices[0].message.tool_calls
        ):
            return self._handle_tool_calls(
                input=input,
                response=response,
                model=model,
                temperature=temperature,
                stream=False,
                **kwargs,
            )

        return response

    def stream(
        self,
        input: LanguageModelInput,
        model: str,
        temperature: float = 0.7,
        tools: Optional[list[Tool]] = None,
        auto_execute_tools: bool = True,
        full_chunk: bool = True,
        **kwargs,
    ) -> Generator[Union[str, ChatCompletionChunk, ChatCompletionResponse], None, None]:
        """
        Stream a chat completion. With tools, an initial non-streaming call
        determines tool invocations; tool results are then integrated and the
        final answer streamed.
        """
        if tools:
            raw = endpoints.chat(
                self._client,
                input,
                model,
                temperature,
                tools_to_payload(tools),
                **kwargs,
            )
            response = ChatCompletionResponse.model_validate(raw)

            has_tool_calls = bool(
                response.choices and response.choices[0].message.tool_calls
            )
            if not auto_execute_tools or not has_tool_calls:
                # Nothing to execute: yield the response itself. Previously a
                # tool-free response fell through to `yield from` on the
                # non-generator return of _handle_tool_calls, which iterated
                # the pydantic model's fields instead of chat content.
                yield response if full_chunk else response.choices[0].message.content
                return

            yield from self._handle_tool_calls(
                response,
                input,
                model,
                temperature,
                stream=True,
                full_chunk=full_chunk,
                **kwargs,
            )
        else:
            for chunk in endpoints.stream(
                self._client,
                input,
                model,
                temperature,
                full_chunk=full_chunk,
                **kwargs,
            ):
                yield ChatCompletionChunk.model_validate(chunk) if full_chunk else chunk

    def _handle_tool_calls(
        self,
        response: ChatCompletionResponse,
        input,
        model,
        temperature,
        stream: bool = False,
        full_chunk: bool = True,
        **kwargs,
    ):
        """
        Execute the tool calls in *response* and continue the conversation,
        streaming or blocking depending on ``stream``. Returns the response
        unchanged when it contains no tool calls.
        """
        tool_calls = response.choices[0].message.tool_calls

        if tool_calls is None:
            return response

        messages = self._build_tool_messages(response, input)

        if stream:
            return self.stream(
                messages, model, temperature, full_chunk=full_chunk, **kwargs
            )
        else:
            return self.complete(messages, model, temperature, **kwargs)
288
+
289
+
290
class AsyncChatModule(AbstractChatModule, AsyncModule):
    """
    Asynchronous chat module.

    High-level awaitable interface for conversational LLM calls: standard
    completions, streaming responses, and automatic tool execution. All
    backend communication is abstracted behind `async_endpoints`.
    """

    def __init__(self, parent, http_client):
        super().__init__(parent, http_client)
        AsyncModule.__init__(self, parent, http_client)

    async def complete(
        self,
        input: LanguageModelInput,
        model: str,
        temperature: float = 0.7,
        tools: Optional[list[Tool]] = None,
        auto_execute_tools: bool = False,
        **kwargs,
    ) -> ChatCompletionResponse:
        """
        Send message(s) to the model and return the structured response.
        When ``auto_execute_tools`` is True and the model requested tool
        calls, the tools are executed and the conversation continued.
        """
        resp = await async_endpoints.chat(
            self._client, input, model, temperature, tools_to_payload(tools), **kwargs
        )
        response = ChatCompletionResponse.model_validate(resp)

        if (
            auto_execute_tools
            and response.choices
            and response.choices[0].message.tool_calls
        ):
            return await self._handle_tool_calls(
                input=input,
                response=response,
                model=model,
                temperature=temperature,
                stream=False,
                **kwargs,
            )

        return response

    async def stream(
        self,
        input: LanguageModelInput,
        model: str,
        temperature: float = 0.7,
        tools: Optional[list[Tool]] = None,
        auto_execute_tools: bool = True,
        full_chunk: bool = True,
        **kwargs,
    ) -> AsyncGenerator[Union[str, ChatCompletionChunk, ChatCompletionResponse], None]:
        """
        Stream a chat completion. With tools, an initial non-streaming call
        determines tool invocations; tool results are then integrated and the
        final answer streamed.
        """
        if tools:
            resp = await async_endpoints.chat(
                self._client,
                input,
                model,
                temperature,
                tools_to_payload(tools),
                **kwargs,
            )
            response = ChatCompletionResponse.model_validate(resp)

            has_tool_calls = bool(
                response.choices and response.choices[0].message.tool_calls
            )
            if not auto_execute_tools or not has_tool_calls:
                # Nothing to execute: yield the response itself. Previously a
                # tool-free response reached `async for ... in` on the
                # non-generator ChatCompletionResponse returned by
                # _handle_tool_calls, raising a TypeError at runtime.
                if full_chunk:
                    yield response
                else:
                    yield response.choices[0].message.content
                return

            # _handle_tool_calls is a coroutine; awaiting it yields the
            # async generator produced by self.stream(...).
            async for val in await self._handle_tool_calls(
                response,
                input,
                model,
                temperature,
                stream=True,
                full_chunk=full_chunk,
                **kwargs,
            ):
                yield val
        else:
            async for chunk in async_endpoints.chat_stream(
                self._client,
                input,
                model,
                temperature,
                full_chunk=full_chunk,
                **kwargs,
            ):
                yield ChatCompletionChunk.model_validate(chunk) if full_chunk else chunk

    async def _handle_tool_calls(
        self,
        response: ChatCompletionResponse,
        input,
        model,
        temperature,
        stream: bool = False,
        full_chunk: bool = True,
        **kwargs,
    ):
        """
        Execute the tool calls in *response* and continue the conversation,
        streaming or blocking depending on ``stream``. Returns the response
        unchanged when it contains no tool calls.
        """
        tool_calls = response.choices[0].message.tool_calls

        if tool_calls is None:
            return response

        messages = self._build_tool_messages(response, input)

        if stream:
            return self.stream(
                messages, model, temperature, full_chunk=full_chunk, **kwargs
            )
        else:
            return await self.complete(messages, model, temperature, **kwargs)
@@ -0,0 +1,117 @@
1
+ from typing import List
2
+ from abc import ABC
3
+
4
+ from ..models.collections import GroupCollections, Collection, CollectionDetails
5
+
6
+ from abc import abstractmethod
7
+ from ..abstract.modules import SyncModule, AsyncModule
8
+ from ..api import endpoints, async_endpoints
9
+
10
+
11
class AbstractCollectionsModule(ABC):
    """
    Abstract base for the collections modules.

    Declares the listing/detail operations implemented by the sync and async
    variants and provides the shared group-lookup helper.
    """

    def __init__(self, parent, client):
        self._parent = parent
        self._client = client

    @abstractmethod
    def list(self) -> List[GroupCollections]:
        """
        Get a list of available collections, grouped by your belonging groups.

        Returns:
            List[GroupCollections]: A list of available collections.

        Raises:
            ValueError: If no collections are found.
            Exception: If there is an error fetching the collections.
        """
        pass

    @abstractmethod
    def get_by_group(self, group_id: str) -> List[Collection]:
        """
        Get collections by group ID.

        Args:
            group_id (str): The ID of the group for which to retrieve collections.

        Returns:
            List[Collection]: A list of collections for the specified group.

        Raises:
            ValueError: If no collections are found for the specified group ID.
            Exception: If there is an error fetching the collections.
        """
        pass

    @abstractmethod
    def details(self, collection_id: str, group_id: str) -> CollectionDetails:
        """
        Get details of a specific collection.

        Args:
            collection_id (str): The ID of the collection to retrieve.
            group_id (str): The ID of the group to which the collection belongs.

        Returns:
            CollectionDetails: The details of the specified collection.

        Raises:
            ValueError: If the specified collection is not found.
        """
        pass

    @staticmethod
    def _get_collections_by_group(
        collections: List[GroupCollections], group_id: str
    ) -> List[Collection]:
        """Return the collections of the group matching *group_id*, or raise."""
        matching = next((g for g in collections if g.group == group_id), None)
        if matching is None:
            raise ValueError("No collections found for group ID")
        return matching.collections
72
+
73
+
74
class SyncCollectionsModule(SyncModule, AbstractCollectionsModule):
    """Synchronous implementation of the collections module."""

    def __init__(self, parent, client):
        SyncModule.__init__(self, parent, client)
        AbstractCollectionsModule.__init__(self, parent, client)

    def list(self) -> List[GroupCollections]:
        """List all collections, grouped by the caller's groups."""
        return endpoints.collections(self._client)

    def get_by_group(self, group_id: str) -> List[Collection]:
        """Return the collections belonging to *group_id*."""
        return self._get_collections_by_group(self.list(), group_id)

    def details(self, collection_id: str, group_id: str) -> CollectionDetails:
        """Return the points-level details of one collection."""
        points = endpoints.collection(self._client, collection_id, group_id)
        if points is None:
            raise ValueError("No collection found")
        return CollectionDetails(name=collection_id, group=group_id, points=points)
95
+
96
+
97
class AsyncCollectionsModule(AsyncModule, AbstractCollectionsModule):
    """Asynchronous implementation of the collections module."""

    def __init__(self, parent, client):
        AsyncModule.__init__(self, parent, client)
        AbstractCollectionsModule.__init__(self, parent, client)

    async def list(self) -> List[GroupCollections]:
        """List all collections, grouped by the caller's groups."""
        return await async_endpoints.collections(self._client)

    async def get_by_group(self, group_id: str) -> List[Collection]:
        """Return the collections belonging to *group_id*."""
        all_groups = await self.list()
        return self._get_collections_by_group(all_groups, group_id)

    async def details(self, collection_id: str, group_id: str) -> CollectionDetails:
        """Return the points-level details of one collection."""
        points = await async_endpoints.collection(self._client, collection_id, group_id)
        if points is None:
            raise ValueError("No collection found")
        return CollectionDetails(name=collection_id, group=group_id, points=points)
@@ -0,0 +1,117 @@
1
+ from typing import List
2
+ from abc import ABC
3
+
4
+ from ..models.document import GroupDocument, Collection, CollectionDetails
5
+
6
+ from abc import abstractmethod
7
+ from ..abstract.modules import SyncModule, AsyncModule
8
+ from ..api import endpoints, async_endpoints
9
+
10
+
11
class AbstractDocumentModule(ABC):
    """
    Abstract base for the document modules.

    Declares the listing/detail operations implemented by the sync and async
    variants and provides the shared group-lookup helper.
    """

    def __init__(self, parent, client):
        self._parent = parent
        self._client = client

    @abstractmethod
    def list(self) -> List[GroupDocument]:
        """
        Get the available document collections, grouped by your belonging groups.

        Returns:
            List[GroupDocument]: The available document collections.

        Raises:
            ValueError: If none are found.
            Exception: If there is an error fetching them.
        """
        pass

    @abstractmethod
    def get_by_group(self, group_id: str) -> List[Collection]:
        """
        Get document collections by group ID.

        Args:
            group_id (str): The ID of the group to look up.

        Returns:
            List[Collection]: The collections for the specified group.

        Raises:
            ValueError: If the group has no collections.
            Exception: If there is an error fetching them.
        """
        pass

    @abstractmethod
    def details(self, collection_id: str, group_id: str) -> CollectionDetails:
        """
        Get details of a specific collection.

        Args:
            collection_id (str): The ID of the collection to retrieve.
            group_id (str): The ID of the group the collection belongs to.

        Returns:
            CollectionDetails: The details of the specified collection.

        Raises:
            ValueError: If the specified collection is not found.
        """
        pass

    @staticmethod
    def _get_collections_by_group(
        collections: List[GroupDocument], group_id: str
    ) -> List[Collection]:
        """Return the collections of the group matching *group_id*, or raise."""
        matching = next((g for g in collections if g.group == group_id), None)
        if matching is None:
            raise ValueError("No collections found for group ID")
        return matching.collections
72
+
73
+
74
class SyncDocumentModule(SyncModule, AbstractDocumentModule):
    """Synchronous implementation of the document module."""

    def __init__(self, parent, client):
        SyncModule.__init__(self, parent, client)
        AbstractDocumentModule.__init__(self, parent, client)

    def list(self) -> List[GroupDocument]:
        """List all document collections, grouped by the caller's groups."""
        # NOTE(review): reuses the collections endpoint — confirm there is no
        # dedicated documents endpoint (this class mirrors SyncCollectionsModule).
        return endpoints.collections(self._client)

    def get_by_group(self, group_id: str) -> List[Collection]:
        """Return the document collections belonging to *group_id*."""
        return self._get_collections_by_group(self.list(), group_id)

    def details(self, collection_id: str, group_id: str) -> CollectionDetails:
        """Return the points-level details of one collection."""
        points = endpoints.collection(self._client, collection_id, group_id)
        if points is None:
            raise ValueError("No collection found")
        return CollectionDetails(name=collection_id, group=group_id, points=points)
95
+
96
+
97
class AsyncDocumentModule(AsyncModule, AbstractDocumentModule):
    """Asynchronous implementation of the document module."""

    def __init__(self, parent, client):
        AsyncModule.__init__(self, parent, client)
        AbstractDocumentModule.__init__(self, parent, client)

    async def list(self) -> List[GroupDocument]:
        """List all document collections, grouped by the caller's groups."""
        # NOTE(review): reuses the collections endpoint — confirm there is no
        # dedicated documents endpoint (this class mirrors AsyncCollectionsModule).
        return await async_endpoints.collections(self._client)

    async def get_by_group(self, group_id: str) -> List[Collection]:
        """Return the document collections belonging to *group_id*."""
        all_groups = await self.list()
        return self._get_collections_by_group(all_groups, group_id)

    async def details(self, collection_id: str, group_id: str) -> CollectionDetails:
        """Return the points-level details of one collection."""
        points = await async_endpoints.collection(self._client, collection_id, group_id)
        if points is None:
            raise ValueError("No collection found")
        return CollectionDetails(name=collection_id, group=group_id, points=points)