unique_toolkit 0.5.6__py3-none-any.whl → 0.5.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- unique_toolkit/_common/_base_service.py +10 -0
- unique_toolkit/app/performance/async_tasks.py +70 -0
- unique_toolkit/app/performance/async_wrapper.py +19 -8
- unique_toolkit/chat/service.py +106 -103
- unique_toolkit/chat/state.py +0 -10
- unique_toolkit/content/schemas.py +2 -2
- unique_toolkit/content/service.py +93 -75
- unique_toolkit/embedding/service.py +31 -26
- unique_toolkit/language_model/service.py +95 -95
- {unique_toolkit-0.5.6.dist-info → unique_toolkit-0.5.8.dist-info}/METADATA +11 -3
- {unique_toolkit-0.5.6.dist-info → unique_toolkit-0.5.8.dist-info}/RECORD +13 -12
- unique_toolkit/app/performance/async_executor.py +0 -186
- {unique_toolkit-0.5.6.dist-info → unique_toolkit-0.5.8.dist-info}/LICENSE +0 -0
- {unique_toolkit-0.5.6.dist-info → unique_toolkit-0.5.8.dist-info}/WHEEL +0 -0
@@ -7,7 +7,7 @@ from typing import Optional, cast
|
|
7
7
|
import requests
|
8
8
|
import unique_sdk
|
9
9
|
|
10
|
-
from unique_toolkit.
|
10
|
+
from unique_toolkit._common._base_service import BaseService
|
11
11
|
from unique_toolkit.chat.state import ChatState
|
12
12
|
from unique_toolkit.content.schemas import (
|
13
13
|
Content,
|
@@ -18,21 +18,29 @@ from unique_toolkit.content.schemas import (
|
|
18
18
|
)
|
19
19
|
|
20
20
|
|
21
|
-
class ContentService:
|
22
|
-
|
21
|
+
class ContentService(BaseService):
|
22
|
+
"""
|
23
|
+
Provides methods for searching, downloading and uploading content in the knowledge base.
|
24
|
+
|
25
|
+
Attributes:
|
26
|
+
state (ChatState): The chat state.
|
27
|
+
logger (Optional[logging.Logger]): The logger. Defaults to None.
|
28
|
+
"""
|
23
29
|
|
24
30
|
def __init__(self, state: ChatState, logger: Optional[logging.Logger] = None):
|
25
|
-
|
26
|
-
|
31
|
+
super().__init__(state, logger)
|
32
|
+
|
33
|
+
DEFAULT_SEARCH_LANGUAGE = "english"
|
27
34
|
|
28
35
|
def search_content_chunks(
|
29
36
|
self,
|
30
37
|
search_string: str,
|
31
38
|
search_type: ContentSearchType,
|
32
39
|
limit: int,
|
33
|
-
reranker_config: Optional[RerankerConfig] = None,
|
34
40
|
search_language: str = DEFAULT_SEARCH_LANGUAGE,
|
41
|
+
reranker_config: Optional[RerankerConfig] = None,
|
35
42
|
scope_ids: Optional[list[str]] = None,
|
43
|
+
chat_only: Optional[bool] = None,
|
36
44
|
) -> list[ContentChunk]:
|
37
45
|
"""
|
38
46
|
Performs a synchronous search for content chunks in the knowledge base.
|
@@ -41,32 +49,52 @@ class ContentService:
|
|
41
49
|
search_string (str): The search string.
|
42
50
|
search_type (ContentSearchType): The type of search to perform.
|
43
51
|
limit (int): The maximum number of results to return.
|
44
|
-
reranker_config (Optional[RerankerConfig]): The reranker configuration. Defaults to None.
|
45
52
|
search_language (str): The language for the full-text search. Defaults to "english".
|
53
|
+
reranker_config (Optional[RerankerConfig]): The reranker configuration. Defaults to None.
|
46
54
|
scope_ids (Optional[list[str]]): The scope IDs. Defaults to None.
|
55
|
+
chat_only (Optional[bool]): Whether to search only in the current chat. Defaults to None.
|
47
56
|
|
48
57
|
Returns:
|
49
58
|
list[ContentChunk]: The search results.
|
50
59
|
"""
|
51
|
-
|
52
|
-
|
53
|
-
|
54
|
-
|
55
|
-
|
56
|
-
|
57
|
-
|
58
|
-
|
60
|
+
if not scope_ids:
|
61
|
+
self.logger.warning("No scope IDs provided for search.")
|
62
|
+
|
63
|
+
try:
|
64
|
+
searches = unique_sdk.Search.create(
|
65
|
+
user_id=self.state.user_id,
|
66
|
+
company_id=self.state.company_id,
|
67
|
+
chatId=self.state.chat_id,
|
68
|
+
searchString=search_string,
|
69
|
+
searchType=search_type.name,
|
70
|
+
scopeIds=scope_ids,
|
71
|
+
limit=limit,
|
72
|
+
reranker=reranker_config.model_dump(by_alias=True)
|
73
|
+
if reranker_config
|
74
|
+
else None,
|
75
|
+
language=search_language,
|
76
|
+
chatOnly=chat_only,
|
77
|
+
)
|
78
|
+
except Exception as e:
|
79
|
+
self.logger.error(f"Error while searching content chunks: {e}")
|
80
|
+
raise e
|
81
|
+
|
82
|
+
def map_to_content_chunks(searches: list[unique_sdk.Search]):
|
83
|
+
return [ContentChunk(**search) for search in searches]
|
59
84
|
|
60
|
-
|
61
|
-
|
62
|
-
|
85
|
+
# TODO change return type in sdk from Search to list[Search]
|
86
|
+
searches = cast(list[unique_sdk.Search], searches)
|
87
|
+
return map_to_content_chunks(searches)
|
88
|
+
|
89
|
+
async def search_content_chunks_async(
|
63
90
|
self,
|
64
91
|
search_string: str,
|
65
92
|
search_type: ContentSearchType,
|
66
93
|
limit: int,
|
67
|
-
reranker_config: Optional[RerankerConfig] = None,
|
68
94
|
search_language: str = DEFAULT_SEARCH_LANGUAGE,
|
95
|
+
reranker_config: Optional[RerankerConfig] = None,
|
69
96
|
scope_ids: Optional[list[str]] = None,
|
97
|
+
chat_only: Optional[bool] = None,
|
70
98
|
):
|
71
99
|
"""
|
72
100
|
Performs an asynchronous search for content chunks in the knowledge base.
|
@@ -75,36 +103,19 @@ class ContentService:
|
|
75
103
|
search_string (str): The search string.
|
76
104
|
search_type (ContentSearchType): The type of search to perform.
|
77
105
|
limit (int): The maximum number of results to return.
|
78
|
-
reranker_config (Optional[RerankerConfig]): The reranker configuration. Defaults to None.
|
79
106
|
search_language (str): The language for the full-text search. Defaults to "english".
|
80
|
-
|
107
|
+
reranker_config (Optional[RerankerConfig]): The reranker configuration. Defaults to None.
|
108
|
+
scope_ids (Optional[list[str]]): The scope IDs. Defaults to None.
|
109
|
+
chat_only (Optional[bool]): Whether to search only in the current chat. Defaults to None.
|
81
110
|
|
82
111
|
Returns:
|
83
112
|
list[ContentChunk]: The search results.
|
84
113
|
"""
|
85
|
-
return self._trigger_search_content_chunks(
|
86
|
-
search_string=search_string,
|
87
|
-
search_type=search_type,
|
88
|
-
limit=limit,
|
89
|
-
reranker_config=reranker_config,
|
90
|
-
search_language=search_language,
|
91
|
-
scope_ids=scope_ids,
|
92
|
-
)
|
93
|
-
|
94
|
-
def _trigger_search_content_chunks(
|
95
|
-
self,
|
96
|
-
search_string: str,
|
97
|
-
search_type: ContentSearchType,
|
98
|
-
limit: int,
|
99
|
-
reranker_config: Optional[RerankerConfig] = None,
|
100
|
-
search_language: str = DEFAULT_SEARCH_LANGUAGE,
|
101
|
-
scope_ids: Optional[list[str]] = None,
|
102
|
-
) -> list[ContentChunk]:
|
103
114
|
if not scope_ids:
|
104
115
|
self.logger.warning("No scope IDs provided for search.")
|
105
116
|
|
106
117
|
try:
|
107
|
-
searches = unique_sdk.Search.
|
118
|
+
searches = await unique_sdk.Search.create_async(
|
108
119
|
user_id=self.state.user_id,
|
109
120
|
company_id=self.state.company_id,
|
110
121
|
chatId=self.state.chat_id,
|
@@ -112,9 +123,11 @@ class ContentService:
|
|
112
123
|
searchType=search_type.name,
|
113
124
|
scopeIds=scope_ids,
|
114
125
|
limit=limit,
|
115
|
-
reranker=reranker_config.model_dump()
|
126
|
+
reranker=reranker_config.model_dump(by_alias=True)
|
127
|
+
if reranker_config
|
128
|
+
else None,
|
116
129
|
language=search_language,
|
117
|
-
chatOnly=
|
130
|
+
chatOnly=chat_only,
|
118
131
|
)
|
119
132
|
except Exception as e:
|
120
133
|
self.logger.error(f"Error while searching content chunks: {e}")
|
@@ -141,11 +154,21 @@ class ContentService:
|
|
141
154
|
Returns:
|
142
155
|
list[Content]: The search results.
|
143
156
|
"""
|
144
|
-
|
157
|
+
try:
|
158
|
+
contents = unique_sdk.Content.search(
|
159
|
+
user_id=self.state.user_id,
|
160
|
+
company_id=self.state.company_id,
|
161
|
+
chatId=self.state.chat_id,
|
162
|
+
# TODO add type parameter
|
163
|
+
where=where, # type: ignore
|
164
|
+
)
|
165
|
+
except Exception as e:
|
166
|
+
self.logger.error(f"Error while searching contents: {e}")
|
167
|
+
raise e
|
145
168
|
|
146
|
-
|
147
|
-
|
148
|
-
def
|
169
|
+
return self._map_contents(contents)
|
170
|
+
|
171
|
+
async def search_contents_async(
|
149
172
|
self,
|
150
173
|
where: dict,
|
151
174
|
) -> list[Content]:
|
@@ -158,35 +181,8 @@ class ContentService:
|
|
158
181
|
Returns:
|
159
182
|
list[Content]: The search results.
|
160
183
|
"""
|
161
|
-
return self._trigger_search_contents(where)
|
162
|
-
|
163
|
-
def _trigger_search_contents(
|
164
|
-
self,
|
165
|
-
where: dict,
|
166
|
-
) -> list[Content]:
|
167
|
-
def map_content_chunk(content_chunk):
|
168
|
-
return ContentChunk(
|
169
|
-
id=content_chunk["id"],
|
170
|
-
text=content_chunk["text"],
|
171
|
-
start_page=content_chunk["startPage"],
|
172
|
-
end_page=content_chunk["endPage"],
|
173
|
-
order=content_chunk["order"],
|
174
|
-
)
|
175
|
-
|
176
|
-
def map_content(content):
|
177
|
-
return Content(
|
178
|
-
id=content["id"],
|
179
|
-
key=content["key"],
|
180
|
-
title=content["title"],
|
181
|
-
url=content["url"],
|
182
|
-
chunks=[map_content_chunk(chunk) for chunk in content["chunks"]],
|
183
|
-
)
|
184
|
-
|
185
|
-
def map_contents(contents):
|
186
|
-
return [map_content(content) for content in contents]
|
187
|
-
|
188
184
|
try:
|
189
|
-
contents = unique_sdk.Content.
|
185
|
+
contents = await unique_sdk.Content.search_async(
|
190
186
|
user_id=self.state.user_id,
|
191
187
|
company_id=self.state.company_id,
|
192
188
|
chatId=self.state.chat_id,
|
@@ -197,7 +193,29 @@ class ContentService:
|
|
197
193
|
self.logger.error(f"Error while searching contents: {e}")
|
198
194
|
raise e
|
199
195
|
|
200
|
-
return
|
196
|
+
return self._map_contents(contents)
|
197
|
+
|
198
|
+
@staticmethod
|
199
|
+
def _map_content_chunk(content_chunk: dict):
|
200
|
+
return ContentChunk(
|
201
|
+
id=content_chunk["id"],
|
202
|
+
text=content_chunk["text"],
|
203
|
+
start_page=content_chunk["startPage"],
|
204
|
+
end_page=content_chunk["endPage"],
|
205
|
+
order=content_chunk["order"],
|
206
|
+
)
|
207
|
+
|
208
|
+
def _map_content(self, content: dict):
|
209
|
+
return Content(
|
210
|
+
id=content["id"],
|
211
|
+
key=content["key"],
|
212
|
+
title=content["title"],
|
213
|
+
url=content["url"],
|
214
|
+
chunks=[self._map_content_chunk(chunk) for chunk in content["chunks"]],
|
215
|
+
)
|
216
|
+
|
217
|
+
def _map_contents(self, contents):
|
218
|
+
return [self._map_content(content) for content in contents]
|
201
219
|
|
202
220
|
def upload_content(
|
203
221
|
self,
|
@@ -4,22 +4,29 @@ from typing import Optional
|
|
4
4
|
import numpy as np
|
5
5
|
import unique_sdk
|
6
6
|
|
7
|
-
from unique_toolkit.
|
7
|
+
from unique_toolkit._common._base_service import BaseService
|
8
8
|
from unique_toolkit.chat.state import ChatState
|
9
9
|
from unique_toolkit.embedding.schemas import Embeddings
|
10
10
|
|
11
11
|
|
12
|
-
class EmbeddingService:
|
12
|
+
class EmbeddingService(BaseService):
|
13
|
+
"""
|
14
|
+
Provides methods to interact with the Embedding service.
|
15
|
+
|
16
|
+
Attributes:
|
17
|
+
state (ChatState): The ChatState object.
|
18
|
+
logger (Optional[logging.Logger]): The logger object. Defaults to None.
|
19
|
+
"""
|
20
|
+
|
13
21
|
def __init__(self, state: ChatState, logger: Optional[logging.Logger] = None):
|
14
|
-
|
15
|
-
self.logger = logger or logging.getLogger(__name__)
|
22
|
+
super().__init__(state, logger)
|
16
23
|
|
17
|
-
|
24
|
+
DEFAULT_TIMEOUT = 600_000
|
18
25
|
|
19
26
|
def embed_texts(
|
20
27
|
self,
|
21
28
|
texts: list[str],
|
22
|
-
timeout: int =
|
29
|
+
timeout: int = DEFAULT_TIMEOUT,
|
23
30
|
) -> Embeddings:
|
24
31
|
"""
|
25
32
|
Embed text.
|
@@ -34,17 +41,18 @@ class EmbeddingService:
|
|
34
41
|
Raises:
|
35
42
|
Exception: If an error occurs.
|
36
43
|
"""
|
37
|
-
|
38
|
-
|
39
|
-
|
40
|
-
|
44
|
+
request = self._get_request_obj(texts=texts, timeout=timeout)
|
45
|
+
try:
|
46
|
+
response = unique_sdk.Embeddings.create(**request)
|
47
|
+
return Embeddings(**response)
|
48
|
+
except Exception as e:
|
49
|
+
self.logger.error(f"Error embedding texts: {e}")
|
50
|
+
raise e
|
41
51
|
|
42
|
-
|
43
|
-
@async_warning
|
44
|
-
def async_embed_texts(
|
52
|
+
async def embed_texts_async(
|
45
53
|
self,
|
46
54
|
texts: list[str],
|
47
|
-
timeout: int =
|
55
|
+
timeout: int = DEFAULT_TIMEOUT,
|
48
56
|
) -> Embeddings:
|
49
57
|
"""
|
50
58
|
Embed text asynchronously.
|
@@ -59,24 +67,21 @@ class EmbeddingService:
|
|
59
67
|
Raises:
|
60
68
|
Exception: If an error occurs.
|
61
69
|
"""
|
62
|
-
|
63
|
-
|
64
|
-
|
65
|
-
|
70
|
+
request = self._get_request_obj(texts=texts, timeout=timeout)
|
71
|
+
try:
|
72
|
+
response = await unique_sdk.Embeddings.create_async(**request)
|
73
|
+
return Embeddings(**response)
|
74
|
+
except Exception as e:
|
75
|
+
self.logger.error(f"Error embedding texts: {e}")
|
76
|
+
raise e
|
66
77
|
|
67
|
-
def
|
68
|
-
|
78
|
+
def _get_request_obj(self, texts: list[str], timeout: int) -> dict:
|
79
|
+
return {
|
69
80
|
"user_id": self.state.user_id,
|
70
81
|
"company_id": self.state.company_id,
|
71
82
|
"texts": texts,
|
72
83
|
"timeout": timeout,
|
73
84
|
}
|
74
|
-
try:
|
75
|
-
response = unique_sdk.Embeddings.create(**request)
|
76
|
-
return Embeddings(**response)
|
77
|
-
except Exception as e:
|
78
|
-
self.logger.error(f"Error embedding texts: {e}")
|
79
|
-
raise e
|
80
85
|
|
81
86
|
def get_cosine_similarity(
|
82
87
|
self,
|
@@ -3,7 +3,7 @@ from typing import Optional, cast
|
|
3
3
|
|
4
4
|
import unique_sdk
|
5
5
|
|
6
|
-
from unique_toolkit.
|
6
|
+
from unique_toolkit._common._base_service import BaseService
|
7
7
|
from unique_toolkit.chat.state import ChatState
|
8
8
|
from unique_toolkit.content.schemas import ContentChunk
|
9
9
|
from unique_toolkit.language_model.infos import LanguageModelName
|
@@ -15,20 +15,27 @@ from unique_toolkit.language_model.schemas import (
|
|
15
15
|
)
|
16
16
|
|
17
17
|
|
18
|
-
class LanguageModelService:
|
18
|
+
class LanguageModelService(BaseService):
|
19
|
+
"""
|
20
|
+
Provides methods to interact with the Language Model by generating responses.
|
21
|
+
|
22
|
+
Attributes:
|
23
|
+
state (ChatState): The ChatState object.
|
24
|
+
logger (Optional[logging.Logger]): The logger object. Defaults to None.
|
25
|
+
"""
|
26
|
+
|
19
27
|
def __init__(self, state: ChatState, logger: Optional[logging.Logger] = None):
|
20
|
-
|
21
|
-
self.logger = logger or logging.getLogger(__name__)
|
28
|
+
super().__init__(state, logger)
|
22
29
|
|
23
|
-
|
24
|
-
|
30
|
+
DEFAULT_COMPLETE_TIMEOUT = 240_000
|
31
|
+
DEFAULT_COMPLETE_TEMPERATURE = 0.0
|
25
32
|
|
26
33
|
def complete(
|
27
34
|
self,
|
28
35
|
messages: LanguageModelMessages,
|
29
36
|
model_name: LanguageModelName,
|
30
|
-
temperature: float =
|
31
|
-
timeout: int =
|
37
|
+
temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
|
38
|
+
timeout: int = DEFAULT_COMPLETE_TIMEOUT,
|
32
39
|
tools: Optional[list[LanguageModelTool]] = None,
|
33
40
|
):
|
34
41
|
"""
|
@@ -44,22 +51,32 @@ class LanguageModelService:
|
|
44
51
|
Returns:
|
45
52
|
LanguageModelResponse: The LanguageModelResponse object.
|
46
53
|
"""
|
47
|
-
|
48
|
-
|
49
|
-
|
50
|
-
|
51
|
-
|
52
|
-
|
53
|
-
|
54
|
+
options = self._add_tools_to_options({}, tools)
|
55
|
+
messages = messages.model_dump(exclude_none=True)
|
56
|
+
try:
|
57
|
+
response = unique_sdk.ChatCompletion.create(
|
58
|
+
company_id=self.state.company_id,
|
59
|
+
# TODO change or extend types in unique_sdk
|
60
|
+
model=model_name.name, # type: ignore
|
61
|
+
messages=cast(
|
62
|
+
list[unique_sdk.Integrated.ChatCompletionRequestMessage],
|
63
|
+
messages,
|
64
|
+
),
|
65
|
+
timeout=timeout,
|
66
|
+
temperature=temperature,
|
67
|
+
options=options, # type: ignore
|
68
|
+
)
|
69
|
+
return LanguageModelResponse(**response)
|
70
|
+
except Exception as e:
|
71
|
+
self.logger.error(f"Error completing: {e}")
|
72
|
+
raise e
|
54
73
|
|
55
|
-
|
56
|
-
@async_warning
|
57
|
-
def async_complete(
|
74
|
+
async def complete_async(
|
58
75
|
self,
|
59
76
|
messages: LanguageModelMessages,
|
60
77
|
model_name: LanguageModelName,
|
61
|
-
temperature: float =
|
62
|
-
timeout: int =
|
78
|
+
temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
|
79
|
+
timeout: int = DEFAULT_COMPLETE_TIMEOUT,
|
63
80
|
tools: Optional[list[LanguageModelTool]] = None,
|
64
81
|
):
|
65
82
|
"""
|
@@ -75,26 +92,10 @@ class LanguageModelService:
|
|
75
92
|
Returns:
|
76
93
|
str: The completed message content.
|
77
94
|
"""
|
78
|
-
return self._trigger_complete(
|
79
|
-
messages=messages,
|
80
|
-
model_name=model_name,
|
81
|
-
temperature=temperature,
|
82
|
-
timeout=timeout,
|
83
|
-
tools=tools,
|
84
|
-
)
|
85
|
-
|
86
|
-
def _trigger_complete(
|
87
|
-
self,
|
88
|
-
messages: LanguageModelMessages,
|
89
|
-
model_name: LanguageModelName,
|
90
|
-
temperature: float,
|
91
|
-
timeout: int,
|
92
|
-
tools: Optional[list[LanguageModelTool]] = None,
|
93
|
-
) -> LanguageModelResponse:
|
94
95
|
options = self._add_tools_to_options({}, tools)
|
95
96
|
messages = messages.model_dump(exclude_none=True, exclude={"tool_calls"})
|
96
97
|
try:
|
97
|
-
response = unique_sdk.ChatCompletion.
|
98
|
+
response = await unique_sdk.ChatCompletion.create_async(
|
98
99
|
company_id=self.state.company_id,
|
99
100
|
# TODO change or extend types in unique_sdk
|
100
101
|
model=model_name.name, # type: ignore
|
@@ -106,20 +107,19 @@ class LanguageModelService:
|
|
106
107
|
temperature=temperature,
|
107
108
|
options=options, # type: ignore
|
108
109
|
)
|
110
|
+
return LanguageModelResponse(**response)
|
109
111
|
except Exception as e:
|
110
112
|
self.logger.error(f"Error completing: {e}")
|
111
113
|
raise e
|
112
114
|
|
113
|
-
return LanguageModelResponse(**response)
|
114
|
-
|
115
115
|
def stream_complete(
|
116
116
|
self,
|
117
117
|
messages: LanguageModelMessages,
|
118
118
|
model_name: LanguageModelName,
|
119
119
|
content_chunks: list[ContentChunk] = [],
|
120
120
|
debug_info: dict = {},
|
121
|
-
temperature: float =
|
122
|
-
timeout: int =
|
121
|
+
temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
|
122
|
+
timeout: int = DEFAULT_COMPLETE_TIMEOUT,
|
123
123
|
tools: Optional[list[LanguageModelTool]] = None,
|
124
124
|
start_text: Optional[str] = None,
|
125
125
|
):
|
@@ -139,27 +139,44 @@ class LanguageModelService:
|
|
139
139
|
Returns:
|
140
140
|
The LanguageModelStreamResponse object once the stream has finished.
|
141
141
|
"""
|
142
|
-
|
143
|
-
|
144
|
-
|
145
|
-
model_name=model_name,
|
146
|
-
debug_info=debug_info,
|
147
|
-
timeout=timeout,
|
148
|
-
temperature=temperature,
|
149
|
-
tools=tools,
|
150
|
-
start_text=start_text,
|
151
|
-
)
|
142
|
+
options = self._add_tools_to_options({}, tools)
|
143
|
+
search_context = self._to_search_context(content_chunks)
|
144
|
+
messages = messages.model_dump(exclude_none=True)
|
152
145
|
|
153
|
-
|
154
|
-
|
155
|
-
|
146
|
+
try:
|
147
|
+
response = unique_sdk.Integrated.chat_stream_completion(
|
148
|
+
user_id=self.state.user_id,
|
149
|
+
company_id=self.state.company_id,
|
150
|
+
assistantMessageId=self.state.assistant_message_id, # type: ignore
|
151
|
+
userMessageId=self.state.user_message_id, # type: ignore
|
152
|
+
messages=cast(
|
153
|
+
list[unique_sdk.Integrated.ChatCompletionRequestMessage],
|
154
|
+
messages,
|
155
|
+
),
|
156
|
+
chatId=self.state.chat_id,
|
157
|
+
searchContext=search_context,
|
158
|
+
# TODO change or extend types in unique_sdk
|
159
|
+
model=model_name.name, # type: ignore
|
160
|
+
timeout=timeout,
|
161
|
+
temperature=temperature,
|
162
|
+
assistantId=self.state.assistant_id,
|
163
|
+
debugInfo=debug_info,
|
164
|
+
options=options, # type: ignore
|
165
|
+
startText=start_text,
|
166
|
+
)
|
167
|
+
return LanguageModelStreamResponse(**response)
|
168
|
+
except Exception as e:
|
169
|
+
self.logger.error(f"Error streaming completion: {e}")
|
170
|
+
raise e
|
171
|
+
|
172
|
+
async def stream_complete_async(
|
156
173
|
self,
|
157
174
|
messages: LanguageModelMessages,
|
158
175
|
model_name: LanguageModelName,
|
159
176
|
content_chunks: list[ContentChunk] = [],
|
160
177
|
debug_info: dict = {},
|
161
|
-
temperature: float =
|
162
|
-
timeout: int =
|
178
|
+
temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
|
179
|
+
timeout: int = DEFAULT_COMPLETE_TIMEOUT,
|
163
180
|
tools: Optional[list[LanguageModelTool]] = None,
|
164
181
|
start_text: Optional[str] = None,
|
165
182
|
):
|
@@ -179,48 +196,13 @@ class LanguageModelService:
|
|
179
196
|
Returns:
|
180
197
|
The LanguageModelStreamResponse object once the stream has finished.
|
181
198
|
"""
|
182
|
-
return self._trigger_stream_complete(
|
183
|
-
messages=messages,
|
184
|
-
content_chunks=content_chunks,
|
185
|
-
model_name=model_name,
|
186
|
-
debug_info=debug_info,
|
187
|
-
timeout=timeout,
|
188
|
-
temperature=temperature,
|
189
|
-
tools=tools,
|
190
|
-
start_text=start_text,
|
191
|
-
)
|
192
199
|
|
193
|
-
def _trigger_stream_complete(
|
194
|
-
self,
|
195
|
-
messages: LanguageModelMessages,
|
196
|
-
model_name: LanguageModelName,
|
197
|
-
content_chunks: list[ContentChunk],
|
198
|
-
debug_info: dict,
|
199
|
-
timeout: int,
|
200
|
-
temperature: float,
|
201
|
-
tools: Optional[list[LanguageModelTool]] = None,
|
202
|
-
start_text: Optional[str] = None,
|
203
|
-
) -> LanguageModelStreamResponse:
|
204
200
|
options = self._add_tools_to_options({}, tools)
|
205
|
-
search_context =
|
206
|
-
|
207
|
-
id=chunk.id,
|
208
|
-
chunkId=chunk.chunk_id,
|
209
|
-
key=chunk.key,
|
210
|
-
title=chunk.title,
|
211
|
-
url=chunk.url,
|
212
|
-
startPage=chunk.start_page,
|
213
|
-
endPage=chunk.end_page,
|
214
|
-
order=chunk.order,
|
215
|
-
object=chunk.object,
|
216
|
-
) # type: ignore
|
217
|
-
for chunk in content_chunks
|
218
|
-
]
|
219
|
-
|
220
|
-
messages = messages.model_dump(exclude_none=True, exclude={"tool_calls"})
|
201
|
+
search_context = self._to_search_context(content_chunks)
|
202
|
+
messages = messages.model_dump(exclude_none=True, exclude=["tool_calls"])
|
221
203
|
|
222
204
|
try:
|
223
|
-
response = unique_sdk.Integrated.
|
205
|
+
response = await unique_sdk.Integrated.chat_stream_completion_async(
|
224
206
|
user_id=self.state.user_id,
|
225
207
|
company_id=self.state.company_id,
|
226
208
|
assistantMessageId=self.state.assistant_message_id, # type: ignore
|
@@ -240,11 +222,29 @@ class LanguageModelService:
|
|
240
222
|
options=options, # type: ignore
|
241
223
|
startText=start_text,
|
242
224
|
)
|
225
|
+
return LanguageModelStreamResponse(**response)
|
243
226
|
except Exception as e:
|
244
227
|
self.logger.error(f"Error streaming completion: {e}")
|
245
228
|
raise e
|
246
229
|
|
247
|
-
|
230
|
+
@staticmethod
|
231
|
+
def _to_search_context(chunks: list[ContentChunk]) -> dict | None:
|
232
|
+
if not chunks:
|
233
|
+
return None
|
234
|
+
return [
|
235
|
+
unique_sdk.Integrated.SearchResult(
|
236
|
+
id=chunk.id,
|
237
|
+
chunkId=chunk.chunk_id,
|
238
|
+
key=chunk.key,
|
239
|
+
title=chunk.title,
|
240
|
+
url=chunk.url,
|
241
|
+
startPage=chunk.start_page,
|
242
|
+
endPage=chunk.end_page,
|
243
|
+
order=chunk.order,
|
244
|
+
object=chunk.object,
|
245
|
+
) # type: ignore
|
246
|
+
for chunk in chunks
|
247
|
+
]
|
248
248
|
|
249
249
|
@staticmethod
|
250
250
|
def _add_tools_to_options(
|
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.1
|
2
2
|
Name: unique_toolkit
|
3
|
-
Version: 0.5.
|
3
|
+
Version: 0.5.8
|
4
4
|
Summary:
|
5
5
|
License: MIT
|
6
6
|
Author: Martin Fadler
|
@@ -15,10 +15,9 @@ Requires-Dist: pydantic (>=2.8.2,<3.0.0)
|
|
15
15
|
Requires-Dist: pyhumps (>=3.8.0,<4.0.0)
|
16
16
|
Requires-Dist: python-dotenv (>=1.0.1,<2.0.0)
|
17
17
|
Requires-Dist: regex (>=2024.5.15,<2025.0.0)
|
18
|
-
Requires-Dist: requests (>=2.32.3,<3.0.0)
|
19
18
|
Requires-Dist: tiktoken (>=0.7.0,<0.8.0)
|
20
19
|
Requires-Dist: typing-extensions (>=4.9.0,<5.0.0)
|
21
|
-
Requires-Dist: unique-sdk (>=0.9.
|
20
|
+
Requires-Dist: unique-sdk (>=0.9.4,<0.10.0)
|
22
21
|
Description-Content-Type: text/markdown
|
23
22
|
|
24
23
|
# Unique Toolkit
|
@@ -102,6 +101,15 @@ All notable changes to this project will be documented in this file.
|
|
102
101
|
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
103
102
|
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
104
103
|
|
104
|
+
## [0.5.8] - 2024-08-01
|
105
|
+
- `RerankerConfig` serialization alias added
|
106
|
+
|
107
|
+
## [0.5.7] - 2024-07-31
|
108
|
+
- Replace mocked async service calls with async calls in `unique_sdk`
|
109
|
+
- Change async methods name from `async_*` to `*_async`
|
110
|
+
- Remove `chat_only` and `scope_ids` attributes from `ChatState` class
|
111
|
+
- Replace `AsyncExecutor` by simpler utility function `run_async_tasks_parallel`
|
112
|
+
|
105
113
|
## [0.5.6] - 2024-07-30
|
106
114
|
- Bug fix: `ContentService.search_content_chunks` and it's `async` equivalent now accept `None` as a valid parameter value for `scope_ids`.
|
107
115
|
|