unique_toolkit 0.5.5__py3-none-any.whl → 0.5.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- unique_toolkit/_common/_base_service.py +10 -0
- unique_toolkit/app/performance/async_tasks.py +70 -0
- unique_toolkit/app/performance/async_wrapper.py +19 -8
- unique_toolkit/chat/service.py +106 -103
- unique_toolkit/chat/state.py +0 -10
- unique_toolkit/content/service.py +89 -77
- unique_toolkit/embedding/service.py +31 -26
- unique_toolkit/language_model/service.py +96 -96
- {unique_toolkit-0.5.5.dist-info → unique_toolkit-0.5.7.dist-info}/METADATA +12 -4
- {unique_toolkit-0.5.5.dist-info → unique_toolkit-0.5.7.dist-info}/RECORD +12 -11
- unique_toolkit/app/performance/async_executor.py +0 -186
- {unique_toolkit-0.5.5.dist-info → unique_toolkit-0.5.7.dist-info}/LICENSE +0 -0
- {unique_toolkit-0.5.5.dist-info → unique_toolkit-0.5.7.dist-info}/WHEEL +0 -0
@@ -7,7 +7,7 @@ from typing import Optional, cast
|
|
7
7
|
import requests
|
8
8
|
import unique_sdk
|
9
9
|
|
10
|
-
from unique_toolkit.
|
10
|
+
from unique_toolkit._common._base_service import BaseService
|
11
11
|
from unique_toolkit.chat.state import ChatState
|
12
12
|
from unique_toolkit.content.schemas import (
|
13
13
|
Content,
|
@@ -18,21 +18,29 @@ from unique_toolkit.content.schemas import (
|
|
18
18
|
)
|
19
19
|
|
20
20
|
|
21
|
-
class ContentService:
|
22
|
-
|
21
|
+
class ContentService(BaseService):
|
22
|
+
"""
|
23
|
+
Provides methods for searching, downloading and uploading content in the knowledge base.
|
24
|
+
|
25
|
+
Attributes:
|
26
|
+
state (ChatState): The chat state.
|
27
|
+
logger (Optional[logging.Logger]): The logger. Defaults to None.
|
28
|
+
"""
|
23
29
|
|
24
30
|
def __init__(self, state: ChatState, logger: Optional[logging.Logger] = None):
|
25
|
-
|
26
|
-
|
31
|
+
super().__init__(state, logger)
|
32
|
+
|
33
|
+
DEFAULT_SEARCH_LANGUAGE = "english"
|
27
34
|
|
28
35
|
def search_content_chunks(
|
29
36
|
self,
|
30
37
|
search_string: str,
|
31
38
|
search_type: ContentSearchType,
|
32
39
|
limit: int,
|
33
|
-
reranker_config: Optional[RerankerConfig] = None,
|
34
40
|
search_language: str = DEFAULT_SEARCH_LANGUAGE,
|
41
|
+
reranker_config: Optional[RerankerConfig] = None,
|
35
42
|
scope_ids: Optional[list[str]] = None,
|
43
|
+
chat_only: Optional[bool] = None,
|
36
44
|
) -> list[ContentChunk]:
|
37
45
|
"""
|
38
46
|
Performs a synchronous search for content chunks in the knowledge base.
|
@@ -41,32 +49,50 @@ class ContentService:
|
|
41
49
|
search_string (str): The search string.
|
42
50
|
search_type (ContentSearchType): The type of search to perform.
|
43
51
|
limit (int): The maximum number of results to return.
|
44
|
-
reranker_config (Optional[RerankerConfig]): The reranker configuration. Defaults to None.
|
45
52
|
search_language (str): The language for the full-text search. Defaults to "english".
|
53
|
+
reranker_config (Optional[RerankerConfig]): The reranker configuration. Defaults to None.
|
46
54
|
scope_ids (Optional[list[str]]): The scope IDs. Defaults to None.
|
55
|
+
chat_only (Optional[bool]): Whether to search only in the current chat. Defaults to None.
|
47
56
|
|
48
57
|
Returns:
|
49
58
|
list[ContentChunk]: The search results.
|
50
59
|
"""
|
51
|
-
|
52
|
-
|
53
|
-
search_type=search_type,
|
54
|
-
limit=limit,
|
55
|
-
reranker_config=reranker_config,
|
56
|
-
search_language=search_language,
|
57
|
-
scope_ids=scope_ids,
|
58
|
-
)
|
60
|
+
if not scope_ids:
|
61
|
+
self.logger.warning("No scope IDs provided for search.")
|
59
62
|
|
60
|
-
|
61
|
-
|
62
|
-
|
63
|
+
try:
|
64
|
+
searches = unique_sdk.Search.create(
|
65
|
+
user_id=self.state.user_id,
|
66
|
+
company_id=self.state.company_id,
|
67
|
+
chatId=self.state.chat_id,
|
68
|
+
searchString=search_string,
|
69
|
+
searchType=search_type.name,
|
70
|
+
scopeIds=scope_ids,
|
71
|
+
limit=limit,
|
72
|
+
reranker=reranker_config.model_dump() if reranker_config else None,
|
73
|
+
language=search_language,
|
74
|
+
chatOnly=chat_only,
|
75
|
+
)
|
76
|
+
except Exception as e:
|
77
|
+
self.logger.error(f"Error while searching content chunks: {e}")
|
78
|
+
raise e
|
79
|
+
|
80
|
+
def map_to_content_chunks(searches: list[unique_sdk.Search]):
|
81
|
+
return [ContentChunk(**search) for search in searches]
|
82
|
+
|
83
|
+
# TODO change return type in sdk from Search to list[Search]
|
84
|
+
searches = cast(list[unique_sdk.Search], searches)
|
85
|
+
return map_to_content_chunks(searches)
|
86
|
+
|
87
|
+
async def search_content_chunks_async(
|
63
88
|
self,
|
64
89
|
search_string: str,
|
65
90
|
search_type: ContentSearchType,
|
66
91
|
limit: int,
|
67
|
-
reranker_config: Optional[RerankerConfig] = None,
|
68
92
|
search_language: str = DEFAULT_SEARCH_LANGUAGE,
|
93
|
+
reranker_config: Optional[RerankerConfig] = None,
|
69
94
|
scope_ids: Optional[list[str]] = None,
|
95
|
+
chat_only: Optional[bool] = None,
|
70
96
|
):
|
71
97
|
"""
|
72
98
|
Performs an asynchronous search for content chunks in the knowledge base.
|
@@ -75,38 +101,19 @@ class ContentService:
|
|
75
101
|
search_string (str): The search string.
|
76
102
|
search_type (ContentSearchType): The type of search to perform.
|
77
103
|
limit (int): The maximum number of results to return.
|
78
|
-
reranker_config (Optional[RerankerConfig]): The reranker configuration. Defaults to None.
|
79
104
|
search_language (str): The language for the full-text search. Defaults to "english".
|
80
|
-
|
105
|
+
reranker_config (Optional[RerankerConfig]): The reranker configuration. Defaults to None.
|
106
|
+
scope_ids (Optional[list[str]]): The scope IDs. Defaults to None.
|
107
|
+
chat_only (Optional[bool]): Whether to search only in the current chat. Defaults to None.
|
81
108
|
|
82
109
|
Returns:
|
83
110
|
list[ContentChunk]: The search results.
|
84
111
|
"""
|
85
|
-
return self._trigger_search_content_chunks(
|
86
|
-
search_string=search_string,
|
87
|
-
search_type=search_type,
|
88
|
-
limit=limit,
|
89
|
-
reranker_config=reranker_config,
|
90
|
-
search_language=search_language,
|
91
|
-
scope_ids=scope_ids,
|
92
|
-
)
|
93
|
-
|
94
|
-
def _trigger_search_content_chunks(
|
95
|
-
self,
|
96
|
-
search_string: str,
|
97
|
-
search_type: ContentSearchType,
|
98
|
-
limit: int,
|
99
|
-
reranker_config: Optional[RerankerConfig] = None,
|
100
|
-
search_language: str = DEFAULT_SEARCH_LANGUAGE,
|
101
|
-
scope_ids: Optional[list[str]] = None,
|
102
|
-
) -> list[ContentChunk]:
|
103
|
-
scope_ids = scope_ids or self.state.scope_ids or []
|
104
|
-
|
105
112
|
if not scope_ids:
|
106
113
|
self.logger.warning("No scope IDs provided for search.")
|
107
114
|
|
108
115
|
try:
|
109
|
-
searches = unique_sdk.Search.
|
116
|
+
searches = await unique_sdk.Search.create_async(
|
110
117
|
user_id=self.state.user_id,
|
111
118
|
company_id=self.state.company_id,
|
112
119
|
chatId=self.state.chat_id,
|
@@ -114,9 +121,9 @@ class ContentService:
|
|
114
121
|
searchType=search_type.name,
|
115
122
|
scopeIds=scope_ids,
|
116
123
|
limit=limit,
|
117
|
-
reranker=reranker_config,
|
124
|
+
reranker=reranker_config.model_dump() if reranker_config else None,
|
118
125
|
language=search_language,
|
119
|
-
chatOnly=
|
126
|
+
chatOnly=chat_only,
|
120
127
|
)
|
121
128
|
except Exception as e:
|
122
129
|
self.logger.error(f"Error while searching content chunks: {e}")
|
@@ -143,11 +150,21 @@ class ContentService:
|
|
143
150
|
Returns:
|
144
151
|
list[Content]: The search results.
|
145
152
|
"""
|
146
|
-
|
153
|
+
try:
|
154
|
+
contents = unique_sdk.Content.search(
|
155
|
+
user_id=self.state.user_id,
|
156
|
+
company_id=self.state.company_id,
|
157
|
+
chatId=self.state.chat_id,
|
158
|
+
# TODO add type parameter
|
159
|
+
where=where, # type: ignore
|
160
|
+
)
|
161
|
+
except Exception as e:
|
162
|
+
self.logger.error(f"Error while searching contents: {e}")
|
163
|
+
raise e
|
147
164
|
|
148
|
-
|
149
|
-
|
150
|
-
def
|
165
|
+
return self._map_contents(contents)
|
166
|
+
|
167
|
+
async def search_contents_async(
|
151
168
|
self,
|
152
169
|
where: dict,
|
153
170
|
) -> list[Content]:
|
@@ -160,35 +177,8 @@ class ContentService:
|
|
160
177
|
Returns:
|
161
178
|
list[Content]: The search results.
|
162
179
|
"""
|
163
|
-
return self._trigger_search_contents(where)
|
164
|
-
|
165
|
-
def _trigger_search_contents(
|
166
|
-
self,
|
167
|
-
where: dict,
|
168
|
-
) -> list[Content]:
|
169
|
-
def map_content_chunk(content_chunk):
|
170
|
-
return ContentChunk(
|
171
|
-
id=content_chunk["id"],
|
172
|
-
text=content_chunk["text"],
|
173
|
-
start_page=content_chunk["startPage"],
|
174
|
-
end_page=content_chunk["endPage"],
|
175
|
-
order=content_chunk["order"],
|
176
|
-
)
|
177
|
-
|
178
|
-
def map_content(content):
|
179
|
-
return Content(
|
180
|
-
id=content["id"],
|
181
|
-
key=content["key"],
|
182
|
-
title=content["title"],
|
183
|
-
url=content["url"],
|
184
|
-
chunks=[map_content_chunk(chunk) for chunk in content["chunks"]],
|
185
|
-
)
|
186
|
-
|
187
|
-
def map_contents(contents):
|
188
|
-
return [map_content(content) for content in contents]
|
189
|
-
|
190
180
|
try:
|
191
|
-
contents = unique_sdk.Content.
|
181
|
+
contents = await unique_sdk.Content.search_async(
|
192
182
|
user_id=self.state.user_id,
|
193
183
|
company_id=self.state.company_id,
|
194
184
|
chatId=self.state.chat_id,
|
@@ -199,7 +189,29 @@ class ContentService:
|
|
199
189
|
self.logger.error(f"Error while searching contents: {e}")
|
200
190
|
raise e
|
201
191
|
|
202
|
-
return
|
192
|
+
return self._map_contents(contents)
|
193
|
+
|
194
|
+
@staticmethod
|
195
|
+
def _map_content_chunk(content_chunk: dict):
|
196
|
+
return ContentChunk(
|
197
|
+
id=content_chunk["id"],
|
198
|
+
text=content_chunk["text"],
|
199
|
+
start_page=content_chunk["startPage"],
|
200
|
+
end_page=content_chunk["endPage"],
|
201
|
+
order=content_chunk["order"],
|
202
|
+
)
|
203
|
+
|
204
|
+
def _map_content(self, content: dict):
|
205
|
+
return Content(
|
206
|
+
id=content["id"],
|
207
|
+
key=content["key"],
|
208
|
+
title=content["title"],
|
209
|
+
url=content["url"],
|
210
|
+
chunks=[self._map_content_chunk(chunk) for chunk in content["chunks"]],
|
211
|
+
)
|
212
|
+
|
213
|
+
def _map_contents(self, contents):
|
214
|
+
return [self._map_content(content) for content in contents]
|
203
215
|
|
204
216
|
def upload_content(
|
205
217
|
self,
|
@@ -4,22 +4,29 @@ from typing import Optional
|
|
4
4
|
import numpy as np
|
5
5
|
import unique_sdk
|
6
6
|
|
7
|
-
from unique_toolkit.
|
7
|
+
from unique_toolkit._common._base_service import BaseService
|
8
8
|
from unique_toolkit.chat.state import ChatState
|
9
9
|
from unique_toolkit.embedding.schemas import Embeddings
|
10
10
|
|
11
11
|
|
12
|
-
class EmbeddingService:
|
12
|
+
class EmbeddingService(BaseService):
|
13
|
+
"""
|
14
|
+
Provides methods to interact with the Embedding service.
|
15
|
+
|
16
|
+
Attributes:
|
17
|
+
state (ChatState): The ChatState object.
|
18
|
+
logger (Optional[logging.Logger]): The logger object. Defaults to None.
|
19
|
+
"""
|
20
|
+
|
13
21
|
def __init__(self, state: ChatState, logger: Optional[logging.Logger] = None):
|
14
|
-
|
15
|
-
self.logger = logger or logging.getLogger(__name__)
|
22
|
+
super().__init__(state, logger)
|
16
23
|
|
17
|
-
|
24
|
+
DEFAULT_TIMEOUT = 600_000
|
18
25
|
|
19
26
|
def embed_texts(
|
20
27
|
self,
|
21
28
|
texts: list[str],
|
22
|
-
timeout: int =
|
29
|
+
timeout: int = DEFAULT_TIMEOUT,
|
23
30
|
) -> Embeddings:
|
24
31
|
"""
|
25
32
|
Embed text.
|
@@ -34,17 +41,18 @@ class EmbeddingService:
|
|
34
41
|
Raises:
|
35
42
|
Exception: If an error occurs.
|
36
43
|
"""
|
37
|
-
|
38
|
-
|
39
|
-
|
40
|
-
|
44
|
+
request = self._get_request_obj(texts=texts, timeout=timeout)
|
45
|
+
try:
|
46
|
+
response = unique_sdk.Embeddings.create(**request)
|
47
|
+
return Embeddings(**response)
|
48
|
+
except Exception as e:
|
49
|
+
self.logger.error(f"Error embedding texts: {e}")
|
50
|
+
raise e
|
41
51
|
|
42
|
-
|
43
|
-
@async_warning
|
44
|
-
def async_embed_texts(
|
52
|
+
async def embed_texts_async(
|
45
53
|
self,
|
46
54
|
texts: list[str],
|
47
|
-
timeout: int =
|
55
|
+
timeout: int = DEFAULT_TIMEOUT,
|
48
56
|
) -> Embeddings:
|
49
57
|
"""
|
50
58
|
Embed text asynchronously.
|
@@ -59,24 +67,21 @@ class EmbeddingService:
|
|
59
67
|
Raises:
|
60
68
|
Exception: If an error occurs.
|
61
69
|
"""
|
62
|
-
|
63
|
-
|
64
|
-
|
65
|
-
|
70
|
+
request = self._get_request_obj(texts=texts, timeout=timeout)
|
71
|
+
try:
|
72
|
+
response = await unique_sdk.Embeddings.create_async(**request)
|
73
|
+
return Embeddings(**response)
|
74
|
+
except Exception as e:
|
75
|
+
self.logger.error(f"Error embedding texts: {e}")
|
76
|
+
raise e
|
66
77
|
|
67
|
-
def
|
68
|
-
|
78
|
+
def _get_request_obj(self, texts: list[str], timeout: int) -> dict:
|
79
|
+
return {
|
69
80
|
"user_id": self.state.user_id,
|
70
81
|
"company_id": self.state.company_id,
|
71
82
|
"texts": texts,
|
72
83
|
"timeout": timeout,
|
73
84
|
}
|
74
|
-
try:
|
75
|
-
response = unique_sdk.Embeddings.create(**request)
|
76
|
-
return Embeddings(**response)
|
77
|
-
except Exception as e:
|
78
|
-
self.logger.error(f"Error embedding texts: {e}")
|
79
|
-
raise e
|
80
85
|
|
81
86
|
def get_cosine_similarity(
|
82
87
|
self,
|
@@ -3,7 +3,7 @@ from typing import Optional, cast
|
|
3
3
|
|
4
4
|
import unique_sdk
|
5
5
|
|
6
|
-
from unique_toolkit.
|
6
|
+
from unique_toolkit._common._base_service import BaseService
|
7
7
|
from unique_toolkit.chat.state import ChatState
|
8
8
|
from unique_toolkit.content.schemas import ContentChunk
|
9
9
|
from unique_toolkit.language_model.infos import LanguageModelName
|
@@ -15,20 +15,27 @@ from unique_toolkit.language_model.schemas import (
|
|
15
15
|
)
|
16
16
|
|
17
17
|
|
18
|
-
class LanguageModelService:
|
18
|
+
class LanguageModelService(BaseService):
|
19
|
+
"""
|
20
|
+
Provides methods to interact with the Language Model by generating responses.
|
21
|
+
|
22
|
+
Attributes:
|
23
|
+
state (ChatState): The ChatState object.
|
24
|
+
logger (Optional[logging.Logger]): The logger object. Defaults to None.
|
25
|
+
"""
|
26
|
+
|
19
27
|
def __init__(self, state: ChatState, logger: Optional[logging.Logger] = None):
|
20
|
-
|
21
|
-
self.logger = logger or logging.getLogger(__name__)
|
28
|
+
super().__init__(state, logger)
|
22
29
|
|
23
|
-
|
24
|
-
|
30
|
+
DEFAULT_COMPLETE_TIMEOUT = 240_000
|
31
|
+
DEFAULT_COMPLETE_TEMPERATURE = 0.0
|
25
32
|
|
26
33
|
def complete(
|
27
34
|
self,
|
28
35
|
messages: LanguageModelMessages,
|
29
36
|
model_name: LanguageModelName,
|
30
|
-
temperature: float =
|
31
|
-
timeout: int =
|
37
|
+
temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
|
38
|
+
timeout: int = DEFAULT_COMPLETE_TIMEOUT,
|
32
39
|
tools: Optional[list[LanguageModelTool]] = None,
|
33
40
|
):
|
34
41
|
"""
|
@@ -44,22 +51,32 @@ class LanguageModelService:
|
|
44
51
|
Returns:
|
45
52
|
LanguageModelResponse: The LanguageModelResponse object.
|
46
53
|
"""
|
47
|
-
|
48
|
-
|
49
|
-
|
50
|
-
|
51
|
-
|
52
|
-
|
53
|
-
|
54
|
+
options = self._add_tools_to_options({}, tools)
|
55
|
+
messages = messages.model_dump(exclude_none=True)
|
56
|
+
try:
|
57
|
+
response = unique_sdk.ChatCompletion.create(
|
58
|
+
company_id=self.state.company_id,
|
59
|
+
# TODO change or extend types in unique_sdk
|
60
|
+
model=model_name.name, # type: ignore
|
61
|
+
messages=cast(
|
62
|
+
list[unique_sdk.Integrated.ChatCompletionRequestMessage],
|
63
|
+
messages,
|
64
|
+
),
|
65
|
+
timeout=timeout,
|
66
|
+
temperature=temperature,
|
67
|
+
options=options, # type: ignore
|
68
|
+
)
|
69
|
+
return LanguageModelResponse(**response)
|
70
|
+
except Exception as e:
|
71
|
+
self.logger.error(f"Error completing: {e}")
|
72
|
+
raise e
|
54
73
|
|
55
|
-
|
56
|
-
@async_warning
|
57
|
-
def async_complete(
|
74
|
+
async def complete_async(
|
58
75
|
self,
|
59
76
|
messages: LanguageModelMessages,
|
60
77
|
model_name: LanguageModelName,
|
61
|
-
temperature: float =
|
62
|
-
timeout: int =
|
78
|
+
temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
|
79
|
+
timeout: int = DEFAULT_COMPLETE_TIMEOUT,
|
63
80
|
tools: Optional[list[LanguageModelTool]] = None,
|
64
81
|
):
|
65
82
|
"""
|
@@ -75,26 +92,10 @@ class LanguageModelService:
|
|
75
92
|
Returns:
|
76
93
|
str: The completed message content.
|
77
94
|
"""
|
78
|
-
return self._trigger_complete(
|
79
|
-
messages=messages,
|
80
|
-
model_name=model_name,
|
81
|
-
temperature=temperature,
|
82
|
-
timeout=timeout,
|
83
|
-
tools=tools,
|
84
|
-
)
|
85
|
-
|
86
|
-
def _trigger_complete(
|
87
|
-
self,
|
88
|
-
messages: LanguageModelMessages,
|
89
|
-
model_name: LanguageModelName,
|
90
|
-
temperature: float,
|
91
|
-
timeout: int,
|
92
|
-
tools: Optional[list[LanguageModelTool]] = None,
|
93
|
-
) -> LanguageModelResponse:
|
94
95
|
options = self._add_tools_to_options({}, tools)
|
95
|
-
messages = messages.model_dump(exclude_none=True)
|
96
|
+
messages = messages.model_dump(exclude_none=True, exclude={"tool_calls"})
|
96
97
|
try:
|
97
|
-
response = unique_sdk.ChatCompletion.
|
98
|
+
response = await unique_sdk.ChatCompletion.create_async(
|
98
99
|
company_id=self.state.company_id,
|
99
100
|
# TODO change or extend types in unique_sdk
|
100
101
|
model=model_name.name, # type: ignore
|
@@ -106,20 +107,19 @@ class LanguageModelService:
|
|
106
107
|
temperature=temperature,
|
107
108
|
options=options, # type: ignore
|
108
109
|
)
|
110
|
+
return LanguageModelResponse(**response)
|
109
111
|
except Exception as e:
|
110
112
|
self.logger.error(f"Error completing: {e}")
|
111
113
|
raise e
|
112
114
|
|
113
|
-
return LanguageModelResponse(**response)
|
114
|
-
|
115
115
|
def stream_complete(
|
116
116
|
self,
|
117
117
|
messages: LanguageModelMessages,
|
118
118
|
model_name: LanguageModelName,
|
119
119
|
content_chunks: list[ContentChunk] = [],
|
120
120
|
debug_info: dict = {},
|
121
|
-
temperature: float =
|
122
|
-
timeout: int =
|
121
|
+
temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
|
122
|
+
timeout: int = DEFAULT_COMPLETE_TIMEOUT,
|
123
123
|
tools: Optional[list[LanguageModelTool]] = None,
|
124
124
|
start_text: Optional[str] = None,
|
125
125
|
):
|
@@ -139,27 +139,44 @@ class LanguageModelService:
|
|
139
139
|
Returns:
|
140
140
|
The LanguageModelStreamResponse object once the stream has finished.
|
141
141
|
"""
|
142
|
-
|
143
|
-
|
144
|
-
|
145
|
-
|
146
|
-
|
147
|
-
|
148
|
-
|
149
|
-
|
150
|
-
|
151
|
-
|
142
|
+
options = self._add_tools_to_options({}, tools)
|
143
|
+
search_context = self._to_search_context(content_chunks)
|
144
|
+
messages = messages.model_dump(exclude_none=True)
|
145
|
+
|
146
|
+
try:
|
147
|
+
response = unique_sdk.Integrated.chat_stream_completion(
|
148
|
+
user_id=self.state.user_id,
|
149
|
+
company_id=self.state.company_id,
|
150
|
+
assistantMessageId=self.state.assistant_message_id, # type: ignore
|
151
|
+
userMessageId=self.state.user_message_id, # type: ignore
|
152
|
+
messages=cast(
|
153
|
+
list[unique_sdk.Integrated.ChatCompletionRequestMessage],
|
154
|
+
messages,
|
155
|
+
),
|
156
|
+
chatId=self.state.chat_id,
|
157
|
+
searchContext=search_context,
|
158
|
+
# TODO change or extend types in unique_sdk
|
159
|
+
model=model_name.name, # type: ignore
|
160
|
+
timeout=timeout,
|
161
|
+
temperature=temperature,
|
162
|
+
assistantId=self.state.assistant_id,
|
163
|
+
debugInfo=debug_info,
|
164
|
+
options=options, # type: ignore
|
165
|
+
startText=start_text,
|
166
|
+
)
|
167
|
+
return LanguageModelStreamResponse(**response)
|
168
|
+
except Exception as e:
|
169
|
+
self.logger.error(f"Error streaming completion: {e}")
|
170
|
+
raise e
|
152
171
|
|
153
|
-
|
154
|
-
@async_warning
|
155
|
-
def async_stream_complete(
|
172
|
+
async def stream_complete_async(
|
156
173
|
self,
|
157
174
|
messages: LanguageModelMessages,
|
158
175
|
model_name: LanguageModelName,
|
159
176
|
content_chunks: list[ContentChunk] = [],
|
160
177
|
debug_info: dict = {},
|
161
|
-
temperature: float =
|
162
|
-
timeout: int =
|
178
|
+
temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
|
179
|
+
timeout: int = DEFAULT_COMPLETE_TIMEOUT,
|
163
180
|
tools: Optional[list[LanguageModelTool]] = None,
|
164
181
|
start_text: Optional[str] = None,
|
165
182
|
):
|
@@ -179,48 +196,13 @@ class LanguageModelService:
|
|
179
196
|
Returns:
|
180
197
|
The LanguageModelStreamResponse object once the stream has finished.
|
181
198
|
"""
|
182
|
-
return self._trigger_stream_complete(
|
183
|
-
messages=messages,
|
184
|
-
content_chunks=content_chunks,
|
185
|
-
model_name=model_name,
|
186
|
-
debug_info=debug_info,
|
187
|
-
timeout=timeout,
|
188
|
-
temperature=temperature,
|
189
|
-
tools=tools,
|
190
|
-
start_text=start_text,
|
191
|
-
)
|
192
199
|
|
193
|
-
def _trigger_stream_complete(
|
194
|
-
self,
|
195
|
-
messages: LanguageModelMessages,
|
196
|
-
model_name: LanguageModelName,
|
197
|
-
content_chunks: list[ContentChunk],
|
198
|
-
debug_info: dict,
|
199
|
-
timeout: int,
|
200
|
-
temperature: float,
|
201
|
-
tools: Optional[list[LanguageModelTool]] = None,
|
202
|
-
start_text: Optional[str] = None,
|
203
|
-
) -> LanguageModelStreamResponse:
|
204
200
|
options = self._add_tools_to_options({}, tools)
|
205
|
-
search_context =
|
206
|
-
|
207
|
-
id=chunk.id,
|
208
|
-
chunkId=chunk.chunk_id,
|
209
|
-
key=chunk.key,
|
210
|
-
title=chunk.title,
|
211
|
-
url=chunk.url,
|
212
|
-
startPage=chunk.start_page,
|
213
|
-
endPage=chunk.end_page,
|
214
|
-
order=chunk.order,
|
215
|
-
object=chunk.object,
|
216
|
-
) # type: ignore
|
217
|
-
for chunk in content_chunks
|
218
|
-
]
|
219
|
-
|
220
|
-
messages = messages.model_dump(exclude_none=True)
|
201
|
+
search_context = self._to_search_context(content_chunks)
|
202
|
+
messages = messages.model_dump(exclude_none=True, exclude=["tool_calls"])
|
221
203
|
|
222
204
|
try:
|
223
|
-
response = unique_sdk.Integrated.
|
205
|
+
response = await unique_sdk.Integrated.chat_stream_completion_async(
|
224
206
|
user_id=self.state.user_id,
|
225
207
|
company_id=self.state.company_id,
|
226
208
|
assistantMessageId=self.state.assistant_message_id, # type: ignore
|
@@ -240,11 +222,29 @@ class LanguageModelService:
|
|
240
222
|
options=options, # type: ignore
|
241
223
|
startText=start_text,
|
242
224
|
)
|
225
|
+
return LanguageModelStreamResponse(**response)
|
243
226
|
except Exception as e:
|
244
227
|
self.logger.error(f"Error streaming completion: {e}")
|
245
228
|
raise e
|
246
229
|
|
247
|
-
|
230
|
+
@staticmethod
|
231
|
+
def _to_search_context(chunks: list[ContentChunk]) -> dict | None:
|
232
|
+
if not chunks:
|
233
|
+
return None
|
234
|
+
return [
|
235
|
+
unique_sdk.Integrated.SearchResult(
|
236
|
+
id=chunk.id,
|
237
|
+
chunkId=chunk.chunk_id,
|
238
|
+
key=chunk.key,
|
239
|
+
title=chunk.title,
|
240
|
+
url=chunk.url,
|
241
|
+
startPage=chunk.start_page,
|
242
|
+
endPage=chunk.end_page,
|
243
|
+
order=chunk.order,
|
244
|
+
object=chunk.object,
|
245
|
+
) # type: ignore
|
246
|
+
for chunk in chunks
|
247
|
+
]
|
248
248
|
|
249
249
|
@staticmethod
|
250
250
|
def _add_tools_to_options(
|
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.1
|
2
2
|
Name: unique_toolkit
|
3
|
-
Version: 0.5.
|
3
|
+
Version: 0.5.7
|
4
4
|
Summary:
|
5
5
|
License: MIT
|
6
6
|
Author: Martin Fadler
|
@@ -15,10 +15,9 @@ Requires-Dist: pydantic (>=2.8.2,<3.0.0)
|
|
15
15
|
Requires-Dist: pyhumps (>=3.8.0,<4.0.0)
|
16
16
|
Requires-Dist: python-dotenv (>=1.0.1,<2.0.0)
|
17
17
|
Requires-Dist: regex (>=2024.5.15,<2025.0.0)
|
18
|
-
Requires-Dist: requests (>=2.32.3,<3.0.0)
|
19
18
|
Requires-Dist: tiktoken (>=0.7.0,<0.8.0)
|
20
19
|
Requires-Dist: typing-extensions (>=4.9.0,<5.0.0)
|
21
|
-
Requires-Dist: unique-sdk (>=0.9,<0.10)
|
20
|
+
Requires-Dist: unique-sdk (>=0.9.4,<0.10.0)
|
22
21
|
Description-Content-Type: text/markdown
|
23
22
|
|
24
23
|
# Unique Toolkit
|
@@ -102,9 +101,18 @@ All notable changes to this project will be documented in this file.
|
|
102
101
|
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
103
102
|
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
104
103
|
|
104
|
+
## [0.5.7] - 2024-07-31
|
105
|
+
- Replace mocked async service calls with async calls in `unique_sdk`
|
106
|
+
- Change async methods name from `async_*` to `*_async`
|
107
|
+
- Remove `chat_only` and `scope_ids` attributes from `ChatState` class
|
108
|
+
- Replace `AsyncExecutor` by simpler utility function `run_async_tasks_parallel`
|
109
|
+
|
110
|
+
## [0.5.6] - 2024-07-30
|
111
|
+
- Bug fix: `ContentService.search_content_chunks` and it's `async` equivalent now accept `None` as a valid parameter value for `scope_ids`.
|
112
|
+
|
105
113
|
## [0.5.5] - 2024-07-30
|
106
114
|
- Added parameters to `ContentService.search_content_chunks` and `ContentService.async_search_content_chunks`
|
107
|
-
- `reranker_config` to optionally
|
115
|
+
- `reranker_config` to optionally rerank the search results
|
108
116
|
- `search_language` to specify a language for full-text-search
|
109
117
|
|
110
118
|
## [0.5.4] - 2024-07-26
|