unique_toolkit 0.5.16__py3-none-any.whl → 0.5.18__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

unique_toolkit/app/schemas.py
@@ -51,6 +51,9 @@ class EventPayload(BaseModel):
     assistant_message: EventAssistantMessage
     text: Optional[str] = None
     additional_parameters: Optional[EventAdditionalParameters] = None
+    user_metadata: Optional[dict[str, Any]] = None
+    tool_parameters: Optional[dict[str, Any]] = None
+    metadata_filter: Optional[dict[str, Any]] = None


 class Event(BaseModel):
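
The three new `EventPayload` fields are optional dictionaries that default to `None`. A minimal sketch of how handler code can read them, assuming an already-parsed `event` as used throughout the toolkit (variable names are illustrative):

```python
# Sketch only: `event` is a parsed webhook Event; all three fields may be None.
user_metadata = event.payload.user_metadata or {}
tool_parameters = event.payload.tool_parameters or {}
metadata_filter = event.payload.metadata_filter  # forwarded to search when set
```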

unique_toolkit/chat/service.py
@@ -28,6 +28,106 @@ class ChatService(BaseService):
     DEFAULT_PERCENT_OF_MAX_TOKENS = 0.15
     DEFAULT_MAX_MESSAGES = 4

+    async def update_debug_info_async(self, debug_info: dict):
+        """
+        Updates the debug information for the chat session.
+
+        Args:
+            debug_info (dict): The new debug information.
+        """
+        params = self._construct_message_params(assistant=False, debug_info=debug_info)
+        try:
+            await unique_sdk.Message.modify_async(**params)
+        except Exception as e:
+            self.logger.error(f"Failed to update debug info: {e}")
+            raise e
+
+    def update_debug_info(self, debug_info: dict):
+        """
+        Updates the debug information for the chat session.
+
+        Args:
+            debug_info (dict): The new debug information.
+        """
+        params = self._construct_message_params(assistant=False, debug_info=debug_info)
+        try:
+            unique_sdk.Message.modify(**params)
+        except Exception as e:
+            self.logger.error(f"Failed to update debug info: {e}")
+            raise e
+
+    def modify_user_message(
+        self,
+        content: str,
+        references: Optional[list[ContentReference]] = None,
+        debug_info: Optional[dict] = None,
+        message_id: Optional[str] = None,
+    ) -> ChatMessage:
+        """
+        Modifies a user message in the chat session synchronously.
+
+        Args:
+            content (str): The new content for the message.
+            references (Optional[list[ContentReference]]): The references to attach. Defaults to None.
+            debug_info (Optional[dict]): Debug information. Defaults to None.
+            message_id (Optional[str]): The message ID. Defaults to None, in which case the user message ID from the event payload is used.
+
+        Returns:
+            ChatMessage: The modified message.
+
+        Raises:
+            Exception: If the modification fails.
+        """
+        try:
+            params = self._construct_message_params(
+                assistant=False,
+                content=content,
+                references=references,
+                debug_info=debug_info,
+                message_id=message_id,
+            )
+            message = unique_sdk.Message.modify(**params)
+        except Exception as e:
+            self.logger.error(f"Failed to modify user message: {e}")
+            raise e
+        return ChatMessage(**message)
+
+    async def modify_user_message_async(
+        self,
+        content: str,
+        references: Optional[list[ContentReference]] = None,
+        debug_info: Optional[dict] = None,
+        message_id: Optional[str] = None,
+    ) -> ChatMessage:
+        """
+        Modifies a user message in the chat session asynchronously.
+
+        Args:
+            content (str): The new content for the message.
+            references (Optional[list[ContentReference]]): The references to attach. Defaults to None.
+            debug_info (Optional[dict]): Debug information. Defaults to None.
+            message_id (Optional[str]): The message ID. Defaults to None, in which case the user message ID from the event payload is used.
+
+        Returns:
+            ChatMessage: The modified message.
+
+        Raises:
+            Exception: If the modification fails.
+        """
+        try:
+            params = self._construct_message_params(
+                assistant=False,
+                content=content,
+                references=references,
+                debug_info=debug_info,
+                message_id=message_id,
+            )
+            message = await unique_sdk.Message.modify_async(**params)
+        except Exception as e:
+            self.logger.error(f"Failed to modify user message: {e}")
+            raise e
+        return ChatMessage(**message)
+
     def modify_assistant_message(
         self,
         content: str,
@@ -50,19 +150,15 @@ class ChatService(BaseService):
         Raises:
             Exception: If the modification fails.
         """
-        message_id = message_id or self.event.payload.assistant_message.id
-
         try:
-            message = unique_sdk.Message.modify(
-                user_id=self.event.user_id,
-                company_id=self.event.company_id,
-                id=message_id,
-                chatId=self.event.payload.chat_id,
-                text=content,
-                references=self._map_references(references),
-                debugInfo=debug_info or {},
-                completedAt=_time_utils.get_datetime_now(),  # type: ignore
+            params = self._construct_message_params(
+                assistant=True,
+                content=content,
+                references=references,
+                debug_info=debug_info,
+                message_id=message_id,
             )
+            message = unique_sdk.Message.modify(**params)
         except Exception as e:
             self.logger.error(f"Failed to modify assistant message: {e}")
             raise e
@@ -90,19 +186,15 @@ class ChatService(BaseService):
         Raises:
             Exception: If the modification fails.
         """
-        message_id = message_id or self.event.payload.assistant_message.id
-
         try:
-            message = await unique_sdk.Message.modify_async(
-                user_id=self.event.user_id,
-                company_id=self.event.company_id,
-                id=message_id,
-                chatId=self.event.payload.chat_id,
-                text=content,
-                references=self._map_references(references),
-                debugInfo=debug_info or {},
-                completedAt=_time_utils.get_datetime_now(),  # type: ignore
+            params = self._construct_message_params(
+                assistant=True,
+                content=content,
+                references=references,
+                debug_info=debug_info,
+                message_id=message_id,
             )
+            message = await unique_sdk.Message.modify_async(**params)
         except Exception as e:
             self.logger.error(f"Failed to modify assistant message: {e}")
             raise e
@@ -384,3 +476,35 @@ class ChatService(BaseService):

         last_index = max(0, last_index)
         return messages[last_index:]
+
+    def _construct_message_params(
+        self,
+        assistant: bool = True,
+        content: Optional[str] = None,
+        references: Optional[list[ContentReference]] = None,
+        debug_info: Optional[dict] = None,
+        message_id: Optional[str] = None,
+    ):
+        if message_id:
+            # Message ID specified explicitly; no need to guess.
+            pass
+        elif assistant:
+            # Fall back to the assistant message ID from the event payload.
+            message_id = self.event.payload.assistant_message.id
+        else:
+            # Fall back to the user message ID from the event payload.
+            message_id = self.event.payload.user_message.id
+            if content is None:
+                content = self.event.payload.user_message.text
+
+        params = {
+            "user_id": self.event.user_id,
+            "company_id": self.event.company_id,
+            "id": message_id,
+            "chatId": self.event.payload.chat_id,
+            "text": content,
+            "references": self._map_references(references) if references else [],
+            "debugInfo": debug_info,
+            "completedAt": _time_utils.get_datetime_now(),
+        }
+        return params
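
Taken together, the hunks above add user-message editing to `ChatService` and route every message update through the shared `_construct_message_params` helper. A hedged usage sketch, assuming a `chat_service` constructed from a handled event (argument values are illustrative):

```python
# Attach debug information to the triggering user message.
chat_service.update_debug_info({"retrieval": {"chunks_considered": 12}})

# Rewrite the user message; with message_id=None, _construct_message_params
# falls back to event.payload.user_message.id.
updated = chat_service.modify_user_message(
    content="What was the Q3 revenue?",
    debug_info={"normalized": True},
)

# The async variants mirror the sync API:
#   await chat_service.update_debug_info_async(...)
#   await chat_service.modify_user_message_async(...)
```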

unique_toolkit/content/schemas.py
@@ -14,7 +14,12 @@ model_config = ConfigDict(


 class ContentMetadata(BaseModel):
-    model_config = model_config
+    model_config = ConfigDict(
+        alias_generator=camelize,
+        populate_by_name=True,
+        arbitrary_types_allowed=True,
+        extra="allow",
+    )
     key: str
     mime_type: str

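
The practical effect of the dedicated `ConfigDict` is `extra="allow"`: unknown metadata keys now survive validation instead of being rejected. A small sketch (values hypothetical; the `camelize` alias generator means `mime_type` also parses from `mimeType`):

```python
meta = ContentMetadata(key="report.pdf", mimeType="application/pdf", owner="alice")
print(meta.model_dump(by_alias=True))
# {'key': 'report.pdf', 'mimeType': 'application/pdf', 'owner': 'alice'}
```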

unique_toolkit/content/service.py
@@ -40,6 +40,7 @@ class ContentService(BaseService):
         reranker_config: Optional[ContentRerankerConfig] = None,
         scope_ids: Optional[list[str]] = None,
         chat_only: Optional[bool] = None,
+        metadata_filter: Optional[dict] = None,
     ) -> list[ContentChunk]:
         """
         Performs a synchronous search for content chunks in the knowledge base.
@@ -52,6 +53,7 @@
             reranker_config (Optional[ContentRerankerConfig]): The reranker configuration. Defaults to None.
             scope_ids (Optional[list[str]]): The scope IDs. Defaults to None.
             chat_only (Optional[bool]): Whether to search only in the current chat. Defaults to None.
+            metadata_filter (Optional[dict]): UniqueQL metadata filter. Defaults to None.

         Returns:
             list[ContentChunk]: The search results.
@@ -75,6 +77,7 @@
                 ),
                 language=search_language,
                 chatOnly=chat_only,
+                metaDataFilter=metadata_filter,
             )
         except Exception as e:
             self.logger.error(f"Error while searching content chunks: {e}")
@@ -96,6 +99,7 @@
         reranker_config: Optional[ContentRerankerConfig] = None,
         scope_ids: Optional[list[str]] = None,
         chat_only: Optional[bool] = None,
+        metadata_filter: Optional[dict] = None,
     ):
         """
         Performs an asynchronous search for content chunks in the knowledge base.
@@ -108,6 +112,7 @@
             reranker_config (Optional[ContentRerankerConfig]): The reranker configuration. Defaults to None.
             scope_ids (Optional[list[str]]): The scope IDs. Defaults to None.
             chat_only (Optional[bool]): Whether to search only in the current chat. Defaults to None.
+            metadata_filter (Optional[dict]): UniqueQL metadata filter. Defaults to None.

         Returns:
             list[ContentChunk]: The search results.
@@ -131,6 +136,7 @@
                 ),
                 language=search_language,
                 chatOnly=chat_only,
+                metaDataFilter=metadata_filter,
             )
         except Exception as e:
             self.logger.error(f"Error while searching content chunks: {e}")
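
Both search variants now forward `metadata_filter` to the backend as `metaDataFilter`. A hedged sketch of a call; the UniqueQL filter grammar is not shown in this diff, so the dict below is an illustrative shape only, and the remaining arguments are assumed from the existing signature:

```python
chunks = content_service.search_content_chunks(
    search_string="quarterly revenue",
    search_type=ContentSearchType.COMBINED,  # assumed existing enum value
    limit=20,
    metadata_filter={  # illustrative UniqueQL-style filter, not a confirmed schema
        "path": ["folderId"],
        "operator": "equals",
        "value": "fold_123",
    },
)
```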

unique_toolkit/language_model/infos.py
@@ -22,17 +22,18 @@ class LanguageModelName(StrEnum):

 class LanguageModelProvider(StrEnum):
     AZURE = "AZURE"
+    CUSTOM = "CUSTOM"


 class LanguageModelInfo(BaseModel):
-    name: LanguageModelName
+    name: LanguageModelName | str
     version: str
     provider: LanguageModelProvider

-    token_limits: LanguageModelTokenLimits
+    token_limits: Optional[LanguageModelTokenLimits] = None

-    info_cutoff_at: date
-    published_at: date
+    info_cutoff_at: Optional[date] = None
+    published_at: Optional[date] = None
     retirement_at: Optional[date] = None

     deprecated_at: Optional[date] = None
@@ -42,7 +43,7 @@ class LanguageModelInfo(BaseModel):
 class LanguageModel:
     _info: ClassVar[LanguageModelInfo]

-    def __init__(self, model_name: LanguageModelName):
+    def __init__(self, model_name: LanguageModelName | str):
         self._model_info = self.get_model_info(model_name)

     @property
@@ -62,9 +63,9 @@ class LanguageModel:
         return self._model_info

     @property
-    def name(self) -> LanguageModelName:
+    def name(self) -> LanguageModelName | str:
         """
-        Returns the LanguageModelName of the model.
+        Returns the LanguageModelName of the model, or the plain name string for a custom / unlisted model.
         """
         return self._model_info.name

@@ -73,10 +74,13 @@
         """
         Returns the name of the model as a string.
         """
-        return self._model_info.name.name
+        if isinstance(self._model_info.name, LanguageModelName):
+            return self._model_info.name.name
+        else:
+            return self._model_info.name

     @property
-    def version(self) -> str:
+    def version(self) -> Optional[str]:
         """
         Returns the version of the model.
         """
@@ -87,31 +91,34 @@
         """
         Returns the maximum number of tokens for the model.
         """
-        return self._model_info.token_limits.token_limit
+        if self._model_info.token_limits:
+            return self._model_info.token_limits.token_limit

     @property
     def token_limit_input(self) -> Optional[int]:
         """
         Returns the maximum number of input tokens for the model.
         """
-        return self._model_info.token_limits.token_limit_input
+        if self._model_info.token_limits:
+            return self._model_info.token_limits.token_limit_input

     @property
     def token_limit_output(self) -> Optional[int]:
         """
         Returns the maximum number of output tokens for the model.
         """
-        return self._model_info.token_limits.token_limit_output
+        if self._model_info.token_limits:
+            return self._model_info.token_limits.token_limit_output

     @property
-    def info_cutoff_at(self) -> date:
+    def info_cutoff_at(self) -> Optional[date]:
         """
         Returns the date the model was last updated.
         """
         return self._model_info.info_cutoff_at

     @property
-    def published_at(self) -> date:
+    def published_at(self) -> Optional[date]:
         """
         Returns the date the model was published.
         """
@@ -146,7 +153,10 @@
         return self._model_info.provider

     @classmethod
-    def get_model_info(cls, model_name: LanguageModelName) -> LanguageModelInfo:
+    def get_model_info(cls, model_name: LanguageModelName | str) -> LanguageModelInfo:
+        if not model_name:
+            raise ValueError("Model name must be provided to get the model info.")
+
         for subclass in cls.__subclasses__():
             if hasattr(subclass, "info") and subclass._info.name == model_name:
                 # TODO find alternative solution for warning
@@ -155,7 +165,12 @@
                 # print(warning_text)
                 # warnings.warn(warning_text, DeprecationWarning, stacklevel=2)
                 return subclass._info
-        raise ValueError(f"Model {model_name} not found.")
+
+        return LanguageModelInfo(
+            name=model_name,
+            version="custom",
+            provider=LanguageModelProvider.CUSTOM,
+        )

     @classmethod
     def list_models(cls) -> list[LanguageModelInfo]:
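
With the new `CUSTOM` provider and the `get_model_info` fallback, unknown model names no longer raise. A short sketch of the resulting behavior (the deployment name is hypothetical):

```python
model = LanguageModel("my-custom-deployment")  # any non-enum string now works
print(model.name)         # "my-custom-deployment"
print(model.version)      # "custom"
print(model.provider)     # LanguageModelProvider.CUSTOM
print(model.token_limit)  # None -- no token limits registered for custom models
```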

unique_toolkit/language_model/service.py
@@ -33,7 +33,7 @@ class LanguageModelService(BaseService):
     def complete(
         self,
         messages: LanguageModelMessages,
-        model_name: LanguageModelName,
+        model_name: LanguageModelName | str,
         temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
         timeout: int = DEFAULT_COMPLETE_TIMEOUT,
         tools: Optional[list[LanguageModelTool]] = None,
@@ -43,7 +43,7 @@

         Args:
             messages (LanguageModelMessages): The LanguageModelMessages object to complete.
-            model_name (LanguageModelName): The model name.
+            model_name (LanguageModelName | str): The model name.
             temperature (float): The temperature value. Defaults to 0.
             timeout (int): The timeout value in milliseconds. Defaults to 240_000.
             tools (Optional[list[LanguageModelTool]]): The tools to use. Defaults to None.
@@ -53,11 +53,15 @@
         """
         options = self._add_tools_to_options({}, tools)
         messages = messages.model_dump(exclude_none=True)
+        model = (
+            model_name.name if isinstance(model_name, LanguageModelName) else model_name
+        )
+
         try:
             response = unique_sdk.ChatCompletion.create(
                 company_id=self.event.company_id,
                 # TODO change or extend types in unique_sdk
-                model=model_name.name,
+                model=model,
                 messages=cast(
                     list[unique_sdk.Integrated.ChatCompletionRequestMessage],
                     messages,
@@ -74,7 +78,7 @@
     async def complete_async(
         self,
         messages: LanguageModelMessages,
-        model_name: LanguageModelName,
+        model_name: LanguageModelName | str,
         temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
         timeout: int = DEFAULT_COMPLETE_TIMEOUT,
         tools: Optional[list[LanguageModelTool]] = None,
@@ -84,7 +88,7 @@

         Args:
             messages (LanguageModelMessages): The messages to complete.
-            model_name (LanguageModelName): The model name.
+            model_name (LanguageModelName | str): The model name.
             temperature (float): The temperature value. Defaults to 0.
             timeout (int): The timeout value in milliseconds. Defaults to 240_000.
             tools (Optional[list[LanguageModelTool]]): The tools to use. Defaults to None.
@@ -94,11 +98,15 @@
         """
         options = self._add_tools_to_options({}, tools)
         messages = messages.model_dump(exclude_none=True, exclude={"tool_calls"})
+        model = (
+            model_name.name if isinstance(model_name, LanguageModelName) else model_name
+        )
+
         try:
             response = await unique_sdk.ChatCompletion.create_async(
                 company_id=self.event.company_id,
                 # TODO change or extend types in unique_sdk
-                model=model_name.name,
+                model=model,
                 messages=cast(
                     list[unique_sdk.Integrated.ChatCompletionRequestMessage],
                     messages,
@@ -115,7 +123,7 @@
     def stream_complete(
         self,
         messages: LanguageModelMessages,
-        model_name: LanguageModelName,
+        model_name: LanguageModelName | str,
         content_chunks: list[ContentChunk] = [],
         debug_info: dict = {},
         temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
@@ -129,7 +137,7 @@
         Args:
             messages (LanguageModelMessages): The LanguageModelMessages object to stream.
             content_chunks (list[ContentChunk]): The ContentChunk objects.
-            model_name (LanguageModelName): The language model to use for the completion.
+            model_name (LanguageModelName | str): The language model to use for the completion.
             debug_info (dict): The debug information. Defaults to {}.
             temperature (float): The temperature value. Defaults to 0.25.
             timeout (int): The timeout value in milliseconds. Defaults to 240_000.
@@ -142,6 +150,9 @@
         options = self._add_tools_to_options({}, tools)
         search_context = self._to_search_context(content_chunks)
         messages = messages.model_dump(exclude_none=True)
+        model = (
+            model_name.name if isinstance(model_name, LanguageModelName) else model_name
+        )

         try:
             response = unique_sdk.Integrated.chat_stream_completion(
@@ -156,7 +167,7 @@
                 chatId=self.event.payload.chat_id,
                 searchContext=search_context,
                 # TODO change or extend types in unique_sdk
-                model=model_name.name,
+                model=model,
                 timeout=timeout,
                 temperature=temperature,
                 assistantId=self.event.payload.assistant_id,
@@ -172,7 +183,7 @@
     async def stream_complete_async(
         self,
         messages: LanguageModelMessages,
-        model_name: LanguageModelName,
+        model_name: LanguageModelName | str,
         content_chunks: list[ContentChunk] = [],
         debug_info: dict = {},
         temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
@@ -186,7 +197,7 @@
         Args:
             messages (LanguageModelMessages): The LanguageModelMessages object to stream.
             content_chunks (list[ContentChunk]): The content chunks.
-            model_name (LanguageModelName): The language model to use for the completion.
+            model_name (LanguageModelName | str): The language model to use for the completion.
             debug_info (dict): The debug information. Defaults to {}.
             temperature (float): The temperature value. Defaults to 0.25.
             timeout (int): The timeout value in milliseconds. Defaults to 240_000.
@@ -200,6 +211,9 @@
         options = self._add_tools_to_options({}, tools)
         search_context = self._to_search_context(content_chunks)
         messages = messages.model_dump(exclude_none=True, exclude=["tool_calls"])
+        model = (
+            model_name.name if isinstance(model_name, LanguageModelName) else model_name
+        )

         try:
             response = await unique_sdk.Integrated.chat_stream_completion_async(
@@ -213,7 +227,7 @@
                 ),
                 chatId=self.event.payload.chat_id,
                 searchContext=search_context,
-                model=model_name.name,
+                model=model,
                 timeout=timeout,
                 temperature=temperature,
                 assistantId=self.event.payload.assistant_id,
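
All four completion entry points resolve `model_name` the same way before calling the SDK, so enum members and plain strings are interchangeable. A hedged sketch, assuming `service` and `messages` are built as elsewhere in the toolkit:

```python
# Enum members are mapped via .name; plain strings pass through unchanged
# (the deployment name below is hypothetical).
response = service.complete(messages=messages, model_name="my-custom-deployment")
```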

unique_toolkit-0.5.18.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: unique_toolkit
-Version: 0.5.16
+Version: 0.5.18
 Summary:
 License: MIT
 Author: Martin Fadler
@@ -13,12 +13,11 @@ Classifier: Programming Language :: Python :: 3.12
 Requires-Dist: numpy (>=1.26.4,<2.0.0)
 Requires-Dist: pydantic (>=2.8.2,<3.0.0)
 Requires-Dist: pyhumps (>=3.8.0,<4.0.0)
-Requires-Dist: pytest-mock (>=3.14.0,<4.0.0)
 Requires-Dist: python-dotenv (>=1.0.1,<2.0.0)
 Requires-Dist: regex (>=2024.5.15,<2025.0.0)
 Requires-Dist: tiktoken (>=0.7.0,<0.8.0)
 Requires-Dist: typing-extensions (>=4.9.0,<5.0.0)
-Requires-Dist: unique-sdk (>=0.9.4,<0.10.0)
+Requires-Dist: unique-sdk (>=0.9.6,<0.10.0)
 Description-Content-Type: text/markdown

 # Unique Toolkit
@@ -101,7 +100,16 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

-## [0.5.16] - 2024-08-27
+## [0.5.18] - 2024-09-03
+- Adds option to use `metadata_filter` with search.
+- Adds `user_metadata`, `tool_parameters` and `metadata_filter` to `EventPayload`.
+- Adds `update_debug_info` and `modify_user_message` (and the corresponding `async` variants) to `ChatService`.
+
+## [0.5.17] - 2024-08-30
+- Add option to initialize `LanguageModel` with a string.
+- Allow `LanguageModelService` functions to accept a plain string for `model_name` in place of the `LanguageModelName` enum.
+
+## [0.5.16] - 2024-08-29
 - Fix `ContentService.upload_content` function.

 ## [0.5.15] - 2024-08-27

unique_toolkit-0.5.18.dist-info/RECORD
@@ -6,27 +6,27 @@ unique_toolkit/app/init_logging.py,sha256=Sh26SRxOj8i8dzobKhYha2lLrkrMTHfB1V4jR3
 unique_toolkit/app/init_sdk.py,sha256=Nv4Now4pMfM0AgRhbtatLpm_39rKxn0WmRLwmPhRl-8,1285
 unique_toolkit/app/performance/async_tasks.py,sha256=H0l3OAcosLwNHZ8d2pd-Di4wHIXfclEvagi5kfqLFPA,1941
 unique_toolkit/app/performance/async_wrapper.py,sha256=yVVcRDkcdyfjsxro-N29SBvi-7773wnfDplef6-y8xw,1077
-unique_toolkit/app/schemas.py,sha256=_PIROOUtdKvZFZdvkCORhj-MVWvfkopIQW7VIFmguRg,1364
+unique_toolkit/app/schemas.py,sha256=tzrmUFKZUdC1P3LxZ7DrElpkMtekUDoClb7jCRzGqNQ,1521
 unique_toolkit/app/verification.py,sha256=UZqTHg3PX_QxMjeLH_BVBYoMVqMnMpeMoqvyTBKDqj8,1996
 unique_toolkit/chat/__init__.py,sha256=1prdTVfLOf6NgU-Aa1VIO-XiR6OYuRm51LaVRfKDCqc,267
 unique_toolkit/chat/schemas.py,sha256=ff4M-XMagF0Evn3FcKHHP5xzDEyufZgq9Dmit3i8r_E,802
-unique_toolkit/chat/service.py,sha256=mjzj0GCpIbGMX_k1aei8BY-132DCPv53FI7TqbXfaKk,13544
+unique_toolkit/chat/service.py,sha256=nVZ8agpEsXI2-5bS61e1Eawd0abioEQPov2xahiJtfk,17848
 unique_toolkit/chat/state.py,sha256=Cjgwv_2vhDFbV69xxsn7SefhaoIAEqLx3ferdVFCnOg,1445
 unique_toolkit/chat/utils.py,sha256=ihm-wQykBWhB4liR3LnwPVPt_qGW6ETq21Mw4HY0THE,854
 unique_toolkit/content/__init__.py,sha256=MSH2sxjQyKD2Sef92fzE5Dt9SihdzivB6yliSwJfTmQ,890
-unique_toolkit/content/schemas.py,sha256=UlC5nBIaFkq9TD31LR6ioG9JRZ1ScmtABi0l06HZR70,2231
-unique_toolkit/content/service.py,sha256=NoH8FNP36Jt92RRswDZAfUV0UItrsrtokcqKP__ERRc,13656
+unique_toolkit/content/schemas.py,sha256=zks_Pkki2VhxICJJgHZyc-LPmRuj5dLbw3pgcUT7SW8,2362
+unique_toolkit/content/service.py,sha256=AHyMJTXm5IpYbg1uINzjGqvSL_5aJwEHwSH7Y5pkXBg,14028
 unique_toolkit/content/utils.py,sha256=Lake671plRsqNvO3pN_rmyVcpwbdED_KQpLcCnc4lv4,6902
 unique_toolkit/embedding/__init__.py,sha256=dr8M9jvslQTxPpxgaGwzxY0FildiWf-DidN_cahPAWw,191
 unique_toolkit/embedding/schemas.py,sha256=1GvKCaSk4jixzVQ2PKq8yDqwGEVY_hWclYtoAr6CC2g,96
 unique_toolkit/embedding/service.py,sha256=Iiw-sbdkjuWlWMfLM9qyC4GNTJOotQAaVjkYvh5Su4Y,2370
 unique_toolkit/embedding/utils.py,sha256=v86lo__bCJbxZBQ3OcLu5SuwT6NbFfWlcq8iyk6BuzQ,279
 unique_toolkit/language_model/__init__.py,sha256=QgU_uwpVh1URQyVs6l-6Am4UwmEEhuGXNic3dUZ0FCc,1701
-unique_toolkit/language_model/infos.py,sha256=JkugUAFFlrhTHXeM3A_R5QLkNSR9Ro85xWQFcdc2oM0,9307
+unique_toolkit/language_model/infos.py,sha256=ETAUV0YTs6BjwuiTdhKz247CtL0W8Jwo3-c0ZQ2HdXs,9962
 unique_toolkit/language_model/schemas.py,sha256=h5zjZNk7O-wLKtRuiNtMCIbp5hEVXrAOviKonQcjFuI,4594
-unique_toolkit/language_model/service.py,sha256=JjsOOcGDcR7db3yF3_oDXclEGfxqmwWpL5jor7Q42cU,10470
+unique_toolkit/language_model/service.py,sha256=8s2tiGLE5ryKQDOtEbNaFkc73NngANxvFNr-hD-dgps,10948
 unique_toolkit/language_model/utils.py,sha256=WBPj1XKkDgxy_-T8HCZvsfkkSzj_1w4UZzNmyvdbBLY,1081
-unique_toolkit-0.5.16.dist-info/LICENSE,sha256=bIeCWCYuoUU_MzNdg48-ubJSVm7qxakaRbzTiJ5uxrs,1065
-unique_toolkit-0.5.16.dist-info/METADATA,sha256=h7bIucz9PpU2PCc6BLMlAkDQO4s9aBZB_sGsc69oMlc,10301
-unique_toolkit-0.5.16.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-unique_toolkit-0.5.16.dist-info/RECORD,,
+unique_toolkit-0.5.18.dist-info/LICENSE,sha256=bIeCWCYuoUU_MzNdg48-ubJSVm7qxakaRbzTiJ5uxrs,1065
+unique_toolkit-0.5.18.dist-info/METADATA,sha256=Bc1nuWyOLgX0SujW7MCpzcbG8WBlTqPiC3PPLU858WA,10748
+unique_toolkit-0.5.18.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+unique_toolkit-0.5.18.dist-info/RECORD,,