unique_toolkit 0.5.16__py3-none-any.whl → 0.5.17__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -22,17 +22,18 @@ class LanguageModelName(StrEnum):
22
22
 
23
23
  class LanguageModelProvider(StrEnum):
24
24
  AZURE = "AZURE"
25
+ CUSTOM = "CUSTOM"
25
26
 
26
27
 
27
28
  class LanguageModelInfo(BaseModel):
28
- name: LanguageModelName
29
+ name: LanguageModelName | str
29
30
  version: str
30
31
  provider: LanguageModelProvider
31
32
 
32
- token_limits: LanguageModelTokenLimits
33
+ token_limits: Optional[LanguageModelTokenLimits] = None
33
34
 
34
- info_cutoff_at: date
35
- published_at: date
35
+ info_cutoff_at: Optional[date] = None
36
+ published_at: Optional[date] = None
36
37
  retirement_at: Optional[date] = None
37
38
 
38
39
  deprecated_at: Optional[date] = None
@@ -42,7 +43,7 @@ class LanguageModelInfo(BaseModel):
42
43
  class LanguageModel:
43
44
  _info: ClassVar[LanguageModelInfo]
44
45
 
45
- def __init__(self, model_name: LanguageModelName):
46
+ def __init__(self, model_name: LanguageModelName | str):
46
47
  self._model_info = self.get_model_info(model_name)
47
48
 
48
49
  @property
@@ -62,9 +63,9 @@ class LanguageModel:
62
63
  return self._model_info
63
64
 
64
65
  @property
65
- def name(self) -> LanguageModelName:
66
+ def name(self) -> LanguageModelName | str:
66
67
  """
67
- Returns the LanguageModelName of the model.
68
+ Returns the LanguageModelName of the model or the name string when it is a custom / not defined model.
68
69
  """
69
70
  return self._model_info.name
70
71
 
@@ -73,10 +74,13 @@ class LanguageModel:
73
74
  """
74
75
  Returns the name of the model as a string.
75
76
  """
76
- return self._model_info.name.name
77
+ if isinstance(self._model_info.name, LanguageModelName):
78
+ return self._model_info.name.name
79
+ else:
80
+ return self._model_info.name
77
81
 
78
82
  @property
79
- def version(self) -> str:
83
+ def version(self) -> Optional[str]:
80
84
  """
81
85
  Returns the version of the model.
82
86
  """
@@ -87,31 +91,34 @@ class LanguageModel:
87
91
  """
88
92
  Returns the maximum number of tokens for the model.
89
93
  """
90
- return self._model_info.token_limits.token_limit
94
+ if self._model_info.token_limits:
95
+ return self._model_info.token_limits.token_limit
91
96
 
92
97
  @property
93
98
  def token_limit_input(self) -> Optional[int]:
94
99
  """
95
100
  Returns the maximum number of input tokens for the model.
96
101
  """
97
- return self._model_info.token_limits.token_limit_input
102
+ if self._model_info.token_limits:
103
+ return self._model_info.token_limits.token_limit_input
98
104
 
99
105
  @property
100
106
  def token_limit_output(self) -> Optional[int]:
101
107
  """
102
108
  Returns the maximum number of output tokens for the model.
103
109
  """
104
- return self._model_info.token_limits.token_limit_output
110
+ if self._model_info.token_limits:
111
+ return self._model_info.token_limits.token_limit_output
105
112
 
106
113
  @property
107
- def info_cutoff_at(self) -> date:
114
+ def info_cutoff_at(self) -> Optional[date]:
108
115
  """
109
116
  Returns the date the model was last updated.
110
117
  """
111
118
  return self._model_info.info_cutoff_at
112
119
 
113
120
  @property
114
- def published_at(self) -> date:
121
+ def published_at(self) -> Optional[date]:
115
122
  """
116
123
  Returns the date the model was published.
117
124
  """
@@ -146,7 +153,10 @@ class LanguageModel:
146
153
  return self._model_info.provider
147
154
 
148
155
  @classmethod
149
- def get_model_info(cls, model_name: LanguageModelName) -> LanguageModelInfo:
156
+ def get_model_info(cls, model_name: LanguageModelName | str) -> LanguageModelInfo:
157
+ if not model_name:
158
+ raise ValueError("Model name must be provided to get the model info.")
159
+
150
160
  for subclass in cls.__subclasses__():
151
161
  if hasattr(subclass, "info") and subclass._info.name == model_name:
152
162
  # TODO find alternative solution for warning
@@ -155,7 +165,12 @@ class LanguageModel:
155
165
  # print(warning_text)
156
166
  # warnings.warn(warning_text, DeprecationWarning, stacklevel=2)
157
167
  return subclass._info
158
- raise ValueError(f"Model {model_name} not found.")
168
+
169
+ return LanguageModelInfo(
170
+ name=model_name,
171
+ version="custom",
172
+ provider=LanguageModelProvider.CUSTOM,
173
+ )
159
174
 
160
175
  @classmethod
161
176
  def list_models(cls) -> list[LanguageModelInfo]:
@@ -33,7 +33,7 @@ class LanguageModelService(BaseService):
33
33
  def complete(
34
34
  self,
35
35
  messages: LanguageModelMessages,
36
- model_name: LanguageModelName,
36
+ model_name: LanguageModelName | str,
37
37
  temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
38
38
  timeout: int = DEFAULT_COMPLETE_TIMEOUT,
39
39
  tools: Optional[list[LanguageModelTool]] = None,
@@ -43,7 +43,7 @@ class LanguageModelService(BaseService):
43
43
 
44
44
  Args:
45
45
  messages (LanguageModelMessages): The LanguageModelMessages obj to complete.
46
- model_name (LanguageModelName): The model name.
46
+ model_name (LanguageModelName | str): The model name.
47
47
  temperature (float): The temperature value. Defaults to 0.
48
48
  timeout (int): The timeout value in milliseconds. Defaults to 240_000.
49
49
  tools (Optional[list[LanguageModelTool]]): The tools to use. Defaults to None.
@@ -53,11 +53,15 @@ class LanguageModelService(BaseService):
53
53
  """
54
54
  options = self._add_tools_to_options({}, tools)
55
55
  messages = messages.model_dump(exclude_none=True)
56
+ model = (
57
+ model_name.name if isinstance(model_name, LanguageModelName) else model_name
58
+ )
59
+
56
60
  try:
57
61
  response = unique_sdk.ChatCompletion.create(
58
62
  company_id=self.event.company_id,
59
63
  # TODO change or extend types in unique_sdk
60
- model=model_name.name,
64
+ model=model,
61
65
  messages=cast(
62
66
  list[unique_sdk.Integrated.ChatCompletionRequestMessage],
63
67
  messages,
@@ -74,7 +78,7 @@ class LanguageModelService(BaseService):
74
78
  async def complete_async(
75
79
  self,
76
80
  messages: LanguageModelMessages,
77
- model_name: LanguageModelName,
81
+ model_name: LanguageModelName | str,
78
82
  temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
79
83
  timeout: int = DEFAULT_COMPLETE_TIMEOUT,
80
84
  tools: Optional[list[LanguageModelTool]] = None,
@@ -84,7 +88,7 @@ class LanguageModelService(BaseService):
84
88
 
85
89
  Args:
86
90
  messages (LanguageModelMessages): The messages to complete.
87
- model_name (LanguageModelName): The model name.
91
+ model_name (LanguageModelName | str): The model name.
88
92
  temperature (float): The temperature value. Defaults to 0.
89
93
  timeout (int): The timeout value in milliseconds. Defaults to 240_000.
90
94
  tools (Optional[list[LanguageModelTool]]): The tools to use. Defaults to None.
@@ -94,11 +98,15 @@ class LanguageModelService(BaseService):
94
98
  """
95
99
  options = self._add_tools_to_options({}, tools)
96
100
  messages = messages.model_dump(exclude_none=True, exclude={"tool_calls"})
101
+ model = (
102
+ model_name.name if isinstance(model_name, LanguageModelName) else model_name
103
+ )
104
+
97
105
  try:
98
106
  response = await unique_sdk.ChatCompletion.create_async(
99
107
  company_id=self.event.company_id,
100
108
  # TODO change or extend types in unique_sdk
101
- model=model_name.name,
109
+ model=model,
102
110
  messages=cast(
103
111
  list[unique_sdk.Integrated.ChatCompletionRequestMessage],
104
112
  messages,
@@ -115,7 +123,7 @@ class LanguageModelService(BaseService):
115
123
  def stream_complete(
116
124
  self,
117
125
  messages: LanguageModelMessages,
118
- model_name: LanguageModelName,
126
+ model_name: LanguageModelName | str,
119
127
  content_chunks: list[ContentChunk] = [],
120
128
  debug_info: dict = {},
121
129
  temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
@@ -129,7 +137,7 @@ class LanguageModelService(BaseService):
129
137
  Args:
130
138
  messages (LanguageModelMessages): The LanguageModelMessages object to stream.
131
139
  content_chunks (list[ContentChunk]): The ContentChunks objects.
132
- model_name (LanguageModelName): The language model to use for the completion.
140
+ model_name (LanguageModelName | str): The language model to use for the completion.
133
141
  debug_info (dict): The debug information. Defaults to {}.
134
142
  temperature (float): The temperature value. Defaults to 0.25.
135
143
  timeout (int): The timeout value in milliseconds. Defaults to 240_000.
@@ -142,6 +150,9 @@ class LanguageModelService(BaseService):
142
150
  options = self._add_tools_to_options({}, tools)
143
151
  search_context = self._to_search_context(content_chunks)
144
152
  messages = messages.model_dump(exclude_none=True)
153
+ model = (
154
+ model_name.name if isinstance(model_name, LanguageModelName) else model_name
155
+ )
145
156
 
146
157
  try:
147
158
  response = unique_sdk.Integrated.chat_stream_completion(
@@ -156,7 +167,7 @@ class LanguageModelService(BaseService):
156
167
  chatId=self.event.payload.chat_id,
157
168
  searchContext=search_context,
158
169
  # TODO change or extend types in unique_sdk
159
- model=model_name.name,
170
+ model=model,
160
171
  timeout=timeout,
161
172
  temperature=temperature,
162
173
  assistantId=self.event.payload.assistant_id,
@@ -172,7 +183,7 @@ class LanguageModelService(BaseService):
172
183
  async def stream_complete_async(
173
184
  self,
174
185
  messages: LanguageModelMessages,
175
- model_name: LanguageModelName,
186
+ model_name: LanguageModelName | str,
176
187
  content_chunks: list[ContentChunk] = [],
177
188
  debug_info: dict = {},
178
189
  temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
@@ -186,7 +197,7 @@ class LanguageModelService(BaseService):
186
197
  Args:
187
198
  messages (LanguageModelMessages): The LanguageModelMessages object to stream.
188
199
  content_chunks (list[ContentChunk]): The content chunks.
189
- model_name (LanguageModelName): The language model to use for the completion.
200
+ model_name (LanguageModelName | str): The language model to use for the completion.
190
201
  debug_info (dict): The debug information. Defaults to {}.
191
202
  temperature (float): The temperature value. Defaults to 0.25.
192
203
  timeout (int): The timeout value in milliseconds. Defaults to 240_000.
@@ -200,6 +211,9 @@ class LanguageModelService(BaseService):
200
211
  options = self._add_tools_to_options({}, tools)
201
212
  search_context = self._to_search_context(content_chunks)
202
213
  messages = messages.model_dump(exclude_none=True, exclude=["tool_calls"])
214
+ model = (
215
+ model_name.name if isinstance(model_name, LanguageModelName) else model_name
216
+ )
203
217
 
204
218
  try:
205
219
  response = await unique_sdk.Integrated.chat_stream_completion_async(
@@ -213,7 +227,7 @@ class LanguageModelService(BaseService):
213
227
  ),
214
228
  chatId=self.event.payload.chat_id,
215
229
  searchContext=search_context,
216
- model=model_name.name,
230
+ model=model,
217
231
  timeout=timeout,
218
232
  temperature=temperature,
219
233
  assistantId=self.event.payload.assistant_id,
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: unique_toolkit
3
- Version: 0.5.16
3
+ Version: 0.5.17
4
4
  Summary:
5
5
  License: MIT
6
6
  Author: Martin Fadler
@@ -101,7 +101,11 @@ All notable changes to this project will be documented in this file.
101
101
  The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
102
102
  and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
103
103
 
104
- ## [0.5.16] - 2024-08-27
104
+ ## [0.5.17] - 2024-08-30
105
+ - Add option to initiate `LanguageModel` with a string
106
+ - Add option to call `LanguageModelService` functions with a string instead of the `LanguageModelName` enum for the `model_name` parameter.
107
+
108
+ ## [0.5.16] - 2024-08-29
105
109
  - Fix `ContentService.upload_content` function.
106
110
 
107
111
  ## [0.5.15] - 2024-08-27
@@ -22,11 +22,11 @@ unique_toolkit/embedding/schemas.py,sha256=1GvKCaSk4jixzVQ2PKq8yDqwGEVY_hWclYtoA
22
22
  unique_toolkit/embedding/service.py,sha256=Iiw-sbdkjuWlWMfLM9qyC4GNTJOotQAaVjkYvh5Su4Y,2370
23
23
  unique_toolkit/embedding/utils.py,sha256=v86lo__bCJbxZBQ3OcLu5SuwT6NbFfWlcq8iyk6BuzQ,279
24
24
  unique_toolkit/language_model/__init__.py,sha256=QgU_uwpVh1URQyVs6l-6Am4UwmEEhuGXNic3dUZ0FCc,1701
25
- unique_toolkit/language_model/infos.py,sha256=JkugUAFFlrhTHXeM3A_R5QLkNSR9Ro85xWQFcdc2oM0,9307
25
+ unique_toolkit/language_model/infos.py,sha256=ETAUV0YTs6BjwuiTdhKz247CtL0W8Jwo3-c0ZQ2HdXs,9962
26
26
  unique_toolkit/language_model/schemas.py,sha256=h5zjZNk7O-wLKtRuiNtMCIbp5hEVXrAOviKonQcjFuI,4594
27
- unique_toolkit/language_model/service.py,sha256=JjsOOcGDcR7db3yF3_oDXclEGfxqmwWpL5jor7Q42cU,10470
27
+ unique_toolkit/language_model/service.py,sha256=8s2tiGLE5ryKQDOtEbNaFkc73NngANxvFNr-hD-dgps,10948
28
28
  unique_toolkit/language_model/utils.py,sha256=WBPj1XKkDgxy_-T8HCZvsfkkSzj_1w4UZzNmyvdbBLY,1081
29
- unique_toolkit-0.5.16.dist-info/LICENSE,sha256=bIeCWCYuoUU_MzNdg48-ubJSVm7qxakaRbzTiJ5uxrs,1065
30
- unique_toolkit-0.5.16.dist-info/METADATA,sha256=h7bIucz9PpU2PCc6BLMlAkDQO4s9aBZB_sGsc69oMlc,10301
31
- unique_toolkit-0.5.16.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
32
- unique_toolkit-0.5.16.dist-info/RECORD,,
29
+ unique_toolkit-0.5.17.dist-info/LICENSE,sha256=bIeCWCYuoUU_MzNdg48-ubJSVm7qxakaRbzTiJ5uxrs,1065
30
+ unique_toolkit-0.5.17.dist-info/METADATA,sha256=EQ-ycT-rGgOHG8Qu2Gr4OvMUl_i8Tdh2CT-M2e--gLM,10519
31
+ unique_toolkit-0.5.17.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
32
+ unique_toolkit-0.5.17.dist-info/RECORD,,