unique_toolkit 0.5.18__py3-none-any.whl → 0.5.20__py3-none-any.whl

This diff shows the contents of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
@@ -43,7 +43,7 @@ class LanguageModelMessage(BaseModel):
     model_config = model_config
 
     role: LanguageModelMessageRole
-    content: Optional[str] = None
+    content: Optional[str | list[dict]] = None
     name: Optional[str] = None
     tool_calls: Optional[list[LanguageModelFunctionCall]] = None
 
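For context on the LanguageModelMessage hunk above (from unique_toolkit/language_model/schemas.py, per the RECORD changes below): `content` now accepts either a plain string or a list of dicts, which the 0.5.19 changelog entry further down ties to attaching image_url content to a user message. The following is a minimal sketch of what that allows; the OpenAI-style part layout, the `USER` enum member, and the import path are assumptions inferred from this diff, not confirmed by it.

# Hypothetical usage sketch for the widened content type.
from unique_toolkit.language_model.schemas import (  # path inferred from RECORD
    LanguageModelMessage,
    LanguageModelMessageRole,
)

# Plain string content keeps working exactly as before.
text_only = LanguageModelMessage(
    role=LanguageModelMessageRole.USER,  # assumed enum member name
    content="Describe this chart.",
)

# New in 0.5.19: content may also be a list of dicts, e.g. user text plus an
# image_url part. The dict shape below is an assumed OpenAI-style layout.
with_image = LanguageModelMessage(
    role=LanguageModelMessageRole.USER,
    content=[
        {"type": "text", "text": "Describe this chart."},
        {"type": "image_url", "image_url": {"url": "https://example.com/chart.png"}},
    ],
)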
@@ -75,37 +75,47 @@ class LanguageModelService(BaseService):
             self.logger.error(f"Error completing: {e}")
             raise e
 
-    async def complete_async(
-        self,
+    @classmethod
+    async def complete_async_util(
+        cls,
+        company_id: str,
         messages: LanguageModelMessages,
         model_name: LanguageModelName | str,
         temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
         timeout: int = DEFAULT_COMPLETE_TIMEOUT,
         tools: Optional[list[LanguageModelTool]] = None,
-    ):
+        logger: Optional[logging.Logger] = logging.getLogger(__name__),
+    ) -> LanguageModelResponse:
         """
         Calls the completion endpoint asynchronously without streaming the response.
 
+        This method sends a request to the completion endpoint using the provided messages, model name,
+        temperature, timeout, and optional tools. It returns a `LanguageModelResponse` object containing
+        the completed result.
+
         Args:
+            company_id (str): The company ID associated with the request.
             messages (LanguageModelMessages): The messages to complete.
-            model_name (LanguageModelName | str): The model name.
-            temperature (float): The temperature value. Defaults to 0.
-            timeout (int): The timeout value in milliseconds. Defaults to 240_000.
-            tools (Optional[list[LanguageModelTool]]): The tools to use. Defaults to None.
+            model_name (LanguageModelName | str): The model name to use for the completion.
+            temperature (float): The temperature setting for the completion. Defaults to 0.
+            timeout (int): The timeout value in milliseconds for the request. Defaults to 240_000.
+            tools (Optional[list[LanguageModelTool]]): Optional list of tools to include in the request.
+            logger (Optional[logging.Logger], optional): The logger used to log errors. Defaults to the logger for the current module.
 
         Returns:
-            str: The completed message content.
+            LanguageModelResponse: The response object containing the completed result.
+
+        Raises:
+            Exception: If an error occurs during the request, an exception is raised and logged.
         """
-        options = self._add_tools_to_options({}, tools)
+        options = cls._add_tools_to_options({}, tools)
         messages = messages.model_dump(exclude_none=True, exclude={"tool_calls"})
         model = (
             model_name.name if isinstance(model_name, LanguageModelName) else model_name
         )
-
         try:
             response = await unique_sdk.ChatCompletion.create_async(
-                company_id=self.event.company_id,
-                # TODO change or extend types in unique_sdk
+                company_id=company_id,
                 model=model,
                 messages=cast(
                     list[unique_sdk.Integrated.ChatCompletionRequestMessage],
@@ -117,9 +127,47 @@ class LanguageModelService(BaseService):
             )
             return LanguageModelResponse(**response)
         except Exception as e:
-            self.logger.error(f"Error completing: {e}")
+            logger.error(f"Error completing: {e}")
             raise e
 
+    async def complete_async(
+        self,
+        messages: LanguageModelMessages,
+        model_name: LanguageModelName | str,
+        temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
+        timeout: int = DEFAULT_COMPLETE_TIMEOUT,
+        tools: Optional[list[LanguageModelTool]] = None,
+    ) -> LanguageModelResponse:
+        """
+        Calls the completion endpoint asynchronously without streaming the response.
+
+        This method utilizes the class method `complete_async_util` to perform the asynchronous completion
+        request using the provided messages, model name, temperature, timeout, and optional tools. It
+        returns a `LanguageModelResponse` object containing the result of the completion.
+
+        Args:
+            messages (LanguageModelMessages): The messages to complete.
+            model_name (LanguageModelName | str): The model name to use for the completion.
+            temperature (float): The temperature setting for the completion. Defaults to 0.0.
+            timeout (int): The timeout value in milliseconds for the request. Defaults to 240,000.
+            tools (Optional[list[LanguageModelTool]]): Optional list of tools to include in the request.
+
+        Returns:
+            LanguageModelResponse: The response object containing the completed result.
+
+        Raises:
+            Exception: If an error occurs during the completion request.
+        """
+        return await self.complete_async_util(
+            company_id=self.event.company_id,
+            messages=messages,
+            model_name=model_name,
+            temperature=temperature,
+            timeout=timeout,
+            tools=tools,
+            logger=self.logger,
+        )
+
     def stream_complete(
         self,
         messages: LanguageModelMessages,
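The two LanguageModelService hunks above (from unique_toolkit/language_model/service.py, per the RECORD changes below) move the completion logic into a new classmethod, complete_async_util, which takes an explicit company_id and an optional logger, while the instance method complete_async becomes a thin wrapper forwarding self.event.company_id and self.logger. Below is a hedged sketch of calling the classmethod without instantiating the service; how LanguageModelMessages is constructed, the import paths, and the placeholder values are assumptions, not shown in this diff.

import asyncio

# Import paths inferred from the RECORD listing; the package may also re-export
# these names from unique_toolkit.language_model.
from unique_toolkit.language_model.schemas import (
    LanguageModelMessage,
    LanguageModelMessageRole,
    LanguageModelMessages,
)
from unique_toolkit.language_model.service import LanguageModelService


async def main() -> None:
    # Assumed: LanguageModelMessages wraps a list of LanguageModelMessage objects.
    messages = LanguageModelMessages(
        [
            LanguageModelMessage(
                role=LanguageModelMessageRole.USER,  # assumed enum member name
                content="Summarize the quarterly report in one sentence.",
            )
        ]
    )

    # New in 0.5.20: no LanguageModelService instance (and therefore no chat event)
    # is required, only a company_id. The changelog notes this is what the
    # Hallucination service and the evaluation API rely on.
    response = await LanguageModelService.complete_async_util(
        company_id="company_123",    # hypothetical ID
        messages=messages,
        model_name="my-model-name",  # any LanguageModelName member or plain string
    )
    print(response)


asyncio.run(main())

Inside the service itself, the instance call service.complete_async(...) keeps its previous signature and now simply delegates to this classmethod.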
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: unique_toolkit
-Version: 0.5.18
+Version: 0.5.20
 Summary:
 License: MIT
 Author: Martin Fadler
@@ -100,6 +100,12 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [0.5.20] - 2024-09-16
+- `LanguageModelService` now supports complete_util_async that can be called without instantiating the class, currently being used in the Hallucination service and evaluation API
+
+## [0.5.19] - 2024-09-11
+- `LanguageModelMessage` now supports content as a list of dictionary. Useful when adding image_url content along user message.
+
 ## [0.5.18] - 2024-09-03
 - Adds option to use `metadata_filter` with search.
 - Adds `user_metadata`, `tool_parameters` and `metadata_filter` to `EventPayload`.
@@ -23,10 +23,10 @@ unique_toolkit/embedding/service.py,sha256=Iiw-sbdkjuWlWMfLM9qyC4GNTJOotQAaVjkYv
 unique_toolkit/embedding/utils.py,sha256=v86lo__bCJbxZBQ3OcLu5SuwT6NbFfWlcq8iyk6BuzQ,279
 unique_toolkit/language_model/__init__.py,sha256=QgU_uwpVh1URQyVs6l-6Am4UwmEEhuGXNic3dUZ0FCc,1701
 unique_toolkit/language_model/infos.py,sha256=ETAUV0YTs6BjwuiTdhKz247CtL0W8Jwo3-c0ZQ2HdXs,9962
-unique_toolkit/language_model/schemas.py,sha256=h5zjZNk7O-wLKtRuiNtMCIbp5hEVXrAOviKonQcjFuI,4594
-unique_toolkit/language_model/service.py,sha256=8s2tiGLE5ryKQDOtEbNaFkc73NngANxvFNr-hD-dgps,10948
+unique_toolkit/language_model/schemas.py,sha256=DGZL6j63txkq5rdCn1uuVQIyLOyZt9t8J4f8JzhZENg,4607
+unique_toolkit/language_model/service.py,sha256=CvVo5CBa5Ia_fQD3DtJRsVChybuUfGFV5ml2_78_p1I,13395
 unique_toolkit/language_model/utils.py,sha256=WBPj1XKkDgxy_-T8HCZvsfkkSzj_1w4UZzNmyvdbBLY,1081
-unique_toolkit-0.5.18.dist-info/LICENSE,sha256=bIeCWCYuoUU_MzNdg48-ubJSVm7qxakaRbzTiJ5uxrs,1065
-unique_toolkit-0.5.18.dist-info/METADATA,sha256=Bc1nuWyOLgX0SujW7MCpzcbG8WBlTqPiC3PPLU858WA,10748
-unique_toolkit-0.5.18.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-unique_toolkit-0.5.18.dist-info/RECORD,,
+unique_toolkit-0.5.20.dist-info/LICENSE,sha256=bIeCWCYuoUU_MzNdg48-ubJSVm7qxakaRbzTiJ5uxrs,1065
+unique_toolkit-0.5.20.dist-info/METADATA,sha256=RlEnbfzQ12Do8nqVzhdWLfIktxKQbzu7ffdacfkWmTw,11108
+unique_toolkit-0.5.20.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+unique_toolkit-0.5.20.dist-info/RECORD,,