unique_toolkit 0.5.48__py3-none-any.whl → 0.5.50__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -35,7 +35,7 @@ class LanguageModelMessageRole(StrEnum):
35
35
  class LanguageModelFunction(BaseModel):
36
36
  model_config = model_config
37
37
 
38
- id: Optional[str] = None
38
+ id: str | None = None
39
39
  name: str
40
40
  arguments: Optional[dict[str, Any] | str] = None # type: ignore
41
41
 
@@ -63,8 +63,8 @@ class LanguageModelFunction(BaseModel):
63
63
  class LanguageModelFunctionCall(BaseModel):
64
64
  model_config = model_config
65
65
 
66
- id: Optional[str] = None
67
- type: Optional[str] = None
66
+ id: str | None = None
67
+ type: str | None = None
68
68
  function: LanguageModelFunction
69
69
 
70
70
  @staticmethod
@@ -88,10 +88,17 @@ class LanguageModelFunctionCall(BaseModel):
88
88
  class LanguageModelMessage(BaseModel):
89
89
  model_config = model_config
90
90
  role: LanguageModelMessageRole
91
- content: Optional[str | list[dict]] = None
91
+ content: str | list[dict] | None = None
92
92
 
93
93
  def __str__(self):
94
- return format_message(self.role.capitalize(), message=self.content, num_tabs=1)
94
+ if not self.content:
95
+ message = ""
96
+ if isinstance(self.content, str):
97
+ message = self.content
98
+ elif isinstance(self.content, list):
99
+ message = json.dumps(self.content)
100
+
101
+ return format_message(self.role.capitalize(), message=message, num_tabs=1)
95
102
 
96
103
 
97
104
  class LanguageModelSystemMessage(LanguageModelMessage):
@@ -112,7 +119,9 @@ class LanguageModelUserMessage(LanguageModelMessage):
112
119
 
113
120
  class LanguageModelAssistantMessage(LanguageModelMessage):
114
121
  role: LanguageModelMessageRole = LanguageModelMessageRole.ASSISTANT
115
- tool_calls: Optional[list[LanguageModelFunctionCall]] = None
122
+ parsed: dict | None = None
123
+ refusal: str | None = None
124
+ tool_calls: list[LanguageModelFunctionCall] | None = None
116
125
 
117
126
  @field_validator("role", mode="before")
118
127
  def set_role(cls, value):
@@ -178,7 +187,7 @@ class LanguageModelStreamResponseMessage(BaseModel):
178
187
  ) # Stream response can return a null previous_message_id if an assistant message is manually added
179
188
  role: LanguageModelMessageRole
180
189
  text: str
181
- original_text: Optional[str] = None
190
+ original_text: str | None = None
182
191
  references: list[dict[str, list | dict | str | int | float | bool]] = [] # type: ignore
183
192
 
184
193
  # TODO make sdk return role in lowercase
@@ -1,7 +1,8 @@
1
1
  import logging
2
- from typing import Optional, cast
2
+ from typing import Optional, Type, cast
3
3
 
4
4
  import unique_sdk
5
+ from pydantic import BaseModel
5
6
 
6
7
  from unique_toolkit._common._base_service import BaseService
7
8
  from unique_toolkit.app.schemas import Event
@@ -37,6 +38,8 @@ class LanguageModelService(BaseService):
37
38
  temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
38
39
  timeout: int = DEFAULT_COMPLETE_TIMEOUT,
39
40
  tools: Optional[list[LanguageModelTool]] = None,
41
+ structured_output_model: Optional[Type[BaseModel]] = None,
42
+ structured_output_enforce_schema: bool = False,
40
43
  other_options: Optional[dict] = None,
41
44
  ):
42
45
  """
@@ -53,12 +56,14 @@ class LanguageModelService(BaseService):
53
56
  Returns:
54
57
  LanguageModelResponse: The LanguageModelResponse object.
55
58
  """
56
- options, model, messages_dict, _ = self.prepare_completion_params_util(
59
+ options, model, messages_dict, _ = self._prepare_completion_params_util(
57
60
  messages=messages,
58
61
  model_name=model_name,
59
62
  temperature=temperature,
60
63
  tools=tools,
61
64
  other_options=other_options,
65
+ structured_output_model=structured_output_model,
66
+ structured_output_enforce_schema=structured_output_enforce_schema,
62
67
  )
63
68
 
64
69
  try:
@@ -77,9 +82,8 @@ class LanguageModelService(BaseService):
77
82
  self.logger.error(f"Error completing: {e}")
78
83
  raise e
79
84
 
80
- @classmethod
81
85
  async def complete_async_util(
82
- cls,
86
+ self,
83
87
  company_id: str,
84
88
  messages: LanguageModelMessages,
85
89
  model_name: LanguageModelName | str,
@@ -87,6 +91,8 @@ class LanguageModelService(BaseService):
87
91
  timeout: int = DEFAULT_COMPLETE_TIMEOUT,
88
92
  tools: Optional[list[LanguageModelTool]] = None,
89
93
  other_options: Optional[dict] = None,
94
+ structured_output_model: Optional[Type[BaseModel]] = None,
95
+ structured_output_enforce_schema: bool = False,
90
96
  logger: Optional[logging.Logger] = logging.getLogger(__name__),
91
97
  ) -> LanguageModelResponse:
92
98
  """
@@ -112,12 +118,14 @@ class LanguageModelService(BaseService):
112
118
  Raises:
113
119
  Exception: If an error occurs during the request, an exception is raised and logged.
114
120
  """
115
- options, model, messages_dict, _ = cls.prepare_completion_params_util(
121
+ options, model, messages_dict, _ = self._prepare_completion_params_util(
116
122
  messages=messages,
117
123
  model_name=model_name,
118
124
  temperature=temperature,
119
125
  tools=tools,
120
126
  other_options=other_options,
127
+ structured_output_model=structured_output_model,
128
+ structured_output_enforce_schema=structured_output_enforce_schema,
121
129
  )
122
130
 
123
131
  try:
@@ -205,7 +213,7 @@ class LanguageModelService(BaseService):
205
213
  The LanguageModelStreamResponse object once the stream has finished.
206
214
  """
207
215
  options, model, messages_dict, search_context = (
208
- self.prepare_completion_params_util(
216
+ self._prepare_completion_params_util(
209
217
  messages=messages,
210
218
  model_name=model_name,
211
219
  temperature=temperature,
@@ -268,7 +276,7 @@ class LanguageModelService(BaseService):
268
276
  The LanguageModelStreamResponse object once the stream has finished.
269
277
  """
270
278
  options, model, messages_dict, search_context = (
271
- self.prepare_completion_params_util(
279
+ self._prepare_completion_params_util(
272
280
  messages=messages,
273
281
  model_name=model_name,
274
282
  temperature=temperature,
@@ -335,15 +343,32 @@ class LanguageModelService(BaseService):
335
343
  ]
336
344
  return options
337
345
 
338
- @classmethod
339
- def prepare_completion_params_util(
340
- cls,
346
+ @staticmethod
347
+ def _add_response_format_to_options(
348
+ options: dict,
349
+ structured_output_model: Type[BaseModel],
350
+ structured_output_enforce_schema: bool = False,
351
+ ) -> dict:
352
+ options["responseFormat"] = {
353
+ "type": "json_schema",
354
+ "json_schema": {
355
+ "name": structured_output_model.__name__,
356
+ "strict": structured_output_enforce_schema,
357
+ "schema": structured_output_model.model_json_schema(),
358
+ },
359
+ }
360
+ return options
361
+
362
+ def _prepare_completion_params_util(
363
+ self,
341
364
  messages: LanguageModelMessages,
342
365
  model_name: LanguageModelName | str,
343
366
  temperature: float,
344
367
  tools: Optional[list[LanguageModelTool]] = None,
345
368
  other_options: Optional[dict] = None,
346
369
  content_chunks: Optional[list[ContentChunk]] = None,
370
+ structured_output_model: Optional[Type[BaseModel]] = None,
371
+ structured_output_enforce_schema: bool = False,
347
372
  ) -> tuple[dict, str, dict, Optional[dict]]:
348
373
  """
349
374
  Prepares common parameters for completion requests.
@@ -356,8 +381,15 @@ class LanguageModelService(BaseService):
356
381
  - search_context (Optional[dict]): Processed content chunks if provided
357
382
  """
358
383
 
359
- options = cls._add_tools_to_options({}, tools)
384
+ options = self._add_tools_to_options({}, tools)
385
+
386
+ if structured_output_model:
387
+ options = self._add_response_format_to_options(
388
+ options, structured_output_model, structured_output_enforce_schema
389
+ )
390
+
360
391
  options["temperature"] = temperature
392
+
361
393
  if other_options:
362
394
  options.update(other_options)
363
395
 
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: unique_toolkit
3
- Version: 0.5.48
3
+ Version: 0.5.50
4
4
  Summary:
5
5
  License: Proprietary
6
6
  Author: Martin Fadler
@@ -100,6 +100,13 @@ All notable changes to this project will be documented in this file.
100
100
  The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
101
101
  and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
102
102
 
103
+ ## [0.5.50] - 2025-01-30
104
+ - Add the possibility to define completion output structure through a pydantic class
105
+
106
+
107
+ ## [0.5.49] - 2025-01-24
108
+ - Add `parsed` and `refusal` to `LanguageModelAssistantMessage` to support structured output
109
+
103
110
  ## [0.5.48] - 2025-01-19
104
111
  - Added the possibility to define tool parameters with a json schema (Useful when generating tool parameters from a pydantic object)
105
112
 
@@ -39,12 +39,12 @@ unique_toolkit/language_model/__init__.py,sha256=hgk5yiFF4SpIcE2QSoki9YknFxmcKnq
39
39
  unique_toolkit/language_model/builder.py,sha256=nsRqWO_2dgFehK5CgtqR5aqXgYUU0QL6mR0lALPrQXM,1898
40
40
  unique_toolkit/language_model/infos.py,sha256=NgoV05ausVWMqrYqgH6i3s7tYG7mejupROIF_bwEGZo,13050
41
41
  unique_toolkit/language_model/prompt.py,sha256=JSawaLjQg3VR-E2fK8engFyJnNdk21zaO8pPIodzN4Q,3991
42
- unique_toolkit/language_model/schemas.py,sha256=DHGUWRITtYV1phwuc4MyOwIZA78hHxfw0C-g-iRVLBw,7198
43
- unique_toolkit/language_model/service.py,sha256=brNCPRA0XxgqHi2rI5i2lyFCkUiw4MNMe1VaR3UgWmY,15500
42
+ unique_toolkit/language_model/schemas.py,sha256=87511yupvea-U6sfKWfelETevNMVPevhj7mEqX5FszU,7461
43
+ unique_toolkit/language_model/service.py,sha256=jBHFeGtPbaOeVBxg4XBwzCLjpkIDDAx_9eW7X_fOibk,16900
44
44
  unique_toolkit/language_model/utils.py,sha256=bPQ4l6_YO71w-zaIPanUUmtbXC1_hCvLK0tAFc3VCRc,1902
45
45
  unique_toolkit/short_term_memory/schemas.py,sha256=OhfcXyF6ACdwIXW45sKzjtZX_gkcJs8FEZXcgQTNenw,1406
46
46
  unique_toolkit/short_term_memory/service.py,sha256=Jd9P72-VvJy7hnqNrjmrmB5BHmsKuOpTiT0Jr-dBbsQ,1682
47
- unique_toolkit-0.5.48.dist-info/LICENSE,sha256=GlN8wHNdh53xwOPg44URnwag6TEolCjoq3YD_KrWgss,193
48
- unique_toolkit-0.5.48.dist-info/METADATA,sha256=P27b6NvkO1GUEnYi486DsECbpXch7kjRrwo-j_e5HVA,15700
49
- unique_toolkit-0.5.48.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
50
- unique_toolkit-0.5.48.dist-info/RECORD,,
47
+ unique_toolkit-0.5.50.dist-info/LICENSE,sha256=GlN8wHNdh53xwOPg44URnwag6TEolCjoq3YD_KrWgss,193
48
+ unique_toolkit-0.5.50.dist-info/METADATA,sha256=ybCuYkNYzgWuZhRbEf_vSlVG1zMXWLaAEH8Es53iy20,15931
49
+ unique_toolkit-0.5.50.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
50
+ unique_toolkit-0.5.50.dist-info/RECORD,,