unique_toolkit 0.7.7__py3-none-any.whl → 1.23.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of unique_toolkit might be problematic; consult the registry's advisory page for details before upgrading.

Files changed (166) hide show
  1. unique_toolkit/__init__.py +28 -1
  2. unique_toolkit/_common/api_calling/human_verification_manager.py +343 -0
  3. unique_toolkit/_common/base_model_type_attribute.py +303 -0
  4. unique_toolkit/_common/chunk_relevancy_sorter/config.py +49 -0
  5. unique_toolkit/_common/chunk_relevancy_sorter/exception.py +5 -0
  6. unique_toolkit/_common/chunk_relevancy_sorter/schemas.py +46 -0
  7. unique_toolkit/_common/chunk_relevancy_sorter/service.py +374 -0
  8. unique_toolkit/_common/chunk_relevancy_sorter/tests/test_service.py +275 -0
  9. unique_toolkit/_common/default_language_model.py +12 -0
  10. unique_toolkit/_common/docx_generator/__init__.py +7 -0
  11. unique_toolkit/_common/docx_generator/config.py +12 -0
  12. unique_toolkit/_common/docx_generator/schemas.py +80 -0
  13. unique_toolkit/_common/docx_generator/service.py +252 -0
  14. unique_toolkit/_common/docx_generator/template/Doc Template.docx +0 -0
  15. unique_toolkit/_common/endpoint_builder.py +305 -0
  16. unique_toolkit/_common/endpoint_requestor.py +430 -0
  17. unique_toolkit/_common/exception.py +24 -0
  18. unique_toolkit/_common/feature_flags/schema.py +9 -0
  19. unique_toolkit/_common/pydantic/rjsf_tags.py +936 -0
  20. unique_toolkit/_common/pydantic_helpers.py +154 -0
  21. unique_toolkit/_common/referencing.py +53 -0
  22. unique_toolkit/_common/string_utilities.py +140 -0
  23. unique_toolkit/_common/tests/test_referencing.py +521 -0
  24. unique_toolkit/_common/tests/test_string_utilities.py +506 -0
  25. unique_toolkit/_common/token/image_token_counting.py +67 -0
  26. unique_toolkit/_common/token/token_counting.py +204 -0
  27. unique_toolkit/_common/utils/__init__.py +1 -0
  28. unique_toolkit/_common/utils/files.py +43 -0
  29. unique_toolkit/_common/utils/structured_output/__init__.py +1 -0
  30. unique_toolkit/_common/utils/structured_output/schema.py +5 -0
  31. unique_toolkit/_common/utils/write_configuration.py +51 -0
  32. unique_toolkit/_common/validators.py +101 -4
  33. unique_toolkit/agentic/__init__.py +1 -0
  34. unique_toolkit/agentic/debug_info_manager/debug_info_manager.py +28 -0
  35. unique_toolkit/agentic/debug_info_manager/test/test_debug_info_manager.py +278 -0
  36. unique_toolkit/agentic/evaluation/config.py +36 -0
  37. unique_toolkit/{evaluators → agentic/evaluation}/context_relevancy/prompts.py +25 -0
  38. unique_toolkit/agentic/evaluation/context_relevancy/schema.py +80 -0
  39. unique_toolkit/agentic/evaluation/context_relevancy/service.py +273 -0
  40. unique_toolkit/agentic/evaluation/evaluation_manager.py +218 -0
  41. unique_toolkit/agentic/evaluation/hallucination/constants.py +61 -0
  42. unique_toolkit/agentic/evaluation/hallucination/hallucination_evaluation.py +111 -0
  43. unique_toolkit/{evaluators → agentic/evaluation}/hallucination/prompts.py +1 -1
  44. unique_toolkit/{evaluators → agentic/evaluation}/hallucination/service.py +16 -15
  45. unique_toolkit/{evaluators → agentic/evaluation}/hallucination/utils.py +30 -20
  46. unique_toolkit/{evaluators → agentic/evaluation}/output_parser.py +20 -2
  47. unique_toolkit/{evaluators → agentic/evaluation}/schemas.py +27 -7
  48. unique_toolkit/agentic/evaluation/tests/test_context_relevancy_service.py +253 -0
  49. unique_toolkit/agentic/evaluation/tests/test_output_parser.py +87 -0
  50. unique_toolkit/agentic/history_manager/history_construction_with_contents.py +297 -0
  51. unique_toolkit/agentic/history_manager/history_manager.py +242 -0
  52. unique_toolkit/agentic/history_manager/loop_token_reducer.py +484 -0
  53. unique_toolkit/agentic/history_manager/utils.py +96 -0
  54. unique_toolkit/agentic/postprocessor/postprocessor_manager.py +212 -0
  55. unique_toolkit/agentic/reference_manager/reference_manager.py +103 -0
  56. unique_toolkit/agentic/responses_api/__init__.py +19 -0
  57. unique_toolkit/agentic/responses_api/postprocessors/code_display.py +63 -0
  58. unique_toolkit/agentic/responses_api/postprocessors/generated_files.py +145 -0
  59. unique_toolkit/agentic/responses_api/stream_handler.py +15 -0
  60. unique_toolkit/agentic/short_term_memory_manager/persistent_short_term_memory_manager.py +141 -0
  61. unique_toolkit/agentic/thinking_manager/thinking_manager.py +103 -0
  62. unique_toolkit/agentic/tools/__init__.py +1 -0
  63. unique_toolkit/agentic/tools/a2a/__init__.py +36 -0
  64. unique_toolkit/agentic/tools/a2a/config.py +17 -0
  65. unique_toolkit/agentic/tools/a2a/evaluation/__init__.py +15 -0
  66. unique_toolkit/agentic/tools/a2a/evaluation/_utils.py +66 -0
  67. unique_toolkit/agentic/tools/a2a/evaluation/config.py +55 -0
  68. unique_toolkit/agentic/tools/a2a/evaluation/evaluator.py +260 -0
  69. unique_toolkit/agentic/tools/a2a/evaluation/summarization_user_message.j2 +9 -0
  70. unique_toolkit/agentic/tools/a2a/manager.py +55 -0
  71. unique_toolkit/agentic/tools/a2a/postprocessing/__init__.py +21 -0
  72. unique_toolkit/agentic/tools/a2a/postprocessing/_display_utils.py +185 -0
  73. unique_toolkit/agentic/tools/a2a/postprocessing/_ref_utils.py +73 -0
  74. unique_toolkit/agentic/tools/a2a/postprocessing/config.py +45 -0
  75. unique_toolkit/agentic/tools/a2a/postprocessing/display.py +180 -0
  76. unique_toolkit/agentic/tools/a2a/postprocessing/references.py +101 -0
  77. unique_toolkit/agentic/tools/a2a/postprocessing/test/test_display_utils.py +1335 -0
  78. unique_toolkit/agentic/tools/a2a/postprocessing/test/test_ref_utils.py +603 -0
  79. unique_toolkit/agentic/tools/a2a/prompts.py +46 -0
  80. unique_toolkit/agentic/tools/a2a/response_watcher/__init__.py +6 -0
  81. unique_toolkit/agentic/tools/a2a/response_watcher/service.py +91 -0
  82. unique_toolkit/agentic/tools/a2a/tool/__init__.py +4 -0
  83. unique_toolkit/agentic/tools/a2a/tool/_memory.py +26 -0
  84. unique_toolkit/agentic/tools/a2a/tool/_schema.py +9 -0
  85. unique_toolkit/agentic/tools/a2a/tool/config.py +73 -0
  86. unique_toolkit/agentic/tools/a2a/tool/service.py +306 -0
  87. unique_toolkit/agentic/tools/agent_chunks_hanlder.py +65 -0
  88. unique_toolkit/agentic/tools/config.py +167 -0
  89. unique_toolkit/agentic/tools/factory.py +44 -0
  90. unique_toolkit/agentic/tools/mcp/__init__.py +4 -0
  91. unique_toolkit/agentic/tools/mcp/manager.py +71 -0
  92. unique_toolkit/agentic/tools/mcp/models.py +28 -0
  93. unique_toolkit/agentic/tools/mcp/tool_wrapper.py +234 -0
  94. unique_toolkit/agentic/tools/openai_builtin/__init__.py +11 -0
  95. unique_toolkit/agentic/tools/openai_builtin/base.py +30 -0
  96. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/__init__.py +8 -0
  97. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/config.py +57 -0
  98. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/service.py +230 -0
  99. unique_toolkit/agentic/tools/openai_builtin/manager.py +62 -0
  100. unique_toolkit/agentic/tools/schemas.py +141 -0
  101. unique_toolkit/agentic/tools/test/test_mcp_manager.py +536 -0
  102. unique_toolkit/agentic/tools/test/test_tool_progress_reporter.py +445 -0
  103. unique_toolkit/agentic/tools/tool.py +183 -0
  104. unique_toolkit/agentic/tools/tool_manager.py +523 -0
  105. unique_toolkit/agentic/tools/tool_progress_reporter.py +285 -0
  106. unique_toolkit/agentic/tools/utils/__init__.py +19 -0
  107. unique_toolkit/agentic/tools/utils/execution/__init__.py +1 -0
  108. unique_toolkit/agentic/tools/utils/execution/execution.py +286 -0
  109. unique_toolkit/agentic/tools/utils/source_handling/__init__.py +0 -0
  110. unique_toolkit/agentic/tools/utils/source_handling/schema.py +21 -0
  111. unique_toolkit/agentic/tools/utils/source_handling/source_formatting.py +207 -0
  112. unique_toolkit/agentic/tools/utils/source_handling/tests/test_source_formatting.py +216 -0
  113. unique_toolkit/app/__init__.py +6 -0
  114. unique_toolkit/app/dev_util.py +180 -0
  115. unique_toolkit/app/init_sdk.py +32 -1
  116. unique_toolkit/app/schemas.py +198 -31
  117. unique_toolkit/app/unique_settings.py +367 -0
  118. unique_toolkit/chat/__init__.py +8 -1
  119. unique_toolkit/chat/deprecated/service.py +232 -0
  120. unique_toolkit/chat/functions.py +642 -77
  121. unique_toolkit/chat/rendering.py +34 -0
  122. unique_toolkit/chat/responses_api.py +461 -0
  123. unique_toolkit/chat/schemas.py +133 -2
  124. unique_toolkit/chat/service.py +115 -767
  125. unique_toolkit/content/functions.py +153 -4
  126. unique_toolkit/content/schemas.py +122 -15
  127. unique_toolkit/content/service.py +278 -44
  128. unique_toolkit/content/smart_rules.py +301 -0
  129. unique_toolkit/content/utils.py +8 -3
  130. unique_toolkit/embedding/service.py +102 -11
  131. unique_toolkit/framework_utilities/__init__.py +1 -0
  132. unique_toolkit/framework_utilities/langchain/client.py +71 -0
  133. unique_toolkit/framework_utilities/langchain/history.py +19 -0
  134. unique_toolkit/framework_utilities/openai/__init__.py +6 -0
  135. unique_toolkit/framework_utilities/openai/client.py +83 -0
  136. unique_toolkit/framework_utilities/openai/message_builder.py +229 -0
  137. unique_toolkit/framework_utilities/utils.py +23 -0
  138. unique_toolkit/language_model/__init__.py +3 -0
  139. unique_toolkit/language_model/builder.py +27 -11
  140. unique_toolkit/language_model/default_language_model.py +3 -0
  141. unique_toolkit/language_model/functions.py +327 -43
  142. unique_toolkit/language_model/infos.py +992 -50
  143. unique_toolkit/language_model/reference.py +242 -0
  144. unique_toolkit/language_model/schemas.py +475 -48
  145. unique_toolkit/language_model/service.py +228 -27
  146. unique_toolkit/protocols/support.py +145 -0
  147. unique_toolkit/services/__init__.py +7 -0
  148. unique_toolkit/services/chat_service.py +1630 -0
  149. unique_toolkit/services/knowledge_base.py +861 -0
  150. unique_toolkit/short_term_memory/service.py +178 -41
  151. unique_toolkit/smart_rules/__init__.py +0 -0
  152. unique_toolkit/smart_rules/compile.py +56 -0
  153. unique_toolkit/test_utilities/events.py +197 -0
  154. {unique_toolkit-0.7.7.dist-info → unique_toolkit-1.23.0.dist-info}/METADATA +606 -7
  155. unique_toolkit-1.23.0.dist-info/RECORD +182 -0
  156. unique_toolkit/evaluators/__init__.py +0 -1
  157. unique_toolkit/evaluators/config.py +0 -35
  158. unique_toolkit/evaluators/constants.py +0 -1
  159. unique_toolkit/evaluators/context_relevancy/constants.py +0 -32
  160. unique_toolkit/evaluators/context_relevancy/service.py +0 -53
  161. unique_toolkit/evaluators/context_relevancy/utils.py +0 -142
  162. unique_toolkit/evaluators/hallucination/constants.py +0 -41
  163. unique_toolkit-0.7.7.dist-info/RECORD +0 -64
  164. /unique_toolkit/{evaluators → agentic/evaluation}/exception.py +0 -0
  165. {unique_toolkit-0.7.7.dist-info → unique_toolkit-1.23.0.dist-info}/LICENSE +0 -0
  166. {unique_toolkit-0.7.7.dist-info → unique_toolkit-1.23.0.dist-info}/WHEEL +0 -0
@@ -1,53 +1,71 @@
1
+ import copy
1
2
  import logging
2
- from typing import Type, cast
3
+ from datetime import UTC, datetime
4
+ from typing import Any, Sequence, cast
3
5
 
6
+ import humps
4
7
  import unique_sdk
8
+ from openai.types.chat import ChatCompletionToolChoiceOptionParam
9
+ from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam
5
10
  from pydantic import BaseModel
6
11
 
7
- from unique_toolkit.content.schemas import ContentChunk
8
- from unique_toolkit.evaluators import DOMAIN_NAME
12
+ from unique_toolkit.chat.schemas import ChatMessage, ChatMessageRole
13
+ from unique_toolkit.content.schemas import ContentChunk, ContentReference
14
+ from unique_toolkit.language_model import (
15
+ LanguageModelMessageRole,
16
+ LanguageModelMessages,
17
+ LanguageModelResponse,
18
+ LanguageModelStreamResponse,
19
+ LanguageModelStreamResponseMessage,
20
+ LanguageModelTool,
21
+ LanguageModelToolDescription,
22
+ )
23
+ from unique_toolkit.language_model.infos import (
24
+ LanguageModelInfo,
25
+ LanguageModelName,
26
+ TemperatureBounds,
27
+ )
28
+ from unique_toolkit.language_model.reference import (
29
+ add_references_to_message,
30
+ )
9
31
 
10
32
  from .constants import (
11
33
  DEFAULT_COMPLETE_TEMPERATURE,
12
34
  DEFAULT_COMPLETE_TIMEOUT,
13
35
  )
14
- from .infos import LanguageModelName
15
- from .schemas import (
16
- LanguageModelMessages,
17
- LanguageModelResponse,
18
- LanguageModelTool,
19
- )
20
36
 
21
- logger = logging.getLogger(f"toolkit.{DOMAIN_NAME}.{__name__}")
37
+ logger = logging.getLogger(f"toolkit.language_model.{__name__}")
22
38
 
23
39
 
24
40
  def complete(
25
41
  company_id: str,
26
- messages: LanguageModelMessages,
42
+ messages: LanguageModelMessages | list[ChatCompletionMessageParam],
27
43
  model_name: LanguageModelName | str,
28
44
  temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
29
45
  timeout: int = DEFAULT_COMPLETE_TIMEOUT,
30
- tools: list[LanguageModelTool] | None = None,
46
+ tools: list[LanguageModelTool | LanguageModelToolDescription] | None = None,
31
47
  other_options: dict | None = None,
32
- structured_output_model: Type[BaseModel] | None = None,
48
+ structured_output_model: type[BaseModel] | None = None,
33
49
  structured_output_enforce_schema: bool = False,
34
50
  ) -> LanguageModelResponse:
35
- """
36
- Calls the completion endpoint synchronously without streaming the response.
51
+ """Call the completion endpoint synchronously without streaming the response.
37
52
 
38
53
  Args:
54
+ ----
39
55
  company_id (str): The company ID associated with the request.
40
56
  messages (LanguageModelMessages): The messages to complete.
41
57
  model_name (LanguageModelName | str): The model name to use for the completion.
42
58
  temperature (float): The temperature setting for the completion. Defaults to 0.
43
59
  timeout (int): The timeout value in milliseconds. Defaults to 240_000.
44
- tools (Optional[list[LanguageModelTool]]): Optional list of tools to include.
60
+ tools (Optional[list[LanguageModelTool | LanguageModelToolDescription ]]): Optional list of tools to include.
45
61
  other_options (Optional[dict]): Additional options to use. Defaults to None.
46
62
 
47
63
  Returns:
64
+ -------
48
65
  LanguageModelResponse: The response object containing the completed result.
66
+
49
67
  """
50
- options, model, messages_dict, _ = _prepare_completion_params_util(
68
+ options, model, messages_dict, _ = _prepare_all_completions_params_util(
51
69
  messages=messages,
52
70
  model_name=model_name,
53
71
  temperature=temperature,
@@ -62,7 +80,7 @@ def complete(
62
80
  company_id=company_id,
63
81
  model=model,
64
82
  messages=cast(
65
- list[unique_sdk.Integrated.ChatCompletionRequestMessage],
83
+ "list[unique_sdk.Integrated.ChatCompletionRequestMessage]",
66
84
  messages_dict,
67
85
  ),
68
86
  timeout=timeout,
@@ -76,38 +94,43 @@ def complete(
76
94
 
77
95
  async def complete_async(
78
96
  company_id: str,
79
- messages: LanguageModelMessages,
97
+ messages: LanguageModelMessages | list[ChatCompletionMessageParam],
80
98
  model_name: LanguageModelName | str,
99
+ user_id: str | None = None,
81
100
  temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
82
101
  timeout: int = DEFAULT_COMPLETE_TIMEOUT,
83
- tools: list[LanguageModelTool] | None = None,
102
+ tools: list[LanguageModelTool | LanguageModelToolDescription] | None = None,
84
103
  other_options: dict | None = None,
85
- structured_output_model: Type[BaseModel] | None = None,
104
+ structured_output_model: type[BaseModel] | None = None,
86
105
  structured_output_enforce_schema: bool = False,
87
106
  ) -> LanguageModelResponse:
88
- """
89
- Calls the completion endpoint asynchronously without streaming the response.
107
+ """Call the completion endpoint asynchronously without streaming the response.
90
108
 
91
109
  This method sends a request to the completion endpoint using the provided messages, model name,
92
110
  temperature, timeout, and optional tools. It returns a `LanguageModelResponse` object containing
93
111
  the completed result.
94
112
 
95
113
  Args:
114
+ ----
96
115
  company_id (str): The company ID associated with the request.
97
116
  messages (LanguageModelMessages): The messages to complete.
98
117
  model_name (LanguageModelName | str): The model name to use for the completion.
99
118
  temperature (float): The temperature setting for the completion. Defaults to 0.
100
119
  timeout (int): The timeout value in milliseconds for the request. Defaults to 240_000.
101
- tools (Optional[list[LanguageModelTool]]): Optional list of tools to include in the request.
120
+ tools (Optional[list[LanguageModelTool | LanguageModelToolDescription ]]): Optional list of tools to include in the request.
102
121
  other_options (Optional[dict]): The other options to use. Defaults to None.
103
122
 
104
123
  Returns:
124
+ -------
105
125
  LanguageModelResponse: The response object containing the completed result.
106
126
 
107
127
  Raises:
108
- Exception: If an error occurs during the request, an exception is raised and logged.
128
+ ------
129
+ Exception: If an error occurs during the request, an exception is raised
130
+ and logged.
131
+
109
132
  """
110
- options, model, messages_dict, _ = _prepare_completion_params_util(
133
+ options, model, messages_dict, _ = _prepare_all_completions_params_util(
111
134
  messages=messages,
112
135
  model_name=model_name,
113
136
  temperature=temperature,
@@ -120,9 +143,10 @@ async def complete_async(
120
143
  try:
121
144
  response = await unique_sdk.ChatCompletion.create_async(
122
145
  company_id=company_id,
146
+ user_id=user_id,
123
147
  model=model,
124
148
  messages=cast(
125
- list[unique_sdk.Integrated.ChatCompletionRequestMessage],
149
+ "list[unique_sdk.Integrated.ChatCompletionRequestMessage]",
126
150
  messages_dict,
127
151
  ),
128
152
  timeout=timeout,
@@ -130,13 +154,13 @@ async def complete_async(
130
154
  )
131
155
  return LanguageModelResponse(**response)
132
156
  except Exception as e:
133
- logger.error(f"Error completing: {e}") # type: ignore
157
+ logger.exception(f"Error completing: {e}")
134
158
  raise e
135
159
 
136
160
 
137
161
  def _add_tools_to_options(
138
162
  options: dict,
139
- tools: list[LanguageModelTool] | None,
163
+ tools: Sequence[LanguageModelTool | LanguageModelToolDescription] | None,
140
164
  ) -> dict:
141
165
  if tools:
142
166
  options["tools"] = [
@@ -149,7 +173,12 @@ def _add_tools_to_options(
149
173
  return options
150
174
 
151
175
 
152
- def _to_search_context(chunks: list[ContentChunk]) -> dict | None:
176
+ SearchContext = list[unique_sdk.Integrated.SearchResult]
177
+
178
+
179
+ def _to_search_context(
180
+ chunks: list[ContentChunk],
181
+ ) -> SearchContext | None:
153
182
  if not chunks:
154
183
  return None
155
184
  return [
@@ -163,14 +192,14 @@ def _to_search_context(chunks: list[ContentChunk]) -> dict | None:
163
192
  endPage=chunk.end_page,
164
193
  order=chunk.order,
165
194
  object=chunk.object,
166
- ) # type: ignore
195
+ )
167
196
  for chunk in chunks
168
197
  ]
169
198
 
170
199
 
171
200
  def _add_response_format_to_options(
172
201
  options: dict,
173
- structured_output_model: Type[BaseModel],
202
+ structured_output_model: type[BaseModel],
174
203
  structured_output_enforce_schema: bool = False,
175
204
  ) -> dict:
176
205
  options["responseFormat"] = {
@@ -188,38 +217,42 @@ def _prepare_completion_params_util(
188
217
  messages: LanguageModelMessages,
189
218
  model_name: LanguageModelName | str,
190
219
  temperature: float,
191
- tools: list[LanguageModelTool] | None = None,
220
+ tools: Sequence[LanguageModelTool | LanguageModelToolDescription] | None = None,
192
221
  other_options: dict | None = None,
193
222
  content_chunks: list[ContentChunk] | None = None,
194
- structured_output_model: Type[BaseModel] | None = None,
223
+ structured_output_model: type[BaseModel] | None = None,
195
224
  structured_output_enforce_schema: bool = False,
196
- ) -> tuple[dict, str, dict, dict | None]:
197
- """
198
- Prepares common parameters for completion requests.
225
+ ) -> tuple[dict, str, dict, SearchContext | None]:
226
+ """Prepare common parameters for completion requests.
199
227
 
200
- Returns:
228
+ Returns
229
+ -------
201
230
  tuple containing:
202
231
  - options (dict): Combined options including tools and temperature
203
232
  - model (str): Resolved model name
204
233
  - messages_dict (dict): Processed messages
205
234
  - search_context (dict | None): Processed content chunks if provided
206
- """
207
235
 
236
+ """
208
237
  options = _add_tools_to_options({}, tools)
238
+
209
239
  if structured_output_model:
210
240
  options = _add_response_format_to_options(
211
- options, structured_output_model, structured_output_enforce_schema
241
+ options,
242
+ structured_output_model,
243
+ structured_output_enforce_schema,
212
244
  )
213
245
  options["temperature"] = temperature
214
246
  if other_options:
215
247
  options.update(other_options)
216
248
 
217
- model = model_name.name if isinstance(model_name, LanguageModelName) else model_name
249
+ model = (
250
+ model_name.value if isinstance(model_name, LanguageModelName) else model_name
251
+ )
218
252
 
219
- # Different methods need different message dump parameters
220
253
  messages_dict = messages.model_dump(
221
254
  exclude_none=True,
222
- by_alias=content_chunks is not None, # Use by_alias for streaming methods
255
+ by_alias=True,
223
256
  )
224
257
 
225
258
  search_context = (
@@ -227,3 +260,254 @@ def _prepare_completion_params_util(
227
260
  )
228
261
 
229
262
  return options, model, messages_dict, search_context
263
+
264
+
265
def _prepare_openai_completion_params_util(
    model_name: LanguageModelName | str,
    temperature: float,
    tools: Sequence[LanguageModelTool | LanguageModelToolDescription] | None = None,
    other_options: dict | None = None,
    content_chunks: list[ContentChunk] | None = None,
    structured_output_model: type[BaseModel] | None = None,
    structured_output_enforce_schema: bool = False,
) -> tuple[dict, str, SearchContext | None]:
    """Prepare common parameters for OpenAI-style completion requests.

    Unlike ``_prepare_completion_params_util`` this variant does not process
    messages; callers using raw OpenAI message dicts handle them separately.

    Returns
    -------
    tuple containing:
        - options (dict): Combined options including tools, response format
          and temperature
        - model (str): Resolved model name
        - search_context (SearchContext | None): Processed content chunks if
          provided

    """
    options = _add_tools_to_options({}, tools)

    if structured_output_model:
        options = _add_response_format_to_options(
            options,
            structured_output_model,
            structured_output_enforce_schema,
        )
    options["temperature"] = temperature
    if other_options:
        # Caller-supplied options win over anything set above.
        options.update(other_options)

    model = (
        model_name.value if isinstance(model_name, LanguageModelName) else model_name
    )

    search_context = (
        _to_search_context(content_chunks) if content_chunks is not None else None
    )

    return options, model, search_context
306
+
307
+
308
def __camelize_keys(data):
    """Return *data* with every dict key camelized (recursively, via humps).

    Nested dicts and lists are converted depth-first; any other value is
    passed through unchanged.
    """
    if isinstance(data, list):
        return [__camelize_keys(entry) for entry in data]
    if not isinstance(data, dict):
        return data
    return {
        humps.camelize(key): __camelize_keys(value) for key, value in data.items()
    }
315
+
316
+
317
+ def _clamp_temperature(
318
+ temperature: float, temperature_bounds: TemperatureBounds
319
+ ) -> float:
320
+ temperature = max(temperature_bounds.min_temperature, temperature)
321
+ temperature = min(temperature_bounds.max_temperature, temperature)
322
+ return round(temperature, 2)
323
+
324
+
325
+ def _prepare_other_options(
326
+ other_options: dict | None,
327
+ default_options: dict,
328
+ ) -> dict:
329
+ options = default_options
330
+ if other_options is not None:
331
+ options.update(other_options)
332
+ return options
333
+
334
+
335
def _prepare_all_completions_params_util(
    messages: LanguageModelMessages | list[ChatCompletionMessageParam],
    model_name: LanguageModelName | str,
    temperature: float,
    tools: Sequence[LanguageModelTool | LanguageModelToolDescription] | None = None,
    other_options: dict | None = None,
    content_chunks: list[ContentChunk] | None = None,
    tool_choice: ChatCompletionToolChoiceOptionParam | None = None,
    structured_output_model: type[BaseModel] | None = None,
    structured_output_enforce_schema: bool = False,
) -> tuple[
    dict,
    str,
    list[unique_sdk.Integrated.ChatCompletionRequestMessage],
    SearchContext | None,
]:
    """Prepare options, model, messages and search context for a completion.

    Accepts either toolkit ``LanguageModelMessages`` or raw OpenAI message
    dicts and dispatches to the matching preparation helper.
    """
    # Deep-copy so the caller's options dict is never mutated below.
    merged_options = copy.deepcopy(other_options)

    if tool_choice is not None:
        if merged_options is None:
            merged_options = {}
        # Backend expects camelCase; an explicit caller value takes priority.
        merged_options.setdefault("toolChoice", tool_choice)

    model_info = None
    if isinstance(model_name, LanguageModelName):
        model_info = LanguageModelInfo.from_name(model_name)
        merged_options = _prepare_other_options(
            merged_options, model_info.default_options
        )

    if isinstance(messages, LanguageModelMessages):
        options, model, messages_dict, search_context = (
            _prepare_completion_params_util(
                messages=messages,
                model_name=model_name,
                temperature=temperature,
                tools=tools,
                content_chunks=content_chunks,
                other_options=merged_options,
                structured_output_model=structured_output_model,
                structured_output_enforce_schema=structured_output_enforce_schema,
            )
        )
    else:
        options, model, search_context = _prepare_openai_completion_params_util(
            model_name=model_name,
            temperature=temperature,
            tools=tools,
            content_chunks=content_chunks,
            other_options=merged_options,
            structured_output_model=structured_output_model,
            structured_output_enforce_schema=structured_output_enforce_schema,
        )
        # Raw OpenAI dicts are snake_case; the backend wants camelCase keys.
        messages_dict = __camelize_keys(messages.copy())

    must_clamp = (
        model_info is not None
        and model_info.temperature_bounds is not None
        and "temperature" in options
    )
    if must_clamp:
        # NOTE(review): clamps the original `temperature` argument, not any
        # override placed in options — confirm this is intentional.
        options["temperature"] = _clamp_temperature(
            temperature, model_info.temperature_bounds
        )

    integrated_messages = cast(
        "list[unique_sdk.Integrated.ChatCompletionRequestMessage]",
        messages_dict,
    )

    return options, model, integrated_messages, search_context
405
+
406
+
407
def complete_with_references(
    company_id: str,
    messages: LanguageModelMessages,
    model_name: LanguageModelName | str,
    content_chunks: list[ContentChunk] | None = None,
    debug_dict: dict | None = None,
    temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
    timeout: int = DEFAULT_COMPLETE_TIMEOUT,
    tools: list[LanguageModelTool | LanguageModelToolDescription] | None = None,
    start_text: str | None = None,
    other_options: dict[str, Any] | None = None,
) -> LanguageModelStreamResponse:
    """Run a non-streaming completion and resolve references in the answer.

    Fix: the original declared ``debug_dict: dict = {}`` — a mutable default
    argument shared across calls. It now defaults to ``None``, matching
    ``complete_with_references_async``. The parameter is currently unused in
    the body.

    Args:
    ----
        company_id (str): The company ID associated with the request.
        messages (LanguageModelMessages): The messages to complete.
        model_name (LanguageModelName | str): The model to use.
        content_chunks (list[ContentChunk] | None): Chunks used to resolve
            references in the answer text. Defaults to None.
        debug_dict (dict | None): Reserved for debug output; unused.
        temperature (float): Sampling temperature.
        timeout (int): Timeout in milliseconds.
        tools (list | None): Optional tools to include.
        start_text (str | None): Optional text prepended to the model output.
        other_options (dict[str, Any] | None): Extra request options.

    Returns:
    -------
        LanguageModelStreamResponse: Stream-style response with references.

    """
    # Use toolkit language model functions for chat completion
    response = complete(
        company_id=company_id,
        model_name=model_name,
        messages=messages,
        temperature=temperature,
        timeout=timeout,
        tools=tools,
        other_options=other_options,
    )

    return _create_language_model_stream_response_with_references(
        response=response,
        content_chunks=content_chunks,
        start_text=start_text,
    )
435
+
436
+
437
async def complete_with_references_async(
    company_id: str,
    user_id: str,
    messages: LanguageModelMessages,
    model_name: LanguageModelName | str,
    content_chunks: list[ContentChunk] | None = None,
    debug_dict: dict | None = None,
    temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
    timeout: int = DEFAULT_COMPLETE_TIMEOUT,
    tools: list[LanguageModelTool | LanguageModelToolDescription] | None = None,
    start_text: str | None = None,
    other_options: dict[str, Any] | None = None,
) -> LanguageModelStreamResponse:
    """Asynchronously run a non-streaming completion and resolve references.

    Delegates the completion to ``complete_async`` and converts the result
    into a stream-style response whose references are resolved against
    *content_chunks*. ``debug_dict`` is currently unused.
    """
    # Use toolkit language model functions for chat completion
    completion = await complete_async(
        company_id=company_id,
        user_id=user_id,
        model_name=model_name,
        messages=messages,
        temperature=temperature,
        timeout=timeout,
        tools=tools,
        other_options=other_options,
    )

    return _create_language_model_stream_response_with_references(
        response=completion,
        content_chunks=content_chunks,
        start_text=start_text,
    )
467
+
468
+
469
def _create_language_model_stream_response_with_references(
    response: LanguageModelResponse,
    content_chunks: list[ContentChunk] | None = None,
    start_text: str | None = None,
) -> LanguageModelStreamResponse:
    """Convert a non-streaming completion into a stream-style response.

    Args:
    ----
        response (LanguageModelResponse): The completed response.
        content_chunks (list[ContentChunk] | None): Chunks used to resolve
            references in the answer text. Defaults to None.
        start_text (str | None): Optional text prepended to the model output.

    Returns:
    -------
        LanguageModelStreamResponse: The message with references applied and
        any tool calls from the first choice.

    Raises:
    ------
        ValueError: If the response content is None or a list.

    """
    content = response.choices[0].message.content
    content_chunks = content_chunks or []

    if content is None:
        raise ValueError("Content is None, which is not supported")
    if isinstance(content, list):
        raise ValueError("Content is a list, which is not supported")
    # BUG FIX: the original `start_text or "" + str(content)` parsed as
    # `start_text or ("" + str(content))`, so a non-empty start_text
    # *replaced* the model output. Parenthesize to prepend it instead.
    content = (start_text or "") + str(content)

    message = ChatMessage(
        id="msg_unknown",
        text=copy.deepcopy(content),
        role=ChatMessageRole.ASSISTANT,
        created_at=datetime.now(UTC),
        chat_id="chat_unknown",
    )

    message, __ = add_references_to_message(
        message=message,
        search_context=content_chunks,
    )

    stream_response_message = LanguageModelStreamResponseMessage(
        id="stream_unknown",
        previous_message_id=None,
        role=LanguageModelMessageRole.ASSISTANT,
        text=message.content or "",
        original_text=content,
        references=[
            ContentReference(**u.model_dump()) for u in message.references or []
        ],
    )

    tool_calls = [r.function for r in response.choices[0].message.tool_calls or []]
    # The schema expects None (not an empty list) when there are no calls.
    tool_calls = tool_calls if len(tool_calls) > 0 else None

    return LanguageModelStreamResponse(
        message=stream_response_message,
        tool_calls=tool_calls,
    )