unique_toolkit 0.7.9__py3-none-any.whl → 1.33.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (190)
  1. unique_toolkit/__init__.py +36 -3
  2. unique_toolkit/_common/api_calling/human_verification_manager.py +357 -0
  3. unique_toolkit/_common/base_model_type_attribute.py +303 -0
  4. unique_toolkit/_common/chunk_relevancy_sorter/config.py +49 -0
  5. unique_toolkit/_common/chunk_relevancy_sorter/exception.py +5 -0
  6. unique_toolkit/_common/chunk_relevancy_sorter/schemas.py +46 -0
  7. unique_toolkit/_common/chunk_relevancy_sorter/service.py +374 -0
  8. unique_toolkit/_common/chunk_relevancy_sorter/tests/test_service.py +275 -0
  9. unique_toolkit/_common/default_language_model.py +12 -0
  10. unique_toolkit/_common/docx_generator/__init__.py +7 -0
  11. unique_toolkit/_common/docx_generator/config.py +12 -0
  12. unique_toolkit/_common/docx_generator/schemas.py +80 -0
  13. unique_toolkit/_common/docx_generator/service.py +225 -0
  14. unique_toolkit/_common/docx_generator/template/Doc Template.docx +0 -0
  15. unique_toolkit/_common/endpoint_builder.py +368 -0
  16. unique_toolkit/_common/endpoint_requestor.py +480 -0
  17. unique_toolkit/_common/exception.py +24 -0
  18. unique_toolkit/_common/experimental/endpoint_builder.py +368 -0
  19. unique_toolkit/_common/experimental/endpoint_requestor.py +488 -0
  20. unique_toolkit/_common/feature_flags/schema.py +9 -0
  21. unique_toolkit/_common/pydantic/rjsf_tags.py +936 -0
  22. unique_toolkit/_common/pydantic_helpers.py +174 -0
  23. unique_toolkit/_common/referencing.py +53 -0
  24. unique_toolkit/_common/string_utilities.py +140 -0
  25. unique_toolkit/_common/tests/test_referencing.py +521 -0
  26. unique_toolkit/_common/tests/test_string_utilities.py +506 -0
  27. unique_toolkit/_common/token/image_token_counting.py +67 -0
  28. unique_toolkit/_common/token/token_counting.py +204 -0
  29. unique_toolkit/_common/utils/__init__.py +1 -0
  30. unique_toolkit/_common/utils/files.py +43 -0
  31. unique_toolkit/_common/utils/image/encode.py +25 -0
  32. unique_toolkit/_common/utils/jinja/helpers.py +10 -0
  33. unique_toolkit/_common/utils/jinja/render.py +18 -0
  34. unique_toolkit/_common/utils/jinja/schema.py +65 -0
  35. unique_toolkit/_common/utils/jinja/utils.py +80 -0
  36. unique_toolkit/_common/utils/structured_output/__init__.py +1 -0
  37. unique_toolkit/_common/utils/structured_output/schema.py +5 -0
  38. unique_toolkit/_common/utils/write_configuration.py +51 -0
  39. unique_toolkit/_common/validators.py +101 -4
  40. unique_toolkit/agentic/__init__.py +1 -0
  41. unique_toolkit/agentic/debug_info_manager/debug_info_manager.py +28 -0
  42. unique_toolkit/agentic/debug_info_manager/test/test_debug_info_manager.py +278 -0
  43. unique_toolkit/agentic/evaluation/config.py +36 -0
  44. unique_toolkit/{evaluators → agentic/evaluation}/context_relevancy/prompts.py +25 -0
  45. unique_toolkit/agentic/evaluation/context_relevancy/schema.py +80 -0
  46. unique_toolkit/agentic/evaluation/context_relevancy/service.py +273 -0
  47. unique_toolkit/agentic/evaluation/evaluation_manager.py +218 -0
  48. unique_toolkit/agentic/evaluation/hallucination/constants.py +61 -0
  49. unique_toolkit/agentic/evaluation/hallucination/hallucination_evaluation.py +112 -0
  50. unique_toolkit/{evaluators → agentic/evaluation}/hallucination/prompts.py +1 -1
  51. unique_toolkit/{evaluators → agentic/evaluation}/hallucination/service.py +20 -16
  52. unique_toolkit/{evaluators → agentic/evaluation}/hallucination/utils.py +32 -21
  53. unique_toolkit/{evaluators → agentic/evaluation}/output_parser.py +20 -2
  54. unique_toolkit/{evaluators → agentic/evaluation}/schemas.py +27 -7
  55. unique_toolkit/agentic/evaluation/tests/test_context_relevancy_service.py +253 -0
  56. unique_toolkit/agentic/evaluation/tests/test_output_parser.py +87 -0
  57. unique_toolkit/agentic/history_manager/history_construction_with_contents.py +298 -0
  58. unique_toolkit/agentic/history_manager/history_manager.py +241 -0
  59. unique_toolkit/agentic/history_manager/loop_token_reducer.py +484 -0
  60. unique_toolkit/agentic/history_manager/utils.py +96 -0
  61. unique_toolkit/agentic/message_log_manager/__init__.py +5 -0
  62. unique_toolkit/agentic/message_log_manager/service.py +93 -0
  63. unique_toolkit/agentic/postprocessor/postprocessor_manager.py +212 -0
  64. unique_toolkit/agentic/reference_manager/reference_manager.py +103 -0
  65. unique_toolkit/agentic/responses_api/__init__.py +19 -0
  66. unique_toolkit/agentic/responses_api/postprocessors/code_display.py +71 -0
  67. unique_toolkit/agentic/responses_api/postprocessors/generated_files.py +297 -0
  68. unique_toolkit/agentic/responses_api/stream_handler.py +15 -0
  69. unique_toolkit/agentic/short_term_memory_manager/persistent_short_term_memory_manager.py +141 -0
  70. unique_toolkit/agentic/thinking_manager/thinking_manager.py +103 -0
  71. unique_toolkit/agentic/tools/__init__.py +1 -0
  72. unique_toolkit/agentic/tools/a2a/__init__.py +36 -0
  73. unique_toolkit/agentic/tools/a2a/config.py +17 -0
  74. unique_toolkit/agentic/tools/a2a/evaluation/__init__.py +15 -0
  75. unique_toolkit/agentic/tools/a2a/evaluation/_utils.py +66 -0
  76. unique_toolkit/agentic/tools/a2a/evaluation/config.py +55 -0
  77. unique_toolkit/agentic/tools/a2a/evaluation/evaluator.py +260 -0
  78. unique_toolkit/agentic/tools/a2a/evaluation/summarization_user_message.j2 +9 -0
  79. unique_toolkit/agentic/tools/a2a/manager.py +55 -0
  80. unique_toolkit/agentic/tools/a2a/postprocessing/__init__.py +21 -0
  81. unique_toolkit/agentic/tools/a2a/postprocessing/_display_utils.py +240 -0
  82. unique_toolkit/agentic/tools/a2a/postprocessing/_ref_utils.py +84 -0
  83. unique_toolkit/agentic/tools/a2a/postprocessing/config.py +78 -0
  84. unique_toolkit/agentic/tools/a2a/postprocessing/display.py +264 -0
  85. unique_toolkit/agentic/tools/a2a/postprocessing/references.py +101 -0
  86. unique_toolkit/agentic/tools/a2a/postprocessing/test/test_display.py +421 -0
  87. unique_toolkit/agentic/tools/a2a/postprocessing/test/test_display_utils.py +2103 -0
  88. unique_toolkit/agentic/tools/a2a/postprocessing/test/test_ref_utils.py +603 -0
  89. unique_toolkit/agentic/tools/a2a/prompts.py +46 -0
  90. unique_toolkit/agentic/tools/a2a/response_watcher/__init__.py +6 -0
  91. unique_toolkit/agentic/tools/a2a/response_watcher/service.py +91 -0
  92. unique_toolkit/agentic/tools/a2a/tool/__init__.py +4 -0
  93. unique_toolkit/agentic/tools/a2a/tool/_memory.py +26 -0
  94. unique_toolkit/agentic/tools/a2a/tool/_schema.py +9 -0
  95. unique_toolkit/agentic/tools/a2a/tool/config.py +158 -0
  96. unique_toolkit/agentic/tools/a2a/tool/service.py +393 -0
  97. unique_toolkit/agentic/tools/agent_chunks_hanlder.py +65 -0
  98. unique_toolkit/agentic/tools/config.py +128 -0
  99. unique_toolkit/agentic/tools/factory.py +44 -0
  100. unique_toolkit/agentic/tools/mcp/__init__.py +4 -0
  101. unique_toolkit/agentic/tools/mcp/manager.py +71 -0
  102. unique_toolkit/agentic/tools/mcp/models.py +28 -0
  103. unique_toolkit/agentic/tools/mcp/tool_wrapper.py +234 -0
  104. unique_toolkit/agentic/tools/openai_builtin/__init__.py +11 -0
  105. unique_toolkit/agentic/tools/openai_builtin/base.py +46 -0
  106. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/__init__.py +8 -0
  107. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/config.py +88 -0
  108. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/service.py +250 -0
  109. unique_toolkit/agentic/tools/openai_builtin/manager.py +79 -0
  110. unique_toolkit/agentic/tools/schemas.py +145 -0
  111. unique_toolkit/agentic/tools/test/test_mcp_manager.py +536 -0
  112. unique_toolkit/agentic/tools/test/test_tool_progress_reporter.py +445 -0
  113. unique_toolkit/agentic/tools/tool.py +187 -0
  114. unique_toolkit/agentic/tools/tool_manager.py +492 -0
  115. unique_toolkit/agentic/tools/tool_progress_reporter.py +285 -0
  116. unique_toolkit/agentic/tools/utils/__init__.py +19 -0
  117. unique_toolkit/agentic/tools/utils/execution/__init__.py +1 -0
  118. unique_toolkit/agentic/tools/utils/execution/execution.py +286 -0
  119. unique_toolkit/agentic/tools/utils/source_handling/__init__.py +0 -0
  120. unique_toolkit/agentic/tools/utils/source_handling/schema.py +21 -0
  121. unique_toolkit/agentic/tools/utils/source_handling/source_formatting.py +207 -0
  122. unique_toolkit/agentic/tools/utils/source_handling/tests/test_source_formatting.py +216 -0
  123. unique_toolkit/app/__init__.py +9 -0
  124. unique_toolkit/app/dev_util.py +180 -0
  125. unique_toolkit/app/fast_api_factory.py +131 -0
  126. unique_toolkit/app/init_sdk.py +32 -1
  127. unique_toolkit/app/schemas.py +206 -31
  128. unique_toolkit/app/unique_settings.py +367 -0
  129. unique_toolkit/app/webhook.py +77 -0
  130. unique_toolkit/chat/__init__.py +8 -1
  131. unique_toolkit/chat/deprecated/service.py +232 -0
  132. unique_toolkit/chat/functions.py +648 -78
  133. unique_toolkit/chat/rendering.py +34 -0
  134. unique_toolkit/chat/responses_api.py +461 -0
  135. unique_toolkit/chat/schemas.py +134 -2
  136. unique_toolkit/chat/service.py +115 -767
  137. unique_toolkit/content/functions.py +353 -8
  138. unique_toolkit/content/schemas.py +128 -15
  139. unique_toolkit/content/service.py +321 -45
  140. unique_toolkit/content/smart_rules.py +301 -0
  141. unique_toolkit/content/utils.py +10 -3
  142. unique_toolkit/data_extraction/README.md +96 -0
  143. unique_toolkit/data_extraction/__init__.py +11 -0
  144. unique_toolkit/data_extraction/augmented/__init__.py +5 -0
  145. unique_toolkit/data_extraction/augmented/service.py +93 -0
  146. unique_toolkit/data_extraction/base.py +25 -0
  147. unique_toolkit/data_extraction/basic/__init__.py +11 -0
  148. unique_toolkit/data_extraction/basic/config.py +18 -0
  149. unique_toolkit/data_extraction/basic/prompt.py +13 -0
  150. unique_toolkit/data_extraction/basic/service.py +55 -0
  151. unique_toolkit/embedding/service.py +103 -12
  152. unique_toolkit/framework_utilities/__init__.py +1 -0
  153. unique_toolkit/framework_utilities/langchain/__init__.py +10 -0
  154. unique_toolkit/framework_utilities/langchain/client.py +71 -0
  155. unique_toolkit/framework_utilities/langchain/history.py +19 -0
  156. unique_toolkit/framework_utilities/openai/__init__.py +6 -0
  157. unique_toolkit/framework_utilities/openai/client.py +84 -0
  158. unique_toolkit/framework_utilities/openai/message_builder.py +229 -0
  159. unique_toolkit/framework_utilities/utils.py +23 -0
  160. unique_toolkit/language_model/__init__.py +3 -0
  161. unique_toolkit/language_model/_responses_api_utils.py +93 -0
  162. unique_toolkit/language_model/builder.py +27 -11
  163. unique_toolkit/language_model/default_language_model.py +3 -0
  164. unique_toolkit/language_model/functions.py +345 -43
  165. unique_toolkit/language_model/infos.py +1288 -46
  166. unique_toolkit/language_model/reference.py +242 -0
  167. unique_toolkit/language_model/schemas.py +481 -49
  168. unique_toolkit/language_model/service.py +229 -28
  169. unique_toolkit/protocols/support.py +145 -0
  170. unique_toolkit/services/__init__.py +7 -0
  171. unique_toolkit/services/chat_service.py +1631 -0
  172. unique_toolkit/services/knowledge_base.py +1094 -0
  173. unique_toolkit/short_term_memory/service.py +178 -41
  174. unique_toolkit/smart_rules/__init__.py +0 -0
  175. unique_toolkit/smart_rules/compile.py +56 -0
  176. unique_toolkit/test_utilities/events.py +197 -0
  177. unique_toolkit-1.33.3.dist-info/METADATA +1145 -0
  178. unique_toolkit-1.33.3.dist-info/RECORD +205 -0
  179. unique_toolkit/evaluators/__init__.py +0 -1
  180. unique_toolkit/evaluators/config.py +0 -35
  181. unique_toolkit/evaluators/constants.py +0 -1
  182. unique_toolkit/evaluators/context_relevancy/constants.py +0 -32
  183. unique_toolkit/evaluators/context_relevancy/service.py +0 -53
  184. unique_toolkit/evaluators/context_relevancy/utils.py +0 -142
  185. unique_toolkit/evaluators/hallucination/constants.py +0 -41
  186. unique_toolkit-0.7.9.dist-info/METADATA +0 -413
  187. unique_toolkit-0.7.9.dist-info/RECORD +0 -64
  188. /unique_toolkit/{evaluators → agentic/evaluation}/exception.py +0 -0
  189. {unique_toolkit-0.7.9.dist-info → unique_toolkit-1.33.3.dist-info}/LICENSE +0 -0
  190. {unique_toolkit-0.7.9.dist-info → unique_toolkit-1.33.3.dist-info}/WHEEL +0 -0
@@ -0,0 +1,34 @@
1
+ from urllib.parse import quote
2
+
3
+
4
def create_prompt_button_string(
    *,
    button_text: str,
    next_user_message: str,
) -> str:
    """
    Create a prompt button string.

    Args:
        button_text: The text of the button.
        next_user_message: The message to send when the button is clicked.

    Returns:
        A markdown-style link that the chat UI renders as a prompt button.
        Clicking it inserts `next_user_message` into the user prompt window.
    """
    # URL-encode so special characters survive the markdown link target.
    encoded_message = quote(next_user_message)
    return f"[{button_text}](https://prompt={encoded_message})"
22
+
23
+
24
def create_latex_formula_string(latex_expression: str) -> str:
    """
    Create a LaTeX display-formula string.

    Args:
        latex_expression: The LaTeX expression to wrap.

    Returns:
        The expression wrapped in ``\\[ ... \\]`` display-math delimiters.
    """
    return "\\[" + latex_expression + "\\]"
@@ -0,0 +1,461 @@
1
+ import logging
2
+ from typing import Any, NamedTuple, Sequence
3
+
4
+ import unique_sdk
5
+ from openai.types.responses import (
6
+ ResponseIncludable,
7
+ ResponseInputItemParam,
8
+ ResponseInputParam,
9
+ ResponseOutputItem,
10
+ ResponseTextConfigParam,
11
+ ToolParam,
12
+ response_create_params,
13
+ )
14
+ from openai.types.shared_params import Metadata, Reasoning
15
+ from pydantic import BaseModel, TypeAdapter, ValidationError
16
+
17
+ from unique_toolkit.agentic.tools.utils.execution.execution import (
18
+ failsafe,
19
+ )
20
+ from unique_toolkit.content.schemas import ContentChunk
21
+ from unique_toolkit.language_model.constants import (
22
+ DEFAULT_COMPLETE_TEMPERATURE,
23
+ )
24
+ from unique_toolkit.language_model.functions import (
25
+ SearchContext,
26
+ _clamp_temperature,
27
+ _to_search_context,
28
+ )
29
+ from unique_toolkit.language_model.infos import (
30
+ LanguageModelInfo,
31
+ LanguageModelName,
32
+ )
33
+ from unique_toolkit.language_model.schemas import (
34
+ LanguageModelAssistantMessage,
35
+ LanguageModelMessage,
36
+ LanguageModelMessageOptions,
37
+ LanguageModelMessageRole,
38
+ LanguageModelMessages,
39
+ LanguageModelSystemMessage,
40
+ LanguageModelToolDescription,
41
+ LanguageModelToolMessage,
42
+ LanguageModelUserMessage,
43
+ ResponsesLanguageModelStreamResponse,
44
+ )
45
+
46
+ logger = logging.getLogger(__name__)
47
+
48
+
49
def _convert_tools_to_openai(
    tools: Sequence[LanguageModelToolDescription | ToolParam],
) -> list[ToolParam]:
    """Normalize a mixed tool sequence into OpenAI ``ToolParam`` dicts.

    Toolkit tool descriptions are converted via their Responses-mode
    serializer; anything else is assumed to already be a ``ToolParam``
    and is passed through unchanged.
    """
    return [
        tool.to_openai(mode="responses")
        if isinstance(tool, LanguageModelToolDescription)
        else tool
        for tool in tools
    ]
59
+
60
+
61
+ def _convert_message_to_openai(
62
+ message: LanguageModelMessageOptions,
63
+ ) -> ResponseInputParam:
64
+ res = []
65
+ match message:
66
+ case LanguageModelAssistantMessage():
67
+ return message.to_openai(mode="responses") # type: ignore
68
+ case (
69
+ LanguageModelUserMessage()
70
+ | LanguageModelSystemMessage()
71
+ | LanguageModelToolMessage()
72
+ ):
73
+ return [message.to_openai(mode="responses")]
74
+ case _:
75
+ return _convert_message_to_openai(_convert_to_specific_message(message))
76
+ return res
77
+
78
+
79
def _convert_to_specific_message(
    message: LanguageModelMessage,
) -> "LanguageModelSystemMessage | LanguageModelUserMessage | LanguageModelAssistantMessage":
    """Promote a generic message to the concrete class for its role.

    Raises:
        ValueError: for the ``tool`` role, which carries extra fields and
            must be built as a ``LanguageModelToolMessage`` directly.
    """
    role_to_class = {
        LanguageModelMessageRole.SYSTEM: LanguageModelSystemMessage,
        LanguageModelMessageRole.USER: LanguageModelUserMessage,
        LanguageModelMessageRole.ASSISTANT: LanguageModelAssistantMessage,
    }
    message_cls = role_to_class.get(message.role)
    if message_cls is None:
        raise ValueError(
            "Cannot convert message with role `tool`. Please use `LanguageModelToolMessage` instead."
        )
    return message_cls(content=message.content)
93
+
94
+
95
def _convert_messages_to_openai(
    messages: Sequence[
        ResponseInputItemParam | LanguageModelMessageOptions | ResponseOutputItem
    ],
) -> ResponseInputParam:
    """Flatten a heterogeneous message sequence into Responses input items.

    Toolkit messages are converted (possibly into several items); plain
    dicts are OpenAI input params and pass through untouched; everything
    else is expected to be an OpenAI *output* pydantic model and is dumped
    back to dict form so it can be replayed as input.
    """
    converted = []
    for item in messages:
        if isinstance(item, LanguageModelMessageOptions):
            converted.extend(_convert_message_to_openai(item))
        elif isinstance(item, dict):
            # OpenAI uses dicts for input params and BaseModel for outputs.
            converted.append(item)
        else:
            assert isinstance(item, BaseModel)
            converted.append(item.model_dump(exclude_defaults=True))
    return converted
113
+
114
+
115
class _ResponsesParams(NamedTuple):
    """Normalized parameter bundle produced by `_prepare_responses_params_util`
    and consumed by `_prepare_responses_args`. Field order is positional —
    do not reorder."""

    temperature: float
    model_name: str
    search_context: SearchContext | None
    messages: str | ResponseInputParam
    tools: list[ToolParam] | None
    reasoning: Reasoning | None
    text: ResponseTextConfigParam | None
123
+
124
+
125
def _prepare_responses_params_util(
    model_name: LanguageModelName | str,
    content_chunks: list[ContentChunk] | None,
    temperature: float,
    tools: Sequence[LanguageModelToolDescription | ToolParam] | None,
    messages: str
    | LanguageModelMessages
    | Sequence[
        ResponseInputItemParam | LanguageModelMessageOptions | ResponseOutputItem
    ],
    reasoning: Reasoning | None,
    text: ResponseTextConfigParam | None,
    other_options: dict | None = None,
) -> _ResponsesParams:
    """Normalize heterogeneous call inputs into a `_ResponsesParams` bundle.

    Steps, in order:
    - Build a search context from content chunks (if any).
    - Resolve the model identifier; for known models, clamp the temperature
      to the model's bounds and fall back to the model's default
      `reasoning_effort` when no reasoning config was supplied.
    - Merge reasoning/verbosity: explicit keyword arguments take precedence
      over values found in `other_options`.
    - Downgrade `minimal` reasoning effort to `low` when a code-interpreter
      tool is requested (the combination is rejected upstream).
    - Convert messages (toolkit objects, OpenAI dicts/models, or a raw
      string) into Responses-API input format.
    """
    search_context = (
        _to_search_context(content_chunks) if content_chunks is not None else None
    )

    # LanguageModelName is an enum; plain strings pass through unchanged.
    model = model_name.name if isinstance(model_name, LanguageModelName) else model_name

    tools_res = _convert_tools_to_openai(tools) if tools is not None else None

    if other_options is not None:
        # Keyword arguments take precedence over values in other_options.
        reasoning = reasoning or _attempt_extract_reasoning_from_options(other_options)
        text = text or _attempt_extract_verbosity_from_options(other_options)

    if isinstance(model_name, LanguageModelName):
        model_info = LanguageModelInfo.from_name(model_name)

        # `temperature is not None` is defensive — the annotation says float,
        # but callers may pass None at runtime. TODO(review): confirm.
        if model_info.temperature_bounds is not None and temperature is not None:
            temperature = _clamp_temperature(temperature, model_info.temperature_bounds)

        # Apply the model's default reasoning effort only when nothing
        # explicit was provided (keyword arg or other_options).
        if (
            reasoning is None
            and model_info.default_options is not None
            and "reasoning_effort" in model_info.default_options
        ):
            reasoning = Reasoning(effort=model_info.default_options["reasoning_effort"])

    if (
        reasoning is not None
        and tools_res is not None
        and any(tool["type"] == "code_interpreter" for tool in tools_res)
        and "effort" in reasoning
        and reasoning["effort"] == "minimal"
    ):
        logger.warning(
            "Code interpreter cannot be used with `minimal` effort. Switching to `low`."
        )
        # Code interpreter cannot be used with minimal effort.
        reasoning["effort"] = "low"

    messages_res = None
    if isinstance(messages, LanguageModelMessages):
        messages_res = _convert_messages_to_openai(messages.root)
    elif isinstance(messages, list):
        messages_res = _convert_messages_to_openai(messages)
    else:
        assert isinstance(messages, str)
        messages_res = messages

    return _ResponsesParams(
        temperature, model, search_context, messages_res, tools_res, reasoning, text
    )
191
+
192
+
193
@failsafe(
    failure_return_value=None,
    exceptions=(ValidationError,),
    log_exc_info=False,
    logger=logger,
)
def _attempt_extract_reasoning_from_options(options: dict) -> Reasoning | None:
    """Best-effort extraction of a `Reasoning` config from a raw options dict.

    Accepts the Responses-API shape (`reasoning`) and the Completions-API
    shorthands (`reasoning_effort` / `reasoningEffort`). Returns None when
    no reasoning-related key is present; validation errors are swallowed by
    the `failsafe` decorator and also yield None.
    """
    reasoning = None

    # Responses API
    if "reasoning" in options:
        reasoning = options["reasoning"]

    # Completions API (snake_case and camelCase variants)
    elif "reasoning_effort" in options:
        reasoning = {"effort": options["reasoning_effort"]}
    # BUGFIX: this was a bare `if`, letting the camelCase key silently
    # override an explicit `reasoning` entry found above.
    elif "reasoningEffort" in options:
        reasoning = {"effort": options["reasoningEffort"]}

    if reasoning is not None:
        return TypeAdapter(Reasoning).validate_python(reasoning)

    return None
216
+
217
+
218
@failsafe(
    failure_return_value=None,
    exceptions=(ValidationError,),
    log_exc_info=False,
    logger=logger,
)
def _attempt_extract_verbosity_from_options(
    options: dict,
) -> ResponseTextConfigParam | None:
    """Best-effort extraction of a text/verbosity config from raw options.

    Understands the Responses-API `text` shape and the Completions-API
    `verbosity` shorthand. Returns None when neither key is present;
    validation errors are swallowed by the `failsafe` decorator.
    """
    text_config = None

    if "text" in options:  # Responses API
        text_config = options["text"]
    elif "verbosity" in options:  # Completions API
        text_config = {"verbosity": options["verbosity"]}

    if text_config is None:
        return None

    return TypeAdapter(ResponseTextConfigParam).validate_python(text_config)
241
+
242
+
243
def _prepare_responses_args(
    company_id: str,
    user_id: str,
    assistant_message_id: str,
    user_message_id: str,
    chat_id: str,
    assistant_id: str,
    params: _ResponsesParams,
    debug_info: dict | None,
    start_text: str | None,
    include: list[ResponseIncludable] | None,
    instructions: str | None,
    max_output_tokens: int | None,
    metadata: Metadata | None,
    parallel_tool_calls: bool | None,
    tool_choice: response_create_params.ToolChoice | None,
    top_p: float | None,
    other_options: dict | None = None,
) -> dict[str, Any]:
    """Assemble keyword arguments for ``unique_sdk.Integrated.responses_stream``.

    Top-level keys address the Unique backend (mostly camelCase); the nested
    ``options`` dict carries pass-through OpenAI Responses parameters.
    Entries that are None are omitted entirely rather than sent as null.
    """
    args: dict[str, Any] = {
        "company_id": company_id,
        "user_id": user_id,
        "model": params.model_name,
    }

    if params.search_context is not None:
        args["searchContext"] = params.search_context

    args["chatId"] = chat_id
    args["assistantId"] = assistant_id
    args["assistantMessageId"] = assistant_message_id
    args["userMessageId"] = user_message_id

    if debug_info is not None:
        args["debugInfo"] = debug_info
    if start_text is not None:
        args["startText"] = start_text

    args["input"] = params.messages

    # OpenAI pass-through options: keep only the parameters actually set.
    candidates: dict[str, Any] = {
        "temperature": params.temperature,
        "reasoning": params.reasoning,
        "text": params.text,
        "include": include,
        "instructions": instructions,
        "max_output_tokens": max_output_tokens,
        "metadata": metadata,
        "parallel_tool_calls": parallel_tool_calls,
        "tool_choice": tool_choice,
        "tools": params.tools,
        "top_p": top_p,
    }
    openai_options: unique_sdk.Integrated.CreateStreamResponsesOpenaiParams = {
        key: value
        for key, value in candidates.items()
        if value is not None
    }  # type: ignore

    # Allow any other openai.resources.responses.Response.create options.
    if other_options is not None:
        openai_options.update(other_options)  # type: ignore

    args["options"] = openai_options

    return args
326
+
327
+
328
def stream_responses_with_references(
    *,
    company_id: str,
    user_id: str,
    assistant_message_id: str,
    user_message_id: str,
    chat_id: str,
    assistant_id: str,
    model_name: LanguageModelName | str,
    messages: str
    | LanguageModelMessages
    | Sequence[
        ResponseInputItemParam | LanguageModelMessageOptions | ResponseOutputItem
    ],
    content_chunks: list[ContentChunk] | None = None,
    tools: Sequence[LanguageModelToolDescription | ToolParam] | None = None,
    temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
    debug_info: dict | None = None,
    start_text: str | None = None,
    include: list[ResponseIncludable] | None = None,
    instructions: str | None = None,
    max_output_tokens: int | None = None,
    metadata: Metadata | None = None,
    parallel_tool_calls: bool | None = None,
    text: ResponseTextConfigParam | None = None,
    tool_choice: response_create_params.ToolChoice | None = None,
    top_p: float | None = None,
    reasoning: Reasoning | None = None,
    other_options: dict | None = None,
) -> ResponsesLanguageModelStreamResponse:
    """Stream an OpenAI Responses-API completion through the Unique backend.

    Normalizes toolkit messages/tools into OpenAI wire format and merges
    reasoning/verbosity settings (explicit keyword arguments take precedence
    over entries in `other_options`), then delegates to
    `unique_sdk.Integrated.responses_stream` and validates the raw result
    into a `ResponsesLanguageModelStreamResponse`.

    Args mirror `unique_sdk.Integrated.responses_stream`; `content_chunks`
    become the reference search context, `other_options` passes through any
    additional `responses.create` parameters.

    Raises:
        pydantic.ValidationError: if the SDK response does not match
            `ResponsesLanguageModelStreamResponse`.
    """
    # Normalize messages/tools/reasoning into the shared parameter bundle.
    responses_params = _prepare_responses_params_util(
        model_name=model_name,
        content_chunks=content_chunks,
        temperature=temperature,
        tools=tools,
        messages=messages,
        reasoning=reasoning,
        text=text,
        other_options=other_options,
    )

    responses_args = _prepare_responses_args(
        company_id=company_id,
        user_id=user_id,
        assistant_message_id=assistant_message_id,
        user_message_id=user_message_id,
        chat_id=chat_id,
        assistant_id=assistant_id,
        params=responses_params,
        debug_info=debug_info,
        start_text=start_text,
        include=include,
        instructions=instructions,
        max_output_tokens=max_output_tokens,
        metadata=metadata,
        parallel_tool_calls=parallel_tool_calls,
        tool_choice=tool_choice,
        top_p=top_p,
        other_options=other_options,
    )

    return ResponsesLanguageModelStreamResponse.model_validate(
        unique_sdk.Integrated.responses_stream(
            **responses_args,
        )
    )
394
+
395
+
396
async def stream_responses_with_references_async(
    *,
    company_id: str,
    user_id: str,
    assistant_message_id: str,
    user_message_id: str,
    chat_id: str,
    assistant_id: str,
    model_name: LanguageModelName | str,
    messages: str
    | LanguageModelMessages
    | Sequence[
        ResponseInputItemParam | LanguageModelMessageOptions | ResponseOutputItem
    ],
    content_chunks: list[ContentChunk] | None = None,
    tools: Sequence[LanguageModelToolDescription | ToolParam] | None = None,
    temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
    debug_info: dict | None = None,
    start_text: str | None = None,
    include: list[ResponseIncludable] | None = None,
    instructions: str | None = None,
    max_output_tokens: int | None = None,
    metadata: Metadata | None = None,
    parallel_tool_calls: bool | None = None,
    text: ResponseTextConfigParam | None = None,
    tool_choice: response_create_params.ToolChoice | None = None,
    top_p: float | None = None,
    reasoning: Reasoning | None = None,
    other_options: dict | None = None,
) -> ResponsesLanguageModelStreamResponse:
    """Async variant of `stream_responses_with_references`.

    Identical preparation pipeline (message/tool normalization, reasoning and
    verbosity merging with keyword-argument precedence), but awaits
    `unique_sdk.Integrated.responses_stream_async` instead of the blocking
    call. See the sync function for full parameter documentation.

    Raises:
        pydantic.ValidationError: if the SDK response does not match
            `ResponsesLanguageModelStreamResponse`.
    """
    responses_params = _prepare_responses_params_util(
        model_name=model_name,
        content_chunks=content_chunks,
        temperature=temperature,
        tools=tools,
        messages=messages,
        reasoning=reasoning,
        text=text,
        other_options=other_options,
    )

    responses_args = _prepare_responses_args(
        company_id=company_id,
        user_id=user_id,
        assistant_message_id=assistant_message_id,
        user_message_id=user_message_id,
        chat_id=chat_id,
        assistant_id=assistant_id,
        params=responses_params,
        debug_info=debug_info,
        start_text=start_text,
        include=include,
        instructions=instructions,
        max_output_tokens=max_output_tokens,
        metadata=metadata,
        parallel_tool_calls=parallel_tool_calls,
        tool_choice=tool_choice,
        top_p=top_p,
        other_options=other_options,
    )

    return ResponsesLanguageModelStreamResponse.model_validate(
        await unique_sdk.Integrated.responses_stream_async(
            **responses_args,
        )
    )
@@ -1,7 +1,19 @@
1
1
  from datetime import datetime
2
2
  from enum import StrEnum
3
+ from typing import Literal
3
4
 
4
5
  from humps import camelize
6
+ from openai.types.chat import (
7
+ ChatCompletionAssistantMessageParam,
8
+ ChatCompletionMessageParam,
9
+ ChatCompletionUserMessageParam,
10
+ )
11
+ from openai.types.chat.chat_completion_message_function_tool_call_param import (
12
+ ChatCompletionMessageFunctionToolCallParam,
13
+ )
14
+ from openai.types.chat.chat_completion_message_function_tool_call_param import (
15
+ Function as OpenAIFunction,
16
+ )
5
17
  from pydantic import (
6
18
  BaseModel,
7
19
  ConfigDict,
@@ -10,16 +22,20 @@ from pydantic import (
10
22
  model_validator,
11
23
  )
12
24
 
25
+ from unique_toolkit.content.schemas import ContentReference
26
+
13
27
  # set config to convert camelCase to snake_case
14
28
  model_config = ConfigDict(
15
- alias_generator=camelize, populate_by_name=True, arbitrary_types_allowed=True
29
+ alias_generator=camelize,
30
+ populate_by_name=True,
16
31
  )
17
32
 
18
33
 
19
34
class ChatMessageRole(StrEnum):
    """Author role of a chat message as stored by the chat backend."""

    USER = "user"
    ASSISTANT = "assistant"
    TOOL = "tool"  # TODO: Unused according @unique-fabian. To be removed in separate PR
    SYSTEM = "system"  # Note: These messages are appended by the backend and should not be confused with the LLM's system message.
23
39
 
24
40
 
25
41
  class Function(BaseModel):
@@ -28,6 +44,12 @@ class Function(BaseModel):
28
44
  name: str
29
45
  arguments: str
30
46
 
47
+ def to_openai(self) -> OpenAIFunction:
48
+ return OpenAIFunction(
49
+ arguments=self.arguments,
50
+ name=self.name,
51
+ )
52
+
31
53
 
32
54
  class ToolCall(BaseModel):
33
55
  model_config = model_config
@@ -36,8 +58,16 @@ class ToolCall(BaseModel):
36
58
  type: str
37
59
  function: Function
38
60
 
61
+ def to_openai_param(self) -> ChatCompletionMessageFunctionToolCallParam:
62
+ return ChatCompletionMessageFunctionToolCallParam(
63
+ id=self.id,
64
+ function=self.function.to_openai(),
65
+ type="function",
66
+ )
67
+
39
68
 
40
69
  class ChatMessage(BaseModel):
70
+ # TODO: The below seems not to be True anymore @irina-unique. To be checked in separate PR
41
71
  # This model should strictly meets https://github.com/Unique-AG/monorepo/blob/master/node/apps/node-chat/src/public-api/2023-12-06/dtos/message/public-message.dto.ts
42
72
  model_config = model_config
43
73
 
@@ -54,6 +84,7 @@ class ChatMessage(BaseModel):
54
84
  created_at: datetime | None = None
55
85
  completed_at: datetime | None = None
56
86
  updated_at: datetime | None = None
87
+ references: list[ContentReference] | None = None
57
88
 
58
89
  # TODO make sdk return role consistently in lowercase
59
90
  # Currently needed as sdk returns role in uppercase
@@ -68,6 +99,38 @@ class ChatMessage(BaseModel):
68
99
  raise ValueError("tool_call_ids is required when role is 'tool'")
69
100
  return self
70
101
 
102
+ def to_openai_param(self) -> ChatCompletionMessageParam:
103
+ match self.role:
104
+ case ChatMessageRole.USER:
105
+ return ChatCompletionUserMessageParam(
106
+ role="user",
107
+ content=self.content or "",
108
+ )
109
+
110
+ case ChatMessageRole.ASSISTANT:
111
+ if self.tool_calls:
112
+ assistant_message = ChatCompletionAssistantMessageParam(
113
+ role="assistant",
114
+ audio=None,
115
+ content=self.content or "",
116
+ function_call=None,
117
+ refusal=None,
118
+ tool_calls=[t.to_openai_param() for t in self.tool_calls],
119
+ )
120
+ else:
121
+ assistant_message = ChatCompletionAssistantMessageParam(
122
+ role="assistant",
123
+ audio=None,
124
+ content=self.content or "",
125
+ function_call=None,
126
+ refusal=None,
127
+ )
128
+
129
+ return assistant_message
130
+
131
+ case ChatMessageRole.TOOL:
132
+ raise NotImplementedError
133
+
71
134
 
72
135
  class ChatMessageAssessmentStatus(StrEnum):
73
136
  PENDING = "PENDING"
@@ -100,3 +163,72 @@ class ChatMessageAssessment(BaseModel):
100
163
  is_visible: bool
101
164
  created_at: datetime | None = None
102
165
  updated_at: datetime | None = None
166
+
167
+
168
class MessageLogStatus(StrEnum):
    """Lifecycle state of a message-log entry."""

    RUNNING = "RUNNING"
    COMPLETED = "COMPLETED"
    FAILED = "FAILED"
172
+
173
+
174
class MessageExecutionStatus(StrEnum):
    """Lifecycle state of a message execution."""

    PENDING = "PENDING"
    RUNNING = "RUNNING"
    COMPLETED = "COMPLETED"
    FAILED = "FAILED"
179
+
180
+
181
class MessageExecutionType(StrEnum):
    """Kind of long-running message execution; only deep research exists so far."""

    DEEP_RESEARCH = "DEEP_RESEARCH"
183
+
184
+
185
class MessageExecutionUpdateStatus(StrEnum):
    """Terminal statuses accepted when updating a message execution."""

    COMPLETED = "COMPLETED"
    FAILED = "FAILED"
188
+
189
+
190
class MessageLogUncitedReferences(BaseModel):
    """Wrapper around references collected for a message log but not cited in its text."""

    model_config = model_config
    data: list[ContentReference]
193
+
194
+
195
class MessageLogEvent(BaseModel):
    """A single search event recorded within a message log's details."""

    model_config = model_config
    # Only web and internal search events are modeled here.
    type: Literal["WebSearch", "InternalSearch"]
    text: str
199
+
200
+
201
class MessageLogDetails(BaseModel):
    """Detail payload of a message log: recorded events plus an overall status."""

    model_config = model_config
    data: list[MessageLogEvent] | None = None
    status: str | None = Field(
        default=None, description="Overarching status of the current message log"
    )
207
+
208
+
209
class MessageLog(BaseModel):
    """A single backend log entry attached to a chat message.

    The backend sends the identifier as `id`; it is remapped to
    `message_log_id` via the validation alias.
    """

    model_config = model_config

    message_log_id: str | None = Field(default=None, validation_alias="id")
    message_id: str | None = None
    status: MessageLogStatus
    text: str | None = None
    details: MessageLogDetails | None = None
    uncited_references: MessageLogUncitedReferences | None = None
    # Position of this log entry among the message's logs.
    order: int
    references: list[ContentReference] | None = None
    created_at: datetime | None = None
    updated_at: datetime | None = None
222
+
223
+
224
class MessageExecution(BaseModel):
    """Progress record for a long-running message execution.

    Defaults to the deep-research execution type, the only type currently
    defined in `MessageExecutionType`.
    """

    model_config = model_config

    message_execution_id: str | None = None
    message_id: str | None = None
    status: MessageExecutionStatus
    type: MessageExecutionType = MessageExecutionType.DEEP_RESEARCH
    # Progress indicators reported by the backend, when available.
    seconds_remaining: int | None = None
    percentage_completed: int | None = None
    created_at: datetime | None = None
    updated_at: datetime | None = None