unique_toolkit 0.7.7__py3-none-any.whl → 1.23.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of unique_toolkit might be problematic. Click here for more details.

Files changed (166)
  1. unique_toolkit/__init__.py +28 -1
  2. unique_toolkit/_common/api_calling/human_verification_manager.py +343 -0
  3. unique_toolkit/_common/base_model_type_attribute.py +303 -0
  4. unique_toolkit/_common/chunk_relevancy_sorter/config.py +49 -0
  5. unique_toolkit/_common/chunk_relevancy_sorter/exception.py +5 -0
  6. unique_toolkit/_common/chunk_relevancy_sorter/schemas.py +46 -0
  7. unique_toolkit/_common/chunk_relevancy_sorter/service.py +374 -0
  8. unique_toolkit/_common/chunk_relevancy_sorter/tests/test_service.py +275 -0
  9. unique_toolkit/_common/default_language_model.py +12 -0
  10. unique_toolkit/_common/docx_generator/__init__.py +7 -0
  11. unique_toolkit/_common/docx_generator/config.py +12 -0
  12. unique_toolkit/_common/docx_generator/schemas.py +80 -0
  13. unique_toolkit/_common/docx_generator/service.py +252 -0
  14. unique_toolkit/_common/docx_generator/template/Doc Template.docx +0 -0
  15. unique_toolkit/_common/endpoint_builder.py +305 -0
  16. unique_toolkit/_common/endpoint_requestor.py +430 -0
  17. unique_toolkit/_common/exception.py +24 -0
  18. unique_toolkit/_common/feature_flags/schema.py +9 -0
  19. unique_toolkit/_common/pydantic/rjsf_tags.py +936 -0
  20. unique_toolkit/_common/pydantic_helpers.py +154 -0
  21. unique_toolkit/_common/referencing.py +53 -0
  22. unique_toolkit/_common/string_utilities.py +140 -0
  23. unique_toolkit/_common/tests/test_referencing.py +521 -0
  24. unique_toolkit/_common/tests/test_string_utilities.py +506 -0
  25. unique_toolkit/_common/token/image_token_counting.py +67 -0
  26. unique_toolkit/_common/token/token_counting.py +204 -0
  27. unique_toolkit/_common/utils/__init__.py +1 -0
  28. unique_toolkit/_common/utils/files.py +43 -0
  29. unique_toolkit/_common/utils/structured_output/__init__.py +1 -0
  30. unique_toolkit/_common/utils/structured_output/schema.py +5 -0
  31. unique_toolkit/_common/utils/write_configuration.py +51 -0
  32. unique_toolkit/_common/validators.py +101 -4
  33. unique_toolkit/agentic/__init__.py +1 -0
  34. unique_toolkit/agentic/debug_info_manager/debug_info_manager.py +28 -0
  35. unique_toolkit/agentic/debug_info_manager/test/test_debug_info_manager.py +278 -0
  36. unique_toolkit/agentic/evaluation/config.py +36 -0
  37. unique_toolkit/{evaluators → agentic/evaluation}/context_relevancy/prompts.py +25 -0
  38. unique_toolkit/agentic/evaluation/context_relevancy/schema.py +80 -0
  39. unique_toolkit/agentic/evaluation/context_relevancy/service.py +273 -0
  40. unique_toolkit/agentic/evaluation/evaluation_manager.py +218 -0
  41. unique_toolkit/agentic/evaluation/hallucination/constants.py +61 -0
  42. unique_toolkit/agentic/evaluation/hallucination/hallucination_evaluation.py +111 -0
  43. unique_toolkit/{evaluators → agentic/evaluation}/hallucination/prompts.py +1 -1
  44. unique_toolkit/{evaluators → agentic/evaluation}/hallucination/service.py +16 -15
  45. unique_toolkit/{evaluators → agentic/evaluation}/hallucination/utils.py +30 -20
  46. unique_toolkit/{evaluators → agentic/evaluation}/output_parser.py +20 -2
  47. unique_toolkit/{evaluators → agentic/evaluation}/schemas.py +27 -7
  48. unique_toolkit/agentic/evaluation/tests/test_context_relevancy_service.py +253 -0
  49. unique_toolkit/agentic/evaluation/tests/test_output_parser.py +87 -0
  50. unique_toolkit/agentic/history_manager/history_construction_with_contents.py +297 -0
  51. unique_toolkit/agentic/history_manager/history_manager.py +242 -0
  52. unique_toolkit/agentic/history_manager/loop_token_reducer.py +484 -0
  53. unique_toolkit/agentic/history_manager/utils.py +96 -0
  54. unique_toolkit/agentic/postprocessor/postprocessor_manager.py +212 -0
  55. unique_toolkit/agentic/reference_manager/reference_manager.py +103 -0
  56. unique_toolkit/agentic/responses_api/__init__.py +19 -0
  57. unique_toolkit/agentic/responses_api/postprocessors/code_display.py +63 -0
  58. unique_toolkit/agentic/responses_api/postprocessors/generated_files.py +145 -0
  59. unique_toolkit/agentic/responses_api/stream_handler.py +15 -0
  60. unique_toolkit/agentic/short_term_memory_manager/persistent_short_term_memory_manager.py +141 -0
  61. unique_toolkit/agentic/thinking_manager/thinking_manager.py +103 -0
  62. unique_toolkit/agentic/tools/__init__.py +1 -0
  63. unique_toolkit/agentic/tools/a2a/__init__.py +36 -0
  64. unique_toolkit/agentic/tools/a2a/config.py +17 -0
  65. unique_toolkit/agentic/tools/a2a/evaluation/__init__.py +15 -0
  66. unique_toolkit/agentic/tools/a2a/evaluation/_utils.py +66 -0
  67. unique_toolkit/agentic/tools/a2a/evaluation/config.py +55 -0
  68. unique_toolkit/agentic/tools/a2a/evaluation/evaluator.py +260 -0
  69. unique_toolkit/agentic/tools/a2a/evaluation/summarization_user_message.j2 +9 -0
  70. unique_toolkit/agentic/tools/a2a/manager.py +55 -0
  71. unique_toolkit/agentic/tools/a2a/postprocessing/__init__.py +21 -0
  72. unique_toolkit/agentic/tools/a2a/postprocessing/_display_utils.py +185 -0
  73. unique_toolkit/agentic/tools/a2a/postprocessing/_ref_utils.py +73 -0
  74. unique_toolkit/agentic/tools/a2a/postprocessing/config.py +45 -0
  75. unique_toolkit/agentic/tools/a2a/postprocessing/display.py +180 -0
  76. unique_toolkit/agentic/tools/a2a/postprocessing/references.py +101 -0
  77. unique_toolkit/agentic/tools/a2a/postprocessing/test/test_display_utils.py +1335 -0
  78. unique_toolkit/agentic/tools/a2a/postprocessing/test/test_ref_utils.py +603 -0
  79. unique_toolkit/agentic/tools/a2a/prompts.py +46 -0
  80. unique_toolkit/agentic/tools/a2a/response_watcher/__init__.py +6 -0
  81. unique_toolkit/agentic/tools/a2a/response_watcher/service.py +91 -0
  82. unique_toolkit/agentic/tools/a2a/tool/__init__.py +4 -0
  83. unique_toolkit/agentic/tools/a2a/tool/_memory.py +26 -0
  84. unique_toolkit/agentic/tools/a2a/tool/_schema.py +9 -0
  85. unique_toolkit/agentic/tools/a2a/tool/config.py +73 -0
  86. unique_toolkit/agentic/tools/a2a/tool/service.py +306 -0
  87. unique_toolkit/agentic/tools/agent_chunks_hanlder.py +65 -0
  88. unique_toolkit/agentic/tools/config.py +167 -0
  89. unique_toolkit/agentic/tools/factory.py +44 -0
  90. unique_toolkit/agentic/tools/mcp/__init__.py +4 -0
  91. unique_toolkit/agentic/tools/mcp/manager.py +71 -0
  92. unique_toolkit/agentic/tools/mcp/models.py +28 -0
  93. unique_toolkit/agentic/tools/mcp/tool_wrapper.py +234 -0
  94. unique_toolkit/agentic/tools/openai_builtin/__init__.py +11 -0
  95. unique_toolkit/agentic/tools/openai_builtin/base.py +30 -0
  96. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/__init__.py +8 -0
  97. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/config.py +57 -0
  98. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/service.py +230 -0
  99. unique_toolkit/agentic/tools/openai_builtin/manager.py +62 -0
  100. unique_toolkit/agentic/tools/schemas.py +141 -0
  101. unique_toolkit/agentic/tools/test/test_mcp_manager.py +536 -0
  102. unique_toolkit/agentic/tools/test/test_tool_progress_reporter.py +445 -0
  103. unique_toolkit/agentic/tools/tool.py +183 -0
  104. unique_toolkit/agentic/tools/tool_manager.py +523 -0
  105. unique_toolkit/agentic/tools/tool_progress_reporter.py +285 -0
  106. unique_toolkit/agentic/tools/utils/__init__.py +19 -0
  107. unique_toolkit/agentic/tools/utils/execution/__init__.py +1 -0
  108. unique_toolkit/agentic/tools/utils/execution/execution.py +286 -0
  109. unique_toolkit/agentic/tools/utils/source_handling/__init__.py +0 -0
  110. unique_toolkit/agentic/tools/utils/source_handling/schema.py +21 -0
  111. unique_toolkit/agentic/tools/utils/source_handling/source_formatting.py +207 -0
  112. unique_toolkit/agentic/tools/utils/source_handling/tests/test_source_formatting.py +216 -0
  113. unique_toolkit/app/__init__.py +6 -0
  114. unique_toolkit/app/dev_util.py +180 -0
  115. unique_toolkit/app/init_sdk.py +32 -1
  116. unique_toolkit/app/schemas.py +198 -31
  117. unique_toolkit/app/unique_settings.py +367 -0
  118. unique_toolkit/chat/__init__.py +8 -1
  119. unique_toolkit/chat/deprecated/service.py +232 -0
  120. unique_toolkit/chat/functions.py +642 -77
  121. unique_toolkit/chat/rendering.py +34 -0
  122. unique_toolkit/chat/responses_api.py +461 -0
  123. unique_toolkit/chat/schemas.py +133 -2
  124. unique_toolkit/chat/service.py +115 -767
  125. unique_toolkit/content/functions.py +153 -4
  126. unique_toolkit/content/schemas.py +122 -15
  127. unique_toolkit/content/service.py +278 -44
  128. unique_toolkit/content/smart_rules.py +301 -0
  129. unique_toolkit/content/utils.py +8 -3
  130. unique_toolkit/embedding/service.py +102 -11
  131. unique_toolkit/framework_utilities/__init__.py +1 -0
  132. unique_toolkit/framework_utilities/langchain/client.py +71 -0
  133. unique_toolkit/framework_utilities/langchain/history.py +19 -0
  134. unique_toolkit/framework_utilities/openai/__init__.py +6 -0
  135. unique_toolkit/framework_utilities/openai/client.py +83 -0
  136. unique_toolkit/framework_utilities/openai/message_builder.py +229 -0
  137. unique_toolkit/framework_utilities/utils.py +23 -0
  138. unique_toolkit/language_model/__init__.py +3 -0
  139. unique_toolkit/language_model/builder.py +27 -11
  140. unique_toolkit/language_model/default_language_model.py +3 -0
  141. unique_toolkit/language_model/functions.py +327 -43
  142. unique_toolkit/language_model/infos.py +992 -50
  143. unique_toolkit/language_model/reference.py +242 -0
  144. unique_toolkit/language_model/schemas.py +475 -48
  145. unique_toolkit/language_model/service.py +228 -27
  146. unique_toolkit/protocols/support.py +145 -0
  147. unique_toolkit/services/__init__.py +7 -0
  148. unique_toolkit/services/chat_service.py +1630 -0
  149. unique_toolkit/services/knowledge_base.py +861 -0
  150. unique_toolkit/short_term_memory/service.py +178 -41
  151. unique_toolkit/smart_rules/__init__.py +0 -0
  152. unique_toolkit/smart_rules/compile.py +56 -0
  153. unique_toolkit/test_utilities/events.py +197 -0
  154. {unique_toolkit-0.7.7.dist-info → unique_toolkit-1.23.0.dist-info}/METADATA +606 -7
  155. unique_toolkit-1.23.0.dist-info/RECORD +182 -0
  156. unique_toolkit/evaluators/__init__.py +0 -1
  157. unique_toolkit/evaluators/config.py +0 -35
  158. unique_toolkit/evaluators/constants.py +0 -1
  159. unique_toolkit/evaluators/context_relevancy/constants.py +0 -32
  160. unique_toolkit/evaluators/context_relevancy/service.py +0 -53
  161. unique_toolkit/evaluators/context_relevancy/utils.py +0 -142
  162. unique_toolkit/evaluators/hallucination/constants.py +0 -41
  163. unique_toolkit-0.7.7.dist-info/RECORD +0 -64
  164. /unique_toolkit/{evaluators → agentic/evaluation}/exception.py +0 -0
  165. {unique_toolkit-0.7.7.dist-info → unique_toolkit-1.23.0.dist-info}/LICENSE +0 -0
  166. {unique_toolkit-0.7.7.dist-info → unique_toolkit-1.23.0.dist-info}/WHEEL +0 -0
@@ -0,0 +1,34 @@
1
+ from urllib.parse import quote
2
+
3
+
4
def create_prompt_button_string(
    *,
    button_text: str,
    next_user_message: str,
) -> str:
    """Build a markdown-style prompt button.

    Args:
        button_text: Label shown on the button.
        next_user_message: Message inserted into the user's prompt window
            when the button is clicked; it is URL-encoded inside the link.

    Returns:
        A markdown link of the form ``[text](https://prompt=<encoded message>)``.
    """
    encoded_message = quote(next_user_message)
    return f"[{button_text}](https://prompt={encoded_message})"
22
+
23
+
24
def create_latex_formula_string(latex_expression: str) -> str:
    """Wrap a LaTeX expression in display-math delimiters.

    Args:
        latex_expression: The raw LaTeX expression to wrap.

    Returns:
        The expression enclosed in ``\\[`` and ``\\]`` so it renders as a
        display formula.
    """
    return "\\[" + latex_expression + "\\]"
@@ -0,0 +1,461 @@
1
+ import logging
2
+ from typing import Any, NamedTuple, Sequence
3
+
4
+ import unique_sdk
5
+ from openai.types.responses import (
6
+ ResponseIncludable,
7
+ ResponseInputItemParam,
8
+ ResponseInputParam,
9
+ ResponseOutputItem,
10
+ ResponseTextConfigParam,
11
+ ToolParam,
12
+ response_create_params,
13
+ )
14
+ from openai.types.shared_params import Metadata, Reasoning
15
+ from pydantic import BaseModel, TypeAdapter, ValidationError
16
+
17
+ from unique_toolkit.agentic.tools.utils.execution.execution import (
18
+ failsafe,
19
+ )
20
+ from unique_toolkit.content.schemas import ContentChunk
21
+ from unique_toolkit.language_model.constants import (
22
+ DEFAULT_COMPLETE_TEMPERATURE,
23
+ )
24
+ from unique_toolkit.language_model.functions import (
25
+ SearchContext,
26
+ _clamp_temperature,
27
+ _to_search_context,
28
+ )
29
+ from unique_toolkit.language_model.infos import (
30
+ LanguageModelInfo,
31
+ LanguageModelName,
32
+ )
33
+ from unique_toolkit.language_model.schemas import (
34
+ LanguageModelAssistantMessage,
35
+ LanguageModelMessage,
36
+ LanguageModelMessageOptions,
37
+ LanguageModelMessageRole,
38
+ LanguageModelMessages,
39
+ LanguageModelSystemMessage,
40
+ LanguageModelToolDescription,
41
+ LanguageModelToolMessage,
42
+ LanguageModelUserMessage,
43
+ ResponsesLanguageModelStreamResponse,
44
+ )
45
+
46
+ logger = logging.getLogger(__name__)
47
+
48
+
49
def _convert_tools_to_openai(
    tools: Sequence[LanguageModelToolDescription | ToolParam],
) -> list[ToolParam]:
    """Normalize a mixed tool sequence into OpenAI ``ToolParam`` entries.

    Toolkit tool descriptions are converted via ``to_openai(mode="responses")``;
    entries that are already OpenAI params are passed through unchanged.
    """
    return [
        tool.to_openai(mode="responses")
        if isinstance(tool, LanguageModelToolDescription)
        else tool
        for tool in tools
    ]
59
+
60
+
61
+ def _convert_message_to_openai(
62
+ message: LanguageModelMessageOptions,
63
+ ) -> ResponseInputParam:
64
+ res = []
65
+ match message:
66
+ case LanguageModelAssistantMessage():
67
+ return message.to_openai(mode="responses") # type: ignore
68
+ case (
69
+ LanguageModelUserMessage()
70
+ | LanguageModelSystemMessage()
71
+ | LanguageModelToolMessage()
72
+ ):
73
+ return [message.to_openai(mode="responses")]
74
+ case _:
75
+ return _convert_message_to_openai(_convert_to_specific_message(message))
76
+ return res
77
+
78
+
79
def _convert_to_specific_message(
    message: LanguageModelMessage,
) -> "LanguageModelSystemMessage | LanguageModelUserMessage | LanguageModelAssistantMessage":
    """Narrow a generic message to its role-specific model.

    Raises:
        ValueError: If the role is ``TOOL`` — tool messages carry extra
            fields and must be created as ``LanguageModelToolMessage``.
    """
    role = message.role
    if role == LanguageModelMessageRole.SYSTEM:
        return LanguageModelSystemMessage(content=message.content)
    if role == LanguageModelMessageRole.USER:
        return LanguageModelUserMessage(content=message.content)
    if role == LanguageModelMessageRole.ASSISTANT:
        return LanguageModelAssistantMessage(content=message.content)
    # Only LanguageModelMessageRole.TOOL remains.
    raise ValueError(
        "Cannot convert message with role `tool`. Please use `LanguageModelToolMessage` instead."
    )
93
+
94
+
95
def _convert_messages_to_openai(
    messages: Sequence[
        ResponseInputItemParam | LanguageModelMessageOptions | ResponseOutputItem
    ],
) -> ResponseInputParam:
    """Flatten a heterogeneous message sequence into Responses-API input items."""
    converted = []
    for item in messages:
        if isinstance(item, LanguageModelMessageOptions):
            # Toolkit messages may expand to multiple input items.
            converted.extend(_convert_message_to_openai(item))
        elif isinstance(
            item, dict
        ):  # Openai uses dicts for their input and BaseModel as output
            converted.append(item)
        else:
            assert isinstance(item, BaseModel)
            converted.append(item.model_dump(exclude_defaults=True))

    return converted
113
+
114
+
115
class _ResponsesParams(NamedTuple):
    """Normalized parameter bundle produced by `_prepare_responses_params_util`.

    Field order is part of the interface: `_prepare_responses_params_util`
    constructs it positionally.
    """

    temperature: float
    model_name: str
    search_context: SearchContext | None
    messages: str | ResponseInputParam
    tools: list[ToolParam] | None
    reasoning: Reasoning | None
    text: ResponseTextConfigParam | None
123
+
124
+
125
def _prepare_responses_params_util(
    model_name: LanguageModelName | str,
    content_chunks: list[ContentChunk] | None,
    temperature: float,
    tools: Sequence[LanguageModelToolDescription | ToolParam] | None,
    messages: str
    | LanguageModelMessages
    | Sequence[
        ResponseInputItemParam | LanguageModelMessageOptions | ResponseOutputItem
    ],
    reasoning: Reasoning | None,
    text: ResponseTextConfigParam | None,
    other_options: dict | None = None,
) -> _ResponsesParams:
    """Normalize caller inputs into a `_ResponsesParams` bundle.

    Resolves the model name, builds the search context from content chunks,
    converts tools and messages to OpenAI shapes, and derives the
    `reasoning`/`text` settings. Precedence for reasoning/text: explicit
    keyword argument > value found in `other_options` > model default.
    """
    search_context = (
        _to_search_context(content_chunks) if content_chunks is not None else None
    )

    # Known models are passed by enum name; arbitrary model strings as-is.
    model = model_name.name if isinstance(model_name, LanguageModelName) else model_name

    tools_res = _convert_tools_to_openai(tools) if tools is not None else None

    if other_options is not None:
        # Key word argument takes precedence
        reasoning = reasoning or _attempt_extract_reasoning_from_options(other_options)
        text = text or _attempt_extract_verbosity_from_options(other_options)

    if isinstance(model_name, LanguageModelName):
        model_info = LanguageModelInfo.from_name(model_name)

        # Clamp the requested temperature into the model's supported range.
        if model_info.temperature_bounds is not None and temperature is not None:
            temperature = _clamp_temperature(temperature, model_info.temperature_bounds)

        # Fall back to the model's default reasoning effort when the caller
        # provided none (neither keyword nor other_options).
        if (
            reasoning is None
            and model_info.default_options is not None
            and "reasoning_effort" in model_info.default_options
        ):
            reasoning = Reasoning(effort=model_info.default_options["reasoning_effort"])

    # NOTE(review): this mutates the caller-supplied `reasoning` dict in
    # place when bumping the effort level — confirm callers do not reuse it.
    if (
        reasoning is not None
        and tools_res is not None
        and any(tool["type"] == "code_interpreter" for tool in tools_res)
        and "effort" in reasoning
        and reasoning["effort"] == "minimal"
    ):
        logger.warning(
            "Code interpreter cannot be used with `minimal` effort. Switching to `low`."
        )
        reasoning["effort"] = (
            "low"  # Code interpreter cannot be used with minimal effort
        )

    # Messages: plain strings pass through; everything else is converted to
    # the Responses-API input-item list.
    messages_res = None
    if isinstance(messages, LanguageModelMessages):
        messages_res = _convert_messages_to_openai(messages.root)
    elif isinstance(messages, list):
        messages_res = _convert_messages_to_openai(messages)
    else:
        assert isinstance(messages, str)
        messages_res = messages

    return _ResponsesParams(
        temperature, model, search_context, messages_res, tools_res, reasoning, text
    )
191
+
192
+
193
@failsafe(
    failure_return_value=None,
    exceptions=(ValidationError,),
    log_exc_info=False,
    logger=logger,
)
def _attempt_extract_reasoning_from_options(options: dict) -> Reasoning | None:
    """Extract a ``Reasoning`` config from a raw options dict.

    Keys are checked in order of precedence:
      1. ``reasoning`` (Responses API shape),
      2. ``reasoning_effort`` (Completions API, snake_case),
      3. ``reasoningEffort`` (Completions API, camelCase).

    Returns ``None`` when no reasoning-related key is present; validation
    failures are converted to ``None`` by the ``failsafe`` decorator.
    """
    reasoning = None

    # Responses API
    if "reasoning" in options:
        reasoning = options["reasoning"]

    # Completions API (snake_case preferred over camelCase)
    elif "reasoning_effort" in options:
        reasoning = {"effort": options["reasoning_effort"]}
    # Fix: previously a bare `if`, which let `reasoningEffort` silently
    # override an explicit `reasoning` entry.
    elif "reasoningEffort" in options:
        reasoning = {"effort": options["reasoningEffort"]}

    if reasoning is not None:
        return TypeAdapter(Reasoning).validate_python(reasoning)

    return None
216
+
217
+
218
@failsafe(
    failure_return_value=None,
    exceptions=(ValidationError,),
    log_exc_info=False,
    logger=logger,
)
def _attempt_extract_verbosity_from_options(
    options: dict,
) -> ResponseTextConfigParam | None:
    """Extract a text/verbosity config from a raw options dict.

    Accepts either the Responses-API ``text`` key or the Completions-API
    ``verbosity`` key (the former takes precedence). Returns ``None`` when
    neither is present; validation failures become ``None`` via ``failsafe``.
    """
    text_config = None

    # Responses API
    if "text" in options:
        text_config = options["text"]

    # Completions API
    elif "verbosity" in options:
        text_config = {"verbosity": options["verbosity"]}

    if text_config is None:
        return None

    return TypeAdapter(ResponseTextConfigParam).validate_python(text_config)
241
+
242
+
243
def _prepare_responses_args(
    company_id: str,
    user_id: str,
    assistant_message_id: str,
    user_message_id: str,
    chat_id: str,
    assistant_id: str,
    params: _ResponsesParams,
    debug_info: dict | None,
    start_text: str | None,
    include: list[ResponseIncludable] | None,
    instructions: str | None,
    max_output_tokens: int | None,
    metadata: Metadata | None,
    parallel_tool_calls: bool | None,
    tool_choice: response_create_params.ToolChoice | None,
    top_p: float | None,
    other_options: dict | None = None,
) -> dict[str, Any]:
    """Assemble the keyword arguments for a `unique_sdk` responses call.

    Required identifiers are always set; optional values are added only
    when not ``None``. OpenAI-specific settings are collected into the
    nested ``options`` dict.
    """
    options: dict[str, Any] = {
        "company_id": company_id,
        "user_id": user_id,
        "model": params.model_name,
    }

    if params.search_context is not None:
        options["searchContext"] = params.search_context

    options["chatId"] = chat_id
    options["assistantId"] = assistant_id
    options["assistantMessageId"] = assistant_message_id
    options["userMessageId"] = user_message_id

    if debug_info is not None:
        options["debugInfo"] = debug_info
    if start_text is not None:
        options["startText"] = start_text

    options["input"] = params.messages

    openai_options: unique_sdk.Integrated.CreateStreamResponsesOpenaiParams = {}

    # Optional OpenAI settings, added only when explicitly provided.
    optional_openai_settings = (
        ("temperature", params.temperature),
        ("reasoning", params.reasoning),
        ("text", params.text),
        ("include", include),
        ("instructions", instructions),
        ("max_output_tokens", max_output_tokens),
        ("metadata", metadata),
        ("parallel_tool_calls", parallel_tool_calls),
        ("tool_choice", tool_choice),
        ("tools", params.tools),
        ("top_p", top_p),
    )
    for key, value in optional_openai_settings:
        if value is not None:
            openai_options[key] = value  # type: ignore[literal-required]

    # allow any other openai.resources.responses.Response.create options
    if other_options is not None:
        openai_options.update(other_options)  # type: ignore

    options["options"] = openai_options

    return options
326
+
327
+
328
def stream_responses_with_references(
    *,
    company_id: str,
    user_id: str,
    assistant_message_id: str,
    user_message_id: str,
    chat_id: str,
    assistant_id: str,
    model_name: LanguageModelName | str,
    messages: str
    | LanguageModelMessages
    | Sequence[
        ResponseInputItemParam | LanguageModelMessageOptions | ResponseOutputItem
    ],
    content_chunks: list[ContentChunk] | None = None,
    tools: Sequence[LanguageModelToolDescription | ToolParam] | None = None,
    temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
    debug_info: dict | None = None,
    start_text: str | None = None,
    include: list[ResponseIncludable] | None = None,
    instructions: str | None = None,
    max_output_tokens: int | None = None,
    metadata: Metadata | None = None,
    parallel_tool_calls: bool | None = None,
    text: ResponseTextConfigParam | None = None,
    tool_choice: response_create_params.ToolChoice | None = None,
    top_p: float | None = None,
    reasoning: Reasoning | None = None,
    other_options: dict | None = None,
) -> ResponsesLanguageModelStreamResponse:
    """Stream a Responses-API completion through the Unique backend (sync).

    Normalizes messages, tools and reasoning/text options into OpenAI
    shapes, builds the `unique_sdk` argument dict, calls
    `unique_sdk.Integrated.responses_stream` and validates the result into
    a `ResponsesLanguageModelStreamResponse`.

    Args mirror `_prepare_responses_params_util` and
    `_prepare_responses_args`; `content_chunks` become the search context
    used for reference resolution, and `other_options` may carry any extra
    `Response.create` settings.
    """
    # Normalize toolkit-level inputs into OpenAI-compatible structures.
    responses_params = _prepare_responses_params_util(
        model_name=model_name,
        content_chunks=content_chunks,
        temperature=temperature,
        tools=tools,
        messages=messages,
        reasoning=reasoning,
        text=text,
        other_options=other_options,
    )

    # Assemble the full keyword-argument dict for the SDK call.
    responses_args = _prepare_responses_args(
        company_id=company_id,
        user_id=user_id,
        assistant_message_id=assistant_message_id,
        user_message_id=user_message_id,
        chat_id=chat_id,
        assistant_id=assistant_id,
        params=responses_params,
        debug_info=debug_info,
        start_text=start_text,
        include=include,
        instructions=instructions,
        max_output_tokens=max_output_tokens,
        metadata=metadata,
        parallel_tool_calls=parallel_tool_calls,
        tool_choice=tool_choice,
        top_p=top_p,
        other_options=other_options,
    )

    return ResponsesLanguageModelStreamResponse.model_validate(
        unique_sdk.Integrated.responses_stream(
            **responses_args,
        )
    )
394
+
395
+
396
async def stream_responses_with_references_async(
    *,
    company_id: str,
    user_id: str,
    assistant_message_id: str,
    user_message_id: str,
    chat_id: str,
    assistant_id: str,
    model_name: LanguageModelName | str,
    messages: str
    | LanguageModelMessages
    | Sequence[
        ResponseInputItemParam | LanguageModelMessageOptions | ResponseOutputItem
    ],
    content_chunks: list[ContentChunk] | None = None,
    tools: Sequence[LanguageModelToolDescription | ToolParam] | None = None,
    temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
    debug_info: dict | None = None,
    start_text: str | None = None,
    include: list[ResponseIncludable] | None = None,
    instructions: str | None = None,
    max_output_tokens: int | None = None,
    metadata: Metadata | None = None,
    parallel_tool_calls: bool | None = None,
    text: ResponseTextConfigParam | None = None,
    tool_choice: response_create_params.ToolChoice | None = None,
    top_p: float | None = None,
    reasoning: Reasoning | None = None,
    other_options: dict | None = None,
) -> ResponsesLanguageModelStreamResponse:
    """Async twin of `stream_responses_with_references`.

    Identical normalization and argument assembly; the only difference is
    that the SDK call is awaited via
    `unique_sdk.Integrated.responses_stream_async`.
    """
    # Normalize toolkit-level inputs into OpenAI-compatible structures.
    responses_params = _prepare_responses_params_util(
        model_name=model_name,
        content_chunks=content_chunks,
        temperature=temperature,
        tools=tools,
        messages=messages,
        reasoning=reasoning,
        text=text,
        other_options=other_options,
    )

    # Assemble the full keyword-argument dict for the SDK call.
    responses_args = _prepare_responses_args(
        company_id=company_id,
        user_id=user_id,
        assistant_message_id=assistant_message_id,
        user_message_id=user_message_id,
        chat_id=chat_id,
        assistant_id=assistant_id,
        params=responses_params,
        debug_info=debug_info,
        start_text=start_text,
        include=include,
        instructions=instructions,
        max_output_tokens=max_output_tokens,
        metadata=metadata,
        parallel_tool_calls=parallel_tool_calls,
        tool_choice=tool_choice,
        top_p=top_p,
        other_options=other_options,
    )

    return ResponsesLanguageModelStreamResponse.model_validate(
        await unique_sdk.Integrated.responses_stream_async(
            **responses_args,
        )
    )
@@ -1,7 +1,19 @@
1
1
  from datetime import datetime
2
2
  from enum import StrEnum
3
+ from typing import Literal
3
4
 
4
5
  from humps import camelize
6
+ from openai.types.chat import (
7
+ ChatCompletionAssistantMessageParam,
8
+ ChatCompletionMessageParam,
9
+ ChatCompletionUserMessageParam,
10
+ )
11
+ from openai.types.chat.chat_completion_message_function_tool_call_param import (
12
+ ChatCompletionMessageFunctionToolCallParam,
13
+ )
14
+ from openai.types.chat.chat_completion_message_function_tool_call_param import (
15
+ Function as OpenAIFunction,
16
+ )
5
17
  from pydantic import (
6
18
  BaseModel,
7
19
  ConfigDict,
@@ -10,16 +22,19 @@ from pydantic import (
10
22
  model_validator,
11
23
  )
12
24
 
25
+ from unique_toolkit.content.schemas import ContentReference
26
+
13
27
  # set config to convert camelCase to snake_case
14
28
  model_config = ConfigDict(
15
- alias_generator=camelize, populate_by_name=True, arbitrary_types_allowed=True
29
+ alias_generator=camelize,
30
+ populate_by_name=True,
16
31
  )
17
32
 
18
33
 
19
34
  class ChatMessageRole(StrEnum):
20
35
  USER = "user"
21
36
  ASSISTANT = "assistant"
22
- TOOL = "tool"
37
+ TOOL = "tool" # TODO: Unused according @unique-fabian. To be removed in separate PR
23
38
 
24
39
 
25
40
  class Function(BaseModel):
@@ -28,6 +43,12 @@ class Function(BaseModel):
28
43
  name: str
29
44
  arguments: str
30
45
 
46
+ def to_openai(self) -> OpenAIFunction:
47
+ return OpenAIFunction(
48
+ arguments=self.arguments,
49
+ name=self.name,
50
+ )
51
+
31
52
 
32
53
  class ToolCall(BaseModel):
33
54
  model_config = model_config
@@ -36,8 +57,16 @@ class ToolCall(BaseModel):
36
57
  type: str
37
58
  function: Function
38
59
 
60
+ def to_openai_param(self) -> ChatCompletionMessageFunctionToolCallParam:
61
+ return ChatCompletionMessageFunctionToolCallParam(
62
+ id=self.id,
63
+ function=self.function.to_openai(),
64
+ type="function",
65
+ )
66
+
39
67
 
40
68
  class ChatMessage(BaseModel):
69
+ # TODO: The below seems not to be True anymore @irina-unique. To be checked in separate PR
41
70
  # This model should strictly meets https://github.com/Unique-AG/monorepo/blob/master/node/apps/node-chat/src/public-api/2023-12-06/dtos/message/public-message.dto.ts
42
71
  model_config = model_config
43
72
 
@@ -54,6 +83,7 @@ class ChatMessage(BaseModel):
54
83
  created_at: datetime | None = None
55
84
  completed_at: datetime | None = None
56
85
  updated_at: datetime | None = None
86
+ references: list[ContentReference] | None = None
57
87
 
58
88
  # TODO make sdk return role consistently in lowercase
59
89
  # Currently needed as sdk returns role in uppercase
@@ -68,6 +98,38 @@ class ChatMessage(BaseModel):
68
98
  raise ValueError("tool_call_ids is required when role is 'tool'")
69
99
  return self
70
100
 
101
+ def to_openai_param(self) -> ChatCompletionMessageParam:
102
+ match self.role:
103
+ case ChatMessageRole.USER:
104
+ return ChatCompletionUserMessageParam(
105
+ role="user",
106
+ content=self.content or "",
107
+ )
108
+
109
+ case ChatMessageRole.ASSISTANT:
110
+ if self.tool_calls:
111
+ assistant_message = ChatCompletionAssistantMessageParam(
112
+ role="assistant",
113
+ audio=None,
114
+ content=self.content or "",
115
+ function_call=None,
116
+ refusal=None,
117
+ tool_calls=[t.to_openai_param() for t in self.tool_calls],
118
+ )
119
+ else:
120
+ assistant_message = ChatCompletionAssistantMessageParam(
121
+ role="assistant",
122
+ audio=None,
123
+ content=self.content or "",
124
+ function_call=None,
125
+ refusal=None,
126
+ )
127
+
128
+ return assistant_message
129
+
130
+ case ChatMessageRole.TOOL:
131
+ raise NotImplementedError
132
+
71
133
 
72
134
  class ChatMessageAssessmentStatus(StrEnum):
73
135
  PENDING = "PENDING"
@@ -100,3 +162,72 @@ class ChatMessageAssessment(BaseModel):
100
162
  is_visible: bool
101
163
  created_at: datetime | None = None
102
164
  updated_at: datetime | None = None
165
+
166
+
167
class MessageLogStatus(StrEnum):
    """Lifecycle status of a message log entry."""

    RUNNING = "RUNNING"
    COMPLETED = "COMPLETED"
    FAILED = "FAILED"
171
+
172
+
173
class MessageExecutionStatus(StrEnum):
    """Lifecycle status of a long-running message execution."""

    PENDING = "PENDING"
    RUNNING = "RUNNING"
    COMPLETED = "COMPLETED"
    FAILED = "FAILED"
178
+
179
+
180
class MessageExecutionType(StrEnum):
    """Kind of message execution; currently only deep research exists."""

    DEEP_RESEARCH = "DEEP_RESEARCH"
182
+
183
+
184
class MessageExecutionUpdateStatus(StrEnum):
    """Subset of execution statuses: the two terminal states."""

    COMPLETED = "COMPLETED"
    FAILED = "FAILED"
187
+
188
+
189
class MessageLogUncitedReferences(BaseModel):
    """Container for uncited content references attached to a message log."""

    model_config = model_config

    data: list[ContentReference]
192
+
193
+
194
class MessageLogEvent(BaseModel):
    """A typed event recorded in a message log (web or internal search)."""

    model_config = model_config

    type: Literal["WebSearch", "InternalSearch"]
    text: str
198
+
199
+
200
class MessageLogDetails(BaseModel):
    """Optional structured payload of a message log entry."""

    model_config = model_config

    data: list[MessageLogEvent] | None = None
    status: str | None = Field(
        default=None, description="Overarching status of the current message log"
    )
206
+
207
+
208
class MessageLog(BaseModel):
    """A single progress-log entry attached to a chat message."""

    model_config = model_config

    # The backend sends the primary key as `id`; remapped via validation_alias.
    message_log_id: str | None = Field(default=None, validation_alias="id")
    message_id: str | None = None
    status: MessageLogStatus
    text: str | None = None
    details: MessageLogDetails | None = None
    uncited_references: MessageLogUncitedReferences | None = None
    # Ordering index of this entry within the message's logs —
    # presumably ascending display order; confirm with backend contract.
    order: int
    references: list[ContentReference] | None = None
    created_at: datetime | None = None
    updated_at: datetime | None = None
221
+
222
+
223
+ class MessageExecution(BaseModel):
224
+ model_config = model_config
225
+
226
+ message_execution_id: str | None = None
227
+ message_id: str | None = None
228
+ status: MessageExecutionStatus
229
+ type: MessageExecutionType = MessageExecutionType.DEEP_RESEARCH
230
+ seconds_remaining: int | None = None
231
+ percentage_completed: int | None = None
232
+ created_at: datetime | None = None
233
+ updated_at: datetime | None = None