unique_toolkit 1.8.1__py3-none-any.whl → 1.23.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of unique_toolkit might be problematic. Click here for more details.

Files changed (105)
  1. unique_toolkit/__init__.py +20 -0
  2. unique_toolkit/_common/api_calling/human_verification_manager.py +121 -28
  3. unique_toolkit/_common/chunk_relevancy_sorter/config.py +3 -3
  4. unique_toolkit/_common/chunk_relevancy_sorter/tests/test_service.py +2 -5
  5. unique_toolkit/_common/default_language_model.py +9 -3
  6. unique_toolkit/_common/docx_generator/__init__.py +7 -0
  7. unique_toolkit/_common/docx_generator/config.py +12 -0
  8. unique_toolkit/_common/docx_generator/schemas.py +80 -0
  9. unique_toolkit/_common/docx_generator/service.py +252 -0
  10. unique_toolkit/_common/docx_generator/template/Doc Template.docx +0 -0
  11. unique_toolkit/_common/endpoint_builder.py +138 -117
  12. unique_toolkit/_common/endpoint_requestor.py +240 -14
  13. unique_toolkit/_common/exception.py +20 -0
  14. unique_toolkit/_common/feature_flags/schema.py +1 -5
  15. unique_toolkit/_common/referencing.py +53 -0
  16. unique_toolkit/_common/string_utilities.py +52 -1
  17. unique_toolkit/_common/tests/test_referencing.py +521 -0
  18. unique_toolkit/_common/tests/test_string_utilities.py +506 -0
  19. unique_toolkit/_common/utils/files.py +43 -0
  20. unique_toolkit/agentic/debug_info_manager/debug_info_manager.py +16 -6
  21. unique_toolkit/agentic/debug_info_manager/test/test_debug_info_manager.py +278 -0
  22. unique_toolkit/agentic/evaluation/config.py +3 -2
  23. unique_toolkit/agentic/evaluation/context_relevancy/service.py +2 -2
  24. unique_toolkit/agentic/evaluation/evaluation_manager.py +9 -5
  25. unique_toolkit/agentic/evaluation/hallucination/constants.py +1 -1
  26. unique_toolkit/agentic/evaluation/hallucination/hallucination_evaluation.py +26 -3
  27. unique_toolkit/agentic/history_manager/history_manager.py +14 -11
  28. unique_toolkit/agentic/history_manager/loop_token_reducer.py +3 -4
  29. unique_toolkit/agentic/history_manager/utils.py +10 -87
  30. unique_toolkit/agentic/postprocessor/postprocessor_manager.py +107 -16
  31. unique_toolkit/agentic/reference_manager/reference_manager.py +1 -1
  32. unique_toolkit/agentic/responses_api/__init__.py +19 -0
  33. unique_toolkit/agentic/responses_api/postprocessors/code_display.py +63 -0
  34. unique_toolkit/agentic/responses_api/postprocessors/generated_files.py +145 -0
  35. unique_toolkit/agentic/responses_api/stream_handler.py +15 -0
  36. unique_toolkit/agentic/tools/a2a/__init__.py +18 -2
  37. unique_toolkit/agentic/tools/a2a/evaluation/__init__.py +2 -0
  38. unique_toolkit/agentic/tools/a2a/evaluation/_utils.py +3 -3
  39. unique_toolkit/agentic/tools/a2a/evaluation/config.py +1 -1
  40. unique_toolkit/agentic/tools/a2a/evaluation/evaluator.py +143 -91
  41. unique_toolkit/agentic/tools/a2a/manager.py +7 -1
  42. unique_toolkit/agentic/tools/a2a/postprocessing/__init__.py +11 -3
  43. unique_toolkit/agentic/tools/a2a/postprocessing/_display_utils.py +185 -0
  44. unique_toolkit/agentic/tools/a2a/postprocessing/_ref_utils.py +73 -0
  45. unique_toolkit/agentic/tools/a2a/postprocessing/config.py +21 -0
  46. unique_toolkit/agentic/tools/a2a/postprocessing/display.py +180 -0
  47. unique_toolkit/agentic/tools/a2a/postprocessing/references.py +101 -0
  48. unique_toolkit/agentic/tools/a2a/postprocessing/test/test_display_utils.py +1335 -0
  49. unique_toolkit/agentic/tools/a2a/postprocessing/test/test_ref_utils.py +603 -0
  50. unique_toolkit/agentic/tools/a2a/prompts.py +46 -0
  51. unique_toolkit/agentic/tools/a2a/response_watcher/__init__.py +6 -0
  52. unique_toolkit/agentic/tools/a2a/response_watcher/service.py +91 -0
  53. unique_toolkit/agentic/tools/a2a/tool/config.py +15 -5
  54. unique_toolkit/agentic/tools/a2a/tool/service.py +69 -36
  55. unique_toolkit/agentic/tools/config.py +16 -2
  56. unique_toolkit/agentic/tools/factory.py +4 -0
  57. unique_toolkit/agentic/tools/mcp/tool_wrapper.py +7 -35
  58. unique_toolkit/agentic/tools/openai_builtin/__init__.py +11 -0
  59. unique_toolkit/agentic/tools/openai_builtin/base.py +30 -0
  60. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/__init__.py +8 -0
  61. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/config.py +57 -0
  62. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/service.py +230 -0
  63. unique_toolkit/agentic/tools/openai_builtin/manager.py +62 -0
  64. unique_toolkit/agentic/tools/test/test_mcp_manager.py +95 -7
  65. unique_toolkit/agentic/tools/test/test_tool_progress_reporter.py +240 -0
  66. unique_toolkit/agentic/tools/tool.py +0 -11
  67. unique_toolkit/agentic/tools/tool_manager.py +337 -122
  68. unique_toolkit/agentic/tools/tool_progress_reporter.py +81 -15
  69. unique_toolkit/agentic/tools/utils/__init__.py +18 -0
  70. unique_toolkit/agentic/tools/utils/execution/execution.py +8 -4
  71. unique_toolkit/agentic/tools/utils/source_handling/schema.py +1 -1
  72. unique_toolkit/chat/__init__.py +8 -1
  73. unique_toolkit/chat/deprecated/service.py +232 -0
  74. unique_toolkit/chat/functions.py +54 -40
  75. unique_toolkit/chat/rendering.py +34 -0
  76. unique_toolkit/chat/responses_api.py +461 -0
  77. unique_toolkit/chat/schemas.py +1 -1
  78. unique_toolkit/chat/service.py +96 -1569
  79. unique_toolkit/content/functions.py +116 -1
  80. unique_toolkit/content/schemas.py +59 -0
  81. unique_toolkit/content/service.py +5 -37
  82. unique_toolkit/content/smart_rules.py +301 -0
  83. unique_toolkit/framework_utilities/langchain/client.py +27 -3
  84. unique_toolkit/framework_utilities/openai/client.py +12 -1
  85. unique_toolkit/framework_utilities/openai/message_builder.py +85 -1
  86. unique_toolkit/language_model/default_language_model.py +3 -0
  87. unique_toolkit/language_model/functions.py +25 -9
  88. unique_toolkit/language_model/infos.py +72 -4
  89. unique_toolkit/language_model/schemas.py +246 -40
  90. unique_toolkit/protocols/support.py +91 -9
  91. unique_toolkit/services/__init__.py +7 -0
  92. unique_toolkit/services/chat_service.py +1630 -0
  93. unique_toolkit/services/knowledge_base.py +861 -0
  94. unique_toolkit/smart_rules/compile.py +56 -301
  95. unique_toolkit/test_utilities/events.py +197 -0
  96. {unique_toolkit-1.8.1.dist-info → unique_toolkit-1.23.0.dist-info}/METADATA +173 -3
  97. {unique_toolkit-1.8.1.dist-info → unique_toolkit-1.23.0.dist-info}/RECORD +99 -67
  98. unique_toolkit/agentic/tools/a2a/postprocessing/_display.py +0 -122
  99. unique_toolkit/agentic/tools/a2a/postprocessing/_utils.py +0 -19
  100. unique_toolkit/agentic/tools/a2a/postprocessing/postprocessor.py +0 -230
  101. unique_toolkit/agentic/tools/a2a/postprocessing/test/test_consolidate_references.py +0 -665
  102. unique_toolkit/agentic/tools/a2a/postprocessing/test/test_display.py +0 -391
  103. unique_toolkit/agentic/tools/a2a/postprocessing/test/test_postprocessor_reference_functions.py +0 -256
  104. {unique_toolkit-1.8.1.dist-info → unique_toolkit-1.23.0.dist-info}/LICENSE +0 -0
  105. {unique_toolkit-1.8.1.dist-info → unique_toolkit-1.23.0.dist-info}/WHEEL +0 -0
@@ -0,0 +1,461 @@
1
+ import logging
2
+ from typing import Any, NamedTuple, Sequence
3
+
4
+ import unique_sdk
5
+ from openai.types.responses import (
6
+ ResponseIncludable,
7
+ ResponseInputItemParam,
8
+ ResponseInputParam,
9
+ ResponseOutputItem,
10
+ ResponseTextConfigParam,
11
+ ToolParam,
12
+ response_create_params,
13
+ )
14
+ from openai.types.shared_params import Metadata, Reasoning
15
+ from pydantic import BaseModel, TypeAdapter, ValidationError
16
+
17
+ from unique_toolkit.agentic.tools.utils.execution.execution import (
18
+ failsafe,
19
+ )
20
+ from unique_toolkit.content.schemas import ContentChunk
21
+ from unique_toolkit.language_model.constants import (
22
+ DEFAULT_COMPLETE_TEMPERATURE,
23
+ )
24
+ from unique_toolkit.language_model.functions import (
25
+ SearchContext,
26
+ _clamp_temperature,
27
+ _to_search_context,
28
+ )
29
+ from unique_toolkit.language_model.infos import (
30
+ LanguageModelInfo,
31
+ LanguageModelName,
32
+ )
33
+ from unique_toolkit.language_model.schemas import (
34
+ LanguageModelAssistantMessage,
35
+ LanguageModelMessage,
36
+ LanguageModelMessageOptions,
37
+ LanguageModelMessageRole,
38
+ LanguageModelMessages,
39
+ LanguageModelSystemMessage,
40
+ LanguageModelToolDescription,
41
+ LanguageModelToolMessage,
42
+ LanguageModelUserMessage,
43
+ ResponsesLanguageModelStreamResponse,
44
+ )
45
+
46
+ logger = logging.getLogger(__name__)
47
+
48
+
49
def _convert_tools_to_openai(
    tools: Sequence[LanguageModelToolDescription | ToolParam],
) -> list[ToolParam]:
    """Normalize a mixed tool sequence into OpenAI Responses ``ToolParam`` entries.

    Toolkit-native ``LanguageModelToolDescription`` items are serialized via
    ``to_openai(mode="responses")``; anything else is assumed to already be an
    OpenAI ``ToolParam`` and is passed through unchanged.
    """
    return [
        tool.to_openai(mode="responses")
        if isinstance(tool, LanguageModelToolDescription)
        else tool
        for tool in tools
    ]
59
+
60
+
61
+ def _convert_message_to_openai(
62
+ message: LanguageModelMessageOptions,
63
+ ) -> ResponseInputParam:
64
+ res = []
65
+ match message:
66
+ case LanguageModelAssistantMessage():
67
+ return message.to_openai(mode="responses") # type: ignore
68
+ case (
69
+ LanguageModelUserMessage()
70
+ | LanguageModelSystemMessage()
71
+ | LanguageModelToolMessage()
72
+ ):
73
+ return [message.to_openai(mode="responses")]
74
+ case _:
75
+ return _convert_message_to_openai(_convert_to_specific_message(message))
76
+ return res
77
+
78
+
79
def _convert_to_specific_message(
    message: LanguageModelMessage,
) -> "LanguageModelSystemMessage | LanguageModelUserMessage | LanguageModelAssistantMessage":
    """Narrow a generic message to the concrete class matching its role.

    Raises:
        ValueError: if the role is ``tool`` — tool results carry extra fields
            and must be constructed as ``LanguageModelToolMessage`` directly.
    """
    role = message.role
    if role == LanguageModelMessageRole.SYSTEM:
        return LanguageModelSystemMessage(content=message.content)
    if role == LanguageModelMessageRole.USER:
        return LanguageModelUserMessage(content=message.content)
    if role == LanguageModelMessageRole.ASSISTANT:
        return LanguageModelAssistantMessage(content=message.content)
    if role == LanguageModelMessageRole.TOOL:
        raise ValueError(
            "Cannot convert message with role `tool`. Please use `LanguageModelToolMessage` instead."
        )
93
+
94
+
95
def _convert_messages_to_openai(
    messages: Sequence[
        ResponseInputItemParam | LanguageModelMessageOptions | ResponseOutputItem
    ],
) -> ResponseInputParam:
    """Flatten a heterogeneous message sequence into Responses input items.

    Accepts toolkit messages, raw OpenAI input dicts, and OpenAI output
    models, producing one flat list of input-item dicts.
    """
    converted = []
    for item in messages:
        if isinstance(item, LanguageModelMessageOptions):
            # Toolkit messages may expand into several input items.
            converted.extend(_convert_message_to_openai(item))
        elif isinstance(item, dict):
            # OpenAI uses plain dicts for inputs — keep as-is.
            converted.append(item)
        else:
            # OpenAI output items are pydantic models; serialize minimally.
            assert isinstance(item, BaseModel)
            converted.append(item.model_dump(exclude_defaults=True))
    return converted
113
+
114
+
115
class _ResponsesParams(NamedTuple):
    """Normalized parameter bundle for a Responses API call.

    Produced by ``_prepare_responses_params_util`` and consumed by
    ``_prepare_responses_args``.
    """

    temperature: float  # clamped to the model's bounds when the model is known
    model_name: str  # plain model identifier (enum members reduced to .name)
    search_context: SearchContext | None  # built from content chunks, if any
    messages: str | ResponseInputParam  # prompt string or OpenAI input items
    tools: list[ToolParam] | None  # tools in OpenAI wire format
    reasoning: Reasoning | None  # reasoning config (effort, etc.)
    text: ResponseTextConfigParam | None  # text/verbosity config
123
+
124
+
125
def _prepare_responses_params_util(
    model_name: LanguageModelName | str,
    content_chunks: list[ContentChunk] | None,
    temperature: float,
    tools: Sequence[LanguageModelToolDescription | ToolParam] | None,
    messages: str
    | LanguageModelMessages
    | Sequence[
        ResponseInputItemParam | LanguageModelMessageOptions | ResponseOutputItem
    ],
    reasoning: Reasoning | None,
    text: ResponseTextConfigParam | None,
    other_options: dict | None = None,
) -> _ResponsesParams:
    """Normalize all model-call inputs into a single ``_ResponsesParams``.

    - Builds a search context from ``content_chunks`` (for reference lookup).
    - Converts toolkit tools and messages into OpenAI Responses wire format.
    - Falls back to ``other_options`` for the reasoning/text configs when the
      keyword arguments are not given (keyword arguments take precedence).
    - For known models, clamps ``temperature`` to the model's bounds and
      applies the model's default ``reasoning_effort`` when none was chosen.
    - Downgrades `minimal` reasoning effort to `low` when a code-interpreter
      tool is present, since the two are incompatible. NOTE: this mutates the
      caller-provided ``reasoning`` dict in place.
    """
    search_context = (
        _to_search_context(content_chunks) if content_chunks is not None else None
    )

    # Enum members are reduced to their plain string name for the SDK.
    model = model_name.name if isinstance(model_name, LanguageModelName) else model_name

    tools_res = _convert_tools_to_openai(tools) if tools is not None else None

    if other_options is not None:
        # Keyword arguments take precedence over values found in other_options.
        reasoning = reasoning or _attempt_extract_reasoning_from_options(other_options)
        text = text or _attempt_extract_verbosity_from_options(other_options)

    if isinstance(model_name, LanguageModelName):
        model_info = LanguageModelInfo.from_name(model_name)

        # Defensive None check, although the annotation declares a plain float.
        if model_info.temperature_bounds is not None and temperature is not None:
            temperature = _clamp_temperature(temperature, model_info.temperature_bounds)

        # Apply the model's default reasoning effort only when none was chosen.
        if (
            reasoning is None
            and model_info.default_options is not None
            and "reasoning_effort" in model_info.default_options
        ):
            reasoning = Reasoning(effort=model_info.default_options["reasoning_effort"])

    if (
        reasoning is not None
        and tools_res is not None
        and any(tool["type"] == "code_interpreter" for tool in tools_res)
        and "effort" in reasoning
        and reasoning["effort"] == "minimal"
    ):
        logger.warning(
            "Code interpreter cannot be used with `minimal` effort. Switching to `low`."
        )
        reasoning["effort"] = (
            "low"  # Code interpreter cannot be used with minimal effort
        )

    # Messages: already-a-string prompts pass through; everything else is
    # converted to OpenAI input items.
    messages_res = None
    if isinstance(messages, LanguageModelMessages):
        messages_res = _convert_messages_to_openai(messages.root)
    elif isinstance(messages, list):
        messages_res = _convert_messages_to_openai(messages)
    else:
        assert isinstance(messages, str)
        messages_res = messages

    return _ResponsesParams(
        temperature, model, search_context, messages_res, tools_res, reasoning, text
    )
191
+
192
+
193
@failsafe(
    failure_return_value=None,
    exceptions=(ValidationError,),
    log_exc_info=False,
    logger=logger,
)
def _attempt_extract_reasoning_from_options(options: dict) -> Reasoning | None:
    """Best-effort extraction of a ``Reasoning`` config from an options dict.

    Recognized keys: ``"reasoning"`` (Responses API) or ``"reasoning_effort"``
    (Completions API); the camelCase ``"reasoningEffort"`` overrides either of
    them when present. Validation errors are swallowed by the ``failsafe``
    decorator, which then returns ``None``.
    """
    candidate = None

    # Responses API
    if "reasoning" in options:
        candidate = options["reasoning"]
    # Completions API
    elif "reasoning_effort" in options:
        candidate = {"effort": options["reasoning_effort"]}
    # The camelCase variant wins over both of the above.
    if "reasoningEffort" in options:
        candidate = {"effort": options["reasoningEffort"]}

    if candidate is None:
        return None
    return TypeAdapter(Reasoning).validate_python(candidate)
216
+
217
+
218
@failsafe(
    failure_return_value=None,
    exceptions=(ValidationError,),
    log_exc_info=False,
    logger=logger,
)
def _attempt_extract_verbosity_from_options(
    options: dict,
) -> ResponseTextConfigParam | None:
    """Best-effort extraction of a text/verbosity config from an options dict.

    Accepts either the Responses API ``"text"`` payload or the Completions API
    ``"verbosity"`` shorthand. Validation errors are swallowed by the
    ``failsafe`` decorator, which then returns ``None``.
    """
    if "text" in options:
        text_config = options["text"]  # Responses API
    elif "verbosity" in options:
        text_config = {"verbosity": options["verbosity"]}  # Completions API
    else:
        return None
    return TypeAdapter(ResponseTextConfigParam).validate_python(text_config)
241
+
242
+
243
def _prepare_responses_args(
    company_id: str,
    user_id: str,
    assistant_message_id: str,
    user_message_id: str,
    chat_id: str,
    assistant_id: str,
    params: _ResponsesParams,
    debug_info: dict | None,
    start_text: str | None,
    include: list[ResponseIncludable] | None,
    instructions: str | None,
    max_output_tokens: int | None,
    metadata: Metadata | None,
    parallel_tool_calls: bool | None,
    tool_choice: response_create_params.ToolChoice | None,
    top_p: float | None,
    other_options: dict | None = None,
) -> dict[str, Any]:
    """Assemble the kwargs dict for the ``unique_sdk.Integrated`` responses call.

    Unique-platform fields sit at the top level; everything destined for the
    OpenAI ``responses.create`` call is nested under ``"options"``. Optional
    values are included only when set, so SDK defaults apply otherwise.
    """
    options: dict[str, Any] = {
        "company_id": company_id,
        "user_id": user_id,
        "model": params.model_name,
    }

    if params.search_context is not None:
        options["searchContext"] = params.search_context

    options["chatId"] = chat_id
    options["assistantId"] = assistant_id
    options["assistantMessageId"] = assistant_message_id
    options["userMessageId"] = user_message_id

    if debug_info is not None:
        options["debugInfo"] = debug_info
    if start_text is not None:
        options["startText"] = start_text

    options["input"] = params.messages

    openai_options: unique_sdk.Integrated.CreateStreamResponsesOpenaiParams = {}

    # Optional OpenAI fields, in a stable insertion order; unset ones are skipped.
    optional_openai_fields = (
        ("temperature", params.temperature),
        ("reasoning", params.reasoning),
        ("text", params.text),
        ("include", include),
        ("instructions", instructions),
        ("max_output_tokens", max_output_tokens),
        ("metadata", metadata),
        ("parallel_tool_calls", parallel_tool_calls),
        ("tool_choice", tool_choice),
        ("tools", params.tools),
        ("top_p", top_p),
    )
    for key, value in optional_openai_fields:
        if value is not None:
            openai_options[key] = value  # type: ignore[literal-required]

    # allow any other openai.resources.responses.Response.create options
    if other_options is not None:
        openai_options.update(other_options)  # type: ignore

    options["options"] = openai_options

    return options
326
+
327
+
328
def stream_responses_with_references(
    *,
    company_id: str,
    user_id: str,
    assistant_message_id: str,
    user_message_id: str,
    chat_id: str,
    assistant_id: str,
    model_name: LanguageModelName | str,
    messages: str
    | LanguageModelMessages
    | Sequence[
        ResponseInputItemParam | LanguageModelMessageOptions | ResponseOutputItem
    ],
    content_chunks: list[ContentChunk] | None = None,
    tools: Sequence[LanguageModelToolDescription | ToolParam] | None = None,
    temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
    debug_info: dict | None = None,
    start_text: str | None = None,
    include: list[ResponseIncludable] | None = None,
    instructions: str | None = None,
    max_output_tokens: int | None = None,
    metadata: Metadata | None = None,
    parallel_tool_calls: bool | None = None,
    text: ResponseTextConfigParam | None = None,
    tool_choice: response_create_params.ToolChoice | None = None,
    top_p: float | None = None,
    reasoning: Reasoning | None = None,
    other_options: dict | None = None,
) -> ResponsesLanguageModelStreamResponse:
    """Stream an OpenAI Responses completion through the Unique platform.

    Normalizes toolkit-level messages, tools and model settings into OpenAI
    wire format, forwards them to ``unique_sdk.Integrated.responses_stream``
    (which streams the answer into the chat), and validates the final payload
    into a ``ResponsesLanguageModelStreamResponse``.
    """
    params = _prepare_responses_params_util(
        model_name=model_name,
        content_chunks=content_chunks,
        temperature=temperature,
        tools=tools,
        messages=messages,
        reasoning=reasoning,
        text=text,
        other_options=other_options,
    )

    args = _prepare_responses_args(
        company_id=company_id,
        user_id=user_id,
        assistant_message_id=assistant_message_id,
        user_message_id=user_message_id,
        chat_id=chat_id,
        assistant_id=assistant_id,
        params=params,
        debug_info=debug_info,
        start_text=start_text,
        include=include,
        instructions=instructions,
        max_output_tokens=max_output_tokens,
        metadata=metadata,
        parallel_tool_calls=parallel_tool_calls,
        tool_choice=tool_choice,
        top_p=top_p,
        other_options=other_options,
    )

    raw_response = unique_sdk.Integrated.responses_stream(**args)
    return ResponsesLanguageModelStreamResponse.model_validate(raw_response)
394
+
395
+
396
async def stream_responses_with_references_async(
    *,
    company_id: str,
    user_id: str,
    assistant_message_id: str,
    user_message_id: str,
    chat_id: str,
    assistant_id: str,
    model_name: LanguageModelName | str,
    messages: str
    | LanguageModelMessages
    | Sequence[
        ResponseInputItemParam | LanguageModelMessageOptions | ResponseOutputItem
    ],
    content_chunks: list[ContentChunk] | None = None,
    tools: Sequence[LanguageModelToolDescription | ToolParam] | None = None,
    temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
    debug_info: dict | None = None,
    start_text: str | None = None,
    include: list[ResponseIncludable] | None = None,
    instructions: str | None = None,
    max_output_tokens: int | None = None,
    metadata: Metadata | None = None,
    parallel_tool_calls: bool | None = None,
    text: ResponseTextConfigParam | None = None,
    tool_choice: response_create_params.ToolChoice | None = None,
    top_p: float | None = None,
    reasoning: Reasoning | None = None,
    other_options: dict | None = None,
) -> ResponsesLanguageModelStreamResponse:
    """Async variant of ``stream_responses_with_references``.

    Same normalization and argument assembly, but awaits
    ``unique_sdk.Integrated.responses_stream_async`` instead of the blocking
    call.
    """
    params = _prepare_responses_params_util(
        model_name=model_name,
        content_chunks=content_chunks,
        temperature=temperature,
        tools=tools,
        messages=messages,
        reasoning=reasoning,
        text=text,
        other_options=other_options,
    )

    args = _prepare_responses_args(
        company_id=company_id,
        user_id=user_id,
        assistant_message_id=assistant_message_id,
        user_message_id=user_message_id,
        chat_id=chat_id,
        assistant_id=assistant_id,
        params=params,
        debug_info=debug_info,
        start_text=start_text,
        include=include,
        instructions=instructions,
        max_output_tokens=max_output_tokens,
        metadata=metadata,
        parallel_tool_calls=parallel_tool_calls,
        tool_choice=tool_choice,
        top_p=top_p,
        other_options=other_options,
    )

    raw_response = await unique_sdk.Integrated.responses_stream_async(**args)
    return ResponsesLanguageModelStreamResponse.model_validate(raw_response)
@@ -208,7 +208,7 @@ class MessageLogDetails(BaseModel):
208
208
  class MessageLog(BaseModel):
209
209
  model_config = model_config
210
210
 
211
- message_log_id: str | None = None
211
+ message_log_id: str | None = Field(default=None, validation_alias="id")
212
212
  message_id: str | None = None
213
213
  status: MessageLogStatus
214
214
  text: str | None = None