unique_toolkit 1.8.1__py3-none-any.whl → 1.23.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of unique_toolkit might be problematic.
Files changed (105)
  1. unique_toolkit/__init__.py +20 -0
  2. unique_toolkit/_common/api_calling/human_verification_manager.py +121 -28
  3. unique_toolkit/_common/chunk_relevancy_sorter/config.py +3 -3
  4. unique_toolkit/_common/chunk_relevancy_sorter/tests/test_service.py +2 -5
  5. unique_toolkit/_common/default_language_model.py +9 -3
  6. unique_toolkit/_common/docx_generator/__init__.py +7 -0
  7. unique_toolkit/_common/docx_generator/config.py +12 -0
  8. unique_toolkit/_common/docx_generator/schemas.py +80 -0
  9. unique_toolkit/_common/docx_generator/service.py +252 -0
  10. unique_toolkit/_common/docx_generator/template/Doc Template.docx +0 -0
  11. unique_toolkit/_common/endpoint_builder.py +138 -117
  12. unique_toolkit/_common/endpoint_requestor.py +240 -14
  13. unique_toolkit/_common/exception.py +20 -0
  14. unique_toolkit/_common/feature_flags/schema.py +1 -5
  15. unique_toolkit/_common/referencing.py +53 -0
  16. unique_toolkit/_common/string_utilities.py +52 -1
  17. unique_toolkit/_common/tests/test_referencing.py +521 -0
  18. unique_toolkit/_common/tests/test_string_utilities.py +506 -0
  19. unique_toolkit/_common/utils/files.py +43 -0
  20. unique_toolkit/agentic/debug_info_manager/debug_info_manager.py +16 -6
  21. unique_toolkit/agentic/debug_info_manager/test/test_debug_info_manager.py +278 -0
  22. unique_toolkit/agentic/evaluation/config.py +3 -2
  23. unique_toolkit/agentic/evaluation/context_relevancy/service.py +2 -2
  24. unique_toolkit/agentic/evaluation/evaluation_manager.py +9 -5
  25. unique_toolkit/agentic/evaluation/hallucination/constants.py +1 -1
  26. unique_toolkit/agentic/evaluation/hallucination/hallucination_evaluation.py +26 -3
  27. unique_toolkit/agentic/history_manager/history_manager.py +14 -11
  28. unique_toolkit/agentic/history_manager/loop_token_reducer.py +3 -4
  29. unique_toolkit/agentic/history_manager/utils.py +10 -87
  30. unique_toolkit/agentic/postprocessor/postprocessor_manager.py +107 -16
  31. unique_toolkit/agentic/reference_manager/reference_manager.py +1 -1
  32. unique_toolkit/agentic/responses_api/__init__.py +19 -0
  33. unique_toolkit/agentic/responses_api/postprocessors/code_display.py +63 -0
  34. unique_toolkit/agentic/responses_api/postprocessors/generated_files.py +145 -0
  35. unique_toolkit/agentic/responses_api/stream_handler.py +15 -0
  36. unique_toolkit/agentic/tools/a2a/__init__.py +18 -2
  37. unique_toolkit/agentic/tools/a2a/evaluation/__init__.py +2 -0
  38. unique_toolkit/agentic/tools/a2a/evaluation/_utils.py +3 -3
  39. unique_toolkit/agentic/tools/a2a/evaluation/config.py +1 -1
  40. unique_toolkit/agentic/tools/a2a/evaluation/evaluator.py +143 -91
  41. unique_toolkit/agentic/tools/a2a/manager.py +7 -1
  42. unique_toolkit/agentic/tools/a2a/postprocessing/__init__.py +11 -3
  43. unique_toolkit/agentic/tools/a2a/postprocessing/_display_utils.py +185 -0
  44. unique_toolkit/agentic/tools/a2a/postprocessing/_ref_utils.py +73 -0
  45. unique_toolkit/agentic/tools/a2a/postprocessing/config.py +21 -0
  46. unique_toolkit/agentic/tools/a2a/postprocessing/display.py +180 -0
  47. unique_toolkit/agentic/tools/a2a/postprocessing/references.py +101 -0
  48. unique_toolkit/agentic/tools/a2a/postprocessing/test/test_display_utils.py +1335 -0
  49. unique_toolkit/agentic/tools/a2a/postprocessing/test/test_ref_utils.py +603 -0
  50. unique_toolkit/agentic/tools/a2a/prompts.py +46 -0
  51. unique_toolkit/agentic/tools/a2a/response_watcher/__init__.py +6 -0
  52. unique_toolkit/agentic/tools/a2a/response_watcher/service.py +91 -0
  53. unique_toolkit/agentic/tools/a2a/tool/config.py +15 -5
  54. unique_toolkit/agentic/tools/a2a/tool/service.py +69 -36
  55. unique_toolkit/agentic/tools/config.py +16 -2
  56. unique_toolkit/agentic/tools/factory.py +4 -0
  57. unique_toolkit/agentic/tools/mcp/tool_wrapper.py +7 -35
  58. unique_toolkit/agentic/tools/openai_builtin/__init__.py +11 -0
  59. unique_toolkit/agentic/tools/openai_builtin/base.py +30 -0
  60. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/__init__.py +8 -0
  61. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/config.py +57 -0
  62. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/service.py +230 -0
  63. unique_toolkit/agentic/tools/openai_builtin/manager.py +62 -0
  64. unique_toolkit/agentic/tools/test/test_mcp_manager.py +95 -7
  65. unique_toolkit/agentic/tools/test/test_tool_progress_reporter.py +240 -0
  66. unique_toolkit/agentic/tools/tool.py +0 -11
  67. unique_toolkit/agentic/tools/tool_manager.py +337 -122
  68. unique_toolkit/agentic/tools/tool_progress_reporter.py +81 -15
  69. unique_toolkit/agentic/tools/utils/__init__.py +18 -0
  70. unique_toolkit/agentic/tools/utils/execution/execution.py +8 -4
  71. unique_toolkit/agentic/tools/utils/source_handling/schema.py +1 -1
  72. unique_toolkit/chat/__init__.py +8 -1
  73. unique_toolkit/chat/deprecated/service.py +232 -0
  74. unique_toolkit/chat/functions.py +54 -40
  75. unique_toolkit/chat/rendering.py +34 -0
  76. unique_toolkit/chat/responses_api.py +461 -0
  77. unique_toolkit/chat/schemas.py +1 -1
  78. unique_toolkit/chat/service.py +96 -1569
  79. unique_toolkit/content/functions.py +116 -1
  80. unique_toolkit/content/schemas.py +59 -0
  81. unique_toolkit/content/service.py +5 -37
  82. unique_toolkit/content/smart_rules.py +301 -0
  83. unique_toolkit/framework_utilities/langchain/client.py +27 -3
  84. unique_toolkit/framework_utilities/openai/client.py +12 -1
  85. unique_toolkit/framework_utilities/openai/message_builder.py +85 -1
  86. unique_toolkit/language_model/default_language_model.py +3 -0
  87. unique_toolkit/language_model/functions.py +25 -9
  88. unique_toolkit/language_model/infos.py +72 -4
  89. unique_toolkit/language_model/schemas.py +246 -40
  90. unique_toolkit/protocols/support.py +91 -9
  91. unique_toolkit/services/__init__.py +7 -0
  92. unique_toolkit/services/chat_service.py +1630 -0
  93. unique_toolkit/services/knowledge_base.py +861 -0
  94. unique_toolkit/smart_rules/compile.py +56 -301
  95. unique_toolkit/test_utilities/events.py +197 -0
  96. {unique_toolkit-1.8.1.dist-info → unique_toolkit-1.23.0.dist-info}/METADATA +173 -3
  97. {unique_toolkit-1.8.1.dist-info → unique_toolkit-1.23.0.dist-info}/RECORD +99 -67
  98. unique_toolkit/agentic/tools/a2a/postprocessing/_display.py +0 -122
  99. unique_toolkit/agentic/tools/a2a/postprocessing/_utils.py +0 -19
  100. unique_toolkit/agentic/tools/a2a/postprocessing/postprocessor.py +0 -230
  101. unique_toolkit/agentic/tools/a2a/postprocessing/test/test_consolidate_references.py +0 -665
  102. unique_toolkit/agentic/tools/a2a/postprocessing/test/test_display.py +0 -391
  103. unique_toolkit/agentic/tools/a2a/postprocessing/test/test_postprocessor_reference_functions.py +0 -256
  104. {unique_toolkit-1.8.1.dist-info → unique_toolkit-1.23.0.dist-info}/LICENSE +0 -0
  105. {unique_toolkit-1.8.1.dist-info → unique_toolkit-1.23.0.dist-info}/WHEEL +0 -0
unique_toolkit/language_model/schemas.py
@@ -1,16 +1,31 @@
 import json
 import math
 from enum import StrEnum
-from typing import Any, Self
+from typing import Any, Literal, Self, TypeVar
 from uuid import uuid4
 
 from humps import camelize
-from openai.types.chat import ChatCompletionAssistantMessageParam
+from openai.types.chat import (
+    ChatCompletionAssistantMessageParam,
+    ChatCompletionSystemMessageParam,
+    ChatCompletionToolMessageParam,
+    ChatCompletionUserMessageParam,
+)
 from openai.types.chat.chat_completion_message_function_tool_call_param import (
     ChatCompletionMessageFunctionToolCallParam,
     Function,
 )
 from openai.types.chat.chat_completion_tool_param import ChatCompletionToolParam
+from openai.types.responses import (
+    EasyInputMessageParam,
+    FunctionToolParam,
+    ResponseCodeInterpreterToolCall,
+    ResponseFunctionToolCallParam,
+    ResponseOutputItem,
+    ResponseOutputMessage,
+)
+from openai.types.responses.response_input_param import FunctionCallOutput
+from openai.types.responses.response_output_text import AnnotationContainerFileCitation
 from openai.types.shared_params.function_definition import FunctionDefinition
 from pydantic import (
     BaseModel,
@@ -23,7 +38,7 @@ from pydantic import (
     model_serializer,
     model_validator,
 )
-from typing_extensions import deprecated
+from typing_extensions import deprecated, overload
 
 from unique_toolkit.content.schemas import ContentReference
 from unique_toolkit.language_model.utils import format_message
@@ -68,22 +83,21 @@ class LanguageModelStreamResponseMessage(BaseModel):
 class LanguageModelFunction(BaseModel):
     model_config = model_config
 
-    id: str | None = None
+    id: str = Field(default_factory=lambda: uuid4().hex)
     name: str
-    arguments: dict[str, Any] | str | None = None  # type: ignore
+    arguments: dict[str, Any] | None = None
 
     @field_validator("arguments", mode="before")
-    def set_arguments(cls, value):
+    def set_arguments(cls, value: Any) -> Any:
         if isinstance(value, str):
             return json.loads(value)
         return value
 
     @field_validator("id", mode="before")
-    def randomize_id(cls, value):
-        if not value:
+    def randomize_id(cls, value: Any) -> Any:
+        if value is None or value == "":
             return uuid4().hex
-        else:
-            return value
+        return value
 
     @model_serializer()
     def serialize_model(self):
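
With this hunk, id is no longer Optional: a missing or empty id is replaced with a generated UUID, and string arguments are parsed before validation. A minimal sketch of the resulting behavior, based only on the validators shown above (values are illustrative):

    from unique_toolkit.language_model.schemas import LanguageModelFunction

    # The "arguments" before-validator json.loads-es string payloads,
    # and the "id" before-validator substitutes uuid4().hex for None/"".
    call = LanguageModelFunction(name="search", arguments='{"query": "fx rates"}')
    assert call.arguments == {"query": "fx rates"}
    assert call.id  # auto-generated hex UUID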
@@ -93,7 +107,7 @@ class LanguageModelFunction(BaseModel):
         seralization["arguments"] = json.dumps(self.arguments)
         return seralization
 
-    def __eq__(self, other: Self) -> bool:
+    def __eq__(self, other: object) -> bool:
         """Compare two tool calls based on name and arguments."""
         if not isinstance(other, LanguageModelFunction):
             return False
@@ -109,21 +123,43 @@
 
         return True
 
-    def to_openai_param(self) -> ChatCompletionMessageFunctionToolCallParam:
+    @overload
+    def to_openai_param(
+        self, mode: Literal["completions"] = "completions"
+    ) -> ChatCompletionMessageFunctionToolCallParam: ...
+
+    @overload
+    def to_openai_param(
+        self, mode: Literal["responses"]
+    ) -> ResponseFunctionToolCallParam: ...
+
+    def to_openai_param(
+        self, mode: Literal["completions", "responses"] = "completions"
+    ) -> ChatCompletionMessageFunctionToolCallParam | ResponseFunctionToolCallParam:
         arguments = ""
         if isinstance(self.arguments, dict):
             arguments = json.dumps(self.arguments)
         elif isinstance(self.arguments, str):
             arguments = self.arguments
 
-        return ChatCompletionMessageFunctionToolCallParam(
-            type="function",
-            id=self.id or "unknown_id",
-            function=Function(name=self.name, arguments=arguments),
-        )
+        if mode == "completions":
+            return ChatCompletionMessageFunctionToolCallParam(
+                type="function",
+                id=self.id or "unknown_id",
+                function=Function(name=self.name, arguments=arguments),
+            )
+        elif mode == "responses":
+            if self.id is None:
+                raise ValueError("Missing tool call id")
+
+            return ResponseFunctionToolCallParam(
+                type="function_call",
+                call_id=self.id,
+                name=self.name,
+                arguments=arguments,
+            )
 
 
-# This is tailored to the unique backend
 class LanguageModelStreamResponse(BaseModel):
     model_config = model_config
 
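The overloads above let one tool-call object serialize for either OpenAI API. A short sketch, assuming the schema shown in this hunk:

    from unique_toolkit.language_model.schemas import LanguageModelFunction

    call = LanguageModelFunction(name="search", arguments={"query": "fx rates"})

    # Completions mode (the default) nests a Function payload under type="function";
    # responses mode emits a flat function_call item keyed by call_id.
    chat_param = call.to_openai_param()
    responses_param = call.to_openai_param(mode="responses")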
@@ -148,6 +184,32 @@ class LanguageModelStreamResponse(BaseModel):
     )
 
 
+OutputItemType = TypeVar("OutputItemType", bound=ResponseOutputItem)
+
+
+class ResponsesLanguageModelStreamResponse(LanguageModelStreamResponse):
+    output: list[ResponseOutputItem]
+
+    def filter_output(self, type: type[OutputItemType]) -> list[OutputItemType]:
+        return [item for item in self.output if isinstance(item, type)]
+
+    @property
+    def code_interpreter_calls(self) -> list[ResponseCodeInterpreterToolCall]:
+        return self.filter_output(ResponseCodeInterpreterToolCall)
+
+    @property
+    def container_files(self) -> list[AnnotationContainerFileCitation]:
+        container_files = []
+        messages = self.filter_output(ResponseOutputMessage)
+        for message in messages:
+            for content in message.content:
+                if content.type == "output_text":
+                    for annotation in content.annotations:
+                        if annotation.type == "container_file_citation":
+                            container_files.append(annotation)
+        return container_files
+
+
 class LanguageModelFunctionCall(BaseModel):
     model_config = model_config
 
@@ -190,7 +252,6 @@ class LanguageModelMessage(BaseModel):
         return format_message(self.role.capitalize(), message=message, num_tabs=1)
 
 
-# Equivalent to
 class LanguageModelSystemMessage(LanguageModelMessage):
     role: LanguageModelMessageRole = LanguageModelMessageRole.SYSTEM
 
@@ -198,6 +259,26 @@ class LanguageModelSystemMessage(LanguageModelMessage):
     def set_role(cls, value):
         return LanguageModelMessageRole.SYSTEM
 
+    @overload
+    def to_openai(
+        self, mode: Literal["completions"] = "completions"
+    ) -> ChatCompletionSystemMessageParam: ...
+
+    @overload
+    def to_openai(self, mode: Literal["responses"]) -> EasyInputMessageParam: ...
+
+    def to_openai(
+        self, mode: Literal["completions", "responses"] = "completions"
+    ) -> ChatCompletionSystemMessageParam | EasyInputMessageParam:
+        content = self.content or ""
+        if not isinstance(content, str):
+            raise ValueError("Content must be a string")
+
+        if mode == "completions":
+            return ChatCompletionSystemMessageParam(role="system", content=content)
+        elif mode == "responses":
+            return EasyInputMessageParam(role="user", content=content)
+
 
 # Equivalent to
 # from openai.types.chat.chat_completion_user_message_param import ChatCompletionUserMessageParam
@@ -210,6 +291,26 @@ class LanguageModelUserMessage(LanguageModelMessage):
     def set_role(cls, value):
         return LanguageModelMessageRole.USER
 
+    @overload
+    def to_openai(
+        self, mode: Literal["completions"] = "completions"
+    ) -> ChatCompletionUserMessageParam: ...
+
+    @overload
+    def to_openai(self, mode: Literal["responses"]) -> EasyInputMessageParam: ...
+
+    def to_openai(
+        self, mode: Literal["completions", "responses"] = "completions"
+    ) -> ChatCompletionUserMessageParam | EasyInputMessageParam:
+        content = self.content or ""
+        if not isinstance(content, str):
+            raise ValueError("Content must be a string")
+
+        if mode == "completions":
+            return ChatCompletionUserMessageParam(role="user", content=content)
+        elif mode == "responses":
+            return EasyInputMessageParam(role="user", content=content)
+
 
 # Equivalent to
 # from openai.types.chat.chat_completion_assistant_message_param import ChatCompletionAssistantMessageParam
@@ -244,8 +345,8 @@ class LanguageModelAssistantMessage(LanguageModelMessage):
     def from_stream_response(cls, response: LanguageModelStreamResponse):
         tool_calls = [
             LanguageModelFunctionCall(
-                id=None,
-                type=None,
+                id=f.id,
+                type="function",
                 function=f,
             )
             for f in response.tool_calls or []
@@ -260,6 +361,50 @@
             tool_calls=tool_calls,
         )
 
+    @overload
+    def to_openai(
+        self, mode: Literal["completions"] = "completions"
+    ) -> ChatCompletionAssistantMessageParam: ...
+
+    @overload
+    def to_openai(
+        self, mode: Literal["responses"]
+    ) -> list[EasyInputMessageParam | ResponseFunctionToolCallParam]: ...
+
+    def to_openai(
+        self, mode: Literal["completions", "responses"] = "completions"
+    ) -> (
+        ChatCompletionAssistantMessageParam
+        | list[EasyInputMessageParam | ResponseFunctionToolCallParam]
+    ):
+        content = self.content or ""
+        if not isinstance(content, str):
+            raise ValueError("Content must be a string")
+
+        if mode == "completions":
+            return ChatCompletionAssistantMessageParam(
+                role="assistant",
+                content=content,
+                tool_calls=[
+                    t.function.to_openai_param() for t in self.tool_calls or []
+                ],
+            )
+        elif mode == "responses":
+            """
+            Responses API does not support assistant messages with tool calls
+            """
+            res = []
+            if content != "":
+                res.append(EasyInputMessageParam(role="assistant", content=content))
+            if self.tool_calls:
+                res.extend(
+                    [
+                        t.function.to_openai_param(mode="responses")
+                        for t in self.tool_calls
+                    ]
+                )
+            return res
+
 
 # Equivalent to
 # from openai.types.chat.chat_completion_tool_message_param import ChatCompletionToolMessageParam
@@ -281,20 +426,50 @@ class LanguageModelToolMessage(LanguageModelMessage):
     def set_role(cls, value):
         return LanguageModelMessageRole.TOOL
 
+    @overload
+    def to_openai(
+        self, mode: Literal["completions"] = "completions"
+    ) -> ChatCompletionToolMessageParam: ...
+
+    @overload
+    def to_openai(self, mode: Literal["responses"]) -> FunctionCallOutput: ...
+
+    def to_openai(
+        self, mode: Literal["completions", "responses"] = "completions"
+    ) -> ChatCompletionToolMessageParam | FunctionCallOutput:
+        content = self.content or ""
+        if not isinstance(content, str):
+            raise ValueError("Content must be a string")
+
+        if mode == "completions":
+            return ChatCompletionToolMessageParam(
+                role="tool",
+                content=content,
+                tool_call_id=self.tool_call_id,
+            )
+        elif mode == "responses":
+            return FunctionCallOutput(
+                call_id=self.tool_call_id,
+                output=content,
+                type="function_call_output",
+            )
+
 
 # Equivalent implementation for list of
 # from openai.types.chat.chat_completion_tool_message_param import ChatCompletionToolMessageParam
 # with the addition of the builder
 
+LanguageModelMessageOptions = (
+    LanguageModelMessage
+    | LanguageModelToolMessage
+    | LanguageModelAssistantMessage
+    | LanguageModelSystemMessage
+    | LanguageModelUserMessage
+)
+
 
 class LanguageModelMessages(RootModel):
-    root: list[
-        LanguageModelMessage
-        | LanguageModelToolMessage
-        | LanguageModelAssistantMessage
-        | LanguageModelSystemMessage
-        | LanguageModelUserMessage
-    ]
+    root: list[LanguageModelMessageOptions]
 
     @classmethod
     def load_messages_to_root(cls, data: list[dict] | dict) -> Self:
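
With the new LanguageModelMessageOptions alias and the per-class to_openai overloads, a whole history can be converted for either API. A sketch under the signatures above, assuming the message models accept content directly as in the base class; note that an assistant message in responses mode returns a list (message plus tool calls), so results are flattened:

    from unique_toolkit.language_model.schemas import (
        LanguageModelAssistantMessage,
        LanguageModelSystemMessage,
        LanguageModelUserMessage,
    )

    history = [
        LanguageModelSystemMessage(content="You are terse."),
        LanguageModelUserMessage(content="Ping?"),
        LanguageModelAssistantMessage(content="Pong."),
    ]

    completions_input = [m.to_openai() for m in history]

    responses_input = []
    for m in history:
        out = m.to_openai(mode="responses")
        responses_input.extend(out if isinstance(out, list) else [out])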
@@ -487,9 +662,10 @@ class LanguageModelToolDescription(BaseModel):
         ...,
         description="Description of what the tool is doing the tool",
     )
-    parameters: type[BaseModel] = Field(
+    parameters: type[BaseModel] | dict[str, Any] = Field(
         ...,
         description="Pydantic model for the tool parameters",
+        union_mode="left_to_right",
     )
 
     # TODO: This should be default `True` but if this is the case the parameter_model needs to include additional properties
@@ -499,16 +675,46 @@
     )
 
     @field_serializer("parameters")
-    def serialize_parameters(self, parameters: type[BaseModel]):
-        return parameters.model_json_schema()
-
-    def to_openai(self) -> ChatCompletionToolParam:
-        return ChatCompletionToolParam(
-            function=FunctionDefinition(
+    def serialize_parameters(
+        self, parameters: type[BaseModel] | dict[str, Any]
+    ) -> dict[str, Any]:
+        return _parameters_as_json_schema(parameters)
+
+    @overload
+    def to_openai(
+        self, mode: Literal["completions"] = "completions"
+    ) -> ChatCompletionToolParam: ...
+
+    @overload
+    def to_openai(self, mode: Literal["responses"]) -> FunctionToolParam: ...
+
+    def to_openai(
+        self, mode: Literal["completions", "responses"] = "completions"
+    ) -> ChatCompletionToolParam | FunctionToolParam:
+        if mode == "completions":
+            return ChatCompletionToolParam(
+                function=FunctionDefinition(
+                    name=self.name,
+                    description=self.description,
+                    parameters=_parameters_as_json_schema(self.parameters),
+                    strict=self.strict,
+                ),
+                type="function",
+            )
+        elif mode == "responses":
+            return FunctionToolParam(
+                type="function",
                 name=self.name,
-                description=self.description,
-                parameters=self.parameters.model_json_schema(),
+                parameters=_parameters_as_json_schema(self.parameters),
                 strict=self.strict,
-            ),
-            type="function",
-        )
+                description=self.description,
+            )
+
+
+def _parameters_as_json_schema(
+    parameters: type[BaseModel] | dict[str, Any],
+) -> dict[str, Any]:
+    if isinstance(parameters, dict):
+        return parameters
+
+    return parameters.model_json_schema()
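
The parameters field now accepts either a Pydantic model class or a prebuilt JSON-schema dict; both funnel through _parameters_as_json_schema when serialized. A sketch, assuming strict keeps a default value (its definition is outside this hunk):

    from pydantic import BaseModel
    from unique_toolkit.language_model.schemas import LanguageModelToolDescription

    class SearchParams(BaseModel):
        query: str

    tool = LanguageModelToolDescription(
        name="search",
        description="Full-text document search",
        parameters=SearchParams,  # a dict like SearchParams.model_json_schema() also works now
    )

    chat_tool = tool.to_openai()                       # ChatCompletionToolParam
    responses_tool = tool.to_openai(mode="responses")  # FunctionToolParam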
unique_toolkit/protocols/support.py
@@ -1,4 +1,18 @@
-from typing import Any, Awaitable, Protocol
+from typing import Awaitable, Protocol, Sequence
+
+from openai.types.chat import (
+    ChatCompletionMessageParam,
+    ChatCompletionToolChoiceOptionParam,
+)
+from openai.types.responses import (
+    ResponseIncludable,
+    ResponseInputItemParam,
+    ResponseOutputItem,
+    ResponseTextConfigParam,
+    ToolParam,
+    response_create_params,
+)
+from openai.types.shared_params import Metadata, Reasoning
 
 from unique_toolkit.content import ContentChunk
 from unique_toolkit.language_model import (
@@ -13,6 +27,10 @@ from unique_toolkit.language_model.constants import (
     DEFAULT_COMPLETE_TEMPERATURE,
     DEFAULT_COMPLETE_TIMEOUT,
 )
+from unique_toolkit.language_model.schemas import (
+    LanguageModelMessageOptions,
+    ResponsesLanguageModelStreamResponse,
+)
 
 # As soon as we have multiple, remember
 # https://pypi.org/project/typing-protocol-intersection/
@@ -35,7 +53,7 @@ class SupportsComplete(Protocol):
         model_name: LanguageModelName | str,
         temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
         timeout: int = DEFAULT_COMPLETE_TIMEOUT,
-        tools: list[LanguageModelTool | LanguageModelToolDescription] | None = None,
+        tools: Sequence[LanguageModelTool | LanguageModelToolDescription] | None = None,
     ) -> Awaitable[LanguageModelResponse]: ...
 
 
@@ -45,19 +63,83 @@ class SupportCompleteWithReferences(Protocol):
         messages: LanguageModelMessages,
         model_name: LanguageModelName | str,
         content_chunks: list[ContentChunk] | None = None,
-        debug_info: dict[str, Any] = {},
+        debug_info: dict | None = None,
         temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
         timeout: int = DEFAULT_COMPLETE_TIMEOUT,
-        tools: list[LanguageModelTool | LanguageModelToolDescription] | None = None,
+        tools: Sequence[LanguageModelTool | LanguageModelToolDescription] | None = None,
+        start_text: str | None = None,
+        tool_choice: ChatCompletionToolChoiceOptionParam | None = None,
+        other_options: dict | None = None,
     ) -> LanguageModelStreamResponse: ...
 
-    def complete_with_references_async(
+    async def complete_with_references_async(
         self,
-        messages: LanguageModelMessages,
+        messages: LanguageModelMessages | list[ChatCompletionMessageParam],
         model_name: LanguageModelName | str,
         content_chunks: list[ContentChunk] | None = None,
-        debug_info: dict[str, Any] = {},
+        debug_info: dict | None = None,
         temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
         timeout: int = DEFAULT_COMPLETE_TIMEOUT,
-        tools: list[LanguageModelTool | LanguageModelToolDescription] | None = None,
-    ) -> Awaitable[LanguageModelStreamResponse]: ...
+        tools: Sequence[LanguageModelTool | LanguageModelToolDescription] | None = None,
+        tool_choice: ChatCompletionToolChoiceOptionParam | None = None,
+        start_text: str | None = None,
+        other_options: dict | None = None,
+    ) -> LanguageModelStreamResponse: ...
+
+
+class ResponsesSupportCompleteWithReferences(Protocol):
+    def complete_with_references(
+        self,
+        *,
+        model_name: LanguageModelName | str,
+        messages: str
+        | LanguageModelMessages
+        | Sequence[
+            ResponseInputItemParam
+            | LanguageModelMessageOptions
+            | ResponseOutputItem  # History is automatically convertible
+        ],
+        content_chunks: list[ContentChunk] | None = None,
+        tools: Sequence[LanguageModelToolDescription | ToolParam] | None = None,
+        temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
+        debug_info: dict | None = None,
+        start_text: str | None = None,
+        include: list[ResponseIncludable] | None = None,
+        instructions: str | None = None,
+        max_output_tokens: int | None = None,
+        metadata: Metadata | None = None,
+        parallel_tool_calls: bool | None = None,
+        text: ResponseTextConfigParam | None = None,
+        tool_choice: response_create_params.ToolChoice | None = None,
+        top_p: float | None = None,
+        reasoning: Reasoning | None = None,
+        other_options: dict | None = None,
+    ) -> ResponsesLanguageModelStreamResponse: ...
+
+    async def complete_with_references_async(
+        self,
+        *,
+        model_name: LanguageModelName | str,
+        messages: str
+        | LanguageModelMessages
+        | Sequence[
+            ResponseInputItemParam
+            | LanguageModelMessageOptions
+            | ResponseOutputItem  # History is automatically convertible
+        ],
+        content_chunks: list[ContentChunk] | None = None,
+        tools: Sequence[LanguageModelToolDescription | ToolParam] | None = None,
+        temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
+        debug_info: dict | None = None,
+        start_text: str | None = None,
+        include: list[ResponseIncludable] | None = None,
+        instructions: str | None = None,
+        max_output_tokens: int | None = None,
+        metadata: Metadata | None = None,
+        parallel_tool_calls: bool | None = None,
+        text: ResponseTextConfigParam | None = None,
+        tool_choice: response_create_params.ToolChoice | None = None,
+        top_p: float | None = None,
+        reasoning: Reasoning | None = None,
+        other_options: dict | None = None,
+    ) -> ResponsesLanguageModelStreamResponse: ...
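
These are structural Protocols, so any object with matching method signatures satisfies them without inheriting from them. A hypothetical caller (the run function and the model-name string are placeholders, not package APIs):

    from unique_toolkit.language_model import LanguageModelMessages
    from unique_toolkit.protocols.support import SupportCompleteWithReferences

    def run(svc: SupportCompleteWithReferences, messages: LanguageModelMessages):
        # Type-checks against the Protocol; e.g. a chat service exposing a
        # compatible complete_with_references signature can be passed here.
        return svc.complete_with_references(
            messages=messages,
            model_name="some-model-name",  # placeholder
        )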
unique_toolkit/services/__init__.py
@@ -0,0 +1,7 @@
+from unique_toolkit.services.chat_service import ChatService
+from unique_toolkit.services.knowledge_base import KnowledgeBaseService
+
+__all__ = [
+    "ChatService",
+    "KnowledgeBaseService",
+]
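
This new module re-exports the two services added in this release, so downstream code can use the aggregate path:

    # New import path introduced by unique_toolkit 1.23.0
    from unique_toolkit.services import ChatService, KnowledgeBaseService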