unique_toolkit 0.7.9__py3-none-any.whl → 1.33.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (190)
  1. unique_toolkit/__init__.py +36 -3
  2. unique_toolkit/_common/api_calling/human_verification_manager.py +357 -0
  3. unique_toolkit/_common/base_model_type_attribute.py +303 -0
  4. unique_toolkit/_common/chunk_relevancy_sorter/config.py +49 -0
  5. unique_toolkit/_common/chunk_relevancy_sorter/exception.py +5 -0
  6. unique_toolkit/_common/chunk_relevancy_sorter/schemas.py +46 -0
  7. unique_toolkit/_common/chunk_relevancy_sorter/service.py +374 -0
  8. unique_toolkit/_common/chunk_relevancy_sorter/tests/test_service.py +275 -0
  9. unique_toolkit/_common/default_language_model.py +12 -0
  10. unique_toolkit/_common/docx_generator/__init__.py +7 -0
  11. unique_toolkit/_common/docx_generator/config.py +12 -0
  12. unique_toolkit/_common/docx_generator/schemas.py +80 -0
  13. unique_toolkit/_common/docx_generator/service.py +225 -0
  14. unique_toolkit/_common/docx_generator/template/Doc Template.docx +0 -0
  15. unique_toolkit/_common/endpoint_builder.py +368 -0
  16. unique_toolkit/_common/endpoint_requestor.py +480 -0
  17. unique_toolkit/_common/exception.py +24 -0
  18. unique_toolkit/_common/experimental/endpoint_builder.py +368 -0
  19. unique_toolkit/_common/experimental/endpoint_requestor.py +488 -0
  20. unique_toolkit/_common/feature_flags/schema.py +9 -0
  21. unique_toolkit/_common/pydantic/rjsf_tags.py +936 -0
  22. unique_toolkit/_common/pydantic_helpers.py +174 -0
  23. unique_toolkit/_common/referencing.py +53 -0
  24. unique_toolkit/_common/string_utilities.py +140 -0
  25. unique_toolkit/_common/tests/test_referencing.py +521 -0
  26. unique_toolkit/_common/tests/test_string_utilities.py +506 -0
  27. unique_toolkit/_common/token/image_token_counting.py +67 -0
  28. unique_toolkit/_common/token/token_counting.py +204 -0
  29. unique_toolkit/_common/utils/__init__.py +1 -0
  30. unique_toolkit/_common/utils/files.py +43 -0
  31. unique_toolkit/_common/utils/image/encode.py +25 -0
  32. unique_toolkit/_common/utils/jinja/helpers.py +10 -0
  33. unique_toolkit/_common/utils/jinja/render.py +18 -0
  34. unique_toolkit/_common/utils/jinja/schema.py +65 -0
  35. unique_toolkit/_common/utils/jinja/utils.py +80 -0
  36. unique_toolkit/_common/utils/structured_output/__init__.py +1 -0
  37. unique_toolkit/_common/utils/structured_output/schema.py +5 -0
  38. unique_toolkit/_common/utils/write_configuration.py +51 -0
  39. unique_toolkit/_common/validators.py +101 -4
  40. unique_toolkit/agentic/__init__.py +1 -0
  41. unique_toolkit/agentic/debug_info_manager/debug_info_manager.py +28 -0
  42. unique_toolkit/agentic/debug_info_manager/test/test_debug_info_manager.py +278 -0
  43. unique_toolkit/agentic/evaluation/config.py +36 -0
  44. unique_toolkit/{evaluators → agentic/evaluation}/context_relevancy/prompts.py +25 -0
  45. unique_toolkit/agentic/evaluation/context_relevancy/schema.py +80 -0
  46. unique_toolkit/agentic/evaluation/context_relevancy/service.py +273 -0
  47. unique_toolkit/agentic/evaluation/evaluation_manager.py +218 -0
  48. unique_toolkit/agentic/evaluation/hallucination/constants.py +61 -0
  49. unique_toolkit/agentic/evaluation/hallucination/hallucination_evaluation.py +112 -0
  50. unique_toolkit/{evaluators → agentic/evaluation}/hallucination/prompts.py +1 -1
  51. unique_toolkit/{evaluators → agentic/evaluation}/hallucination/service.py +20 -16
  52. unique_toolkit/{evaluators → agentic/evaluation}/hallucination/utils.py +32 -21
  53. unique_toolkit/{evaluators → agentic/evaluation}/output_parser.py +20 -2
  54. unique_toolkit/{evaluators → agentic/evaluation}/schemas.py +27 -7
  55. unique_toolkit/agentic/evaluation/tests/test_context_relevancy_service.py +253 -0
  56. unique_toolkit/agentic/evaluation/tests/test_output_parser.py +87 -0
  57. unique_toolkit/agentic/history_manager/history_construction_with_contents.py +298 -0
  58. unique_toolkit/agentic/history_manager/history_manager.py +241 -0
  59. unique_toolkit/agentic/history_manager/loop_token_reducer.py +484 -0
  60. unique_toolkit/agentic/history_manager/utils.py +96 -0
  61. unique_toolkit/agentic/message_log_manager/__init__.py +5 -0
  62. unique_toolkit/agentic/message_log_manager/service.py +93 -0
  63. unique_toolkit/agentic/postprocessor/postprocessor_manager.py +212 -0
  64. unique_toolkit/agentic/reference_manager/reference_manager.py +103 -0
  65. unique_toolkit/agentic/responses_api/__init__.py +19 -0
  66. unique_toolkit/agentic/responses_api/postprocessors/code_display.py +71 -0
  67. unique_toolkit/agentic/responses_api/postprocessors/generated_files.py +297 -0
  68. unique_toolkit/agentic/responses_api/stream_handler.py +15 -0
  69. unique_toolkit/agentic/short_term_memory_manager/persistent_short_term_memory_manager.py +141 -0
  70. unique_toolkit/agentic/thinking_manager/thinking_manager.py +103 -0
  71. unique_toolkit/agentic/tools/__init__.py +1 -0
  72. unique_toolkit/agentic/tools/a2a/__init__.py +36 -0
  73. unique_toolkit/agentic/tools/a2a/config.py +17 -0
  74. unique_toolkit/agentic/tools/a2a/evaluation/__init__.py +15 -0
  75. unique_toolkit/agentic/tools/a2a/evaluation/_utils.py +66 -0
  76. unique_toolkit/agentic/tools/a2a/evaluation/config.py +55 -0
  77. unique_toolkit/agentic/tools/a2a/evaluation/evaluator.py +260 -0
  78. unique_toolkit/agentic/tools/a2a/evaluation/summarization_user_message.j2 +9 -0
  79. unique_toolkit/agentic/tools/a2a/manager.py +55 -0
  80. unique_toolkit/agentic/tools/a2a/postprocessing/__init__.py +21 -0
  81. unique_toolkit/agentic/tools/a2a/postprocessing/_display_utils.py +240 -0
  82. unique_toolkit/agentic/tools/a2a/postprocessing/_ref_utils.py +84 -0
  83. unique_toolkit/agentic/tools/a2a/postprocessing/config.py +78 -0
  84. unique_toolkit/agentic/tools/a2a/postprocessing/display.py +264 -0
  85. unique_toolkit/agentic/tools/a2a/postprocessing/references.py +101 -0
  86. unique_toolkit/agentic/tools/a2a/postprocessing/test/test_display.py +421 -0
  87. unique_toolkit/agentic/tools/a2a/postprocessing/test/test_display_utils.py +2103 -0
  88. unique_toolkit/agentic/tools/a2a/postprocessing/test/test_ref_utils.py +603 -0
  89. unique_toolkit/agentic/tools/a2a/prompts.py +46 -0
  90. unique_toolkit/agentic/tools/a2a/response_watcher/__init__.py +6 -0
  91. unique_toolkit/agentic/tools/a2a/response_watcher/service.py +91 -0
  92. unique_toolkit/agentic/tools/a2a/tool/__init__.py +4 -0
  93. unique_toolkit/agentic/tools/a2a/tool/_memory.py +26 -0
  94. unique_toolkit/agentic/tools/a2a/tool/_schema.py +9 -0
  95. unique_toolkit/agentic/tools/a2a/tool/config.py +158 -0
  96. unique_toolkit/agentic/tools/a2a/tool/service.py +393 -0
  97. unique_toolkit/agentic/tools/agent_chunks_hanlder.py +65 -0
  98. unique_toolkit/agentic/tools/config.py +128 -0
  99. unique_toolkit/agentic/tools/factory.py +44 -0
  100. unique_toolkit/agentic/tools/mcp/__init__.py +4 -0
  101. unique_toolkit/agentic/tools/mcp/manager.py +71 -0
  102. unique_toolkit/agentic/tools/mcp/models.py +28 -0
  103. unique_toolkit/agentic/tools/mcp/tool_wrapper.py +234 -0
  104. unique_toolkit/agentic/tools/openai_builtin/__init__.py +11 -0
  105. unique_toolkit/agentic/tools/openai_builtin/base.py +46 -0
  106. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/__init__.py +8 -0
  107. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/config.py +88 -0
  108. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/service.py +250 -0
  109. unique_toolkit/agentic/tools/openai_builtin/manager.py +79 -0
  110. unique_toolkit/agentic/tools/schemas.py +145 -0
  111. unique_toolkit/agentic/tools/test/test_mcp_manager.py +536 -0
  112. unique_toolkit/agentic/tools/test/test_tool_progress_reporter.py +445 -0
  113. unique_toolkit/agentic/tools/tool.py +187 -0
  114. unique_toolkit/agentic/tools/tool_manager.py +492 -0
  115. unique_toolkit/agentic/tools/tool_progress_reporter.py +285 -0
  116. unique_toolkit/agentic/tools/utils/__init__.py +19 -0
  117. unique_toolkit/agentic/tools/utils/execution/__init__.py +1 -0
  118. unique_toolkit/agentic/tools/utils/execution/execution.py +286 -0
  119. unique_toolkit/agentic/tools/utils/source_handling/__init__.py +0 -0
  120. unique_toolkit/agentic/tools/utils/source_handling/schema.py +21 -0
  121. unique_toolkit/agentic/tools/utils/source_handling/source_formatting.py +207 -0
  122. unique_toolkit/agentic/tools/utils/source_handling/tests/test_source_formatting.py +216 -0
  123. unique_toolkit/app/__init__.py +9 -0
  124. unique_toolkit/app/dev_util.py +180 -0
  125. unique_toolkit/app/fast_api_factory.py +131 -0
  126. unique_toolkit/app/init_sdk.py +32 -1
  127. unique_toolkit/app/schemas.py +206 -31
  128. unique_toolkit/app/unique_settings.py +367 -0
  129. unique_toolkit/app/webhook.py +77 -0
  130. unique_toolkit/chat/__init__.py +8 -1
  131. unique_toolkit/chat/deprecated/service.py +232 -0
  132. unique_toolkit/chat/functions.py +648 -78
  133. unique_toolkit/chat/rendering.py +34 -0
  134. unique_toolkit/chat/responses_api.py +461 -0
  135. unique_toolkit/chat/schemas.py +134 -2
  136. unique_toolkit/chat/service.py +115 -767
  137. unique_toolkit/content/functions.py +353 -8
  138. unique_toolkit/content/schemas.py +128 -15
  139. unique_toolkit/content/service.py +321 -45
  140. unique_toolkit/content/smart_rules.py +301 -0
  141. unique_toolkit/content/utils.py +10 -3
  142. unique_toolkit/data_extraction/README.md +96 -0
  143. unique_toolkit/data_extraction/__init__.py +11 -0
  144. unique_toolkit/data_extraction/augmented/__init__.py +5 -0
  145. unique_toolkit/data_extraction/augmented/service.py +93 -0
  146. unique_toolkit/data_extraction/base.py +25 -0
  147. unique_toolkit/data_extraction/basic/__init__.py +11 -0
  148. unique_toolkit/data_extraction/basic/config.py +18 -0
  149. unique_toolkit/data_extraction/basic/prompt.py +13 -0
  150. unique_toolkit/data_extraction/basic/service.py +55 -0
  151. unique_toolkit/embedding/service.py +103 -12
  152. unique_toolkit/framework_utilities/__init__.py +1 -0
  153. unique_toolkit/framework_utilities/langchain/__init__.py +10 -0
  154. unique_toolkit/framework_utilities/langchain/client.py +71 -0
  155. unique_toolkit/framework_utilities/langchain/history.py +19 -0
  156. unique_toolkit/framework_utilities/openai/__init__.py +6 -0
  157. unique_toolkit/framework_utilities/openai/client.py +84 -0
  158. unique_toolkit/framework_utilities/openai/message_builder.py +229 -0
  159. unique_toolkit/framework_utilities/utils.py +23 -0
  160. unique_toolkit/language_model/__init__.py +3 -0
  161. unique_toolkit/language_model/_responses_api_utils.py +93 -0
  162. unique_toolkit/language_model/builder.py +27 -11
  163. unique_toolkit/language_model/default_language_model.py +3 -0
  164. unique_toolkit/language_model/functions.py +345 -43
  165. unique_toolkit/language_model/infos.py +1288 -46
  166. unique_toolkit/language_model/reference.py +242 -0
  167. unique_toolkit/language_model/schemas.py +481 -49
  168. unique_toolkit/language_model/service.py +229 -28
  169. unique_toolkit/protocols/support.py +145 -0
  170. unique_toolkit/services/__init__.py +7 -0
  171. unique_toolkit/services/chat_service.py +1631 -0
  172. unique_toolkit/services/knowledge_base.py +1094 -0
  173. unique_toolkit/short_term_memory/service.py +178 -41
  174. unique_toolkit/smart_rules/__init__.py +0 -0
  175. unique_toolkit/smart_rules/compile.py +56 -0
  176. unique_toolkit/test_utilities/events.py +197 -0
  177. unique_toolkit-1.33.3.dist-info/METADATA +1145 -0
  178. unique_toolkit-1.33.3.dist-info/RECORD +205 -0
  179. unique_toolkit/evaluators/__init__.py +0 -1
  180. unique_toolkit/evaluators/config.py +0 -35
  181. unique_toolkit/evaluators/constants.py +0 -1
  182. unique_toolkit/evaluators/context_relevancy/constants.py +0 -32
  183. unique_toolkit/evaluators/context_relevancy/service.py +0 -53
  184. unique_toolkit/evaluators/context_relevancy/utils.py +0 -142
  185. unique_toolkit/evaluators/hallucination/constants.py +0 -41
  186. unique_toolkit-0.7.9.dist-info/METADATA +0 -413
  187. unique_toolkit-0.7.9.dist-info/RECORD +0 -64
  188. /unique_toolkit/{evaluators → agentic/evaluation}/exception.py +0 -0
  189. {unique_toolkit-0.7.9.dist-info → unique_toolkit-1.33.3.dist-info}/LICENSE +0 -0
  190. {unique_toolkit-0.7.9.dist-info → unique_toolkit-1.33.3.dist-info}/WHEEL +0 -0
@@ -1,22 +1,49 @@
1
1
  import json
2
2
  import math
3
3
  from enum import StrEnum
4
- from typing import Any, Optional, Self
4
+ from typing import Any, Literal, Self, TypeVar
5
5
  from uuid import uuid4
6
6
 
7
7
  from humps import camelize
8
+ from openai.types.chat import (
9
+ ChatCompletionAssistantMessageParam,
10
+ ChatCompletionSystemMessageParam,
11
+ ChatCompletionToolMessageParam,
12
+ ChatCompletionUserMessageParam,
13
+ )
14
+ from openai.types.chat.chat_completion_message_function_tool_call_param import (
15
+ ChatCompletionMessageFunctionToolCallParam,
16
+ Function,
17
+ )
18
+ from openai.types.chat.chat_completion_tool_param import ChatCompletionToolParam
19
+ from openai.types.responses import (
20
+ EasyInputMessageParam,
21
+ FunctionToolParam,
22
+ ResponseCodeInterpreterToolCall,
23
+ ResponseFunctionToolCallParam,
24
+ ResponseOutputItem,
25
+ ResponseOutputMessage,
26
+ )
27
+ from openai.types.responses.response_input_param import FunctionCallOutput
28
+ from openai.types.responses.response_output_text import AnnotationContainerFileCitation
29
+ from openai.types.shared_params.function_definition import FunctionDefinition
8
30
  from pydantic import (
9
31
  BaseModel,
10
32
  ConfigDict,
11
33
  Field,
12
34
  PrivateAttr,
13
35
  RootModel,
36
+ field_serializer,
14
37
  field_validator,
15
38
  model_serializer,
16
39
  model_validator,
17
40
  )
18
- from typing_extensions import deprecated
41
+ from typing_extensions import deprecated, overload
19
42
 
43
+ from unique_toolkit.content.schemas import ContentReference
44
+ from unique_toolkit.language_model._responses_api_utils import (
45
+ convert_user_message_content_to_responses_api,
46
+ )
20
47
  from unique_toolkit.language_model.utils import format_message
21
48
 
22
49
  # set config to convert camelCase to snake_case
@@ -27,6 +54,8 @@ model_config = ConfigDict(
27
54
  )
28
55
 
29
56
 
57
+ # Equivalent to
58
+ # from openai.types.chat.chat_completion_role import ChatCompletionRole
30
59
  class LanguageModelMessageRole(StrEnum):
31
60
  USER = "user"
32
61
  SYSTEM = "system"
@@ -34,33 +63,155 @@ class LanguageModelMessageRole(StrEnum):
34
63
  TOOL = "tool"
35
64
 
36
65
 
66
+ # This is tailored to the unique backend
67
+ class LanguageModelStreamResponseMessage(BaseModel):
68
+ model_config = model_config
69
+
70
+ id: str
71
+ previous_message_id: (
72
+ str | None
73
+ ) # Stream response can return a null previous_message_id if an assisstant message is manually added
74
+ role: LanguageModelMessageRole
75
+ text: str
76
+ original_text: str | None = None
77
+ references: list[ContentReference] = []
78
+
79
+ # TODO make sdk return role in lowercase
80
+ # Currently needed as sdk returns role in uppercase
81
+ @field_validator("role", mode="before")
82
+ def set_role(cls, value: str):
83
+ return value.lower()
84
+
85
+
37
86
  class LanguageModelFunction(BaseModel):
38
87
  model_config = model_config
39
88
 
40
- id: str | None = None
89
+ id: str = Field(default_factory=lambda: uuid4().hex)
41
90
  name: str
42
- arguments: Optional[dict[str, Any] | str] = None # type: ignore
91
+ arguments: dict[str, Any] | None = None
43
92
 
44
93
  @field_validator("arguments", mode="before")
45
- def set_arguments(cls, value):
94
+ def set_arguments(cls, value: Any) -> Any:
46
95
  if isinstance(value, str):
47
96
  return json.loads(value)
48
97
  return value
49
98
 
50
99
  @field_validator("id", mode="before")
51
- def randomize_id(cls, value):
52
- return uuid4().hex
100
+ def randomize_id(cls, value: Any) -> Any:
101
+ if value is None or value == "":
102
+ return uuid4().hex
103
+ return value
53
104
 
54
105
  @model_serializer()
55
106
  def serialize_model(self):
56
107
  seralization = {}
57
- if self.id:
58
- seralization["id"] = self.id
59
108
  seralization["name"] = self.name
60
109
  if self.arguments:
61
110
  seralization["arguments"] = json.dumps(self.arguments)
62
111
  return seralization
63
112
 
113
+ def __eq__(self, other: object) -> bool:
114
+ """Compare two tool calls based on name and arguments."""
115
+ if not isinstance(other, LanguageModelFunction):
116
+ return False
117
+
118
+ if self.id != other.id:
119
+ return False
120
+
121
+ if self.name != other.name:
122
+ return False
123
+
124
+ if self.arguments != other.arguments:
125
+ return False
126
+
127
+ return True
128
+
129
+ @overload
130
+ def to_openai_param(
131
+ self, mode: Literal["completions"] = "completions"
132
+ ) -> ChatCompletionMessageFunctionToolCallParam: ...
133
+
134
+ @overload
135
+ def to_openai_param(
136
+ self, mode: Literal["responses"]
137
+ ) -> ResponseFunctionToolCallParam: ...
138
+
139
+ def to_openai_param(
140
+ self, mode: Literal["completions", "responses"] = "completions"
141
+ ) -> ChatCompletionMessageFunctionToolCallParam | ResponseFunctionToolCallParam:
142
+ arguments = ""
143
+ if isinstance(self.arguments, dict):
144
+ arguments = json.dumps(self.arguments)
145
+ elif isinstance(self.arguments, str):
146
+ arguments = self.arguments
147
+
148
+ if mode == "completions":
149
+ return ChatCompletionMessageFunctionToolCallParam(
150
+ type="function",
151
+ id=self.id or "unknown_id",
152
+ function=Function(name=self.name, arguments=arguments),
153
+ )
154
+ elif mode == "responses":
155
+ if self.id is None:
156
+ raise ValueError("Missing tool call id")
157
+
158
+ return ResponseFunctionToolCallParam(
159
+ type="function_call",
160
+ call_id=self.id,
161
+ name=self.name,
162
+ arguments=arguments,
163
+ )
164
+
165
+
166
+ class LanguageModelStreamResponse(BaseModel):
167
+ model_config = model_config
168
+
169
+ message: LanguageModelStreamResponseMessage
170
+ tool_calls: list[LanguageModelFunction] | None = None
171
+
172
+ def is_empty(self) -> bool:
173
+ """
174
+ Check if the stream response is empty.
175
+ An empty stream response has no text and no tool calls.
176
+ """
177
+ return not self.message.original_text and not self.tool_calls
178
+
179
+ def to_openai_param(self) -> ChatCompletionAssistantMessageParam:
180
+ return ChatCompletionAssistantMessageParam(
181
+ role="assistant",
182
+ audio=None,
183
+ content=self.message.text,
184
+ function_call=None,
185
+ refusal=None,
186
+ tool_calls=[t.to_openai_param() for t in self.tool_calls or []],
187
+ )
188
+
189
+
190
+ OutputItemType = TypeVar("OutputItemType", bound=ResponseOutputItem)
191
+
192
+
193
+ class ResponsesLanguageModelStreamResponse(LanguageModelStreamResponse):
194
+ output: list[ResponseOutputItem]
195
+
196
+ def filter_output(self, type: type[OutputItemType]) -> list[OutputItemType]:
197
+ return [item for item in self.output if isinstance(item, type)]
198
+
199
+ @property
200
+ def code_interpreter_calls(self) -> list[ResponseCodeInterpreterToolCall]:
201
+ return self.filter_output(ResponseCodeInterpreterToolCall)
202
+
203
+ @property
204
+ def container_files(self) -> list[AnnotationContainerFileCitation]:
205
+ container_files = []
206
+ messages = self.filter_output(ResponseOutputMessage)
207
+ for message in messages:
208
+ for content in message.content:
209
+ if content.type == "output_text":
210
+ for annotation in content.annotations:
211
+ if annotation.type == "container_file_citation":
212
+ container_files.append(annotation)
213
+ return container_files
214
+
64
215
 
65
216
  class LanguageModelFunctionCall(BaseModel):
66
217
  model_config = model_config
@@ -69,10 +220,12 @@ class LanguageModelFunctionCall(BaseModel):
69
220
  type: str | None = None
70
221
  function: LanguageModelFunction
71
222
 
223
+ # TODO: Circular reference of types
224
+ @deprecated("Use LanguageModelAssistantMessage.from_functions instead.")
72
225
  @staticmethod
73
226
  def create_assistant_message_from_tool_calls(
74
227
  tool_calls: list[LanguageModelFunction],
75
- ):
228
+ ) -> "LanguageModelAssistantMessage":
76
229
  assistant_message = LanguageModelAssistantMessage(
77
230
  content="",
78
231
  tool_calls=[
@@ -93,8 +246,7 @@ class LanguageModelMessage(BaseModel):
93
246
  content: str | list[dict] | None = None
94
247
 
95
248
  def __str__(self):
96
- if not self.content:
97
- message = ""
249
+ message = ""
98
250
  if isinstance(self.content, str):
99
251
  message = self.content
100
252
  elif isinstance(self.content, list):
@@ -110,7 +262,29 @@ class LanguageModelSystemMessage(LanguageModelMessage):
110
262
  def set_role(cls, value):
111
263
  return LanguageModelMessageRole.SYSTEM
112
264
 
265
+ @overload
266
+ def to_openai(
267
+ self, mode: Literal["completions"] = "completions"
268
+ ) -> ChatCompletionSystemMessageParam: ...
113
269
 
270
+ @overload
271
+ def to_openai(self, mode: Literal["responses"]) -> EasyInputMessageParam: ...
272
+
273
+ def to_openai(
274
+ self, mode: Literal["completions", "responses"] = "completions"
275
+ ) -> ChatCompletionSystemMessageParam | EasyInputMessageParam:
276
+ content = self.content or ""
277
+ if not isinstance(content, str):
278
+ raise ValueError("Content must be a string")
279
+
280
+ if mode == "completions":
281
+ return ChatCompletionSystemMessageParam(role="system", content=content)
282
+ elif mode == "responses":
283
+ return EasyInputMessageParam(role="user", content=content)
284
+
285
+
286
+ # Equivalent to
287
+ # from openai.types.chat.chat_completion_user_message_param import ChatCompletionUserMessageParam
114
288
  class LanguageModelUserMessage(LanguageModelMessage):
115
289
  role: LanguageModelMessageRole = LanguageModelMessageRole.USER
116
290
 
@@ -118,7 +292,33 @@ class LanguageModelUserMessage(LanguageModelMessage):
118
292
  def set_role(cls, value):
119
293
  return LanguageModelMessageRole.USER
120
294
 
121
-
295
+ @overload
296
+ def to_openai(
297
+ self, mode: Literal["completions"] = "completions"
298
+ ) -> ChatCompletionUserMessageParam: ...
299
+
300
+ @overload
301
+ def to_openai(self, mode: Literal["responses"]) -> EasyInputMessageParam: ...
302
+
303
+ def to_openai(
304
+ self, mode: Literal["completions", "responses"] = "completions"
305
+ ) -> ChatCompletionUserMessageParam | EasyInputMessageParam:
306
+ if self.content is None:
307
+ content = ""
308
+ else:
309
+ content = self.content
310
+
311
+ if mode == "completions":
312
+ return ChatCompletionUserMessageParam(role="user", content=content) # type: ignore
313
+ elif mode == "responses":
314
+ return EasyInputMessageParam(
315
+ role="user",
316
+ content=convert_user_message_content_to_responses_api(content),
317
+ )
318
+
319
+
320
+ # Equivalent to
321
+ # from openai.types.chat.chat_completion_assistant_message_param import ChatCompletionAssistantMessageParam
122
322
  class LanguageModelAssistantMessage(LanguageModelMessage):
123
323
  role: LanguageModelMessageRole = LanguageModelMessageRole.ASSISTANT
124
324
  parsed: dict | None = None
@@ -129,6 +329,91 @@ class LanguageModelAssistantMessage(LanguageModelMessage):
129
329
  def set_role(cls, value):
130
330
  return LanguageModelMessageRole.ASSISTANT
131
331
 
332
+ @classmethod
333
+ def from_functions(
334
+ cls,
335
+ tool_calls: list[LanguageModelFunction],
336
+ ):
337
+ return cls(
338
+ content="",
339
+ tool_calls=[
340
+ LanguageModelFunctionCall(
341
+ id=tool_call.id,
342
+ type="function",
343
+ function=tool_call,
344
+ )
345
+ for tool_call in tool_calls
346
+ ],
347
+ )
348
+
349
+ @classmethod
350
+ def from_stream_response(cls, response: LanguageModelStreamResponse):
351
+ tool_calls = [
352
+ LanguageModelFunctionCall(
353
+ id=f.id,
354
+ type="function",
355
+ function=f,
356
+ )
357
+ for f in response.tool_calls or []
358
+ ]
359
+
360
+ tool_calls = tool_calls if len(tool_calls) > 0 else None
361
+
362
+ return cls(
363
+ content=response.message.text,
364
+ parsed=None,
365
+ refusal=None,
366
+ tool_calls=tool_calls,
367
+ )
368
+
369
+ @overload
370
+ def to_openai(
371
+ self, mode: Literal["completions"] = "completions"
372
+ ) -> ChatCompletionAssistantMessageParam: ...
373
+
374
+ @overload
375
+ def to_openai(
376
+ self, mode: Literal["responses"]
377
+ ) -> list[EasyInputMessageParam | ResponseFunctionToolCallParam]: ...
378
+
379
+ def to_openai(
380
+ self, mode: Literal["completions", "responses"] = "completions"
381
+ ) -> (
382
+ ChatCompletionAssistantMessageParam
383
+ | list[EasyInputMessageParam | ResponseFunctionToolCallParam]
384
+ ):
385
+ content = self.content or ""
386
+ if not isinstance(content, str):
387
+ raise ValueError("Content must be a string")
388
+
389
+ if mode == "completions":
390
+ return ChatCompletionAssistantMessageParam(
391
+ role="assistant",
392
+ content=content,
393
+ tool_calls=[
394
+ t.function.to_openai_param() for t in self.tool_calls or []
395
+ ],
396
+ )
397
+ elif mode == "responses":
398
+ """
399
+ Responses API does not support assistant messages with tool calls
400
+ """
401
+ res = []
402
+ if content != "":
403
+ res.append(EasyInputMessageParam(role="assistant", content=content))
404
+ if self.tool_calls:
405
+ res.extend(
406
+ [
407
+ t.function.to_openai_param(mode="responses")
408
+ for t in self.tool_calls
409
+ ]
410
+ )
411
+ return res
412
+
413
+
414
+ # Equivalent to
415
+ # from openai.types.chat.chat_completion_tool_message_param import ChatCompletionToolMessageParam
416
+
132
417
 
133
418
  class LanguageModelToolMessage(LanguageModelMessage):
134
419
  role: LanguageModelMessageRole = LanguageModelMessageRole.TOOL
@@ -146,15 +431,84 @@ class LanguageModelToolMessage(LanguageModelMessage):
146
431
  def set_role(cls, value):
147
432
  return LanguageModelMessageRole.TOOL
148
433
 
434
+ @overload
435
+ def to_openai(
436
+ self, mode: Literal["completions"] = "completions"
437
+ ) -> ChatCompletionToolMessageParam: ...
438
+
439
+ @overload
440
+ def to_openai(self, mode: Literal["responses"]) -> FunctionCallOutput: ...
441
+
442
+ def to_openai(
443
+ self, mode: Literal["completions", "responses"] = "completions"
444
+ ) -> ChatCompletionToolMessageParam | FunctionCallOutput:
445
+ content = self.content or ""
446
+ if not isinstance(content, str):
447
+ raise ValueError("Content must be a string")
448
+
449
+ if mode == "completions":
450
+ return ChatCompletionToolMessageParam(
451
+ role="tool",
452
+ content=content,
453
+ tool_call_id=self.tool_call_id,
454
+ )
455
+ elif mode == "responses":
456
+ return FunctionCallOutput(
457
+ call_id=self.tool_call_id,
458
+ output=content,
459
+ type="function_call_output",
460
+ )
461
+
462
+
463
+ # Equivalent implementation for list of
464
+ # from openai.types.chat.chat_completion_tool_message_param import ChatCompletionToolMessageParam
465
+ # with the addition of the builder
466
+
467
+ LanguageModelMessageOptions = (
468
+ LanguageModelMessage
469
+ | LanguageModelToolMessage
470
+ | LanguageModelAssistantMessage
471
+ | LanguageModelSystemMessage
472
+ | LanguageModelUserMessage
473
+ )
474
+
149
475
 
150
476
  class LanguageModelMessages(RootModel):
151
- root: list[
152
- LanguageModelMessage
153
- | LanguageModelToolMessage
154
- | LanguageModelAssistantMessage
155
- | LanguageModelSystemMessage
156
- | LanguageModelUserMessage
157
- ]
477
+ root: list[LanguageModelMessageOptions]
478
+
479
+ @classmethod
480
+ def load_messages_to_root(cls, data: list[dict] | dict) -> Self:
481
+ """Convert list of dictionaries to appropriate message objects based on role."""
482
+ # Handle case where data is already wrapped in root
483
+ if isinstance(data, dict) and "root" in data:
484
+ messages_list = data["root"]
485
+ elif isinstance(data, list):
486
+ messages_list = data
487
+ else:
488
+ raise ValueError("Invalid data type")
489
+
490
+ # Convert the messages list
491
+ converted_messages = []
492
+ for item in messages_list:
493
+ if isinstance(item, dict):
494
+ role = item.get("role", "").lower()
495
+
496
+ # Map dictionary to appropriate message class based on role
497
+ if role == "system":
498
+ converted_messages.append(LanguageModelSystemMessage(**item))
499
+ elif role == "user":
500
+ converted_messages.append(LanguageModelUserMessage(**item))
501
+ elif role == "assistant":
502
+ converted_messages.append(LanguageModelAssistantMessage(**item))
503
+ elif role == "tool":
504
+ converted_messages.append(LanguageModelToolMessage(**item))
505
+ else:
506
+ # Fallback to base LanguageModelMessage
507
+ converted_messages.append(LanguageModelMessage(**item))
508
+ else:
509
+ # If it's already a message object, keep it as is
510
+ converted_messages.append(item)
511
+ return cls(root=converted_messages)
158
512
 
159
513
  def __str__(self):
160
514
  return "\n\n".join([str(message) for message in self.root])
@@ -174,6 +528,11 @@ class LanguageModelMessages(RootModel):
174
528
  return builder
175
529
 
176
530
 
531
+ # This seems similar to
532
+ # from openai.types.completion_choice import CompletionChoice
533
+ # but is missing multiple attributes and uses message instead of text
534
+
535
+
177
536
  class LanguageModelCompletionChoice(BaseModel):
178
537
  model_config = model_config
179
538
 
@@ -182,38 +541,26 @@ class LanguageModelCompletionChoice(BaseModel):
182
541
  finish_reason: str
183
542
 
184
543
 
544
+ # This seems similar to
545
+ # from openai.types.completion import Completion
546
+ # but is missing multiple attributes
185
547
  class LanguageModelResponse(BaseModel):
186
548
  model_config = model_config
187
549
 
188
550
  choices: list[LanguageModelCompletionChoice]
189
551
 
552
+ @classmethod
553
+ def from_stream_response(cls, response: LanguageModelStreamResponse):
554
+ choice = LanguageModelCompletionChoice(
555
+ index=0,
556
+ message=LanguageModelAssistantMessage.from_stream_response(response),
557
+ finish_reason="",
558
+ )
190
559
 
191
- class LanguageModelStreamResponseMessage(BaseModel):
192
- model_config = model_config
193
-
194
- id: str
195
- previous_message_id: (
196
- str | None
197
- ) # Stream response can return a null previous_message_id if an assisstant message is manually added
198
- role: LanguageModelMessageRole
199
- text: str
200
- original_text: str | None = None
201
- references: list[dict[str, list | dict | str | int | float | bool]] = [] # type: ignore
202
-
203
- # TODO make sdk return role in lowercase
204
- # Currently needed as sdk returns role in uppercase
205
- @field_validator("role", mode="before")
206
- def set_role(cls, value: str):
207
- return value.lower()
208
-
209
-
210
- class LanguageModelStreamResponse(BaseModel):
211
- model_config = model_config
212
-
213
- message: LanguageModelStreamResponseMessage
214
- tool_calls: Optional[list[LanguageModelFunction]] = None
560
+ return cls(choices=[choice])
215
561
 
216
562
 
563
+ # This is tailored for unique and only used in language model info
217
564
  class LanguageModelTokenLimits(BaseModel):
218
565
  token_limit_input: int
219
566
  token_limit_output: int
@@ -255,29 +602,46 @@ class LanguageModelTokenLimits(BaseModel):
255
602
 
256
603
  data["token_limit_input"] = math.floor(fraction_input * token_limit)
257
604
  data["token_limit_output"] = math.floor(
258
- (1 - fraction_input) * token_limit
605
+ (1 - fraction_input) * token_limit,
259
606
  )
260
607
  data["_fraction_adaptpable"] = True
261
608
  return data
262
609
 
263
610
  raise ValueError(
264
- 'Either "token_limit_input" and "token_limit_output" must be provided together, or "token_limit" must be provided.'
611
+ 'Either "token_limit_input" and "token_limit_output" must be provided together, or "token_limit" must be provided.',
265
612
  )
266
613
 
267
614
 
615
+ # This is more restrictive than what openai allows
616
+
617
+
618
+ @deprecated(
619
+ "Deprecated as `LanguageModelTool` is deprecated in favor of `LanguageModelToolDescription`",
620
+ )
268
621
  class LanguageModelToolParameterProperty(BaseModel):
269
622
  type: str
270
623
  description: str
271
- enum: Optional[list[Any]] = None
272
- items: Optional[Self] = None
624
+ enum: list[Any] | None = None
625
+ items: Self | None = None
273
626
 
274
627
 
628
+ # Looks most like
629
+ # from openai.types.shared.function_parameters import FunctionParameters
630
+ @deprecated(
631
+ "Deprecated as `LanguageModelTool` is deprecated in favor of `LanguageModelToolDescription`",
632
+ )
275
633
  class LanguageModelToolParameters(BaseModel):
276
634
  type: str = "object"
277
635
  properties: dict[str, LanguageModelToolParameterProperty]
278
636
  required: list[str]
279
637
 
280
638
 
639
+ # Looks most like
640
+ # from openai.types.shared_params.function_definition import FunctionDefinition
641
+ # but returns parameter is not known
642
+ @deprecated(
643
+ "Deprecated as `LanguageModelTool` use `LanguageModelToolDescription` instead",
644
+ )
281
645
  class LanguageModelTool(BaseModel):
282
646
  name: str = Field(
283
647
  ...,
@@ -286,8 +650,76 @@ class LanguageModelTool(BaseModel):
286
650
  )
287
651
  description: str
288
652
  parameters: (
289
- LanguageModelToolParameters | dict
653
+ LanguageModelToolParameters | dict[str, Any]
290
654
  ) # dict represents json schema dumped from pydantic
291
655
  returns: LanguageModelToolParameterProperty | LanguageModelToolParameters | None = (
292
656
  None
293
657
  )
658
+
659
+
660
+ class LanguageModelToolDescription(BaseModel):
661
+ name: str = Field(
662
+ ...,
663
+ pattern=r"^[a-zA-Z1-9_-]+$",
664
+ description="Name must adhere to the pattern ^[a-zA-Z1-9_-]+$",
665
+ )
666
+ description: str = Field(
667
+ ...,
668
+ description="Description of what the tool is doing the tool",
669
+ )
670
+ parameters: type[BaseModel] | dict[str, Any] = Field(
671
+ ...,
672
+ description="Pydantic model for the tool parameters",
673
+ union_mode="left_to_right",
674
+ )
675
+
676
+ # TODO: This should be default `True` but if this is the case the parameter_model needs to include additional properties
677
+ strict: bool = Field(
678
+ default=False,
679
+ description="Setting strict to true will ensure function calls reliably adhere to the function schema, instead of being best effort. If set to True the `parameter_model` set `model_config = {'extra':'forbid'}` must be set for on all BaseModels.",
680
+ )
681
+
682
+ @field_serializer("parameters")
683
+ def serialize_parameters(
684
+ self, parameters: type[BaseModel] | dict[str, Any]
685
+ ) -> dict[str, Any]:
686
+ return _parameters_as_json_schema(parameters)
687
+
688
+ @overload
689
+ def to_openai(
690
+ self, mode: Literal["completions"] = "completions"
691
+ ) -> ChatCompletionToolParam: ...
692
+
693
+ @overload
694
+ def to_openai(self, mode: Literal["responses"]) -> FunctionToolParam: ...
695
+
696
+ def to_openai(
697
+ self, mode: Literal["completions", "responses"] = "completions"
698
+ ) -> ChatCompletionToolParam | FunctionToolParam:
699
+ if mode == "completions":
700
+ return ChatCompletionToolParam(
701
+ function=FunctionDefinition(
702
+ name=self.name,
703
+ description=self.description,
704
+ parameters=_parameters_as_json_schema(self.parameters),
705
+ strict=self.strict,
706
+ ),
707
+ type="function",
708
+ )
709
+ elif mode == "responses":
710
+ return FunctionToolParam(
711
+ type="function",
712
+ name=self.name,
713
+ parameters=_parameters_as_json_schema(self.parameters),
714
+ strict=self.strict,
715
+ description=self.description,
716
+ )
717
+
718
+
719
+ def _parameters_as_json_schema(
720
+ parameters: type[BaseModel] | dict[str, Any],
721
+ ) -> dict[str, Any]:
722
+ if isinstance(parameters, dict):
723
+ return parameters
724
+
725
+ return parameters.model_json_schema()