unique_toolkit 1.14.11__py3-none-any.whl → 1.16.0__py3-none-any.whl

This diff compares publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
Files changed (25)
  1. unique_toolkit/_common/api_calling/human_verification_manager.py +95 -15
  2. unique_toolkit/agentic/postprocessor/postprocessor_manager.py +50 -11
  3. unique_toolkit/agentic/responses_api/__init__.py +19 -0
  4. unique_toolkit/agentic/responses_api/postprocessors/code_display.py +63 -0
  5. unique_toolkit/agentic/responses_api/postprocessors/generated_files.py +145 -0
  6. unique_toolkit/agentic/responses_api/stream_handler.py +15 -0
  7. unique_toolkit/agentic/tools/factory.py +4 -0
  8. unique_toolkit/agentic/tools/openai_builtin/__init__.py +11 -0
  9. unique_toolkit/agentic/tools/openai_builtin/base.py +30 -0
  10. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/__init__.py +8 -0
  11. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/config.py +57 -0
  12. unique_toolkit/agentic/tools/openai_builtin/code_interpreter/service.py +230 -0
  13. unique_toolkit/agentic/tools/openai_builtin/manager.py +62 -0
  14. unique_toolkit/agentic/tools/tool_manager.py +257 -127
  15. unique_toolkit/chat/functions.py +15 -6
  16. unique_toolkit/chat/responses_api.py +461 -0
  17. unique_toolkit/language_model/functions.py +25 -9
  18. unique_toolkit/language_model/schemas.py +222 -27
  19. unique_toolkit/protocols/support.py +91 -9
  20. unique_toolkit/services/__init__.py +7 -0
  21. unique_toolkit/services/chat_service.py +139 -7
  22. {unique_toolkit-1.14.11.dist-info → unique_toolkit-1.16.0.dist-info}/METADATA +8 -1
  23. {unique_toolkit-1.14.11.dist-info → unique_toolkit-1.16.0.dist-info}/RECORD +25 -13
  24. {unique_toolkit-1.14.11.dist-info → unique_toolkit-1.16.0.dist-info}/LICENSE +0 -0
  25. {unique_toolkit-1.14.11.dist-info → unique_toolkit-1.16.0.dist-info}/WHEEL +0 -0
unique_toolkit/language_model/schemas.py

@@ -1,16 +1,31 @@
 import json
 import math
 from enum import StrEnum
-from typing import Any, Self
+from typing import Any, Literal, Self, TypeVar
 from uuid import uuid4
 
 from humps import camelize
-from openai.types.chat import ChatCompletionAssistantMessageParam
+from openai.types.chat import (
+    ChatCompletionAssistantMessageParam,
+    ChatCompletionSystemMessageParam,
+    ChatCompletionToolMessageParam,
+    ChatCompletionUserMessageParam,
+)
 from openai.types.chat.chat_completion_message_function_tool_call_param import (
     ChatCompletionMessageFunctionToolCallParam,
     Function,
 )
 from openai.types.chat.chat_completion_tool_param import ChatCompletionToolParam
+from openai.types.responses import (
+    EasyInputMessageParam,
+    FunctionToolParam,
+    ResponseCodeInterpreterToolCall,
+    ResponseFunctionToolCallParam,
+    ResponseOutputItem,
+    ResponseOutputMessage,
+)
+from openai.types.responses.response_input_param import FunctionCallOutput
+from openai.types.responses.response_output_text import AnnotationContainerFileCitation
 from openai.types.shared_params.function_definition import FunctionDefinition
 from pydantic import (
     BaseModel,
@@ -23,7 +38,7 @@ from pydantic import (
     model_serializer,
     model_validator,
 )
-from typing_extensions import deprecated
+from typing_extensions import deprecated, overload
 
 from unique_toolkit.content.schemas import ContentReference
 from unique_toolkit.language_model.utils import format_message
@@ -109,21 +124,43 @@ class LanguageModelFunction(BaseModel):
 
         return True
 
-    def to_openai_param(self) -> ChatCompletionMessageFunctionToolCallParam:
+    @overload
+    def to_openai_param(
+        self, mode: Literal["completions"] = "completions"
+    ) -> ChatCompletionMessageFunctionToolCallParam: ...
+
+    @overload
+    def to_openai_param(
+        self, mode: Literal["responses"]
+    ) -> ResponseFunctionToolCallParam: ...
+
+    def to_openai_param(
+        self, mode: Literal["completions", "responses"] = "completions"
+    ) -> ChatCompletionMessageFunctionToolCallParam | ResponseFunctionToolCallParam:
         arguments = ""
         if isinstance(self.arguments, dict):
             arguments = json.dumps(self.arguments)
         elif isinstance(self.arguments, str):
             arguments = self.arguments
 
-        return ChatCompletionMessageFunctionToolCallParam(
-            type="function",
-            id=self.id or "unknown_id",
-            function=Function(name=self.name, arguments=arguments),
-        )
+        if mode == "completions":
+            return ChatCompletionMessageFunctionToolCallParam(
+                type="function",
+                id=self.id or "unknown_id",
+                function=Function(name=self.name, arguments=arguments),
+            )
+        elif mode == "responses":
+            if self.id is None:
+                raise ValueError("Missing tool call id")
+
+            return ResponseFunctionToolCallParam(
+                type="function_call",
+                call_id=self.id,
+                name=self.name,
+                arguments=arguments,
+            )
 
 
-# This is tailored to the unique backend
 class LanguageModelStreamResponse(BaseModel):
     model_config = model_config
 
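The hunk above makes `to_openai_param` dual-mode: the default `"completions"` keeps the old Chat Completions shape, while `"responses"` emits a flat Responses API `function_call` item and requires a tool-call id. A minimal sketch of both call paths, assuming `LanguageModelFunction` is constructed with the `id`, `name`, and `arguments` fields the hunk reads:

```python
from unique_toolkit.language_model.schemas import LanguageModelFunction

# Hypothetical tool call; the field values are illustrative only.
call = LanguageModelFunction(
    id="call_abc123",
    name="get_weather",
    arguments={"city": "Zurich"},  # dict arguments are json.dumps'ed
)

# Default mode: Chat Completions param with a nested "function" object.
completions_param = call.to_openai_param()

# Responses mode: flat "function_call" item; raises ValueError if id is None.
responses_param = call.to_openai_param(mode="responses")
```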
@@ -148,6 +185,32 @@ class LanguageModelStreamResponse(BaseModel):
         )
 
 
+OutputItemType = TypeVar("OutputItemType", bound=ResponseOutputItem)
+
+
+class ResponsesLanguageModelStreamResponse(LanguageModelStreamResponse):
+    output: list[ResponseOutputItem]
+
+    def filter_output(self, type: type[OutputItemType]) -> list[OutputItemType]:
+        return [item for item in self.output if isinstance(item, type)]
+
+    @property
+    def code_interpreter_calls(self) -> list[ResponseCodeInterpreterToolCall]:
+        return self.filter_output(ResponseCodeInterpreterToolCall)
+
+    @property
+    def container_files(self) -> list[AnnotationContainerFileCitation]:
+        container_files = []
+        messages = self.filter_output(ResponseOutputMessage)
+        for message in messages:
+            for content in message.content:
+                if content.type == "output_text":
+                    for annotation in content.annotations:
+                        if annotation.type == "container_file_citation":
+                            container_files.append(annotation)
+        return container_files
+
+
 class LanguageModelFunctionCall(BaseModel):
     model_config = model_config
 
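`filter_output` narrows the mixed `output` list by runtime type, and the two properties build on it. A sketch of how a caller might pull code-interpreter artifacts out of a finished response, assuming `response` already holds a populated `ResponsesLanguageModelStreamResponse`:

```python
from openai.types.responses import ResponseCodeInterpreterToolCall

# Type-narrowed view of the output items.
code_calls = response.filter_output(ResponseCodeInterpreterToolCall)
assert code_calls == response.code_interpreter_calls  # same via the property

# Container-file citations collected from output_text annotations.
for citation in response.container_files:
    print(citation.file_id, citation.filename)
```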
@@ -190,7 +253,6 @@ class LanguageModelMessage(BaseModel):
         return format_message(self.role.capitalize(), message=message, num_tabs=1)
 
 
-# Equivalent to
 class LanguageModelSystemMessage(LanguageModelMessage):
     role: LanguageModelMessageRole = LanguageModelMessageRole.SYSTEM
 
@@ -198,6 +260,26 @@ class LanguageModelSystemMessage(LanguageModelMessage):
     def set_role(cls, value):
         return LanguageModelMessageRole.SYSTEM
 
+    @overload
+    def to_openai(
+        self, mode: Literal["completions"] = "completions"
+    ) -> ChatCompletionSystemMessageParam: ...
+
+    @overload
+    def to_openai(self, mode: Literal["responses"]) -> EasyInputMessageParam: ...
+
+    def to_openai(
+        self, mode: Literal["completions", "responses"] = "completions"
+    ) -> ChatCompletionSystemMessageParam | EasyInputMessageParam:
+        content = self.content or ""
+        if not isinstance(content, str):
+            raise ValueError("Content must be a string")
+
+        if mode == "completions":
+            return ChatCompletionSystemMessageParam(role="system", content=content)
+        elif mode == "responses":
+            return EasyInputMessageParam(role="user", content=content)
+
 
 # Equivalent to
 # from openai.types.chat.chat_completion_user_message_param import ChatCompletionUserMessageParam
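Note that in `"responses"` mode the system message is emitted with `role="user"`, exactly as written in the hunk above. A quick sketch, assuming `content` is set via the field inherited from `LanguageModelMessage`:

```python
from unique_toolkit.language_model.schemas import LanguageModelSystemMessage

msg = LanguageModelSystemMessage(content="Answer in one sentence.")

msg.to_openai()                  # {"role": "system", "content": "..."}
msg.to_openai(mode="responses")  # EasyInputMessageParam, emitted with role="user"
```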
@@ -210,6 +292,26 @@ class LanguageModelUserMessage(LanguageModelMessage):
     def set_role(cls, value):
         return LanguageModelMessageRole.USER
 
+    @overload
+    def to_openai(
+        self, mode: Literal["completions"] = "completions"
+    ) -> ChatCompletionUserMessageParam: ...
+
+    @overload
+    def to_openai(self, mode: Literal["responses"]) -> EasyInputMessageParam: ...
+
+    def to_openai(
+        self, mode: Literal["completions", "responses"] = "completions"
+    ) -> ChatCompletionUserMessageParam | EasyInputMessageParam:
+        content = self.content or ""
+        if not isinstance(content, str):
+            raise ValueError("Content must be a string")
+
+        if mode == "completions":
+            return ChatCompletionUserMessageParam(role="user", content=content)
+        elif mode == "responses":
+            return EasyInputMessageParam(role="user", content=content)
+
 
 # Equivalent to
 # from openai.types.chat.chat_completion_assistant_message_param import ChatCompletionAssistantMessageParam
@@ -244,8 +346,8 @@ class LanguageModelAssistantMessage(LanguageModelMessage):
     def from_stream_response(cls, response: LanguageModelStreamResponse):
         tool_calls = [
             LanguageModelFunctionCall(
-                id=None,
-                type=None,
+                id=f.id,
+                type="function",
                 function=f,
             )
             for f in response.tool_calls or []
@@ -260,6 +362,50 @@ class LanguageModelAssistantMessage(LanguageModelMessage):
             tool_calls=tool_calls,
         )
 
+    @overload
+    def to_openai(
+        self, mode: Literal["completions"] = "completions"
+    ) -> ChatCompletionAssistantMessageParam: ...
+
+    @overload
+    def to_openai(
+        self, mode: Literal["responses"]
+    ) -> list[EasyInputMessageParam | ResponseFunctionToolCallParam]: ...
+
+    def to_openai(
+        self, mode: Literal["completions", "responses"] = "completions"
+    ) -> (
+        ChatCompletionAssistantMessageParam
+        | list[EasyInputMessageParam | ResponseFunctionToolCallParam]
+    ):
+        content = self.content or ""
+        if not isinstance(content, str):
+            raise ValueError("Content must be a string")
+
+        if mode == "completions":
+            return ChatCompletionAssistantMessageParam(
+                role="assistant",
+                content=content,
+                tool_calls=[
+                    t.function.to_openai_param() for t in self.tool_calls or []
+                ],
+            )
+        elif mode == "responses":
+            """
+            Responses API does not support assistant messages with tool calls
+            """
+            res = []
+            if content != "":
+                res.append(EasyInputMessageParam(role="assistant", content=content))
+            if self.tool_calls:
+                res.extend(
+                    [
+                        t.function.to_openai_param(mode="responses")
+                        for t in self.tool_calls
+                    ]
+                )
+            return res
+
 
 # Equivalent to
 # from openai.types.chat.chat_completion_tool_message_param import ChatCompletionToolMessageParam
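Because the Responses API has no assistant message that carries tool calls inline, `"responses"` mode returns a list: an optional text item plus one `function_call` item per tool call. A sketch under the assumption that the constructor accepts the same fields the hunks above read (`content`, `tool_calls` with `id`, `type`, `function`):

```python
from unique_toolkit.language_model.schemas import (
    LanguageModelAssistantMessage,
    LanguageModelFunction,
    LanguageModelFunctionCall,
)

# Hypothetical assistant turn that both spoke and called a tool.
assistant = LanguageModelAssistantMessage(
    content="Let me check the weather.",
    tool_calls=[
        LanguageModelFunctionCall(
            id="call_abc123",
            type="function",
            function=LanguageModelFunction(
                id="call_abc123", name="get_weather", arguments={"city": "Zurich"}
            ),
        )
    ],
)

# Completions: a single assistant message param with tool_calls inline.
assistant.to_openai()

# Responses: a list of separate input items.
assistant.to_openai(mode="responses")
# -> [EasyInputMessageParam(role="assistant", ...), {"type": "function_call", ...}]
```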
@@ -281,20 +427,50 @@ class LanguageModelToolMessage(LanguageModelMessage):
     def set_role(cls, value):
         return LanguageModelMessageRole.TOOL
 
+    @overload
+    def to_openai(
+        self, mode: Literal["completions"] = "completions"
+    ) -> ChatCompletionToolMessageParam: ...
+
+    @overload
+    def to_openai(self, mode: Literal["responses"]) -> FunctionCallOutput: ...
+
+    def to_openai(
+        self, mode: Literal["completions", "responses"] = "completions"
+    ) -> ChatCompletionToolMessageParam | FunctionCallOutput:
+        content = self.content or ""
+        if not isinstance(content, str):
+            raise ValueError("Content must be a string")
+
+        if mode == "completions":
+            return ChatCompletionToolMessageParam(
+                role="tool",
+                content=content,
+                tool_call_id=self.tool_call_id,
+            )
+        elif mode == "responses":
+            return FunctionCallOutput(
+                call_id=self.tool_call_id,
+                output=content,
+                type="function_call_output",
+            )
+
 
 # Equivalent implementation for list of
 # from openai.types.chat.chat_completion_tool_message_param import ChatCompletionToolMessageParam
 # with the addition of the builder
 
+LanguageModelMessageOptions = (
+    LanguageModelMessage
+    | LanguageModelToolMessage
+    | LanguageModelAssistantMessage
+    | LanguageModelSystemMessage
+    | LanguageModelUserMessage
+)
+
 
 class LanguageModelMessages(RootModel):
-    root: list[
-        LanguageModelMessage
-        | LanguageModelToolMessage
-        | LanguageModelAssistantMessage
-        | LanguageModelSystemMessage
-        | LanguageModelUserMessage
-    ]
+    root: list[LanguageModelMessageOptions]
 
     @classmethod
     def load_messages_to_root(cls, data: list[dict] | dict) -> Self:
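With the new `LanguageModelMessageOptions` alias, the root type of `LanguageModelMessages` is a single name, and histories built from the concrete message classes validate exactly as before. A small sketch (pydantic's `RootModel` accepts the list positionally):

```python
from unique_toolkit.language_model.schemas import (
    LanguageModelMessages,
    LanguageModelSystemMessage,
    LanguageModelUserMessage,
)

messages = LanguageModelMessages(
    [
        LanguageModelSystemMessage(content="Be brief."),
        LanguageModelUserMessage(content="Summarize the 1.16.0 changes."),
    ]
)
```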
@@ -502,13 +678,32 @@ class LanguageModelToolDescription(BaseModel):
     def serialize_parameters(self, parameters: type[BaseModel]):
         return parameters.model_json_schema()
 
-    def to_openai(self) -> ChatCompletionToolParam:
-        return ChatCompletionToolParam(
-            function=FunctionDefinition(
+    @overload
+    def to_openai(
+        self, mode: Literal["completions"] = "completions"
+    ) -> ChatCompletionToolParam: ...
+
+    @overload
+    def to_openai(self, mode: Literal["responses"]) -> FunctionToolParam: ...
+
+    def to_openai(
+        self, mode: Literal["completions", "responses"] = "completions"
+    ) -> ChatCompletionToolParam | FunctionToolParam:
+        if mode == "completions":
+            return ChatCompletionToolParam(
+                function=FunctionDefinition(
+                    name=self.name,
+                    description=self.description,
+                    parameters=self.parameters.model_json_schema(),
+                    strict=self.strict,
+                ),
+                type="function",
+            )
+        elif mode == "responses":
+            return FunctionToolParam(
+                type="function",
                 name=self.name,
-                description=self.description,
                 parameters=self.parameters.model_json_schema(),
                 strict=self.strict,
-            ),
-            type="function",
-        )
+                description=self.description,
+            )
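The same `mode` switch now applies to tool schemas: Chat Completions nests the definition under `function`, while the Responses `FunctionToolParam` is flat. A sketch, assuming the constructor takes the field names the hunk reads (`name`, `description`, `parameters`, `strict`); whether `strict` has a default is an assumption:

```python
from pydantic import BaseModel

from unique_toolkit.language_model.schemas import LanguageModelToolDescription

class WeatherParams(BaseModel):
    city: str

tool = LanguageModelToolDescription(
    name="get_weather",
    description="Look up the current weather for a city.",
    parameters=WeatherParams,
    strict=True,  # assumed field; pass explicitly if there is no default
)

tool.to_openai()                  # {"type": "function", "function": {...}}
tool.to_openai(mode="responses")  # {"type": "function", "name": ..., ...}
```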
unique_toolkit/protocols/support.py

@@ -1,4 +1,18 @@
-from typing import Any, Awaitable, Protocol
+from typing import Awaitable, Protocol, Sequence
+
+from openai.types.chat import (
+    ChatCompletionMessageParam,
+    ChatCompletionToolChoiceOptionParam,
+)
+from openai.types.responses import (
+    ResponseIncludable,
+    ResponseInputItemParam,
+    ResponseOutputItem,
+    ResponseTextConfigParam,
+    ToolParam,
+    response_create_params,
+)
+from openai.types.shared_params import Metadata, Reasoning
 
 from unique_toolkit.content import ContentChunk
 from unique_toolkit.language_model import (
@@ -13,6 +27,10 @@ from unique_toolkit.language_model.constants import (
     DEFAULT_COMPLETE_TEMPERATURE,
     DEFAULT_COMPLETE_TIMEOUT,
 )
+from unique_toolkit.language_model.schemas import (
+    LanguageModelMessageOptions,
+    ResponsesLanguageModelStreamResponse,
+)
 
 # As soon as we have multiple, remember
 # https://pypi.org/project/typing-protocol-intersection/
@@ -35,7 +53,7 @@ class SupportsComplete(Protocol):
         model_name: LanguageModelName | str,
         temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
         timeout: int = DEFAULT_COMPLETE_TIMEOUT,
-        tools: list[LanguageModelTool | LanguageModelToolDescription] | None = None,
+        tools: Sequence[LanguageModelTool | LanguageModelToolDescription] | None = None,
     ) -> Awaitable[LanguageModelResponse]: ...
 
 
@@ -45,19 +63,83 @@ class SupportCompleteWithReferences(Protocol):
         messages: LanguageModelMessages,
         model_name: LanguageModelName | str,
         content_chunks: list[ContentChunk] | None = None,
-        debug_info: dict[str, Any] = {},
+        debug_info: dict | None = None,
         temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
         timeout: int = DEFAULT_COMPLETE_TIMEOUT,
-        tools: list[LanguageModelTool | LanguageModelToolDescription] | None = None,
+        tools: Sequence[LanguageModelTool | LanguageModelToolDescription] | None = None,
+        start_text: str | None = None,
+        tool_choice: ChatCompletionToolChoiceOptionParam | None = None,
+        other_options: dict | None = None,
     ) -> LanguageModelStreamResponse: ...
 
-    def complete_with_references_async(
+    async def complete_with_references_async(
         self,
-        messages: LanguageModelMessages,
+        messages: LanguageModelMessages | list[ChatCompletionMessageParam],
         model_name: LanguageModelName | str,
         content_chunks: list[ContentChunk] | None = None,
-        debug_info: dict[str, Any] = {},
+        debug_info: dict | None = None,
         temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
         timeout: int = DEFAULT_COMPLETE_TIMEOUT,
-        tools: list[LanguageModelTool | LanguageModelToolDescription] | None = None,
-    ) -> Awaitable[LanguageModelStreamResponse]: ...
+        tools: Sequence[LanguageModelTool | LanguageModelToolDescription] | None = None,
+        tool_choice: ChatCompletionToolChoiceOptionParam | None = None,
+        start_text: str | None = None,
+        other_options: dict | None = None,
+    ) -> LanguageModelStreamResponse: ...
+
+
+class ResponsesSupportCompleteWithReferences(Protocol):
+    def complete_with_references(
+        self,
+        *,
+        model_name: LanguageModelName | str,
+        messages: str
+        | LanguageModelMessages
+        | Sequence[
+            ResponseInputItemParam
+            | LanguageModelMessageOptions
+            | ResponseOutputItem  # History is automatically convertible
+        ],
+        content_chunks: list[ContentChunk] | None = None,
+        tools: Sequence[LanguageModelToolDescription | ToolParam] | None = None,
+        temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
+        debug_info: dict | None = None,
+        start_text: str | None = None,
+        include: list[ResponseIncludable] | None = None,
+        instructions: str | None = None,
+        max_output_tokens: int | None = None,
+        metadata: Metadata | None = None,
+        parallel_tool_calls: bool | None = None,
+        text: ResponseTextConfigParam | None = None,
+        tool_choice: response_create_params.ToolChoice | None = None,
+        top_p: float | None = None,
+        reasoning: Reasoning | None = None,
+        other_options: dict | None = None,
+    ) -> ResponsesLanguageModelStreamResponse: ...
+
+    async def complete_with_references_async(
+        self,
+        *,
+        model_name: LanguageModelName | str,
+        messages: str
+        | LanguageModelMessages
+        | Sequence[
+            ResponseInputItemParam
+            | LanguageModelMessageOptions
+            | ResponseOutputItem  # History is automatically convertible
+        ],
+        content_chunks: list[ContentChunk] | None = None,
+        tools: Sequence[LanguageModelToolDescription | ToolParam] | None = None,
+        temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
+        debug_info: dict | None = None,
+        start_text: str | None = None,
+        include: list[ResponseIncludable] | None = None,
+        instructions: str | None = None,
+        max_output_tokens: int | None = None,
+        metadata: Metadata | None = None,
+        parallel_tool_calls: bool | None = None,
+        text: ResponseTextConfigParam | None = None,
+        tool_choice: response_create_params.ToolChoice | None = None,
+        top_p: float | None = None,
+        reasoning: Reasoning | None = None,
+        other_options: dict | None = None,
+    ) -> ResponsesLanguageModelStreamResponse: ...
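Since these are `Protocol` classes, any service whose methods structurally match satisfies them without inheriting. A sketch of code written against the new responses-style protocol; the model name is a placeholder, and whether a particular service (e.g. `ChatService`) conforms is an assumption:

```python
from unique_toolkit.language_model.schemas import ResponsesLanguageModelStreamResponse
from unique_toolkit.protocols.support import ResponsesSupportCompleteWithReferences

def summarize(
    svc: ResponsesSupportCompleteWithReferences,
) -> ResponsesLanguageModelStreamResponse:
    # Keyword-only call, mirroring the protocol signature above.
    return svc.complete_with_references(
        model_name="gpt-4o",  # placeholder model name
        messages="Summarize the uploaded report.",
        instructions="Answer in three bullet points.",
    )
```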
unique_toolkit/services/__init__.py

@@ -0,0 +1,7 @@
+from unique_toolkit.services.chat_service import ChatService
+from unique_toolkit.services.knowledge_base import KnowledgeBaseService
+
+__all__ = [
+    "ChatService",
+    "KnowledgeBaseService",
+]
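The new `unique_toolkit.services` package simply re-exports the two service classes, so callers can shorten their imports:

```python
from unique_toolkit.services import ChatService, KnowledgeBaseService
```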