unique_toolkit 0.7.9__py3-none-any.whl → 1.33.3__py3-none-any.whl
This diff represents the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their public registries.
- unique_toolkit/__init__.py +36 -3
- unique_toolkit/_common/api_calling/human_verification_manager.py +357 -0
- unique_toolkit/_common/base_model_type_attribute.py +303 -0
- unique_toolkit/_common/chunk_relevancy_sorter/config.py +49 -0
- unique_toolkit/_common/chunk_relevancy_sorter/exception.py +5 -0
- unique_toolkit/_common/chunk_relevancy_sorter/schemas.py +46 -0
- unique_toolkit/_common/chunk_relevancy_sorter/service.py +374 -0
- unique_toolkit/_common/chunk_relevancy_sorter/tests/test_service.py +275 -0
- unique_toolkit/_common/default_language_model.py +12 -0
- unique_toolkit/_common/docx_generator/__init__.py +7 -0
- unique_toolkit/_common/docx_generator/config.py +12 -0
- unique_toolkit/_common/docx_generator/schemas.py +80 -0
- unique_toolkit/_common/docx_generator/service.py +225 -0
- unique_toolkit/_common/docx_generator/template/Doc Template.docx +0 -0
- unique_toolkit/_common/endpoint_builder.py +368 -0
- unique_toolkit/_common/endpoint_requestor.py +480 -0
- unique_toolkit/_common/exception.py +24 -0
- unique_toolkit/_common/experimental/endpoint_builder.py +368 -0
- unique_toolkit/_common/experimental/endpoint_requestor.py +488 -0
- unique_toolkit/_common/feature_flags/schema.py +9 -0
- unique_toolkit/_common/pydantic/rjsf_tags.py +936 -0
- unique_toolkit/_common/pydantic_helpers.py +174 -0
- unique_toolkit/_common/referencing.py +53 -0
- unique_toolkit/_common/string_utilities.py +140 -0
- unique_toolkit/_common/tests/test_referencing.py +521 -0
- unique_toolkit/_common/tests/test_string_utilities.py +506 -0
- unique_toolkit/_common/token/image_token_counting.py +67 -0
- unique_toolkit/_common/token/token_counting.py +204 -0
- unique_toolkit/_common/utils/__init__.py +1 -0
- unique_toolkit/_common/utils/files.py +43 -0
- unique_toolkit/_common/utils/image/encode.py +25 -0
- unique_toolkit/_common/utils/jinja/helpers.py +10 -0
- unique_toolkit/_common/utils/jinja/render.py +18 -0
- unique_toolkit/_common/utils/jinja/schema.py +65 -0
- unique_toolkit/_common/utils/jinja/utils.py +80 -0
- unique_toolkit/_common/utils/structured_output/__init__.py +1 -0
- unique_toolkit/_common/utils/structured_output/schema.py +5 -0
- unique_toolkit/_common/utils/write_configuration.py +51 -0
- unique_toolkit/_common/validators.py +101 -4
- unique_toolkit/agentic/__init__.py +1 -0
- unique_toolkit/agentic/debug_info_manager/debug_info_manager.py +28 -0
- unique_toolkit/agentic/debug_info_manager/test/test_debug_info_manager.py +278 -0
- unique_toolkit/agentic/evaluation/config.py +36 -0
- unique_toolkit/{evaluators → agentic/evaluation}/context_relevancy/prompts.py +25 -0
- unique_toolkit/agentic/evaluation/context_relevancy/schema.py +80 -0
- unique_toolkit/agentic/evaluation/context_relevancy/service.py +273 -0
- unique_toolkit/agentic/evaluation/evaluation_manager.py +218 -0
- unique_toolkit/agentic/evaluation/hallucination/constants.py +61 -0
- unique_toolkit/agentic/evaluation/hallucination/hallucination_evaluation.py +112 -0
- unique_toolkit/{evaluators → agentic/evaluation}/hallucination/prompts.py +1 -1
- unique_toolkit/{evaluators → agentic/evaluation}/hallucination/service.py +20 -16
- unique_toolkit/{evaluators → agentic/evaluation}/hallucination/utils.py +32 -21
- unique_toolkit/{evaluators → agentic/evaluation}/output_parser.py +20 -2
- unique_toolkit/{evaluators → agentic/evaluation}/schemas.py +27 -7
- unique_toolkit/agentic/evaluation/tests/test_context_relevancy_service.py +253 -0
- unique_toolkit/agentic/evaluation/tests/test_output_parser.py +87 -0
- unique_toolkit/agentic/history_manager/history_construction_with_contents.py +298 -0
- unique_toolkit/agentic/history_manager/history_manager.py +241 -0
- unique_toolkit/agentic/history_manager/loop_token_reducer.py +484 -0
- unique_toolkit/agentic/history_manager/utils.py +96 -0
- unique_toolkit/agentic/message_log_manager/__init__.py +5 -0
- unique_toolkit/agentic/message_log_manager/service.py +93 -0
- unique_toolkit/agentic/postprocessor/postprocessor_manager.py +212 -0
- unique_toolkit/agentic/reference_manager/reference_manager.py +103 -0
- unique_toolkit/agentic/responses_api/__init__.py +19 -0
- unique_toolkit/agentic/responses_api/postprocessors/code_display.py +71 -0
- unique_toolkit/agentic/responses_api/postprocessors/generated_files.py +297 -0
- unique_toolkit/agentic/responses_api/stream_handler.py +15 -0
- unique_toolkit/agentic/short_term_memory_manager/persistent_short_term_memory_manager.py +141 -0
- unique_toolkit/agentic/thinking_manager/thinking_manager.py +103 -0
- unique_toolkit/agentic/tools/__init__.py +1 -0
- unique_toolkit/agentic/tools/a2a/__init__.py +36 -0
- unique_toolkit/agentic/tools/a2a/config.py +17 -0
- unique_toolkit/agentic/tools/a2a/evaluation/__init__.py +15 -0
- unique_toolkit/agentic/tools/a2a/evaluation/_utils.py +66 -0
- unique_toolkit/agentic/tools/a2a/evaluation/config.py +55 -0
- unique_toolkit/agentic/tools/a2a/evaluation/evaluator.py +260 -0
- unique_toolkit/agentic/tools/a2a/evaluation/summarization_user_message.j2 +9 -0
- unique_toolkit/agentic/tools/a2a/manager.py +55 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/__init__.py +21 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/_display_utils.py +240 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/_ref_utils.py +84 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/config.py +78 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/display.py +264 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/references.py +101 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/test/test_display.py +421 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/test/test_display_utils.py +2103 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/test/test_ref_utils.py +603 -0
- unique_toolkit/agentic/tools/a2a/prompts.py +46 -0
- unique_toolkit/agentic/tools/a2a/response_watcher/__init__.py +6 -0
- unique_toolkit/agentic/tools/a2a/response_watcher/service.py +91 -0
- unique_toolkit/agentic/tools/a2a/tool/__init__.py +4 -0
- unique_toolkit/agentic/tools/a2a/tool/_memory.py +26 -0
- unique_toolkit/agentic/tools/a2a/tool/_schema.py +9 -0
- unique_toolkit/agentic/tools/a2a/tool/config.py +158 -0
- unique_toolkit/agentic/tools/a2a/tool/service.py +393 -0
- unique_toolkit/agentic/tools/agent_chunks_hanlder.py +65 -0
- unique_toolkit/agentic/tools/config.py +128 -0
- unique_toolkit/agentic/tools/factory.py +44 -0
- unique_toolkit/agentic/tools/mcp/__init__.py +4 -0
- unique_toolkit/agentic/tools/mcp/manager.py +71 -0
- unique_toolkit/agentic/tools/mcp/models.py +28 -0
- unique_toolkit/agentic/tools/mcp/tool_wrapper.py +234 -0
- unique_toolkit/agentic/tools/openai_builtin/__init__.py +11 -0
- unique_toolkit/agentic/tools/openai_builtin/base.py +46 -0
- unique_toolkit/agentic/tools/openai_builtin/code_interpreter/__init__.py +8 -0
- unique_toolkit/agentic/tools/openai_builtin/code_interpreter/config.py +88 -0
- unique_toolkit/agentic/tools/openai_builtin/code_interpreter/service.py +250 -0
- unique_toolkit/agentic/tools/openai_builtin/manager.py +79 -0
- unique_toolkit/agentic/tools/schemas.py +145 -0
- unique_toolkit/agentic/tools/test/test_mcp_manager.py +536 -0
- unique_toolkit/agentic/tools/test/test_tool_progress_reporter.py +445 -0
- unique_toolkit/agentic/tools/tool.py +187 -0
- unique_toolkit/agentic/tools/tool_manager.py +492 -0
- unique_toolkit/agentic/tools/tool_progress_reporter.py +285 -0
- unique_toolkit/agentic/tools/utils/__init__.py +19 -0
- unique_toolkit/agentic/tools/utils/execution/__init__.py +1 -0
- unique_toolkit/agentic/tools/utils/execution/execution.py +286 -0
- unique_toolkit/agentic/tools/utils/source_handling/__init__.py +0 -0
- unique_toolkit/agentic/tools/utils/source_handling/schema.py +21 -0
- unique_toolkit/agentic/tools/utils/source_handling/source_formatting.py +207 -0
- unique_toolkit/agentic/tools/utils/source_handling/tests/test_source_formatting.py +216 -0
- unique_toolkit/app/__init__.py +9 -0
- unique_toolkit/app/dev_util.py +180 -0
- unique_toolkit/app/fast_api_factory.py +131 -0
- unique_toolkit/app/init_sdk.py +32 -1
- unique_toolkit/app/schemas.py +206 -31
- unique_toolkit/app/unique_settings.py +367 -0
- unique_toolkit/app/webhook.py +77 -0
- unique_toolkit/chat/__init__.py +8 -1
- unique_toolkit/chat/deprecated/service.py +232 -0
- unique_toolkit/chat/functions.py +648 -78
- unique_toolkit/chat/rendering.py +34 -0
- unique_toolkit/chat/responses_api.py +461 -0
- unique_toolkit/chat/schemas.py +134 -2
- unique_toolkit/chat/service.py +115 -767
- unique_toolkit/content/functions.py +353 -8
- unique_toolkit/content/schemas.py +128 -15
- unique_toolkit/content/service.py +321 -45
- unique_toolkit/content/smart_rules.py +301 -0
- unique_toolkit/content/utils.py +10 -3
- unique_toolkit/data_extraction/README.md +96 -0
- unique_toolkit/data_extraction/__init__.py +11 -0
- unique_toolkit/data_extraction/augmented/__init__.py +5 -0
- unique_toolkit/data_extraction/augmented/service.py +93 -0
- unique_toolkit/data_extraction/base.py +25 -0
- unique_toolkit/data_extraction/basic/__init__.py +11 -0
- unique_toolkit/data_extraction/basic/config.py +18 -0
- unique_toolkit/data_extraction/basic/prompt.py +13 -0
- unique_toolkit/data_extraction/basic/service.py +55 -0
- unique_toolkit/embedding/service.py +103 -12
- unique_toolkit/framework_utilities/__init__.py +1 -0
- unique_toolkit/framework_utilities/langchain/__init__.py +10 -0
- unique_toolkit/framework_utilities/langchain/client.py +71 -0
- unique_toolkit/framework_utilities/langchain/history.py +19 -0
- unique_toolkit/framework_utilities/openai/__init__.py +6 -0
- unique_toolkit/framework_utilities/openai/client.py +84 -0
- unique_toolkit/framework_utilities/openai/message_builder.py +229 -0
- unique_toolkit/framework_utilities/utils.py +23 -0
- unique_toolkit/language_model/__init__.py +3 -0
- unique_toolkit/language_model/_responses_api_utils.py +93 -0
- unique_toolkit/language_model/builder.py +27 -11
- unique_toolkit/language_model/default_language_model.py +3 -0
- unique_toolkit/language_model/functions.py +345 -43
- unique_toolkit/language_model/infos.py +1288 -46
- unique_toolkit/language_model/reference.py +242 -0
- unique_toolkit/language_model/schemas.py +481 -49
- unique_toolkit/language_model/service.py +229 -28
- unique_toolkit/protocols/support.py +145 -0
- unique_toolkit/services/__init__.py +7 -0
- unique_toolkit/services/chat_service.py +1631 -0
- unique_toolkit/services/knowledge_base.py +1094 -0
- unique_toolkit/short_term_memory/service.py +178 -41
- unique_toolkit/smart_rules/__init__.py +0 -0
- unique_toolkit/smart_rules/compile.py +56 -0
- unique_toolkit/test_utilities/events.py +197 -0
- unique_toolkit-1.33.3.dist-info/METADATA +1145 -0
- unique_toolkit-1.33.3.dist-info/RECORD +205 -0
- unique_toolkit/evaluators/__init__.py +0 -1
- unique_toolkit/evaluators/config.py +0 -35
- unique_toolkit/evaluators/constants.py +0 -1
- unique_toolkit/evaluators/context_relevancy/constants.py +0 -32
- unique_toolkit/evaluators/context_relevancy/service.py +0 -53
- unique_toolkit/evaluators/context_relevancy/utils.py +0 -142
- unique_toolkit/evaluators/hallucination/constants.py +0 -41
- unique_toolkit-0.7.9.dist-info/METADATA +0 -413
- unique_toolkit-0.7.9.dist-info/RECORD +0 -64
- /unique_toolkit/{evaluators → agentic/evaluation}/exception.py +0 -0
- {unique_toolkit-0.7.9.dist-info → unique_toolkit-1.33.3.dist-info}/LICENSE +0 -0
- {unique_toolkit-0.7.9.dist-info → unique_toolkit-1.33.3.dist-info}/WHEEL +0 -0
--- unique_toolkit/language_model/service.py
+++ unique_toolkit/language_model/service.py
@@ -1,11 +1,13 @@
 import logging
-from typing import Optional, Type
+from typing import Any, Optional, Type, overload
 
 from pydantic import BaseModel
 from typing_extensions import deprecated
 
 from unique_toolkit._common.validate_required_values import validate_required_values
 from unique_toolkit.app.schemas import BaseEvent, ChatEvent, Event
+from unique_toolkit.app.unique_settings import UniqueSettings
+from unique_toolkit.content.schemas import ContentChunk
 from unique_toolkit.language_model.constants import (
     DEFAULT_COMPLETE_TEMPERATURE,
     DEFAULT_COMPLETE_TIMEOUT,
@@ -14,12 +16,16 @@ from unique_toolkit.language_model.constants import (
 from unique_toolkit.language_model.functions import (
     complete,
     complete_async,
+    complete_with_references,
+    complete_with_references_async,
 )
 from unique_toolkit.language_model.infos import LanguageModelName
 from unique_toolkit.language_model.schemas import (
     LanguageModelMessages,
     LanguageModelResponse,
+    LanguageModelStreamResponse,
     LanguageModelTool,
+    LanguageModelToolDescription,
 )
 
 logger = logging.getLogger(f"toolkit.{DOMAIN_NAME}.{__name__}")
@@ -28,34 +34,73 @@ logger = logging.getLogger(f"toolkit.{DOMAIN_NAME}.{__name__}")
 class LanguageModelService:
     """
     Provides methods to interact with the Language Model by generating responses.
+    """
+
+    @deprecated(
+        "Use __init__ with company_id and user_id instead or use the classmethod `from_event`"
+    )
+    @overload
+    def __init__(self, event: Event | ChatEvent | BaseEvent): ...
 
-
-
-
-
-
+    """
+    Initialize the LanguageModelService with an event (deprecated)
+    """
+
+    @overload
+    def __init__(self, *, company_id: str, user_id: str): ...
+
+    """
+    Initialize the LanguageModelService with a company_id and user_id.
     """
 
     def __init__(
         self,
-        event: Event | BaseEvent | None = None,
+        event: Event | ChatEvent | BaseEvent | None = None,
         company_id: str | None = None,
         user_id: str | None = None,
-
-        assistant_id: str | None = None,
+        **kwargs: dict[str, Any],  # only here for backward compatibility
     ):
-
-
-
-
-
-
-
-        self.
-        self.
-
-
-
+        if isinstance(event, (ChatEvent, Event)):
+            self._event = event
+            self._chat_id = event.payload.chat_id
+            self._assistant_id = event.payload.assistant_id
+            self._company_id = event.company_id
+            self._user_id = event.user_id
+        elif isinstance(event, BaseEvent):
+            self._event = event
+            self._company_id = event.company_id
+            self._user_id = event.user_id
+            self._chat_id = None
+            self._assistant_id = None
+        else:
+            [company_id, user_id] = validate_required_values([company_id, user_id])
+            self._event = None
+            self._company_id: str = company_id
+            self._user_id: str = user_id
+            self._chat_id: str | None = None
+            self._assistant_id: str | None = None
+
+    @classmethod
+    def from_event(cls, event: BaseEvent):
+        """
+        Initialize the LanguageModelService with an event.
+        """
+        return cls(company_id=event.company_id, user_id=event.user_id)
+
+    @classmethod
+    def from_settings(cls, settings: UniqueSettings | str | None = None):
+        """
+        Initialize the LanguageModelService with a settings object.
+        """
+        if settings is None:
+            settings = UniqueSettings.from_env_auto_with_sdk_init()
+        elif isinstance(settings, str):
+            settings = UniqueSettings.from_env_auto_with_sdk_init(filename=settings)
+
+        return cls(
+            company_id=settings.auth.company_id.get_secret_value(),
+            user_id=settings.auth.user_id.get_secret_value(),
+        )
 
     @property
     @deprecated(
@@ -70,13 +115,117 @@ class LanguageModelService:
         """
         return self._event
 
+    @property
+    @deprecated(
+        "The company_id property is deprecated and will be removed in a future version."
+    )
+    def company_id(self) -> str | None:
+        """
+        Get the company identifier (deprecated).
+
+        Returns:
+            str | None: The company identifier.
+        """
+        return self._company_id
+
+    @company_id.setter
+    @deprecated(
+        "The company_id setter is deprecated and will be removed in a future version."
+    )
+    def company_id(self, value: str) -> None:
+        """
+        Set the company identifier (deprecated).
+
+        Args:
+            value (str | None): The company identifier.
+        """
+        self._company_id = value
+
+    @property
+    @deprecated(
+        "The user_id property is deprecated and will be removed in a future version."
+    )
+    def user_id(self) -> str | None:
+        """
+        Get the user identifier (deprecated).
+
+        Returns:
+            str | None: The user identifier.
+        """
+        return self._user_id
+
+    @user_id.setter
+    @deprecated(
+        "The user_id setter is deprecated and will be removed in a future version."
+    )
+    def user_id(self, value: str) -> None:
+        """
+        Set the user identifier (deprecated).
+
+        Args:
+            value (str | None): The user identifier.
+        """
+        self._user_id = value
+
+    @property
+    @deprecated(
+        "The chat_id property is deprecated and will be removed in a future version."
+    )
+    def chat_id(self) -> str | None:
+        """
+        Get the chat identifier (deprecated).
+
+        Returns:
+            str | None: The chat identifier.
+        """
+        return self._chat_id
+
+    @chat_id.setter
+    @deprecated(
+        "The chat_id setter is deprecated and will be removed in a future version."
+    )
+    def chat_id(self, value: str | None) -> None:
+        """
+        Set the chat identifier (deprecated).
+
+        Args:
+            value (str | None): The chat identifier.
+        """
+        self._chat_id = value
+
+    @property
+    @deprecated(
+        "The assistant_id property is deprecated and will be removed in a future version."
+    )
+    def assistant_id(self) -> str | None:
+        """
+        Get the assistant identifier (deprecated).
+
+        Returns:
+            str | None: The assistant identifier.
+        """
+        return self._assistant_id
+
+    @assistant_id.setter
+    @deprecated(
+        "The assistant_id setter is deprecated and will be removed in a future version."
+    )
+    def assistant_id(self, value: str | None) -> None:
+        """
+        Set the assistant identifier (deprecated).
+
+        Args:
+            value (str | None): The assistant identifier.
+        """
+        self._assistant_id = value
+
     def complete(
         self,
         messages: LanguageModelMessages,
         model_name: LanguageModelName | str,
         temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
         timeout: int = DEFAULT_COMPLETE_TIMEOUT,
-        tools: Optional[list[LanguageModelTool]] = None,
+        tools: Optional[list[LanguageModelTool | LanguageModelToolDescription]] = None,
         structured_output_model: Optional[Type[BaseModel]] = None,
         structured_output_enforce_schema: bool = False,
         other_options: Optional[dict] = None,
@@ -84,10 +233,10 @@ class LanguageModelService:
         """
         Calls the completion endpoint synchronously without streaming the response.
         """
-        [company_id] = validate_required_values([self.company_id])
 
         return complete(
-            company_id=
+            company_id=self._company_id,
+            user_id=self._user_id,
             messages=messages,
             model_name=model_name,
             temperature=temperature,
@@ -104,7 +253,7 @@ class LanguageModelService:
         model_name: LanguageModelName | str,
         temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
         timeout: int = DEFAULT_COMPLETE_TIMEOUT,
-        tools: Optional[list[LanguageModelTool]] = None,
+        tools: Optional[list[LanguageModelTool | LanguageModelToolDescription]] = None,
         structured_output_model: Optional[Type[BaseModel]] = None,
         structured_output_enforce_schema: bool = False,
         other_options: Optional[dict] = None,
@@ -112,10 +261,10 @@ class LanguageModelService:
         """
         Calls the completion endpoint asynchronously without streaming the response.
         """
-        [company_id] = validate_required_values([self.company_id])
 
         return await complete_async(
-            company_id=
+            company_id=self._company_id,
+            user_id=self._user_id,
             messages=messages,
             model_name=model_name,
             temperature=temperature,
@@ -133,9 +282,10 @@ class LanguageModelService:
         company_id: str,
         messages: LanguageModelMessages,
         model_name: LanguageModelName | str,
+        user_id: str | None = None,
         temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
         timeout: int = DEFAULT_COMPLETE_TIMEOUT,
-        tools: Optional[list[LanguageModelTool]] = None,
+        tools: Optional[list[LanguageModelTool | LanguageModelToolDescription]] = None,
         structured_output_model: Optional[Type[BaseModel]] = None,
         structured_output_enforce_schema: bool = False,
         other_options: Optional[dict] = None,
@@ -146,6 +296,7 @@ class LanguageModelService:
 
         return await complete_async(
             company_id=company_id,
+            user_id=user_id,
             messages=messages,
             model_name=model_name,
             temperature=temperature,
@@ -155,3 +306,53 @@ class LanguageModelService:
             structured_output_model=structured_output_model,
             structured_output_enforce_schema=structured_output_enforce_schema,
         )
+
+    def complete_with_references(
+        self,
+        messages: LanguageModelMessages,
+        model_name: LanguageModelName | str,
+        content_chunks: list[ContentChunk] | None = None,
+        debug_info: dict = {},
+        temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
+        timeout: int = DEFAULT_COMPLETE_TIMEOUT,
+        tools: list[LanguageModelTool | LanguageModelToolDescription] | None = None,
+        start_text: str | None = None,
+        other_options: dict[str, Any] | None = None,
+    ) -> LanguageModelStreamResponse:
+        return complete_with_references(
+            company_id=self._company_id,
+            user_id=self._user_id,
+            messages=messages,
+            model_name=model_name,
+            content_chunks=content_chunks,
+            temperature=temperature,
+            timeout=timeout,
+            other_options=other_options,
+            tools=tools,
+            start_text=start_text,
+        )
+
+    async def complete_with_references_async(
+        self,
+        messages: LanguageModelMessages,
+        model_name: LanguageModelName | str,
+        content_chunks: list[ContentChunk] | None = None,
+        debug_info: dict = {},
+        temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
+        timeout: int = DEFAULT_COMPLETE_TIMEOUT,
+        tools: list[LanguageModelTool | LanguageModelToolDescription] | None = None,
+        start_text: str | None = None,
+        other_options: dict[str, Any] | None = None,
+    ) -> LanguageModelStreamResponse:
+        return await complete_with_references_async(
+            company_id=self._company_id,
+            user_id=self._user_id,
+            messages=messages,
+            model_name=model_name,
+            content_chunks=content_chunks,
+            temperature=temperature,
+            timeout=timeout,
+            other_options=other_options,
+            tools=tools,
+            start_text=start_text,
+        )
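For orientation, here is a usage sketch of the revised `LanguageModelService` API, based only on the signatures visible in the diff above. How `LanguageModelMessages` is populated is not part of this diff, and the model identifier is illustrative; both are assumptions.

```python
# Hypothetical usage sketch of the 1.x LanguageModelService shown above.
# The empty LanguageModelMessages and the model identifier are placeholders
# (assumptions), not values prescribed by this diff.
from unique_toolkit.language_model import LanguageModelMessages
from unique_toolkit.language_model.service import LanguageModelService

# New construction paths: explicit ids, or settings resolved from the environment.
service = LanguageModelService(company_id="company_123", user_id="user_456")
# service = LanguageModelService.from_settings()           # reads UniqueSettings from env
# service = LanguageModelService.from_settings("dev.env")  # or from a named env file

# Assumed: LanguageModelMessages wraps a list of messages; build real messages here.
messages = LanguageModelMessages(root=[])

# Plain completion; model_name accepts a LanguageModelName or any string,
# and tools may now also be LanguageModelToolDescription instances.
response = service.complete(messages=messages, model_name="my-model-identifier")

# New in 1.x: completion that also resolves references from content chunks.
stream = service.complete_with_references(
    messages=messages,
    model_name="my-model-identifier",
    content_chunks=None,  # pass retrieved ContentChunk objects to enable referencing
)
```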
--- /dev/null
+++ unique_toolkit/protocols/support.py
@@ -0,0 +1,145 @@
+from typing import Awaitable, Protocol, Sequence
+
+from openai.types.chat import (
+    ChatCompletionMessageParam,
+    ChatCompletionToolChoiceOptionParam,
+)
+from openai.types.responses import (
+    ResponseIncludable,
+    ResponseInputItemParam,
+    ResponseOutputItem,
+    ResponseTextConfigParam,
+    ToolParam,
+    response_create_params,
+)
+from openai.types.shared_params import Metadata, Reasoning
+
+from unique_toolkit.content import ContentChunk
+from unique_toolkit.language_model import (
+    LanguageModelMessages,
+    LanguageModelName,
+    LanguageModelResponse,
+    LanguageModelStreamResponse,
+    LanguageModelTool,
+    LanguageModelToolDescription,
+)
+from unique_toolkit.language_model.constants import (
+    DEFAULT_COMPLETE_TEMPERATURE,
+    DEFAULT_COMPLETE_TIMEOUT,
+)
+from unique_toolkit.language_model.schemas import (
+    LanguageModelMessageOptions,
+    ResponsesLanguageModelStreamResponse,
+)
+
+# As soon as we have multiple, remember
+# https://pypi.org/project/typing-protocol-intersection/
+# to generate combinations of protocols without inheritance
+
+
+class SupportsComplete(Protocol):
+    def complete(
+        self,
+        messages: LanguageModelMessages,
+        model_name: LanguageModelName | str,
+        temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
+        timeout: int = DEFAULT_COMPLETE_TIMEOUT,
+        tools: list[LanguageModelTool | LanguageModelToolDescription] | None = None,
+    ) -> LanguageModelResponse: ...
+
+    async def complete_async(
+        self,
+        messages: LanguageModelMessages,
+        model_name: LanguageModelName | str,
+        temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
+        timeout: int = DEFAULT_COMPLETE_TIMEOUT,
+        tools: Sequence[LanguageModelTool | LanguageModelToolDescription] | None = None,
+    ) -> Awaitable[LanguageModelResponse]: ...
+
+
+class SupportCompleteWithReferences(Protocol):
+    def complete_with_references(
+        self,
+        messages: LanguageModelMessages,
+        model_name: LanguageModelName | str,
+        content_chunks: list[ContentChunk] | None = None,
+        debug_info: dict | None = None,
+        temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
+        timeout: int = DEFAULT_COMPLETE_TIMEOUT,
+        tools: Sequence[LanguageModelTool | LanguageModelToolDescription] | None = None,
+        start_text: str | None = None,
+        tool_choice: ChatCompletionToolChoiceOptionParam | None = None,
+        other_options: dict | None = None,
+    ) -> LanguageModelStreamResponse: ...
+
+    async def complete_with_references_async(
+        self,
+        messages: LanguageModelMessages | list[ChatCompletionMessageParam],
+        model_name: LanguageModelName | str,
+        content_chunks: list[ContentChunk] | None = None,
+        debug_info: dict | None = None,
+        temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
+        timeout: int = DEFAULT_COMPLETE_TIMEOUT,
+        tools: Sequence[LanguageModelTool | LanguageModelToolDescription] | None = None,
+        tool_choice: ChatCompletionToolChoiceOptionParam | None = None,
+        start_text: str | None = None,
+        other_options: dict | None = None,
+    ) -> LanguageModelStreamResponse: ...
+
+
+class ResponsesSupportCompleteWithReferences(Protocol):
+    def complete_with_references(
+        self,
+        *,
+        model_name: LanguageModelName | str,
+        messages: str
+        | LanguageModelMessages
+        | Sequence[
+            ResponseInputItemParam
+            | LanguageModelMessageOptions
+            | ResponseOutputItem  # History is automatically convertible
+        ],
+        content_chunks: list[ContentChunk] | None = None,
+        tools: Sequence[LanguageModelToolDescription | ToolParam] | None = None,
+        temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
+        debug_info: dict | None = None,
+        start_text: str | None = None,
+        include: list[ResponseIncludable] | None = None,
+        instructions: str | None = None,
+        max_output_tokens: int | None = None,
+        metadata: Metadata | None = None,
+        parallel_tool_calls: bool | None = None,
+        text: ResponseTextConfigParam | None = None,
+        tool_choice: response_create_params.ToolChoice | None = None,
+        top_p: float | None = None,
+        reasoning: Reasoning | None = None,
+        other_options: dict | None = None,
+    ) -> ResponsesLanguageModelStreamResponse: ...
+
+    async def complete_with_references_async(
+        self,
+        *,
+        model_name: LanguageModelName | str,
+        messages: str
+        | LanguageModelMessages
+        | Sequence[
+            ResponseInputItemParam
+            | LanguageModelMessageOptions
+            | ResponseOutputItem  # History is automatically convertible
+        ],
+        content_chunks: list[ContentChunk] | None = None,
+        tools: Sequence[LanguageModelToolDescription | ToolParam] | None = None,
+        temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
+        debug_info: dict | None = None,
+        start_text: str | None = None,
+        include: list[ResponseIncludable] | None = None,
+        instructions: str | None = None,
+        max_output_tokens: int | None = None,
+        metadata: Metadata | None = None,
+        parallel_tool_calls: bool | None = None,
+        text: ResponseTextConfigParam | None = None,
+        tool_choice: response_create_params.ToolChoice | None = None,
+        top_p: float | None = None,
+        reasoning: Reasoning | None = None,
+        other_options: dict | None = None,
+    ) -> ResponsesLanguageModelStreamResponse: ...
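Because these are `typing.Protocol` classes, any object whose methods structurally match satisfies them without inheriting from the toolkit. A minimal sketch, assuming a caller that only needs plain completion; the function name and model identifier below are illustrative, only the protocol name and the `complete` signature come from the new module above.

```python
# Minimal structural-typing sketch for the new protocols module.
from unique_toolkit.language_model import LanguageModelMessages, LanguageModelResponse
from unique_toolkit.protocols.support import SupportsComplete


def summarize(client: SupportsComplete, messages: LanguageModelMessages) -> LanguageModelResponse:
    # Any client exposing a matching `complete` method works here
    # (for example LanguageModelService or a test double); no inheritance needed.
    return client.complete(messages=messages, model_name="my-model-identifier")
```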