unique_toolkit 0.7.9__py3-none-any.whl → 1.33.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- unique_toolkit/__init__.py +36 -3
- unique_toolkit/_common/api_calling/human_verification_manager.py +357 -0
- unique_toolkit/_common/base_model_type_attribute.py +303 -0
- unique_toolkit/_common/chunk_relevancy_sorter/config.py +49 -0
- unique_toolkit/_common/chunk_relevancy_sorter/exception.py +5 -0
- unique_toolkit/_common/chunk_relevancy_sorter/schemas.py +46 -0
- unique_toolkit/_common/chunk_relevancy_sorter/service.py +374 -0
- unique_toolkit/_common/chunk_relevancy_sorter/tests/test_service.py +275 -0
- unique_toolkit/_common/default_language_model.py +12 -0
- unique_toolkit/_common/docx_generator/__init__.py +7 -0
- unique_toolkit/_common/docx_generator/config.py +12 -0
- unique_toolkit/_common/docx_generator/schemas.py +80 -0
- unique_toolkit/_common/docx_generator/service.py +225 -0
- unique_toolkit/_common/docx_generator/template/Doc Template.docx +0 -0
- unique_toolkit/_common/endpoint_builder.py +368 -0
- unique_toolkit/_common/endpoint_requestor.py +480 -0
- unique_toolkit/_common/exception.py +24 -0
- unique_toolkit/_common/experimental/endpoint_builder.py +368 -0
- unique_toolkit/_common/experimental/endpoint_requestor.py +488 -0
- unique_toolkit/_common/feature_flags/schema.py +9 -0
- unique_toolkit/_common/pydantic/rjsf_tags.py +936 -0
- unique_toolkit/_common/pydantic_helpers.py +174 -0
- unique_toolkit/_common/referencing.py +53 -0
- unique_toolkit/_common/string_utilities.py +140 -0
- unique_toolkit/_common/tests/test_referencing.py +521 -0
- unique_toolkit/_common/tests/test_string_utilities.py +506 -0
- unique_toolkit/_common/token/image_token_counting.py +67 -0
- unique_toolkit/_common/token/token_counting.py +204 -0
- unique_toolkit/_common/utils/__init__.py +1 -0
- unique_toolkit/_common/utils/files.py +43 -0
- unique_toolkit/_common/utils/image/encode.py +25 -0
- unique_toolkit/_common/utils/jinja/helpers.py +10 -0
- unique_toolkit/_common/utils/jinja/render.py +18 -0
- unique_toolkit/_common/utils/jinja/schema.py +65 -0
- unique_toolkit/_common/utils/jinja/utils.py +80 -0
- unique_toolkit/_common/utils/structured_output/__init__.py +1 -0
- unique_toolkit/_common/utils/structured_output/schema.py +5 -0
- unique_toolkit/_common/utils/write_configuration.py +51 -0
- unique_toolkit/_common/validators.py +101 -4
- unique_toolkit/agentic/__init__.py +1 -0
- unique_toolkit/agentic/debug_info_manager/debug_info_manager.py +28 -0
- unique_toolkit/agentic/debug_info_manager/test/test_debug_info_manager.py +278 -0
- unique_toolkit/agentic/evaluation/config.py +36 -0
- unique_toolkit/{evaluators → agentic/evaluation}/context_relevancy/prompts.py +25 -0
- unique_toolkit/agentic/evaluation/context_relevancy/schema.py +80 -0
- unique_toolkit/agentic/evaluation/context_relevancy/service.py +273 -0
- unique_toolkit/agentic/evaluation/evaluation_manager.py +218 -0
- unique_toolkit/agentic/evaluation/hallucination/constants.py +61 -0
- unique_toolkit/agentic/evaluation/hallucination/hallucination_evaluation.py +112 -0
- unique_toolkit/{evaluators → agentic/evaluation}/hallucination/prompts.py +1 -1
- unique_toolkit/{evaluators → agentic/evaluation}/hallucination/service.py +20 -16
- unique_toolkit/{evaluators → agentic/evaluation}/hallucination/utils.py +32 -21
- unique_toolkit/{evaluators → agentic/evaluation}/output_parser.py +20 -2
- unique_toolkit/{evaluators → agentic/evaluation}/schemas.py +27 -7
- unique_toolkit/agentic/evaluation/tests/test_context_relevancy_service.py +253 -0
- unique_toolkit/agentic/evaluation/tests/test_output_parser.py +87 -0
- unique_toolkit/agentic/history_manager/history_construction_with_contents.py +298 -0
- unique_toolkit/agentic/history_manager/history_manager.py +241 -0
- unique_toolkit/agentic/history_manager/loop_token_reducer.py +484 -0
- unique_toolkit/agentic/history_manager/utils.py +96 -0
- unique_toolkit/agentic/message_log_manager/__init__.py +5 -0
- unique_toolkit/agentic/message_log_manager/service.py +93 -0
- unique_toolkit/agentic/postprocessor/postprocessor_manager.py +212 -0
- unique_toolkit/agentic/reference_manager/reference_manager.py +103 -0
- unique_toolkit/agentic/responses_api/__init__.py +19 -0
- unique_toolkit/agentic/responses_api/postprocessors/code_display.py +71 -0
- unique_toolkit/agentic/responses_api/postprocessors/generated_files.py +297 -0
- unique_toolkit/agentic/responses_api/stream_handler.py +15 -0
- unique_toolkit/agentic/short_term_memory_manager/persistent_short_term_memory_manager.py +141 -0
- unique_toolkit/agentic/thinking_manager/thinking_manager.py +103 -0
- unique_toolkit/agentic/tools/__init__.py +1 -0
- unique_toolkit/agentic/tools/a2a/__init__.py +36 -0
- unique_toolkit/agentic/tools/a2a/config.py +17 -0
- unique_toolkit/agentic/tools/a2a/evaluation/__init__.py +15 -0
- unique_toolkit/agentic/tools/a2a/evaluation/_utils.py +66 -0
- unique_toolkit/agentic/tools/a2a/evaluation/config.py +55 -0
- unique_toolkit/agentic/tools/a2a/evaluation/evaluator.py +260 -0
- unique_toolkit/agentic/tools/a2a/evaluation/summarization_user_message.j2 +9 -0
- unique_toolkit/agentic/tools/a2a/manager.py +55 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/__init__.py +21 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/_display_utils.py +240 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/_ref_utils.py +84 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/config.py +78 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/display.py +264 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/references.py +101 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/test/test_display.py +421 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/test/test_display_utils.py +2103 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/test/test_ref_utils.py +603 -0
- unique_toolkit/agentic/tools/a2a/prompts.py +46 -0
- unique_toolkit/agentic/tools/a2a/response_watcher/__init__.py +6 -0
- unique_toolkit/agentic/tools/a2a/response_watcher/service.py +91 -0
- unique_toolkit/agentic/tools/a2a/tool/__init__.py +4 -0
- unique_toolkit/agentic/tools/a2a/tool/_memory.py +26 -0
- unique_toolkit/agentic/tools/a2a/tool/_schema.py +9 -0
- unique_toolkit/agentic/tools/a2a/tool/config.py +158 -0
- unique_toolkit/agentic/tools/a2a/tool/service.py +393 -0
- unique_toolkit/agentic/tools/agent_chunks_hanlder.py +65 -0
- unique_toolkit/agentic/tools/config.py +128 -0
- unique_toolkit/agentic/tools/factory.py +44 -0
- unique_toolkit/agentic/tools/mcp/__init__.py +4 -0
- unique_toolkit/agentic/tools/mcp/manager.py +71 -0
- unique_toolkit/agentic/tools/mcp/models.py +28 -0
- unique_toolkit/agentic/tools/mcp/tool_wrapper.py +234 -0
- unique_toolkit/agentic/tools/openai_builtin/__init__.py +11 -0
- unique_toolkit/agentic/tools/openai_builtin/base.py +46 -0
- unique_toolkit/agentic/tools/openai_builtin/code_interpreter/__init__.py +8 -0
- unique_toolkit/agentic/tools/openai_builtin/code_interpreter/config.py +88 -0
- unique_toolkit/agentic/tools/openai_builtin/code_interpreter/service.py +250 -0
- unique_toolkit/agentic/tools/openai_builtin/manager.py +79 -0
- unique_toolkit/agentic/tools/schemas.py +145 -0
- unique_toolkit/agentic/tools/test/test_mcp_manager.py +536 -0
- unique_toolkit/agentic/tools/test/test_tool_progress_reporter.py +445 -0
- unique_toolkit/agentic/tools/tool.py +187 -0
- unique_toolkit/agentic/tools/tool_manager.py +492 -0
- unique_toolkit/agentic/tools/tool_progress_reporter.py +285 -0
- unique_toolkit/agentic/tools/utils/__init__.py +19 -0
- unique_toolkit/agentic/tools/utils/execution/__init__.py +1 -0
- unique_toolkit/agentic/tools/utils/execution/execution.py +286 -0
- unique_toolkit/agentic/tools/utils/source_handling/__init__.py +0 -0
- unique_toolkit/agentic/tools/utils/source_handling/schema.py +21 -0
- unique_toolkit/agentic/tools/utils/source_handling/source_formatting.py +207 -0
- unique_toolkit/agentic/tools/utils/source_handling/tests/test_source_formatting.py +216 -0
- unique_toolkit/app/__init__.py +9 -0
- unique_toolkit/app/dev_util.py +180 -0
- unique_toolkit/app/fast_api_factory.py +131 -0
- unique_toolkit/app/init_sdk.py +32 -1
- unique_toolkit/app/schemas.py +206 -31
- unique_toolkit/app/unique_settings.py +367 -0
- unique_toolkit/app/webhook.py +77 -0
- unique_toolkit/chat/__init__.py +8 -1
- unique_toolkit/chat/deprecated/service.py +232 -0
- unique_toolkit/chat/functions.py +648 -78
- unique_toolkit/chat/rendering.py +34 -0
- unique_toolkit/chat/responses_api.py +461 -0
- unique_toolkit/chat/schemas.py +134 -2
- unique_toolkit/chat/service.py +115 -767
- unique_toolkit/content/functions.py +353 -8
- unique_toolkit/content/schemas.py +128 -15
- unique_toolkit/content/service.py +321 -45
- unique_toolkit/content/smart_rules.py +301 -0
- unique_toolkit/content/utils.py +10 -3
- unique_toolkit/data_extraction/README.md +96 -0
- unique_toolkit/data_extraction/__init__.py +11 -0
- unique_toolkit/data_extraction/augmented/__init__.py +5 -0
- unique_toolkit/data_extraction/augmented/service.py +93 -0
- unique_toolkit/data_extraction/base.py +25 -0
- unique_toolkit/data_extraction/basic/__init__.py +11 -0
- unique_toolkit/data_extraction/basic/config.py +18 -0
- unique_toolkit/data_extraction/basic/prompt.py +13 -0
- unique_toolkit/data_extraction/basic/service.py +55 -0
- unique_toolkit/embedding/service.py +103 -12
- unique_toolkit/framework_utilities/__init__.py +1 -0
- unique_toolkit/framework_utilities/langchain/__init__.py +10 -0
- unique_toolkit/framework_utilities/langchain/client.py +71 -0
- unique_toolkit/framework_utilities/langchain/history.py +19 -0
- unique_toolkit/framework_utilities/openai/__init__.py +6 -0
- unique_toolkit/framework_utilities/openai/client.py +84 -0
- unique_toolkit/framework_utilities/openai/message_builder.py +229 -0
- unique_toolkit/framework_utilities/utils.py +23 -0
- unique_toolkit/language_model/__init__.py +3 -0
- unique_toolkit/language_model/_responses_api_utils.py +93 -0
- unique_toolkit/language_model/builder.py +27 -11
- unique_toolkit/language_model/default_language_model.py +3 -0
- unique_toolkit/language_model/functions.py +345 -43
- unique_toolkit/language_model/infos.py +1288 -46
- unique_toolkit/language_model/reference.py +242 -0
- unique_toolkit/language_model/schemas.py +481 -49
- unique_toolkit/language_model/service.py +229 -28
- unique_toolkit/protocols/support.py +145 -0
- unique_toolkit/services/__init__.py +7 -0
- unique_toolkit/services/chat_service.py +1631 -0
- unique_toolkit/services/knowledge_base.py +1094 -0
- unique_toolkit/short_term_memory/service.py +178 -41
- unique_toolkit/smart_rules/__init__.py +0 -0
- unique_toolkit/smart_rules/compile.py +56 -0
- unique_toolkit/test_utilities/events.py +197 -0
- unique_toolkit-1.33.3.dist-info/METADATA +1145 -0
- unique_toolkit-1.33.3.dist-info/RECORD +205 -0
- unique_toolkit/evaluators/__init__.py +0 -1
- unique_toolkit/evaluators/config.py +0 -35
- unique_toolkit/evaluators/constants.py +0 -1
- unique_toolkit/evaluators/context_relevancy/constants.py +0 -32
- unique_toolkit/evaluators/context_relevancy/service.py +0 -53
- unique_toolkit/evaluators/context_relevancy/utils.py +0 -142
- unique_toolkit/evaluators/hallucination/constants.py +0 -41
- unique_toolkit-0.7.9.dist-info/METADATA +0 -413
- unique_toolkit-0.7.9.dist-info/RECORD +0 -64
- /unique_toolkit/{evaluators → agentic/evaluation}/exception.py +0 -0
- {unique_toolkit-0.7.9.dist-info → unique_toolkit-1.33.3.dist-info}/LICENSE +0 -0
- {unique_toolkit-0.7.9.dist-info → unique_toolkit-1.33.3.dist-info}/WHEEL +0 -0
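Most of the per-file churn is additions under the new `agentic`, `services`, `framework_utilities`, and `data_extraction` packages, with the old `evaluators` package folded into `agentic/evaluation`. Everything below the file list is the diff of `unique_toolkit/language_model/functions.py` (+345 -43 above), the most heavily reworked existing module: completions gain OpenAI-typed message and tool parameters, a shared parameter-preparation path, and reference-aware wrappers.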
```diff
@@ -1,53 +1,73 @@
+import copy
 import logging
-
+import warnings
+from datetime import UTC, datetime
+from typing import Any, Sequence, cast
 
+import humps
 import unique_sdk
+from openai.types.chat import ChatCompletionToolChoiceOptionParam
+from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam
 from pydantic import BaseModel
 
-from unique_toolkit.
-from unique_toolkit.
+from unique_toolkit.chat.schemas import ChatMessage, ChatMessageRole
+from unique_toolkit.content.schemas import ContentChunk, ContentReference
+from unique_toolkit.language_model import (
+    LanguageModelMessageRole,
+    LanguageModelMessages,
+    LanguageModelResponse,
+    LanguageModelStreamResponse,
+    LanguageModelStreamResponseMessage,
+    LanguageModelTool,
+    LanguageModelToolDescription,
+)
+from unique_toolkit.language_model.infos import (
+    LanguageModelInfo,
+    LanguageModelName,
+    TemperatureBounds,
+)
+from unique_toolkit.language_model.reference import (
+    add_references_to_message,
+)
 
 from .constants import (
     DEFAULT_COMPLETE_TEMPERATURE,
     DEFAULT_COMPLETE_TIMEOUT,
 )
-from .infos import LanguageModelName
-from .schemas import (
-    LanguageModelMessages,
-    LanguageModelResponse,
-    LanguageModelTool,
-)
 
-logger = logging.getLogger(f"toolkit.
+logger = logging.getLogger(f"toolkit.language_model.{__name__}")
 
 
 def complete(
     company_id: str,
-    messages: LanguageModelMessages,
+    messages: LanguageModelMessages | list[ChatCompletionMessageParam],
     model_name: LanguageModelName | str,
     temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
     timeout: int = DEFAULT_COMPLETE_TIMEOUT,
-    tools: list[LanguageModelTool] | None = None,
+    tools: list[LanguageModelTool | LanguageModelToolDescription] | None = None,
     other_options: dict | None = None,
-    structured_output_model:
+    structured_output_model: type[BaseModel] | None = None,
     structured_output_enforce_schema: bool = False,
+    user_id: str | None = None,
 ) -> LanguageModelResponse:
-    """
-    Calls the completion endpoint synchronously without streaming the response.
+    """Call the completion endpoint synchronously without streaming the response.
 
     Args:
+    ----
         company_id (str): The company ID associated with the request.
         messages (LanguageModelMessages): The messages to complete.
         model_name (LanguageModelName | str): The model name to use for the completion.
         temperature (float): The temperature setting for the completion. Defaults to 0.
         timeout (int): The timeout value in milliseconds. Defaults to 240_000.
-        tools (Optional[list[LanguageModelTool]]): Optional list of tools to include.
+        tools (Optional[list[LanguageModelTool | LanguageModelToolDescription ]]): Optional list of tools to include.
         other_options (Optional[dict]): Additional options to use. Defaults to None.
 
     Returns:
+    -------
         LanguageModelResponse: The response object containing the completed result.
+
     """
-    options, model, messages_dict, _ =
+    options, model, messages_dict, _ = _prepare_all_completions_params_util(
         messages=messages,
         model_name=model_name,
         temperature=temperature,
```
```diff
@@ -57,12 +77,19 @@ def complete(
         structured_output_enforce_schema=structured_output_enforce_schema,
     )
 
+    if not user_id:
+        warnings.warn(
+            "The user_id parameter is strongly recommended for tracking purposes.",
+            DeprecationWarning,
+        )
+
     try:
         response = unique_sdk.ChatCompletion.create(
             company_id=company_id,
+            user_id=user_id,
             model=model,
             messages=cast(
-                list[unique_sdk.Integrated.ChatCompletionRequestMessage],
+                "list[unique_sdk.Integrated.ChatCompletionRequestMessage]",
                 messages_dict,
             ),
             timeout=timeout,
```
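In the new signature, `complete` accepts either the toolkit's `LanguageModelMessages` or a plain list of OpenAI `ChatCompletionMessageParam` dicts, takes `LanguageModelToolDescription` alongside `LanguageModelTool`, and emits a `DeprecationWarning` when `user_id` is missing. A minimal sketch of the new sync call; the IDs and model string are placeholders, not values from this diff:

```python
from unique_toolkit.language_model.functions import complete

response = complete(
    company_id="company_123",  # placeholder ID
    user_id="user_456",        # omitting this triggers the DeprecationWarning above
    messages=[{"role": "user", "content": "Summarize the attached report."}],
    model_name="AZURE_GPT_4o_2024_0806",  # placeholder; any LanguageModelName or str
    temperature=0.0,
)
print(response.choices[0].message.content)
```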
```diff
@@ -76,38 +103,49 @@ def complete(
 
 async def complete_async(
     company_id: str,
-    messages: LanguageModelMessages,
+    messages: LanguageModelMessages | list[ChatCompletionMessageParam],
     model_name: LanguageModelName | str,
+    user_id: str | None = None,
     temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
     timeout: int = DEFAULT_COMPLETE_TIMEOUT,
-    tools: list[LanguageModelTool] | None = None,
+    tools: list[LanguageModelTool | LanguageModelToolDescription] | None = None,
     other_options: dict | None = None,
-    structured_output_model:
+    structured_output_model: type[BaseModel] | None = None,
     structured_output_enforce_schema: bool = False,
 ) -> LanguageModelResponse:
-    """
-    Calls the completion endpoint asynchronously without streaming the response.
+    """Call the completion endpoint asynchronously without streaming the response.
 
     This method sends a request to the completion endpoint using the provided messages, model name,
     temperature, timeout, and optional tools. It returns a `LanguageModelResponse` object containing
     the completed result.
 
     Args:
+    ----
         company_id (str): The company ID associated with the request.
         messages (LanguageModelMessages): The messages to complete.
         model_name (LanguageModelName | str): The model name to use for the completion.
         temperature (float): The temperature setting for the completion. Defaults to 0.
         timeout (int): The timeout value in milliseconds for the request. Defaults to 240_000.
-        tools (Optional[list[LanguageModelTool]]): Optional list of tools to include in the request.
+        tools (Optional[list[LanguageModelTool | LanguageModelToolDescription ]]): Optional list of tools to include in the request.
         other_options (Optional[dict]): The other options to use. Defaults to None.
 
     Returns:
+    -------
         LanguageModelResponse: The response object containing the completed result.
 
     Raises:
-
+    ------
+        Exception: If an error occurs during the request, an exception is raised
+            and logged.
+
     """
-    options, model, messages_dict, _ =
+    if not user_id:
+        warnings.warn(
+            "The user_id parameter is strongly recommended for tracking purposes.",
+            DeprecationWarning,
+        )
+
+    options, model, messages_dict, _ = _prepare_all_completions_params_util(
         messages=messages,
         model_name=model_name,
         temperature=temperature,
@@ -120,9 +158,10 @@ async def complete_async(
     try:
         response = await unique_sdk.ChatCompletion.create_async(
             company_id=company_id,
+            user_id=user_id,
             model=model,
             messages=cast(
-                list[unique_sdk.Integrated.ChatCompletionRequestMessage],
+                "list[unique_sdk.Integrated.ChatCompletionRequestMessage]",
                 messages_dict,
             ),
             timeout=timeout,
```
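The async variant gets the same treatment, including `user_id` forwarding into `unique_sdk.ChatCompletion.create_async`. A sketch under the same placeholder assumptions:

```python
import asyncio

from unique_toolkit.language_model.functions import complete_async


async def main() -> None:
    response = await complete_async(
        company_id="company_123",  # placeholder
        user_id="user_456",        # placeholder
        messages=[{"role": "user", "content": "ping"}],
        model_name="AZURE_GPT_4o_2024_0806",  # placeholder
    )
    print(response.choices[0].message.content)


asyncio.run(main())
```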
```diff
@@ -130,13 +169,13 @@
         )
         return LanguageModelResponse(**response)
     except Exception as e:
-        logger.
+        logger.exception(f"Error completing: {e}")
         raise e
 
 
 def _add_tools_to_options(
     options: dict,
-    tools:
+    tools: Sequence[LanguageModelTool | LanguageModelToolDescription] | None,
 ) -> dict:
     if tools:
         options["tools"] = [
@@ -149,7 +188,12 @@ def _add_tools_to_options(
     return options
 
 
-def _to_search_context(chunks: list[ContentChunk]) -> dict | None:
+SearchContext = list[unique_sdk.Integrated.SearchResult]
+
+
+def _to_search_context(
+    chunks: list[ContentChunk],
+) -> SearchContext | None:
     if not chunks:
         return None
     return [
@@ -163,14 +207,14 @@ def _to_search_context(chunks: list[ContentChunk]) -> dict | None:
             endPage=chunk.end_page,
             order=chunk.order,
             object=chunk.object,
-        )
+        )
         for chunk in chunks
     ]
 
 
 def _add_response_format_to_options(
     options: dict,
-    structured_output_model:
+    structured_output_model: type[BaseModel],
     structured_output_enforce_schema: bool = False,
 ) -> dict:
     options["responseFormat"] = {
```
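`_add_response_format_to_options` now requires a concrete `type[BaseModel]`, which pins down how structured output is requested end to end: pass a Pydantic class and its schema lands in `options["responseFormat"]`. A sketch with an invented schema and placeholder IDs:

```python
from pydantic import BaseModel

from unique_toolkit.language_model.functions import complete


class Verdict(BaseModel):
    """Hypothetical output schema; any Pydantic model works here."""

    label: str
    confidence: float


response = complete(
    company_id="company_123",  # placeholder
    user_id="user_456",        # placeholder
    messages=[{"role": "user", "content": "Classify: 'great product'"}],
    model_name="AZURE_GPT_4o_2024_0806",  # placeholder
    structured_output_model=Verdict,
    structured_output_enforce_schema=True,  # request strict schema enforcement
)
```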
```diff
@@ -188,38 +232,42 @@ def _prepare_completion_params_util(
     messages: LanguageModelMessages,
     model_name: LanguageModelName | str,
     temperature: float,
-    tools:
+    tools: Sequence[LanguageModelTool | LanguageModelToolDescription] | None = None,
     other_options: dict | None = None,
     content_chunks: list[ContentChunk] | None = None,
-    structured_output_model:
+    structured_output_model: type[BaseModel] | None = None,
     structured_output_enforce_schema: bool = False,
-) -> tuple[dict, str, dict,
-    """
-    Prepares common parameters for completion requests.
+) -> tuple[dict, str, dict, SearchContext | None]:
+    """Prepare common parameters for completion requests.
 
-    Returns
+    Returns
+    -------
     tuple containing:
         - options (dict): Combined options including tools and temperature
         - model (str): Resolved model name
         - messages_dict (dict): Processed messages
         - search_context (dict | None): Processed content chunks if provided
-    """
 
+    """
     options = _add_tools_to_options({}, tools)
+
     if structured_output_model:
         options = _add_response_format_to_options(
-            options,
+            options,
+            structured_output_model,
+            structured_output_enforce_schema,
         )
     options["temperature"] = temperature
     if other_options:
         options.update(other_options)
 
-    model =
+    model = (
+        model_name.value if isinstance(model_name, LanguageModelName) else model_name
+    )
 
-    # Different methods need different message dump parameters
     messages_dict = messages.model_dump(
         exclude_none=True,
-        by_alias=
+        by_alias=True,
     )
 
     search_context = (
@@ -227,3 +275,257 @@ def _prepare_completion_params_util(
     )
 
     return options, model, messages_dict, search_context
+
+
+def _prepare_openai_completion_params_util(
+    model_name: LanguageModelName | str,
+    temperature: float,
+    tools: Sequence[LanguageModelTool | LanguageModelToolDescription] | None = None,
+    other_options: dict | None = None,
+    content_chunks: list[ContentChunk] | None = None,
+    structured_output_model: type[BaseModel] | None = None,
+    structured_output_enforce_schema: bool = False,
+) -> tuple[dict, str, SearchContext | None]:
+    """Prepare common parameters for completion requests.
+
+    Returns
+    -------
+    tuple containing:
+        - options (dict): Combined options including tools and temperature
+        - model (str): Resolved model name
+        - messages_dict (dict): Processed messages
+        - search_context (dict | None): Processed content chunks if provided
+
+    """
+    options = _add_tools_to_options({}, tools)
+
+    if structured_output_model:
+        options = _add_response_format_to_options(
+            options,
+            structured_output_model,
+            structured_output_enforce_schema,
+        )
+    options["temperature"] = temperature
+    if other_options:
+        options.update(other_options)
+
+    model = (
+        model_name.value if isinstance(model_name, LanguageModelName) else model_name
+    )
+
+    search_context = (
+        _to_search_context(content_chunks) if content_chunks is not None else None
+    )
+
+    return options, model, search_context
+
+
+def __camelize_keys(data):
+    """Recursively camelize dictionary keys using humps."""
+    if isinstance(data, dict):
+        return {humps.camelize(k): __camelize_keys(v) for k, v in data.items()}
+    if isinstance(data, list):
+        return [__camelize_keys(item) for item in data]
+    return data
+
+
+def _clamp_temperature(
+    temperature: float, temperature_bounds: TemperatureBounds
+) -> float:
+    temperature = max(temperature_bounds.min_temperature, temperature)
+    temperature = min(temperature_bounds.max_temperature, temperature)
+    return round(temperature, 2)
+
+
+def _prepare_other_options(
+    other_options: dict | None,
+    default_options: dict,
+) -> dict:
+    options = default_options
+    if other_options is not None:
+        options.update(other_options)
+    return options
+
+
```
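Three small helpers back the new shared path: `__camelize_keys` converts snake_case keys to the camelCase the backend expects (per key via `humps`, recursing into dicts and lists), `_clamp_temperature` pins a requested temperature into the model's advertised `TemperatureBounds` and rounds to two decimals, and `_prepare_other_options` layers caller options over model defaults. The key conversion in isolation:

```python
import humps  # pyhumps, imported at the top of the module

payload = {"end_page": 3, "tool_choice": {"tool_name": "lookup"}}
# __camelize_keys applies this per key and recurses into nested dicts/lists;
# this one-liner shows only the top level.
print({humps.camelize(k): v for k, v in payload.items()})
# {'endPage': 3, 'toolChoice': {'tool_name': 'lookup'}}
```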
```diff
+def _prepare_all_completions_params_util(
+    messages: LanguageModelMessages | list[ChatCompletionMessageParam],
+    model_name: LanguageModelName | str,
+    temperature: float,
+    tools: Sequence[LanguageModelTool | LanguageModelToolDescription] | None = None,
+    other_options: dict | None = None,
+    content_chunks: list[ContentChunk] | None = None,
+    tool_choice: ChatCompletionToolChoiceOptionParam | None = None,
+    structured_output_model: type[BaseModel] | None = None,
+    structured_output_enforce_schema: bool = False,
+) -> tuple[
+    dict,
+    str,
+    list[unique_sdk.Integrated.ChatCompletionRequestMessage],
+    SearchContext | None,
+]:
+    model_info = None
+
+    other_options = copy.deepcopy(other_options)
+
+    if tool_choice is not None:
+        if other_options is None:
+            other_options = {}
+        if "toolChoice" not in other_options:
+            other_options["toolChoice"] = tool_choice  # Backend expects CamelCase
+
+    if isinstance(model_name, LanguageModelName):
+        model_info = LanguageModelInfo.from_name(model_name)
+        other_options = _prepare_other_options(
+            other_options, model_info.default_options
+        )
+
+    if isinstance(messages, LanguageModelMessages):
+        options, model, messages_dict, search_context = _prepare_completion_params_util(
+            messages=messages,
+            model_name=model_name,
+            temperature=temperature,
+            tools=tools,
+            content_chunks=content_chunks,
+            other_options=other_options,
+            structured_output_model=structured_output_model,
+            structured_output_enforce_schema=structured_output_enforce_schema,
+        )
+    else:
+        options, model, search_context = _prepare_openai_completion_params_util(
+            model_name=model_name,
+            temperature=temperature,
+            tools=tools,
+            content_chunks=content_chunks,
+            other_options=other_options,
+            structured_output_model=structured_output_model,
+            structured_output_enforce_schema=structured_output_enforce_schema,
+        )
+        messages_dict = __camelize_keys(messages.copy())
+
+    if (
+        model_info is not None
+        and model_info.temperature_bounds is not None
+        and "temperature" in options
+    ):
+        options["temperature"] = _clamp_temperature(
+            temperature, model_info.temperature_bounds
+        )
+
+    integrated_messages = cast(
+        "list[unique_sdk.Integrated.ChatCompletionRequestMessage]",
+        messages_dict,
+    )
+
+    return options, model, integrated_messages, search_context
+
+
```
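`_prepare_all_completions_params_util` is now the single entry point for both completion paths: it deep-copies the caller's options, writes `tool_choice` under the camelCase `toolChoice` key (only if the caller has not already set one), merges `LanguageModelInfo.from_name(...)` defaults, branches on the message type, and clamps the temperature. Since a pre-set `toolChoice` in `other_options` is honored, forcing a particular tool from the public API might look like this (the tool name is invented):

```python
from unique_toolkit.language_model.functions import complete

response = complete(
    company_id="company_123",  # placeholder
    user_id="user_456",        # placeholder
    messages=[{"role": "user", "content": "Look this up internally."}],
    model_name="AZURE_GPT_4o_2024_0806",  # placeholder
    other_options={
        # Shape follows OpenAI's ChatCompletionToolChoiceOptionParam,
        # already camelCased for the backend.
        "toolChoice": {"type": "function", "function": {"name": "search_internal_docs"}},
    },
)
```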
```diff
+def complete_with_references(
+    company_id: str,
+    messages: LanguageModelMessages,
+    model_name: LanguageModelName | str,
+    user_id: str | None = None,
+    content_chunks: list[ContentChunk] | None = None,
+    debug_dict: dict = {},
+    temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
+    timeout: int = DEFAULT_COMPLETE_TIMEOUT,
+    tools: list[LanguageModelTool | LanguageModelToolDescription] | None = None,
+    start_text: str | None = None,
+    other_options: dict[str, Any] | None = None,
+) -> LanguageModelStreamResponse:
+    # Use toolkit language model functions for chat completion
+
+    response = complete(
+        company_id=company_id,
+        user_id=user_id,
+        model_name=model_name,
+        messages=messages,
+        temperature=temperature,
+        timeout=timeout,
+        tools=tools,
+        other_options=other_options,
+    )
+
+    return _create_language_model_stream_response_with_references(
+        response=response,
+        content_chunks=content_chunks,
+        start_text=start_text,
+    )
+
+
+async def complete_with_references_async(
+    company_id: str,
+    user_id: str,
+    messages: LanguageModelMessages,
+    model_name: LanguageModelName | str,
+    content_chunks: list[ContentChunk] | None = None,
+    debug_dict: dict | None = None,
+    temperature: float = DEFAULT_COMPLETE_TEMPERATURE,
+    timeout: int = DEFAULT_COMPLETE_TIMEOUT,
+    tools: list[LanguageModelTool | LanguageModelToolDescription] | None = None,
+    start_text: str | None = None,
+    other_options: dict[str, Any] | None = None,
+) -> LanguageModelStreamResponse:
+    # Use toolkit language model functions for chat completion
+    response = await complete_async(
+        company_id=company_id,
+        user_id=user_id,
+        model_name=model_name,
+        messages=messages,
+        temperature=temperature,
+        timeout=timeout,
+        tools=tools,
+        other_options=other_options,
+    )
+
+    return _create_language_model_stream_response_with_references(
+        response=response,
+        content_chunks=content_chunks,
+        start_text=start_text,
+    )
+
+
```
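`complete_with_references` and its async twin wrap a plain completion and then resolve references against the supplied chunks through `add_references_to_message` (from the new `language_model/reference.py`). A sketch, assuming `messages` is a `LanguageModelMessages` instance and `chunks` a `list[ContentChunk]` from an earlier search:

```python
from unique_toolkit.language_model.functions import complete_with_references

stream_response = complete_with_references(
    company_id="company_123",  # placeholder
    user_id="user_456",        # placeholder
    messages=messages,         # LanguageModelMessages, built elsewhere
    model_name="AZURE_GPT_4o_2024_0806",  # placeholder
    content_chunks=chunks,     # list[ContentChunk] from a prior search
    start_text="Answer: ",     # optional prefix; see the note after the next hunk
)
print(stream_response.message.text)
print(stream_response.message.references)
```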
```diff
+def _create_language_model_stream_response_with_references(
+    response: LanguageModelResponse,
+    content_chunks: list[ContentChunk] | None = None,
+    start_text: str | None = None,
+):
+    content = response.choices[0].message.content
+    content_chunks = content_chunks or []
+
+    if content is None:
+        raise ValueError("Content is None, which is not supported")
+    if isinstance(content, list):
+        raise ValueError("Content is a list, which is not supported")
+    content = start_text or "" + str(content)
+
+    message = ChatMessage(
+        id="msg_unknown",
+        text=copy.deepcopy(content),
+        role=ChatMessageRole.ASSISTANT,
+        created_at=datetime.now(UTC),
+        chat_id="chat_unknown",
+    )
+
+    message, __ = add_references_to_message(
+        message=message,
+        search_context=content_chunks,
+    )
+
+    stream_response_message = LanguageModelStreamResponseMessage(
+        id="stream_unknown",
+        previous_message_id=None,
+        role=LanguageModelMessageRole.ASSISTANT,
+        text=message.content or "",
+        original_text=content,
+        references=[
+            ContentReference(**u.model_dump()) for u in message.references or []
+        ],
+    )
+
+    tool_calls = [r.function for r in response.choices[0].message.tool_calls or []]
+    tool_calls = tool_calls if len(tool_calls) > 0 else None
+
+    return LanguageModelStreamResponse(
+        message=stream_response_message,
+        tool_calls=tool_calls,
+    )
```