unique_orchestrator 1.5.0-py3-none-any.whl → 1.7.7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of unique_orchestrator might be problematic.
- unique_orchestrator/config.py +42 -27
- unique_orchestrator/prompts/system_prompt.jinja2 +9 -0
- unique_orchestrator/tests/test_unique_ai_get_filtered_user_metadata.py +257 -0
- unique_orchestrator/unique_ai.py +66 -12
- unique_orchestrator/unique_ai_builder.py +140 -127
- {unique_orchestrator-1.5.0.dist-info → unique_orchestrator-1.7.7.dist-info}/METADATA +45 -3
- unique_orchestrator-1.7.7.dist-info/RECORD +12 -0
- unique_orchestrator-1.5.0.dist-info/RECORD +0 -11
- {unique_orchestrator-1.5.0.dist-info → unique_orchestrator-1.7.7.dist-info}/LICENSE +0 -0
- {unique_orchestrator-1.5.0.dist-info → unique_orchestrator-1.7.7.dist-info}/WHEEL +0 -0
unique_orchestrator/config.py
CHANGED
@@ -3,13 +3,13 @@ from pathlib import Path
 from typing import Annotated, Any, Generic, Literal, TypeVar

 from pydantic import BaseModel, Field, ValidationInfo, field_validator, model_validator
-from pydantic.json_schema import SkipJsonSchema
 from unique_deep_research.config import DeepResearchToolConfig
 from unique_deep_research.service import DeepResearchTool
 from unique_follow_up_questions.config import FollowUpQuestionsConfig
 from unique_internal_search.config import InternalSearchConfig
 from unique_internal_search.service import InternalSearchTool
 from unique_stock_ticker.config import StockTickerConfig
+from unique_swot import SwotAnalysisTool, SwotAnalysisToolConfig
 from unique_toolkit._common.validators import (
     LMI,
     ClipInt,
@@ -33,6 +33,9 @@ from unique_toolkit.agentic.tools.openai_builtin.manager import (
     OpenAICodeInterpreterConfig,
 )
 from unique_toolkit.agentic.tools.tool import ToolBuildConfig
+from unique_toolkit.agentic.tools.tool_progress_reporter import (
+    ToolProgressReporterConfig,
+)
 from unique_toolkit.language_model.default_language_model import DEFAULT_GPT_4o
 from unique_web_search.config import WebSearchConfig
 from unique_web_search.service import WebSearchTool
@@ -87,6 +90,10 @@ class SpaceConfigBase(BaseModel, Generic[T]):
             name=DeepResearchTool.name,
             configuration=DeepResearchToolConfig(),
         ),
+        ToolBuildConfig(
+            name=SwotAnalysisTool.name,
+            configuration=SwotAnalysisToolConfig(),
+        ),
     ],
 )

@@ -155,6 +162,12 @@ class UniqueAIPromptConfig(BaseModel):
         description="The user message prompt template as a Jinja2 template string.",
     )

+    user_metadata: list[str] = Field(
+        default=[],
+        title="User Metadata",
+        description="User metadata fields to be ingested in the system prompt.",
+    )
+

 class UniqueAIServices(BaseModel):
     """Determine the services the agent is using
@@ -191,6 +204,10 @@ class UniqueAIServices(BaseModel):

     uploaded_content_config: UploadedContentConfig = UploadedContentConfig()

+    tool_progress_reporter_config: ToolProgressReporterConfig = (
+        ToolProgressReporterConfig()
+    )
+

 class InputTokenDistributionConfig(BaseModel):
     model_config = get_configuration_dict(frozen=True)
@@ -230,43 +247,36 @@ class SubAgentsConfig(BaseModel):
         | DeactivatedNone
     ) = SubAgentEvaluationServiceConfig()

+    sleep_time_before_update: float = Field(
+        default=0.5,
+        description="Time to sleep before updating the main agent message to display the sub agent responses. Temporary fix to avoid rendering issues.",
+    )

-class ResponsesApiConfig(BaseModel):
-    model_config = get_configuration_dict(frozen=True)

-
-
-        description="
+class CodeInterpreterExtendedConfig(OpenAICodeInterpreterConfig):
+    generated_files_scope_id: str = Field(
+        description="Folder where files generated by the LLM will be uploaded",
     )
-
+    display_config: (
         Annotated[
             ShowExecutedCodePostprocessorConfig,
             Field(title="Active"),
         ]
         | DeactivatedNone
-    ) =
-
-
-        default=True,
-        description="[TEMPORARY] Whether to use the direct Azure client instead of the responses API.",
-    )
-    direct_azure_client_api_base_env_var: SkipJsonSchema[str] = Field(
-        default="OPENAI_BASE_URL",
-        description="[TEMPORARY] The environment variable that contains the API base for the direct Azure client.",
-    )
-    direct_azure_client_api_key_env_var: SkipJsonSchema[str] = Field(
-        default="OPENAI_API_KEY",
-        description="[TEMPORARY] The environment variable that contains the API key for the direct Azure client.",
+    ) = Field(
+        ShowExecutedCodePostprocessorConfig(),
+        description="If active, generated code will be prepended to the LLM answer",
     )
+
+
+class ResponsesApiConfig(BaseModel):
+    model_config = get_configuration_dict(frozen=True)
+
     code_interpreter: (
-        Annotated[
+        Annotated[CodeInterpreterExtendedConfig, Field(title="Active")]
+        | DeactivatedNone
     ) = Field(default=None, description="Config for openai code interpreter")

-    generated_files_scope_id: str = Field(
-        default="<SCOPE_ID_PLACEHOLDER>",
-        description="Scope ID for the responses API.",
-    )
-

 class ExperimentalConfig(BaseModel):
     """Experimental features this part of the configuration might evolve in the future continuously"""
@@ -301,7 +311,12 @@ class ExperimentalConfig(BaseModel):

     sub_agents_config: SubAgentsConfig = SubAgentsConfig()

-    responses_api_config:
+    responses_api_config: (
+        Annotated[ResponsesApiConfig, Field(title="Active")] | DeactivatedNone
+    ) = Field(
+        default=None,
+        description="If active, the main agent will use the responses api from open ai",
+    )


 class UniqueAIAgentConfig(BaseModel):
unique_orchestrator/prompts/system_prompt.jinja2
CHANGED
@@ -7,6 +7,15 @@ You are Unique AI Chat a system based on large language models
 **Knowledge cutoff**: {{ model_info.info_cutoff_at | default('unknown') }}
 **Current date**: {{ date_string }}

+{#- User Metadata Section #}
+{% if user_metadata and user_metadata|length > 0 %}
+# User Information
+Here is some metadata about the user, which may help you write better queries, and help contextualize the information you retrieve:
+{%- for key, value in user_metadata.items() %}
+- {{ key.replace('_', ' ').title() }}: {{ value }}
+{%- endfor %}
+{%- endif %}
+
 Over the course of the conversation, you adapt to the user's tone and preference.
 Try to match the user's vibe, tone, and generally how they are speaking. You want the conversation to feel natural.
 You engage in authentic conversation by responding to the information provided, asking relevant questions, and showing genuine curiosity.
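For orientation, the following standalone sketch (not part of the package) renders a copy of the user metadata block added above to show the text it injects into the system prompt; the metadata values are invented for illustration, and the real values come from the event payload filtered by the config.

import jinja2  # the orchestrator renders its prompt templates with jinja2

# Copy of the template logic added above, reduced to the user metadata section.
USER_METADATA_BLOCK = (
    "{% if user_metadata and user_metadata|length > 0 %}"
    "# User Information\n"
    "Here is some metadata about the user, which may help you write better queries, "
    "and help contextualize the information you retrieve:"
    "{%- for key, value in user_metadata.items() %}\n"
    "- {{ key.replace('_', ' ').title() }}: {{ value }}"
    "{%- endfor %}"
    "{%- endif %}"
)

# Hypothetical metadata values for illustration only.
rendered = jinja2.Template(USER_METADATA_BLOCK).render(
    user_metadata={"department": "Engineering", "preferred_language": "en"}
)
print(rendered)
# # User Information
# Here is some metadata about the user, which may help you write better queries, and help contextualize the information you retrieve:
# - Department: Engineering
# - Preferred Language: en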
unique_orchestrator/tests/test_unique_ai_get_filtered_user_metadata.py
ADDED
@@ -0,0 +1,257 @@
+from unittest.mock import MagicMock
+
+import pytest
+
+
+class TestGetFilteredUserMetadata:
+    """Test suite for UniqueAI._get_filtered_user_metadata method"""
+
+    @pytest.fixture
+    def mock_unique_ai(self):
+        """Create a minimal UniqueAI instance with mocked dependencies"""
+        # Lazy import to avoid heavy dependencies at module import time
+        from unique_orchestrator.unique_ai import UniqueAI
+
+        mock_logger = MagicMock()
+
+        # Create minimal event structure
+        dummy_event = MagicMock()
+        dummy_event.payload.assistant_message.id = "assist_1"
+        dummy_event.payload.user_message.text = "query"
+
+        # Create minimal config structure
+        mock_config = MagicMock()
+        mock_config.agent.prompt_config.user_metadata = []
+
+        # Create minimal required dependencies
+        mock_chat_service = MagicMock()
+        mock_content_service = MagicMock()
+        mock_debug_info_manager = MagicMock()
+        mock_reference_manager = MagicMock()
+        mock_thinking_manager = MagicMock()
+        mock_tool_manager = MagicMock()
+        mock_history_manager = MagicMock()
+        mock_evaluation_manager = MagicMock()
+        mock_postprocessor_manager = MagicMock()
+        mock_streaming_handler = MagicMock()
+
+        # Instantiate UniqueAI
+        ua = UniqueAI(
+            logger=mock_logger,
+            event=dummy_event,
+            config=mock_config,
+            chat_service=mock_chat_service,
+            content_service=mock_content_service,
+            debug_info_manager=mock_debug_info_manager,
+            streaming_handler=mock_streaming_handler,
+            reference_manager=mock_reference_manager,
+            thinking_manager=mock_thinking_manager,
+            tool_manager=mock_tool_manager,
+            history_manager=mock_history_manager,
+            evaluation_manager=mock_evaluation_manager,
+            postprocessor_manager=mock_postprocessor_manager,
+            mcp_servers=[],
+        )
+
+        return ua
+
+    def test_returns_empty_dict_when_config_is_empty_list(self, mock_unique_ai):
+        """Test that empty dict is returned when config.user_metadata is an empty list"""
+        mock_unique_ai._config.agent.prompt_config.user_metadata = []
+        mock_unique_ai._event.payload.user_metadata = {
+            "department": "Engineering",
+            "role": "Developer",
+        }
+
+        result = mock_unique_ai._get_filtered_user_metadata()
+
+        assert result == {}
+        assert isinstance(result, dict)
+
+    def test_returns_empty_dict_when_user_metadata_is_none(self, mock_unique_ai):
+        """Test that empty dict is returned when user_metadata is None"""
+        mock_unique_ai._config.agent.prompt_config.user_metadata = [
+            "department",
+            "role",
+        ]
+        mock_unique_ai._event.payload.user_metadata = None
+
+        result = mock_unique_ai._get_filtered_user_metadata()
+
+        assert result == {}
+        assert isinstance(result, dict)
+
+    def test_returns_empty_dict_when_both_config_and_metadata_are_empty(
+        self, mock_unique_ai
+    ):
+        """Test that empty dict is returned when both config and user_metadata are empty/None"""
+        mock_unique_ai._config.agent.prompt_config.user_metadata = []
+        mock_unique_ai._event.payload.user_metadata = None
+
+        result = mock_unique_ai._get_filtered_user_metadata()
+
+        assert result == {}
+        assert isinstance(result, dict)
+
+    def test_filters_metadata_to_include_only_configured_keys(self, mock_unique_ai):
+        """Test that only keys specified in config are included in the result"""
+        mock_unique_ai._config.agent.prompt_config.user_metadata = [
+            "department",
+            "role",
+        ]
+        mock_unique_ai._event.payload.user_metadata = {
+            "department": "Engineering",
+            "role": "Developer",
+            "location": "San Francisco",
+            "salary": "100000",
+        }
+
+        result = mock_unique_ai._get_filtered_user_metadata()
+
+        assert result == {"department": "Engineering", "role": "Developer"}
+        assert "location" not in result
+        assert "salary" not in result
+        # Verify all values are strings
+        assert all(isinstance(v, str) for v in result.values())
+
+    def test_returns_only_existing_keys_from_user_metadata(self, mock_unique_ai):
+        """Test that keys in config but not in user_metadata are not included"""
+        mock_unique_ai._config.agent.prompt_config.user_metadata = [
+            "department",
+            "role",
+            "team",
+            "manager",
+        ]
+        mock_unique_ai._event.payload.user_metadata = {
+            "department": "Engineering",
+            "role": "Developer",
+        }
+
+        result = mock_unique_ai._get_filtered_user_metadata()
+
+        assert result == {"department": "Engineering", "role": "Developer"}
+        assert "team" not in result
+        assert "manager" not in result
+
+    def test_handles_single_key_in_config(self, mock_unique_ai):
+        """Test filtering with a single key in config"""
+        mock_unique_ai._config.agent.prompt_config.user_metadata = ["department"]
+        mock_unique_ai._event.payload.user_metadata = {
+            "department": "Engineering",
+            "role": "Developer",
+        }
+
+        result = mock_unique_ai._get_filtered_user_metadata()
+
+        assert result == {"department": "Engineering"}
+        assert isinstance(result["department"], str)
+
+    def test_handles_string_values(self, mock_unique_ai):
+        """Test that string values in user_metadata are preserved"""
+        mock_unique_ai._config.agent.prompt_config.user_metadata = [
+            "name",
+            "email",
+            "department",
+            "title",
+        ]
+        mock_unique_ai._event.payload.user_metadata = {
+            "name": "John Doe",
+            "email": "john.doe@example.com",
+            "department": "Engineering",
+            "title": "Senior Developer",
+            "ignored": "This should not appear",
+        }
+
+        result = mock_unique_ai._get_filtered_user_metadata()
+
+        assert result == {
+            "name": "John Doe",
+            "email": "john.doe@example.com",
+            "department": "Engineering",
+            "title": "Senior Developer",
+        }
+        assert "ignored" not in result
+        # Verify all values are strings
+        assert all(isinstance(v, str) for v in result.values())
+
+    def test_handles_empty_dict_user_metadata(self, mock_unique_ai):
+        """Test behavior when user_metadata is an empty dict"""
+        mock_unique_ai._config.agent.prompt_config.user_metadata = ["department"]
+        mock_unique_ai._event.payload.user_metadata = {}
+
+        result = mock_unique_ai._get_filtered_user_metadata()
+
+        assert result == {}
+
+    def test_handles_empty_string_values(self, mock_unique_ai):
+        """Test that empty string values in user_metadata are preserved if key is in config"""
+        mock_unique_ai._config.agent.prompt_config.user_metadata = [
+            "department",
+            "role",
+        ]
+        mock_unique_ai._event.payload.user_metadata = {
+            "department": "Engineering",
+            "role": "",
+        }
+
+        result = mock_unique_ai._get_filtered_user_metadata()
+
+        assert result == {"department": "Engineering", "role": ""}
+        assert isinstance(result["role"], str)
+
+    def test_preserves_original_metadata_unchanged(self, mock_unique_ai):
+        """Test that the original user_metadata dict is not modified"""
+        original_metadata = {
+            "department": "Engineering",
+            "role": "Developer",
+            "location": "San Francisco",
+        }
+        mock_unique_ai._config.agent.prompt_config.user_metadata = ["department"]
+        mock_unique_ai._event.payload.user_metadata = original_metadata.copy()
+
+        result = mock_unique_ai._get_filtered_user_metadata()
+
+        # Original should still have all keys
+        assert mock_unique_ai._event.payload.user_metadata == original_metadata
+        # Result should only have filtered key
+        assert result == {"department": "Engineering"}
+
+    def test_handles_special_characters_in_values(self, mock_unique_ai):
+        """Test that special characters in string values are preserved"""
+        mock_unique_ai._config.agent.prompt_config.user_metadata = [
+            "description",
+            "notes",
+        ]
+        mock_unique_ai._event.payload.user_metadata = {
+            "description": "User with special chars: @#$%^&*()",
+            "notes": "Multi-line\ntext\twith\ttabs",
+            "other": "excluded",
+        }
+
+        result = mock_unique_ai._get_filtered_user_metadata()
+
+        assert result == {
+            "description": "User with special chars: @#$%^&*()",
+            "notes": "Multi-line\ntext\twith\ttabs",
+        }
+        assert all(isinstance(v, str) for v in result.values())
+
+    def test_return_type_is_dict_str_str(self, mock_unique_ai):
+        """Test that return type is dict[str, str]"""
+        mock_unique_ai._config.agent.prompt_config.user_metadata = [
+            "department",
+            "role",
+        ]
+        mock_unique_ai._event.payload.user_metadata = {
+            "department": "Engineering",
+            "role": "Developer",
+        }
+
+        result = mock_unique_ai._get_filtered_user_metadata()
+
+        # Check it's a dict
+        assert isinstance(result, dict)
+        # Check all keys are strings
+        assert all(isinstance(k, str) for k in result.keys())
+        # Check all values are strings
+        assert all(isinstance(v, str) for v in result.values())
unique_orchestrator/unique_ai.py
CHANGED
@@ -1,3 +1,4 @@
+import asyncio
 from datetime import datetime
 from logging import Logger

@@ -14,6 +15,7 @@ from unique_toolkit.agentic.reference_manager.reference_manager import Reference
 from unique_toolkit.agentic.thinking_manager.thinking_manager import ThinkingManager
 from unique_toolkit.agentic.tools.tool_manager import (
     ResponsesApiToolManager,
+    SafeTaskExecutor,
     ToolManager,
 )
 from unique_toolkit.app.schemas import ChatEvent, McpServer
@@ -133,9 +135,6 @@ class UniqueAI:
         self.start_text = self._thinking_manager.update_start_text(
             self.start_text, loop_response
         )
-        await self._create_new_assistant_message_if_loop_response_contains_content(
-            loop_response
-        )

         # Only set completed_at if no tool took control. Tools that take control will set the message state to completed themselves.
         await self._chat_service.modify_assistant_message_async(
@@ -229,6 +228,9 @@ class UniqueAI:
             self._logger.debug(
                 "Tools were called we process them and do not exit the loop"
             )
+            await self._create_new_assistant_message_if_loop_response_contains_content(
+                loop_response
+            )

             return await self._handle_tool_calls(loop_response)

@@ -259,12 +261,17 @@ class UniqueAI:
             for prompts in self._tool_manager.get_tool_prompts()
         ]

-        used_tools = [
+        used_tools = [t.name for t in self._history_manager.get_tool_calls()]
+        sub_agent_calls = self._tool_manager.filter_tool_calls(
+            self._history_manager.get_tool_calls(), ["subagent"]
+        )

         mcp_server_user_prompts = [
             mcp_server.user_prompt for mcp_server in self._mcp_servers
         ]

+        user_metadata = self._get_filtered_user_metadata()
+
         tool_descriptions = self._tool_manager.get_tool_prompts()

         query = self._event.payload.user_message.text
@@ -272,6 +279,7 @@ class UniqueAI:
         if (
             self._config.agent.experimental.sub_agents_config.referencing_config
             is not None
+            and len(sub_agent_calls) > 0
         ):
             use_sub_agent_references = True
             sub_agent_referencing_instructions = self._config.agent.experimental.sub_agents_config.referencing_config.referencing_instructions_for_user_prompt
@@ -287,16 +295,18 @@ class UniqueAI:
             tool_descriptions_with_user_prompts=tool_descriptions_with_user_prompts,
             use_sub_agent_references=use_sub_agent_references,
             sub_agent_referencing_instructions=sub_agent_referencing_instructions,
+            user_metadata=user_metadata,
         )
         return user_msg

-    async def _render_system_prompt(
-        self,
-    ) -> str:
+    async def _render_system_prompt(self) -> str:
         # TODO: Collect tool information here and adapt to system prompt
         tool_descriptions = self._tool_manager.get_tool_prompts()

-        used_tools = [
+        used_tools = [t.name for t in self._history_manager.get_tool_calls()]
+        sub_agent_calls = self._tool_manager.filter_tool_calls(
+            self._history_manager.get_tool_calls(), ["subagent"]
+        )

         system_prompt_template = jinja2.Template(
             self._config.agent.prompt_config.system_prompt_template
@@ -304,6 +314,8 @@ class UniqueAI:

         date_string = datetime.now().strftime("%A %B %d, %Y")

+        user_metadata = self._get_filtered_user_metadata()
+
         mcp_server_system_prompts = [
             mcp_server.system_prompt for mcp_server in self._mcp_servers
         ]
@@ -311,6 +323,7 @@ class UniqueAI:
         if (
             self._config.agent.experimental.sub_agents_config.referencing_config
             is not None
+            and len(sub_agent_calls) > 0
         ):
             use_sub_agent_references = True
             sub_agent_referencing_instructions = self._config.agent.experimental.sub_agents_config.referencing_config.referencing_instructions_for_system_prompt
@@ -331,6 +344,7 @@ class UniqueAI:
             mcp_server_system_prompts=mcp_server_system_prompts,
             use_sub_agent_references=use_sub_agent_references,
             sub_agent_referencing_instructions=sub_agent_referencing_instructions,
+            user_metadata=user_metadata,
         )
         return system_message

@@ -338,14 +352,31 @@ class UniqueAI:
         self, loop_response: LanguageModelStreamResponse
     ) -> bool:
         """Handle the case where no tool calls are returned."""
+        task_executor = SafeTaskExecutor(
+            logger=self._logger,
+        )
+
         selected_evaluation_names = self._tool_manager.get_evaluation_check_list()
-        evaluation_results =
-
+        evaluation_results = task_executor.execute_async(
+            self._evaluation_manager.run_evaluations,
+            selected_evaluation_names,
+            loop_response,
+            self._latest_assistant_id,
+        )
+
+        postprocessor_result = task_executor.execute_async(
+            self._postprocessor_manager.run_postprocessors,
+            loop_response.model_copy(deep=True),
         )

-        await
+        _, evaluation_results = await asyncio.gather(
+            postprocessor_result,
+            evaluation_results,
+        )

-        if not all(
+        if evaluation_results.success and not all(
+            result.is_positive for result in evaluation_results.unpack()
+        ):
             self._logger.warning(
                 "we should add here the retry counter add an instruction and retry the loop for now we just exit the loop"
             )  # TODO: add retry counter and instruction
@@ -363,6 +394,9 @@ class UniqueAI:
         # Append function calls to history
         self._history_manager._append_tool_calls_to_history(tool_calls)

+        for tool_call in tool_calls:
+            self._history_manager.add_tool_call(tool_call)
+
         # Execute tool calls
         tool_call_responses = await self._tool_manager.execute_selected_tools(
             tool_calls
@@ -416,6 +450,26 @@ class UniqueAI:
             )
         )

+    def _get_filtered_user_metadata(self) -> dict[str, str]:
+        """
+        Filter user metadata to only include keys specified in the agent's prompt config.
+
+        Returns:
+            Dictionary containing only the metadata keys that are configured to be included.
+        """
+        user_metadata = {}
+        if (
+            self._config.agent.prompt_config.user_metadata
+            and self._event.payload.user_metadata is not None
+        ):
+            # Filter metadata to only include selected keys
+            user_metadata = {
+                k: str(v)
+                for k, v in self._event.payload.user_metadata.items()
+                if k in self._config.agent.prompt_config.user_metadata
+            }
+        return user_metadata
+

 class UniqueAIResponsesApi(UniqueAI):
     def __init__(
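Taken on its own, the rule implemented by the new `_get_filtered_user_metadata` method reduces to a single dict comprehension: keep only the configured keys and coerce the values to strings. A minimal standalone sketch with invented values:

# Sketch of the filtering rule added above (illustrative values only).
configured_keys = ["department", "role"]  # from config.agent.prompt_config.user_metadata
event_metadata = {"department": "Engineering", "role": "Developer", "salary": 100000}

filtered = {k: str(v) for k, v in event_metadata.items() if k in configured_keys}
assert filtered == {"department": "Engineering", "role": "Developer"}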
unique_orchestrator/unique_ai_builder.py
CHANGED
@@ -1,8 +1,6 @@
-import os
 from logging import Logger
-from typing import NamedTuple
+from typing import NamedTuple, cast

-from openai import AsyncOpenAI
 from unique_follow_up_questions.follow_up_postprocessor import (
     FollowUpPostprocessor,
 )
@@ -31,7 +29,6 @@ from unique_toolkit.agentic.history_manager.history_manager import (
     HistoryManagerConfig,
 )
 from unique_toolkit.agentic.postprocessor.postprocessor_manager import (
-    Postprocessor,
     PostprocessorManager,
 )
 from unique_toolkit.agentic.reference_manager.reference_manager import ReferenceManager
@@ -47,8 +44,13 @@ from unique_toolkit.agentic.thinking_manager.thinking_manager import (
 from unique_toolkit.agentic.tools.a2a import (
     A2AManager,
     ExtendedSubAgentToolConfig,
+    SubAgentDisplaySpec,
     SubAgentEvaluationService,
-
+    SubAgentEvaluationSpec,
+    SubAgentReferencesPostprocessor,
+    SubAgentResponsesDisplayPostprocessor,
+    SubAgentResponsesPostprocessorConfig,
+    SubAgentResponseWatcher,
 )
 from unique_toolkit.agentic.tools.config import ToolBuildConfig
 from unique_toolkit.agentic.tools.mcp.manager import MCPManager
@@ -78,7 +80,7 @@ async def build_unique_ai(
 ) -> UniqueAI | UniqueAIResponsesApi:
     common_components = _build_common(event, logger, config)

-    if config.agent.experimental.responses_api_config
+    if config.agent.experimental.responses_api_config is not None:
         return await _build_responses(
             event=event,
             logger=logger,
@@ -104,13 +106,14 @@ class _CommonComponents(NamedTuple):
     reference_manager: ReferenceManager
     history_manager: HistoryManager
     evaluation_manager: EvaluationManager
+    postprocessor_manager: PostprocessorManager
+    response_watcher: SubAgentResponseWatcher
     # Tool Manager Components
     tool_progress_reporter: ToolProgressReporter
     tool_manager_config: ToolManagerConfig
     mcp_manager: MCPManager
     a2a_manager: A2AManager
     mcp_servers: list[McpServer]
-    postprocessors: list[Postprocessor]


 def _build_common(
@@ -124,7 +127,12 @@ def _build_common(

     uploaded_documents = content_service.get_documents_uploaded_to_chat()

-
+    response_watcher = SubAgentResponseWatcher()
+
+    tool_progress_reporter = ToolProgressReporter(
+        chat_service=chat_service,
+        config=config.agent.services.tool_progress_reporter_config,
+    )
     thinking_manager_config = ThinkingManagerConfig(
         thinking_steps_display=config.agent.experimental.thinking_steps_display
     )
@@ -138,9 +146,7 @@ def _build_common(
     reference_manager = ReferenceManager()

     history_manager_config = HistoryManagerConfig(
-        experimental_features=history_manager_module.ExperimentalFeatures(
-            full_sources_serialize_dump=False,
-        ),
+        experimental_features=history_manager_module.ExperimentalFeatures(),
         percent_of_max_tokens_for_history=config.agent.input_token_distribution.percent_for_history,
         language_model=config.space.language_model,
         uploaded_content_config=config.agent.services.uploaded_content_config,
@@ -171,16 +177,21 @@ def _build_common(
     a2a_manager = A2AManager(
         logger=logger,
         tool_progress_reporter=tool_progress_reporter,
+        response_watcher=response_watcher,
     )
+
     tool_manager_config = ToolManagerConfig(
         tools=config.space.tools,
         max_tool_calls=config.agent.experimental.loop_configuration.max_tool_calls_per_iteration,
     )

-
+    postprocessor_manager = PostprocessorManager(
+        logger=logger,
+        chat_service=chat_service,
+    )

-    if config.agent.services.stock_ticker_config:
-
+    if config.agent.services.stock_ticker_config is not None:
+        postprocessor_manager.add_postprocessor(
             StockTickerPostprocessor(
                 config=config.agent.services.stock_ticker_config,
                 event=event,
@@ -191,7 +202,8 @@ def _build_common(
         config.agent.services.follow_up_questions_config
         and config.agent.services.follow_up_questions_config.number_of_questions > 0
     ):
-
+        # Should run last to make sure the follow up questions are displayed last.
+        postprocessor_manager.set_last_postprocessor(
             FollowUpPostprocessor(
                 logger=logger,
                 config=config.agent.services.follow_up_questions_config,
@@ -214,40 +226,9 @@ def _build_common(
         a2a_manager=a2a_manager,
         tool_manager_config=tool_manager_config,
         mcp_servers=event.payload.mcp_servers,
-
-
-
-
-def _prepare_base_url(url: str, use_v1: bool) -> str:
-    url = url.rstrip("/") + "/openai"
-
-    if use_v1:
-        url += "/v1"
-
-    return url
-
-
-def _get_openai_client_from_env(
-    config: UniqueAIConfig, use_v1: bool = False
-) -> AsyncOpenAI:
-    use_direct_azure_client = (
-        config.agent.experimental.responses_api_config.use_direct_azure_client
+        postprocessor_manager=postprocessor_manager,
+        response_watcher=response_watcher,
     )
-    api_key_env_var = config.agent.experimental.responses_api_config.direct_azure_client_api_key_env_var
-    api_base_env_var = config.agent.experimental.responses_api_config.direct_azure_client_api_base_env_var
-
-    if use_direct_azure_client:
-        # TODO: (for testing only), remove when v1 endpoint is working
-        return AsyncOpenAI(
-            api_key=os.environ[api_key_env_var],
-            base_url=_prepare_base_url(os.environ[api_base_env_var], use_v1=use_v1),
-        )
-    else:
-        return get_async_openai_client().copy(
-            default_headers={
-                "x-model": config.space.language_model.name
-            }  # Backend requires a model name
-        )


 async def _build_responses(
@@ -257,25 +238,52 @@ async def _build_responses(
     common_components: _CommonComponents,
     debug_info_manager: DebugInfoManager,
 ) -> UniqueAIResponsesApi:
-    client =
+    client = get_async_openai_client().copy(
+        default_headers={
+            "x-model": config.space.language_model.name,
+            "x-user-id": event.user_id,
+            "x-company-id": event.company_id,
+            "x-assistant-id": event.payload.assistant_id,
+            "x-chat-id": event.payload.chat_id,
+        }
+    )
+
+    assert config.agent.experimental.responses_api_config is not None
+
     code_interpreter_config = (
         config.agent.experimental.responses_api_config.code_interpreter
     )
-
+    postprocessor_manager = common_components.postprocessor_manager
     tool_names = [tool.name for tool in config.space.tools]
-
-
-
-
-
-
-
-
-
-
+
+    if code_interpreter_config is not None:
+        if OpenAIBuiltInToolName.CODE_INTERPRETER not in tool_names:
+            logger.info("Automatically adding code interpreter to the tools")
+            config = config.model_copy(deep=True)
+            config.space.tools.append(
+                ToolBuildConfig(
+                    name=OpenAIBuiltInToolName.CODE_INTERPRETER,
+                    configuration=code_interpreter_config,
+                )
+            )
+            common_components.tool_manager_config.tools = config.space.tools
+
+        if code_interpreter_config.display_config is not None:
+            postprocessor_manager.add_postprocessor(
+                ShowExecutedCodePostprocessor(
+                    config=code_interpreter_config.display_config
+                )
+            )
+
+        postprocessor_manager.add_postprocessor(
+            DisplayCodeInterpreterFilesPostProcessor(
+                client=client,
+                content_service=common_components.content_service,
+                config=DisplayCodeInterpreterFilesPostProcessorConfig(
+                    upload_scope_id=code_interpreter_config.generated_files_scope_id,
+                ),
             )
         )
-    common_components.tool_manager_config.tools = config.space.tools

     builtin_tool_manager = OpenAIBuiltInToolManager(
         uploaded_files=common_components.uploaded_documents,
@@ -296,32 +304,7 @@ async def _build_responses(
         builtin_tool_manager=builtin_tool_manager,
     )

-    postprocessor_manager =
-        logger=logger,
-        chat_service=common_components.chat_service,
-    )
-    for postprocessor in common_components.postprocessors:
-        postprocessor_manager.add_postprocessor(postprocessor)
-
-    if (
-        config.agent.experimental.responses_api_config.code_interpreter_display_config
-        is not None
-    ):
-        postprocessor_manager.add_postprocessor(
-            ShowExecutedCodePostprocessor(
-                config=config.agent.experimental.responses_api_config.code_interpreter_display_config
-            )
-        )
-
-    postprocessor_manager.add_postprocessor(
-        DisplayCodeInterpreterFilesPostProcessor(
-            client=client,
-            content_service=common_components.content_service,
-            config=DisplayCodeInterpreterFilesPostProcessorConfig(
-                upload_scope_id=config.agent.experimental.responses_api_config.generated_files_scope_id,
-            ),
-        )
-    )
+    postprocessor_manager = common_components.postprocessor_manager

     class ResponsesStreamingHandler(ResponsesSupportCompleteWithReferences):
         def complete_with_references(self, *args, **kwargs):
@@ -339,15 +322,15 @@ async def _build_responses(
     _add_sub_agents_postprocessor(
         postprocessor_manager=postprocessor_manager,
         tool_manager=tool_manager,
-
-
-        chat_id=event.payload.chat_id,
+        config=config,
+        response_watcher=common_components.response_watcher,
     )
     _add_sub_agents_evaluation(
         evaluation_manager=common_components.evaluation_manager,
         tool_manager=tool_manager,
         config=config,
         event=event,
+        response_watcher=common_components.response_watcher,
     )

     return UniqueAIResponsesApi(
@@ -375,17 +358,24 @@ def _build_completions(
     common_components: _CommonComponents,
     debug_info_manager: DebugInfoManager,
 ) -> UniqueAI:
-
+    # Uploaded content behavior is always to force uploaded search tool:
+    # 1. Add it to forced tools if there are tool choices.
+    # 2. Simply force it if there are no tool choices.
+    # 3. Not available if not uploaded documents.
+    UPLOADED_DOCUMENTS = len(common_components.uploaded_documents) > 0
+    TOOL_CHOICES = len(event.payload.tool_choices) > 0
+    if UPLOADED_DOCUMENTS:
         logger.info(
             f"Adding UploadedSearchTool with {len(common_components.uploaded_documents)} documents"
         )
-
+        common_components.tool_manager_config.tools.append(
             ToolBuildConfig(
                 name=UploadedSearchTool.name,
                 display_name=UploadedSearchTool.name,
                 configuration=UploadedSearchConfig(),
-        )
+            )
         )
+    if TOOL_CHOICES and UPLOADED_DOCUMENTS:
         event.payload.tool_choices.append(str(UploadedSearchTool.name))

     tool_manager = ToolManager(
@@ -396,26 +386,23 @@ def _build_completions(
         mcp_manager=common_components.mcp_manager,
         a2a_manager=common_components.a2a_manager,
     )
+    if not TOOL_CHOICES and UPLOADED_DOCUMENTS:
+        tool_manager.add_forced_tool(UploadedSearchTool.name)

-    postprocessor_manager =
-        logger=logger,
-        chat_service=common_components.chat_service,
-    )
-    for postprocessor in common_components.postprocessors:
-        postprocessor_manager.add_postprocessor(postprocessor)
+    postprocessor_manager = common_components.postprocessor_manager

     _add_sub_agents_postprocessor(
         postprocessor_manager=postprocessor_manager,
         tool_manager=tool_manager,
-
-
-        chat_id=event.payload.chat_id,
+        config=config,
+        response_watcher=common_components.response_watcher,
     )
     _add_sub_agents_evaluation(
         evaluation_manager=common_components.evaluation_manager,
         tool_manager=tool_manager,
         config=config,
         event=event,
+        response_watcher=common_components.response_watcher,
     )

     return UniqueAI(
@@ -439,24 +426,37 @@ def _build_completions(
 def _add_sub_agents_postprocessor(
     postprocessor_manager: PostprocessorManager,
     tool_manager: ToolManager | ResponsesApiToolManager,
-
-
-    chat_id: str,
+    config: UniqueAIConfig,
+    response_watcher: SubAgentResponseWatcher,
 ) -> None:
     sub_agents = tool_manager.sub_agents
     if len(sub_agents) > 0:
-
-
-            main_agent_chat_id=chat_id,
-            company_id=company_id,
+        display_config = SubAgentResponsesPostprocessorConfig(
+            sleep_time_before_update=config.agent.experimental.sub_agents_config.sleep_time_before_update,
         )
-
-
-
-
-
-
+        display_specs = []
+        for tool in sub_agents:
+            tool_config = cast(
+                ExtendedSubAgentToolConfig, tool.settings.configuration
+            )  # (BeforeValidator of ToolBuildConfig)
+
+            display_specs.append(
+                SubAgentDisplaySpec(
+                    assistant_id=tool_config.assistant_id,
+                    display_name=tool.display_name(),
+                    display_config=tool_config.response_display_config,
+                )
            )
+        reference_postprocessor = SubAgentReferencesPostprocessor(
+            response_watcher=response_watcher,
+        )
+        sub_agent_responses_postprocessor = SubAgentResponsesDisplayPostprocessor(
+            config=display_config,
+            response_watcher=response_watcher,
+            display_specs=display_specs,
+        )
+        postprocessor_manager.add_postprocessor(reference_postprocessor)
+        postprocessor_manager.add_postprocessor(sub_agent_responses_postprocessor)


 def _add_sub_agents_evaluation(
@@ -464,18 +464,31 @@ def _add_sub_agents_evaluation(
     tool_manager: ToolManager | ResponsesApiToolManager,
     config: UniqueAIConfig,
     event: ChatEvent,
+    response_watcher: SubAgentResponseWatcher,
 ) -> None:
     sub_agents = tool_manager.sub_agents
-    if
-
-
-
-
-
-
-
-
-
-
-
+    if (
+        len(sub_agents) > 0
+        and config.agent.experimental.sub_agents_config.evaluation_config is not None
+    ):
+        evaluation_specs = []
+        for tool in sub_agents:
+            tool_config = cast(
+                ExtendedSubAgentToolConfig, tool.settings.configuration
+            )  # (BeforeValidator of ToolBuildConfig)
+
+            evaluation_specs.append(
+                SubAgentEvaluationSpec(
+                    assistant_id=tool_config.assistant_id,
+                    display_name=tool.display_name(),
+                    config=tool_config.evaluation_config,
                )
+            )
+
+        sub_agent_evaluation = SubAgentEvaluationService(
+            config=config.agent.experimental.sub_agents_config.evaluation_config,
+            language_model_service=LanguageModelService.from_event(event),
+            evaluation_specs=evaluation_specs,
+            response_watcher=response_watcher,
+        )
+        evaluation_manager.add_evaluation(sub_agent_evaluation)
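The uploaded-search behavior described by the comment in `_build_completions` above boils down to a small decision table. The sketch below is illustrative only (plain booleans and strings, not the real tool objects or manager calls):

# Sketch of the uploaded-search forcing rule in _build_completions (illustrative).
def uploaded_search_mode(has_uploaded_documents: bool, has_tool_choices: bool) -> str:
    """Return how UploadedSearchTool is exposed for a given chat state."""
    if not has_uploaded_documents:
        return "not available"          # 3. no uploaded documents -> tool is not added
    if has_tool_choices:
        return "added to tool_choices"  # 1. user picked tools -> append it to the choices
    return "forced"                     # 2. no tool choices -> force the tool


assert uploaded_search_mode(False, False) == "not available"
assert uploaded_search_mode(True, True) == "added to tool_choices"
assert uploaded_search_mode(True, False) == "forced"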
{unique_orchestrator-1.5.0.dist-info → unique_orchestrator-1.7.7.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: unique_orchestrator
-Version: 1.
+Version: 1.7.7
 Summary:
 License: Proprietary
 Author: Andreas Hauri
@@ -17,9 +17,10 @@ Requires-Dist: typing-extensions (>=4.9.0,<5.0.0)
 Requires-Dist: unique-deep-research (>=3.0.0,<4.0.0)
 Requires-Dist: unique-follow-up-questions (>=1.1.2,<2.0.0)
 Requires-Dist: unique-internal-search (>=1.0.1,<2.0.0)
-Requires-Dist: unique-sdk (>=0.10.
+Requires-Dist: unique-sdk (>=0.10.34,<0.11.0)
 Requires-Dist: unique-stock-ticker (>=1.0.2,<2.0.0)
-Requires-Dist: unique-
+Requires-Dist: unique-swot (>=0.1.0,<0.2.0)
+Requires-Dist: unique-toolkit (>=1.23.0,<2.0.0)
 Requires-Dist: unique-web-search (>=1.3.1,<2.0.0)
 Description-Content-Type: text/markdown

@@ -33,6 +34,47 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

+## [1.7.7] - 2025-11-10
+- Remove direct azure client config from responses api config
+- Organize Responses API config better
+
+## [1.7.6] - 2025-11-05
+- Update default system prompt (including user metadata section)
+
+## [1.7.5] - 2025-11-05
+- Adding functionality to include user metadata into user/system prompts of the orchestrator
+
+## [1.7.4] - 2025-11-04
+- Update and adapt to toolkit 1.23.0 (refactor sub agents implementation)
+
+## [1.7.3] - 2025-11-03
+- Fixed an issue where new assistant messages were not properly generated during streaming outputs with tool calls; the orchestrator now correctly creates messages via `_create_new_assistant_message_if_loop_response_contains_content` when loop_response includes text and tool invocations.
+
+## [1.7.2] - 2025-11-03
+- Add Swot tool to the orchestrator
+
+## [1.7.1] - 2025-10-30
+- Fixing that system format info is only appended to system prompt if tool is called
+
+## [1.7.0] - 2025-10-30
+- Add option to customize the display of tool progress statuses.
+- Make follow-up questions postprocessor run last to make sure the follow up questions are displayed last.
+
+## [1.6.1] - 2025-10-28
+- Removing unused experimental config `full_sources_serialize_dump` in `history_manager`
+
+## [1.6.0] - 2025-10-27
+- Add temporary config option `sleep_time_before_update` to avoid rendering issues with sub agent responses
+
+## [1.5.2] - 2025-10-23
+- Run evaluation and post processing in parallel
+
+## [1.5.1] - 2025-10-17
+- Revert behavior of unique ai upload and chat to:
+  1. Add upload and chat tool to forced tools if there are tool choices
+  2. Simply force it if there are no tool choices.
+  3. Tool not available when no uploaded documents
+
 ## [1.5.0] - 2025-10-16
 - Make code interpreter configurable through spaces 2.0.

unique_orchestrator-1.7.7.dist-info/RECORD
ADDED
@@ -0,0 +1,12 @@
+unique_orchestrator/config.py,sha256=MWoms-7GDF7ki6KtsQGt5zwqYRVjbtGQnfEr5KsNhD8,11657
+unique_orchestrator/prompts/generic_reference_prompt.jinja2,sha256=fYPaiE-N1gSoOqu85OeEBa_ttAim8grOhHuOHJjSHNU,2663
+unique_orchestrator/prompts/system_prompt.jinja2,sha256=IcjkImrQxSrkcUs7BfiAeArhSH0RSxnVIrsJWs-53II,7571
+unique_orchestrator/prompts/user_message_prompt.jinja2,sha256=BQokpBh3H2J-rFk8i-PRph3jy4T1gAJPPb1mxxRWNuM,878
+unique_orchestrator/tests/test_unique_ai_get_filtered_user_metadata.py,sha256=I0xkhR_1DFZEiwSm5x6_B668fQTlYm5tYtPU9uULX3k,9661
+unique_orchestrator/tests/test_unique_ai_reference_order.py,sha256=8mZeVP1k8neH4qrFW3oa3zwIdaq2c7R1VvurC7kjBU8,4445
+unique_orchestrator/unique_ai.py,sha256=qmbWul6O0ri50PtWNt8qz4GHsxZe5upJW7XOo6fmQL0,20743
+unique_orchestrator/unique_ai_builder.py,sha256=Vh4dL4rm-rt1ten3Xi3pkhgjkmr6tuEEithc5L_Z8Cg,18080
+unique_orchestrator-1.7.7.dist-info/LICENSE,sha256=GlN8wHNdh53xwOPg44URnwag6TEolCjoq3YD_KrWgss,193
+unique_orchestrator-1.7.7.dist-info/METADATA,sha256=ZmqFUSyXzxYH-ShKQVxLy6hKGN3Z5F48rzVmzYSu7ck,4640
+unique_orchestrator-1.7.7.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+unique_orchestrator-1.7.7.dist-info/RECORD,,
unique_orchestrator-1.5.0.dist-info/RECORD
REMOVED
@@ -1,11 +0,0 @@
-unique_orchestrator/config.py,sha256=IwYUrPUzyanKA8sQpS5J-u1WzsBnqQlKRv-vUM_2uRU,11361
-unique_orchestrator/prompts/generic_reference_prompt.jinja2,sha256=fYPaiE-N1gSoOqu85OeEBa_ttAim8grOhHuOHJjSHNU,2663
-unique_orchestrator/prompts/system_prompt.jinja2,sha256=YXFdx3PG2p4TKfjEpz7guIw2GaKoY-4zRMEzXaKhHXE,7213
-unique_orchestrator/prompts/user_message_prompt.jinja2,sha256=BQokpBh3H2J-rFk8i-PRph3jy4T1gAJPPb1mxxRWNuM,878
-unique_orchestrator/tests/test_unique_ai_reference_order.py,sha256=8mZeVP1k8neH4qrFW3oa3zwIdaq2c7R1VvurC7kjBU8,4445
-unique_orchestrator/unique_ai.py,sha256=PqeDaXtr2krqQ_xqhr1Kb9j0A3rPaCT0BqIvnJlFVbU,18845
-unique_orchestrator/unique_ai_builder.py,sha256=JEbMYZBY3mxx6kJkAsm33yO4mkP-rj3SLlCuFFOS0Zs,16977
-unique_orchestrator-1.5.0.dist-info/LICENSE,sha256=GlN8wHNdh53xwOPg44URnwag6TEolCjoq3YD_KrWgss,193
-unique_orchestrator-1.5.0.dist-info/METADATA,sha256=uVIDWlKwLnl6WM23auxCXlIyhh0OOrzEdJLLWgkt1nQ,2918
-unique_orchestrator-1.5.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-unique_orchestrator-1.5.0.dist-info/RECORD,,
{unique_orchestrator-1.5.0.dist-info → unique_orchestrator-1.7.7.dist-info}/LICENSE
File without changes
{unique_orchestrator-1.5.0.dist-info → unique_orchestrator-1.7.7.dist-info}/WHEEL
File without changes