unique_orchestrator 1.3.0__tar.gz → 1.4.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of unique_orchestrator might be problematic.
- {unique_orchestrator-1.3.0 → unique_orchestrator-1.4.0}/CHANGELOG.md +3 -0
- {unique_orchestrator-1.3.0 → unique_orchestrator-1.4.0}/PKG-INFO +5 -2
- {unique_orchestrator-1.3.0 → unique_orchestrator-1.4.0}/pyproject.toml +2 -2
- {unique_orchestrator-1.3.0 → unique_orchestrator-1.4.0}/unique_orchestrator/config.py +30 -0
- {unique_orchestrator-1.3.0 → unique_orchestrator-1.4.0}/unique_orchestrator/unique_ai.py +53 -8
- unique_orchestrator-1.4.0/unique_orchestrator/unique_ai_builder.py +447 -0
- unique_orchestrator-1.3.0/unique_orchestrator/unique_ai_builder.py +0 -212
- {unique_orchestrator-1.3.0 → unique_orchestrator-1.4.0}/LICENSE +0 -0
- {unique_orchestrator-1.3.0 → unique_orchestrator-1.4.0}/README.md +0 -0
- {unique_orchestrator-1.3.0 → unique_orchestrator-1.4.0}/unique_orchestrator/prompts/generic_reference_prompt.jinja2 +0 -0
- {unique_orchestrator-1.3.0 → unique_orchestrator-1.4.0}/unique_orchestrator/prompts/system_prompt.jinja2 +0 -0
- {unique_orchestrator-1.3.0 → unique_orchestrator-1.4.0}/unique_orchestrator/prompts/user_message_prompt.jinja2 +0 -0
- {unique_orchestrator-1.3.0 → unique_orchestrator-1.4.0}/unique_orchestrator/tests/test_unique_ai_reference_order.py +0 -0
--- unique_orchestrator-1.3.0/CHANGELOG.md
+++ unique_orchestrator-1.4.0/CHANGELOG.md
@@ -5,6 +5,9 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

+## [1.4.0] - 2025-10-14
+- Add responses api and code execution support.
+
 ## [1.3.0] - 2025-10-14
 - Re-organize sub-agents configuration for clarity.

--- unique_orchestrator-1.3.0/PKG-INFO
+++ unique_orchestrator-1.4.0/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: unique_orchestrator
-Version: 1.3.0
+Version: 1.4.0
 Summary:
 License: Proprietary
 Author: Andreas Hauri
@@ -19,7 +19,7 @@ Requires-Dist: unique-follow-up-questions (>=1.1.2,<2.0.0)
 Requires-Dist: unique-internal-search (>=1.0.1,<2.0.0)
 Requires-Dist: unique-sdk (>=0.10.24,<0.11.0)
 Requires-Dist: unique-stock-ticker (>=1.0.2,<2.0.0)
-Requires-Dist: unique-toolkit (>=1.
+Requires-Dist: unique-toolkit (>=1.16.0,<2.0.0)
 Requires-Dist: unique-web-search (>=1.3.1,<2.0.0)
 Description-Content-Type: text/markdown

@@ -33,6 +33,9 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

+## [1.4.0] - 2025-10-14
+- Add responses api and code execution support.
+
 ## [1.3.0] - 2025-10-14
 - Re-organize sub-agents configuration for clarity.

--- unique_orchestrator-1.3.0/pyproject.toml
+++ unique_orchestrator-1.4.0/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "unique_orchestrator"
-version = "1.3.0"
+version = "1.4.0"
 description = ""
 authors = ["Andreas Hauri <andreas.hauri@unique.ai>"]
 readme = ["README.md", "CHANGELOG.md"]
@@ -15,7 +15,7 @@ python-dotenv = "^1.0.1"
 pytest = "^8.4.1"
 unique-sdk = "^0.10.24"

-unique-toolkit = "^1.
+unique-toolkit = "^1.16.0"
 unique-stock-ticker = "^1.0.2"
 unique-follow-up-questions = "^1.1.2"
 unique-internal-search = "^1.0.1"
--- unique_orchestrator-1.3.0/unique_orchestrator/config.py
+++ unique_orchestrator-1.4.0/unique_orchestrator/config.py
@@ -3,6 +3,7 @@ from pathlib import Path
 from typing import Annotated, Any, Generic, Literal, TypeVar

 from pydantic import BaseModel, Field, ValidationInfo, field_validator, model_validator
+from pydantic.json_schema import SkipJsonSchema
 from unique_deep_research.config import DeepResearchToolConfig
 from unique_deep_research.service import DeepResearchTool
 from unique_follow_up_questions.config import FollowUpQuestionsConfig
@@ -21,6 +22,7 @@ from unique_toolkit.agentic.evaluation.schemas import EvaluationMetricName
 from unique_toolkit.agentic.history_manager.history_manager import (
     UploadedContentConfig,
 )
+from unique_toolkit.agentic.responses_api import ShowExecutedCodePostprocessorConfig
 from unique_toolkit.agentic.tools.a2a import (
     REFERENCING_INSTRUCTIONS_FOR_SYSTEM_PROMPT,
     REFERENCING_INSTRUCTIONS_FOR_USER_PROMPT,
@@ -226,6 +228,32 @@ class SubAgentsConfig(BaseModel):
     ) = SubAgentEvaluationServiceConfig()


+class ResponsesApiConfig(BaseModel):
+    model_config = get_configuration_dict(frozen=True)
+
+    use_responses_api: bool = Field(
+        default=False,
+        description="Whether to use the responses API instead of the completions API.",
+    )
+    code_interpreter_display_config: (
+        Annotated[
+            ShowExecutedCodePostprocessorConfig,
+            Field(title="Active"),
+        ]
+        | DeactivatedNone
+    ) = ShowExecutedCodePostprocessorConfig()
+
+    use_direct_azure_client: SkipJsonSchema[bool] = Field(
+        default=True,
+        description="Temporary",
+    )
+
+    generated_files_scope_id: str = Field(
+        default="<SCOPE_ID_PLACEHOLDER>",
+        description="Scope ID for the responses API.",
+    )
+
+
 class ExperimentalConfig(BaseModel):
     """Experimental features this part of the configuration might evolve in the future continuously"""

@@ -259,6 +287,8 @@ class ExperimentalConfig(BaseModel):

     sub_agents_config: SubAgentsConfig = SubAgentsConfig()

+    responses_api_config: ResponsesApiConfig = ResponsesApiConfig()
+

 class UniqueAIAgentConfig(BaseModel):
     model_config = get_configuration_dict(frozen=True)
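The new `ResponsesApiConfig` hangs off `ExperimentalConfig` as `responses_api_config`, so the Responses API path stays opt-in and defaults to the existing completions behaviour. A minimal sketch of switching it on programmatically; the field names come from the diff above, while the concrete scope ID value is a made-up placeholder:

```python
# Illustration only: enable the new Responses API path in the experimental config.
from unique_orchestrator.config import ResponsesApiConfig

responses_api_config = ResponsesApiConfig(
    use_responses_api=True,                 # use the responses API instead of completions
    generated_files_scope_id="scope_123",   # placeholder scope for code-interpreter output files
)
```

`use_direct_azure_client` keeps its default here and, being wrapped in `SkipJsonSchema`, does not show up in the generated configuration schema.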
--- unique_orchestrator-1.3.0/unique_orchestrator/unique_ai.py
+++ unique_orchestrator-1.4.0/unique_orchestrator/unique_ai.py
@@ -12,15 +12,22 @@ from unique_toolkit.agentic.postprocessor.postprocessor_manager import (
 )
 from unique_toolkit.agentic.reference_manager.reference_manager import ReferenceManager
 from unique_toolkit.agentic.thinking_manager.thinking_manager import ThinkingManager
-from unique_toolkit.agentic.tools.tool_manager import
+from unique_toolkit.agentic.tools.tool_manager import (
+    ResponsesApiToolManager,
+    ToolManager,
+)
 from unique_toolkit.app.schemas import ChatEvent, McpServer
 from unique_toolkit.chat.service import ChatService
 from unique_toolkit.content.service import ContentService
+from unique_toolkit.language_model import LanguageModelAssistantMessage
 from unique_toolkit.language_model.schemas import (
-    LanguageModelAssistantMessage,
     LanguageModelMessages,
     LanguageModelStreamResponse,
 )
+from unique_toolkit.protocols.support import (
+    ResponsesSupportCompleteWithReferences,
+    SupportCompleteWithReferences,
+)

 from unique_orchestrator.config import UniqueAIConfig

@@ -45,6 +52,7 @@ class UniqueAI:
         chat_service: ChatService,
         content_service: ContentService,
         debug_info_manager: DebugInfoManager,
+        streaming_handler: SupportCompleteWithReferences,
         reference_manager: ReferenceManager,
         thinking_manager: ThinkingManager,
         tool_manager: ToolManager,
@@ -70,6 +78,7 @@ class UniqueAI:
         self._postprocessor_manager = postprocessor_manager
         self._latest_assistant_id: str = event.payload.assistant_message.id
         self._mcp_servers = mcp_servers
+        self._streaming_handler = streaming_handler

         # Helper variable to support control loop
         self._tool_took_control = False
@@ -148,7 +157,7 @@ class UniqueAI:
         self._logger.info("Its needs forced tool calls.")
         self._logger.info(f"Forced tools: {self._tool_manager.get_forced_tools()}")
         responses = [
-            await self.
+            await self._streaming_handler.complete_with_references_async(
                 messages=messages,
                 model_name=self._config.space.language_model.name,
                 tools=self._tool_manager.get_tool_definitions(),
@@ -156,8 +165,8 @@ class UniqueAI:
                 start_text=self.start_text,
                 debug_info=self._debug_info_manager.get(),
                 temperature=self._config.agent.experimental.temperature,
-
-
+                tool_choice=opt,
+                other_options=self._config.agent.experimental.additional_llm_options,
             )
             for opt in self._tool_manager.get_forced_tools()
         ]
@@ -178,7 +187,7 @@ class UniqueAI:
             "we are in the last iteration we need to produce an answer now"
         )
         # No tool calls in last iteration
-        stream_response = await self.
+        stream_response = await self._streaming_handler.complete_with_references_async(
             messages=messages,
             model_name=self._config.space.language_model.name,
             content_chunks=self._reference_manager.get_chunks(),
@@ -192,7 +201,7 @@ class UniqueAI:
         self._logger.info(
             f"we are in the iteration {self.current_iteration_index} asking the model to tell if we should use tools or if it will just stream"
         )
-        stream_response = await self.
+        stream_response = await self._streaming_handler.complete_with_references_async(
             messages=messages,
             model_name=self._config.space.language_model.name,
             tools=self._tool_manager.get_tool_definitions(),
@@ -351,7 +360,7 @@ class UniqueAI:

         tool_calls = loop_response.tool_calls or []

-        # Append function
+        # Append function calls to history
         self._history_manager._append_tool_calls_to_history(tool_calls)

         # Execute tool calls
@@ -404,3 +413,39 @@ class UniqueAI:
                 content=loop_response.message.original_text or "",
             )
         )
+
+
+class UniqueAIResponsesApi(UniqueAI):
+    def __init__(
+        self,
+        logger: Logger,
+        event: ChatEvent,
+        config: UniqueAIConfig,
+        chat_service: ChatService,
+        content_service: ContentService,
+        debug_info_manager: DebugInfoManager,
+        streaming_handler: ResponsesSupportCompleteWithReferences,
+        reference_manager: ReferenceManager,
+        thinking_manager: ThinkingManager,
+        tool_manager: ResponsesApiToolManager,
+        history_manager: HistoryManager,
+        evaluation_manager: EvaluationManager,
+        postprocessor_manager: PostprocessorManager,
+        mcp_servers: list[McpServer],
+    ) -> None:
+        super().__init__(
+            logger,
+            event=event,
+            config=config,
+            chat_service=chat_service,
+            content_service=content_service,
+            debug_info_manager=debug_info_manager,
+            streaming_handler=streaming_handler,  # type: ignore
+            reference_manager=reference_manager,
+            thinking_manager=thinking_manager,
+            tool_manager=tool_manager,  # type: ignore
+            history_manager=history_manager,
+            evaluation_manager=evaluation_manager,
+            postprocessor_manager=postprocessor_manager,
+            mcp_servers=mcp_servers,
+        )
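With this change the agent loop reaches the model only through the injected `streaming_handler`, typed against the `SupportCompleteWithReferences` protocol (`ResponsesSupportCompleteWithReferences` for the subclass), instead of calling the chat service directly. A rough sketch of the two handler shapes, mirroring what the new builder below wires up; `chat_service` is assumed to be an existing `ChatService` instance and this is not a verbatim excerpt:

```python
# Completions path: the ChatService itself is passed in as the handler.
streaming_handler = chat_service

# Responses path: a thin adapter that forwards to the responses-API variant.
class ResponsesStreamingHandler:
    async def complete_with_references_async(self, *args, **kwargs):
        # Delegate to the chat service's Responses API method.
        return await chat_service.complete_responses_with_references_async(*args, **kwargs)
```

`UniqueAIResponsesApi` itself only narrows the constructor types (`ResponsesSupportCompleteWithReferences`, `ResponsesApiToolManager`); the control loop it inherits is unchanged.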
--- /dev/null
+++ unique_orchestrator-1.4.0/unique_orchestrator/unique_ai_builder.py
@@ -0,0 +1,447 @@
+import os
+from logging import Logger
+from typing import NamedTuple
+
+from openai import AsyncOpenAI
+from unique_follow_up_questions.follow_up_postprocessor import (
+    FollowUpPostprocessor,
+)
+from unique_internal_search.uploaded_search.config import (
+    UploadedSearchConfig,
+)
+from unique_internal_search.uploaded_search.service import (
+    UploadedSearchTool,
+)
+from unique_stock_ticker.stock_ticker_postprocessor import (
+    StockTickerPostprocessor,
+)
+from unique_toolkit import LanguageModelService, get_async_openai_client
+from unique_toolkit.agentic.debug_info_manager.debug_info_manager import (
+    DebugInfoManager,
+)
+from unique_toolkit.agentic.evaluation.evaluation_manager import EvaluationManager
+from unique_toolkit.agentic.evaluation.hallucination.hallucination_evaluation import (
+    HallucinationEvaluation,
+)
+from unique_toolkit.agentic.history_manager import (
+    history_manager as history_manager_module,
+)
+from unique_toolkit.agentic.history_manager.history_manager import (
+    HistoryManager,
+    HistoryManagerConfig,
+)
+from unique_toolkit.agentic.postprocessor.postprocessor_manager import (
+    Postprocessor,
+    PostprocessorManager,
+)
+from unique_toolkit.agentic.reference_manager.reference_manager import ReferenceManager
+from unique_toolkit.agentic.responses_api import (
+    DisplayCodeInterpreterFilesPostProcessor,
+    DisplayCodeInterpreterFilesPostProcessorConfig,
+    ShowExecutedCodePostprocessor,
+)
+from unique_toolkit.agentic.thinking_manager.thinking_manager import (
+    ThinkingManager,
+    ThinkingManagerConfig,
+)
+from unique_toolkit.agentic.tools.a2a import (
+    A2AManager,
+    ExtendedSubAgentToolConfig,
+    SubAgentEvaluationService,
+    SubAgentResponsesPostprocessor,
+)
+from unique_toolkit.agentic.tools.config import ToolBuildConfig
+from unique_toolkit.agentic.tools.mcp.manager import MCPManager
+from unique_toolkit.agentic.tools.tool_manager import (
+    OpenAIBuiltInToolManager,
+    ResponsesApiToolManager,
+    ToolManager,
+    ToolManagerConfig,
+)
+from unique_toolkit.agentic.tools.tool_progress_reporter import ToolProgressReporter
+from unique_toolkit.app.schemas import ChatEvent, McpServer
+from unique_toolkit.chat.service import ChatService
+from unique_toolkit.content import Content
+from unique_toolkit.content.service import ContentService
+from unique_toolkit.protocols.support import ResponsesSupportCompleteWithReferences
+
+from unique_orchestrator.config import UniqueAIConfig
+from unique_orchestrator.unique_ai import UniqueAI, UniqueAIResponsesApi
+
+
+async def build_unique_ai(
+    event: ChatEvent,
+    logger: Logger,
+    config: UniqueAIConfig,
+    debug_info_manager: DebugInfoManager,
+) -> UniqueAI | UniqueAIResponsesApi:
+    common_components = _build_common(event, logger, config)
+
+    if config.agent.experimental.responses_api_config.use_responses_api:
+        return await _build_responses(
+            event=event,
+            logger=logger,
+            config=config,
+            debug_info_manager=debug_info_manager,
+            common_components=common_components,
+        )
+    else:
+        return _build_completions(
+            event=event,
+            logger=logger,
+            config=config,
+            debug_info_manager=debug_info_manager,
+            common_components=common_components,
+        )
+
+
+class _CommonComponents(NamedTuple):
+    chat_service: ChatService
+    content_service: ContentService
+    uploaded_documents: list[Content]
+    thinking_manager: ThinkingManager
+    reference_manager: ReferenceManager
+    history_manager: HistoryManager
+    evaluation_manager: EvaluationManager
+    # Tool Manager Components
+    tool_progress_reporter: ToolProgressReporter
+    tool_manager_config: ToolManagerConfig
+    mcp_manager: MCPManager
+    a2a_manager: A2AManager
+    mcp_servers: list[McpServer]
+    postprocessors: list[Postprocessor]
+
+
+def _build_common(
+    event: ChatEvent,
+    logger: Logger,
+    config: UniqueAIConfig,
+) -> _CommonComponents:
+    chat_service = ChatService(event)
+
+    content_service = ContentService.from_event(event)
+
+    uploaded_documents = content_service.get_documents_uploaded_to_chat()
+
+    tool_progress_reporter = ToolProgressReporter(chat_service=chat_service)
+    thinking_manager_config = ThinkingManagerConfig(
+        thinking_steps_display=config.agent.experimental.thinking_steps_display
+    )
+    thinking_manager = ThinkingManager(
+        logger=logger,
+        config=thinking_manager_config,
+        tool_progress_reporter=tool_progress_reporter,
+        chat_service=chat_service,
+    )
+
+    reference_manager = ReferenceManager()
+
+    history_manager_config = HistoryManagerConfig(
+        experimental_features=history_manager_module.ExperimentalFeatures(
+            full_sources_serialize_dump=False,
+        ),
+        percent_of_max_tokens_for_history=config.agent.input_token_distribution.percent_for_history,
+        language_model=config.space.language_model,
+        uploaded_content_config=config.agent.services.uploaded_content_config,
+    )
+    history_manager = HistoryManager(
+        logger,
+        event,
+        history_manager_config,
+        config.space.language_model,
+        reference_manager,
+    )
+
+    evaluation_manager = EvaluationManager(logger=logger, chat_service=chat_service)
+    if config.agent.services.evaluation_config:
+        evaluation_manager.add_evaluation(
+            HallucinationEvaluation(
+                config.agent.services.evaluation_config.hallucination_config,
+                event,
+                reference_manager,
+            )
+        )
+
+    mcp_manager = MCPManager(
+        mcp_servers=event.payload.mcp_servers,
+        event=event,
+        tool_progress_reporter=tool_progress_reporter,
+    )
+    a2a_manager = A2AManager(
+        logger=logger,
+        tool_progress_reporter=tool_progress_reporter,
+    )
+    tool_manager_config = ToolManagerConfig(
+        tools=config.space.tools,
+        max_tool_calls=config.agent.experimental.loop_configuration.max_tool_calls_per_iteration,
+    )
+
+    postprocessors = []
+
+    if config.agent.services.stock_ticker_config:
+        postprocessors.append(
+            StockTickerPostprocessor(
+                config=config.agent.services.stock_ticker_config,
+                event=event,
+            )
+        )
+
+    if (
+        config.agent.services.follow_up_questions_config
+        and config.agent.services.follow_up_questions_config.number_of_questions > 0
+    ):
+        postprocessors.append(
+            FollowUpPostprocessor(
+                logger=logger,
+                config=config.agent.services.follow_up_questions_config,
+                event=event,
+                historyManager=history_manager,
+                llm_service=LanguageModelService.from_event(event),
+            )
+        )
+
+    return _CommonComponents(
+        chat_service=chat_service,
+        content_service=content_service,
+        uploaded_documents=uploaded_documents,
+        thinking_manager=thinking_manager,
+        reference_manager=reference_manager,
+        history_manager=history_manager,
+        evaluation_manager=evaluation_manager,
+        tool_progress_reporter=tool_progress_reporter,
+        mcp_manager=mcp_manager,
+        a2a_manager=a2a_manager,
+        tool_manager_config=tool_manager_config,
+        mcp_servers=event.payload.mcp_servers,
+        postprocessors=postprocessors,
+    )
+
+
+def _get_openai_client_from_env(config: UniqueAIConfig) -> AsyncOpenAI:
+    use_direct_azure_client = (
+        config.agent.experimental.responses_api_config.use_direct_azure_client
+    )
+    if use_direct_azure_client:
+        # TODO: (for testing only), remove when v1 endpoint is working
+        return AsyncOpenAI(
+            api_key=os.environ["OPENAI_API_KEY"],
+            base_url=os.environ["OPENAI_BASE_URL"],
+        )
+    else:
+        return get_async_openai_client().copy(
+            default_headers={
+                "x-model": config.space.language_model.name
+            }  # Backend requires a model name
+        )
+
+
+async def _build_responses(
+    event: ChatEvent,
+    logger: Logger,
+    config: UniqueAIConfig,
+    common_components: _CommonComponents,
+    debug_info_manager: DebugInfoManager,
+) -> UniqueAIResponsesApi:
+    client = _get_openai_client_from_env(config)
+    builtin_tool_manager = OpenAIBuiltInToolManager(
+        uploaded_files=common_components.uploaded_documents,
+        chat_id=event.payload.chat_id,
+        content_service=common_components.content_service,
+        user_id=event.user_id,
+        company_id=event.company_id,
+        client=client,
+    )
+
+    tool_manager = await ResponsesApiToolManager.build_manager(
+        logger=logger,
+        config=common_components.tool_manager_config,
+        event=event,
+        tool_progress_reporter=common_components.tool_progress_reporter,
+        mcp_manager=common_components.mcp_manager,
+        a2a_manager=common_components.a2a_manager,
+        builtin_tool_manager=builtin_tool_manager,
+    )
+
+    postprocessor_manager = PostprocessorManager(
+        logger=logger,
+        chat_service=common_components.chat_service,
+    )
+    for postprocessor in common_components.postprocessors:
+        postprocessor_manager.add_postprocessor(postprocessor)
+
+    if (
+        config.agent.experimental.responses_api_config.code_interpreter_display_config
+        is not None
+    ):
+        postprocessor_manager.add_postprocessor(
+            ShowExecutedCodePostprocessor(
+                config=config.agent.experimental.responses_api_config.code_interpreter_display_config
+            )
+        )
+
+    postprocessor_manager.add_postprocessor(
+        DisplayCodeInterpreterFilesPostProcessor(
+            client=client,
+            content_service=common_components.content_service,
+            config=DisplayCodeInterpreterFilesPostProcessorConfig(
+                upload_scope_id=config.agent.experimental.responses_api_config.generated_files_scope_id,
+            ),
+        )
+    )
+
+    class ResponsesStreamingHandler(ResponsesSupportCompleteWithReferences):
+        def complete_with_references(self, *args, **kwargs):
+            return common_components.chat_service.complete_responses_with_references(
+                *args, **kwargs
+            )
+
+        async def complete_with_references_async(self, *args, **kwargs):
+            return await common_components.chat_service.complete_responses_with_references_async(
+                *args, **kwargs
+            )
+
+    streaming_handler = ResponsesStreamingHandler()
+
+    _add_sub_agents_postprocessor(
+        postprocessor_manager=postprocessor_manager,
+        tool_manager=tool_manager,
+        user_id=event.user_id,
+        company_id=event.company_id,
+        chat_id=event.payload.chat_id,
+    )
+    _add_sub_agents_evaluation(
+        evaluation_manager=common_components.evaluation_manager,
+        tool_manager=tool_manager,
+        config=config,
+        event=event,
+    )
+
+    return UniqueAIResponsesApi(
+        event=event,
+        config=config,
+        logger=logger,
+        chat_service=common_components.chat_service,
+        content_service=common_components.content_service,
+        tool_manager=tool_manager,
+        thinking_manager=common_components.thinking_manager,
+        streaming_handler=streaming_handler,
+        history_manager=common_components.history_manager,
+        reference_manager=common_components.reference_manager,
+        evaluation_manager=common_components.evaluation_manager,
+        postprocessor_manager=postprocessor_manager,
+        debug_info_manager=debug_info_manager,
+        mcp_servers=event.payload.mcp_servers,
+    )
+
+
+def _build_completions(
+    event: ChatEvent,
+    logger: Logger,
+    config: UniqueAIConfig,
+    common_components: _CommonComponents,
+    debug_info_manager: DebugInfoManager,
+) -> UniqueAI:
+    if len(common_components.uploaded_documents) > 0:
+        logger.info(
+            f"Adding UploadedSearchTool with {len(common_components.uploaded_documents)} documents"
+        )
+        config.space.tools.append(
+            ToolBuildConfig(
+                name=UploadedSearchTool.name,
+                display_name=UploadedSearchTool.name,
+                configuration=UploadedSearchConfig(),
+            ),
+        )
+        event.payload.tool_choices.append(str(UploadedSearchTool.name))
+
+    tool_manager = ToolManager(
+        logger=logger,
+        config=common_components.tool_manager_config,
+        event=event,
+        tool_progress_reporter=common_components.tool_progress_reporter,
+        mcp_manager=common_components.mcp_manager,
+        a2a_manager=common_components.a2a_manager,
+    )
+
+    postprocessor_manager = PostprocessorManager(
+        logger=logger,
+        chat_service=common_components.chat_service,
+    )
+    for postprocessor in common_components.postprocessors:
+        postprocessor_manager.add_postprocessor(postprocessor)
+
+    _add_sub_agents_postprocessor(
+        postprocessor_manager=postprocessor_manager,
+        tool_manager=tool_manager,
+        user_id=event.user_id,
+        company_id=event.company_id,
+        chat_id=event.payload.chat_id,
+    )
+    _add_sub_agents_evaluation(
+        evaluation_manager=common_components.evaluation_manager,
+        tool_manager=tool_manager,
+        config=config,
+        event=event,
+    )
+
+    return UniqueAI(
+        event=event,
+        config=config,
+        logger=logger,
+        chat_service=common_components.chat_service,
+        content_service=common_components.content_service,
+        tool_manager=tool_manager,
+        thinking_manager=common_components.thinking_manager,
+        history_manager=common_components.history_manager,
+        reference_manager=common_components.reference_manager,
+        streaming_handler=common_components.chat_service,
+        evaluation_manager=common_components.evaluation_manager,
+        postprocessor_manager=postprocessor_manager,
+        debug_info_manager=debug_info_manager,
+        mcp_servers=event.payload.mcp_servers,
+    )
+
+
+def _add_sub_agents_postprocessor(
+    postprocessor_manager: PostprocessorManager,
+    tool_manager: ToolManager | ResponsesApiToolManager,
+    user_id: str,
+    company_id: str,
+    chat_id: str,
+) -> None:
+    sub_agents = tool_manager.sub_agents
+    if len(sub_agents) > 0:
+        sub_agent_responses_postprocessor = SubAgentResponsesPostprocessor(
+            user_id=user_id,
+            main_agent_chat_id=chat_id,
+            company_id=company_id,
+        )
+        postprocessor_manager.add_postprocessor(sub_agent_responses_postprocessor)
+
+        for tool in tool_manager.sub_agents:
+            assert isinstance(tool.config, ExtendedSubAgentToolConfig)
+            sub_agent_responses_postprocessor.register_sub_agent_tool(
+                tool, tool.config.response_display_config
+            )
+
+
+def _add_sub_agents_evaluation(
+    evaluation_manager: EvaluationManager,
+    tool_manager: ToolManager | ResponsesApiToolManager,
+    config: UniqueAIConfig,
+    event: ChatEvent,
+) -> None:
+    sub_agents = tool_manager.sub_agents
+    if len(sub_agents) > 0:
+        sub_agent_evaluation = None
+        if config.agent.experimental.sub_agents_config.evaluation_config is not None:
+            sub_agent_evaluation = SubAgentEvaluationService(
+                config=config.agent.experimental.sub_agents_config.evaluation_config,
+                language_model_service=LanguageModelService.from_event(event),
+            )
+            evaluation_manager.add_evaluation(sub_agent_evaluation)
+            for tool in tool_manager.sub_agents:
+                assert isinstance(tool.config, ExtendedSubAgentToolConfig)
+                sub_agent_evaluation.register_sub_agent_tool(
+                    tool, tool.config.evaluation_config
+                )
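The rebuilt module is now the single entry point: `_build_common` assembles the shared managers once and `build_unique_ai` branches on `responses_api_config.use_responses_api`. A hedged usage sketch of a call site follows; the surrounding handler function is an assumption, and the `OPENAI_API_KEY`/`OPENAI_BASE_URL` environment variables are only consulted when `use_direct_azure_client` is enabled (see `_get_openai_client_from_env` above):

```python
# Hypothetical call site, not part of the package.
from unique_orchestrator.unique_ai_builder import build_unique_ai

async def handle_chat_event(event, logger, config, debug_info_manager):
    # Returns UniqueAI (completions path) or UniqueAIResponsesApi (responses path),
    # depending on config.agent.experimental.responses_api_config.use_responses_api.
    agent = await build_unique_ai(
        event=event,
        logger=logger,
        config=config,
        debug_info_manager=debug_info_manager,
    )
    return agent
```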
--- unique_orchestrator-1.3.0/unique_orchestrator/unique_ai_builder.py
+++ /dev/null
@@ -1,212 +0,0 @@
-from logging import Logger
-
-from unique_follow_up_questions.follow_up_postprocessor import (
-    FollowUpPostprocessor,
-)
-from unique_internal_search.uploaded_search.config import (
-    UploadedSearchConfig,
-)
-from unique_internal_search.uploaded_search.service import (
-    UploadedSearchTool,
-)
-from unique_stock_ticker.stock_ticker_postprocessor import (
-    StockTickerPostprocessor,
-)
-from unique_toolkit import LanguageModelService
-from unique_toolkit.agentic.debug_info_manager.debug_info_manager import (
-    DebugInfoManager,
-)
-from unique_toolkit.agentic.evaluation.evaluation_manager import EvaluationManager
-from unique_toolkit.agentic.evaluation.hallucination.hallucination_evaluation import (
-    HallucinationEvaluation,
-)
-from unique_toolkit.agentic.history_manager import (
-    history_manager as history_manager_module,
-)
-from unique_toolkit.agentic.history_manager.history_manager import (
-    HistoryManager,
-    HistoryManagerConfig,
-)
-from unique_toolkit.agentic.postprocessor.postprocessor_manager import (
-    PostprocessorManager,
-)
-from unique_toolkit.agentic.reference_manager.reference_manager import ReferenceManager
-from unique_toolkit.agentic.thinking_manager.thinking_manager import (
-    ThinkingManager,
-    ThinkingManagerConfig,
-)
-from unique_toolkit.agentic.tools.a2a import (
-    A2AManager,
-    ExtendedSubAgentToolConfig,
-    SubAgentEvaluationService,
-    SubAgentResponsesPostprocessor,
-)
-from unique_toolkit.agentic.tools.config import ToolBuildConfig
-from unique_toolkit.agentic.tools.mcp.manager import MCPManager
-from unique_toolkit.agentic.tools.tool_manager import ToolManager, ToolManagerConfig
-from unique_toolkit.agentic.tools.tool_progress_reporter import ToolProgressReporter
-from unique_toolkit.app.schemas import ChatEvent
-from unique_toolkit.chat.service import ChatService
-from unique_toolkit.content.service import ContentService
-
-from unique_orchestrator.config import UniqueAIConfig
-from unique_orchestrator.unique_ai import UniqueAI
-
-
-def build_unique_ai(
-    event: ChatEvent,
-    logger: Logger,
-    config: UniqueAIConfig,
-    debug_info_manager: DebugInfoManager,
-) -> UniqueAI:
-    chat_service = ChatService(event)
-
-    content_service = ContentService.from_event(event)
-    tool_progress_reporter = ToolProgressReporter(chat_service=chat_service)
-    reference_manager = ReferenceManager()
-    thinking_manager_config = ThinkingManagerConfig(
-        thinking_steps_display=config.agent.experimental.thinking_steps_display
-    )
-
-    thinking_manager = ThinkingManager(
-        logger=logger,
-        config=thinking_manager_config,
-        tool_progress_reporter=tool_progress_reporter,
-        chat_service=chat_service,
-    )
-
-    uploaded_documents = content_service.get_documents_uploaded_to_chat()
-    if len(uploaded_documents) > 0:
-        logger.info(
-            f"Adding UploadedSearchTool with {len(uploaded_documents)} documents"
-        )
-        config.space.tools.append(
-            ToolBuildConfig(
-                name=UploadedSearchTool.name,
-                display_name=UploadedSearchTool.name,
-                configuration=UploadedSearchConfig(),
-            ),
-        )
-        event.payload.tool_choices.append(str(UploadedSearchTool.name))
-
-    mcp_manager = MCPManager(
-        mcp_servers=event.payload.mcp_servers,
-        event=event,
-        tool_progress_reporter=tool_progress_reporter,
-    )
-
-    a2a_manager = A2AManager(
-        logger=logger,
-        tool_progress_reporter=tool_progress_reporter,
-    )
-
-    tool_config = ToolManagerConfig(
-        tools=config.space.tools,
-        max_tool_calls=config.agent.experimental.loop_configuration.max_tool_calls_per_iteration,
-    )
-
-    tool_manager = ToolManager(
-        logger=logger,
-        config=tool_config,
-        event=event,
-        tool_progress_reporter=tool_progress_reporter,
-        mcp_manager=mcp_manager,
-        a2a_manager=a2a_manager,
-    )
-
-    history_manager_config = HistoryManagerConfig(
-        experimental_features=history_manager_module.ExperimentalFeatures(
-            full_sources_serialize_dump=False,
-        ),
-        percent_of_max_tokens_for_history=config.agent.input_token_distribution.percent_for_history,
-        language_model=config.space.language_model,
-        uploaded_content_config=config.agent.services.uploaded_content_config,
-    )
-
-    history_manager = HistoryManager(
-        logger,
-        event,
-        history_manager_config,
-        config.space.language_model,
-        reference_manager,
-    )
-
-    evaluation_manager = EvaluationManager(logger=logger, chat_service=chat_service)
-
-    if config.agent.services.evaluation_config:
-        evaluation_manager.add_evaluation(
-            HallucinationEvaluation(
-                config.agent.services.evaluation_config.hallucination_config,
-                event,
-                reference_manager,
-            )
-        )
-
-    postprocessor_manager = PostprocessorManager(
-        logger=logger,
-        chat_service=chat_service,
-    )
-
-    if config.agent.services.stock_ticker_config:
-        postprocessor_manager.add_postprocessor(
-            StockTickerPostprocessor(
-                config=config.agent.services.stock_ticker_config,
-                event=event,
-            )
-        )
-
-    if (
-        config.agent.services.follow_up_questions_config
-        and config.agent.services.follow_up_questions_config.number_of_questions > 0
-    ):
-        postprocessor_manager.add_postprocessor(
-            FollowUpPostprocessor(
-                logger=logger,
-                config=config.agent.services.follow_up_questions_config,
-                event=event,
-                historyManager=history_manager,
-                llm_service=LanguageModelService.from_event(event),
-            )
-        )
-
-    if len(tool_manager.sub_agents) > 0:
-        sub_agent_responses_postprocessor = SubAgentResponsesPostprocessor(
-            user_id=event.user_id,
-            main_agent_chat_id=event.payload.chat_id,
-            company_id=event.company_id,
-        )
-        postprocessor_manager.add_postprocessor(sub_agent_responses_postprocessor)
-
-        sub_agent_evaluation = None
-        if config.agent.experimental.sub_agents_config.evaluation_config is not None:
-            sub_agent_evaluation = SubAgentEvaluationService(
-                config=config.agent.experimental.sub_agents_config.evaluation_config,
-                language_model_service=LanguageModelService.from_event(event),
-            )
-            evaluation_manager.add_evaluation(sub_agent_evaluation)
-
-        for tool in tool_manager.sub_agents:
-            assert isinstance(tool.config, ExtendedSubAgentToolConfig)
-            sub_agent_responses_postprocessor.register_sub_agent_tool(
-                tool, tool.config.response_display_config
-            )
-            if sub_agent_evaluation is not None:
-                sub_agent_evaluation.register_sub_agent_tool(
-                    tool, tool.config.evaluation_config
-                )
-
-    return UniqueAI(
-        event=event,
-        config=config,
-        logger=logger,
-        chat_service=chat_service,
-        content_service=content_service,
-        tool_manager=tool_manager,
-        thinking_manager=thinking_manager,
-        history_manager=history_manager,
-        reference_manager=reference_manager,
-        evaluation_manager=evaluation_manager,
-        postprocessor_manager=postprocessor_manager,
-        debug_info_manager=debug_info_manager,
-        mcp_servers=event.payload.mcp_servers,
-    )
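Note that the removed 1.3.0 builder was a plain function, while its 1.4.0 replacement above is a coroutine, so existing callers need an `await` when upgrading (hypothetical call sites, not code from either version):

```python
# 1.3.0: synchronous
agent = build_unique_ai(event=event, logger=logger, config=config, debug_info_manager=debug_info_manager)

# 1.4.0: asynchronous
agent = await build_unique_ai(event=event, logger=logger, config=config, debug_info_manager=debug_info_manager)
```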
Files without changes: LICENSE, README.md, unique_orchestrator/prompts/generic_reference_prompt.jinja2, unique_orchestrator/prompts/system_prompt.jinja2, unique_orchestrator/prompts/user_message_prompt.jinja2, unique_orchestrator/tests/test_unique_ai_reference_order.py.