unique_orchestrator 1.11.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of unique_orchestrator might be problematic. Click here for more details.
- unique_orchestrator/config.py +401 -0
- unique_orchestrator/prompts/generic_reference_prompt.jinja2 +46 -0
- unique_orchestrator/prompts/system_prompt.jinja2 +166 -0
- unique_orchestrator/prompts/user_message_prompt.jinja2 +23 -0
- unique_orchestrator/tests/test_unique_ai_get_filtered_user_metadata.py +259 -0
- unique_orchestrator/tests/test_unique_ai_log_tool_calls.py +729 -0
- unique_orchestrator/tests/test_unique_ai_reference_order.py +134 -0
- unique_orchestrator/tests/test_unique_ai_update_debug_info_for_tool_control.py +339 -0
- unique_orchestrator/unique_ai.py +537 -0
- unique_orchestrator/unique_ai_builder.py +568 -0
- unique_orchestrator-1.11.1.dist-info/LICENSE +1 -0
- unique_orchestrator-1.11.1.dist-info/METADATA +199 -0
- unique_orchestrator-1.11.1.dist-info/RECORD +14 -0
- unique_orchestrator-1.11.1.dist-info/WHEEL +4 -0
|
@@ -0,0 +1,537 @@
|
|
|
1
|
+
import asyncio
|
|
2
|
+
from datetime import datetime, timezone
|
|
3
|
+
from logging import Logger
|
|
4
|
+
|
|
5
|
+
import jinja2
|
|
6
|
+
from unique_toolkit.agentic.debug_info_manager.debug_info_manager import (
|
|
7
|
+
DebugInfoManager,
|
|
8
|
+
)
|
|
9
|
+
from unique_toolkit.agentic.evaluation.evaluation_manager import EvaluationManager
|
|
10
|
+
from unique_toolkit.agentic.history_manager.history_manager import HistoryManager
|
|
11
|
+
from unique_toolkit.agentic.loop_runner import LoopIterationRunner
|
|
12
|
+
from unique_toolkit.agentic.message_log_manager.service import MessageStepLogger
|
|
13
|
+
from unique_toolkit.agentic.postprocessor.postprocessor_manager import (
|
|
14
|
+
PostprocessorManager,
|
|
15
|
+
)
|
|
16
|
+
from unique_toolkit.agentic.reference_manager.reference_manager import ReferenceManager
|
|
17
|
+
from unique_toolkit.agentic.thinking_manager.thinking_manager import ThinkingManager
|
|
18
|
+
from unique_toolkit.agentic.tools.tool_manager import (
|
|
19
|
+
ResponsesApiToolManager,
|
|
20
|
+
SafeTaskExecutor,
|
|
21
|
+
ToolManager,
|
|
22
|
+
)
|
|
23
|
+
from unique_toolkit.app.schemas import ChatEvent, McpServer
|
|
24
|
+
from unique_toolkit.chat.service import ChatService
|
|
25
|
+
from unique_toolkit.content.service import ContentService
|
|
26
|
+
from unique_toolkit.language_model import LanguageModelAssistantMessage
|
|
27
|
+
from unique_toolkit.language_model.schemas import (
|
|
28
|
+
LanguageModelMessages,
|
|
29
|
+
LanguageModelStreamResponse,
|
|
30
|
+
)
|
|
31
|
+
from unique_toolkit.protocols.support import (
|
|
32
|
+
ResponsesSupportCompleteWithReferences,
|
|
33
|
+
SupportCompleteWithReferences,
|
|
34
|
+
)
|
|
35
|
+
|
|
36
|
+
from unique_orchestrator.config import UniqueAIConfig
|
|
37
|
+
|
|
38
|
+
# User-facing fallback text written to the assistant message when the model
# returns neither content nor a tool call (see ``UniqueAI._process_plan``).
EMPTY_MESSAGE_WARNING = (
    "⚠️ **The language model was unable to produce an output.**\n"
    "It did not generate any content or perform a tool call in response to your request. "
    "This is a limitation of the language model itself.\n\n"
    "**Please try adapting or simplifying your prompt.** "
    "Rewording your input can often help the model respond successfully."
)
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
class UniqueAI:
    """Agentic loop orchestrator.

    ``run`` repeatedly asks the language model for a plan
    (``_plan_or_execute``), executes any tool calls the model requests,
    and exits when the model answers without tool calls, a tool takes
    control of the conversation, or the configured maximum number of
    iterations is reached.
    """

    # Class-level defaults, shadowed per instance as the loop progresses.
    start_text = ""  # text the thinking manager prepends to the streamed answer
    current_iteration_index = 0  # zero-based index of the current loop iteration

    def __init__(
        self,
        logger: Logger,
        event: ChatEvent,
        config: UniqueAIConfig,
        chat_service: ChatService,
        content_service: ContentService,
        debug_info_manager: DebugInfoManager,
        streaming_handler: SupportCompleteWithReferences,
        reference_manager: ReferenceManager,
        thinking_manager: ThinkingManager,
        tool_manager: ToolManager,
        history_manager: HistoryManager,
        evaluation_manager: EvaluationManager,
        postprocessor_manager: PostprocessorManager,
        message_step_logger: MessageStepLogger,
        mcp_servers: list[McpServer],
        loop_iteration_runner: LoopIterationRunner,
    ):
        """Store all collaborators; no I/O is performed here."""
        self._logger = logger
        self._event = event
        self._config = config
        self._chat_service = chat_service
        self._content_service = content_service

        self._debug_info_manager = debug_info_manager
        self._reference_manager = reference_manager
        self._thinking_manager = thinking_manager
        self._tool_manager = tool_manager

        self._history_manager = history_manager

        self._evaluation_manager = evaluation_manager
        self._postprocessor_manager = postprocessor_manager
        # Seeded from the triggering event; may be replaced when a new
        # assistant message is created mid-loop (see
        # ``_create_new_assistant_message_if_loop_response_contains_content``).
        self._latest_assistant_id: str = event.payload.assistant_message.id
        self._mcp_servers = mcp_servers
        self._streaming_handler = streaming_handler

        self._message_step_logger = message_step_logger
        # Helper flag for the control loop: set when a tool takes over the
        # conversation, in which case the tool finalizes the message itself.
        self._tool_took_control = False
        self._loop_iteration_runner = loop_iteration_runner

    ############################################################
    # Override of base methods
    ############################################################
    # @track(name="loop_agent_run") # Group traces together
    async def run(self):
        """
        Main loop of the agent. The agent will iterate through the loop, runs the plan and
        processes tool calls if any are returned.
        """
        self._logger.info("Start LoopAgent...")

        if self._history_manager.has_no_loop_messages():  # TODO: clarify why this check is needed — there seem to be no loop messages yet whenever run() is called.
            self._chat_service.modify_assistant_message(
                content="Starting agentic loop..."  # TODO: this must be more informative
            )

        ## Loop iteration
        for i in range(self._config.agent.max_loop_iterations):
            self.current_iteration_index = i
            self._logger.info(f"Starting iteration {i + 1}...")

            # Plan execution: ask the model what to do next.
            loop_response = await self._plan_or_execute()
            self._logger.info("Done with _plan_or_execute")

            self._reference_manager.add_references(loop_response.message.references)
            self._logger.info("Done with adding references")

            # Update tool progress reporter
            self._thinking_manager.update_tool_progress_reporter(loop_response)

            # Execute the plan (runs tools, or evaluations when none were called).
            exit_loop = await self._process_plan(loop_response)
            self._logger.info("Done with _process_plan")

            if exit_loop:
                self._thinking_manager.close_thinking_steps(loop_response)
                self._logger.info("Exiting loop.")
                break

            if i == self._config.agent.max_loop_iterations - 1:
                self._logger.error("Max iterations reached.")
                await self._chat_service.modify_assistant_message_async(
                    content="I have reached the maximum number of self-reflection iterations. Please clarify your request and try again...",
                )
                break

            # Carry forward the thinking text so the next streamed iteration
            # continues from it rather than overwriting it.
            self.start_text = self._thinking_manager.update_start_text(
                self.start_text, loop_response
            )
        await self._update_debug_info_if_tool_took_control()

        # Only set completed_at if no tool took control. Tools that take control will set the message state to completed themselves.
        await self._chat_service.modify_assistant_message_async(
            set_completed_at=not self._tool_took_control,
        )

    # @track()
    async def _plan_or_execute(self) -> LanguageModelStreamResponse:
        """Compose the messages for this iteration and run one model call.

        Returns the streamed model response, which may contain content,
        tool calls, or both.
        """
        self._logger.info("Planning or executing the loop.")
        messages = await self._compose_message_plan_execution()

        self._logger.info("Done composing message plan execution.")

        return await self._loop_iteration_runner(
            messages=messages,
            iteration_index=self.current_iteration_index,
            streaming_handler=self._streaming_handler,
            model=self._config.space.language_model,
            tools=self._tool_manager.get_tool_definitions(),
            content_chunks=self._reference_manager.get_chunks(),
            start_text=self.start_text,
            debug_info=self._debug_info_manager.get(),
            temperature=self._config.agent.experimental.temperature,
            tool_choices=self._tool_manager.get_forced_tools(),
            other_options=self._config.agent.experimental.additional_llm_options,
        )

    async def _process_plan(self, loop_response: LanguageModelStreamResponse) -> bool:
        """Act on the model response and decide whether to exit the loop.

        Returns ``True`` when the loop should stop (empty response, a tool
        took control, or a final answer without tool calls).
        """
        self._logger.info(
            "Processing the plan, executing the tools and checking for loop exit conditions once all is done."
        )

        if loop_response.is_empty():
            # Model produced neither content nor a tool call — surface the
            # fallback warning to the user and stop.
            self._logger.debug("Empty model response, exiting loop.")
            self._chat_service.modify_assistant_message(content=EMPTY_MESSAGE_WARNING)
            return True

        call_tools = len(loop_response.tool_calls or []) > 0
        if call_tools:
            self._logger.debug(
                "Tools were called we process them and do not exit the loop"
            )
            await self._create_new_assistant_message_if_loop_response_contains_content(
                loop_response
            )

            return await self._handle_tool_calls(loop_response)

        self._logger.debug("No tool calls. we might exit the loop")

        return await self._handle_no_tool_calls(loop_response)

    async def _compose_message_plan_execution(self) -> LanguageModelMessages:
        """Render the system/user prompts and merge them with the chat history."""
        original_user_message = self._event.payload.user_message.text
        rendered_user_message_string = await self._render_user_prompt()
        rendered_system_message_string = await self._render_system_prompt()

        messages = await self._history_manager.get_history_for_model_call(
            original_user_message,
            rendered_user_message_string,
            rendered_system_message_string,
            # Postprocessors know which artifacts they added to previous
            # messages and strip them before the next model call.
            self._postprocessor_manager.remove_from_text,
        )
        return messages

    async def _render_user_prompt(self) -> str:
        """Render the configured user-message Jinja2 template.

        The template receives the raw user query plus tool/MCP prompt
        snippets, previously used tools, filtered user metadata, and
        sub-agent referencing instructions when configured.
        """
        user_message_template = jinja2.Template(
            self._config.agent.prompt_config.user_message_prompt_template
        )

        tool_descriptions_with_user_prompts = [
            prompts.tool_user_prompt
            for prompts in self._tool_manager.get_tool_prompts()
        ]

        used_tools = [t.name for t in self._history_manager.get_tool_calls()]
        sub_agent_calls = self._tool_manager.filter_tool_calls(
            self._history_manager.get_tool_calls(), ["subagent"]
        )

        mcp_server_user_prompts = [
            mcp_server.user_prompt for mcp_server in self._mcp_servers
        ]

        user_metadata = self._get_filtered_user_metadata()

        tool_descriptions = self._tool_manager.get_tool_prompts()

        query = self._event.payload.user_message.text

        # Sub-agent referencing instructions are only injected when they are
        # configured AND at least one sub-agent was actually called.
        if (
            self._config.agent.experimental.sub_agents_config.referencing_config
            is not None
            and len(sub_agent_calls) > 0
        ):
            use_sub_agent_references = True
            sub_agent_referencing_instructions = self._config.agent.experimental.sub_agents_config.referencing_config.referencing_instructions_for_user_prompt
        else:
            use_sub_agent_references = False
            sub_agent_referencing_instructions = None

        user_msg = user_message_template.render(
            query=query,
            tool_descriptions=tool_descriptions,
            used_tools=used_tools,
            mcp_server_user_prompts=list(mcp_server_user_prompts),
            tool_descriptions_with_user_prompts=tool_descriptions_with_user_prompts,
            use_sub_agent_references=use_sub_agent_references,
            sub_agent_referencing_instructions=sub_agent_referencing_instructions,
            user_metadata=user_metadata,
        )
        return user_msg

    async def _render_system_prompt(self) -> str:
        """Render the configured system-prompt Jinja2 template.

        Supplies model info, date, tool descriptions, loop-progress
        counters, MCP prompts, user metadata, and the list of
        chat-uploaded documents that have already expired.
        """
        # TODO: Collect tool information here and adapt to system prompt
        tool_descriptions = self._tool_manager.get_tool_prompts()

        used_tools = [t.name for t in self._history_manager.get_tool_calls()]
        sub_agent_calls = self._tool_manager.filter_tool_calls(
            self._history_manager.get_tool_calls(), ["subagent"]
        )

        system_prompt_template = jinja2.Template(
            self._config.agent.prompt_config.system_prompt_template
        )

        # NOTE(review): local (naive) date, while document expiry below uses
        # UTC — presumably intentional for display; confirm.
        date_string = datetime.now().strftime("%A %B %d, %Y")

        user_metadata = self._get_filtered_user_metadata()

        mcp_server_system_prompts = [
            mcp_server.system_prompt for mcp_server in self._mcp_servers
        ]

        # Same gating as in ``_render_user_prompt``, but with the
        # system-prompt variant of the referencing instructions.
        if (
            self._config.agent.experimental.sub_agents_config.referencing_config
            is not None
            and len(sub_agent_calls) > 0
        ):
            use_sub_agent_references = True
            sub_agent_referencing_instructions = self._config.agent.experimental.sub_agents_config.referencing_config.referencing_instructions_for_system_prompt
        else:
            use_sub_agent_references = False
            sub_agent_referencing_instructions = None

        uploaded_documents = self._content_service.get_documents_uploaded_to_chat()
        uploaded_documents_expired = [
            doc
            for doc in uploaded_documents
            if doc.expired_at is not None
            and doc.expired_at <= datetime.now(timezone.utc)
        ]

        system_message = system_prompt_template.render(
            model_info=self._config.space.language_model.model_dump(mode="json"),
            date_string=date_string,
            tool_descriptions=tool_descriptions,
            used_tools=used_tools,
            project_name=self._config.space.project_name,
            custom_instructions=self._config.space.custom_instructions,
            max_tools_per_iteration=self._config.agent.experimental.loop_configuration.max_tool_calls_per_iteration,
            max_loop_iterations=self._config.agent.max_loop_iterations,
            current_iteration=self.current_iteration_index + 1,
            mcp_server_system_prompts=mcp_server_system_prompts,
            use_sub_agent_references=use_sub_agent_references,
            sub_agent_referencing_instructions=sub_agent_referencing_instructions,
            user_metadata=user_metadata,
            uploaded_documents_expired=uploaded_documents_expired,
        )
        return system_message

    async def _handle_no_tool_calls(
        self, loop_response: LanguageModelStreamResponse
    ) -> bool:
        """Handle the case where no tool calls are returned.

        Runs evaluations and postprocessors concurrently, logs a warning
        when any evaluation is negative, and always exits the loop.
        """
        task_executor = SafeTaskExecutor(
            logger=self._logger,
        )

        selected_evaluation_names = self._tool_manager.get_evaluation_check_list()
        evaluation_results = task_executor.execute_async(
            self._evaluation_manager.run_evaluations,
            selected_evaluation_names,
            loop_response,
            self._latest_assistant_id,
        )

        postprocessor_result = task_executor.execute_async(
            self._postprocessor_manager.run_postprocessors,
            # Deep copy so postprocessors cannot mutate the response the
            # evaluations are looking at.
            loop_response.model_copy(deep=True),
        )

        _, evaluation_results = await asyncio.gather(
            postprocessor_result,
            evaluation_results,
        )

        if evaluation_results.success and not all(
            result.is_positive for result in evaluation_results.unpack()
        ):
            self._logger.warning(
                "we should add here the retry counter add an instruction and retry the loop for now we just exit the loop"
            )  # TODO: add retry counter and instruction

        # No tool calls means the model produced its final answer — exit.
        return True

    def _log_tool_calls(self, tool_calls: list) -> None:
        """Record tool calls in history and log a per-tool summary message step.

        Counts calls per known tool, skips tools on the do-not-log list,
        and writes a single "Triggered Tool Calls" entry when anything
        remains.
        """
        # Create dictionary mapping tool names to display names for efficient lookup
        all_tools_dict: dict[str, str] = {
            tool.name: tool.display_name()
            for tool in self._tool_manager.available_tools
        }

        # Tool names that should not be logged in the message steps
        tool_names_not_to_log = ["DeepResearch"]

        tool_string: str = ""
        used_tools: dict[str, int] = {}
        for tool_call in tool_calls:
            # Every call is recorded in history, even ones not summarized below.
            self._history_manager.add_tool_call(tool_call)
            if tool_call.name in all_tools_dict:
                used_tools[tool_call.name] = used_tools.get(tool_call.name, 0) + 1

        for tool_name, count in used_tools.items():
            if tool_name in tool_names_not_to_log:
                continue
            # Fall back to the raw name when the display name is empty/falsy.
            display_name = all_tools_dict[tool_name] or tool_name
            tool_string += (
                f"\n• {display_name} ({count}x)" if count > 1 else f"\n• {display_name}"
            )

        if tool_string:
            self._message_step_logger.create_message_log_entry(
                text=f"**Triggered Tool Calls:**\n {tool_string}", references=[]
            )

    async def _handle_tool_calls(
        self, loop_response: LanguageModelStreamResponse
    ) -> bool:
        """Handle the case where tool calls are returned.

        Filters, records, and executes the calls, stores the results, and
        returns ``True`` (exit the loop) only when a tool took control.
        """
        self._logger.info("Processing tool calls")

        tool_calls = loop_response.tool_calls or []

        # Filter tool calls
        tool_calls = self._tool_manager.filter_duplicate_tool_calls(tool_calls)
        tool_calls = self._tool_manager.filter_tool_calls_by_max_tool_calls_allowed(
            tool_calls
        )

        # Append function calls to history
        self._history_manager._append_tool_calls_to_history(tool_calls)

        # Log tool calls
        self._log_tool_calls(tool_calls)
        # Execute tool calls
        tool_call_responses = await self._tool_manager.execute_selected_tools(
            tool_calls
        )

        # Process results with error handling
        # Add tool call results to history first to stabilize source numbering,
        # then extract referenceable chunks and debug info
        self._history_manager.add_tool_call_results(tool_call_responses)
        self._reference_manager.extract_referenceable_chunks(tool_call_responses)
        self._debug_info_manager.extract_tool_debug_info(
            tool_call_responses, self.current_iteration_index
        )

        self._tool_took_control = self._tool_manager.does_a_tool_take_control(
            tool_calls
        )
        return self._tool_took_control

    async def _create_new_assistant_message_if_loop_response_contains_content(
        self, loop_response: LanguageModelStreamResponse
    ) -> None:
        """Open a fresh assistant message when the response carried real content.

        Skipped when thinking output is displayed or when the response text
        is empty once the start text is removed.
        """
        if self._thinking_manager.thinking_is_displayed():
            return
        if not loop_response.message.text:
            return

        # If only the start text is present the model did not produce content.
        # So we need to remove that text from the message before checking.
        message_text_without_start_text = loop_response.message.text.replace(
            self.start_text.strip(), ""
        ).strip()
        if message_text_without_start_text == "":
            return

        ###
        # ToDo: Once references on existing assistant messages can be deleted, we will switch from creating a new assistant message to modifying the existing one (with previous references deleted)
        ###
        new_assistant_message = await self._chat_service.create_assistant_message_async(
            content=""
        )

        # the new message must have an id that is valid else we use the old one
        self._latest_assistant_id = (
            new_assistant_message.id or self._latest_assistant_id
        )

        self._history_manager.add_assistant_message(
            LanguageModelAssistantMessage(
                content=loop_response.message.original_text or "",
            )
        )

    def _get_filtered_user_metadata(self) -> dict[str, str]:
        """
        Filter user metadata to only include keys specified in the agent's prompt config.

        Returns:
            Dictionary containing only the metadata keys that are configured to be included.
        """
        user_metadata = {}
        if (
            self._config.agent.prompt_config.user_metadata
            and self._event.payload.user_metadata is not None
        ):
            # Filter metadata to only include selected keys
            user_metadata = {
                k: str(v)
                for k, v in self._event.payload.user_metadata.items()
                if k in self._config.agent.prompt_config.user_metadata
            }
        return user_metadata

    async def _update_debug_info_if_tool_took_control(self) -> None:
        """
        Update debug info when a tool takes control of the conversation.

        DeepResearch is excluded as it handles debug info directly since it calls
        the orchestrator multiple times.
        """
        if not self._tool_took_control:
            return

        tool_names = [tool["name"] for tool in self._debug_info_manager.get()["tools"]]
        if "DeepResearch" in tool_names:
            return

        debug_info_event = {
            "assistant": {
                "id": self._event.payload.assistant_id,
                "name": self._event.payload.name,
            },
            "chosenModule": self._event.payload.name,
            "userMetadata": self._event.payload.user_metadata,
            "toolParameters": self._event.payload.tool_parameters,
            **self._debug_info_manager.get(),
        }

        await self._chat_service.update_debug_info_async(debug_info=debug_info_event)
|
|
498
|
+
|
|
499
|
+
|
|
500
|
+
class UniqueAIResponsesApi(UniqueAI):
    """``UniqueAI`` variant wired to Responses-API flavored collaborators.

    All loop behavior is inherited unchanged; this subclass only narrows
    the declared types of the streaming handler and the tool manager.
    """

    def __init__(
        self,
        logger: Logger,
        event: ChatEvent,
        config: UniqueAIConfig,
        chat_service: ChatService,
        content_service: ContentService,
        debug_info_manager: DebugInfoManager,
        streaming_handler: ResponsesSupportCompleteWithReferences,
        reference_manager: ReferenceManager,
        thinking_manager: ThinkingManager,
        tool_manager: ResponsesApiToolManager,
        history_manager: HistoryManager,
        evaluation_manager: EvaluationManager,
        postprocessor_manager: PostprocessorManager,
        message_step_logger: MessageStepLogger,
        mcp_servers: list[McpServer],
        loop_iteration_runner: LoopIterationRunner,
    ) -> None:
        """Delegate construction entirely to ``UniqueAI``.

        The Responses-API collaborators satisfy the base protocols at
        runtime; the ignores silence the nominal-type mismatch only.
        """
        super().__init__(
            logger=logger,
            event=event,
            config=config,
            chat_service=chat_service,
            content_service=content_service,
            debug_info_manager=debug_info_manager,
            reference_manager=reference_manager,
            thinking_manager=thinking_manager,
            history_manager=history_manager,
            evaluation_manager=evaluation_manager,
            postprocessor_manager=postprocessor_manager,
            message_step_logger=message_step_logger,
            mcp_servers=mcp_servers,
            loop_iteration_runner=loop_iteration_runner,
            streaming_handler=streaming_handler,  # type: ignore
            tool_manager=tool_manager,  # type: ignore
        )
|