unique_orchestrator 0.0.1 (unique_orchestrator-0.0.1-py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of unique_orchestrator might be problematic. See the advisory details on the package registry page for more information.

@@ -0,0 +1,18 @@
1
{# User message template for the LLM call.
 - Renders the raw user query first, then (optionally) server-wide MCP context.
 - Variables available:
   - query: The original user query
   - model_info: Information about the language model being used
   - date_string: The current date in formatted string
   - mcp_server_user_prompts: List of unique server-wide user prompts from MCP servers
   - tool_descriptions_with_user_prompts: List of UniqueToolDescription objects with user prompts
 - NOTE: the comment closer is deliberately glued to `{{ query }}` so the rendered
   output starts with the query and no leading newline. The `{%- ... %}` tags strip
   preceding whitespace; do not reformat them.
#}{{ query }}

{%- if mcp_server_user_prompts and mcp_server_user_prompts|length > 0 %}

## MCP Server Context
{%- for server_prompt in mcp_server_user_prompts %}

{{ server_prompt }}
{%- endfor %}
{%- endif %}
@@ -0,0 +1,115 @@
1
+ from unique_orchestrator.config import (
2
+ SearchAgentConfig,
3
+ search_agent_config_to_unique_ai_space_config,
4
+ )
5
+
6
+
7
def test_ai_tools_conversion():
    """Tool configurations survive the SearchAgentConfig -> space config conversion.

    Verifies that the converted space carries the same number of tools as the
    legacy config, and that each tool keeps its exact name and configuration,
    in the same order.
    """
    legacy = SearchAgentConfig()
    converted = search_agent_config_to_unique_ai_space_config(legacy)

    assert len(converted.space.tools) == len(legacy.tools)
    for legacy_tool, converted_tool in zip(legacy.tools, converted.space.tools):
        assert legacy_tool.name == converted_tool.name
        assert legacy_tool.configuration == converted_tool.configuration
25
+
26
+
27
def test_ai_services_conversion():
    """Service configurations are transferred intact during conversion.

    Compares the follow-up questions, evaluation, and stock ticker service
    settings on the converted agent against the original config.
    """
    legacy = SearchAgentConfig()
    converted = search_agent_config_to_unique_ai_space_config(legacy)

    agent_services = converted.agent.services
    assert (
        agent_services.follow_up_questions_config == legacy.follow_up_questions_config
    )
    assert agent_services.evaluation_config == legacy.evaluation_config
    assert agent_services.stock_ticker_config == legacy.stock_ticker_config
46
+
47
+
48
def test_ai_experimental_config_conversion():
    """Experimental flags are carried over into the converted config.

    Sets ``thinking_steps_display`` on the legacy config and checks the
    boolean arrives unchanged under ``agent.experimental``.
    """
    legacy = SearchAgentConfig()
    legacy.thinking_steps_display = True

    converted = search_agent_config_to_unique_ai_space_config(legacy)

    assert (
        converted.agent.experimental.thinking_steps_display
        == legacy.thinking_steps_display
    )
66
+
67
+
68
def test_ai_force_checks_conversion():
    """Force-checks configuration for stream response references is preserved.

    Converts a default legacy config and checks the
    ``force_checks_on_stream_response_references`` value is unchanged.
    """
    legacy = SearchAgentConfig()
    converted = search_agent_config_to_unique_ai_space_config(legacy)

    expected = legacy.force_checks_on_stream_response_references
    assert (
        converted.agent.experimental.force_checks_on_stream_response_references
        == expected
    )
85
+
86
+
87
def test_ai_custom_values_conversion():
    """Custom configuration values are transferred without modification.

    Covers project name, custom instructions, temperature, loop iteration
    limit, and additional LLM options, checking each lands in its expected
    location on the converted config.
    """
    legacy = SearchAgentConfig(
        project_name="Custom Project",
        custom_instructions="Custom Instructions",
        temperature=0.8,
        max_loop_iterations=5,
        additional_llm_options={"some_option": "value"},
    )

    converted = search_agent_config_to_unique_ai_space_config(legacy)

    assert converted.space.project_name == "Custom Project"
    assert converted.space.custom_instructions == "Custom Instructions"
    assert converted.agent.experimental.temperature == 0.8
    assert converted.agent.max_loop_iterations == 5
    expected_options = {"some_option": "value"}
    assert converted.agent.experimental.additional_llm_options == expected_options
@@ -0,0 +1,127 @@
1
+ from unittest.mock import AsyncMock, MagicMock
2
+
3
+ import pytest
4
+
5
+
6
@pytest.mark.asyncio
async def test_history_updated_before_reference_extraction(monkeypatch):
    """Tool-call results must enter history BEFORE reference/debug extraction.

    Builds a minimal UniqueAI with fully mocked collaborators, records the
    order in which the three post-tool-call hooks fire during one loop
    iteration, and asserts the sequence: history -> references -> debug info.
    This ordering keeps source numbering stable for references.
    """
    # Lazy import to avoid heavy dependencies at module import time
    from unique_orchestrator.unique_ai import UniqueAI

    # Create a minimal UniqueAI instance with mocked dependencies
    mock_logger = MagicMock()

    # Minimal stand-in for the event payload shape UniqueAI reads:
    # payload.assistant_message.id and payload.user_message.text.
    class DummyEvent:
        class Payload:
            class AssistantMessage:
                id = "assist_1"

            assistant_message = AssistantMessage()
            user_message = MagicMock()
            user_message.text = "query"

        payload = Payload()

    dummy_event = MagicMock()
    dummy_event.payload = DummyEvent.Payload()

    # Config: exactly one loop iteration so run() performs a single plan/execute.
    mock_config = MagicMock()
    mock_config.agent.max_loop_iterations = 1
    mock_config.space.language_model.name = "dummy-model"
    mock_config.agent.experimental.temperature = 0.0
    mock_config.agent.experimental.additional_llm_options = {}

    # Managers
    mock_history_manager = MagicMock()
    mock_history_manager.has_no_loop_messages.return_value = True
    mock_history_manager._append_tool_calls_to_history = MagicMock()
    mock_history_manager.add_tool_call_results = MagicMock()

    mock_reference_manager = MagicMock()
    mock_reference_manager.extract_referenceable_chunks = MagicMock()
    mock_reference_manager.get_chunks.return_value = []

    mock_thinking_manager = MagicMock()
    mock_debug_info_manager = MagicMock()
    mock_debug_info_manager.get.return_value = {}
    mock_debug_info_manager.extract_tool_debug_info = MagicMock()

    # Tool manager: no forced tools, one fake tool-call result, and no tool
    # takes control (so the loop can exit normally).
    mock_tool_manager = MagicMock()
    mock_tool_manager.get_forced_tools.return_value = []
    mock_tool_manager.get_tool_definitions.return_value = []
    mock_tool_manager.execute_selected_tools = AsyncMock(return_value=[MagicMock()])
    mock_tool_manager.does_a_tool_take_control.return_value = False

    # Stream response containing one tool call so the tool-call branch runs.
    class DummyStreamResponse:
        def __init__(self):
            self.tool_calls = [MagicMock()]
            self.message = MagicMock()
            self.message.references = []
            self.message.text = ""

        def is_empty(self):
            return False

    mock_chat_service = MagicMock()
    mock_chat_service.complete_with_references_async = AsyncMock(
        return_value=DummyStreamResponse()
    )
    mock_chat_service.modify_assistant_message_async = AsyncMock(return_value=None)
    mock_chat_service.create_assistant_message_async = AsyncMock(
        return_value=MagicMock(id="assist_new")
    )
    mock_content_service = MagicMock()
    mock_history_manager.get_history_for_model_call = AsyncMock(
        return_value=MagicMock()
    )

    # Instantiate
    ua = UniqueAI(
        logger=mock_logger,
        event=dummy_event,
        config=mock_config,
        chat_service=mock_chat_service,
        content_service=mock_content_service,
        debug_info_manager=mock_debug_info_manager,
        reference_manager=mock_reference_manager,
        thinking_manager=mock_thinking_manager,
        tool_manager=mock_tool_manager,
        history_manager=mock_history_manager,
        evaluation_manager=MagicMock(),
        postprocessor_manager=MagicMock(),
        mcp_servers=[],
    )

    # Bypass Jinja template compilation by stubbing prompt renderers
    ua._render_user_prompt = AsyncMock(return_value="user")  # type: ignore
    ua._render_system_prompt = AsyncMock(return_value="system")  # type: ignore
    # Avoid creating new assistant messages path
    ua._thinking_manager.thinking_is_displayed = MagicMock(return_value=True)  # type: ignore

    # Spy on call order by recording sequence
    call_sequence = []

    def record_history_add(results):
        call_sequence.append("history_add")

    def record_reference_extract(results):
        call_sequence.append("reference_extract")

    def record_debug_extract(results):
        call_sequence.append("debug_extract")

    mock_history_manager.add_tool_call_results.side_effect = record_history_add
    mock_reference_manager.extract_referenceable_chunks.side_effect = (
        record_reference_extract
    )
    mock_debug_info_manager.extract_tool_debug_info.side_effect = record_debug_extract

    # Run a single iteration
    await ua.run()

    # Verify order: history first, then references, then debug
    assert call_sequence[:3] == [
        "history_add",
        "reference_extract",
        "debug_extract",
    ]
@@ -0,0 +1,375 @@
1
+ from datetime import datetime
2
+ from logging import Logger
3
+
4
+ import jinja2
5
+ from unique_toolkit.app.schemas import ChatEvent, McpServer
6
+ from unique_toolkit.chat.service import ChatService
7
+ from unique_toolkit.content.service import ContentService
8
+ from unique_toolkit.debug_info_manager.debug_info_manager import (
9
+ DebugInfoManager,
10
+ )
11
+ from unique_toolkit.evals.evaluation_manager import EvaluationManager
12
+ from unique_toolkit.history_manager.history_manager import HistoryManager
13
+ from unique_toolkit.language_model.schemas import (
14
+ LanguageModelAssistantMessage,
15
+ LanguageModelMessages,
16
+ LanguageModelStreamResponse,
17
+ )
18
+ from unique_toolkit.postprocessor.postprocessor_manager import (
19
+ PostprocessorManager,
20
+ )
21
+ from unique_toolkit.reference_manager.reference_manager import ReferenceManager
22
+ from unique_toolkit.thinking_manager.thinking_manager import ThinkingManager
23
+ from unique_toolkit.tools.tool_manager import ToolManager
24
+
25
+ from unique_orchestrator.config import UniqueAIConfig
26
+
27
# User-facing fallback shown when the model produces neither content nor a
# tool call (see UniqueAI._process_plan). Kept as a module-level constant so
# it can be referenced/asserted elsewhere. This is runtime output text; do
# not reword casually.
EMPTY_MESSAGE_WARNING = (
    "⚠️ **The language model was unable to produce an output.**\n"
    "It did not generate any content or perform a tool call in response to your request. "
    "This is a limitation of the language model itself.\n\n"
    "**Please try adapting or simplifying your prompt.** "
    "Rewording your input can often help the model respond successfully."
)
34
+
35
+
36
class UniqueAI:
    """Agentic loop orchestrator.

    Repeatedly asks the model to either answer or call tools, executes any
    requested tools, feeds results back into history, and exits once the
    model answers (or a tool takes control, or iterations run out).

    NOTE(review): ``start_text`` and ``current_iteration_index`` are class
    attributes used as per-instance mutable state; assignment in methods
    creates instance attributes, but sharing the class default across
    instances is fragile — consider initializing them in ``__init__``.
    """

    # Accumulated "thinking" prefix prepended to streamed answers.
    start_text = ""
    # Zero-based index of the current loop iteration.
    current_iteration_index = 0

    def __init__(
        self,
        logger: Logger,
        event: ChatEvent,
        config: UniqueAIConfig,
        chat_service: ChatService,
        content_service: ContentService,
        debug_info_manager: DebugInfoManager,
        reference_manager: ReferenceManager,
        thinking_manager: ThinkingManager,
        tool_manager: ToolManager,
        history_manager: HistoryManager,
        evaluation_manager: EvaluationManager,
        postprocessor_manager: PostprocessorManager,
        mcp_servers: list[McpServer],
    ):
        """Wire up all collaborators; performs no I/O itself."""
        self._logger = logger
        self._event = event
        self._config = config
        self._chat_service = chat_service
        self._content_service = content_service

        self._debug_info_manager = debug_info_manager
        self._reference_manager = reference_manager
        self._thinking_manager = thinking_manager
        self._tool_manager = tool_manager

        self._history_manager = history_manager

        self._evaluation_manager = evaluation_manager
        self._postprocessor_manager = postprocessor_manager
        # Tracks the assistant message currently being written to; updated when
        # a new assistant message is created mid-loop.
        self._latest_assistant_id: str = event.payload.assistant_message.id
        self._mcp_servers = mcp_servers

    ############################################################
    # Override of base methods
    ############################################################
    # @track(name="loop_agent_run")  # Group traces together
    async def run(self):
        """
        Main loop of the agent. The agent will iterate through the loop, runs the plan and
        processes tool calls if any are returned.
        """
        self._logger.info("Start LoopAgent...")

        if self._history_manager.has_no_loop_messages():  # TODO: why do we even need to check its always no loop messages on this when its called.
            # NOTE(review): this is the sync variant while the rest of this
            # coroutine awaits modify_assistant_message_async — confirm the
            # blocking call here is intentional.
            self._chat_service.modify_assistant_message(
                content="Starting agentic loop..."  # TODO: this must be more informative
            )

        ## Loop iteration
        for i in range(self._config.agent.max_loop_iterations):
            self.current_iteration_index = i
            self._logger.info(f"Starting iteration {i + 1}...")

            # Plan execution: ask the model for an answer and/or tool calls.
            loop_response = await self._plan_or_execute()
            self._logger.info("Done with _plan_or_execute")

            self._reference_manager.add_references(loop_response.message.references)
            self._logger.info("Done with adding references")

            # Update tool progress reporter
            self._thinking_manager.update_tool_progress_reporter(loop_response)

            # Execute the plan (runs tools, or decides we are done).
            exit_loop = await self._process_plan(loop_response)
            self._logger.info("Done with _process_plan")

            if exit_loop:
                self._thinking_manager.close_thinking_steps(loop_response)
                self._logger.info("Exiting loop.")
                break

            # Last iteration reached without an exit condition: inform the user.
            if i == self._config.agent.max_loop_iterations - 1:
                self._logger.error("Max iterations reached.")
                await self._chat_service.modify_assistant_message_async(
                    content="I have reached the maximum number of self-reflection iterations. Please clarify your request and try again...",
                )
                break

            self.start_text = self._thinking_manager.update_start_text(
                self.start_text, loop_response
            )
            await self._create_new_assistant_message_if_loop_response_contains_content(
                loop_response
            )

        # Mark the assistant message as completed regardless of exit path.
        await self._chat_service.modify_assistant_message_async(
            set_completed_at=True,
        )

    # @track()
    async def _plan_or_execute(self) -> LanguageModelStreamResponse:
        """Run one model call and return its stream response.

        Three branches:
        1. First iteration with forced tools: one completion per forced tool
           (via ``toolChoice``), then tool calls/references are merged into
           the first response.
        2. Last iteration: completion without tools, forcing a final answer.
        3. Otherwise: completion with tools available; the model decides.
        """
        self._logger.info("Planning or executing the loop.")
        messages = await self._compose_message_plan_execution()

        self._logger.info("Done composing message plan execution.")

        # Forces tool calls only in first iteration
        if (
            len(self._tool_manager.get_forced_tools()) > 0
            and self.current_iteration_index == 0
        ):
            self._logger.info("Its needs forced tool calls.")
            self._logger.info(f"Forced tools: {self._tool_manager.get_forced_tools()}")
            # One sequential completion per forced tool option; each call pins
            # the model to that tool via the merged "toolChoice" option.
            responses = [
                await self._chat_service.complete_with_references_async(
                    messages=messages,
                    model_name=self._config.space.language_model.name,
                    tools=self._tool_manager.get_tool_definitions(),
                    content_chunks=self._reference_manager.get_chunks(),
                    start_text=self.start_text,
                    debug_info=self._debug_info_manager.get(),
                    temperature=self._config.agent.experimental.temperature,
                    other_options=self._config.agent.experimental.additional_llm_options
                    | {"toolChoice": opt},
                )
                for opt in self._tool_manager.get_forced_tools()
            ]

            # Merge responses and refs: reuse the first response as carrier.
            tool_calls = []
            references = []
            for r in responses:
                if r.tool_calls:
                    tool_calls.extend(r.tool_calls)
                references.extend(r.message.references)

            stream_response = responses[0]
            stream_response.tool_calls = tool_calls if len(tool_calls) > 0 else None
            stream_response.message.references = references
        elif self.current_iteration_index == self._config.agent.max_loop_iterations - 1:
            self._logger.info(
                "we are in the last iteration we need to produce an answer now"
            )
            # No tool calls in last iteration
            stream_response = await self._chat_service.complete_with_references_async(
                messages=messages,
                model_name=self._config.space.language_model.name,
                content_chunks=self._reference_manager.get_chunks(),
                start_text=self.start_text,
                debug_info=self._debug_info_manager.get(),
                temperature=self._config.agent.experimental.temperature,
                other_options=self._config.agent.experimental.additional_llm_options,
            )

        else:
            self._logger.info(
                f"we are in the iteration {self.current_iteration_index} asking the model to tell if we should use tools or if it will just stream"
            )
            stream_response = await self._chat_service.complete_with_references_async(
                messages=messages,
                model_name=self._config.space.language_model.name,
                tools=self._tool_manager.get_tool_definitions(),
                content_chunks=self._reference_manager.get_chunks(),
                start_text=self.start_text,
                debug_info=self._debug_info_manager.get(),
                temperature=self._config.agent.experimental.temperature,
                other_options=self._config.agent.experimental.additional_llm_options,
            )

        return stream_response

    async def _process_plan(self, loop_response: LanguageModelStreamResponse) -> bool:
        """Dispatch on the model response; return True to exit the loop.

        Empty response -> warn the user and exit. Tool calls -> execute them
        (exit only if a tool takes control). No tool calls -> evaluate and
        postprocess the answer, then exit.
        """
        self._logger.info(
            "Processing the plan, executing the tools and checking for loop exit conditions once all is done."
        )

        if loop_response.is_empty():
            self._logger.debug("Empty model response, exiting loop.")
            # NOTE(review): sync variant used here while async is used
            # elsewhere in this class — confirm this is intentional.
            self._chat_service.modify_assistant_message(content=EMPTY_MESSAGE_WARNING)
            return True

        call_tools = len(loop_response.tool_calls or []) > 0
        if call_tools:
            self._logger.debug(
                "Tools were called we process them and do not exit the loop"
            )

            return await self._handle_tool_calls(loop_response)

        self._logger.debug("No tool calls. we might exit the loop")

        return await self._handle_no_tool_calls(loop_response)

    async def _compose_message_plan_execution(self) -> LanguageModelMessages:
        """Build the message list for the next model call.

        Renders user and system prompts, then delegates to the history
        manager which combines them with prior loop history (postprocessor
        text removal applied via the passed callback).
        """
        original_user_message = self._event.payload.user_message.text
        rendered_user_message_string = await self._render_user_prompt()
        rendered_system_message_string = await self._render_system_prompt()

        messages = await self._history_manager.get_history_for_model_call(
            original_user_message,
            rendered_user_message_string,
            rendered_system_message_string,
            self._postprocessor_manager.remove_from_text,
        )
        return messages

    async def _render_user_prompt(self) -> str:
        """Render the configured Jinja user-message template.

        Template variables: query, tool_descriptions, used_tools,
        mcp_server_user_prompts, tool_descriptions_with_user_prompts.
        """
        user_message_template = jinja2.Template(
            self._config.agent.prompt_config.user_message_prompt_template
        )

        tool_descriptions_with_user_prompts = [
            prompts.tool_user_prompt
            for prompts in self._tool_manager.get_tool_prompts()
        ]

        used_tools = [m.name for m in self._tool_manager.get_tools()]

        mcp_server_user_prompts = [
            mcp_server.user_prompt for mcp_server in self._mcp_servers
        ]

        tool_descriptions = self._tool_manager.get_tool_prompts()

        query = self._event.payload.user_message.text

        user_msg = user_message_template.render(
            query=query,
            tool_descriptions=tool_descriptions,
            used_tools=used_tools,
            mcp_server_user_prompts=list(mcp_server_user_prompts),
            tool_descriptions_with_user_prompts=tool_descriptions_with_user_prompts,
        )
        return user_msg

    async def _render_system_prompt(
        self,
    ) -> str:
        """Render the configured Jinja system-prompt template.

        Injects model info, current date, tool/MCP prompts, project settings,
        and loop-progress numbers (1-based current iteration).
        """
        # TODO: Collect tool information here and adapt to system prompt
        tool_descriptions = self._tool_manager.get_tool_prompts()

        used_tools = [m.name for m in self._tool_manager.get_tools()]

        system_prompt_template = jinja2.Template(
            self._config.agent.prompt_config.system_prompt_template
        )

        date_string = datetime.now().strftime("%A %B %d, %Y")

        mcp_server_system_prompts = [
            mcp_server.system_prompt for mcp_server in self._mcp_servers
        ]

        system_message = system_prompt_template.render(
            model_info=self._config.space.language_model.model_dump(mode="json"),
            date_string=date_string,
            tool_descriptions=tool_descriptions,
            used_tools=used_tools,
            project_name=self._config.space.project_name,
            custom_instructions=self._config.space.custom_instructions,
            max_tools_per_iteration=self._config.agent.experimental.loop_configuration.max_tool_calls_per_iteration,
            max_loop_iterations=self._config.agent.max_loop_iterations,
            current_iteration=self.current_iteration_index + 1,
            mcp_server_system_prompts=mcp_server_system_prompts,
        )
        return system_message

    async def _handle_no_tool_calls(
        self, loop_response: LanguageModelStreamResponse
    ) -> bool:
        """Handle the case where no tool calls are returned.

        Runs evaluations and postprocessors, then always returns True
        (exit the loop) — negative evaluations currently only log a warning.
        """
        selected_evaluation_names = self._tool_manager.get_evaluation_check_list()
        evaluation_results = await self._evaluation_manager.run_evaluations(
            selected_evaluation_names, loop_response, self._latest_assistant_id
        )

        await self._postprocessor_manager.run_postprocessors(loop_response)

        if not all(result.is_positive for result in evaluation_results):
            self._logger.warning(
                "we should add here the retry counter add an instruction and retry the loop for now we just exit the loop"
            )  # TODO: add retry counter and instruction

        return True

    async def _handle_tool_calls(
        self, loop_response: LanguageModelStreamResponse
    ) -> bool:
        """Handle the case where tool calls are returned.

        Returns True (exit the loop) only if one of the called tools takes
        control of the conversation.
        """
        self._logger.info("Processing tool calls")

        tool_calls = loop_response.tool_calls or []

        # Append function call to history
        # NOTE(review): calls a private method on the history manager —
        # consider promoting it to a public API.
        self._history_manager._append_tool_calls_to_history(tool_calls)

        # Execute tool calls
        tool_call_responses = await self._tool_manager.execute_selected_tools(
            tool_calls
        )

        # Process results with error handling
        # Add tool call results to history first to stabilize source numbering,
        # then extract referenceable chunks and debug info. This order is
        # asserted by tests; do not reorder.
        self._history_manager.add_tool_call_results(tool_call_responses)
        self._reference_manager.extract_referenceable_chunks(tool_call_responses)
        self._debug_info_manager.extract_tool_debug_info(tool_call_responses)

        return self._tool_manager.does_a_tool_take_control(tool_calls)

    async def _create_new_assistant_message_if_loop_response_contains_content(
        self, loop_response: LanguageModelStreamResponse
    ) -> None:
        """Start a fresh assistant message when the model produced visible text.

        Skipped when thinking steps are displayed (content stays inline) or
        when the response text is empty once the start_text prefix is removed.
        """
        if self._thinking_manager.thinking_is_displayed():
            return
        if not loop_response.message.text:
            return

        # if anything sets the start text the model did not produce content.
        # So we need to remove that text from the message.
        message_text_without_start_text = loop_response.message.text.replace(
            self.start_text.strip(), ""
        ).strip()
        if message_text_without_start_text == "":
            return

        ###
        # ToDo: Once references on existing assistant messages can be deleted, we will switch from creating a new assistant message to modifying the existing one (with previous references deleted)
        ###
        new_assistant_message = await self._chat_service.create_assistant_message_async(
            content=""
        )

        # the new message must have an id that is valid else we use the old one
        self._latest_assistant_id = (
            new_assistant_message.id or self._latest_assistant_id
        )

        self._history_manager.add_assistant_message(
            LanguageModelAssistantMessage(
                content=loop_response.message.original_text or "",
            )
        )