unique_orchestrator 0.0.4__py3-none-any.whl → 1.7.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,4 +1,5 @@
-from datetime import datetime
+import asyncio
+from datetime import datetime, timezone
 from logging import Logger
 
 import jinja2
@@ -7,20 +8,29 @@ from unique_toolkit.agentic.debug_info_manager.debug_info_manager import (
 )
 from unique_toolkit.agentic.evaluation.evaluation_manager import EvaluationManager
 from unique_toolkit.agentic.history_manager.history_manager import HistoryManager
+from unique_toolkit.agentic.message_log_manager.service import MessageStepLogger
 from unique_toolkit.agentic.postprocessor.postprocessor_manager import (
     PostprocessorManager,
 )
 from unique_toolkit.agentic.reference_manager.reference_manager import ReferenceManager
 from unique_toolkit.agentic.thinking_manager.thinking_manager import ThinkingManager
-from unique_toolkit.agentic.tools.tool_manager import ToolManager
+from unique_toolkit.agentic.tools.tool_manager import (
+    ResponsesApiToolManager,
+    SafeTaskExecutor,
+    ToolManager,
+)
 from unique_toolkit.app.schemas import ChatEvent, McpServer
 from unique_toolkit.chat.service import ChatService
 from unique_toolkit.content.service import ContentService
+from unique_toolkit.language_model import LanguageModelAssistantMessage
 from unique_toolkit.language_model.schemas import (
-    LanguageModelAssistantMessage,
     LanguageModelMessages,
     LanguageModelStreamResponse,
 )
+from unique_toolkit.protocols.support import (
+    ResponsesSupportCompleteWithReferences,
+    SupportCompleteWithReferences,
+)
 
 from unique_orchestrator.config import UniqueAIConfig
 
@@ -45,12 +55,14 @@ class UniqueAI:
         chat_service: ChatService,
         content_service: ContentService,
         debug_info_manager: DebugInfoManager,
+        streaming_handler: SupportCompleteWithReferences,
         reference_manager: ReferenceManager,
         thinking_manager: ThinkingManager,
         tool_manager: ToolManager,
         history_manager: HistoryManager,
         evaluation_manager: EvaluationManager,
         postprocessor_manager: PostprocessorManager,
+        message_step_logger: MessageStepLogger,
         mcp_servers: list[McpServer],
     ):
         self._logger = logger
@@ -70,6 +82,11 @@ class UniqueAI:
         self._postprocessor_manager = postprocessor_manager
         self._latest_assistant_id: str = event.payload.assistant_message.id
         self._mcp_servers = mcp_servers
+        self._streaming_handler = streaming_handler
+
+        self._message_step_logger = message_step_logger
+        # Helper flag for the control loop: tracks whether a tool took control
+        self._tool_took_control = False
 
     ############################################################
     # Override of base methods
@@ -121,12 +138,10 @@ class UniqueAI:
         self.start_text = self._thinking_manager.update_start_text(
             self.start_text, loop_response
         )
-        await self._create_new_assistant_message_if_loop_response_contains_content(
-            loop_response
-        )
 
+        # Only set completed_at if no tool took control. Tools that take control will set the message state to completed themselves.
         await self._chat_service.modify_assistant_message_async(
-            set_completed_at=True,
+            set_completed_at=not self._tool_took_control,
         )
 
     # @track()
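
The `set_completed_at=not self._tool_took_control` change pairs with the `_handle_tool_calls` hunk further down: the flag is reset in `__init__`, set when `does_a_tool_take_control` reports a controlling tool, and consulted here so that such tools can finalize the assistant message themselves. A minimal sketch of that handshake, with a hypothetical controlling tool standing in for the toolkit's real check:

```python
import asyncio


class ControlLoopSketch:
    """Illustrates the completed_at handshake; not the shipped class."""

    def __init__(self) -> None:
        self._tool_took_control = False  # reset once per run, as in __init__

    async def handle_tool_calls(self, tool_calls: list[str]) -> bool:
        # Stand-in for ToolManager.does_a_tool_take_control(tool_calls)
        self._tool_took_control = "HypotheticalControllingTool" in tool_calls
        return self._tool_took_control

    async def finish_message(self) -> dict[str, bool]:
        # Mirrors modify_assistant_message_async(set_completed_at=...):
        # a controlling tool owns the message state, so we must not stamp it
        return {"set_completed_at": not self._tool_took_control}


async def _demo() -> None:
    loop = ControlLoopSketch()
    await loop.handle_tool_calls(["HypotheticalControllingTool"])
    assert await loop.finish_message() == {"set_completed_at": False}


asyncio.run(_demo())
```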
@@ -144,7 +159,7 @@
         self._logger.info("It needs forced tool calls.")
         self._logger.info(f"Forced tools: {self._tool_manager.get_forced_tools()}")
         responses = [
-            await self._chat_service.complete_with_references_async(
+            await self._streaming_handler.complete_with_references_async(
                 messages=messages,
                 model_name=self._config.space.language_model.name,
                 tools=self._tool_manager.get_tool_definitions(),
@@ -152,8 +167,8 @@
                 start_text=self.start_text,
                 debug_info=self._debug_info_manager.get(),
                 temperature=self._config.agent.experimental.temperature,
-                other_options=self._config.agent.experimental.additional_llm_options
-                | {"toolChoice": opt},
+                tool_choice=opt,
+                other_options=self._config.agent.experimental.additional_llm_options,
             )
             for opt in self._tool_manager.get_forced_tools()
         ]
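
This hunk changes how forced tools are requested: instead of merging `{"toolChoice": opt}` into `other_options`, the loop passes `tool_choice` as a first-class argument to the new streaming handler, one request per forced tool. A sketch of the resulting call shape; the handler signature is inferred from this diff rather than from unique_toolkit's documented API:

```python
from typing import Any


async def force_tool_call(
    streaming_handler: Any,
    messages: Any,
    model_name: str,
    additional_llm_options: dict[str, Any],
    opt: dict[str, Any],
) -> Any:
    # tool_choice carries the forced tool directly; other_options is now a
    # plain pass-through instead of being dict-merged with {"toolChoice": opt}
    return await streaming_handler.complete_with_references_async(
        messages=messages,
        model_name=model_name,
        tool_choice=opt,
        other_options=additional_llm_options,
    )
```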
@@ -174,7 +189,7 @@
             "we are in the last iteration; we need to produce an answer now"
         )
         # No tool calls in last iteration
-        stream_response = await self._chat_service.complete_with_references_async(
+        stream_response = await self._streaming_handler.complete_with_references_async(
             messages=messages,
             model_name=self._config.space.language_model.name,
             content_chunks=self._reference_manager.get_chunks(),
@@ -188,7 +203,7 @@
         self._logger.info(
             f"we are in iteration {self.current_iteration_index}, asking the model whether it should call tools or just stream an answer"
         )
-        stream_response = await self._chat_service.complete_with_references_async(
+        stream_response = await self._streaming_handler.complete_with_references_async(
             messages=messages,
             model_name=self._config.space.language_model.name,
             tools=self._tool_manager.get_tool_definitions(),
@@ -216,6 +231,9 @@
         self._logger.debug(
             "Tools were called; we process them and do not exit the loop"
         )
+        await self._create_new_assistant_message_if_loop_response_contains_content(
+            loop_response
+        )
 
         return await self._handle_tool_calls(loop_response)
 
@@ -246,32 +264,52 @@
             for prompts in self._tool_manager.get_tool_prompts()
         ]
 
-        used_tools = [m.name for m in self._tool_manager.get_tools()]
+        used_tools = [t.name for t in self._history_manager.get_tool_calls()]
+        sub_agent_calls = self._tool_manager.filter_tool_calls(
+            self._history_manager.get_tool_calls(), ["subagent"]
+        )
 
         mcp_server_user_prompts = [
             mcp_server.user_prompt for mcp_server in self._mcp_servers
         ]
 
+        user_metadata = self._get_filtered_user_metadata()
+
         tool_descriptions = self._tool_manager.get_tool_prompts()
 
         query = self._event.payload.user_message.text
 
+        if (
+            self._config.agent.experimental.sub_agents_config.referencing_config
+            is not None
+            and len(sub_agent_calls) > 0
+        ):
+            use_sub_agent_references = True
+            sub_agent_referencing_instructions = self._config.agent.experimental.sub_agents_config.referencing_config.referencing_instructions_for_user_prompt
+        else:
+            use_sub_agent_references = False
+            sub_agent_referencing_instructions = None
+
         user_msg = user_message_template.render(
             query=query,
             tool_descriptions=tool_descriptions,
             used_tools=used_tools,
             mcp_server_user_prompts=list(mcp_server_user_prompts),
             tool_descriptions_with_user_prompts=tool_descriptions_with_user_prompts,
+            use_sub_agent_references=use_sub_agent_references,
+            sub_agent_referencing_instructions=sub_agent_referencing_instructions,
+            user_metadata=user_metadata,
         )
         return user_msg
 
-    async def _render_system_prompt(
-        self,
-    ) -> str:
+    async def _render_system_prompt(self) -> str:
         # TODO: Collect tool information here and adapt to system prompt
         tool_descriptions = self._tool_manager.get_tool_prompts()
 
-        used_tools = [m.name for m in self._tool_manager.get_tools()]
+        used_tools = [t.name for t in self._history_manager.get_tool_calls()]
+        sub_agent_calls = self._tool_manager.filter_tool_calls(
+            self._history_manager.get_tool_calls(), ["subagent"]
+        )
 
         system_prompt_template = jinja2.Template(
             self._config.agent.prompt_config.system_prompt_template
@@ -279,10 +317,31 @@
 
         date_string = datetime.now().strftime("%A %B %d, %Y")
 
+        user_metadata = self._get_filtered_user_metadata()
+
         mcp_server_system_prompts = [
             mcp_server.system_prompt for mcp_server in self._mcp_servers
         ]
 
+        if (
+            self._config.agent.experimental.sub_agents_config.referencing_config
+            is not None
+            and len(sub_agent_calls) > 0
+        ):
+            use_sub_agent_references = True
+            sub_agent_referencing_instructions = self._config.agent.experimental.sub_agents_config.referencing_config.referencing_instructions_for_system_prompt
+        else:
+            use_sub_agent_references = False
+            sub_agent_referencing_instructions = None
+
+        uploaded_documents = self._content_service.get_documents_uploaded_to_chat()
+        uploaded_documents_expired = [
+            doc
+            for doc in uploaded_documents
+            if doc.expired_at is not None
+            and doc.expired_at <= datetime.now(timezone.utc)
+        ]
+
         system_message = system_prompt_template.render(
             model_info=self._config.space.language_model.model_dump(mode="json"),
             date_string=date_string,
@@ -294,6 +353,10 @@
             max_loop_iterations=self._config.agent.max_loop_iterations,
             current_iteration=self.current_iteration_index + 1,
             mcp_server_system_prompts=mcp_server_system_prompts,
+            use_sub_agent_references=use_sub_agent_references,
+            sub_agent_referencing_instructions=sub_agent_referencing_instructions,
+            user_metadata=user_metadata,
+            uploaded_documents_expired=uploaded_documents_expired,
         )
         return system_message
 
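The new `render` kwargs (`use_sub_agent_references`, `sub_agent_referencing_instructions`, `user_metadata`, `uploaded_documents_expired`) only take effect if the configured prompt templates consume them. A hypothetical Jinja2 fragment showing one way they could be used; the shipped default templates are not part of this diff, and the `doc.key` attribute is an illustrative assumption:

```python
import jinja2

template = jinja2.Template(
    "{% if use_sub_agent_references %}{{ sub_agent_referencing_instructions }}\n{% endif %}"
    "{% for key, value in user_metadata.items() %}{{ key }}: {{ value }}\n{% endfor %}"
    "{% if uploaded_documents_expired %}Expired uploads:\n"
    "{% for doc in uploaded_documents_expired %}- {{ doc.key }}\n{% endfor %}"
    "{% endif %}"
)

print(
    template.render(
        use_sub_agent_references=True,
        sub_agent_referencing_instructions="Cite sub-agent results inline.",
        user_metadata={"department": "Legal"},
        uploaded_documents_expired=[],  # content objects with an expired_at field
    )
)
```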
@@ -301,20 +364,62 @@
         self, loop_response: LanguageModelStreamResponse
     ) -> bool:
         """Handle the case where no tool calls are returned."""
+        task_executor = SafeTaskExecutor(
+            logger=self._logger,
+        )
+
         selected_evaluation_names = self._tool_manager.get_evaluation_check_list()
-        evaluation_results = await self._evaluation_manager.run_evaluations(
-            selected_evaluation_names, loop_response, self._latest_assistant_id
+        evaluation_results = task_executor.execute_async(
+            self._evaluation_manager.run_evaluations,
+            selected_evaluation_names,
+            loop_response,
+            self._latest_assistant_id,
         )
 
-        await self._postprocessor_manager.run_postprocessors(loop_response)
+        postprocessor_result = task_executor.execute_async(
+            self._postprocessor_manager.run_postprocessors,
+            loop_response.model_copy(deep=True),
+        )
 
-        if not all(result.is_positive for result in evaluation_results):
+        _, evaluation_results = await asyncio.gather(
+            postprocessor_result,
+            evaluation_results,
+        )
+
+        if evaluation_results.success and not all(
+            result.is_positive for result in evaluation_results.unpack()
+        ):
             self._logger.warning(
                 "we should add a retry counter and an instruction here and retry the loop; for now we just exit the loop"
             )  # TODO: add retry counter and instruction
 
         return True
 
+    def _log_tool_calls(self, tool_calls: list) -> None:
+        # Create dictionary mapping tool names to display names for efficient lookup
+        all_tools_dict: dict[str, str] = {
+            tool.name: tool.display_name()
+            for tool in self._tool_manager.available_tools
+        }
+
+        # Tool names that should not be logged in the message steps
+        tool_names_not_to_log = ["DeepResearch"]
+
+        tool_string: str = ""
+        used_tools = []
+        for tool_call in tool_calls:
+            if (
+                tool_call.name in all_tools_dict
+                and tool_call.name not in tool_names_not_to_log
+            ):
+                if tool_call.name not in used_tools:
+                    used_tools.append(tool_call.name)
+                    tool_name = all_tools_dict[tool_call.name] or tool_call.name
+                    tool_string += f"\n• {tool_name}"
+            self._history_manager.add_tool_call(tool_call)
+
+        if tool_string:
+            self._message_step_logger.create_message_log_entry(
+                text=f"**Triggered Tool Calls:**\n {tool_string}", references=[]
+            )
+
     async def _handle_tool_calls(
         self, loop_response: LanguageModelStreamResponse
     ) -> bool:
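
`SafeTaskExecutor` turns the previously sequential evaluation and postprocessing awaits into concurrent tasks whose failures are contained rather than raised. Only the surface used in this hunk is visible: `execute_async(fn, *args)` returns an awaitable whose result exposes `.success` and `.unpack()`. A stand-in with that surface, showing why the new `evaluation_results.success` guard is needed before checking `is_positive`:

```python
import asyncio
import logging
from dataclasses import dataclass
from typing import Any, Awaitable, Callable


@dataclass
class _Result:
    success: bool
    value: Any = None

    def unpack(self) -> Any:
        return self.value


class SafeTaskExecutorSketch:
    """Stand-in for unique_toolkit's SafeTaskExecutor; the real API may differ."""

    def __init__(self, logger: logging.Logger) -> None:
        self._logger = logger

    async def execute_async(
        self, fn: Callable[..., Awaitable[Any]], *args: Any
    ) -> _Result:
        try:
            return _Result(success=True, value=await fn(*args))
        except Exception:
            # Failures are logged and reported via the result, never raised
            self._logger.exception("background task failed")
            return _Result(success=False)


async def _demo() -> None:
    executor = SafeTaskExecutorSketch(logging.getLogger(__name__))

    async def run_evaluations() -> list[bool]:
        return [True, False]

    async def run_postprocessors() -> None:
        await asyncio.sleep(0)

    # Same shape as the hunk above: both awaitables run under gather, and a
    # failed evaluation task simply skips the is_positive check
    post, evals = await asyncio.gather(
        executor.execute_async(run_postprocessors),
        executor.execute_async(run_evaluations),
    )
    if evals.success and not all(evals.unpack()):
        print("at least one evaluation came back negative")


asyncio.run(_demo())
```

Note also that the postprocessors now receive `loop_response.model_copy(deep=True)`, so they can no longer mutate the response object that the evaluations see while both run concurrently.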
@@ -323,9 +428,11 @@
 
         tool_calls = loop_response.tool_calls or []
 
-        # Append function call to history
+        # Append function calls to history
         self._history_manager._append_tool_calls_to_history(tool_calls)
 
+        # Log tool calls
+        self._log_tool_calls(tool_calls)
         # Execute tool calls
         tool_call_responses = await self._tool_manager.execute_selected_tools(
             tool_calls
@@ -336,9 +443,14 @@
         # then extract referenceable chunks and debug info
         self._history_manager.add_tool_call_results(tool_call_responses)
         self._reference_manager.extract_referenceable_chunks(tool_call_responses)
-        self._debug_info_manager.extract_tool_debug_info(tool_call_responses)
+        self._debug_info_manager.extract_tool_debug_info(
+            tool_call_responses, self.current_iteration_index
+        )
 
-        return self._tool_manager.does_a_tool_take_control(tool_calls)
+        self._tool_took_control = self._tool_manager.does_a_tool_take_control(
+            tool_calls
+        )
+        return self._tool_took_control
 
     async def _create_new_assistant_message_if_loop_response_contains_content(
         self, loop_response: LanguageModelStreamResponse
@@ -373,3 +485,61 @@
                 content=loop_response.message.original_text or "",
             )
         )
+
+    def _get_filtered_user_metadata(self) -> dict[str, str]:
+        """
+        Filter user metadata to only include keys specified in the agent's prompt config.
+
+        Returns:
+            Dictionary containing only the metadata keys that are configured to be included.
+        """
+        user_metadata = {}
+        if (
+            self._config.agent.prompt_config.user_metadata
+            and self._event.payload.user_metadata is not None
+        ):
+            # Filter metadata to only include selected keys
+            user_metadata = {
+                k: str(v)
+                for k, v in self._event.payload.user_metadata.items()
+                if k in self._config.agent.prompt_config.user_metadata
+            }
+        return user_metadata
+
+
+class UniqueAIResponsesApi(UniqueAI):
+    def __init__(
+        self,
+        logger: Logger,
+        event: ChatEvent,
+        config: UniqueAIConfig,
+        chat_service: ChatService,
+        content_service: ContentService,
+        debug_info_manager: DebugInfoManager,
+        streaming_handler: ResponsesSupportCompleteWithReferences,
+        reference_manager: ReferenceManager,
+        thinking_manager: ThinkingManager,
+        tool_manager: ResponsesApiToolManager,
+        history_manager: HistoryManager,
+        evaluation_manager: EvaluationManager,
+        postprocessor_manager: PostprocessorManager,
+        message_step_logger: MessageStepLogger,
+        mcp_servers: list[McpServer],
+    ) -> None:
+        super().__init__(
+            logger,
+            event=event,
+            config=config,
+            chat_service=chat_service,
+            content_service=content_service,
+            debug_info_manager=debug_info_manager,
+            streaming_handler=streaming_handler,  # type: ignore
+            reference_manager=reference_manager,
+            thinking_manager=thinking_manager,
+            tool_manager=tool_manager,  # type: ignore
+            history_manager=history_manager,
+            evaluation_manager=evaluation_manager,
+            postprocessor_manager=postprocessor_manager,
+            message_step_logger=message_step_logger,
+            mcp_servers=mcp_servers,
+        )
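
`UniqueAIResponsesApi` reuses the whole base control loop and only swaps in Responses-API collaborators; the `# type: ignore` markers exist because the base `__init__` annotates the narrower chat-API types (`SupportCompleteWithReferences`, `ToolManager`). The imports at the top of the diff suggest a structural-typing seam roughly like the following sketch; the real definitions live in `unique_toolkit.protocols.support` and may differ:

```python
from typing import Any, Protocol


class CompleteWithReferencesLike(Protocol):
    """Rough shape the two streaming-handler protocols appear to share."""

    async def complete_with_references_async(self, **kwargs: Any) -> Any: ...
```

Typing the base class against such a shared protocol, or making it generic over the handler and tool-manager types, would remove the need for the `# type: ignore` overrides.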