unique_orchestrator 1.3.0__py3-none-any.whl → 1.4.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of unique_orchestrator might be problematic; see the release details below for more information.

@@ -3,6 +3,7 @@ from pathlib import Path
3
3
  from typing import Annotated, Any, Generic, Literal, TypeVar
4
4
 
5
5
  from pydantic import BaseModel, Field, ValidationInfo, field_validator, model_validator
6
+ from pydantic.json_schema import SkipJsonSchema
6
7
  from unique_deep_research.config import DeepResearchToolConfig
7
8
  from unique_deep_research.service import DeepResearchTool
8
9
  from unique_follow_up_questions.config import FollowUpQuestionsConfig
@@ -21,6 +22,7 @@ from unique_toolkit.agentic.evaluation.schemas import EvaluationMetricName
21
22
  from unique_toolkit.agentic.history_manager.history_manager import (
22
23
  UploadedContentConfig,
23
24
  )
25
+ from unique_toolkit.agentic.responses_api import ShowExecutedCodePostprocessorConfig
24
26
  from unique_toolkit.agentic.tools.a2a import (
25
27
  REFERENCING_INSTRUCTIONS_FOR_SYSTEM_PROMPT,
26
28
  REFERENCING_INSTRUCTIONS_FOR_USER_PROMPT,
@@ -226,6 +228,40 @@ class SubAgentsConfig(BaseModel):
226
228
  ) = SubAgentEvaluationServiceConfig()
227
229
 
228
230
 
231
+ class ResponsesApiConfig(BaseModel):
232
+ model_config = get_configuration_dict(frozen=True)
233
+
234
+ use_responses_api: bool = Field(
235
+ default=False,
236
+ description="Whether to use the responses API instead of the completions API.",
237
+ )
238
+ code_interpreter_display_config: (
239
+ Annotated[
240
+ ShowExecutedCodePostprocessorConfig,
241
+ Field(title="Active"),
242
+ ]
243
+ | DeactivatedNone
244
+ ) = ShowExecutedCodePostprocessorConfig()
245
+
246
+ use_direct_azure_client: SkipJsonSchema[bool] = Field(
247
+ default=True,
248
+ description="[TEMPORARY] Whether to use the direct Azure client instead of the responses API.",
249
+ )
250
+ direct_azure_client_api_base_env_var: SkipJsonSchema[str] = Field(
251
+ default="OPENAI_BASE_URL",
252
+ description="[TEMPORARY] The environment variable that contains the API base for the direct Azure client.",
253
+ )
254
+ direct_azure_client_api_key_env_var: SkipJsonSchema[str] = Field(
255
+ default="OPENAI_API_KEY",
256
+ description="[TEMPORARY] The environment variable that contains the API key for the direct Azure client.",
257
+ )
258
+
259
+ generated_files_scope_id: str = Field(
260
+ default="<SCOPE_ID_PLACEHOLDER>",
261
+ description="Scope ID for the responses API.",
262
+ )
263
+
264
+
229
265
  class ExperimentalConfig(BaseModel):
230
266
  """Experimental features this part of the configuration might evolve in the future continuously"""
231
267
 
@@ -259,6 +295,8 @@ class ExperimentalConfig(BaseModel):
259
295
 
260
296
  sub_agents_config: SubAgentsConfig = SubAgentsConfig()
261
297
 
298
+ responses_api_config: ResponsesApiConfig = ResponsesApiConfig()
299
+
262
300
 
263
301
  class UniqueAIAgentConfig(BaseModel):
264
302
  model_config = get_configuration_dict(frozen=True)
@@ -12,15 +12,22 @@ from unique_toolkit.agentic.postprocessor.postprocessor_manager import (
12
12
  )
13
13
  from unique_toolkit.agentic.reference_manager.reference_manager import ReferenceManager
14
14
  from unique_toolkit.agentic.thinking_manager.thinking_manager import ThinkingManager
15
- from unique_toolkit.agentic.tools.tool_manager import ToolManager
15
+ from unique_toolkit.agentic.tools.tool_manager import (
16
+ ResponsesApiToolManager,
17
+ ToolManager,
18
+ )
16
19
  from unique_toolkit.app.schemas import ChatEvent, McpServer
17
20
  from unique_toolkit.chat.service import ChatService
18
21
  from unique_toolkit.content.service import ContentService
22
+ from unique_toolkit.language_model import LanguageModelAssistantMessage
19
23
  from unique_toolkit.language_model.schemas import (
20
- LanguageModelAssistantMessage,
21
24
  LanguageModelMessages,
22
25
  LanguageModelStreamResponse,
23
26
  )
27
+ from unique_toolkit.protocols.support import (
28
+ ResponsesSupportCompleteWithReferences,
29
+ SupportCompleteWithReferences,
30
+ )
24
31
 
25
32
  from unique_orchestrator.config import UniqueAIConfig
26
33
 
@@ -45,6 +52,7 @@ class UniqueAI:
45
52
  chat_service: ChatService,
46
53
  content_service: ContentService,
47
54
  debug_info_manager: DebugInfoManager,
55
+ streaming_handler: SupportCompleteWithReferences,
48
56
  reference_manager: ReferenceManager,
49
57
  thinking_manager: ThinkingManager,
50
58
  tool_manager: ToolManager,
@@ -70,6 +78,7 @@ class UniqueAI:
70
78
  self._postprocessor_manager = postprocessor_manager
71
79
  self._latest_assistant_id: str = event.payload.assistant_message.id
72
80
  self._mcp_servers = mcp_servers
81
+ self._streaming_handler = streaming_handler
73
82
 
74
83
  # Helper variable to support control loop
75
84
  self._tool_took_control = False
@@ -148,7 +157,7 @@ class UniqueAI:
148
157
  self._logger.info("Its needs forced tool calls.")
149
158
  self._logger.info(f"Forced tools: {self._tool_manager.get_forced_tools()}")
150
159
  responses = [
151
- await self._chat_service.complete_with_references_async(
160
+ await self._streaming_handler.complete_with_references_async(
152
161
  messages=messages,
153
162
  model_name=self._config.space.language_model.name,
154
163
  tools=self._tool_manager.get_tool_definitions(),
@@ -156,8 +165,8 @@ class UniqueAI:
156
165
  start_text=self.start_text,
157
166
  debug_info=self._debug_info_manager.get(),
158
167
  temperature=self._config.agent.experimental.temperature,
159
- other_options=self._config.agent.experimental.additional_llm_options
160
- | {"toolChoice": opt},
168
+ tool_choice=opt,
169
+ other_options=self._config.agent.experimental.additional_llm_options,
161
170
  )
162
171
  for opt in self._tool_manager.get_forced_tools()
163
172
  ]
@@ -178,7 +187,7 @@ class UniqueAI:
178
187
  "we are in the last iteration we need to produce an answer now"
179
188
  )
180
189
  # No tool calls in last iteration
181
- stream_response = await self._chat_service.complete_with_references_async(
190
+ stream_response = await self._streaming_handler.complete_with_references_async(
182
191
  messages=messages,
183
192
  model_name=self._config.space.language_model.name,
184
193
  content_chunks=self._reference_manager.get_chunks(),
@@ -192,7 +201,7 @@ class UniqueAI:
192
201
  self._logger.info(
193
202
  f"we are in the iteration {self.current_iteration_index} asking the model to tell if we should use tools or if it will just stream"
194
203
  )
195
- stream_response = await self._chat_service.complete_with_references_async(
204
+ stream_response = await self._streaming_handler.complete_with_references_async(
196
205
  messages=messages,
197
206
  model_name=self._config.space.language_model.name,
198
207
  tools=self._tool_manager.get_tool_definitions(),
@@ -351,7 +360,7 @@ class UniqueAI:
351
360
 
352
361
  tool_calls = loop_response.tool_calls or []
353
362
 
354
- # Append function call to history
363
+ # Append function calls to history
355
364
  self._history_manager._append_tool_calls_to_history(tool_calls)
356
365
 
357
366
  # Execute tool calls
@@ -404,3 +413,39 @@ class UniqueAI:
404
413
  content=loop_response.message.original_text or "",
405
414
  )
406
415
  )
416
+
417
+
418
+ class UniqueAIResponsesApi(UniqueAI):
419
+ def __init__(
420
+ self,
421
+ logger: Logger,
422
+ event: ChatEvent,
423
+ config: UniqueAIConfig,
424
+ chat_service: ChatService,
425
+ content_service: ContentService,
426
+ debug_info_manager: DebugInfoManager,
427
+ streaming_handler: ResponsesSupportCompleteWithReferences,
428
+ reference_manager: ReferenceManager,
429
+ thinking_manager: ThinkingManager,
430
+ tool_manager: ResponsesApiToolManager,
431
+ history_manager: HistoryManager,
432
+ evaluation_manager: EvaluationManager,
433
+ postprocessor_manager: PostprocessorManager,
434
+ mcp_servers: list[McpServer],
435
+ ) -> None:
436
+ super().__init__(
437
+ logger,
438
+ event=event,
439
+ config=config,
440
+ chat_service=chat_service,
441
+ content_service=content_service,
442
+ debug_info_manager=debug_info_manager,
443
+ streaming_handler=streaming_handler, # type: ignore
444
+ reference_manager=reference_manager,
445
+ thinking_manager=thinking_manager,
446
+ tool_manager=tool_manager, # type: ignore
447
+ history_manager=history_manager,
448
+ evaluation_manager=evaluation_manager,
449
+ postprocessor_manager=postprocessor_manager,
450
+ mcp_servers=mcp_servers,
451
+ )
@@ -1,5 +1,8 @@
1
+ import os
1
2
  from logging import Logger
3
+ from typing import NamedTuple
2
4
 
5
+ from openai import AsyncOpenAI
3
6
  from unique_follow_up_questions.follow_up_postprocessor import (
4
7
  FollowUpPostprocessor,
5
8
  )
@@ -12,7 +15,7 @@ from unique_internal_search.uploaded_search.service import (
12
15
  from unique_stock_ticker.stock_ticker_postprocessor import (
13
16
  StockTickerPostprocessor,
14
17
  )
15
- from unique_toolkit import LanguageModelService
18
+ from unique_toolkit import LanguageModelService, get_async_openai_client
16
19
  from unique_toolkit.agentic.debug_info_manager.debug_info_manager import (
17
20
  DebugInfoManager,
18
21
  )
@@ -28,9 +31,15 @@ from unique_toolkit.agentic.history_manager.history_manager import (
28
31
  HistoryManagerConfig,
29
32
  )
30
33
  from unique_toolkit.agentic.postprocessor.postprocessor_manager import (
34
+ Postprocessor,
31
35
  PostprocessorManager,
32
36
  )
33
37
  from unique_toolkit.agentic.reference_manager.reference_manager import ReferenceManager
38
+ from unique_toolkit.agentic.responses_api import (
39
+ DisplayCodeInterpreterFilesPostProcessor,
40
+ DisplayCodeInterpreterFilesPostProcessorConfig,
41
+ ShowExecutedCodePostprocessor,
42
+ )
34
43
  from unique_toolkit.agentic.thinking_manager.thinking_manager import (
35
44
  ThinkingManager,
36
45
  ThinkingManagerConfig,
@@ -43,31 +52,81 @@ from unique_toolkit.agentic.tools.a2a import (
43
52
  )
44
53
  from unique_toolkit.agentic.tools.config import ToolBuildConfig
45
54
  from unique_toolkit.agentic.tools.mcp.manager import MCPManager
46
- from unique_toolkit.agentic.tools.tool_manager import ToolManager, ToolManagerConfig
55
+ from unique_toolkit.agentic.tools.tool_manager import (
56
+ OpenAIBuiltInToolManager,
57
+ ResponsesApiToolManager,
58
+ ToolManager,
59
+ ToolManagerConfig,
60
+ )
47
61
  from unique_toolkit.agentic.tools.tool_progress_reporter import ToolProgressReporter
48
- from unique_toolkit.app.schemas import ChatEvent
62
+ from unique_toolkit.app.schemas import ChatEvent, McpServer
49
63
  from unique_toolkit.chat.service import ChatService
64
+ from unique_toolkit.content import Content
50
65
  from unique_toolkit.content.service import ContentService
66
+ from unique_toolkit.protocols.support import ResponsesSupportCompleteWithReferences
51
67
 
52
68
  from unique_orchestrator.config import UniqueAIConfig
53
- from unique_orchestrator.unique_ai import UniqueAI
69
+ from unique_orchestrator.unique_ai import UniqueAI, UniqueAIResponsesApi
54
70
 
55
71
 
56
- def build_unique_ai(
72
+ async def build_unique_ai(
57
73
  event: ChatEvent,
58
74
  logger: Logger,
59
75
  config: UniqueAIConfig,
60
76
  debug_info_manager: DebugInfoManager,
61
- ) -> UniqueAI:
77
+ ) -> UniqueAI | UniqueAIResponsesApi:
78
+ common_components = _build_common(event, logger, config)
79
+
80
+ if config.agent.experimental.responses_api_config.use_responses_api:
81
+ return await _build_responses(
82
+ event=event,
83
+ logger=logger,
84
+ config=config,
85
+ debug_info_manager=debug_info_manager,
86
+ common_components=common_components,
87
+ )
88
+ else:
89
+ return _build_completions(
90
+ event=event,
91
+ logger=logger,
92
+ config=config,
93
+ debug_info_manager=debug_info_manager,
94
+ common_components=common_components,
95
+ )
96
+
97
+
98
+ class _CommonComponents(NamedTuple):
99
+ chat_service: ChatService
100
+ content_service: ContentService
101
+ uploaded_documents: list[Content]
102
+ thinking_manager: ThinkingManager
103
+ reference_manager: ReferenceManager
104
+ history_manager: HistoryManager
105
+ evaluation_manager: EvaluationManager
106
+ # Tool Manager Components
107
+ tool_progress_reporter: ToolProgressReporter
108
+ tool_manager_config: ToolManagerConfig
109
+ mcp_manager: MCPManager
110
+ a2a_manager: A2AManager
111
+ mcp_servers: list[McpServer]
112
+ postprocessors: list[Postprocessor]
113
+
114
+
115
+ def _build_common(
116
+ event: ChatEvent,
117
+ logger: Logger,
118
+ config: UniqueAIConfig,
119
+ ) -> _CommonComponents:
62
120
  chat_service = ChatService(event)
63
121
 
64
122
  content_service = ContentService.from_event(event)
123
+
124
+ uploaded_documents = content_service.get_documents_uploaded_to_chat()
125
+
65
126
  tool_progress_reporter = ToolProgressReporter(chat_service=chat_service)
66
- reference_manager = ReferenceManager()
67
127
  thinking_manager_config = ThinkingManagerConfig(
68
128
  thinking_steps_display=config.agent.experimental.thinking_steps_display
69
129
  )
70
-
71
130
  thinking_manager = ThinkingManager(
72
131
  logger=logger,
73
132
  config=thinking_manager_config,
@@ -75,44 +134,7 @@ def build_unique_ai(
75
134
  chat_service=chat_service,
76
135
  )
77
136
 
78
- uploaded_documents = content_service.get_documents_uploaded_to_chat()
79
- if len(uploaded_documents) > 0:
80
- logger.info(
81
- f"Adding UploadedSearchTool with {len(uploaded_documents)} documents"
82
- )
83
- config.space.tools.append(
84
- ToolBuildConfig(
85
- name=UploadedSearchTool.name,
86
- display_name=UploadedSearchTool.name,
87
- configuration=UploadedSearchConfig(),
88
- ),
89
- )
90
- event.payload.tool_choices.append(str(UploadedSearchTool.name))
91
-
92
- mcp_manager = MCPManager(
93
- mcp_servers=event.payload.mcp_servers,
94
- event=event,
95
- tool_progress_reporter=tool_progress_reporter,
96
- )
97
-
98
- a2a_manager = A2AManager(
99
- logger=logger,
100
- tool_progress_reporter=tool_progress_reporter,
101
- )
102
-
103
- tool_config = ToolManagerConfig(
104
- tools=config.space.tools,
105
- max_tool_calls=config.agent.experimental.loop_configuration.max_tool_calls_per_iteration,
106
- )
107
-
108
- tool_manager = ToolManager(
109
- logger=logger,
110
- config=tool_config,
111
- event=event,
112
- tool_progress_reporter=tool_progress_reporter,
113
- mcp_manager=mcp_manager,
114
- a2a_manager=a2a_manager,
115
- )
137
+ reference_manager = ReferenceManager()
116
138
 
117
139
  history_manager_config = HistoryManagerConfig(
118
140
  experimental_features=history_manager_module.ExperimentalFeatures(
@@ -122,7 +144,6 @@ def build_unique_ai(
122
144
  language_model=config.space.language_model,
123
145
  uploaded_content_config=config.agent.services.uploaded_content_config,
124
146
  )
125
-
126
147
  history_manager = HistoryManager(
127
148
  logger,
128
149
  event,
@@ -132,7 +153,6 @@ def build_unique_ai(
132
153
  )
133
154
 
134
155
  evaluation_manager = EvaluationManager(logger=logger, chat_service=chat_service)
135
-
136
156
  if config.agent.services.evaluation_config:
137
157
  evaluation_manager.add_evaluation(
138
158
  HallucinationEvaluation(
@@ -142,13 +162,24 @@ def build_unique_ai(
142
162
  )
143
163
  )
144
164
 
145
- postprocessor_manager = PostprocessorManager(
165
+ mcp_manager = MCPManager(
166
+ mcp_servers=event.payload.mcp_servers,
167
+ event=event,
168
+ tool_progress_reporter=tool_progress_reporter,
169
+ )
170
+ a2a_manager = A2AManager(
146
171
  logger=logger,
147
- chat_service=chat_service,
172
+ tool_progress_reporter=tool_progress_reporter,
173
+ )
174
+ tool_manager_config = ToolManagerConfig(
175
+ tools=config.space.tools,
176
+ max_tool_calls=config.agent.experimental.loop_configuration.max_tool_calls_per_iteration,
148
177
  )
149
178
 
179
+ postprocessors = []
180
+
150
181
  if config.agent.services.stock_ticker_config:
151
- postprocessor_manager.add_postprocessor(
182
+ postprocessors.append(
152
183
  StockTickerPostprocessor(
153
184
  config=config.agent.services.stock_ticker_config,
154
185
  event=event,
@@ -159,7 +190,7 @@ def build_unique_ai(
159
190
  config.agent.services.follow_up_questions_config
160
191
  and config.agent.services.follow_up_questions_config.number_of_questions > 0
161
192
  ):
162
- postprocessor_manager.add_postprocessor(
193
+ postprocessors.append(
163
194
  FollowUpPostprocessor(
164
195
  logger=logger,
165
196
  config=config.agent.services.follow_up_questions_config,
@@ -169,44 +200,251 @@ def build_unique_ai(
169
200
  )
170
201
  )
171
202
 
172
- if len(tool_manager.sub_agents) > 0:
173
- sub_agent_responses_postprocessor = SubAgentResponsesPostprocessor(
174
- user_id=event.user_id,
175
- main_agent_chat_id=event.payload.chat_id,
176
- company_id=event.company_id,
203
+ return _CommonComponents(
204
+ chat_service=chat_service,
205
+ content_service=content_service,
206
+ uploaded_documents=uploaded_documents,
207
+ thinking_manager=thinking_manager,
208
+ reference_manager=reference_manager,
209
+ history_manager=history_manager,
210
+ evaluation_manager=evaluation_manager,
211
+ tool_progress_reporter=tool_progress_reporter,
212
+ mcp_manager=mcp_manager,
213
+ a2a_manager=a2a_manager,
214
+ tool_manager_config=tool_manager_config,
215
+ mcp_servers=event.payload.mcp_servers,
216
+ postprocessors=postprocessors,
217
+ )
218
+
219
+
220
+ def _get_openai_client_from_env(config: UniqueAIConfig) -> AsyncOpenAI:
221
+ use_direct_azure_client = (
222
+ config.agent.experimental.responses_api_config.use_direct_azure_client
223
+ )
224
+ api_key_env_var = config.agent.experimental.responses_api_config.direct_azure_client_api_key_env_var
225
+ api_base_env_var = config.agent.experimental.responses_api_config.direct_azure_client_api_base_env_var
226
+
227
+ if use_direct_azure_client:
228
+ # TODO: (for testing only), remove when v1 endpoint is working
229
+ return AsyncOpenAI(
230
+ api_key=os.environ[api_key_env_var],
231
+ base_url=os.environ[api_base_env_var],
232
+ )
233
+ else:
234
+ return get_async_openai_client().copy(
235
+ default_headers={
236
+ "x-model": config.space.language_model.name
237
+ } # Backend requires a model name
177
238
  )
178
- postprocessor_manager.add_postprocessor(sub_agent_responses_postprocessor)
179
239
 
180
- sub_agent_evaluation = None
181
- if config.agent.experimental.sub_agents_config.evaluation_config is not None:
182
- sub_agent_evaluation = SubAgentEvaluationService(
183
- config=config.agent.experimental.sub_agents_config.evaluation_config,
184
- language_model_service=LanguageModelService.from_event(event),
240
+
241
+ async def _build_responses(
242
+ event: ChatEvent,
243
+ logger: Logger,
244
+ config: UniqueAIConfig,
245
+ common_components: _CommonComponents,
246
+ debug_info_manager: DebugInfoManager,
247
+ ) -> UniqueAIResponsesApi:
248
+ client = _get_openai_client_from_env(config)
249
+ builtin_tool_manager = OpenAIBuiltInToolManager(
250
+ uploaded_files=common_components.uploaded_documents,
251
+ chat_id=event.payload.chat_id,
252
+ content_service=common_components.content_service,
253
+ user_id=event.user_id,
254
+ company_id=event.company_id,
255
+ client=client,
256
+ )
257
+
258
+ tool_manager = await ResponsesApiToolManager.build_manager(
259
+ logger=logger,
260
+ config=common_components.tool_manager_config,
261
+ event=event,
262
+ tool_progress_reporter=common_components.tool_progress_reporter,
263
+ mcp_manager=common_components.mcp_manager,
264
+ a2a_manager=common_components.a2a_manager,
265
+ builtin_tool_manager=builtin_tool_manager,
266
+ )
267
+
268
+ postprocessor_manager = PostprocessorManager(
269
+ logger=logger,
270
+ chat_service=common_components.chat_service,
271
+ )
272
+ for postprocessor in common_components.postprocessors:
273
+ postprocessor_manager.add_postprocessor(postprocessor)
274
+
275
+ if (
276
+ config.agent.experimental.responses_api_config.code_interpreter_display_config
277
+ is not None
278
+ ):
279
+ postprocessor_manager.add_postprocessor(
280
+ ShowExecutedCodePostprocessor(
281
+ config=config.agent.experimental.responses_api_config.code_interpreter_display_config
185
282
  )
186
- evaluation_manager.add_evaluation(sub_agent_evaluation)
283
+ )
187
284
 
188
- for tool in tool_manager.sub_agents:
189
- assert isinstance(tool.config, ExtendedSubAgentToolConfig)
190
- sub_agent_responses_postprocessor.register_sub_agent_tool(
191
- tool, tool.config.response_display_config
285
+ postprocessor_manager.add_postprocessor(
286
+ DisplayCodeInterpreterFilesPostProcessor(
287
+ client=client,
288
+ content_service=common_components.content_service,
289
+ config=DisplayCodeInterpreterFilesPostProcessorConfig(
290
+ upload_scope_id=config.agent.experimental.responses_api_config.generated_files_scope_id,
291
+ ),
292
+ )
293
+ )
294
+
295
+ class ResponsesStreamingHandler(ResponsesSupportCompleteWithReferences):
296
+ def complete_with_references(self, *args, **kwargs):
297
+ return common_components.chat_service.complete_responses_with_references(
298
+ *args, **kwargs
192
299
  )
193
- if sub_agent_evaluation is not None:
194
- sub_agent_evaluation.register_sub_agent_tool(
195
- tool, tool.config.evaluation_config
196
- )
300
+
301
+ async def complete_with_references_async(self, *args, **kwargs):
302
+ return await common_components.chat_service.complete_responses_with_references_async(
303
+ *args, **kwargs
304
+ )
305
+
306
+ streaming_handler = ResponsesStreamingHandler()
307
+
308
+ _add_sub_agents_postprocessor(
309
+ postprocessor_manager=postprocessor_manager,
310
+ tool_manager=tool_manager,
311
+ user_id=event.user_id,
312
+ company_id=event.company_id,
313
+ chat_id=event.payload.chat_id,
314
+ )
315
+ _add_sub_agents_evaluation(
316
+ evaluation_manager=common_components.evaluation_manager,
317
+ tool_manager=tool_manager,
318
+ config=config,
319
+ event=event,
320
+ )
321
+
322
+ return UniqueAIResponsesApi(
323
+ event=event,
324
+ config=config,
325
+ logger=logger,
326
+ chat_service=common_components.chat_service,
327
+ content_service=common_components.content_service,
328
+ tool_manager=tool_manager,
329
+ thinking_manager=common_components.thinking_manager,
330
+ streaming_handler=streaming_handler,
331
+ history_manager=common_components.history_manager,
332
+ reference_manager=common_components.reference_manager,
333
+ evaluation_manager=common_components.evaluation_manager,
334
+ postprocessor_manager=postprocessor_manager,
335
+ debug_info_manager=debug_info_manager,
336
+ mcp_servers=event.payload.mcp_servers,
337
+ )
338
+
339
+
340
+ def _build_completions(
341
+ event: ChatEvent,
342
+ logger: Logger,
343
+ config: UniqueAIConfig,
344
+ common_components: _CommonComponents,
345
+ debug_info_manager: DebugInfoManager,
346
+ ) -> UniqueAI:
347
+ if len(common_components.uploaded_documents) > 0:
348
+ logger.info(
349
+ f"Adding UploadedSearchTool with {len(common_components.uploaded_documents)} documents"
350
+ )
351
+ config.space.tools.append(
352
+ ToolBuildConfig(
353
+ name=UploadedSearchTool.name,
354
+ display_name=UploadedSearchTool.name,
355
+ configuration=UploadedSearchConfig(),
356
+ ),
357
+ )
358
+ event.payload.tool_choices.append(str(UploadedSearchTool.name))
359
+
360
+ tool_manager = ToolManager(
361
+ logger=logger,
362
+ config=common_components.tool_manager_config,
363
+ event=event,
364
+ tool_progress_reporter=common_components.tool_progress_reporter,
365
+ mcp_manager=common_components.mcp_manager,
366
+ a2a_manager=common_components.a2a_manager,
367
+ )
368
+
369
+ postprocessor_manager = PostprocessorManager(
370
+ logger=logger,
371
+ chat_service=common_components.chat_service,
372
+ )
373
+ for postprocessor in common_components.postprocessors:
374
+ postprocessor_manager.add_postprocessor(postprocessor)
375
+
376
+ _add_sub_agents_postprocessor(
377
+ postprocessor_manager=postprocessor_manager,
378
+ tool_manager=tool_manager,
379
+ user_id=event.user_id,
380
+ company_id=event.company_id,
381
+ chat_id=event.payload.chat_id,
382
+ )
383
+ _add_sub_agents_evaluation(
384
+ evaluation_manager=common_components.evaluation_manager,
385
+ tool_manager=tool_manager,
386
+ config=config,
387
+ event=event,
388
+ )
197
389
 
198
390
  return UniqueAI(
199
391
  event=event,
200
392
  config=config,
201
393
  logger=logger,
202
- chat_service=chat_service,
203
- content_service=content_service,
394
+ chat_service=common_components.chat_service,
395
+ content_service=common_components.content_service,
204
396
  tool_manager=tool_manager,
205
- thinking_manager=thinking_manager,
206
- history_manager=history_manager,
207
- reference_manager=reference_manager,
208
- evaluation_manager=evaluation_manager,
397
+ thinking_manager=common_components.thinking_manager,
398
+ history_manager=common_components.history_manager,
399
+ reference_manager=common_components.reference_manager,
400
+ streaming_handler=common_components.chat_service,
401
+ evaluation_manager=common_components.evaluation_manager,
209
402
  postprocessor_manager=postprocessor_manager,
210
403
  debug_info_manager=debug_info_manager,
211
404
  mcp_servers=event.payload.mcp_servers,
212
405
  )
406
+
407
+
408
+ def _add_sub_agents_postprocessor(
409
+ postprocessor_manager: PostprocessorManager,
410
+ tool_manager: ToolManager | ResponsesApiToolManager,
411
+ user_id: str,
412
+ company_id: str,
413
+ chat_id: str,
414
+ ) -> None:
415
+ sub_agents = tool_manager.sub_agents
416
+ if len(sub_agents) > 0:
417
+ sub_agent_responses_postprocessor = SubAgentResponsesPostprocessor(
418
+ user_id=user_id,
419
+ main_agent_chat_id=chat_id,
420
+ company_id=company_id,
421
+ )
422
+ postprocessor_manager.add_postprocessor(sub_agent_responses_postprocessor)
423
+
424
+ for tool in tool_manager.sub_agents:
425
+ assert isinstance(tool.config, ExtendedSubAgentToolConfig)
426
+ sub_agent_responses_postprocessor.register_sub_agent_tool(
427
+ tool, tool.config.response_display_config
428
+ )
429
+
430
+
431
+ def _add_sub_agents_evaluation(
432
+ evaluation_manager: EvaluationManager,
433
+ tool_manager: ToolManager | ResponsesApiToolManager,
434
+ config: UniqueAIConfig,
435
+ event: ChatEvent,
436
+ ) -> None:
437
+ sub_agents = tool_manager.sub_agents
438
+ if len(sub_agents) > 0:
439
+ sub_agent_evaluation = None
440
+ if config.agent.experimental.sub_agents_config.evaluation_config is not None:
441
+ sub_agent_evaluation = SubAgentEvaluationService(
442
+ config=config.agent.experimental.sub_agents_config.evaluation_config,
443
+ language_model_service=LanguageModelService.from_event(event),
444
+ )
445
+ evaluation_manager.add_evaluation(sub_agent_evaluation)
446
+ for tool in tool_manager.sub_agents:
447
+ assert isinstance(tool.config, ExtendedSubAgentToolConfig)
448
+ sub_agent_evaluation.register_sub_agent_tool(
449
+ tool, tool.config.evaluation_config
450
+ )
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: unique_orchestrator
3
- Version: 1.3.0
3
+ Version: 1.4.1
4
4
  Summary:
5
5
  License: Proprietary
6
6
  Author: Andreas Hauri
@@ -19,7 +19,7 @@ Requires-Dist: unique-follow-up-questions (>=1.1.2,<2.0.0)
19
19
  Requires-Dist: unique-internal-search (>=1.0.1,<2.0.0)
20
20
  Requires-Dist: unique-sdk (>=0.10.24,<0.11.0)
21
21
  Requires-Dist: unique-stock-ticker (>=1.0.2,<2.0.0)
22
- Requires-Dist: unique-toolkit (>=1.14.5,<2.0.0)
22
+ Requires-Dist: unique-toolkit (>=1.16.0,<2.0.0)
23
23
  Requires-Dist: unique-web-search (>=1.3.1,<2.0.0)
24
24
  Description-Content-Type: text/markdown
25
25
 
@@ -33,6 +33,12 @@ All notable changes to this project will be documented in this file.
33
33
  The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
34
34
  and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
35
35
 
36
+ ## [1.4.1] - 2025-10-16
37
+ - Temporarily make open ai env vars configurable
38
+
39
+ ## [1.4.0] - 2025-10-14
40
+ - Add responses api and code execution support.
41
+
36
42
  ## [1.3.0] - 2025-10-14
37
43
  - Re-organize sub-agents configuration for clarity.
38
44
 
@@ -0,0 +1,11 @@
1
+ unique_orchestrator/config.py,sha256=n6kPZ4yjk3gVmGZNq4YHmwc-D43XhpzuLBqYZXKunz8,11070
2
+ unique_orchestrator/prompts/generic_reference_prompt.jinja2,sha256=fYPaiE-N1gSoOqu85OeEBa_ttAim8grOhHuOHJjSHNU,2663
3
+ unique_orchestrator/prompts/system_prompt.jinja2,sha256=YXFdx3PG2p4TKfjEpz7guIw2GaKoY-4zRMEzXaKhHXE,7213
4
+ unique_orchestrator/prompts/user_message_prompt.jinja2,sha256=BQokpBh3H2J-rFk8i-PRph3jy4T1gAJPPb1mxxRWNuM,878
5
+ unique_orchestrator/tests/test_unique_ai_reference_order.py,sha256=8mZeVP1k8neH4qrFW3oa3zwIdaq2c7R1VvurC7kjBU8,4445
6
+ unique_orchestrator/unique_ai.py,sha256=7jnFkWbjm4musN7LTK03Ewwny9de_HrQ1kpcXzLeFfo,18793
7
+ unique_orchestrator/unique_ai_builder.py,sha256=dMAGa2BlEfQpkW9DCIp981IEKBsURBG1iF19A2n_hQo,15974
8
+ unique_orchestrator-1.4.1.dist-info/LICENSE,sha256=GlN8wHNdh53xwOPg44URnwag6TEolCjoq3YD_KrWgss,193
9
+ unique_orchestrator-1.4.1.dist-info/METADATA,sha256=36H5Xslzv6dm9N-7QpcB8vcW26K68y7m3zG_n3ApRu4,2703
10
+ unique_orchestrator-1.4.1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
11
+ unique_orchestrator-1.4.1.dist-info/RECORD,,
@@ -1,11 +0,0 @@
1
- unique_orchestrator/config.py,sha256=2A3pDEc37PlOULgQzJu_PoLchwLT7T0OEA1Gieue2Ow,9603
2
- unique_orchestrator/prompts/generic_reference_prompt.jinja2,sha256=fYPaiE-N1gSoOqu85OeEBa_ttAim8grOhHuOHJjSHNU,2663
3
- unique_orchestrator/prompts/system_prompt.jinja2,sha256=YXFdx3PG2p4TKfjEpz7guIw2GaKoY-4zRMEzXaKhHXE,7213
4
- unique_orchestrator/prompts/user_message_prompt.jinja2,sha256=BQokpBh3H2J-rFk8i-PRph3jy4T1gAJPPb1mxxRWNuM,878
5
- unique_orchestrator/tests/test_unique_ai_reference_order.py,sha256=8mZeVP1k8neH4qrFW3oa3zwIdaq2c7R1VvurC7kjBU8,4445
6
- unique_orchestrator/unique_ai.py,sha256=4juvEzASX6FMQJywTYLzH-V-vaCNviqiu7HZ2jkrDSc,17148
7
- unique_orchestrator/unique_ai_builder.py,sha256=knwm1cJ59SarJE1DsKGVS5WVpU1B_FCPbMHEFQg4uZY,7524
8
- unique_orchestrator-1.3.0.dist-info/LICENSE,sha256=GlN8wHNdh53xwOPg44URnwag6TEolCjoq3YD_KrWgss,193
9
- unique_orchestrator-1.3.0.dist-info/METADATA,sha256=7jZHo2njuFcvuYZmSvQp2_9VaLT37a3P9Xwu5LrE6Ro,2556
10
- unique_orchestrator-1.3.0.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
11
- unique_orchestrator-1.3.0.dist-info/RECORD,,