unique_orchestrator 1.7.2__py3-none-any.whl → 1.7.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of unique_orchestrator might be problematic. Click here for more details.

@@ -3,7 +3,6 @@ from pathlib import Path
3
3
  from typing import Annotated, Any, Generic, Literal, TypeVar
4
4
 
5
5
  from pydantic import BaseModel, Field, ValidationInfo, field_validator, model_validator
6
- from pydantic.json_schema import SkipJsonSchema
7
6
  from unique_deep_research.config import DeepResearchToolConfig
8
7
  from unique_deep_research.service import DeepResearchTool
9
8
  from unique_follow_up_questions.config import FollowUpQuestionsConfig
@@ -163,6 +162,12 @@ class UniqueAIPromptConfig(BaseModel):
163
162
  description="The user message prompt template as a Jinja2 template string.",
164
163
  )
165
164
 
165
+ user_metadata: list[str] = Field(
166
+ default=[],
167
+ title="User Metadata",
168
+ description="User metadata fields to be ingested in the system prompt.",
169
+ )
170
+
166
171
 
167
172
  class UniqueAIServices(BaseModel):
168
173
  """Determine the services the agent is using
@@ -248,42 +253,30 @@ class SubAgentsConfig(BaseModel):
248
253
  )
249
254
 
250
255
 
251
- class ResponsesApiConfig(BaseModel):
252
- model_config = get_configuration_dict(frozen=True)
253
-
254
- use_responses_api: bool = Field(
255
- default=False,
256
- description="Whether to use the responses API instead of the completions API.",
256
+ class CodeInterpreterExtendedConfig(OpenAICodeInterpreterConfig):
257
+ generated_files_scope_id: str = Field(
258
+ description="Folder where files generated by the LLM will be uploaded",
257
259
  )
258
- code_interpreter_display_config: (
260
+ display_config: (
259
261
  Annotated[
260
262
  ShowExecutedCodePostprocessorConfig,
261
263
  Field(title="Active"),
262
264
  ]
263
265
  | DeactivatedNone
264
- ) = ShowExecutedCodePostprocessorConfig()
265
-
266
- use_direct_azure_client: SkipJsonSchema[bool] = Field(
267
- default=True,
268
- description="[TEMPORARY] Whether to use the direct Azure client instead of the responses API.",
269
- )
270
- direct_azure_client_api_base_env_var: SkipJsonSchema[str] = Field(
271
- default="OPENAI_BASE_URL",
272
- description="[TEMPORARY] The environment variable that contains the API base for the direct Azure client.",
273
- )
274
- direct_azure_client_api_key_env_var: SkipJsonSchema[str] = Field(
275
- default="OPENAI_API_KEY",
276
- description="[TEMPORARY] The environment variable that contains the API key for the direct Azure client.",
266
+ ) = Field(
267
+ ShowExecutedCodePostprocessorConfig(),
268
+ description="If active, generated code will be prepended to the LLM answer",
277
269
  )
270
+
271
+
272
+ class ResponsesApiConfig(BaseModel):
273
+ model_config = get_configuration_dict(frozen=True)
274
+
278
275
  code_interpreter: (
279
- Annotated[OpenAICodeInterpreterConfig, Field(title="Active")] | DeactivatedNone
276
+ Annotated[CodeInterpreterExtendedConfig, Field(title="Active")]
277
+ | DeactivatedNone
280
278
  ) = Field(default=None, description="Config for openai code interpreter")
281
279
 
282
- generated_files_scope_id: str = Field(
283
- default="<SCOPE_ID_PLACEHOLDER>",
284
- description="Scope ID for the responses API.",
285
- )
286
-
287
280
 
288
281
  class ExperimentalConfig(BaseModel):
289
282
  """Experimental features this part of the configuration might evolve in the future continuously"""
@@ -318,7 +311,12 @@ class ExperimentalConfig(BaseModel):
318
311
 
319
312
  sub_agents_config: SubAgentsConfig = SubAgentsConfig()
320
313
 
321
- responses_api_config: ResponsesApiConfig = ResponsesApiConfig()
314
+ responses_api_config: (
315
+ Annotated[ResponsesApiConfig, Field(title="Active")] | DeactivatedNone
316
+ ) = Field(
317
+ default=None,
318
+ description="If active, the main agent will use the responses api from open ai",
319
+ )
322
320
 
323
321
 
324
322
  class UniqueAIAgentConfig(BaseModel):
@@ -7,6 +7,15 @@ You are Unique AI Chat a system based on large language models
7
7
  **Knowledge cutoff**: {{ model_info.info_cutoff_at | default('unknown') }}
8
8
  **Current date**: {{ date_string }}
9
9
 
10
+ {#- User Metadata Section #}
11
+ {% if user_metadata and user_metadata|length > 0 %}
12
+ # User Information
13
+ Here is some metadata about the user, which may help you write better queries, and help contextualize the information you retrieve:
14
+ {%- for key, value in user_metadata.items() %}
15
+ - {{ key.replace('_', ' ').title() }}: {{ value }}
16
+ {%- endfor %}
17
+ {%- endif %}
18
+
10
19
  Over the course of the conversation, you adapt to the user's tone and preference.
11
20
  Try to match the user's vibe, tone, and generally how they are speaking. You want the conversation to feel natural.
12
21
  You engage in authentic conversation by responding to the information provided, asking relevant questions, and showing genuine curiosity.
@@ -0,0 +1,257 @@
1
+ from unittest.mock import MagicMock
2
+
3
+ import pytest
4
+
5
+
6
+ class TestGetFilteredUserMetadata:
7
+ """Test suite for UniqueAI._get_filtered_user_metadata method"""
8
+
9
+ @pytest.fixture
10
+ def mock_unique_ai(self):
11
+ """Create a minimal UniqueAI instance with mocked dependencies"""
12
+ # Lazy import to avoid heavy dependencies at module import time
13
+ from unique_orchestrator.unique_ai import UniqueAI
14
+
15
+ mock_logger = MagicMock()
16
+
17
+ # Create minimal event structure
18
+ dummy_event = MagicMock()
19
+ dummy_event.payload.assistant_message.id = "assist_1"
20
+ dummy_event.payload.user_message.text = "query"
21
+
22
+ # Create minimal config structure
23
+ mock_config = MagicMock()
24
+ mock_config.agent.prompt_config.user_metadata = []
25
+
26
+ # Create minimal required dependencies
27
+ mock_chat_service = MagicMock()
28
+ mock_content_service = MagicMock()
29
+ mock_debug_info_manager = MagicMock()
30
+ mock_reference_manager = MagicMock()
31
+ mock_thinking_manager = MagicMock()
32
+ mock_tool_manager = MagicMock()
33
+ mock_history_manager = MagicMock()
34
+ mock_evaluation_manager = MagicMock()
35
+ mock_postprocessor_manager = MagicMock()
36
+ mock_streaming_handler = MagicMock()
37
+
38
+ # Instantiate UniqueAI
39
+ ua = UniqueAI(
40
+ logger=mock_logger,
41
+ event=dummy_event,
42
+ config=mock_config,
43
+ chat_service=mock_chat_service,
44
+ content_service=mock_content_service,
45
+ debug_info_manager=mock_debug_info_manager,
46
+ streaming_handler=mock_streaming_handler,
47
+ reference_manager=mock_reference_manager,
48
+ thinking_manager=mock_thinking_manager,
49
+ tool_manager=mock_tool_manager,
50
+ history_manager=mock_history_manager,
51
+ evaluation_manager=mock_evaluation_manager,
52
+ postprocessor_manager=mock_postprocessor_manager,
53
+ mcp_servers=[],
54
+ )
55
+
56
+ return ua
57
+
58
+ def test_returns_empty_dict_when_config_is_empty_list(self, mock_unique_ai):
59
+ """Test that empty dict is returned when config.user_metadata is an empty list"""
60
+ mock_unique_ai._config.agent.prompt_config.user_metadata = []
61
+ mock_unique_ai._event.payload.user_metadata = {
62
+ "department": "Engineering",
63
+ "role": "Developer",
64
+ }
65
+
66
+ result = mock_unique_ai._get_filtered_user_metadata()
67
+
68
+ assert result == {}
69
+ assert isinstance(result, dict)
70
+
71
+ def test_returns_empty_dict_when_user_metadata_is_none(self, mock_unique_ai):
72
+ """Test that empty dict is returned when user_metadata is None"""
73
+ mock_unique_ai._config.agent.prompt_config.user_metadata = [
74
+ "department",
75
+ "role",
76
+ ]
77
+ mock_unique_ai._event.payload.user_metadata = None
78
+
79
+ result = mock_unique_ai._get_filtered_user_metadata()
80
+
81
+ assert result == {}
82
+ assert isinstance(result, dict)
83
+
84
+ def test_returns_empty_dict_when_both_config_and_metadata_are_empty(
85
+ self, mock_unique_ai
86
+ ):
87
+ """Test that empty dict is returned when both config and user_metadata are empty/None"""
88
+ mock_unique_ai._config.agent.prompt_config.user_metadata = []
89
+ mock_unique_ai._event.payload.user_metadata = None
90
+
91
+ result = mock_unique_ai._get_filtered_user_metadata()
92
+
93
+ assert result == {}
94
+ assert isinstance(result, dict)
95
+
96
+ def test_filters_metadata_to_include_only_configured_keys(self, mock_unique_ai):
97
+ """Test that only keys specified in config are included in the result"""
98
+ mock_unique_ai._config.agent.prompt_config.user_metadata = [
99
+ "department",
100
+ "role",
101
+ ]
102
+ mock_unique_ai._event.payload.user_metadata = {
103
+ "department": "Engineering",
104
+ "role": "Developer",
105
+ "location": "San Francisco",
106
+ "salary": "100000",
107
+ }
108
+
109
+ result = mock_unique_ai._get_filtered_user_metadata()
110
+
111
+ assert result == {"department": "Engineering", "role": "Developer"}
112
+ assert "location" not in result
113
+ assert "salary" not in result
114
+ # Verify all values are strings
115
+ assert all(isinstance(v, str) for v in result.values())
116
+
117
+ def test_returns_only_existing_keys_from_user_metadata(self, mock_unique_ai):
118
+ """Test that keys in config but not in user_metadata are not included"""
119
+ mock_unique_ai._config.agent.prompt_config.user_metadata = [
120
+ "department",
121
+ "role",
122
+ "team",
123
+ "manager",
124
+ ]
125
+ mock_unique_ai._event.payload.user_metadata = {
126
+ "department": "Engineering",
127
+ "role": "Developer",
128
+ }
129
+
130
+ result = mock_unique_ai._get_filtered_user_metadata()
131
+
132
+ assert result == {"department": "Engineering", "role": "Developer"}
133
+ assert "team" not in result
134
+ assert "manager" not in result
135
+
136
+ def test_handles_single_key_in_config(self, mock_unique_ai):
137
+ """Test filtering with a single key in config"""
138
+ mock_unique_ai._config.agent.prompt_config.user_metadata = ["department"]
139
+ mock_unique_ai._event.payload.user_metadata = {
140
+ "department": "Engineering",
141
+ "role": "Developer",
142
+ }
143
+
144
+ result = mock_unique_ai._get_filtered_user_metadata()
145
+
146
+ assert result == {"department": "Engineering"}
147
+ assert isinstance(result["department"], str)
148
+
149
+ def test_handles_string_values(self, mock_unique_ai):
150
+ """Test that string values in user_metadata are preserved"""
151
+ mock_unique_ai._config.agent.prompt_config.user_metadata = [
152
+ "name",
153
+ "email",
154
+ "department",
155
+ "title",
156
+ ]
157
+ mock_unique_ai._event.payload.user_metadata = {
158
+ "name": "John Doe",
159
+ "email": "john.doe@example.com",
160
+ "department": "Engineering",
161
+ "title": "Senior Developer",
162
+ "ignored": "This should not appear",
163
+ }
164
+
165
+ result = mock_unique_ai._get_filtered_user_metadata()
166
+
167
+ assert result == {
168
+ "name": "John Doe",
169
+ "email": "john.doe@example.com",
170
+ "department": "Engineering",
171
+ "title": "Senior Developer",
172
+ }
173
+ assert "ignored" not in result
174
+ # Verify all values are strings
175
+ assert all(isinstance(v, str) for v in result.values())
176
+
177
+ def test_handles_empty_dict_user_metadata(self, mock_unique_ai):
178
+ """Test behavior when user_metadata is an empty dict"""
179
+ mock_unique_ai._config.agent.prompt_config.user_metadata = ["department"]
180
+ mock_unique_ai._event.payload.user_metadata = {}
181
+
182
+ result = mock_unique_ai._get_filtered_user_metadata()
183
+
184
+ assert result == {}
185
+
186
+ def test_handles_empty_string_values(self, mock_unique_ai):
187
+ """Test that empty string values in user_metadata are preserved if key is in config"""
188
+ mock_unique_ai._config.agent.prompt_config.user_metadata = [
189
+ "department",
190
+ "role",
191
+ ]
192
+ mock_unique_ai._event.payload.user_metadata = {
193
+ "department": "Engineering",
194
+ "role": "",
195
+ }
196
+
197
+ result = mock_unique_ai._get_filtered_user_metadata()
198
+
199
+ assert result == {"department": "Engineering", "role": ""}
200
+ assert isinstance(result["role"], str)
201
+
202
+ def test_preserves_original_metadata_unchanged(self, mock_unique_ai):
203
+ """Test that the original user_metadata dict is not modified"""
204
+ original_metadata = {
205
+ "department": "Engineering",
206
+ "role": "Developer",
207
+ "location": "San Francisco",
208
+ }
209
+ mock_unique_ai._config.agent.prompt_config.user_metadata = ["department"]
210
+ mock_unique_ai._event.payload.user_metadata = original_metadata.copy()
211
+
212
+ result = mock_unique_ai._get_filtered_user_metadata()
213
+
214
+ # Original should still have all keys
215
+ assert mock_unique_ai._event.payload.user_metadata == original_metadata
216
+ # Result should only have filtered key
217
+ assert result == {"department": "Engineering"}
218
+
219
+ def test_handles_special_characters_in_values(self, mock_unique_ai):
220
+ """Test that special characters in string values are preserved"""
221
+ mock_unique_ai._config.agent.prompt_config.user_metadata = [
222
+ "description",
223
+ "notes",
224
+ ]
225
+ mock_unique_ai._event.payload.user_metadata = {
226
+ "description": "User with special chars: @#$%^&*()",
227
+ "notes": "Multi-line\ntext\twith\ttabs",
228
+ "other": "excluded",
229
+ }
230
+
231
+ result = mock_unique_ai._get_filtered_user_metadata()
232
+
233
+ assert result == {
234
+ "description": "User with special chars: @#$%^&*()",
235
+ "notes": "Multi-line\ntext\twith\ttabs",
236
+ }
237
+ assert all(isinstance(v, str) for v in result.values())
238
+
239
+ def test_return_type_is_dict_str_str(self, mock_unique_ai):
240
+ """Test that return type is dict[str, str]"""
241
+ mock_unique_ai._config.agent.prompt_config.user_metadata = [
242
+ "department",
243
+ "role",
244
+ ]
245
+ mock_unique_ai._event.payload.user_metadata = {
246
+ "department": "Engineering",
247
+ "role": "Developer",
248
+ }
249
+
250
+ result = mock_unique_ai._get_filtered_user_metadata()
251
+
252
+ # Check it's a dict
253
+ assert isinstance(result, dict)
254
+ # Check all keys are strings
255
+ assert all(isinstance(k, str) for k in result.keys())
256
+ # Check all values are strings
257
+ assert all(isinstance(v, str) for v in result.values())
@@ -135,9 +135,6 @@ class UniqueAI:
135
135
  self.start_text = self._thinking_manager.update_start_text(
136
136
  self.start_text, loop_response
137
137
  )
138
- await self._create_new_assistant_message_if_loop_response_contains_content(
139
- loop_response
140
- )
141
138
 
142
139
  # Only set completed_at if no tool took control. Tools that take control will set the message state to completed themselves.
143
140
  await self._chat_service.modify_assistant_message_async(
@@ -231,6 +228,9 @@ class UniqueAI:
231
228
  self._logger.debug(
232
229
  "Tools were called we process them and do not exit the loop"
233
230
  )
231
+ await self._create_new_assistant_message_if_loop_response_contains_content(
232
+ loop_response
233
+ )
234
234
 
235
235
  return await self._handle_tool_calls(loop_response)
236
236
 
@@ -270,6 +270,8 @@ class UniqueAI:
270
270
  mcp_server.user_prompt for mcp_server in self._mcp_servers
271
271
  ]
272
272
 
273
+ user_metadata = self._get_filtered_user_metadata()
274
+
273
275
  tool_descriptions = self._tool_manager.get_tool_prompts()
274
276
 
275
277
  query = self._event.payload.user_message.text
@@ -293,12 +295,11 @@ class UniqueAI:
293
295
  tool_descriptions_with_user_prompts=tool_descriptions_with_user_prompts,
294
296
  use_sub_agent_references=use_sub_agent_references,
295
297
  sub_agent_referencing_instructions=sub_agent_referencing_instructions,
298
+ user_metadata=user_metadata,
296
299
  )
297
300
  return user_msg
298
301
 
299
- async def _render_system_prompt(
300
- self,
301
- ) -> str:
302
+ async def _render_system_prompt(self) -> str:
302
303
  # TODO: Collect tool information here and adapt to system prompt
303
304
  tool_descriptions = self._tool_manager.get_tool_prompts()
304
305
 
@@ -313,6 +314,8 @@ class UniqueAI:
313
314
 
314
315
  date_string = datetime.now().strftime("%A %B %d, %Y")
315
316
 
317
+ user_metadata = self._get_filtered_user_metadata()
318
+
316
319
  mcp_server_system_prompts = [
317
320
  mcp_server.system_prompt for mcp_server in self._mcp_servers
318
321
  ]
@@ -341,6 +344,7 @@ class UniqueAI:
341
344
  mcp_server_system_prompts=mcp_server_system_prompts,
342
345
  use_sub_agent_references=use_sub_agent_references,
343
346
  sub_agent_referencing_instructions=sub_agent_referencing_instructions,
347
+ user_metadata=user_metadata,
344
348
  )
345
349
  return system_message
346
350
 
@@ -446,6 +450,26 @@ class UniqueAI:
446
450
  )
447
451
  )
448
452
 
453
+ def _get_filtered_user_metadata(self) -> dict[str, str]:
454
+ """
455
+ Filter user metadata to only include keys specified in the agent's prompt config.
456
+
457
+ Returns:
458
+ Dictionary containing only the metadata keys that are configured to be included.
459
+ """
460
+ user_metadata = {}
461
+ if (
462
+ self._config.agent.prompt_config.user_metadata
463
+ and self._event.payload.user_metadata is not None
464
+ ):
465
+ # Filter metadata to only include selected keys
466
+ user_metadata = {
467
+ k: str(v)
468
+ for k, v in self._event.payload.user_metadata.items()
469
+ if k in self._config.agent.prompt_config.user_metadata
470
+ }
471
+ return user_metadata
472
+
449
473
 
450
474
  class UniqueAIResponsesApi(UniqueAI):
451
475
  def __init__(
@@ -1,8 +1,6 @@
1
- import os
2
1
  from logging import Logger
3
- from typing import NamedTuple
2
+ from typing import NamedTuple, cast
4
3
 
5
- from openai import AsyncOpenAI
6
4
  from unique_follow_up_questions.follow_up_postprocessor import (
7
5
  FollowUpPostprocessor,
8
6
  )
@@ -46,11 +44,13 @@ from unique_toolkit.agentic.thinking_manager.thinking_manager import (
46
44
  from unique_toolkit.agentic.tools.a2a import (
47
45
  A2AManager,
48
46
  ExtendedSubAgentToolConfig,
47
+ SubAgentDisplaySpec,
49
48
  SubAgentEvaluationService,
50
- SubAgentResponsesPostprocessor,
51
- )
52
- from unique_toolkit.agentic.tools.a2a.postprocessing.postprocessor import (
49
+ SubAgentEvaluationSpec,
50
+ SubAgentReferencesPostprocessor,
51
+ SubAgentResponsesDisplayPostprocessor,
53
52
  SubAgentResponsesPostprocessorConfig,
53
+ SubAgentResponseWatcher,
54
54
  )
55
55
  from unique_toolkit.agentic.tools.config import ToolBuildConfig
56
56
  from unique_toolkit.agentic.tools.mcp.manager import MCPManager
@@ -80,7 +80,7 @@ async def build_unique_ai(
80
80
  ) -> UniqueAI | UniqueAIResponsesApi:
81
81
  common_components = _build_common(event, logger, config)
82
82
 
83
- if config.agent.experimental.responses_api_config.use_responses_api:
83
+ if config.agent.experimental.responses_api_config is not None:
84
84
  return await _build_responses(
85
85
  event=event,
86
86
  logger=logger,
@@ -107,6 +107,7 @@ class _CommonComponents(NamedTuple):
107
107
  history_manager: HistoryManager
108
108
  evaluation_manager: EvaluationManager
109
109
  postprocessor_manager: PostprocessorManager
110
+ response_watcher: SubAgentResponseWatcher
110
111
  # Tool Manager Components
111
112
  tool_progress_reporter: ToolProgressReporter
112
113
  tool_manager_config: ToolManagerConfig
@@ -126,6 +127,8 @@ def _build_common(
126
127
 
127
128
  uploaded_documents = content_service.get_documents_uploaded_to_chat()
128
129
 
130
+ response_watcher = SubAgentResponseWatcher()
131
+
129
132
  tool_progress_reporter = ToolProgressReporter(
130
133
  chat_service=chat_service,
131
134
  config=config.agent.services.tool_progress_reporter_config,
@@ -174,7 +177,9 @@ def _build_common(
174
177
  a2a_manager = A2AManager(
175
178
  logger=logger,
176
179
  tool_progress_reporter=tool_progress_reporter,
180
+ response_watcher=response_watcher,
177
181
  )
182
+
178
183
  tool_manager_config = ToolManagerConfig(
179
184
  tools=config.space.tools,
180
185
  max_tool_calls=config.agent.experimental.loop_configuration.max_tool_calls_per_iteration,
@@ -222,41 +227,10 @@ def _build_common(
222
227
  tool_manager_config=tool_manager_config,
223
228
  mcp_servers=event.payload.mcp_servers,
224
229
  postprocessor_manager=postprocessor_manager,
230
+ response_watcher=response_watcher,
225
231
  )
226
232
 
227
233
 
228
- def _prepare_base_url(url: str, use_v1: bool) -> str:
229
- url = url.rstrip("/") + "/openai"
230
-
231
- if use_v1:
232
- url += "/v1"
233
-
234
- return url
235
-
236
-
237
- def _get_openai_client_from_env(
238
- config: UniqueAIConfig, use_v1: bool = False
239
- ) -> AsyncOpenAI:
240
- use_direct_azure_client = (
241
- config.agent.experimental.responses_api_config.use_direct_azure_client
242
- )
243
- api_key_env_var = config.agent.experimental.responses_api_config.direct_azure_client_api_key_env_var
244
- api_base_env_var = config.agent.experimental.responses_api_config.direct_azure_client_api_base_env_var
245
-
246
- if use_direct_azure_client:
247
- # TODO: (for testing only), remove when v1 endpoint is working
248
- return AsyncOpenAI(
249
- api_key=os.environ[api_key_env_var],
250
- base_url=_prepare_base_url(os.environ[api_base_env_var], use_v1=use_v1),
251
- )
252
- else:
253
- return get_async_openai_client().copy(
254
- default_headers={
255
- "x-model": config.space.language_model.name
256
- } # Backend requires a model name
257
- )
258
-
259
-
260
234
  async def _build_responses(
261
235
  event: ChatEvent,
262
236
  logger: Logger,
@@ -264,25 +238,52 @@ async def _build_responses(
264
238
  common_components: _CommonComponents,
265
239
  debug_info_manager: DebugInfoManager,
266
240
  ) -> UniqueAIResponsesApi:
267
- client = _get_openai_client_from_env(config, use_v1=True)
241
+ client = get_async_openai_client().copy(
242
+ default_headers={
243
+ "x-model": config.space.language_model.name,
244
+ "x-user-id": event.user_id,
245
+ "x-company-id": event.company_id,
246
+ "x-assistant-id": event.payload.assistant_id,
247
+ "x-chat-id": event.payload.chat_id,
248
+ }
249
+ )
250
+
251
+ assert config.agent.experimental.responses_api_config is not None
252
+
268
253
  code_interpreter_config = (
269
254
  config.agent.experimental.responses_api_config.code_interpreter
270
255
  )
271
-
256
+ postprocessor_manager = common_components.postprocessor_manager
272
257
  tool_names = [tool.name for tool in config.space.tools]
273
- if (
274
- code_interpreter_config is not None
275
- and OpenAIBuiltInToolName.CODE_INTERPRETER not in tool_names
276
- ):
277
- logger.info("Automatically adding code interpreter to the tools")
278
- config = config.model_copy(deep=True)
279
- config.space.tools.append(
280
- ToolBuildConfig(
281
- name=OpenAIBuiltInToolName.CODE_INTERPRETER,
282
- configuration=code_interpreter_config,
258
+
259
+ if code_interpreter_config is not None:
260
+ if OpenAIBuiltInToolName.CODE_INTERPRETER not in tool_names:
261
+ logger.info("Automatically adding code interpreter to the tools")
262
+ config = config.model_copy(deep=True)
263
+ config.space.tools.append(
264
+ ToolBuildConfig(
265
+ name=OpenAIBuiltInToolName.CODE_INTERPRETER,
266
+ configuration=code_interpreter_config,
267
+ )
268
+ )
269
+ common_components.tool_manager_config.tools = config.space.tools
270
+
271
+ if code_interpreter_config.display_config is not None:
272
+ postprocessor_manager.add_postprocessor(
273
+ ShowExecutedCodePostprocessor(
274
+ config=code_interpreter_config.display_config
275
+ )
276
+ )
277
+
278
+ postprocessor_manager.add_postprocessor(
279
+ DisplayCodeInterpreterFilesPostProcessor(
280
+ client=client,
281
+ content_service=common_components.content_service,
282
+ config=DisplayCodeInterpreterFilesPostProcessorConfig(
283
+ upload_scope_id=code_interpreter_config.generated_files_scope_id,
284
+ ),
283
285
  )
284
286
  )
285
- common_components.tool_manager_config.tools = config.space.tools
286
287
 
287
288
  builtin_tool_manager = OpenAIBuiltInToolManager(
288
289
  uploaded_files=common_components.uploaded_documents,
@@ -305,26 +306,6 @@ async def _build_responses(
305
306
 
306
307
  postprocessor_manager = common_components.postprocessor_manager
307
308
 
308
- if (
309
- config.agent.experimental.responses_api_config.code_interpreter_display_config
310
- is not None
311
- ):
312
- postprocessor_manager.add_postprocessor(
313
- ShowExecutedCodePostprocessor(
314
- config=config.agent.experimental.responses_api_config.code_interpreter_display_config
315
- )
316
- )
317
-
318
- postprocessor_manager.add_postprocessor(
319
- DisplayCodeInterpreterFilesPostProcessor(
320
- client=client,
321
- content_service=common_components.content_service,
322
- config=DisplayCodeInterpreterFilesPostProcessorConfig(
323
- upload_scope_id=config.agent.experimental.responses_api_config.generated_files_scope_id,
324
- ),
325
- )
326
- )
327
-
328
309
  class ResponsesStreamingHandler(ResponsesSupportCompleteWithReferences):
329
310
  def complete_with_references(self, *args, **kwargs):
330
311
  return common_components.chat_service.complete_responses_with_references(
@@ -341,16 +322,15 @@ async def _build_responses(
341
322
  _add_sub_agents_postprocessor(
342
323
  postprocessor_manager=postprocessor_manager,
343
324
  tool_manager=tool_manager,
344
- user_id=event.user_id,
345
- company_id=event.company_id,
346
- chat_id=event.payload.chat_id,
347
- sleep_time_before_update=config.agent.experimental.sub_agents_config.sleep_time_before_update,
325
+ config=config,
326
+ response_watcher=common_components.response_watcher,
348
327
  )
349
328
  _add_sub_agents_evaluation(
350
329
  evaluation_manager=common_components.evaluation_manager,
351
330
  tool_manager=tool_manager,
352
331
  config=config,
353
332
  event=event,
333
+ response_watcher=common_components.response_watcher,
354
334
  )
355
335
 
356
336
  return UniqueAIResponsesApi(
@@ -414,16 +394,15 @@ def _build_completions(
414
394
  _add_sub_agents_postprocessor(
415
395
  postprocessor_manager=postprocessor_manager,
416
396
  tool_manager=tool_manager,
417
- user_id=event.user_id,
418
- company_id=event.company_id,
419
- chat_id=event.payload.chat_id,
420
- sleep_time_before_update=config.agent.experimental.sub_agents_config.sleep_time_before_update,
397
+ config=config,
398
+ response_watcher=common_components.response_watcher,
421
399
  )
422
400
  _add_sub_agents_evaluation(
423
401
  evaluation_manager=common_components.evaluation_manager,
424
402
  tool_manager=tool_manager,
425
403
  config=config,
426
404
  event=event,
405
+ response_watcher=common_components.response_watcher,
427
406
  )
428
407
 
429
408
  return UniqueAI(
@@ -447,28 +426,37 @@ def _build_completions(
447
426
  def _add_sub_agents_postprocessor(
448
427
  postprocessor_manager: PostprocessorManager,
449
428
  tool_manager: ToolManager | ResponsesApiToolManager,
450
- user_id: str,
451
- company_id: str,
452
- chat_id: str,
453
- sleep_time_before_update: float,
429
+ config: UniqueAIConfig,
430
+ response_watcher: SubAgentResponseWatcher,
454
431
  ) -> None:
455
432
  sub_agents = tool_manager.sub_agents
456
433
  if len(sub_agents) > 0:
457
- sub_agent_responses_postprocessor = SubAgentResponsesPostprocessor(
458
- user_id=user_id,
459
- main_agent_chat_id=chat_id,
460
- company_id=company_id,
461
- config=SubAgentResponsesPostprocessorConfig(
462
- sleep_time_before_update=sleep_time_before_update,
463
- ),
434
+ display_config = SubAgentResponsesPostprocessorConfig(
435
+ sleep_time_before_update=config.agent.experimental.sub_agents_config.sleep_time_before_update,
464
436
  )
465
- postprocessor_manager.add_postprocessor(sub_agent_responses_postprocessor)
466
-
467
- for tool in tool_manager.sub_agents:
468
- assert isinstance(tool.config, ExtendedSubAgentToolConfig)
469
- sub_agent_responses_postprocessor.register_sub_agent_tool(
470
- tool, tool.config.response_display_config
437
+ display_specs = []
438
+ for tool in sub_agents:
439
+ tool_config = cast(
440
+ ExtendedSubAgentToolConfig, tool.settings.configuration
441
+ ) # (BeforeValidator of ToolBuildConfig)
442
+
443
+ display_specs.append(
444
+ SubAgentDisplaySpec(
445
+ assistant_id=tool_config.assistant_id,
446
+ display_name=tool.display_name(),
447
+ display_config=tool_config.response_display_config,
448
+ )
471
449
  )
450
+ reference_postprocessor = SubAgentReferencesPostprocessor(
451
+ response_watcher=response_watcher,
452
+ )
453
+ sub_agent_responses_postprocessor = SubAgentResponsesDisplayPostprocessor(
454
+ config=display_config,
455
+ response_watcher=response_watcher,
456
+ display_specs=display_specs,
457
+ )
458
+ postprocessor_manager.add_postprocessor(reference_postprocessor)
459
+ postprocessor_manager.add_postprocessor(sub_agent_responses_postprocessor)
472
460
 
473
461
 
474
462
  def _add_sub_agents_evaluation(
@@ -476,18 +464,31 @@ def _add_sub_agents_evaluation(
476
464
  tool_manager: ToolManager | ResponsesApiToolManager,
477
465
  config: UniqueAIConfig,
478
466
  event: ChatEvent,
467
+ response_watcher: SubAgentResponseWatcher,
479
468
  ) -> None:
480
469
  sub_agents = tool_manager.sub_agents
481
- if len(sub_agents) > 0:
482
- sub_agent_evaluation = None
483
- if config.agent.experimental.sub_agents_config.evaluation_config is not None:
484
- sub_agent_evaluation = SubAgentEvaluationService(
485
- config=config.agent.experimental.sub_agents_config.evaluation_config,
486
- language_model_service=LanguageModelService.from_event(event),
487
- )
488
- evaluation_manager.add_evaluation(sub_agent_evaluation)
489
- for tool in tool_manager.sub_agents:
490
- assert isinstance(tool.config, ExtendedSubAgentToolConfig)
491
- sub_agent_evaluation.register_sub_agent_tool(
492
- tool, tool.config.evaluation_config
470
+ if (
471
+ len(sub_agents) > 0
472
+ and config.agent.experimental.sub_agents_config.evaluation_config is not None
473
+ ):
474
+ evaluation_specs = []
475
+ for tool in sub_agents:
476
+ tool_config = cast(
477
+ ExtendedSubAgentToolConfig, tool.settings.configuration
478
+ ) # (BeforeValidator of ToolBuildConfig)
479
+
480
+ evaluation_specs.append(
481
+ SubAgentEvaluationSpec(
482
+ assistant_id=tool_config.assistant_id,
483
+ display_name=tool.display_name(),
484
+ config=tool_config.evaluation_config,
493
485
  )
486
+ )
487
+
488
+ sub_agent_evaluation = SubAgentEvaluationService(
489
+ config=config.agent.experimental.sub_agents_config.evaluation_config,
490
+ language_model_service=LanguageModelService.from_event(event),
491
+ evaluation_specs=evaluation_specs,
492
+ response_watcher=response_watcher,
493
+ )
494
+ evaluation_manager.add_evaluation(sub_agent_evaluation)
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: unique_orchestrator
3
- Version: 1.7.2
3
+ Version: 1.7.7
4
4
  Summary:
5
5
  License: Proprietary
6
6
  Author: Andreas Hauri
@@ -20,7 +20,7 @@ Requires-Dist: unique-internal-search (>=1.0.1,<2.0.0)
20
20
  Requires-Dist: unique-sdk (>=0.10.34,<0.11.0)
21
21
  Requires-Dist: unique-stock-ticker (>=1.0.2,<2.0.0)
22
22
  Requires-Dist: unique-swot (>=0.1.0,<0.2.0)
23
- Requires-Dist: unique-toolkit (>=1.22.1,<2.0.0)
23
+ Requires-Dist: unique-toolkit (>=1.23.0,<2.0.0)
24
24
  Requires-Dist: unique-web-search (>=1.3.1,<2.0.0)
25
25
  Description-Content-Type: text/markdown
26
26
 
@@ -34,6 +34,22 @@ All notable changes to this project will be documented in this file.
34
34
  The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
35
35
  and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
36
36
 
37
+ ## [1.7.7] - 2025-11-10
38
+ - Remove the direct Azure client config from the Responses API config
39
+ - Improve the organization of the Responses API config
40
+
41
+ ## [1.7.6] - 2025-11-05
42
+ - Update default system prompt (including user metadata section)
43
+
44
+ ## [1.7.5] - 2025-11-05
45
+ - Add functionality to include user metadata in the user/system prompts of the orchestrator
46
+
47
+ ## [1.7.4] - 2025-11-04
48
+ - Update and adapt to toolkit 1.23.0 (refactor of the sub-agents implementation)
49
+
50
+ ## [1.7.3] - 2025-11-03
51
+ - Fixed an issue where new assistant messages were not properly generated during streaming outputs with tool calls; the orchestrator now correctly creates messages via `_create_new_assistant_message_if_loop_response_contains_content` when loop_response includes text and tool invocations.
52
+
37
53
  ## [1.7.2] - 2025-11-03
38
54
  - Add Swot tool to the orchestrator
39
55
 
@@ -0,0 +1,12 @@
1
+ unique_orchestrator/config.py,sha256=MWoms-7GDF7ki6KtsQGt5zwqYRVjbtGQnfEr5KsNhD8,11657
2
+ unique_orchestrator/prompts/generic_reference_prompt.jinja2,sha256=fYPaiE-N1gSoOqu85OeEBa_ttAim8grOhHuOHJjSHNU,2663
3
+ unique_orchestrator/prompts/system_prompt.jinja2,sha256=IcjkImrQxSrkcUs7BfiAeArhSH0RSxnVIrsJWs-53II,7571
4
+ unique_orchestrator/prompts/user_message_prompt.jinja2,sha256=BQokpBh3H2J-rFk8i-PRph3jy4T1gAJPPb1mxxRWNuM,878
5
+ unique_orchestrator/tests/test_unique_ai_get_filtered_user_metadata.py,sha256=I0xkhR_1DFZEiwSm5x6_B668fQTlYm5tYtPU9uULX3k,9661
6
+ unique_orchestrator/tests/test_unique_ai_reference_order.py,sha256=8mZeVP1k8neH4qrFW3oa3zwIdaq2c7R1VvurC7kjBU8,4445
7
+ unique_orchestrator/unique_ai.py,sha256=qmbWul6O0ri50PtWNt8qz4GHsxZe5upJW7XOo6fmQL0,20743
8
+ unique_orchestrator/unique_ai_builder.py,sha256=Vh4dL4rm-rt1ten3Xi3pkhgjkmr6tuEEithc5L_Z8Cg,18080
9
+ unique_orchestrator-1.7.7.dist-info/LICENSE,sha256=GlN8wHNdh53xwOPg44URnwag6TEolCjoq3YD_KrWgss,193
10
+ unique_orchestrator-1.7.7.dist-info/METADATA,sha256=ZmqFUSyXzxYH-ShKQVxLy6hKGN3Z5F48rzVmzYSu7ck,4640
11
+ unique_orchestrator-1.7.7.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
12
+ unique_orchestrator-1.7.7.dist-info/RECORD,,
@@ -1,11 +0,0 @@
1
- unique_orchestrator/config.py,sha256=fKjBYrkzgzxA0EH4hSQIkQ2sJz8_Au9Lsa3T4EXOEws,12006
2
- unique_orchestrator/prompts/generic_reference_prompt.jinja2,sha256=fYPaiE-N1gSoOqu85OeEBa_ttAim8grOhHuOHJjSHNU,2663
3
- unique_orchestrator/prompts/system_prompt.jinja2,sha256=YXFdx3PG2p4TKfjEpz7guIw2GaKoY-4zRMEzXaKhHXE,7213
4
- unique_orchestrator/prompts/user_message_prompt.jinja2,sha256=BQokpBh3H2J-rFk8i-PRph3jy4T1gAJPPb1mxxRWNuM,878
5
- unique_orchestrator/tests/test_unique_ai_reference_order.py,sha256=8mZeVP1k8neH4qrFW3oa3zwIdaq2c7R1VvurC7kjBU8,4445
6
- unique_orchestrator/unique_ai.py,sha256=lYwaAZtwlEc-ApzvU7xzJeOkCK557O4yam62BM5pB8o,19797
7
- unique_orchestrator/unique_ai_builder.py,sha256=p7cXCdXN36_CAgTiq-YXuPlaTjWI2D-QTwMSe0S7Chg,17877
8
- unique_orchestrator-1.7.2.dist-info/LICENSE,sha256=GlN8wHNdh53xwOPg44URnwag6TEolCjoq3YD_KrWgss,193
9
- unique_orchestrator-1.7.2.dist-info/METADATA,sha256=OfSMpad8RPtQcTkOmzYUVvO-OaLvpnyEkNvE6U9uQzg,3893
10
- unique_orchestrator-1.7.2.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
11
- unique_orchestrator-1.7.2.dist-info/RECORD,,