unique_orchestrator 1.0.1__py3-none-any.whl → 1.7.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,14 +2,14 @@ from enum import StrEnum
 from pathlib import Path
 from typing import Annotated, Any, Generic, Literal, TypeVar
 
-from pydantic import BaseModel, Field, ValidationInfo, field_validator
+from pydantic import BaseModel, Field, ValidationInfo, field_validator, model_validator
 from unique_deep_research.config import DeepResearchToolConfig
 from unique_deep_research.service import DeepResearchTool
 from unique_follow_up_questions.config import FollowUpQuestionsConfig
 from unique_internal_search.config import InternalSearchConfig
 from unique_internal_search.service import InternalSearchTool
 from unique_stock_ticker.config import StockTickerConfig
-from unique_toolkit._common.default_language_model import DEFAULT_GPT_4o
+from unique_swot import SwotAnalysisTool, SwotAnalysisToolConfig
 from unique_toolkit._common.validators import (
     LMI,
     ClipInt,
@@ -22,11 +22,32 @@ from unique_toolkit.agentic.evaluation.schemas import EvaluationMetricName
 from unique_toolkit.agentic.history_manager.history_manager import (
     UploadedContentConfig,
 )
+from unique_toolkit.agentic.responses_api import (
+    DisplayCodeInterpreterFilesPostProcessorConfig,
+    ShowExecutedCodePostprocessorConfig,
+)
+from unique_toolkit.agentic.tools.a2a import (
+    REFERENCING_INSTRUCTIONS_FOR_SYSTEM_PROMPT,
+    REFERENCING_INSTRUCTIONS_FOR_USER_PROMPT,
+)
+from unique_toolkit.agentic.tools.a2a.evaluation import SubAgentEvaluationServiceConfig
 from unique_toolkit.agentic.tools.config import get_configuration_dict
+from unique_toolkit.agentic.tools.openai_builtin.manager import (
+    OpenAICodeInterpreterConfig,
+)
 from unique_toolkit.agentic.tools.tool import ToolBuildConfig
+from unique_toolkit.agentic.tools.tool_progress_reporter import (
+    ToolProgressReporterConfig,
+)
+from unique_toolkit.language_model.default_language_model import DEFAULT_GPT_4o
 from unique_web_search.config import WebSearchConfig
 from unique_web_search.service import WebSearchTool
 
+DeactivatedNone = Annotated[
+    None,
+    Field(title="Deactivated", description="None"),
+]
+
 
 class SpaceType(StrEnum):
     UNIQUE_CUSTOM = "unique_custom"
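The `DeactivatedNone` alias, now promoted to module scope (it previously lived inside `UniqueAIPromptConfig`; see the hunk at old line 143 below), is a Pydantic idiom for an on/off toggle: a field typed as `ActiveConfig | DeactivatedNone` is switched off by setting it to `None`, while the `Field` metadata keeps the JSON schema self-describing for config editors. A minimal sketch of the pattern; `FeatureConfig` and `ExampleConfig` are illustrative names, not from the package:

```python
from typing import Annotated

from pydantic import BaseModel, Field

# Same pattern as the diff: a None type carrying UI metadata so a schema-driven
# editor can render an "Active | Deactivated" choice for the field.
DeactivatedNone = Annotated[None, Field(title="Deactivated", description="None")]


class FeatureConfig(BaseModel):  # hypothetical stand-in for a toolkit config
    threshold: float = 0.5


class ExampleConfig(BaseModel):
    # Union of the active config and DeactivatedNone: assigning None switches
    # the feature off while the schema still documents both states.
    feature: Annotated[FeatureConfig, Field(title="Active")] | DeactivatedNone = (
        FeatureConfig()
    )


print(ExampleConfig().feature)              # FeatureConfig(threshold=0.5)
print(ExampleConfig(feature=None).feature)  # None -> feature deactivated
```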
@@ -72,6 +93,10 @@ class SpaceConfigBase(BaseModel, Generic[T]):
             name=DeepResearchTool.name,
             configuration=DeepResearchToolConfig(),
         ),
+        ToolBuildConfig(
+            name=SwotAnalysisTool.name,
+            configuration=SwotAnalysisToolConfig(),
+        ),
     ],
 )
 
@@ -140,11 +165,11 @@ class UniqueAIPromptConfig(BaseModel):
         description="The user message prompt template as a Jinja2 template string.",
     )
 
-
-DeactivatedNone = Annotated[
-    None,
-    Field(title="Deactivated", description="None"),
-]
+    user_metadata: list[str] = Field(
+        default=[],
+        title="User Metadata",
+        description="User metadata fields to be ingested in the system prompt.",
+    )
 
 
 class UniqueAIServices(BaseModel):
@@ -182,6 +207,10 @@ class UniqueAIServices(BaseModel):
 
     uploaded_content_config: UploadedContentConfig = UploadedContentConfig()
 
+    tool_progress_reporter_config: ToolProgressReporterConfig = (
+        ToolProgressReporterConfig()
+    )
+
 
 class InputTokenDistributionConfig(BaseModel):
     model_config = get_configuration_dict(frozen=True)
@@ -197,6 +226,79 @@ class InputTokenDistributionConfig(BaseModel):
         return int(self.percent_for_history * max_input_token)
 
 
+class SubAgentsReferencingConfig(BaseModel):
+    model_config = get_configuration_dict()
+
+    referencing_instructions_for_system_prompt: str = Field(
+        default=REFERENCING_INSTRUCTIONS_FOR_SYSTEM_PROMPT,
+        description="Referencing instructions for the main agent's system prompt.",
+    )
+    referencing_instructions_for_user_prompt: str = Field(
+        default=REFERENCING_INSTRUCTIONS_FOR_USER_PROMPT,
+        description="Referencing instructions for the main agent's user prompt. Should correspond to a short reminder.",
+    )
+
+
+class SubAgentsConfig(BaseModel):
+    model_config = get_configuration_dict()
+
+    referencing_config: (
+        Annotated[SubAgentsReferencingConfig, Field(title="Active")] | DeactivatedNone
+    ) = SubAgentsReferencingConfig()
+    evaluation_config: (
+        Annotated[SubAgentEvaluationServiceConfig, Field(title="Active")]
+        | DeactivatedNone
+    ) = SubAgentEvaluationServiceConfig()
+
+    sleep_time_before_update: float = Field(
+        default=0.5,
+        description="Time to sleep before updating the main agent message to display the sub agent responses. Temporary fix to avoid rendering issues.",
+    )
+
+
+class CodeInterpreterExtendedConfig(BaseModel):
+    model_config = get_configuration_dict()
+
+    generated_files_config: DisplayCodeInterpreterFilesPostProcessorConfig = Field(
+        default=DisplayCodeInterpreterFilesPostProcessorConfig(),
+        title="Generated files config",
+        description="Display config for generated files",
+    )
+
+    executed_code_display_config: (
+        Annotated[
+            ShowExecutedCodePostprocessorConfig,
+            Field(title="Active"),
+        ]
+        | DeactivatedNone
+    ) = Field(
+        ShowExecutedCodePostprocessorConfig(),
+        description="If active, generated code will be prepended to the LLM answer",
+    )
+
+    tool_config: OpenAICodeInterpreterConfig = Field(
+        default=OpenAICodeInterpreterConfig(),
+        title="Tool config",
+    )
+
+
+class ResponsesApiConfig(BaseModel):
+    model_config = get_configuration_dict(frozen=True)
+
+    code_interpreter: (
+        Annotated[CodeInterpreterExtendedConfig, Field(title="Active")]
+        | DeactivatedNone
+    ) = Field(
+        default=None,
+        description="If active, the main agent will have access to the OpenAI Code Interpreter tool",
+    )
+
+    use_responses_api: bool = Field(
+        default=False,
+        description="If set, the main agent will use the Responses API from OpenAI",
+    )
+
+
 class ExperimentalConfig(BaseModel):
     """Experimental features this part of the configuration might evolve in the future continuously"""
 
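Taken together, `ResponsesApiConfig` ships with the code interpreter deactivated (`code_interpreter=None`) and the Responses API off. A hedged usage sketch, assuming the package and its `unique_toolkit` dependencies are installed and that these classes live in a `unique_orchestrator.config` module (the diff does not name the file):

```python
# Assumed import path; the diff does not show which module defines these classes.
from unique_orchestrator.config import (
    CodeInterpreterExtendedConfig,
    ResponsesApiConfig,
)

# Defaults: code interpreter deactivated, Responses API disabled.
default_cfg = ResponsesApiConfig()
assert default_cfg.code_interpreter is None
assert default_cfg.use_responses_api is False

# Opt in to both; the model is frozen, so build a new instance rather than mutate.
cfg = ResponsesApiConfig(
    code_interpreter=CodeInterpreterExtendedConfig(),
    use_responses_api=True,
)
```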
@@ -228,6 +330,10 @@ class ExperimentalConfig(BaseModel):
         max_tool_calls_per_iteration=5
     )
 
+    sub_agents_config: SubAgentsConfig = SubAgentsConfig()
+
+    responses_api_config: ResponsesApiConfig = ResponsesApiConfig()
+
 
 class UniqueAIAgentConfig(BaseModel):
     model_config = get_configuration_dict(frozen=True)
@@ -252,3 +358,9 @@ class UniqueAIConfig(BaseModel):
     space: UniqueAISpaceConfig = UniqueAISpaceConfig()
 
     agent: UniqueAIAgentConfig = UniqueAIAgentConfig()
+
+    @model_validator(mode="after")
+    def disable_sub_agent_referencing_if_not_used(self) -> "UniqueAIConfig":
+        if not any(tool.is_sub_agent for tool in self.space.tools):
+            self.agent.experimental.sub_agents_config.referencing_config = None
+        return self
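The new `@model_validator(mode="after")` hook runs once the whole `UniqueAIConfig` tree is constructed, so it can inspect one branch (`space.tools`) and mutate another (the experimental sub-agents config). A self-contained sketch of the same cross-field pattern, with simplified, hypothetical fields:

```python
from pydantic import BaseModel, model_validator


class Space(BaseModel):
    has_sub_agents: bool = False


class Agent(BaseModel):
    referencing: str | None = "referencing instructions..."


class Config(BaseModel):
    space: Space = Space()
    agent: Agent = Agent()

    @model_validator(mode="after")
    def disable_referencing_if_unused(self) -> "Config":
        # Same shape as the diff's validator: cross-field cleanup after parsing.
        if not self.space.has_sub_agents:
            self.agent.referencing = None
        return self


assert Config().agent.referencing is None  # no sub agents -> instructions dropped
```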
@@ -7,6 +7,24 @@ You are Unique AI Chat a system based on large language models
 **Knowledge cutoff**: {{ model_info.info_cutoff_at | default('unknown') }}
 **Current date**: {{ date_string }}
 
+{#- Expired Uploaded Documents Section #}
+{% if uploaded_documents_expired and uploaded_documents_expired|length > 0 %}
+# Expired Uploaded Documents
+Here are the uploaded documents that are expired due to company retention policy. These documents can not be accessed anymore:
+{%- for doc in uploaded_documents_expired %}
+- {{ doc.title or doc.key }}
+{%- endfor %}
+{%- endif %}
+
+{#- User Metadata Section #}
+{% if user_metadata and user_metadata|length > 0 %}
+# User Information
+Here is some metadata about the user, which may help you write better queries, and help contextualize the information you retrieve:
+{%- for key, value in user_metadata.items() %}
+- {{ key.replace('_', ' ').title() }}: {{ value }}
+{%- endfor %}
+{%- endif %}
+
 Over the course of the conversation, you adapt to the user's tone and preference.
 Try to match the user's vibe, tone, and generally how they are speaking. You want the conversation to feel natural.
 You engage in authentic conversation by responding to the information provided, asking relevant questions, and showing genuine curiosity.
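Both new sections are plain Jinja2, so their rendering is easy to check in isolation. A standalone snippet (not from the package) exercising the User Metadata block:

```python
from jinja2 import Template

# Excerpt of the User Metadata section from the system prompt hunk above.
section = """\
{% if user_metadata and user_metadata|length > 0 %}
# User Information
{%- for key, value in user_metadata.items() %}
- {{ key.replace('_', ' ').title() }}: {{ value }}
{%- endfor %}
{%- endif %}"""

rendered = Template(section).render(
    user_metadata={"department": "Engineering", "role": "Developer"}
)
print(rendered)
# Prints "# User Information" followed by one bullet per key, e.g.
#   - Department: Engineering
#   - Role: Developer
```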
@@ -52,13 +70,13 @@ This tool is called {{ tool_description.display_name }} by the user.
 {%- set tool_messages = {
     'automations': {'display': 'automations', 'message': 'Cannot create reminders, recurring tasks, or scheduled prompts.'},
     'canmore': {'display': 'canmore', 'message': 'Cannot create or edit documents/canvas for writing or coding.'},
-    'InternalSearch': {'display': 'file_search', 'message': 'Cannot search across internal company sources (Google Drive, Slack, Notion, etc.).'},
+    'InternalSearch': {'display': 'Internal Search', 'message': 'Cannot search across internal company sources. If you cannot force it in this space, ask an admin to activate the tool.'},
     'gcal': {'display': 'gcal (Google Calendar)', 'message': 'Cannot show or search calendar events.'},
     'gcontacts': {'display': 'gcontacts (Google Contacts)', 'message': 'Cannot look up or retrieve contact information.'},
     'gmail': {'display': 'gmail', 'message': 'Cannot search, read, or summarize emails.'},
     'image_gen': {'display': 'image_gen', 'message': 'Cannot generate or edit images.'},
     'python': {'display': 'python', 'message': 'Cannot analyze data, process files, generate charts, or create/export different file formats.'},
-    'WebSearch': {'display': 'web', 'message': 'Cannot perform live web searches, fetch fresh news, or look up real-time information.'},
+    'WebSearch': {'display': 'Web Search', 'message': 'Cannot perform live web searches, fetch fresh news, or look up real-time information. If you cannot force it in this space, ask an admin to activate the tool.'},
     'recording_knowledge': {'display': 'recording_knowledge', 'message': 'Cannot access or summarize meeting transcripts from ChatGPT Record Mode.'}
 } -%}
 {%- set ns = namespace(any=false) -%}
@@ -73,7 +91,12 @@ This tool is called {{ tool_description.display_name }} by the user.
 - {{ meta.message }}
 {%- endif -%}
 {%- endfor -%}
-{%- endif -%}
+{%- endif %}
+
+{% if use_sub_agent_references and sub_agent_referencing_instructions | length > 0 %}
+# Special Instructions: Referencing Sub Agents Answers
+{{ sub_agent_referencing_instructions }}
+{%- endif %}
 
 {# Answer Style Section #}
 # Answer Style
@@ -6,13 +6,18 @@
 - date_string: The current date in formatted string
 - mcp_server_user_prompts: List of unique server-wide user prompts from MCP servers
 - tool_descriptions_with_user_prompts: List of UniqueToolDescription objects with user prompts
-#}{{ query }}
+#}
+# User Query
+{{ query }}
 
-{%- if mcp_server_user_prompts and mcp_server_user_prompts|length > 0 %}
+{% if use_sub_agent_references and sub_agent_referencing_instructions | length > 0 %}
+# Sub Agents Referencing Reminder
+{{ sub_agent_referencing_instructions }}
+{%- endif %}
 
-## MCP Server Context
+{%- if mcp_server_user_prompts and mcp_server_user_prompts|length > 0 %}
+# MCP Server Context
 {%- for server_prompt in mcp_server_user_prompts %}
-
 {{ server_prompt }}
 {%- endfor %}
 {%- endif %}
@@ -0,0 +1,259 @@
+from unittest.mock import MagicMock
+
+import pytest
+
+
+class TestGetFilteredUserMetadata:
+    """Test suite for UniqueAI._get_filtered_user_metadata method"""
+
+    @pytest.fixture
+    def mock_unique_ai(self):
+        """Create a minimal UniqueAI instance with mocked dependencies"""
+        # Lazy import to avoid heavy dependencies at module import time
+        from unique_orchestrator.unique_ai import UniqueAI
+
+        mock_logger = MagicMock()
+
+        # Create minimal event structure
+        dummy_event = MagicMock()
+        dummy_event.payload.assistant_message.id = "assist_1"
+        dummy_event.payload.user_message.text = "query"
+
+        # Create minimal config structure
+        mock_config = MagicMock()
+        mock_config.agent.prompt_config.user_metadata = []
+
+        # Create minimal required dependencies
+        mock_chat_service = MagicMock()
+        mock_content_service = MagicMock()
+        mock_debug_info_manager = MagicMock()
+        mock_reference_manager = MagicMock()
+        mock_thinking_manager = MagicMock()
+        mock_tool_manager = MagicMock()
+        mock_history_manager = MagicMock()
+        mock_evaluation_manager = MagicMock()
+        mock_postprocessor_manager = MagicMock()
+        mock_streaming_handler = MagicMock()
+        mock_message_step_logger = MagicMock()
+
+        # Instantiate UniqueAI
+        ua = UniqueAI(
+            logger=mock_logger,
+            event=dummy_event,
+            config=mock_config,
+            chat_service=mock_chat_service,
+            content_service=mock_content_service,
+            debug_info_manager=mock_debug_info_manager,
+            streaming_handler=mock_streaming_handler,
+            reference_manager=mock_reference_manager,
+            thinking_manager=mock_thinking_manager,
+            tool_manager=mock_tool_manager,
+            history_manager=mock_history_manager,
+            evaluation_manager=mock_evaluation_manager,
+            postprocessor_manager=mock_postprocessor_manager,
+            message_step_logger=mock_message_step_logger,
+            mcp_servers=[],
+        )
+
+        return ua
+
+    def test_returns_empty_dict_when_config_is_empty_list(self, mock_unique_ai):
+        """Test that an empty dict is returned when config.user_metadata is an empty list"""
+        mock_unique_ai._config.agent.prompt_config.user_metadata = []
+        mock_unique_ai._event.payload.user_metadata = {
+            "department": "Engineering",
+            "role": "Developer",
+        }
+
+        result = mock_unique_ai._get_filtered_user_metadata()
+
+        assert result == {}
+        assert isinstance(result, dict)
+
+    def test_returns_empty_dict_when_user_metadata_is_none(self, mock_unique_ai):
+        """Test that an empty dict is returned when user_metadata is None"""
+        mock_unique_ai._config.agent.prompt_config.user_metadata = [
+            "department",
+            "role",
+        ]
+        mock_unique_ai._event.payload.user_metadata = None
+
+        result = mock_unique_ai._get_filtered_user_metadata()
+
+        assert result == {}
+        assert isinstance(result, dict)
+
+    def test_returns_empty_dict_when_both_config_and_metadata_are_empty(
+        self, mock_unique_ai
+    ):
+        """Test that an empty dict is returned when both config and user_metadata are empty/None"""
+        mock_unique_ai._config.agent.prompt_config.user_metadata = []
+        mock_unique_ai._event.payload.user_metadata = None
+
+        result = mock_unique_ai._get_filtered_user_metadata()
+
+        assert result == {}
+        assert isinstance(result, dict)
+
+    def test_filters_metadata_to_include_only_configured_keys(self, mock_unique_ai):
+        """Test that only keys specified in config are included in the result"""
+        mock_unique_ai._config.agent.prompt_config.user_metadata = [
+            "department",
+            "role",
+        ]
+        mock_unique_ai._event.payload.user_metadata = {
+            "department": "Engineering",
+            "role": "Developer",
+            "location": "San Francisco",
+            "salary": "100000",
+        }
+
+        result = mock_unique_ai._get_filtered_user_metadata()
+
+        assert result == {"department": "Engineering", "role": "Developer"}
+        assert "location" not in result
+        assert "salary" not in result
+        # Verify all values are strings
+        assert all(isinstance(v, str) for v in result.values())
+
+    def test_returns_only_existing_keys_from_user_metadata(self, mock_unique_ai):
+        """Test that keys in config but not in user_metadata are not included"""
+        mock_unique_ai._config.agent.prompt_config.user_metadata = [
+            "department",
+            "role",
+            "team",
+            "manager",
+        ]
+        mock_unique_ai._event.payload.user_metadata = {
+            "department": "Engineering",
+            "role": "Developer",
+        }
+
+        result = mock_unique_ai._get_filtered_user_metadata()
+
+        assert result == {"department": "Engineering", "role": "Developer"}
+        assert "team" not in result
+        assert "manager" not in result
+
+    def test_handles_single_key_in_config(self, mock_unique_ai):
+        """Test filtering with a single key in config"""
+        mock_unique_ai._config.agent.prompt_config.user_metadata = ["department"]
+        mock_unique_ai._event.payload.user_metadata = {
+            "department": "Engineering",
+            "role": "Developer",
+        }
+
+        result = mock_unique_ai._get_filtered_user_metadata()
+
+        assert result == {"department": "Engineering"}
+        assert isinstance(result["department"], str)
+
+    def test_handles_string_values(self, mock_unique_ai):
+        """Test that string values in user_metadata are preserved"""
+        mock_unique_ai._config.agent.prompt_config.user_metadata = [
+            "name",
+            "email",
+            "department",
+            "title",
+        ]
+        mock_unique_ai._event.payload.user_metadata = {
+            "name": "John Doe",
+            "email": "john.doe@example.com",
+            "department": "Engineering",
+            "title": "Senior Developer",
+            "ignored": "This should not appear",
+        }
+
+        result = mock_unique_ai._get_filtered_user_metadata()
+
+        assert result == {
+            "name": "John Doe",
+            "email": "john.doe@example.com",
+            "department": "Engineering",
+            "title": "Senior Developer",
+        }
+        assert "ignored" not in result
+        # Verify all values are strings
+        assert all(isinstance(v, str) for v in result.values())
+
+    def test_handles_empty_dict_user_metadata(self, mock_unique_ai):
+        """Test behavior when user_metadata is an empty dict"""
+        mock_unique_ai._config.agent.prompt_config.user_metadata = ["department"]
+        mock_unique_ai._event.payload.user_metadata = {}
+
+        result = mock_unique_ai._get_filtered_user_metadata()
+
+        assert result == {}
+
+    def test_handles_empty_string_values(self, mock_unique_ai):
+        """Test that empty string values in user_metadata are preserved if key is in config"""
+        mock_unique_ai._config.agent.prompt_config.user_metadata = [
+            "department",
+            "role",
+        ]
+        mock_unique_ai._event.payload.user_metadata = {
+            "department": "Engineering",
+            "role": "",
+        }
+
+        result = mock_unique_ai._get_filtered_user_metadata()
+
+        assert result == {"department": "Engineering", "role": ""}
+        assert isinstance(result["role"], str)
+
+    def test_preserves_original_metadata_unchanged(self, mock_unique_ai):
+        """Test that the original user_metadata dict is not modified"""
+        original_metadata = {
+            "department": "Engineering",
+            "role": "Developer",
+            "location": "San Francisco",
+        }
+        mock_unique_ai._config.agent.prompt_config.user_metadata = ["department"]
+        mock_unique_ai._event.payload.user_metadata = original_metadata.copy()
+
+        result = mock_unique_ai._get_filtered_user_metadata()
+
+        # Original should still have all keys
+        assert mock_unique_ai._event.payload.user_metadata == original_metadata
+        # Result should only have filtered key
+        assert result == {"department": "Engineering"}
+
+    def test_handles_special_characters_in_values(self, mock_unique_ai):
+        """Test that special characters in string values are preserved"""
+        mock_unique_ai._config.agent.prompt_config.user_metadata = [
+            "description",
+            "notes",
+        ]
+        mock_unique_ai._event.payload.user_metadata = {
+            "description": "User with special chars: @#$%^&*()",
+            "notes": "Multi-line\ntext\twith\ttabs",
+            "other": "excluded",
+        }
+
+        result = mock_unique_ai._get_filtered_user_metadata()
+
+        assert result == {
+            "description": "User with special chars: @#$%^&*()",
+            "notes": "Multi-line\ntext\twith\ttabs",
+        }
+        assert all(isinstance(v, str) for v in result.values())
+
+    def test_return_type_is_dict_str_str(self, mock_unique_ai):
+        """Test that return type is dict[str, str]"""
+        mock_unique_ai._config.agent.prompt_config.user_metadata = [
+            "department",
+            "role",
+        ]
+        mock_unique_ai._event.payload.user_metadata = {
+            "department": "Engineering",
+            "role": "Developer",
+        }
+
+        result = mock_unique_ai._get_filtered_user_metadata()
+
+        # Check it's a dict
+        assert isinstance(result, dict)
+        # Check all keys are strings
+        assert all(isinstance(k, str) for k in result.keys())
+        # Check all values are strings
+        assert all(isinstance(v, str) for v in result.values())
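Read together, the assertions pin down the method's contract: keys come from `config.agent.prompt_config.user_metadata`, values from `event.payload.user_metadata`, keys missing from the payload are skipped, and a `None` payload yields `{}`. A hedged reconstruction of what the implementation plausibly looks like, inferred from the tests rather than taken from the package source:

```python
def _get_filtered_user_metadata(self) -> dict[str, str]:
    """Inferred sketch: keep only configured keys that exist in the event metadata."""
    allowed = self._config.agent.prompt_config.user_metadata
    metadata = self._event.payload.user_metadata or {}
    # Builds a new dict, so the original event payload is left untouched.
    return {key: metadata[key] for key in allowed if key in metadata}
```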