unique_orchestrator 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of unique_orchestrator might be problematic. Click here for more details.
- unique_orchestrator/config.py +505 -0
- unique_orchestrator/prompts/generic_reference_prompt.jinja2 +46 -0
- unique_orchestrator/prompts/system_prompt.jinja2 +141 -0
- unique_orchestrator/prompts/user_message_prompt.jinja2 +18 -0
- unique_orchestrator/tests/test_config.py +115 -0
- unique_orchestrator/tests/test_unique_ai_reference_order.py +127 -0
- unique_orchestrator/unique_ai.py +375 -0
- unique_orchestrator/unique_ai_builder.py +178 -0
- unique_orchestrator-0.0.1.dist-info/LICENSE +1 -0
- unique_orchestrator-0.0.1.dist-info/METADATA +39 -0
- unique_orchestrator-0.0.1.dist-info/RECORD +12 -0
- unique_orchestrator-0.0.1.dist-info/WHEEL +4 -0
|
@@ -0,0 +1,505 @@
|
|
|
1
|
+
from enum import StrEnum
|
|
2
|
+
from pathlib import Path
|
|
3
|
+
from typing import Annotated, Any, Generic, Literal, TypeVar
|
|
4
|
+
|
|
5
|
+
from pydantic import BaseModel, Field, ValidationInfo, field_validator
|
|
6
|
+
from unique_deep_research.config import DeepResearchToolConfig
|
|
7
|
+
from unique_deep_research.service import DeepResearchTool
|
|
8
|
+
from unique_follow_up_questions.config import FollowUpQuestionsConfig
|
|
9
|
+
from unique_internal_search.config import InternalSearchConfig
|
|
10
|
+
from unique_internal_search.service import InternalSearchTool
|
|
11
|
+
from unique_stock_ticker.config import StockTickerConfig
|
|
12
|
+
from unique_toolkit._common.default_language_model import DEFAULT_GPT_4o
|
|
13
|
+
from unique_toolkit._common.validators import (
|
|
14
|
+
LMI,
|
|
15
|
+
ClipInt,
|
|
16
|
+
get_LMI_default_field,
|
|
17
|
+
)
|
|
18
|
+
from unique_toolkit.evals.hallucination.constants import HallucinationConfig
|
|
19
|
+
from unique_toolkit.evals.schemas import EvaluationMetricName
|
|
20
|
+
from unique_toolkit.history_manager.history_manager import (
|
|
21
|
+
UploadedContentConfig,
|
|
22
|
+
)
|
|
23
|
+
from unique_toolkit.language_model import LanguageModelName
|
|
24
|
+
from unique_toolkit.language_model.infos import (
|
|
25
|
+
LanguageModelInfo,
|
|
26
|
+
)
|
|
27
|
+
from unique_toolkit.tools.config import get_configuration_dict
|
|
28
|
+
from unique_toolkit.tools.factory import ToolFactory
|
|
29
|
+
from unique_toolkit.tools.schemas import BaseToolConfig
|
|
30
|
+
from unique_toolkit.tools.tool import ToolBuildConfig
|
|
31
|
+
from unique_web_search.config import WebSearchConfig
|
|
32
|
+
from unique_web_search.service import WebSearchTool
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
class SpaceType(StrEnum):
    """Discriminator for the kind of space a configuration describes."""

    UNIQUE_CUSTOM = "unique_custom"
    UNIQUE_AI = "unique_ai"
    UNIQUE_TRANSLATION = "unique_translation"
    # NOTE(review): the empty string value looks accidental — confirm it is
    # not meant to be "unique_magic_table".
    UNIQUE_MAGIC_TABLE = ""


# Type variable bounded to SpaceType; lets SpaceConfigBase subclasses pin
# their own literal space type for the discriminator field.
T = TypeVar("T", bound=SpaceType)
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
class SpaceConfigBase(BaseModel, Generic[T]):
    """Base class for space configuration.

    Holds the settings shared by every space type: project metadata, the
    language model, admin-provided custom instructions, and the tool set.
    Subclasses narrow ``type`` to a concrete ``SpaceType`` literal.
    """

    model_config = get_configuration_dict(frozen=True)
    # Discriminator field; subclasses pin this to a Literal member.
    type: T = Field(description="The type of the space.")

    project_name: str = Field(
        default="Unique AI",
        description="The project name as obtained from spaces 2.0",
    )

    language_model: LMI = get_LMI_default_field(DEFAULT_GPT_4o)

    custom_instructions: str = Field(
        default="",
        description="A custom instruction provided by the system admin.",
    )

    # Default tool set: internal search (uploads excluded), web search and
    # deep research. Pydantic deep-copies the default per instance.
    tools: list[ToolBuildConfig] = Field(
        default=[
            ToolBuildConfig(
                name=InternalSearchTool.name,
                configuration=InternalSearchConfig(
                    exclude_uploaded_files=True,
                ),
            ),
            ToolBuildConfig(
                name=WebSearchTool.name,
                configuration=WebSearchConfig(),
            ),
            ToolBuildConfig(
                name=DeepResearchTool.name,
                configuration=DeepResearchToolConfig(),
            ),
        ],
    )

    @field_validator("tools", mode="after")
    @classmethod
    def set_input_context_size(
        cls, tools: list[ToolBuildConfig], info: ValidationInfo
    ) -> list[ToolBuildConfig]:
        """Propagate the space's language-model input-token limit to the search tools.

        Mutates the internal-search and web-search tool configurations so
        their token budget matches the configured model.
        """
        # ``language_model`` is only present in ``info.data`` when it
        # validated successfully; if it failed, pydantic already reports
        # that error, so just pass the tools through unchanged.
        model = info.data.get("language_model")
        if model is None:
            return tools
        max_input_tokens = model.token_limits.token_limit_input
        # Both search tools get the same limit (previously two identical
        # branches).
        for tool in tools:
            if tool.name in (InternalSearchTool.name, WebSearchTool.name):
                tool.configuration.language_model_max_input_tokens = (  # type: ignore
                    max_input_tokens
                )
        return tools
|
97
|
+
|
|
98
|
+
|
|
99
|
+
class UniqueAISpaceConfig(SpaceConfigBase):
    """Contains configuration for the entities that a space provides."""

    # Pin the discriminator to the Unique AI space type.
    type: Literal[SpaceType.UNIQUE_AI] = SpaceType.UNIQUE_AI


# Resolve forward/generic references now that the subclass is defined.
UniqueAISpaceConfig.model_rebuild()
|
|
106
|
+
|
|
107
|
+
# Upper bounds used to clip user-configured loop settings (see the ClipInt
# annotations below).
LIMIT_LOOP_ITERATIONS = 50
LIMIT_MAX_TOOL_CALLS_PER_ITERATION = 50
|
|
109
|
+
|
|
110
|
+
|
|
111
|
+
class LoopConfiguration(BaseModel):
    """Settings for a single agent loop iteration."""

    model_config = get_configuration_dict()

    # Clipped to [1, LIMIT_MAX_TOOL_CALLS_PER_ITERATION]; per the system
    # prompt, tool calls beyond this limit are ignored by the loop.
    max_tool_calls_per_iteration: Annotated[
        int,
        *ClipInt(min_value=1, max_value=LIMIT_MAX_TOOL_CALLS_PER_ITERATION),
    ] = 10
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
class EvaluationConfig(BaseModel):
    """Settings for answer evaluation (review loop and hallucination check)."""

    model_config = get_configuration_dict()
    # Maximum number of review passes over an answer; 0 disables the loop.
    max_review_steps: int = 3
    hallucination_config: HallucinationConfig = HallucinationConfig()
|
|
124
|
+
|
|
125
|
+
|
|
126
|
+
class LoopAgentTokenLimitsConfig(BaseModel):
    """Token-budget settings for the loop agent."""

    model_config = get_configuration_dict()

    # Model whose input-token limit the history budget is derived from.
    language_model: LMI = LanguageModelInfo.from_name(
        LanguageModelName.AZURE_GPT_4o_2024_1120
    )

    percent_of_max_tokens_for_history: float = Field(
        default=0.2,
        ge=0.0,
        lt=1.0,
        description="The fraction of the max input tokens that will be reserved for the history.",
    )

    @property
    def max_history_tokens(self) -> int:
        """Absolute token budget for history, derived from the model's input limit."""
        return int(
            self.language_model.token_limits.token_limit_input
            * self.percent_of_max_tokens_for_history,
        )
|
|
146
|
+
|
|
147
|
+
|
|
148
|
+
class SearchAgentConfig(BaseModel):
    """Configure the search agent.

    Flat, legacy configuration; the migration helpers below treat this as
    the "old" format and convert it into the nested ``UniqueAIConfig``.
    """

    model_config = get_configuration_dict(frozen=True)

    language_model: LMI = LanguageModelInfo.from_name(DEFAULT_GPT_4o)

    # History gets 60% of the input budget by default for this agent.
    token_limits: LoopAgentTokenLimitsConfig = Field(
        default=LoopAgentTokenLimitsConfig(percent_of_max_tokens_for_history=0.6)
    )
    temperature: float = 0.0
    additional_llm_options: dict[str, Any] = Field(
        default={},
        description="Additional options to pass to the language model.",
    )

    # Space 2.0
    # NOTE(review): "optained" in the description is a typo for "obtained";
    # left unchanged here because the text is part of the emitted JSON schema.
    project_name: str = Field(
        default="Unique AI",
        description="The project name as optained from spaces 2.0",
    )

    # Space 2.0
    custom_instructions: str = Field(
        default="",
        description="A custom instruction provided by the system admin.",
    )

    # Whether intermediate "thinking" steps are shown to the user.
    thinking_steps_display: bool = False

    ##############################
    ### General Configurations
    ##############################
    # Clipped to [1, LIMIT_LOOP_ITERATIONS].
    max_loop_iterations: Annotated[
        int, *ClipInt(min_value=1, max_value=LIMIT_LOOP_ITERATIONS)
    ] = 8

    loop_configuration: LoopConfiguration = LoopConfiguration()

    # Default tool set mirrors SpaceConfigBase.tools.
    tools: list[ToolBuildConfig] = Field(
        default=[
            ToolBuildConfig(
                name=InternalSearchTool.name,
                configuration=InternalSearchConfig(
                    exclude_uploaded_files=True,
                ),
            ),
            ToolBuildConfig(
                name=WebSearchTool.name,
                configuration=WebSearchConfig(),
            ),
            ToolBuildConfig(
                name=DeepResearchTool.name,
                configuration=DeepResearchToolConfig(),
            ),
        ],
    )

    # Default templates are read eagerly from the bundled prompts directory.
    system_prompt_template: str = Field(
        default_factory=lambda: (
            Path(__file__).parent / "prompts" / "system_prompt.jinja2"
        ).read_text(),
        description="The system prompt template as a Jinja2 template string.",
    )

    user_message_prompt_template: str = Field(
        default_factory=lambda: (
            Path(__file__).parent / "prompts" / "user_message_prompt.jinja2"
        ).read_text(),
        description="The user message prompt template as a Jinja2 template string.",
    )

    ##############################
    ### Follow-up Questions
    ##############################
    follow_up_questions_config: FollowUpQuestionsConfig = FollowUpQuestionsConfig()

    ##############################
    ### Evaluation
    ##############################
    # Review loop disabled by default (max_review_steps=0).
    evaluation_config: EvaluationConfig = EvaluationConfig(
        hallucination_config=HallucinationConfig(),
        max_review_steps=0,
    )

    ##############################
    ### Stock Ticker
    ##############################
    stock_ticker_config: StockTickerConfig = StockTickerConfig()

    # TODO: generalize this; there should only be 1 point in the code where we do the tool check.
    def get_tool_config(self, tool: str) -> BaseToolConfig:
        """Get the tool configuration by name (delegates to ToolFactory)."""
        return ToolFactory.build_tool_config(tool)

    # TODO: @gustavhartz, the Hallucination check should be triggered if enabled and the answer contains references.
    force_checks_on_stream_response_references: list[EvaluationMetricName] = Field(
        default=[EvaluationMetricName.HALLUCINATION],
        description="A list of checks to force on references. This is used to add hallucination check to references without new tool calls.",
    )

    uploaded_content_config: UploadedContentConfig = Field(
        default_factory=UploadedContentConfig,
        description="The uploaded content config.",
    )
|
|
253
|
+
|
|
254
|
+
|
|
255
|
+
# ------------------------------------------------------------
|
|
256
|
+
# Space 2.0 Config
|
|
257
|
+
# ------------------------------------------------------------
|
|
258
|
+
|
|
259
|
+
|
|
260
|
+
class UniqueAIPromptConfig(BaseModel):
    """Jinja2 prompt templates used by the Unique AI agent.

    Defaults are loaded eagerly from the bundled ``prompts`` directory.
    """

    model_config = get_configuration_dict(frozen=True)

    system_prompt_template: str = Field(
        default_factory=lambda: (
            Path(__file__).parent / "prompts" / "system_prompt.jinja2"
        ).read_text(),
        description="The system prompt template as a Jinja2 template string.",
    )

    user_message_prompt_template: str = Field(
        default_factory=lambda: (
            Path(__file__).parent / "prompts" / "user_message_prompt.jinja2"
        ).read_text(),
        description="The user message prompt template as a Jinja2 template string.",
    )
|
|
276
|
+
|
|
277
|
+
|
|
278
|
+
# ``None`` annotated for schema purposes: used as the "disabled" branch of
# the optional service unions below, surfacing as "Deactivated" in the
# generated JSON schema / admin UI.
DeactivatedNone = Annotated[
    None,
    Field(title="Deactivated", description="None"),
]
|
|
282
|
+
|
|
283
|
+
|
|
284
|
+
class UniqueAIServices(BaseModel):
    """Determine the services the agent is using.

    All services are optional and can be disabled by setting them to None.
    """

    model_config = get_configuration_dict(frozen=True)

    # Each service is either an "Active" config or DeactivatedNone; the
    # union branches appear as Active/Deactivated in the schema.
    follow_up_questions_config: (
        Annotated[
            FollowUpQuestionsConfig,
            Field(
                title="Active",
            ),
        ]
        | DeactivatedNone
    ) = FollowUpQuestionsConfig()

    stock_ticker_config: (
        Annotated[StockTickerConfig, Field(title="Active")] | DeactivatedNone
    ) = StockTickerConfig()

    # Active by default, but with the review loop disabled
    # (max_review_steps=0).
    evaluation_config: (
        Annotated[
            EvaluationConfig,
            Field(title="Active"),
        ]
        | DeactivatedNone
    ) = EvaluationConfig(
        hallucination_config=HallucinationConfig(),
        max_review_steps=0,
    )

    uploaded_content_config: UploadedContentConfig = UploadedContentConfig()
|
|
318
|
+
|
|
319
|
+
|
|
320
|
+
class InputTokenDistributionConfig(BaseModel):
    """How the model's input-token budget is split between prompt parts."""

    model_config = get_configuration_dict(frozen=True)

    percent_for_history: float = Field(
        default=0.2,
        ge=0.0,
        lt=1.0,
        description="The fraction of the max input tokens that will be reserved for the history.",
    )

    def max_history_tokens(self, max_input_token: int) -> int:
        """Return the absolute history budget for a model allowing ``max_input_token`` input tokens."""
        return int(self.percent_for_history * max_input_token)
|
|
332
|
+
|
|
333
|
+
|
|
334
|
+
class ExperimentalConfig(BaseModel):
    """Experimental features; this part of the configuration might evolve continuously."""

    model_config = get_configuration_dict(frozen=True)

    # Whether intermediate "thinking" steps are shown to the user.
    thinking_steps_display: bool = False

    # TODO: @gustavhartz, the Hallucination check should be triggered if enabled and the answer contains references.
    force_checks_on_stream_response_references: list[EvaluationMetricName] = Field(
        default=[EvaluationMetricName.HALLUCINATION],
        description="A list of checks to force on references. This is used to add hallucination check to references without new tool calls.",
    )

    # TODO: The temperature should be used via the additional_llm_options;
    # the additional_llm_options might eventually move closer to the LanguageModelInfo.
    # NOTE(review): le=10.0 is unusually permissive — most chat APIs cap
    # temperature at 2.0; confirm the intended upper bound.
    temperature: float = Field(
        default=0.0,
        ge=0.0,
        le=10.0,
        description="The temperature to use for the LLM.",
    )

    additional_llm_options: dict[str, Any] = Field(
        default={},
        description="Additional options to pass to the LLM.",
    )

    # Tighter default than LoopConfiguration's 10.
    loop_configuration: LoopConfiguration = LoopConfiguration(
        max_tool_calls_per_iteration=5
    )
|
|
364
|
+
|
|
365
|
+
|
|
366
|
+
class UniqueAIAgentConfig(BaseModel):
    """Agent-level settings: loop bounds, token distribution, prompts, services."""

    model_config = get_configuration_dict(frozen=True)

    # Hard stop for the agent loop (plain int here, unlike the clipped
    # field on SearchAgentConfig).
    max_loop_iterations: int = 8

    input_token_distribution: InputTokenDistributionConfig = Field(
        default=InputTokenDistributionConfig(),
        description="The distribution of the input tokens.",
    )

    prompt_config: UniqueAIPromptConfig = UniqueAIPromptConfig()

    services: UniqueAIServices = UniqueAIServices()

    experimental: ExperimentalConfig = ExperimentalConfig()
|
|
381
|
+
|
|
382
|
+
|
|
383
|
+
class UniqueAIConfig(BaseModel):
    """Top-level configuration: the space definition plus the agent settings."""

    model_config = get_configuration_dict(frozen=True)

    space: UniqueAISpaceConfig = UniqueAISpaceConfig()

    agent: UniqueAIAgentConfig = UniqueAIAgentConfig()
|
|
389
|
+
|
|
390
|
+
|
|
391
|
+
# ---
|
|
392
|
+
# Configuration adapter SearchAgentConfig -> UniqueAISpaceConfig
|
|
393
|
+
# --
|
|
394
|
+
|
|
395
|
+
|
|
396
|
+
def search_agent_config_to_unique_ai_space_config(
    search_agent_config: SearchAgentConfig,
) -> UniqueAIConfig:
    """Convert a legacy flat ``SearchAgentConfig`` into the nested ``UniqueAIConfig``.

    Pure field-mapping adapter: each legacy field is copied into the
    corresponding space/agent sub-config; no values are transformed.
    """
    # Space-level settings (project metadata, instructions, tools, model).
    space = UniqueAISpaceConfig(
        project_name=search_agent_config.project_name,
        custom_instructions=search_agent_config.custom_instructions,
        tools=search_agent_config.tools,
        language_model=search_agent_config.language_model,
        type=SpaceType.UNIQUE_AI,
    )

    prompt_config = UniqueAIPromptConfig(
        system_prompt_template=search_agent_config.system_prompt_template,
        user_message_prompt_template=search_agent_config.user_message_prompt_template,
    )

    services = UniqueAIServices(
        follow_up_questions_config=search_agent_config.follow_up_questions_config,
        evaluation_config=search_agent_config.evaluation_config,
        stock_ticker_config=search_agent_config.stock_ticker_config,
        uploaded_content_config=search_agent_config.uploaded_content_config,
    )

    experimental = ExperimentalConfig(
        thinking_steps_display=search_agent_config.thinking_steps_display,
        force_checks_on_stream_response_references=search_agent_config.force_checks_on_stream_response_references,
        temperature=search_agent_config.temperature,
        additional_llm_options=search_agent_config.additional_llm_options,
        loop_configuration=search_agent_config.loop_configuration,
    )

    # Calculate remaining token percentages based on history percentage

    history_percent = search_agent_config.token_limits.percent_of_max_tokens_for_history

    agent = UniqueAIAgentConfig(
        max_loop_iterations=search_agent_config.max_loop_iterations,
        input_token_distribution=InputTokenDistributionConfig(
            percent_for_history=history_percent,
        ),
        prompt_config=prompt_config,
        services=services,
        experimental=experimental,
    )

    return UniqueAIConfig(
        space=space,
        agent=agent,
    )
|
|
445
|
+
|
|
446
|
+
|
|
447
|
+
def needs_conversion_to_unique_ai_space_config(
    configuration: dict[str, Any],
) -> bool:
    """Check if the configuration needs to be converted to the new UniqueAISpaceConfig.

    A configuration is already in the new format when it carries one of the
    new-format marker keys, or both the "space" and "agent" sections.
    """
    already_new_format = (
        "space_two_point_zero" in configuration
        or "SpaceTwoPointZeroConfig" in configuration
        or {"space", "agent"} <= configuration.keys()
    )
    return not already_new_format
|
|
459
|
+
|
|
460
|
+
|
|
461
|
+
if __name__ == "__main__":
    # Developer utility: regenerate the JSON schema artifacts consumed by
    # the configuration migration in the node chat backend.
    import json

    from unique_toolkit._common.utils.write_configuration import (
        write_service_configuration,
    )

    write_service_configuration(
        service_folderpath=Path(__file__).parent.parent,
        write_folderpath=Path(__file__).parent,
        config=UniqueAIConfig(),
        sub_name="unique_ai_config",
    )

    # TODO: @cdkl Delete these models
    # This model is only used to have the old and new models in the same json
    # schema for the data migration in the node chat backend

    # The types can be generated with quicktype.io with the following command:
    # quicktype unique_ai_old_and_new_config.json \
    #     --src-lang schema --lang typescript \
    #     --just-types --prefer-types --explicit-unions \
    #     -o unique_ai_old_new_configuration.ts \
    #     --top-level UniqueAIOldAndNewConfig \
    #     --raw-type any

    # You will need to replace the `any` type with `unknown` in the generated file.
    # On the branch `feat/unique-ai-configuration-migration-node-chat-part`.
    # If you further update the types you will need to adapt both branches
    # - feat/unique-ai-configuration-migration-next-admin-part
    # - feat/unique-ai-configuration-migration-node-chat-part

    class UniqueAIOldAndNewConfig(BaseModel):
        # Wrapper pairing the new and old config models in one schema document.
        new: UniqueAIConfig = UniqueAIConfig()
        old: SearchAgentConfig = SearchAgentConfig()

    with open(
        Path(__file__).parent / "unique_ai_old_and_new_config.json",
        "w",
    ) as f:
        json.dump(
            UniqueAIOldAndNewConfig().model_json_schema(by_alias=True),
            f,
            indent=4,
        )
|
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
# Generic Reference Guidelines - ONLY FOLLOW IF NOTHING ELSE APPLICABLE FOR THE TOOL
|
|
2
|
+
Whenever you use information retrieved with a tool, you must adhere to strict reference guidelines. You must strictly reference each fact used with the `source_number` of the corresponding passage, in the following format: '[source<source_number>]'.
|
|
3
|
+
|
|
4
|
+
Example:
|
|
5
|
+
- The stock price of Apple Inc. is $150 [source0] and the company's revenue increased by 10% [source1].
|
|
6
|
+
- Moreover, the company's market capitalization is $2 trillion [source2][source3].
|
|
7
|
+
- Our internal documents tell us to invest[source4] (Internal)
|
|
8
|
+
|
|
9
|
+
A fact is preferably referenced by ONLY ONE source, e.g [sourceX], which should be the most relevant source for the fact.
|
|
10
|
+
Follow these guidelines closely and be sure to use the proper `source_number` when referencing facts.
|
|
11
|
+
Make sure that your references follow the format [sourceX] and that the source number is correct.
|
|
12
|
+
Source is written in singular form and the number is written in digits.
|
|
13
|
+
|
|
14
|
+
IT IS VERY IMPORTANT TO FOLLOW THESE GUIDELINES!!
|
|
15
|
+
NEVER CITE A source_number THAT YOU DON'T SEE IN THE TOOL CALL RESPONSE!!!
|
|
16
|
+
The source_number in old assistant messages are no longer valid.
|
|
17
|
+
EXAMPLE: If you see [source34] and [source35] in the assistant message, you can't use [source34] again in the next assistant message, this has to be the number you find in the message with role 'tool'.
|
|
18
|
+
BE AWARE: All tool calls have been filtered to remove uncited sources. Tool calls return much more data than you see.
|
|
19
|
+
|
|
20
|
+
### Internal Document Answering Protocol for Employee Questions
|
|
21
|
+
When assisting employees using internal documents, follow
|
|
22
|
+
this structured approach to ensure precise, well-grounded,
|
|
23
|
+
and context-aware responses:
|
|
24
|
+
|
|
25
|
+
#### 1. Locate and Prioritize Relevant Internal Sources
|
|
26
|
+
Give strong preference to:
|
|
27
|
+
- **Most relevant documents**, such as:
|
|
28
|
+
- **Documents authored by or involving** the employee or team in question
|
|
29
|
+
- **Cross-validated sources**, especially when multiple documents agree
|
|
30
|
+
- Project trackers, design docs, decision logs, and OKRs
|
|
31
|
+
- Recently updated or active files
|
|
32
|
+
|
|
33
|
+
#### 2. Source Reliability Guidelines
|
|
34
|
+
- Prioritize information that is:
|
|
35
|
+
- **Directly written by domain experts or stakeholders**
|
|
36
|
+
- **Part of approved or finalized documentation**
|
|
37
|
+
- **Recently modified or reviewed**, if recency matters
|
|
38
|
+
- Be cautious with:
|
|
39
|
+
- Outdated drafts
|
|
40
|
+
- Undocumented opinions or partial records
|
|
41
|
+
|
|
42
|
+
#### 3. Acknowledge Limitations
|
|
43
|
+
- If no relevant information is found, or documents conflict, clearly state this
|
|
44
|
+
- Indicate where further clarification or investigation may be required
|
|
45
|
+
|
|
46
|
+
ALWAYS CITE WHEN YOU REFERENCE INFORMATION FROM THE TOOL CALL RESPONSE!!!
|
|
@@ -0,0 +1,141 @@
|
|
|
1
|
+
{#- System Prompt Section -#}
|
|
2
|
+
# System
|
|
3
|
+
|
|
4
|
+
You are Unique AI Chat, a system based on large language models.
|
|
5
|
+
|
|
6
|
+
**Model name**: {{ model_info.name | default('unknown') }}
|
|
7
|
+
**Knowledge cutoff**: {{ model_info.info_cutoff_at | default('unknown') }}
|
|
8
|
+
**Current date**: {{ date_string }}
|
|
9
|
+
|
|
10
|
+
Over the course of the conversation, you adapt to the user's tone and preference.
|
|
11
|
+
Try to match the user's vibe, tone, and generally how they are speaking. You want the conversation to feel natural.
|
|
12
|
+
You engage in authentic conversation by responding to the information provided, asking relevant questions, and showing genuine curiosity.
|
|
13
|
+
If natural, continue the conversation with casual conversation.
|
|
14
|
+
|
|
15
|
+
# Execution limits
|
|
16
|
+
**Max tools calls**: {{ max_tools_per_iteration }}, Maximum number of tool calls that can be called per iteration, any tool calls beyond this limit will be ignored.
|
|
17
|
+
|
|
18
|
+
{# Tools Section -#}
|
|
19
|
+
{%- if tool_descriptions and tool_descriptions|length > 0 -%}
|
|
20
|
+
|
|
21
|
+
# Tools
|
|
22
|
+
You can use the following tools to fulfill the tasks given by the user and to answer their questions.
|
|
23
|
+
Be mindful when using them: each of them requires time and the user will have to wait.
|
|
24
|
+
|
|
25
|
+
{% for tool_description in tool_descriptions -%}
|
|
26
|
+
{#- The tool name and description should always be available -#}
|
|
27
|
+
## {{ tool_description.name }}
|
|
28
|
+
This tool is called {{ tool_description.display_name }} by the user.
|
|
29
|
+
|
|
30
|
+
{%- if tool_description.mcp_server_name %}
|
|
31
|
+
**MCP Server**: {{ tool_description.mcp_server_name }}
|
|
32
|
+
**Tool Name**: {{ tool_description.name }}
|
|
33
|
+
{%- endif %}
|
|
34
|
+
{{ tool_description.tool_description}}
|
|
35
|
+
|
|
36
|
+
{%- if tool_description.tool_system_prompt and tool_description.tool_system_prompt|length > 0 %}
|
|
37
|
+
|
|
38
|
+
### Tool-Specific Instructions
|
|
39
|
+
{{ tool_description.tool_system_prompt }}
|
|
40
|
+
{%- endif %}
|
|
41
|
+
|
|
42
|
+
{# Include formatting guidelines if result handling instructions are available and the tool is used -#}
|
|
43
|
+
{%- if tool_description.tool_format_information_for_system_prompt and tool_description.tool_format_information_for_system_prompt|length > 0 and tool_description.name in used_tools -%}
|
|
44
|
+
### Formatting guidelines for output of {{ tool_description.display_name }}
|
|
45
|
+
{{ tool_description.tool_format_information_for_system_prompt }}
|
|
46
|
+
{%- endif -%}
|
|
47
|
+
{%- endfor -%}
|
|
48
|
+
{%- endif %}
|
|
49
|
+
|
|
50
|
+
{# Not Activated Tools Section -#}
|
|
51
|
+
{%- set active_tool_names = tool_descriptions|map(attribute='name')|list if tool_descriptions else [] -%}
|
|
52
|
+
{%- set tool_messages = {
|
|
53
|
+
'automations': {'display': 'automations', 'message': 'Cannot create reminders, recurring tasks, or scheduled prompts.'},
|
|
54
|
+
'canmore': {'display': 'canmore', 'message': 'Cannot create or edit documents/canvas for writing or coding.'},
|
|
55
|
+
'InternalSearch': {'display': 'file_search', 'message': 'Cannot search across internal company sources (Google Drive, Slack, Notion, etc.).'},
|
|
56
|
+
'gcal': {'display': 'gcal (Google Calendar)', 'message': 'Cannot show or search calendar events.'},
|
|
57
|
+
'gcontacts': {'display': 'gcontacts (Google Contacts)', 'message': 'Cannot look up or retrieve contact information.'},
|
|
58
|
+
'gmail': {'display': 'gmail', 'message': 'Cannot search, read, or summarize emails.'},
|
|
59
|
+
'image_gen': {'display': 'image_gen', 'message': 'Cannot generate or edit images.'},
|
|
60
|
+
'python': {'display': 'python', 'message': 'Cannot analyze data, process files, generate charts, or create/export different file formats.'},
|
|
61
|
+
'WebSearch': {'display': 'web', 'message': 'Cannot perform live web searches, fetch fresh news, or look up real-time information.'},
|
|
62
|
+
'recording_knowledge': {'display': 'recording_knowledge', 'message': 'Cannot access or summarize meeting transcripts from ChatGPT Record Mode.'}
|
|
63
|
+
} -%}
|
|
64
|
+
{%- set ns = namespace(any=false) -%}
|
|
65
|
+
{%- for key, meta in tool_messages.items() -%}
|
|
66
|
+
{%- if key not in active_tool_names -%}{% set ns.any = true %}{%- endif -%}
|
|
67
|
+
{%- endfor -%}
|
|
68
|
+
{%- if ns.any -%}
|
|
69
|
+
# Not Activated Tools
|
|
70
|
+
{%- for key, meta in tool_messages.items() -%}
|
|
71
|
+
{%- if key not in active_tool_names -%}
|
|
72
|
+
- **{{ meta.display }}**: Not activated.
|
|
73
|
+
- {{ meta.message }}
|
|
74
|
+
{%- endif -%}
|
|
75
|
+
{%- endfor -%}
|
|
76
|
+
{%- endif -%}
|
|
77
|
+
|
|
78
|
+
{# Answer Style Section #}
|
|
79
|
+
# Answer Style
|
|
80
|
+
### 1. Use Markdown for Structure
|
|
81
|
+
- Use ## for primary section headings and ### for subheadings.
|
|
82
|
+
- Apply relevant emojis in headings to enhance friendliness and scannability (e.g., ## π Summary, ### π How It Works).
|
|
83
|
+
- Favor a clean, logical outline β structure answers into well-separated sections.
|
|
84
|
+
|
|
85
|
+
### 2. Text Styling
|
|
86
|
+
- Use **bold** for key concepts, actionable terms, and labels.
|
|
87
|
+
- Use *italics* for emphasis, definitions, side remarks, or nuance.
|
|
88
|
+
- Use `inline code` for technical terms, file paths (/mnt/data/file.csv), commands (git clone), commands, values and parameters (--verbose)
|
|
89
|
+
- Break long text blocks into shorter paragraphs for clarity and flow.
|
|
90
|
+
|
|
91
|
+
### 3. Lists & Step Sequences
|
|
92
|
+
- Use bullet lists - for unordered points.
|
|
93
|
+
- Use numbered lists 1., 2. when describing ordered procedures or sequences.
|
|
94
|
+
- Avoid walls of text β always format complex ideas into digestible segments.
|
|
95
|
+
|
|
96
|
+
### 4. Tables & Visual Aids
|
|
97
|
+
- Where applicable, use Markdown tables for structured comparisons, data summaries, and matrices.
|
|
98
|
+
- When analyzing documents, incorporate insights from rendered images, diagrams, charts, and tables β cite their location (e.g., "See chart on page 2").
|
|
99
|
+
|
|
100
|
+
### 5. Code
|
|
101
|
+
- Use triple backticks <code>```</code> for multiline code blocks, scripts, or config examples.
|
|
102
|
+
- Use single backticks ` for inline code or syntax references.
|
|
103
|
+
|
|
104
|
+
### 6. Contextual Framing
|
|
105
|
+
- Begin complex answers with a high-level summary or framing sentence.
|
|
106
|
+
- Use phrases like "Hereβs a breakdown," "Letβs walk through," or "Key insight:" to guide the user.
|
|
107
|
+
|
|
108
|
+
### 7. Naming Conventions
|
|
109
|
+
- Prefer consistent section headers for common response types (e.g., Summary, Risks, Data, Steps).
|
|
110
|
+
- Consider using emoji prefixes to visually differentiate types (π Data, π‘ Insights, π Sources).
|
|
111
|
+
|
|
112
|
+
### 8. Data Timestamping
|
|
113
|
+
- When presenting data from documents, always include the relevant date or period.
|
|
114
|
+
- Format examples: "As of Q1 2024", "Based on data from February 28, 2025"
|
|
115
|
+
|
|
116
|
+
### 9. Formula Rendering
|
|
117
|
+
- Please identify formulas and return them in latex format
|
|
118
|
+
- Ensure to identify and wrap all formulas between \, [, ...formula... , \, ]. Eg. `\[E = mc^2\]`
|
|
119
|
+
|
|
120
|
+
{#- MCP System Prompts Section #}
|
|
121
|
+
{%- if mcp_server_system_prompts and mcp_server_system_prompts|length > 0 %}
|
|
122
|
+
|
|
123
|
+
# MCP Server Instructions
|
|
124
|
+
|
|
125
|
+
{%- for server_prompt in mcp_server_system_prompts %}
|
|
126
|
+
|
|
127
|
+
{{ server_prompt }}
|
|
128
|
+
{%- endfor %}
|
|
129
|
+
{%- endif -%}
|
|
130
|
+
|
|
131
|
+
{#- Custom instructions #}
|
|
132
|
+
{% if custom_instructions and custom_instructions|length > 0 %}
|
|
133
|
+
# SYSTEM INSTRUCTIONS CONTEXT
|
|
134
|
+
You are operating in the context of a wider project called {{ project_name | default('Unique AI') }}.
|
|
135
|
+
This project uses custom instructions, capabilities and data to optimize Unique AI
|
|
136
|
+
for a more narrow set of tasks.
|
|
137
|
+
|
|
138
|
+
Here are instructions from the user outlining how you should respond:
|
|
139
|
+
|
|
140
|
+
{{ custom_instructions }}
|
|
141
|
+
{%- endif %}
|