unique_orchestrator 0.0.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of unique_orchestrator might be problematic. Click here for more details.

@@ -0,0 +1,10 @@
1
+ # Changelog
2
+
3
+ All notable changes to this project will be documented in this file.
4
+
5
+ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
6
+ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
7
+
8
+
9
+ ## [0.0.1] - 2025-08-18
10
+ - Initial release of `orchestrator`.
@@ -0,0 +1 @@
1
+ `unique_toolkit` is covered by the [`Unique License v1`](https://github.com/Unique-AG/license/releases/tag/unique-license.v1), unless a file header or a nested LICENSE file specifies another license.
@@ -0,0 +1,39 @@
1
+ Metadata-Version: 2.1
2
+ Name: unique_orchestrator
3
+ Version: 0.0.1
4
+ Summary:
5
+ License: Proprietary
6
+ Author: Martin Fadler
7
+ Author-email: martin.fadler@unique.ch
8
+ Requires-Python: >=3.11,<4.0
9
+ Classifier: License :: Other/Proprietary License
10
+ Classifier: Programming Language :: Python :: 3
11
+ Classifier: Programming Language :: Python :: 3.11
12
+ Classifier: Programming Language :: Python :: 3.12
13
+ Requires-Dist: pydantic (>=2.8.2,<3.0.0)
14
+ Requires-Dist: pydantic-settings (>=2.10.1,<3.0.0)
15
+ Requires-Dist: pytest (>=8.4.1,<9.0.0)
16
+ Requires-Dist: python-dotenv (>=1.0.1,<2.0.0)
17
+ Requires-Dist: typing-extensions (>=4.9.0,<5.0.0)
18
+ Requires-Dist: unique-deep-research (>=0.0.10,<0.0.11)
19
+ Requires-Dist: unique-follow-up-questions (>=0.0.4,<0.0.5)
20
+ Requires-Dist: unique-internal-search (==0.0.5)
21
+ Requires-Dist: unique-sdk (>=0.10.0,<0.11.0)
22
+ Requires-Dist: unique-stock-ticker (>=0.0.5,<0.0.6)
23
+ Requires-Dist: unique-toolkit (>=0.8.57,<0.9.0)
24
+ Requires-Dist: unique-web-search (>=0.1.0,<0.2.0)
25
+ Description-Content-Type: text/markdown
26
+
27
+ # Internal Search Tool
28
+
29
+ Internal Search Tool to find documents in the Knowledge Base
30
+ # Changelog
31
+
32
+ All notable changes to this project will be documented in this file.
33
+
34
+ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
35
+ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
36
+
37
+
38
+ ## [0.0.1] - 2025-08-18
39
+ - Initial release of `orchestrator`.
@@ -0,0 +1,3 @@
1
+ # Internal Search Tool
2
+
3
+ Internal Search Tool to find documents in the Knowledge Base
@@ -0,0 +1,55 @@
1
+ [tool.poetry]
2
+ name = "unique_orchestrator"
3
+ version = "0.0.1"
4
+ description = ""
5
+ authors = [
6
+ "Martin Fadler <martin.fadler@unique.ch>",
7
+ "Sadique Sheik <sadique@unique.ch>",
8
+ "Fabian Schläpfer <fabian@unique.ch>",
9
+ "Pascal Hauri <pascal@unique.ch>",
10
+ ]
11
+ readme = ["README.md", "CHANGELOG.md"]
12
+ license = "Proprietary"
13
+
14
+ [tool.poetry.dependencies]
15
+ python = "^3.11"
16
+ typing-extensions = "^4.9.0"
17
+ pydantic = "^2.8.2"
18
+ pydantic-settings = "^2.10.1"
19
+ python-dotenv = "^1.0.1"
20
+ pytest = "^8.4.1"
21
+ unique-sdk = "^0.10.0"
22
+ unique-toolkit = "^0.8.57"
23
+ unique-stock-ticker = "^0.0.5"
24
+ unique-follow-up-questions = "^0.0.4"
25
+ unique-web-search = "^0.1.0"
26
+ unique-deep-research = "^0.0.10"
27
+ unique-internal-search = "0.0.5"
28
+
29
+
30
+ [tool.poetry.group.dev.dependencies]
31
+ ruff = "^0.12.10"
32
+ python = "^3.11"
33
+ typing-extensions = "^4.9.0"
34
+ pydantic = "^2.8.2"
35
+ pydantic-settings = "^2.10.1"
36
+ python-dotenv = "^1.0.1"
37
+ pytest = "^8.4.1"
38
+ unique-sdk = { path = "../unique_sdk" }
39
+ unique-toolkit = { path = "../unique_toolkit", develop = true }
40
+ unique-stock-ticker = { path = "../postprocessors/unique_stock_ticker", develop = true }
41
+ unique-follow-up-questions = { path = "../postprocessors/unique_follow_up_questions", develop = true }
42
+ unique-web-search = { path = "../tool_packages/unique_web_search", develop = true }
43
+ unique-deep-research = { path = "../tool_packages/unique_deep_research", develop = true }
44
+ unique-internal-search = { path = "../tool_packages/unique_internal_search", develop = true }
45
+
46
+
47
+ [build-system]
48
+ requires = ["poetry-core"]
49
+ build-backend = "poetry.core.masonry.api"
50
+
51
+ [tool.ruff]
52
+ target-version = "py311"
53
+
54
+ [tool.ruff.lint]
55
+ extend-select = ["I"]
@@ -0,0 +1,505 @@
1
+ from enum import StrEnum
2
+ from pathlib import Path
3
+ from typing import Annotated, Any, Generic, Literal, TypeVar
4
+
5
+ from pydantic import BaseModel, Field, ValidationInfo, field_validator
6
+ from unique_deep_research.config import DeepResearchToolConfig
7
+ from unique_deep_research.service import DeepResearchTool
8
+ from unique_follow_up_questions.config import FollowUpQuestionsConfig
9
+ from unique_internal_search.config import InternalSearchConfig
10
+ from unique_internal_search.service import InternalSearchTool
11
+ from unique_stock_ticker.config import StockTickerConfig
12
+ from unique_toolkit._common.default_language_model import DEFAULT_GPT_4o
13
+ from unique_toolkit._common.validators import (
14
+ LMI,
15
+ ClipInt,
16
+ get_LMI_default_field,
17
+ )
18
+ from unique_toolkit.evals.hallucination.constants import HallucinationConfig
19
+ from unique_toolkit.evals.schemas import EvaluationMetricName
20
+ from unique_toolkit.history_manager.history_manager import (
21
+ UploadedContentConfig,
22
+ )
23
+ from unique_toolkit.language_model import LanguageModelName
24
+ from unique_toolkit.language_model.infos import (
25
+ LanguageModelInfo,
26
+ )
27
+ from unique_toolkit.tools.config import get_configuration_dict
28
+ from unique_toolkit.tools.factory import ToolFactory
29
+ from unique_toolkit.tools.schemas import BaseToolConfig
30
+ from unique_toolkit.tools.tool import ToolBuildConfig
31
+ from unique_web_search.config import WebSearchConfig
32
+ from unique_web_search.service import WebSearchTool
33
+
34
+
35
class SpaceType(StrEnum):
    """Discriminator for the kind of space a configuration describes."""

    UNIQUE_CUSTOM = "unique_custom"
    UNIQUE_AI = "unique_ai"
    UNIQUE_TRANSLATION = "unique_translation"
    # NOTE(review): empty-string value looks unintentional — siblings use
    # snake_case values (expected "unique_magic_table"?). Confirm before
    # relying on this member for matching.
    UNIQUE_MAGIC_TABLE = ""
40
+
41
+
42
# Type variable for SpaceConfigBase subclasses: constrains the `type`
# discriminator field to SpaceType members.
T = TypeVar("T", bound=SpaceType)
43
+
44
+
45
class SpaceConfigBase(BaseModel, Generic[T]):
    """Base class for space configuration.

    Generic over the SpaceType discriminator so each concrete subclass can
    narrow ``type`` to a single Literal member (see UniqueAISpaceConfig).
    Instances are frozen (immutable after validation).
    """

    model_config = get_configuration_dict(frozen=True)
    # Discriminator identifying which concrete space config this is.
    type: T = Field(description="The type of the space.")

    project_name: str = Field(
        default="Unique AI",
        description="The project name as optained from spaces 2.0",
    )

    # Language model used by the space; defaults to GPT-4o.
    language_model: LMI = get_LMI_default_field(DEFAULT_GPT_4o)

    custom_instructions: str = Field(
        default="",
        description="A custom instruction provided by the system admin.",
    )

    # Default tool set: internal search (excluding uploaded files),
    # web search, and deep research. Pydantic deep-copies field defaults,
    # so the shared list literal is safe here.
    tools: list[ToolBuildConfig] = Field(
        default=[
            ToolBuildConfig(
                name=InternalSearchTool.name,
                configuration=InternalSearchConfig(
                    exclude_uploaded_files=True,
                ),
            ),
            ToolBuildConfig(
                name=WebSearchTool.name,
                configuration=WebSearchConfig(),
            ),
            ToolBuildConfig(
                name=DeepResearchTool.name,
                configuration=DeepResearchToolConfig(),
            ),
        ],
    )

    @field_validator("tools", mode="after")
    @classmethod
    def set_input_context_size(
        cls, tools: list[ToolBuildConfig], info: ValidationInfo
    ) -> list[ToolBuildConfig]:
        """Propagate the space's language-model input token limit to the
        internal-search and web-search tool configurations.

        Runs after field validation; relies on ``language_model`` having been
        validated before ``tools`` (field declaration order).
        """
        for tool in tools:
            if tool.name == InternalSearchTool.name:
                tool.configuration.language_model_max_input_tokens = (  # type: ignore
                    info.data["language_model"].token_limits.token_limit_input
                )
            elif tool.name == WebSearchTool.name:
                tool.configuration.language_model_max_input_tokens = (  # type: ignore
                    info.data["language_model"].token_limits.token_limit_input
                )
        return tools
97
+
98
+
99
class UniqueAISpaceConfig(SpaceConfigBase):
    """Contains configuration for the entities that a space provides.

    Concrete space config with the discriminator pinned to UNIQUE_AI.
    """

    type: Literal[SpaceType.UNIQUE_AI] = SpaceType.UNIQUE_AI
103
+
104
+
105
# Resolve any outstanding forward references now that dependent models exist.
UniqueAISpaceConfig.model_rebuild()

# Hard upper bounds used to clip user-supplied loop settings (see ClipInt
# annotations below).
LIMIT_LOOP_ITERATIONS = 50
LIMIT_MAX_TOOL_CALLS_PER_ITERATION = 50
109
+
110
+
111
class LoopConfiguration(BaseModel):
    """Per-iteration settings for the agent loop."""

    model_config = get_configuration_dict()

    # Clipped into [1, LIMIT_MAX_TOOL_CALLS_PER_ITERATION] by ClipInt.
    max_tool_calls_per_iteration: Annotated[
        int,
        *ClipInt(min_value=1, max_value=LIMIT_MAX_TOOL_CALLS_PER_ITERATION),
    ] = 10
118
+
119
+
120
class EvaluationConfig(BaseModel):
    """Configuration of answer evaluation (review steps + hallucination check)."""

    model_config = get_configuration_dict()
    # Maximum number of review steps; callers in this module pass 0 in their
    # defaults (presumably to disable reviewing — confirm semantics upstream).
    max_review_steps: int = 3
    hallucination_config: HallucinationConfig = HallucinationConfig()
124
+
125
+
126
class LoopAgentTokenLimitsConfig(BaseModel):
    """Token budgeting for the loop agent, derived from a language model's
    input window."""

    model_config = get_configuration_dict()

    language_model: LMI = LanguageModelInfo.from_name(
        LanguageModelName.AZURE_GPT_4o_2024_1120
    )

    percent_of_max_tokens_for_history: float = Field(
        default=0.2,
        ge=0.0,
        lt=1.0,
        description="The fraction of the max input tokens that will be reserved for the history.",
    )

    @property
    def max_history_tokens(self) -> int:
        # Absolute history budget: fraction of the model's input token limit,
        # truncated to an int.
        return int(
            self.language_model.token_limits.token_limit_input
            * self.percent_of_max_tokens_for_history,
        )
146
+
147
+
148
class SearchAgentConfig(BaseModel):
    """Configure the search agent.

    Legacy, flat configuration model. The adapter
    ``search_agent_config_to_unique_ai_space_config`` below maps it onto the
    newer UniqueAIConfig (space + agent) layout. Frozen after validation.
    """

    model_config = get_configuration_dict(frozen=True)

    language_model: LMI = LanguageModelInfo.from_name(DEFAULT_GPT_4o)

    # Reserves 60% of the input window for history by default.
    token_limits: LoopAgentTokenLimitsConfig = Field(
        default=LoopAgentTokenLimitsConfig(percent_of_max_tokens_for_history=0.6)
    )
    temperature: float = 0.0
    additional_llm_options: dict[str, Any] = Field(
        default={},
        description="Additional options to pass to the language model.",
    )

    # Space 2.0
    project_name: str = Field(
        default="Unique AI",
        description="The project name as optained from spaces 2.0",
    )

    # Space 2.0
    custom_instructions: str = Field(
        default="",
        description="A custom instruction provided by the system admin.",
    )

    thinking_steps_display: bool = False

    ##############################
    ### General Configurations
    ##############################
    # Clipped into [1, LIMIT_LOOP_ITERATIONS] by ClipInt.
    max_loop_iterations: Annotated[
        int, *ClipInt(min_value=1, max_value=LIMIT_LOOP_ITERATIONS)
    ] = 8

    loop_configuration: LoopConfiguration = LoopConfiguration()

    # Same default tool set as SpaceConfigBase.tools; duplicated here for the
    # legacy layout.
    tools: list[ToolBuildConfig] = Field(
        default=[
            ToolBuildConfig(
                name=InternalSearchTool.name,
                configuration=InternalSearchConfig(
                    exclude_uploaded_files=True,
                ),
            ),
            ToolBuildConfig(
                name=WebSearchTool.name,
                configuration=WebSearchConfig(),
            ),
            ToolBuildConfig(
                name=DeepResearchTool.name,
                configuration=DeepResearchToolConfig(),
            ),
        ],
    )

    # Prompt templates are loaded from files next to this module at class
    # definition / instantiation time (default_factory reads from disk).
    system_prompt_template: str = Field(
        default_factory=lambda: (
            Path(__file__).parent / "prompts" / "system_prompt.jinja2"
        ).read_text(),
        description="The system prompt template as a Jinja2 template string.",
    )

    user_message_prompt_template: str = Field(
        default_factory=lambda: (
            Path(__file__).parent / "prompts" / "user_message_prompt.jinja2"
        ).read_text(),
        description="The user message prompt template as a Jinja2 template string.",
    )

    ##############################
    ### Follow-up Questions
    ##############################
    follow_up_questions_config: FollowUpQuestionsConfig = FollowUpQuestionsConfig()

    ##############################
    ### Evaluation
    ##############################
    # Default overrides max_review_steps to 0 (review loop off by default).
    evaluation_config: EvaluationConfig = EvaluationConfig(
        hallucination_config=HallucinationConfig(),
        max_review_steps=0,
    )

    ##############################
    ### Stock Ticker
    ##############################
    stock_ticker_config: StockTickerConfig = StockTickerConfig()

    # TODO: generalize this — there should only be one point in the code where
    # the tool check is done.
    def get_tool_config(self, tool: str) -> BaseToolConfig:
        """Get the tool configuration by name."""
        return ToolFactory.build_tool_config(tool)

    # TODO: @gustavhartz, the Hallucination check should be triggered if enabled and the answer contains references.
    force_checks_on_stream_response_references: list[EvaluationMetricName] = Field(
        default=[EvaluationMetricName.HALLUCINATION],
        description="A list of checks to force on references. This is used to add hallucination check to references without new tool calls.",
    )

    uploaded_content_config: UploadedContentConfig = Field(
        default_factory=UploadedContentConfig,
        description="The uploaded content config.",
    )
253
+
254
+
255
+ # ------------------------------------------------------------
256
+ # Space 2.0 Config
257
+ # ------------------------------------------------------------
258
+
259
+
260
class UniqueAIPromptConfig(BaseModel):
    """Prompt templates for the Unique AI agent (Space 2.0 layout).

    Defaults are read from Jinja2 files in the ``prompts`` directory next to
    this module (default_factory reads from disk).
    """

    model_config = get_configuration_dict(frozen=True)

    system_prompt_template: str = Field(
        default_factory=lambda: (
            Path(__file__).parent / "prompts" / "system_prompt.jinja2"
        ).read_text(),
        description="The system prompt template as a Jinja2 template string.",
    )

    user_message_prompt_template: str = Field(
        default_factory=lambda: (
            Path(__file__).parent / "prompts" / "user_message_prompt.jinja2"
        ).read_text(),
        description="The user message prompt template as a Jinja2 template string.",
    )
276
+
277
+
278
# Annotated None used as a union alternative below: picking the "Deactivated"
# branch in the generated configuration schema sets the service to None.
DeactivatedNone = Annotated[
    None,
    Field(title="Deactivated", description="None"),
]
282
+
283
+
284
class UniqueAIServices(BaseModel):
    """Determine the services the agent is using.

    All services are optional and can be disabled by setting them to None
    (the ``DeactivatedNone`` union branch). The Annotated titles ("Active" /
    "Deactivated") drive how the choice is rendered in the generated schema.
    """

    model_config = get_configuration_dict(frozen=True)

    follow_up_questions_config: (
        Annotated[
            FollowUpQuestionsConfig,
            Field(
                title="Active",
            ),
        ]
        | DeactivatedNone
    ) = FollowUpQuestionsConfig()

    stock_ticker_config: (
        Annotated[StockTickerConfig, Field(title="Active")] | DeactivatedNone
    ) = StockTickerConfig()

    # Default enables the evaluation service but with 0 review steps.
    evaluation_config: (
        Annotated[
            EvaluationConfig,
            Field(title="Active"),
        ]
        | DeactivatedNone
    ) = EvaluationConfig(
        hallucination_config=HallucinationConfig(),
        max_review_steps=0,
    )

    # Not optional: uploaded-content handling is always configured.
    uploaded_content_config: UploadedContentConfig = UploadedContentConfig()
318
+
319
+
320
class InputTokenDistributionConfig(BaseModel):
    """How the model's input token budget is split (currently: history share)."""

    model_config = get_configuration_dict(frozen=True)

    percent_for_history: float = Field(
        default=0.2,
        ge=0.0,
        lt=1.0,
        description="The fraction of the max input tokens that will be reserved for the history.",
    )

    def max_history_tokens(self, max_input_token: int) -> int:
        """Return the absolute history token budget for a given input limit."""
        return int(self.percent_for_history * max_input_token)
332
+
333
+
334
class ExperimentalConfig(BaseModel):
    """Experimental features — this part of the configuration might evolve
    continuously in the future."""

    model_config = get_configuration_dict(frozen=True)

    thinking_steps_display: bool = False

    # TODO: @gustavhartz, the Hallucination check should be triggered if enabled and the answer contains references.
    force_checks_on_stream_response_references: list[EvaluationMetricName] = Field(
        default=[EvaluationMetricName.HALLUCINATION],
        description="A list of checks to force on references. This is used to add hallucination check to references without new tool calls.",
    )

    # TODO: The temperature should be used via the additional_llm_options;
    # the additional_llm_options might then eventually move closer to the
    # LanguageModelInfo.
    # NOTE(review): upper bound 10.0 is unusually high for an LLM temperature
    # (common APIs cap at 2.0) — confirm this is intentional.
    temperature: float = Field(
        default=0.0,
        ge=0.0,
        le=10.0,
        description="The temperature to use for the LLM.",
    )

    additional_llm_options: dict[str, Any] = Field(
        default={},
        description="Additional options to pass to the LLM.",
    )

    # Tighter per-iteration tool-call budget than LoopConfiguration's
    # default of 10.
    loop_configuration: LoopConfiguration = LoopConfiguration(
        max_tool_calls_per_iteration=5
    )
364
+
365
+
366
class UniqueAIAgentConfig(BaseModel):
    """Agent-level configuration in the Space 2.0 layout: loop limits, token
    distribution, prompts, services, and experimental flags."""

    model_config = get_configuration_dict(frozen=True)

    max_loop_iterations: int = 8

    input_token_distribution: InputTokenDistributionConfig = Field(
        default=InputTokenDistributionConfig(),
        description="The distribution of the input tokens.",
    )

    prompt_config: UniqueAIPromptConfig = UniqueAIPromptConfig()

    services: UniqueAIServices = UniqueAIServices()

    experimental: ExperimentalConfig = ExperimentalConfig()
381
+
382
+
383
class UniqueAIConfig(BaseModel):
    """Top-level Space 2.0 configuration: a space part plus an agent part."""

    model_config = get_configuration_dict(frozen=True)

    space: UniqueAISpaceConfig = UniqueAISpaceConfig()

    agent: UniqueAIAgentConfig = UniqueAIAgentConfig()
389
+
390
+
391
+ # ---
392
+ # Configuration adapter SearchAgentConfig -> UniqueAISpaceConfig
393
+ # --
394
+
395
+
396
def search_agent_config_to_unique_ai_space_config(
    search_agent_config: SearchAgentConfig,
) -> UniqueAIConfig:
    """Adapt a legacy SearchAgentConfig to the new UniqueAIConfig layout.

    Space-level fields (project, instructions, tools, model) map onto the
    space part; everything else is distributed over the agent sub-configs.
    The mapping is purely structural — no values are altered.
    """
    cfg = search_agent_config

    # Keep the same fraction of the input window reserved for history.
    history_fraction = cfg.token_limits.percent_of_max_tokens_for_history

    agent_part = UniqueAIAgentConfig(
        max_loop_iterations=cfg.max_loop_iterations,
        input_token_distribution=InputTokenDistributionConfig(
            percent_for_history=history_fraction,
        ),
        prompt_config=UniqueAIPromptConfig(
            system_prompt_template=cfg.system_prompt_template,
            user_message_prompt_template=cfg.user_message_prompt_template,
        ),
        services=UniqueAIServices(
            follow_up_questions_config=cfg.follow_up_questions_config,
            evaluation_config=cfg.evaluation_config,
            stock_ticker_config=cfg.stock_ticker_config,
            uploaded_content_config=cfg.uploaded_content_config,
        ),
        experimental=ExperimentalConfig(
            thinking_steps_display=cfg.thinking_steps_display,
            force_checks_on_stream_response_references=cfg.force_checks_on_stream_response_references,
            temperature=cfg.temperature,
            additional_llm_options=cfg.additional_llm_options,
            loop_configuration=cfg.loop_configuration,
        ),
    )

    space_part = UniqueAISpaceConfig(
        project_name=cfg.project_name,
        custom_instructions=cfg.custom_instructions,
        tools=cfg.tools,
        language_model=cfg.language_model,
        type=SpaceType.UNIQUE_AI,
    )

    return UniqueAIConfig(
        space=space_part,
        agent=agent_part,
    )
445
+
446
+
447
def needs_conversion_to_unique_ai_space_config(
    configuration: dict[str, Any],
) -> bool:
    """Check if the configuration needs to be converted to the new UniqueAISpaceConfig.

    A configuration is considered already migrated when it carries any of the
    Space-2.0 markers; only unmigrated (legacy) configurations return True.
    """
    already_migrated = (
        "space_two_point_zero" in configuration
        or "SpaceTwoPointZeroConfig" in configuration
        or ("space" in configuration and "agent" in configuration)
    )
    return not already_migrated
459
+
460
+
461
if __name__ == "__main__":
    import json

    from unique_toolkit._common.utils.write_configuration import (
        write_service_configuration,
    )

    # Write the default UniqueAIConfig as a service configuration next to
    # this module.
    write_service_configuration(
        service_folderpath=Path(__file__).parent.parent,
        write_folderpath=Path(__file__).parent,
        config=UniqueAIConfig(),
        sub_name="unique_ai_config",
    )

    # TODO: @cdkl Delete these models
    # This model is only used to have the old and new models in the same json
    # schema for the data migration in the node chat backend

    # The types can be generated with quicktype.io with the following command:
    # quicktype unique_ai_old_and_new_config.json \
    #   --src-lang schema --lang typescript \
    #   --just-types --prefer-types --explicit-unions \
    #   -o unique_ai_old_new_configuration.ts \
    #   --top-level UniqueAIOldAndNewConfig \
    #   --raw-type any

    # You will need to replace the `any` type with `unknown` in the generated file.
    # On the branch `feat/unique-ai-configuration-migration-node-chat-part`.
    # If you further update the types you will need to adapt both branches:
    # - feat/unique-ai-configuration-migration-next-admin-part
    # - feat/unique-ai-configuration-migration-node-chat-part

    # Migration-only wrapper: bundles old and new config schemas in one JSON
    # document.
    class UniqueAIOldAndNewConfig(BaseModel):
        new: UniqueAIConfig = UniqueAIConfig()
        old: SearchAgentConfig = SearchAgentConfig()

    # Emit the combined JSON schema (by_alias so field aliases match the
    # serialized configuration format).
    with open(
        Path(__file__).parent / "unique_ai_old_and_new_config.json",
        "w",
    ) as f:
        json.dump(
            UniqueAIOldAndNewConfig().model_json_schema(by_alias=True),
            f,
            indent=4,
        )
@@ -0,0 +1,46 @@
1
+ # Generic Reference Guidelines - ONLY FOLLOW IF NOTHING ELSE APPLICABLE FOR THE TOOL
2
+ Whenever you use information retrieved with a tool, you must adhere to strict reference guidelines. You must strictly reference each fact used with the `source_number` of the corresponding passage, in the following format: '[source<source_number>]'.
3
+
4
+ Example:
5
+ - The stock price of Apple Inc. is $150 [source0] and the company's revenue increased by 10% [source1].
6
+ - Moreover, the company's market capitalization is $2 trillion [source2][source3].
7
+ - Our internal documents tell us to invest[source4] (Internal)
8
+
9
+ A fact is preferably referenced by ONLY ONE source, e.g [sourceX], which should be the most relevant source for the fact.
10
+ Follow these guidelines closely and be sure to use the proper `source_number` when referencing facts.
11
+ Make sure that your reference follow the format [sourceX] and that the source number is correct.
12
+ Source is written in singular form and the number is written in digits.
13
+
14
+ IT IS VERY IMPORTANT TO FOLLOW THESE GUIDELINES!!
15
+ NEVER CITE A source_number THAT YOU DON'T SEE IN THE TOOL CALL RESPONSE!!!
16
+ The source_number in old assistant messages are no longer valid.
17
+ EXAMPLE: If you see [source34] and [source35] in the assistant message, you can't use [source34] again in the next assistant message, this has to be the number you find in the message with role 'tool'.
18
+ BE AWARE:All tool calls have been filtered to remove uncited sources. Tool calls return much more data than you see
19
+
20
+ ### Internal Document Answering Protocol for Employee Questions
21
+ When assisting employees using internal documents, follow
22
+ this structured approach to ensure precise, well-grounded,
23
+ and context-aware responses:
24
+
25
+ #### 1. Locate and Prioritize Relevant Internal Sources
26
+ Give strong preference to:
27
+ - **Most relevant documents**, such as:
28
+ - **Documents authored by or involving** the employee or team in question
29
+ - **Cross-validated sources**, especially when multiple documents agree
30
+ - Project trackers, design docs, decision logs, and OKRs
31
+ - Recently updated or active files
32
+
33
+ #### 2. Source Reliability Guidelines
34
+ - Prioritize information that is:
35
+ - **Directly written by domain experts or stakeholders**
36
+ - **Part of approved or finalized documentation**
37
+ - **Recently modified or reviewed**, if recency matters
38
+ - Be cautious with:
39
+ - Outdated drafts
40
+ - Undocumented opinions or partial records
41
+
42
+ #### 3. Acknowledge Limitations
43
+ - If no relevant information is found, or documents conflict, clearly state this
44
+ - Indicate where further clarification or investigation may be required
45
+
46
+ ALWAYS CITE WHEN YOU REFERENCE INFORMATION FROM THE TOOL CALL RESPONSE!!!