unique_toolkit 0.7.9__py3-none-any.whl → 1.33.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- unique_toolkit/__init__.py +36 -3
- unique_toolkit/_common/api_calling/human_verification_manager.py +357 -0
- unique_toolkit/_common/base_model_type_attribute.py +303 -0
- unique_toolkit/_common/chunk_relevancy_sorter/config.py +49 -0
- unique_toolkit/_common/chunk_relevancy_sorter/exception.py +5 -0
- unique_toolkit/_common/chunk_relevancy_sorter/schemas.py +46 -0
- unique_toolkit/_common/chunk_relevancy_sorter/service.py +374 -0
- unique_toolkit/_common/chunk_relevancy_sorter/tests/test_service.py +275 -0
- unique_toolkit/_common/default_language_model.py +12 -0
- unique_toolkit/_common/docx_generator/__init__.py +7 -0
- unique_toolkit/_common/docx_generator/config.py +12 -0
- unique_toolkit/_common/docx_generator/schemas.py +80 -0
- unique_toolkit/_common/docx_generator/service.py +225 -0
- unique_toolkit/_common/docx_generator/template/Doc Template.docx +0 -0
- unique_toolkit/_common/endpoint_builder.py +368 -0
- unique_toolkit/_common/endpoint_requestor.py +480 -0
- unique_toolkit/_common/exception.py +24 -0
- unique_toolkit/_common/experimental/endpoint_builder.py +368 -0
- unique_toolkit/_common/experimental/endpoint_requestor.py +488 -0
- unique_toolkit/_common/feature_flags/schema.py +9 -0
- unique_toolkit/_common/pydantic/rjsf_tags.py +936 -0
- unique_toolkit/_common/pydantic_helpers.py +174 -0
- unique_toolkit/_common/referencing.py +53 -0
- unique_toolkit/_common/string_utilities.py +140 -0
- unique_toolkit/_common/tests/test_referencing.py +521 -0
- unique_toolkit/_common/tests/test_string_utilities.py +506 -0
- unique_toolkit/_common/token/image_token_counting.py +67 -0
- unique_toolkit/_common/token/token_counting.py +204 -0
- unique_toolkit/_common/utils/__init__.py +1 -0
- unique_toolkit/_common/utils/files.py +43 -0
- unique_toolkit/_common/utils/image/encode.py +25 -0
- unique_toolkit/_common/utils/jinja/helpers.py +10 -0
- unique_toolkit/_common/utils/jinja/render.py +18 -0
- unique_toolkit/_common/utils/jinja/schema.py +65 -0
- unique_toolkit/_common/utils/jinja/utils.py +80 -0
- unique_toolkit/_common/utils/structured_output/__init__.py +1 -0
- unique_toolkit/_common/utils/structured_output/schema.py +5 -0
- unique_toolkit/_common/utils/write_configuration.py +51 -0
- unique_toolkit/_common/validators.py +101 -4
- unique_toolkit/agentic/__init__.py +1 -0
- unique_toolkit/agentic/debug_info_manager/debug_info_manager.py +28 -0
- unique_toolkit/agentic/debug_info_manager/test/test_debug_info_manager.py +278 -0
- unique_toolkit/agentic/evaluation/config.py +36 -0
- unique_toolkit/{evaluators → agentic/evaluation}/context_relevancy/prompts.py +25 -0
- unique_toolkit/agentic/evaluation/context_relevancy/schema.py +80 -0
- unique_toolkit/agentic/evaluation/context_relevancy/service.py +273 -0
- unique_toolkit/agentic/evaluation/evaluation_manager.py +218 -0
- unique_toolkit/agentic/evaluation/hallucination/constants.py +61 -0
- unique_toolkit/agentic/evaluation/hallucination/hallucination_evaluation.py +112 -0
- unique_toolkit/{evaluators → agentic/evaluation}/hallucination/prompts.py +1 -1
- unique_toolkit/{evaluators → agentic/evaluation}/hallucination/service.py +20 -16
- unique_toolkit/{evaluators → agentic/evaluation}/hallucination/utils.py +32 -21
- unique_toolkit/{evaluators → agentic/evaluation}/output_parser.py +20 -2
- unique_toolkit/{evaluators → agentic/evaluation}/schemas.py +27 -7
- unique_toolkit/agentic/evaluation/tests/test_context_relevancy_service.py +253 -0
- unique_toolkit/agentic/evaluation/tests/test_output_parser.py +87 -0
- unique_toolkit/agentic/history_manager/history_construction_with_contents.py +298 -0
- unique_toolkit/agentic/history_manager/history_manager.py +241 -0
- unique_toolkit/agentic/history_manager/loop_token_reducer.py +484 -0
- unique_toolkit/agentic/history_manager/utils.py +96 -0
- unique_toolkit/agentic/message_log_manager/__init__.py +5 -0
- unique_toolkit/agentic/message_log_manager/service.py +93 -0
- unique_toolkit/agentic/postprocessor/postprocessor_manager.py +212 -0
- unique_toolkit/agentic/reference_manager/reference_manager.py +103 -0
- unique_toolkit/agentic/responses_api/__init__.py +19 -0
- unique_toolkit/agentic/responses_api/postprocessors/code_display.py +71 -0
- unique_toolkit/agentic/responses_api/postprocessors/generated_files.py +297 -0
- unique_toolkit/agentic/responses_api/stream_handler.py +15 -0
- unique_toolkit/agentic/short_term_memory_manager/persistent_short_term_memory_manager.py +141 -0
- unique_toolkit/agentic/thinking_manager/thinking_manager.py +103 -0
- unique_toolkit/agentic/tools/__init__.py +1 -0
- unique_toolkit/agentic/tools/a2a/__init__.py +36 -0
- unique_toolkit/agentic/tools/a2a/config.py +17 -0
- unique_toolkit/agentic/tools/a2a/evaluation/__init__.py +15 -0
- unique_toolkit/agentic/tools/a2a/evaluation/_utils.py +66 -0
- unique_toolkit/agentic/tools/a2a/evaluation/config.py +55 -0
- unique_toolkit/agentic/tools/a2a/evaluation/evaluator.py +260 -0
- unique_toolkit/agentic/tools/a2a/evaluation/summarization_user_message.j2 +9 -0
- unique_toolkit/agentic/tools/a2a/manager.py +55 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/__init__.py +21 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/_display_utils.py +240 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/_ref_utils.py +84 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/config.py +78 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/display.py +264 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/references.py +101 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/test/test_display.py +421 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/test/test_display_utils.py +2103 -0
- unique_toolkit/agentic/tools/a2a/postprocessing/test/test_ref_utils.py +603 -0
- unique_toolkit/agentic/tools/a2a/prompts.py +46 -0
- unique_toolkit/agentic/tools/a2a/response_watcher/__init__.py +6 -0
- unique_toolkit/agentic/tools/a2a/response_watcher/service.py +91 -0
- unique_toolkit/agentic/tools/a2a/tool/__init__.py +4 -0
- unique_toolkit/agentic/tools/a2a/tool/_memory.py +26 -0
- unique_toolkit/agentic/tools/a2a/tool/_schema.py +9 -0
- unique_toolkit/agentic/tools/a2a/tool/config.py +158 -0
- unique_toolkit/agentic/tools/a2a/tool/service.py +393 -0
- unique_toolkit/agentic/tools/agent_chunks_hanlder.py +65 -0
- unique_toolkit/agentic/tools/config.py +128 -0
- unique_toolkit/agentic/tools/factory.py +44 -0
- unique_toolkit/agentic/tools/mcp/__init__.py +4 -0
- unique_toolkit/agentic/tools/mcp/manager.py +71 -0
- unique_toolkit/agentic/tools/mcp/models.py +28 -0
- unique_toolkit/agentic/tools/mcp/tool_wrapper.py +234 -0
- unique_toolkit/agentic/tools/openai_builtin/__init__.py +11 -0
- unique_toolkit/agentic/tools/openai_builtin/base.py +46 -0
- unique_toolkit/agentic/tools/openai_builtin/code_interpreter/__init__.py +8 -0
- unique_toolkit/agentic/tools/openai_builtin/code_interpreter/config.py +88 -0
- unique_toolkit/agentic/tools/openai_builtin/code_interpreter/service.py +250 -0
- unique_toolkit/agentic/tools/openai_builtin/manager.py +79 -0
- unique_toolkit/agentic/tools/schemas.py +145 -0
- unique_toolkit/agentic/tools/test/test_mcp_manager.py +536 -0
- unique_toolkit/agentic/tools/test/test_tool_progress_reporter.py +445 -0
- unique_toolkit/agentic/tools/tool.py +187 -0
- unique_toolkit/agentic/tools/tool_manager.py +492 -0
- unique_toolkit/agentic/tools/tool_progress_reporter.py +285 -0
- unique_toolkit/agentic/tools/utils/__init__.py +19 -0
- unique_toolkit/agentic/tools/utils/execution/__init__.py +1 -0
- unique_toolkit/agentic/tools/utils/execution/execution.py +286 -0
- unique_toolkit/agentic/tools/utils/source_handling/__init__.py +0 -0
- unique_toolkit/agentic/tools/utils/source_handling/schema.py +21 -0
- unique_toolkit/agentic/tools/utils/source_handling/source_formatting.py +207 -0
- unique_toolkit/agentic/tools/utils/source_handling/tests/test_source_formatting.py +216 -0
- unique_toolkit/app/__init__.py +9 -0
- unique_toolkit/app/dev_util.py +180 -0
- unique_toolkit/app/fast_api_factory.py +131 -0
- unique_toolkit/app/init_sdk.py +32 -1
- unique_toolkit/app/schemas.py +206 -31
- unique_toolkit/app/unique_settings.py +367 -0
- unique_toolkit/app/webhook.py +77 -0
- unique_toolkit/chat/__init__.py +8 -1
- unique_toolkit/chat/deprecated/service.py +232 -0
- unique_toolkit/chat/functions.py +648 -78
- unique_toolkit/chat/rendering.py +34 -0
- unique_toolkit/chat/responses_api.py +461 -0
- unique_toolkit/chat/schemas.py +134 -2
- unique_toolkit/chat/service.py +115 -767
- unique_toolkit/content/functions.py +353 -8
- unique_toolkit/content/schemas.py +128 -15
- unique_toolkit/content/service.py +321 -45
- unique_toolkit/content/smart_rules.py +301 -0
- unique_toolkit/content/utils.py +10 -3
- unique_toolkit/data_extraction/README.md +96 -0
- unique_toolkit/data_extraction/__init__.py +11 -0
- unique_toolkit/data_extraction/augmented/__init__.py +5 -0
- unique_toolkit/data_extraction/augmented/service.py +93 -0
- unique_toolkit/data_extraction/base.py +25 -0
- unique_toolkit/data_extraction/basic/__init__.py +11 -0
- unique_toolkit/data_extraction/basic/config.py +18 -0
- unique_toolkit/data_extraction/basic/prompt.py +13 -0
- unique_toolkit/data_extraction/basic/service.py +55 -0
- unique_toolkit/embedding/service.py +103 -12
- unique_toolkit/framework_utilities/__init__.py +1 -0
- unique_toolkit/framework_utilities/langchain/__init__.py +10 -0
- unique_toolkit/framework_utilities/langchain/client.py +71 -0
- unique_toolkit/framework_utilities/langchain/history.py +19 -0
- unique_toolkit/framework_utilities/openai/__init__.py +6 -0
- unique_toolkit/framework_utilities/openai/client.py +84 -0
- unique_toolkit/framework_utilities/openai/message_builder.py +229 -0
- unique_toolkit/framework_utilities/utils.py +23 -0
- unique_toolkit/language_model/__init__.py +3 -0
- unique_toolkit/language_model/_responses_api_utils.py +93 -0
- unique_toolkit/language_model/builder.py +27 -11
- unique_toolkit/language_model/default_language_model.py +3 -0
- unique_toolkit/language_model/functions.py +345 -43
- unique_toolkit/language_model/infos.py +1288 -46
- unique_toolkit/language_model/reference.py +242 -0
- unique_toolkit/language_model/schemas.py +481 -49
- unique_toolkit/language_model/service.py +229 -28
- unique_toolkit/protocols/support.py +145 -0
- unique_toolkit/services/__init__.py +7 -0
- unique_toolkit/services/chat_service.py +1631 -0
- unique_toolkit/services/knowledge_base.py +1094 -0
- unique_toolkit/short_term_memory/service.py +178 -41
- unique_toolkit/smart_rules/__init__.py +0 -0
- unique_toolkit/smart_rules/compile.py +56 -0
- unique_toolkit/test_utilities/events.py +197 -0
- unique_toolkit-1.33.3.dist-info/METADATA +1145 -0
- unique_toolkit-1.33.3.dist-info/RECORD +205 -0
- unique_toolkit/evaluators/__init__.py +0 -1
- unique_toolkit/evaluators/config.py +0 -35
- unique_toolkit/evaluators/constants.py +0 -1
- unique_toolkit/evaluators/context_relevancy/constants.py +0 -32
- unique_toolkit/evaluators/context_relevancy/service.py +0 -53
- unique_toolkit/evaluators/context_relevancy/utils.py +0 -142
- unique_toolkit/evaluators/hallucination/constants.py +0 -41
- unique_toolkit-0.7.9.dist-info/METADATA +0 -413
- unique_toolkit-0.7.9.dist-info/RECORD +0 -64
- /unique_toolkit/{evaluators → agentic/evaluation}/exception.py +0 -0
- {unique_toolkit-0.7.9.dist-info → unique_toolkit-1.33.3.dist-info}/LICENSE +0 -0
- {unique_toolkit-0.7.9.dist-info → unique_toolkit-1.33.3.dist-info}/WHEEL +0 -0
````diff
--- /dev/null
+++ unique_toolkit/agentic/tools/openai_builtin/code_interpreter/config.py
@@ -0,0 +1,88 @@
+from pydantic import Field
+from pydantic.json_schema import SkipJsonSchema
+
+from unique_toolkit.agentic.tools.factory import ToolFactory
+from unique_toolkit.agentic.tools.openai_builtin.base import (
+    OpenAIBuiltInToolName,
+)
+from unique_toolkit.agentic.tools.schemas import BaseToolConfig
+
+DEFAULT_TOOL_DESCRIPTION = "Use this tool to run python code, e.g to generate plots, process excel files, perform calculations, etc."
+
+DEFAULT_TOOL_DESCRIPTION_FOR_SYSTEM_PROMPT = """
+Use this tool to run python code, e.g to generate plots, process excel files, perform calculations, etc.
+Instructions:
+- All files uploaded to the chat are available in the code interpreter under the path `/mnt/data/<filename>
+- All files generated through code should be saved in the `/mnt/data` folder
+
+CRUCIAL Instructions for displaying images and files in the chat:
+Once files are generated in the `/mnt/data` folder you MUST reference them in the chat using markdown syntax in order to display them in the chat.
+
+WHENEVER you reference a generated file, you MUST use the following format:
+```
+**Descriptive Title of Graph/Chart/File** (<- linebreak is important) (You must choose a good user friendly title, Other markdown syntax such as `#` can be used too)
+[*Generating your {Graph/Chart/File}…*](sandbox:/mnt/data/<filename>)
+```
+IMPORTANT: Do NOT append a leading `!` even when displaying an image.
+Always use a line break between the title and the markdown!
+
+- Files with image file extensions are displayed directly in the chat, while other file extensions are shown as download links.
+- Not using syntax above will FAIL to show images to the user.
+- YOU MUST use the syntax above to display images, otherwise the image will not be displayed in the chat.
+- Only the following file types are allowed to be uploaded to the platform, anything else will FAIL: PDF, DOCX, XLSX, PPTX, CSV, HTML, MD, TXT, PNG, JPG, JPEG.
+
+You MUST always use this syntax, otherwise the files will not be displayed in the chat.
+""".strip()
+
+DEFAULT_TOOL_FORMAT_INFORMATION_FOR_SYSTEM_PROMPT = ""
+
+DEFAULT_TOOL_FORMAT_INFORMATION_FOR_USER_PROMPT = ""
+
+DEFAULT_TOOL_DESCRIPTION_FOR_USER_PROMPT = ""
+
+
+class OpenAICodeInterpreterConfig(BaseToolConfig):
+    upload_files_in_chat_to_container: bool = Field(
+        default=True,
+        description="If set, the files uploaded to the chat will be uploaded to the container where code is executed.",
+    )
+
+    tool_description: str = Field(
+        default=DEFAULT_TOOL_DESCRIPTION,
+        description="The description of the tool that will be sent to the model.",
+    )
+    tool_description_for_system_prompt: str = Field(
+        default=DEFAULT_TOOL_DESCRIPTION_FOR_SYSTEM_PROMPT,
+        description="The description of the tool that will be included in the system prompt.",
+    )
+    tool_format_information_for_system_prompt: SkipJsonSchema[str] = Field(
+        # Since the tool is executed in Azure, it's not always possible to have dynamic format information
+        default=DEFAULT_TOOL_FORMAT_INFORMATION_FOR_SYSTEM_PROMPT,
+        description="The format information of the tool that will be included in the system prompt.",
+    )
+    tool_description_for_user_prompt: SkipJsonSchema[str] = (
+        Field(  # At the moment, this is not appended to the user prompt
+            default=DEFAULT_TOOL_DESCRIPTION_FOR_USER_PROMPT,
+            description="The description of the tool that will be included in the user prompt.",
+        )
+    )
+    tool_format_information_for_user_prompt: SkipJsonSchema[str] = (
+        Field(  # At the moment, this is not appended to the user prompt
+            default=DEFAULT_TOOL_FORMAT_INFORMATION_FOR_USER_PROMPT,
+            description="The format information of the tool that will be included in the user prompt.",
+        )
+    )
+
+    expires_after_minutes: int = Field(
+        default=20,
+        description="The number of minutes after which the container will be deleted.",
+    )
+    use_auto_container: bool = Field(
+        default=False,
+        description="If set, use the `auto` container setting from OpenAI. Note that this will recreate the container on each call.",
+    )
+
+
+ToolFactory.register_tool_config(
+    OpenAIBuiltInToolName.CODE_INTERPRETER, OpenAICodeInterpreterConfig
+)
````
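For orientation, here is a minimal, hypothetical usage sketch of the new config class. The class name, its fields, and the import path are taken from the hunk above; the chosen values are illustrative only.

```python
# Hypothetical sketch (not part of the diff): override a few defaults of the new config class.
from unique_toolkit.agentic.tools.openai_builtin.code_interpreter.config import (
    OpenAICodeInterpreterConfig,
)

config = OpenAICodeInterpreterConfig(
    upload_files_in_chat_to_container=True,  # copy chat uploads into the execution container
    expires_after_minutes=30,  # keep the container alive longer than the default 20 minutes
    use_auto_container=False,  # reuse a named container instead of OpenAI's `auto` mode
)

# Fields left unset keep the defaults declared above, e.g. the long
# DEFAULT_TOOL_DESCRIPTION_FOR_SYSTEM_PROMPT text used in the system prompt.
print(config.tool_description)
```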
```diff
--- /dev/null
+++ unique_toolkit/agentic/tools/openai_builtin/code_interpreter/service.py
@@ -0,0 +1,250 @@
+import logging
+from typing import override
+
+from openai import AsyncOpenAI, BaseModel, NotFoundError
+from openai.types.responses.tool_param import CodeInterpreter
+
+from unique_toolkit import ContentService, ShortTermMemoryService
+from unique_toolkit.agentic.short_term_memory_manager.persistent_short_term_memory_manager import (
+    PersistentShortMemoryManager,
+)
+from unique_toolkit.agentic.tools.openai_builtin.base import (
+    OpenAIBuiltInTool,
+    OpenAIBuiltInToolName,
+)
+from unique_toolkit.agentic.tools.openai_builtin.code_interpreter.config import (
+    OpenAICodeInterpreterConfig,
+)
+from unique_toolkit.agentic.tools.schemas import ToolPrompts
+from unique_toolkit.content.schemas import (
+    Content,
+)
+
+logger = logging.getLogger(__name__)
+
+
+_SHORT_TERM_MEMORY_NAME = "container_code_execution"
+
+
+class CodeExecutionShortTermMemorySchema(BaseModel):
+    container_id: str | None = None
+    file_ids: dict[str, str] = {}  # Mapping of unique file id to openai file id
+
+
+CodeExecutionMemoryManager = PersistentShortMemoryManager[
+    CodeExecutionShortTermMemorySchema
+]
+
+
+def _get_container_code_execution_short_term_memory_manager(
+    company_id: str, user_id: str, chat_id: str
+) -> CodeExecutionMemoryManager:
+    short_term_memory_service = ShortTermMemoryService(
+        company_id=company_id,
+        user_id=user_id,
+        chat_id=chat_id,
+        message_id=None,
+    )
+    short_term_memory_manager = PersistentShortMemoryManager(
+        short_term_memory_service=short_term_memory_service,
+        short_term_memory_schema=CodeExecutionShortTermMemorySchema,
+        short_term_memory_name=_SHORT_TERM_MEMORY_NAME,
+    )
+    return short_term_memory_manager
+
+
+async def _create_container_if_not_exists(
+    client: AsyncOpenAI,
+    chat_id: str,
+    user_id: str,
+    company_id: str,
+    expires_after_minutes: int,
+    memory: CodeExecutionShortTermMemorySchema | None = None,
+) -> CodeExecutionShortTermMemorySchema:
+    if memory is not None:
+        logger.info("Container found in short term memory")
+    else:
+        logger.info("No Container in short term memory, creating a new container")
+        memory = CodeExecutionShortTermMemorySchema()
+
+    container_id = memory.container_id
+
+    if container_id is not None:
+        try:
+            container = await client.containers.retrieve(container_id)
+            if container.status not in ["active", "running"]:
+                logger.info(
+                    "Container has status `%s`, recreating a new one", container.status
+                )
+                container_id = None
+        except NotFoundError:
+            container_id = None
+
+    if container_id is None:
+        memory = CodeExecutionShortTermMemorySchema()
+
+        container = await client.containers.create(
+            name=f"code_execution_{company_id}_{user_id}_{chat_id}",
+            expires_after={
+                "anchor": "last_active_at",
+                "minutes": expires_after_minutes,
+            },
+        )
+
+        memory.container_id = container.id
+
+    return memory
+
+
+async def _upload_files_to_container(
+    client: AsyncOpenAI,
+    uploaded_files: list[Content],
+    memory: CodeExecutionShortTermMemorySchema,
+    content_service: ContentService,
+    chat_id: str,
+) -> CodeExecutionShortTermMemorySchema:
+    container_id = memory.container_id
+
+    assert container_id is not None
+
+    memory = memory.model_copy(deep=True)
+
+    for file in uploaded_files:
+        upload = True
+        if file.id in memory.file_ids:
+            try:
+                _ = await client.containers.files.retrieve(
+                    container_id=container_id, file_id=memory.file_ids[file.id]
+                )
+                logger.info("File with id %s already uploaded to container", file.id)
+                upload = False
+            except NotFoundError:
+                upload = True
+
+        if upload:
+            logger.info(
+                "Uploading file %s to container %s", file.id, memory.container_id
+            )
+            file_content = content_service.download_content_to_bytes(
+                content_id=file.id, chat_id=chat_id
+            )  # TODO: Use async version when available
+
+            openai_file = await client.containers.files.create(
+                container_id=container_id,
+                file=(file.key, file_content),
+            )
+            memory.file_ids[file.id] = openai_file.id
+
+    return memory
+
+
+class OpenAICodeInterpreterTool(OpenAIBuiltInTool[CodeInterpreter]):
+    DISPLAY_NAME = "Code Interpreter"
+
+    def __init__(
+        self,
+        config: OpenAICodeInterpreterConfig,
+        container_id: str | None,
+        is_exclusive: bool = False,
+    ) -> None:
+        self._config = config
+
+        if not config.use_auto_container and container_id is None:
+            raise ValueError("`container_id` required when not using `auto` containers")
+
+        self._container_id = container_id
+        self._is_exclusive = is_exclusive
+
+    @property
+    @override
+    def name(self) -> OpenAIBuiltInToolName:
+        return OpenAIBuiltInToolName.CODE_INTERPRETER
+
+    @override
+    def tool_description(self) -> CodeInterpreter:
+        if self._config.use_auto_container:
+            return {"container": {"type": "auto"}, "type": "code_interpreter"}
+
+        return {
+            "container": self._container_id,  # type: ignore
+            "type": "code_interpreter",
+        }
+
+    @override
+    def is_enabled(self) -> bool:
+        return True
+
+    @override
+    def takes_control(self) -> bool:
+        return False
+
+    @override
+    def is_exclusive(self) -> bool:
+        return self._is_exclusive
+
+    @classmethod
+    async def build_tool(
+        cls,
+        config: OpenAICodeInterpreterConfig,
+        uploaded_files: list[Content],
+        client: AsyncOpenAI,
+        content_service: ContentService,
+        company_id: str,
+        user_id: str,
+        chat_id: str,
+        is_exclusive: bool = False,
+    ) -> "OpenAICodeInterpreterTool":
+        if config.use_auto_container:
+            logger.info("Using `auto` container setting")
+            return cls(config=config, container_id=None)
+
+        memory_manager = _get_container_code_execution_short_term_memory_manager(
+            company_id=company_id,
+            user_id=user_id,
+            chat_id=chat_id,
+        )
+
+        memory = await memory_manager.load_async()
+
+        memory = await _create_container_if_not_exists(
+            client=client,
+            memory=memory,
+            chat_id=chat_id,
+            user_id=user_id,
+            company_id=company_id,
+            expires_after_minutes=config.expires_after_minutes,
+        )
+
+        if config.upload_files_in_chat_to_container:
+            memory = await _upload_files_to_container(
+                client=client,
+                uploaded_files=uploaded_files,
+                content_service=content_service,
+                chat_id=chat_id,
+                memory=memory,
+            )
+
+        await memory_manager.save_async(memory)
+
+        assert memory.container_id is not None
+
+        return OpenAICodeInterpreterTool(
+            config=config, container_id=memory.container_id, is_exclusive=is_exclusive
+        )
+
+    @override
+    def get_tool_prompts(self) -> ToolPrompts:
+        return ToolPrompts(
+            name="the python tool",  # https://platform.openai.com/docs/guides/tools-code-interpreter
+            display_name=self.DISPLAY_NAME,
+            tool_description=self._config.tool_description,
+            tool_system_prompt=self._config.tool_description_for_system_prompt,
+            tool_format_information_for_system_prompt=self._config.tool_format_information_for_system_prompt,
+            tool_user_prompt=self._config.tool_description_for_user_prompt,
+            tool_format_information_for_user_prompt=self._config.tool_format_information_for_user_prompt,
+            input_model={},
+        )
+
+    @override
+    def display_name(self) -> str:
+        return self.DISPLAY_NAME
```
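A hedged sketch of how application code might build this tool: `build_tool()` and its parameters come from the hunk above, while the `AsyncOpenAI` client, the `ContentService` instance, and the identifiers are assumed to be provided by the surrounding application.

```python
# Hypothetical wiring sketch (not part of the diff); assumes client, content service,
# and identifiers are supplied by the caller.
from openai import AsyncOpenAI

from unique_toolkit import ContentService
from unique_toolkit.agentic.tools.openai_builtin.code_interpreter import (
    OpenAICodeInterpreterConfig,
    OpenAICodeInterpreterTool,
)
from unique_toolkit.content.schemas import Content


async def build_code_interpreter(
    client: AsyncOpenAI,
    content_service: ContentService,
    uploaded_files: list[Content],
    company_id: str,
    user_id: str,
    chat_id: str,
) -> OpenAICodeInterpreterTool:
    # Reuses (or creates) the per-chat container and uploads the chat files into it,
    # persisting the container id in short term memory as implemented above.
    return await OpenAICodeInterpreterTool.build_tool(
        config=OpenAICodeInterpreterConfig(),
        uploaded_files=uploaded_files,
        client=client,
        content_service=content_service,
        company_id=company_id,
        user_id=user_id,
        chat_id=chat_id,
    )
```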
```diff
--- /dev/null
+++ unique_toolkit/agentic/tools/openai_builtin/manager.py
@@ -0,0 +1,79 @@
+from openai import AsyncOpenAI
+
+from unique_toolkit.agentic.tools.config import ToolBuildConfig
+from unique_toolkit.agentic.tools.openai_builtin.base import (
+    OpenAIBuiltInTool,
+    OpenAIBuiltInToolName,
+)
+from unique_toolkit.agentic.tools.openai_builtin.code_interpreter import (
+    OpenAICodeInterpreterConfig,
+    OpenAICodeInterpreterTool,
+)
+from unique_toolkit.content.schemas import Content
+from unique_toolkit.content.service import ContentService
+
+
+class OpenAIBuiltInToolManager:
+    def __init__(
+        self,
+        builtin_tools: list[OpenAIBuiltInTool],
+    ):
+        self._builtin_tools = builtin_tools
+
+    @classmethod
+    async def _build_tool(
+        cls,
+        uploaded_files: list[Content],
+        content_service: ContentService,
+        user_id: str,
+        company_id: str,
+        chat_id: str,
+        client: AsyncOpenAI,
+        tool_config: ToolBuildConfig,
+    ) -> OpenAIBuiltInTool:
+        if tool_config.name == OpenAIBuiltInToolName.CODE_INTERPRETER:
+            assert isinstance(tool_config.configuration, OpenAICodeInterpreterConfig)
+            tool = await OpenAICodeInterpreterTool.build_tool(
+                config=tool_config.configuration,
+                uploaded_files=uploaded_files,
+                content_service=content_service,
+                client=client,
+                company_id=company_id,
+                user_id=user_id,
+                chat_id=chat_id,
+                is_exclusive=tool_config.is_exclusive,
+            )
+            return tool
+        else:
+            raise ValueError(f"Unknown built-in tool name: {tool_config.name}")
+
+    @classmethod
+    async def build_manager(
+        cls,
+        uploaded_files: list[Content],
+        content_service: ContentService,
+        user_id: str,
+        company_id: str,
+        chat_id: str,
+        client: AsyncOpenAI,
+        tool_configs: list[ToolBuildConfig],
+    ) -> "OpenAIBuiltInToolManager":
+        builtin_tools = []
+        for tool_config in tool_configs:
+            if tool_config.name in OpenAIBuiltInToolName and tool_config.is_enabled:
+                builtin_tools.append(
+                    await cls._build_tool(
+                        uploaded_files,
+                        content_service,
+                        user_id,
+                        company_id,
+                        chat_id,
+                        client,
+                        tool_config,
+                    )
+                )
+
+        return OpenAIBuiltInToolManager(builtin_tools)
+
+    def get_all_openai_builtin_tools(self) -> list[OpenAIBuiltInTool]:
+        return self._builtin_tools.copy()
```
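A similar sketch for the manager: `build_manager()` and `get_all_openai_builtin_tools()` are taken from the hunk above, while the `ToolBuildConfig` instances, client, and content service are assumed inputs whose construction is not shown in this diff. The final list comprehension also assumes the base `OpenAIBuiltInTool` exposes the same `tool_description()` method that the code interpreter tool overrides.

```python
# Hypothetical sketch (not part of the diff); all inputs are assumed to be provided by the caller.
from openai import AsyncOpenAI

from unique_toolkit.agentic.tools.config import ToolBuildConfig
from unique_toolkit.agentic.tools.openai_builtin.manager import OpenAIBuiltInToolManager
from unique_toolkit.content.schemas import Content
from unique_toolkit.content.service import ContentService


async def collect_builtin_tool_params(
    client: AsyncOpenAI,
    content_service: ContentService,
    uploaded_files: list[Content],
    tool_configs: list[ToolBuildConfig],
    company_id: str,
    user_id: str,
    chat_id: str,
) -> list[dict]:
    manager = await OpenAIBuiltInToolManager.build_manager(
        uploaded_files=uploaded_files,
        content_service=content_service,
        user_id=user_id,
        company_id=company_id,
        chat_id=chat_id,
        client=client,
        tool_configs=tool_configs,  # only enabled OpenAI built-in tools are instantiated
    )
    # tool_description() yields the payload that goes into the Responses API `tools` list.
    return [tool.tool_description() for tool in manager.get_all_openai_builtin_tools()]
```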
```diff
--- /dev/null
+++ unique_toolkit/agentic/tools/schemas.py
@@ -0,0 +1,145 @@
+import base64
+import gzip
+import re
+from typing import Any, Optional
+
+from pydantic import BaseModel, ConfigDict, Field, field_serializer, field_validator
+
+from unique_toolkit._common.pydantic_helpers import get_configuration_dict
+from unique_toolkit.agentic.tools.utils.source_handling.schema import SourceFormatConfig
+from unique_toolkit.content.schemas import ContentChunk
+
+
+# TODO: this needs to be more general as the tools can potentially return anything maybe make a base class and then derive per "type" of tool
+class ToolCallResponse(BaseModel):
+    id: str
+    name: str
+    content: str = ""
+    debug_info: Optional[dict] = None  # TODO: Make the default {}
+    content_chunks: Optional[list[ContentChunk]] = None  # TODO: Make the default []
+    reasoning_result: Optional[dict] = None  # TODO: Make the default {}
+    error_message: str = ""
+    system_reminder: str = Field(
+        default="",
+        description="A reminder for the agent to consider when using the tool that will be appended to the tool call response",
+    )
+
+    @property
+    def successful(self) -> bool:
+        return self.error_message == ""
+
+
+class BaseToolConfig(BaseModel):
+    model_config = get_configuration_dict()
+    # TODO: add a check for the parameters to all be consistent within the tool config
+    pass
+
+
+class Source(BaseModel):
+    """Represents the sources in the tool call response that the llm will see
+
+    Args:
+        source_number: The number of the source
+        content: The content of the source
+    """
+
+    model_config = ConfigDict(
+        validate_by_alias=True, serialize_by_alias=True, validate_by_name=True
+    )
+
+    source_number: int | None = Field(
+        default=None,
+        serialization_alias="[source_number] - Used for citations!",
+        validation_alias="[source_number] - Used for citations!",
+    )
+    content: str = Field(
+        serialization_alias="[content] - Content of source",
+        validation_alias="[content] - Content of source",
+    )
+    order: int = Field(
+        serialization_alias="[order] - Index in the document!",
+        validation_alias="[order] - Index in the document!",
+    )
+    chunk_id: str | None = Field(
+        default=None,
+        serialization_alias="[chunk_id] - IGNORE",
+        validation_alias="[chunk_id] - IGNORE",
+    )
+    id: str = Field(
+        serialization_alias="[id] - IGNORE",
+        validation_alias="[id] - IGNORE",
+    )
+    key: str | None = Field(
+        default=None,
+        serialization_alias="[key] - IGNORE",
+        validation_alias="[key] - IGNORE",
+    )
+    metadata: dict[str, str] | str | None = Field(
+        default=None,
+        serialization_alias="[metadata] - Formatted metadata",
+        validation_alias="[metadata] - Formatted metadata",
+    )
+    url: str | None = Field(
+        default=None,
+        serialization_alias="[url] - IGNORE",
+        validation_alias="[url] - IGNORE",
+    )
+
+    @field_validator("metadata", mode="before")
+    def _metadata_str_to_dict(
+        cls, v: str | dict[str, str] | None
+    ) -> dict[str, str] | None:
+        """
+        Accept • dict → keep as-is
+               • str → parse tag-string back to dict
+        """
+        if v is None or isinstance(v, dict):
+            return v
+
+        # v is the rendered string. Build a dict by matching the
+        # patterns defined in SourceFormatConfig.sections.
+        cfg = SourceFormatConfig()  # or inject your app-wide config
+        out: dict[str, str] = {}
+        for key, tmpl in cfg.sections.items():
+            pattern = cfg.template_to_pattern(tmpl)
+            m = re.search(pattern, v, flags=re.S)
+            if m:
+                out[key] = m.group(1).strip()
+
+        return out if out else v  # type: ignore
+
+    # Compression + Base64 for url to hide it from the LLM
+    @field_serializer("url")
+    def serialize_url(self, value: str | None) -> str | None:
+        if value is None:
+            return None
+        # Compress then base64 encode
+        compressed = gzip.compress(value.encode())
+        return base64.b64encode(compressed).decode()
+
+    @field_validator("url", mode="before")
+    @classmethod
+    def validate_url(cls, value: Any) -> str | None:
+        if value is None or isinstance(value, str) and not value:
+            return None
+        if isinstance(value, str):
+            try:
+                # Try to decode base64 then decompress
+                decoded_bytes = base64.b64decode(value.encode())
+                decompressed = gzip.decompress(decoded_bytes).decode()
+                return decompressed
+            except Exception:
+                # If decoding/decompression fails, assume it's plain text
+                return value
+        return str(value)
+
+
+class ToolPrompts(BaseModel):
+    name: str
+    display_name: str
+    tool_system_prompt: str
+    tool_format_information_for_system_prompt: str
+    tool_user_prompt: str
+    tool_format_information_for_user_prompt: str
+    tool_description: str
+    input_model: dict[str, Any]
```
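The `url` field of `Source` is deliberately hidden from the model: `serialize_url` gzip-compresses and base64-encodes the value on dump, and `validate_url` reverses the transformation on load, falling back to the plain string if decoding fails. A standard-library-only sketch of that round-trip, using a placeholder URL:

```python
# Stand-alone illustration of the url round-trip implemented by Source.serialize_url and
# Source.validate_url above; the URL value is a placeholder.
import base64
import gzip

url = "https://example.com/reports/2024/q1.pdf"

# Serialization path: gzip-compress, then base64-encode (this is what the LLM sees).
encoded = base64.b64encode(gzip.compress(url.encode())).decode()

# Validation path: base64-decode, then gzip-decompress; a plain string that fails
# decoding would simply be returned unchanged by the validator.
decoded = gzip.decompress(base64.b64decode(encoded.encode())).decode()

assert decoded == url
```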