alita-sdk 0.3.351__py3-none-any.whl → 0.3.499__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alita_sdk/cli/__init__.py +10 -0
- alita_sdk/cli/__main__.py +17 -0
- alita_sdk/cli/agent/__init__.py +5 -0
- alita_sdk/cli/agent/default.py +258 -0
- alita_sdk/cli/agent_executor.py +155 -0
- alita_sdk/cli/agent_loader.py +215 -0
- alita_sdk/cli/agent_ui.py +228 -0
- alita_sdk/cli/agents.py +3601 -0
- alita_sdk/cli/callbacks.py +647 -0
- alita_sdk/cli/cli.py +168 -0
- alita_sdk/cli/config.py +306 -0
- alita_sdk/cli/context/__init__.py +30 -0
- alita_sdk/cli/context/cleanup.py +198 -0
- alita_sdk/cli/context/manager.py +731 -0
- alita_sdk/cli/context/message.py +285 -0
- alita_sdk/cli/context/strategies.py +289 -0
- alita_sdk/cli/context/token_estimation.py +127 -0
- alita_sdk/cli/formatting.py +182 -0
- alita_sdk/cli/input_handler.py +419 -0
- alita_sdk/cli/inventory.py +1256 -0
- alita_sdk/cli/mcp_loader.py +315 -0
- alita_sdk/cli/toolkit.py +327 -0
- alita_sdk/cli/toolkit_loader.py +85 -0
- alita_sdk/cli/tools/__init__.py +43 -0
- alita_sdk/cli/tools/approval.py +224 -0
- alita_sdk/cli/tools/filesystem.py +1751 -0
- alita_sdk/cli/tools/planning.py +389 -0
- alita_sdk/cli/tools/terminal.py +414 -0
- alita_sdk/community/__init__.py +64 -8
- alita_sdk/community/inventory/__init__.py +224 -0
- alita_sdk/community/inventory/config.py +257 -0
- alita_sdk/community/inventory/enrichment.py +2137 -0
- alita_sdk/community/inventory/extractors.py +1469 -0
- alita_sdk/community/inventory/ingestion.py +3172 -0
- alita_sdk/community/inventory/knowledge_graph.py +1457 -0
- alita_sdk/community/inventory/parsers/__init__.py +218 -0
- alita_sdk/community/inventory/parsers/base.py +295 -0
- alita_sdk/community/inventory/parsers/csharp_parser.py +907 -0
- alita_sdk/community/inventory/parsers/go_parser.py +851 -0
- alita_sdk/community/inventory/parsers/html_parser.py +389 -0
- alita_sdk/community/inventory/parsers/java_parser.py +593 -0
- alita_sdk/community/inventory/parsers/javascript_parser.py +629 -0
- alita_sdk/community/inventory/parsers/kotlin_parser.py +768 -0
- alita_sdk/community/inventory/parsers/markdown_parser.py +362 -0
- alita_sdk/community/inventory/parsers/python_parser.py +604 -0
- alita_sdk/community/inventory/parsers/rust_parser.py +858 -0
- alita_sdk/community/inventory/parsers/swift_parser.py +832 -0
- alita_sdk/community/inventory/parsers/text_parser.py +322 -0
- alita_sdk/community/inventory/parsers/yaml_parser.py +370 -0
- alita_sdk/community/inventory/patterns/__init__.py +61 -0
- alita_sdk/community/inventory/patterns/ast_adapter.py +380 -0
- alita_sdk/community/inventory/patterns/loader.py +348 -0
- alita_sdk/community/inventory/patterns/registry.py +198 -0
- alita_sdk/community/inventory/presets.py +535 -0
- alita_sdk/community/inventory/retrieval.py +1403 -0
- alita_sdk/community/inventory/toolkit.py +173 -0
- alita_sdk/community/inventory/visualize.py +1370 -0
- alita_sdk/configurations/bitbucket.py +94 -2
- alita_sdk/configurations/confluence.py +96 -1
- alita_sdk/configurations/gitlab.py +79 -0
- alita_sdk/configurations/jira.py +103 -0
- alita_sdk/configurations/testrail.py +88 -0
- alita_sdk/configurations/xray.py +93 -0
- alita_sdk/configurations/zephyr_enterprise.py +93 -0
- alita_sdk/configurations/zephyr_essential.py +75 -0
- alita_sdk/runtime/clients/artifact.py +1 -1
- alita_sdk/runtime/clients/client.py +214 -42
- alita_sdk/runtime/clients/mcp_discovery.py +342 -0
- alita_sdk/runtime/clients/mcp_manager.py +262 -0
- alita_sdk/runtime/clients/sandbox_client.py +373 -0
- alita_sdk/runtime/langchain/assistant.py +118 -30
- alita_sdk/runtime/langchain/constants.py +8 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py +315 -3
- alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py +103 -60
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +4 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaPowerPointLoader.py +41 -12
- alita_sdk/runtime/langchain/document_loaders/AlitaTableLoader.py +1 -1
- alita_sdk/runtime/langchain/document_loaders/constants.py +116 -99
- alita_sdk/runtime/langchain/interfaces/llm_processor.py +2 -2
- alita_sdk/runtime/langchain/langraph_agent.py +307 -71
- alita_sdk/runtime/langchain/utils.py +48 -8
- alita_sdk/runtime/llms/preloaded.py +2 -6
- alita_sdk/runtime/models/mcp_models.py +61 -0
- alita_sdk/runtime/toolkits/__init__.py +26 -0
- alita_sdk/runtime/toolkits/application.py +9 -2
- alita_sdk/runtime/toolkits/artifact.py +18 -6
- alita_sdk/runtime/toolkits/datasource.py +13 -6
- alita_sdk/runtime/toolkits/mcp.py +780 -0
- alita_sdk/runtime/toolkits/planning.py +178 -0
- alita_sdk/runtime/toolkits/tools.py +205 -55
- alita_sdk/runtime/toolkits/vectorstore.py +9 -4
- alita_sdk/runtime/tools/__init__.py +11 -3
- alita_sdk/runtime/tools/application.py +7 -0
- alita_sdk/runtime/tools/artifact.py +225 -12
- alita_sdk/runtime/tools/function.py +95 -5
- alita_sdk/runtime/tools/graph.py +10 -4
- alita_sdk/runtime/tools/image_generation.py +212 -0
- alita_sdk/runtime/tools/llm.py +494 -102
- alita_sdk/runtime/tools/mcp_inspect_tool.py +284 -0
- alita_sdk/runtime/tools/mcp_remote_tool.py +181 -0
- alita_sdk/runtime/tools/mcp_server_tool.py +4 -4
- alita_sdk/runtime/tools/planning/__init__.py +36 -0
- alita_sdk/runtime/tools/planning/models.py +246 -0
- alita_sdk/runtime/tools/planning/wrapper.py +607 -0
- alita_sdk/runtime/tools/router.py +2 -1
- alita_sdk/runtime/tools/sandbox.py +180 -79
- alita_sdk/runtime/tools/vectorstore.py +22 -21
- alita_sdk/runtime/tools/vectorstore_base.py +125 -52
- alita_sdk/runtime/utils/AlitaCallback.py +106 -20
- alita_sdk/runtime/utils/mcp_client.py +465 -0
- alita_sdk/runtime/utils/mcp_oauth.py +244 -0
- alita_sdk/runtime/utils/mcp_sse_client.py +405 -0
- alita_sdk/runtime/utils/mcp_tools_discovery.py +124 -0
- alita_sdk/runtime/utils/streamlit.py +40 -13
- alita_sdk/runtime/utils/toolkit_utils.py +28 -9
- alita_sdk/runtime/utils/utils.py +12 -0
- alita_sdk/tools/__init__.py +77 -33
- alita_sdk/tools/ado/repos/__init__.py +7 -6
- alita_sdk/tools/ado/repos/repos_wrapper.py +11 -11
- alita_sdk/tools/ado/test_plan/__init__.py +7 -7
- alita_sdk/tools/ado/wiki/__init__.py +7 -11
- alita_sdk/tools/ado/wiki/ado_wrapper.py +89 -15
- alita_sdk/tools/ado/work_item/__init__.py +7 -11
- alita_sdk/tools/ado/work_item/ado_wrapper.py +17 -8
- alita_sdk/tools/advanced_jira_mining/__init__.py +8 -7
- alita_sdk/tools/aws/delta_lake/__init__.py +11 -9
- alita_sdk/tools/azure_ai/search/__init__.py +7 -6
- alita_sdk/tools/base_indexer_toolkit.py +345 -70
- alita_sdk/tools/bitbucket/__init__.py +9 -8
- alita_sdk/tools/bitbucket/api_wrapper.py +50 -6
- alita_sdk/tools/browser/__init__.py +4 -4
- alita_sdk/tools/carrier/__init__.py +4 -6
- alita_sdk/tools/chunkers/__init__.py +3 -1
- alita_sdk/tools/chunkers/sematic/json_chunker.py +1 -0
- alita_sdk/tools/chunkers/sematic/markdown_chunker.py +97 -6
- alita_sdk/tools/chunkers/sematic/proposal_chunker.py +1 -1
- alita_sdk/tools/chunkers/universal_chunker.py +270 -0
- alita_sdk/tools/cloud/aws/__init__.py +7 -6
- alita_sdk/tools/cloud/azure/__init__.py +7 -6
- alita_sdk/tools/cloud/gcp/__init__.py +7 -6
- alita_sdk/tools/cloud/k8s/__init__.py +7 -6
- alita_sdk/tools/code/linter/__init__.py +7 -7
- alita_sdk/tools/code/loaders/codesearcher.py +3 -2
- alita_sdk/tools/code/sonar/__init__.py +8 -7
- alita_sdk/tools/code_indexer_toolkit.py +199 -0
- alita_sdk/tools/confluence/__init__.py +9 -8
- alita_sdk/tools/confluence/api_wrapper.py +171 -75
- alita_sdk/tools/confluence/loader.py +10 -0
- alita_sdk/tools/custom_open_api/__init__.py +9 -4
- alita_sdk/tools/elastic/__init__.py +8 -7
- alita_sdk/tools/elitea_base.py +492 -52
- alita_sdk/tools/figma/__init__.py +7 -7
- alita_sdk/tools/figma/api_wrapper.py +2 -1
- alita_sdk/tools/github/__init__.py +9 -9
- alita_sdk/tools/github/api_wrapper.py +9 -26
- alita_sdk/tools/github/github_client.py +62 -2
- alita_sdk/tools/gitlab/__init__.py +8 -8
- alita_sdk/tools/gitlab/api_wrapper.py +135 -33
- alita_sdk/tools/gitlab_org/__init__.py +7 -8
- alita_sdk/tools/google/bigquery/__init__.py +11 -12
- alita_sdk/tools/google_places/__init__.py +8 -7
- alita_sdk/tools/jira/__init__.py +9 -7
- alita_sdk/tools/jira/api_wrapper.py +100 -52
- alita_sdk/tools/keycloak/__init__.py +8 -7
- alita_sdk/tools/localgit/local_git.py +56 -54
- alita_sdk/tools/memory/__init__.py +1 -1
- alita_sdk/tools/non_code_indexer_toolkit.py +3 -2
- alita_sdk/tools/ocr/__init__.py +8 -7
- alita_sdk/tools/openapi/__init__.py +10 -1
- alita_sdk/tools/pandas/__init__.py +8 -7
- alita_sdk/tools/postman/__init__.py +7 -8
- alita_sdk/tools/postman/api_wrapper.py +19 -8
- alita_sdk/tools/postman/postman_analysis.py +8 -1
- alita_sdk/tools/pptx/__init__.py +8 -9
- alita_sdk/tools/qtest/__init__.py +16 -11
- alita_sdk/tools/qtest/api_wrapper.py +1784 -88
- alita_sdk/tools/rally/__init__.py +7 -8
- alita_sdk/tools/report_portal/__init__.py +9 -7
- alita_sdk/tools/salesforce/__init__.py +7 -7
- alita_sdk/tools/servicenow/__init__.py +10 -10
- alita_sdk/tools/sharepoint/__init__.py +7 -6
- alita_sdk/tools/sharepoint/api_wrapper.py +127 -36
- alita_sdk/tools/sharepoint/authorization_helper.py +191 -1
- alita_sdk/tools/sharepoint/utils.py +8 -2
- alita_sdk/tools/slack/__init__.py +7 -6
- alita_sdk/tools/sql/__init__.py +8 -7
- alita_sdk/tools/sql/api_wrapper.py +71 -23
- alita_sdk/tools/testio/__init__.py +7 -6
- alita_sdk/tools/testrail/__init__.py +8 -9
- alita_sdk/tools/utils/__init__.py +26 -4
- alita_sdk/tools/utils/content_parser.py +88 -60
- alita_sdk/tools/utils/text_operations.py +254 -0
- alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +76 -26
- alita_sdk/tools/xray/__init__.py +9 -7
- alita_sdk/tools/zephyr/__init__.py +7 -6
- alita_sdk/tools/zephyr_enterprise/__init__.py +8 -6
- alita_sdk/tools/zephyr_essential/__init__.py +7 -6
- alita_sdk/tools/zephyr_essential/api_wrapper.py +12 -13
- alita_sdk/tools/zephyr_scale/__init__.py +7 -6
- alita_sdk/tools/zephyr_squad/__init__.py +7 -6
- {alita_sdk-0.3.351.dist-info → alita_sdk-0.3.499.dist-info}/METADATA +147 -2
- {alita_sdk-0.3.351.dist-info → alita_sdk-0.3.499.dist-info}/RECORD +206 -130
- alita_sdk-0.3.499.dist-info/entry_points.txt +2 -0
- {alita_sdk-0.3.351.dist-info → alita_sdk-0.3.499.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.351.dist-info → alita_sdk-0.3.499.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.351.dist-info → alita_sdk-0.3.499.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,373 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
from typing import Dict, Optional
|
|
3
|
+
from urllib.parse import quote
|
|
4
|
+
|
|
5
|
+
import requests
|
|
6
|
+
from typing import Any
|
|
7
|
+
from json import dumps
|
|
8
|
+
import chardet
|
|
9
|
+
|
|
10
|
+
logger = logging.getLogger(__name__)
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class ApiDetailsRequestError(Exception):
    """Raised when a sandbox API details request fails (non-OK response)."""
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class SandboxArtifact:
|
|
18
|
+
def __init__(self, client: Any, bucket_name: str):
|
|
19
|
+
self.client = client
|
|
20
|
+
self.bucket_name = bucket_name
|
|
21
|
+
if not self.client.bucket_exists(bucket_name):
|
|
22
|
+
self.client.create_bucket(bucket_name)
|
|
23
|
+
|
|
24
|
+
def create(self, artifact_name: str, artifact_data: Any, bucket_name: str = None):
|
|
25
|
+
try:
|
|
26
|
+
if not bucket_name:
|
|
27
|
+
bucket_name = self.bucket_name
|
|
28
|
+
return dumps(self.client.create_artifact(bucket_name, artifact_name, artifact_data))
|
|
29
|
+
except Exception as e:
|
|
30
|
+
logger.error(f'Error: {e}')
|
|
31
|
+
return f'Error: {e}'
|
|
32
|
+
|
|
33
|
+
def get(self,
|
|
34
|
+
artifact_name: str,
|
|
35
|
+
bucket_name: str = None,
|
|
36
|
+
is_capture_image: bool = False,
|
|
37
|
+
page_number: int = None,
|
|
38
|
+
sheet_name: str = None,
|
|
39
|
+
excel_by_sheets: bool = False,
|
|
40
|
+
llm=None):
|
|
41
|
+
if not bucket_name:
|
|
42
|
+
bucket_name = self.bucket_name
|
|
43
|
+
data = self.client.download_artifact(bucket_name, artifact_name)
|
|
44
|
+
if len(data) == 0:
|
|
45
|
+
# empty file might be created
|
|
46
|
+
return ''
|
|
47
|
+
if isinstance(data, dict) and data['error']:
|
|
48
|
+
return f'{data['error']}. {data['content'] if data['content'] else ''}'
|
|
49
|
+
detected = chardet.detect(data)
|
|
50
|
+
return data
|
|
51
|
+
# TODO: add proper handling for binary files (images, pdf, etc.) for sandbox
|
|
52
|
+
# if detected['encoding'] is not None:
|
|
53
|
+
# try:
|
|
54
|
+
# return data.decode(detected['encoding'])
|
|
55
|
+
# except Exception:
|
|
56
|
+
# logger.error('Error while default encoding')
|
|
57
|
+
# return parse_file_content(file_name=artifact_name,
|
|
58
|
+
# file_content=data,
|
|
59
|
+
# is_capture_image=is_capture_image,
|
|
60
|
+
# page_number=page_number,
|
|
61
|
+
# sheet_name=sheet_name,
|
|
62
|
+
# excel_by_sheets=excel_by_sheets,
|
|
63
|
+
# llm=llm)
|
|
64
|
+
# else:
|
|
65
|
+
# return parse_file_content(file_name=artifact_name,
|
|
66
|
+
# file_content=data,
|
|
67
|
+
# is_capture_image=is_capture_image,
|
|
68
|
+
# page_number=page_number,
|
|
69
|
+
# sheet_name=sheet_name,
|
|
70
|
+
# excel_by_sheets=excel_by_sheets,
|
|
71
|
+
# llm=llm)
|
|
72
|
+
|
|
73
|
+
def delete(self, artifact_name: str, bucket_name=None):
|
|
74
|
+
if not bucket_name:
|
|
75
|
+
bucket_name = self.bucket_name
|
|
76
|
+
self.client.delete_artifact(bucket_name, artifact_name)
|
|
77
|
+
|
|
78
|
+
def list(self, bucket_name: str = None, return_as_string=True) -> str | dict:
|
|
79
|
+
if not bucket_name:
|
|
80
|
+
bucket_name = self.bucket_name
|
|
81
|
+
artifacts = self.client.list_artifacts(bucket_name)
|
|
82
|
+
return str(artifacts) if return_as_string else artifacts
|
|
83
|
+
|
|
84
|
+
def append(self, artifact_name: str, additional_data: Any, bucket_name: str = None):
|
|
85
|
+
if not bucket_name:
|
|
86
|
+
bucket_name = self.bucket_name
|
|
87
|
+
data = self.get(artifact_name, bucket_name)
|
|
88
|
+
if data == 'Could not detect encoding':
|
|
89
|
+
return data
|
|
90
|
+
data += f'{additional_data}' if len(data) > 0 else additional_data
|
|
91
|
+
self.client.create_artifact(bucket_name, artifact_name, data)
|
|
92
|
+
return 'Data appended successfully'
|
|
93
|
+
|
|
94
|
+
def overwrite(self, artifact_name: str, new_data: Any, bucket_name: str = None):
|
|
95
|
+
if not bucket_name:
|
|
96
|
+
bucket_name = self.bucket_name
|
|
97
|
+
return self.create(artifact_name, new_data, bucket_name)
|
|
98
|
+
|
|
99
|
+
def get_content_bytes(self,
|
|
100
|
+
artifact_name: str,
|
|
101
|
+
bucket_name: str = None):
|
|
102
|
+
if not bucket_name:
|
|
103
|
+
bucket_name = self.bucket_name
|
|
104
|
+
return self.client.download_artifact(bucket_name, artifact_name)
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
class SandboxClient:
    """HTTP client for the sandbox REST API of a single project.

    Precomputes all endpoint URLs in ``__init__`` and exposes thin wrappers
    around the prompt-lib, applications, datasources, MCP, integrations,
    secrets, image-generation and artifact endpoints. Every request carries
    the bearer-token headers.

    NOTE(review): every request is sent with ``verify=False``, i.e. TLS
    certificate checking is disabled -- confirm this is intentional for the
    sandbox deployment before exposing it beyond trusted networks.
    """

    def __init__(self,
                 base_url: str,
                 project_id: int,
                 auth_token: str,
                 api_extra_headers: Optional[dict] = None,
                 configurations: Optional[list] = None,
                 **kwargs):
        """Build endpoint URLs for *project_id* under *base_url*.

        Recognised ``kwargs``: ``XSECRET`` (value of the ``X-SECRET``
        header, default ``'secret'``), ``model_timeout`` (request timeout
        in seconds used by ``generate_image``, default 120) and
        ``model_image_generation`` (model name used by ``generate_image``).
        """
        self.base_url = base_url.rstrip('/')
        self.api_path = '/api/v1'
        self.llm_path = '/llm/v1'
        self.project_id = project_id
        self.auth_token = auth_token
        self.headers = {
            'Authorization': f'Bearer {auth_token}',
            'X-SECRET': kwargs.get('XSECRET', 'secret')
        }
        if api_extra_headers is not None:
            self.headers.update(api_extra_headers)
        self.predict_url = f'{self.base_url}{self.api_path}/prompt_lib/predict/prompt_lib/{self.project_id}'
        self.prompt_versions = f'{self.base_url}{self.api_path}/prompt_lib/version/prompt_lib/{self.project_id}'
        self.prompts = f'{self.base_url}{self.api_path}/prompt_lib/prompt/prompt_lib/{self.project_id}'
        self.datasources = f'{self.base_url}{self.api_path}/datasources/datasource/prompt_lib/{self.project_id}'
        self.datasources_predict = f'{self.base_url}{self.api_path}/datasources/predict/prompt_lib/{self.project_id}'
        self.datasources_search = f'{self.base_url}{self.api_path}/datasources/search/prompt_lib/{self.project_id}'
        self.app = f'{self.base_url}{self.api_path}/applications/application/prompt_lib/{self.project_id}'
        self.mcp_tools_list = f'{self.base_url}{self.api_path}/mcp_sse/tools_list/{self.project_id}'
        self.mcp_tools_call = f'{self.base_url}{self.api_path}/mcp_sse/tools_call/{self.project_id}'
        self.application_versions = f'{self.base_url}{self.api_path}/applications/version/prompt_lib/{self.project_id}'
        self.list_apps_url = f'{self.base_url}{self.api_path}/applications/applications/prompt_lib/{self.project_id}'
        self.integration_details = f'{self.base_url}{self.api_path}/integrations/integration/{self.project_id}'
        self.secrets_url = f'{self.base_url}{self.api_path}/secrets/secret/{self.project_id}'
        self.artifacts_url = f'{self.base_url}{self.api_path}/artifacts/artifacts/default/{self.project_id}'
        self.artifact_url = f'{self.base_url}{self.api_path}/artifacts/artifact/default/{self.project_id}'
        self.bucket_url = f'{self.base_url}{self.api_path}/artifacts/buckets/{self.project_id}'
        self.configurations_url = f'{self.base_url}{self.api_path}/integrations/integrations/default/{self.project_id}?section=configurations&unsecret=true'
        self.ai_section_url = f'{self.base_url}{self.api_path}/integrations/integrations/default/{self.project_id}?section=ai'
        self.image_generation_url = f'{self.base_url}{self.llm_path}/images/generations'
        self.auth_user_url = f'{self.base_url}{self.api_path}/auth/user'
        self.configurations: list = configurations or []
        self.model_timeout = kwargs.get('model_timeout', 120)
        self.model_image_generation = kwargs.get('model_image_generation')

    def _get_real_user_id(self):
        """Return the current user's id, or None when it cannot be resolved.

        NOTE(review): the MCP methods below called this helper, but the
        original module never defined it anywhere in the file, so every MCP
        call would raise AttributeError at runtime. Resolving the id from
        the auth endpoint is the presumed intent -- confirm against the
        platform client this class mirrors.
        """
        try:
            return self.get_user_data().get('id')
        except Exception:
            logger.exception('Could not resolve real user id')
            return None

    def get_mcp_toolkits(self):
        """List MCP toolkits available to the current user ([] if unknown user)."""
        if user_id := self._get_real_user_id():
            url = f'{self.mcp_tools_list}/{user_id}'
            data = requests.get(url, headers=self.headers, verify=False).json()
            return data
        else:
            return []

    def mcp_tool_call(self, params: dict[str, Any]):
        """Invoke an MCP tool.

        Returns the parsed JSON reply, the raw response text when the body
        is not JSON, or an error string when the user id cannot be resolved.
        """
        if user_id := self._get_real_user_id():
            url = f'{self.mcp_tools_call}/{user_id}'
            # Pydantic objects are not JSON-serializable: replace any such
            # argument value (or list element) with its .dict() form before
            # posting the payload.
            for arg_name, arg_value in params.get('params', {}).get('arguments', {}).items():
                if isinstance(arg_value, list):
                    params['params']['arguments'][arg_name] = [
                        item.dict() if hasattr(item, 'dict') and callable(item.dict) else item
                        for item in arg_value
                    ]
                elif hasattr(arg_value, 'dict') and callable(arg_value.dict):
                    params['params']['arguments'][arg_name] = arg_value.dict()
            response = requests.post(url, headers=self.headers, json=params, verify=False)
            try:
                return response.json()
            except (ValueError, TypeError):
                # Body was not JSON -- hand back the raw text.
                return response.text
        else:
            return 'Error: Could not determine user ID for MCP tool call'

    def get_app_details(self, application_id: int):
        """Fetch application metadata by id."""
        url = f'{self.app}/{application_id}'
        data = requests.get(url, headers=self.headers, verify=False).json()
        return data

    def get_list_of_apps(self):
        """Return [{'name', 'id'}, ...] for all applications, paging 10 at a time.

        Stops (returning what was collected so far) on the first failed page.
        """
        apps = []
        limit = 10
        offset = 0
        total_count = None

        while total_count is None or offset < total_count:
            params = {'offset': offset, 'limit': limit}
            resp = requests.get(self.list_apps_url, headers=self.headers, params=params, verify=False)

            if resp.ok:
                data = resp.json()
                total_count = data.get('total')
                apps.extend([{'name': app['name'], 'id': app['id']} for app in data.get('rows', [])])
                offset += limit
            else:
                break

        return apps

    def fetch_available_configurations(self) -> list:
        """Fetch the 'configurations' integrations section; [] on failure."""
        resp = requests.get(self.configurations_url, headers=self.headers, verify=False)
        if resp.ok:
            return resp.json()
        return []

    def all_models_and_integrations(self):
        """Fetch the 'ai' integrations section; [] on failure."""
        resp = requests.get(self.ai_section_url, headers=self.headers, verify=False)
        if resp.ok:
            return resp.json()
        return []

    def generate_image(self,
                       prompt: str,
                       n: int = 1,
                       size: str = 'auto',
                       quality: str = 'auto',
                       response_format: str = 'b64_json',
                       style: Optional[str] = None) -> dict:
        """Generate image(s) via the LLM image endpoint.

        Requires ``model_image_generation`` to have been passed to the
        constructor; raises ValueError otherwise. 'auto' size/quality are
        omitted from the payload so the backend applies its defaults.
        Re-raises HTTP / request errors after logging them.
        """
        if not self.model_image_generation:
            raise ValueError('Image generation model is not configured for this client')

        image_generation_data = {
            'prompt': prompt,
            'model': self.model_image_generation,
            'n': n,
            'response_format': response_format,
        }

        # Only add optional parameters if they have meaningful values
        if size and size.lower() != 'auto':
            image_generation_data['size'] = size

        if quality and quality.lower() != 'auto':
            image_generation_data['quality'] = quality

        if style:
            image_generation_data['style'] = style

        # Standard headers for image generation
        image_headers = self.headers.copy()
        image_headers.update({
            'Content-Type': 'application/json',
        })

        logger.info(f'Generating image with model: {self.model_image_generation}, prompt: {prompt[:50]}...')

        try:
            response = requests.post(
                self.image_generation_url,
                headers=image_headers,
                json=image_generation_data,
                verify=False,
                timeout=self.model_timeout
            )
            response.raise_for_status()
            return response.json()

        except requests.exceptions.HTTPError as e:
            logger.error(f'Image generation failed: {e.response.status_code} - {e.response.text}')
            raise
        except requests.exceptions.RequestException as e:
            logger.error(f'Image generation request failed: {e}')
            raise

    def get_app_version_details(self, application_id: int, application_version_id: int) -> dict:
        """Fetch one application version, attaching available configurations.

        Uses the configurations supplied at construction time, falling back
        to fetching them from the API. Raises ApiDetailsRequestError on a
        non-OK response.
        """
        url = f'{self.application_versions}/{application_id}/{application_version_id}'
        if self.configurations:
            configs = self.configurations
        else:
            configs = self.fetch_available_configurations()

        resp = requests.patch(url, headers=self.headers, verify=False, json={'configurations': configs})
        if resp.ok:
            return resp.json()
        logger.error(f'Failed to fetch application version details: {resp.status_code} - {resp.text}.'
                     f' Application ID: {application_id}, Version ID: {application_version_id}')
        raise ApiDetailsRequestError(
            f'Failed to fetch application version details for {application_id}/{application_version_id}.')

    def get_integration_details(self, integration_id: str, format_for_model: bool = False):
        """Fetch a single integration's details.

        NOTE(review): *format_for_model* is currently unused -- kept for
        interface compatibility with callers.
        """
        url = f'{self.integration_details}/{integration_id}'
        data = requests.get(url, headers=self.headers, verify=False).json()
        return data

    def unsecret(self, secret_name: str):
        """Resolve a secret's plaintext value, or None when absent."""
        url = f'{self.secrets_url}/{secret_name}'
        data = requests.get(url, headers=self.headers, verify=False).json()
        logger.info(f'Unsecret response: {data}')
        return data.get('value', None)

    def artifact(self, bucket_name):
        """Return a SandboxArtifact helper bound to *bucket_name*."""
        return SandboxArtifact(self, bucket_name)

    def _process_request(self, data: 'requests.Response') -> Dict[str, str]:
        """Map a response to its JSON body or a uniform error dict.

        403/404 yield {'error': ...}; other non-200 codes also include the
        raw body under 'content'.
        """
        if data.status_code == 403:
            return {'error': 'You are not authorized to access this resource'}
        elif data.status_code == 404:
            return {'error': 'Resource not found'}
        elif data.status_code != 200:
            return {
                'error': 'An error occurred while fetching the resource',
                'content': data.text
            }
        else:
            return data.json()

    # Backward-compatible alias: the method was originally published under
    # this misspelled name, so external callers may still use it.
    _process_requst = _process_request

    def bucket_exists(self, bucket_name):
        """Return True when *bucket_name* is listed among the project buckets.

        Any failure (network error, error payload) is treated as "does not
        exist". Narrowed from a bare ``except:`` so KeyboardInterrupt and
        SystemExit are no longer swallowed.
        """
        try:
            resp = self._process_request(
                requests.get(f'{self.bucket_url}', headers=self.headers, verify=False)
            )
            for each in resp.get('rows', []):
                if each['name'] == bucket_name:
                    return True
            return False
        except Exception:
            return False

    def create_bucket(self, bucket_name, expiration_measure='months', expiration_value=1):
        """Create a bucket with the given retention settings."""
        post_data = {
            'name': bucket_name,
            'expiration_measure': expiration_measure,
            'expiration_value': expiration_value
        }
        resp = requests.post(f'{self.bucket_url}', headers=self.headers, json=post_data, verify=False)
        return self._process_request(resp)

    def list_artifacts(self, bucket_name: str):
        """List artifacts in a bucket (bucket names are lowercased per the API)."""
        url = f'{self.artifacts_url}/{bucket_name.lower()}'
        data = requests.get(url, headers=self.headers, verify=False)
        return self._process_request(data)

    def create_artifact(self, bucket_name, artifact_name, artifact_data):
        """Upload an artifact as a multipart file field."""
        url = f'{self.artifacts_url}/{bucket_name.lower()}'
        data = requests.post(url, headers=self.headers, files={
            'file': (artifact_name, artifact_data)
        }, verify=False)
        return self._process_request(data)

    def download_artifact(self, bucket_name, artifact_name):
        """Download an artifact's raw bytes, or an error dict on failure.

        Unlike _process_request, the non-200 branch carries the raw
        ``content`` bytes rather than the decoded text.
        """
        url = f'{self.artifact_url}/{bucket_name.lower()}/{artifact_name}'
        data = requests.get(url, headers=self.headers, verify=False)
        if data.status_code == 403:
            return {'error': 'You are not authorized to access this resource'}
        elif data.status_code == 404:
            return {'error': 'Resource not found'}
        elif data.status_code != 200:
            return {
                'error': 'An error occurred while fetching the resource',
                'content': data.content
            }
        return data.content

    def delete_artifact(self, bucket_name, artifact_name):
        """Delete an artifact by filename.

        NOTE(review): quote() here is combined with requests' own params
        encoding, which double-encodes special characters -- presumably
        what the server expects; confirm before changing.
        """
        url = f'{self.artifact_url}/{bucket_name}'
        data = requests.delete(url, headers=self.headers, verify=False, params={'filename': quote(artifact_name)})
        return self._process_request(data)

    def get_user_data(self) -> Dict[str, Any]:
        """Fetch the authenticated user's profile; raises ApiDetailsRequestError on failure."""
        resp = requests.get(self.auth_user_url, headers=self.headers, verify=False)
        if resp.ok:
            return resp.json()
        logger.error(f'Failed to fetch user data: {resp.status_code} - {resp.text}')
        raise ApiDetailsRequestError(f'Failed to fetch user data with status code {resp.status_code}.')
|
|
@@ -17,6 +17,7 @@ from .constants import REACT_ADDON, REACT_VARS, XML_ADDON
|
|
|
17
17
|
from .chat_message_template import Jinja2TemplatedChatMessagesTemplate
|
|
18
18
|
from ..tools.echo import EchoTool
|
|
19
19
|
from langchain_core.tools import BaseTool, ToolException
|
|
20
|
+
from jinja2 import Environment, DebugUndefined
|
|
20
21
|
|
|
21
22
|
logger = logging.getLogger(__name__)
|
|
22
23
|
|
|
@@ -29,7 +30,10 @@ class Assistant:
|
|
|
29
30
|
app_type: str = "openai",
|
|
30
31
|
tools: Optional[list] = [],
|
|
31
32
|
memory: Optional[Any] = None,
|
|
32
|
-
store: Optional[BaseStore] = None
|
|
33
|
+
store: Optional[BaseStore] = None,
|
|
34
|
+
debug_mode: Optional[bool] = False,
|
|
35
|
+
mcp_tokens: Optional[dict] = None,
|
|
36
|
+
conversation_id: Optional[str] = None):
|
|
33
37
|
|
|
34
38
|
self.app_type = app_type
|
|
35
39
|
self.memory = memory
|
|
@@ -38,7 +42,8 @@ class Assistant:
|
|
|
38
42
|
|
|
39
43
|
logger.debug("Data for agent creation: %s", data)
|
|
40
44
|
logger.info("App type: %s", app_type)
|
|
41
|
-
|
|
45
|
+
|
|
46
|
+
self.alita_client = alita
|
|
42
47
|
self.client = client
|
|
43
48
|
# For predict agents, use the client as-is since it's already configured
|
|
44
49
|
# if app_type == "predict":
|
|
@@ -76,13 +81,71 @@ class Assistant:
|
|
|
76
81
|
else:
|
|
77
82
|
# For predict agents, initialize memory store to None since they don't use memory
|
|
78
83
|
self.store = None
|
|
79
|
-
|
|
84
|
+
|
|
80
85
|
# Lazy import to avoid circular dependency
|
|
81
86
|
from ..toolkits.tools import get_tools
|
|
82
|
-
|
|
83
|
-
|
|
87
|
+
version_tools = data['tools']
|
|
88
|
+
# Handle internal tools
|
|
89
|
+
meta = data.get('meta', {})
|
|
90
|
+
if meta.get("internal_tools"):
|
|
91
|
+
for internal_tool_name in meta.get("internal_tools"):
|
|
92
|
+
version_tools.append({"type": "internal_tool", "name": internal_tool_name})
|
|
93
|
+
|
|
94
|
+
self.tools = get_tools(
|
|
95
|
+
version_tools,
|
|
96
|
+
alita_client=alita,
|
|
97
|
+
llm=self.client,
|
|
98
|
+
memory_store=self.store,
|
|
99
|
+
debug_mode=debug_mode,
|
|
100
|
+
mcp_tokens=mcp_tokens,
|
|
101
|
+
conversation_id=conversation_id
|
|
102
|
+
)
|
|
84
103
|
if tools:
|
|
85
104
|
self.tools += tools
|
|
105
|
+
|
|
106
|
+
# Create ToolRegistry to track tool metadata and handle name collisions
|
|
107
|
+
self.tool_registry = {}
|
|
108
|
+
tool_name_counts = {} # Track how many times each base name appears
|
|
109
|
+
|
|
110
|
+
for tool in self.tools:
|
|
111
|
+
if hasattr(tool, 'name'):
|
|
112
|
+
original_name = tool.name
|
|
113
|
+
base_name = original_name
|
|
114
|
+
|
|
115
|
+
# Extract toolkit metadata from tool configuration
|
|
116
|
+
toolkit_name = ""
|
|
117
|
+
toolkit_type = ""
|
|
118
|
+
|
|
119
|
+
# Find matching tool config to extract metadata
|
|
120
|
+
for tool_config in version_tools:
|
|
121
|
+
# Try to match by toolkit_name or name field
|
|
122
|
+
config_toolkit_name = tool_config.get('toolkit_name', tool_config.get('name', ''))
|
|
123
|
+
# Simple heuristic: toolkit info should be accessible from tool config
|
|
124
|
+
# For now, use toolkit_name and type from config
|
|
125
|
+
toolkit_name = config_toolkit_name
|
|
126
|
+
toolkit_type = tool_config.get('type', '')
|
|
127
|
+
break # Use first match for now; will refine with better matching
|
|
128
|
+
|
|
129
|
+
# Handle duplicate tool names by appending numeric suffix
|
|
130
|
+
if base_name in tool_name_counts:
|
|
131
|
+
tool_name_counts[base_name] += 1
|
|
132
|
+
# Append suffix to make unique
|
|
133
|
+
new_name = f"{base_name}_{tool_name_counts[base_name]}"
|
|
134
|
+
tool.name = new_name
|
|
135
|
+
logger.info(f"Tool name collision detected: '{base_name}' -> '{new_name}'")
|
|
136
|
+
else:
|
|
137
|
+
tool_name_counts[base_name] = 0
|
|
138
|
+
new_name = base_name
|
|
139
|
+
|
|
140
|
+
# Store in registry
|
|
141
|
+
self.tool_registry[tool.name] = {
|
|
142
|
+
'toolkit_name': toolkit_name,
|
|
143
|
+
'toolkit_type': toolkit_type,
|
|
144
|
+
'original_tool_name': base_name
|
|
145
|
+
}
|
|
146
|
+
|
|
147
|
+
logger.info(f"ToolRegistry initialized with {len(self.tool_registry)} tools")
|
|
148
|
+
|
|
86
149
|
# Handle prompt setup
|
|
87
150
|
if app_type in ["pipeline", "predict", "react"]:
|
|
88
151
|
self.prompt = data['instructions']
|
|
@@ -110,13 +173,18 @@ class Assistant:
|
|
|
110
173
|
messages.extend(chat_history)
|
|
111
174
|
self.prompt = Jinja2TemplatedChatMessagesTemplate(messages=messages)
|
|
112
175
|
if input_variables:
|
|
113
|
-
self.prompt.input_variables
|
|
176
|
+
if hasattr(self.prompt, 'input_variables') and self.prompt.input_variables is not None:
|
|
177
|
+
self.prompt.input_variables.extend(input_variables)
|
|
178
|
+
else:
|
|
179
|
+
self.prompt.input_variables = input_variables
|
|
114
180
|
if variables:
|
|
115
181
|
self.prompt.partial_variables = variables
|
|
116
182
|
try:
|
|
117
|
-
logger.info(
|
|
183
|
+
logger.info(
|
|
184
|
+
f"Client was created with client setting: temperature - {self.client._get_model_default_parameters}")
|
|
118
185
|
except Exception as e:
|
|
119
|
-
logger.info(
|
|
186
|
+
logger.info(
|
|
187
|
+
f"Client was created with client setting: temperature - {self.client.temperature} : {self.client.max_tokens}")
|
|
120
188
|
|
|
121
189
|
def _configure_store(self, memory_tool: dict | None) -> None:
|
|
122
190
|
"""
|
|
@@ -133,11 +201,9 @@ class Assistant:
|
|
|
133
201
|
def runnable(self):
|
|
134
202
|
if self.app_type == 'pipeline':
|
|
135
203
|
return self.pipeline()
|
|
136
|
-
elif self.app_type == 'openai':
|
|
137
|
-
return self.getOpenAIToolsAgentExecutor()
|
|
138
204
|
elif self.app_type == 'xml':
|
|
139
205
|
return self.getXMLAgentExecutor()
|
|
140
|
-
elif self.app_type in ['predict', 'react']:
|
|
206
|
+
elif self.app_type in ['predict', 'react', 'openai']:
|
|
141
207
|
return self.getLangGraphReactAgent()
|
|
142
208
|
else:
|
|
143
209
|
self.tools = [EchoTool()] + self.tools
|
|
@@ -155,7 +221,6 @@ class Assistant:
|
|
|
155
221
|
agent = create_json_chat_agent(llm=self.client, tools=simple_tools, prompt=self.prompt)
|
|
156
222
|
return self._agent_executor(agent)
|
|
157
223
|
|
|
158
|
-
|
|
159
224
|
def getXMLAgentExecutor(self):
|
|
160
225
|
# Exclude compiled graph runnables from simple tool agents
|
|
161
226
|
simple_tools = [t for t in self.tools if isinstance(t, (BaseTool, CompiledStateGraph))]
|
|
@@ -176,22 +241,6 @@ class Assistant:
|
|
|
176
241
|
# Exclude compiled graph runnables from simple tool agents
|
|
177
242
|
simple_tools = [t for t in self.tools if isinstance(t, (BaseTool, CompiledStateGraph))]
|
|
178
243
|
|
|
179
|
-
# Add sandbox tool by default for react agents
|
|
180
|
-
try:
|
|
181
|
-
from ..tools.sandbox import create_sandbox_tool
|
|
182
|
-
sandbox_tool = create_sandbox_tool(stateful=False, allow_net=True)
|
|
183
|
-
simple_tools.append(sandbox_tool)
|
|
184
|
-
logger.info("Added PyodideSandboxTool to react agent")
|
|
185
|
-
except ImportError as e:
|
|
186
|
-
logger.warning(f"Failed to add PyodideSandboxTool: {e}. Install langchain-sandbox to enable this feature.")
|
|
187
|
-
except RuntimeError as e:
|
|
188
|
-
if "Deno" in str(e):
|
|
189
|
-
logger.warning("Failed to add PyodideSandboxTool: Deno is required. Install from https://docs.deno.com/runtime/getting_started/installation/")
|
|
190
|
-
else:
|
|
191
|
-
logger.warning(f"Failed to add PyodideSandboxTool: {e}")
|
|
192
|
-
except Exception as e:
|
|
193
|
-
logger.error(f"Error adding PyodideSandboxTool: {e}")
|
|
194
|
-
|
|
195
244
|
# Set up memory/checkpointer if available
|
|
196
245
|
checkpointer = None
|
|
197
246
|
if self.memory is not None:
|
|
@@ -225,6 +274,10 @@ class Assistant:
|
|
|
225
274
|
# Only use prompt_instructions if explicitly specified (for predict app_type)
|
|
226
275
|
if self.app_type == "predict" and isinstance(self.prompt, str):
|
|
227
276
|
prompt_instructions = self.prompt
|
|
277
|
+
|
|
278
|
+
# take the system message from the openai prompt as a prompt instructions
|
|
279
|
+
if self.app_type == "openai" and hasattr(self.prompt, 'messages'):
|
|
280
|
+
prompt_instructions = self.__take_prompt_from_openai_messages()
|
|
228
281
|
|
|
229
282
|
# Create a unified YAML schema with conditional tool binding
|
|
230
283
|
# Build the base node configuration
|
|
@@ -266,6 +319,9 @@ class Assistant:
|
|
|
266
319
|
schema_dict = {
|
|
267
320
|
'name': 'react_agent',
|
|
268
321
|
'state': {
|
|
322
|
+
'input': {
|
|
323
|
+
'type': 'str'
|
|
324
|
+
},
|
|
269
325
|
'messages': state_messages_config
|
|
270
326
|
},
|
|
271
327
|
'nodes': [{
|
|
@@ -274,6 +330,21 @@ class Assistant:
|
|
|
274
330
|
'prompt': {
|
|
275
331
|
'template': escaped_prompt
|
|
276
332
|
},
|
|
333
|
+
'input_mapping': {
|
|
334
|
+
'system': {
|
|
335
|
+
'type': 'fixed',
|
|
336
|
+
'value': escaped_prompt
|
|
337
|
+
},
|
|
338
|
+
'task': {
|
|
339
|
+
'type': 'variable',
|
|
340
|
+
'value': 'input'
|
|
341
|
+
},
|
|
342
|
+
'chat_history': {
|
|
343
|
+
'type': 'variable',
|
|
344
|
+
'value': 'messages'
|
|
345
|
+
}
|
|
346
|
+
},
|
|
347
|
+
'step_limit': self.max_iterations,
|
|
277
348
|
'input': ['messages'],
|
|
278
349
|
'output': ['messages'],
|
|
279
350
|
'transition': 'END'
|
|
@@ -298,7 +369,9 @@ class Assistant:
|
|
|
298
369
|
memory=checkpointer,
|
|
299
370
|
store=self.store,
|
|
300
371
|
debug=False,
|
|
301
|
-
for_subgraph=False
|
|
372
|
+
for_subgraph=False,
|
|
373
|
+
alita_client=self.alita_client,
|
|
374
|
+
steps_limit=self.max_iterations
|
|
302
375
|
)
|
|
303
376
|
|
|
304
377
|
return agent
|
|
@@ -312,7 +385,9 @@ class Assistant:
|
|
|
312
385
|
#
|
|
313
386
|
agent = create_graph(
|
|
314
387
|
client=self.client, tools=self.tools,
|
|
315
|
-
yaml_schema=self.prompt, memory=memory
|
|
388
|
+
yaml_schema=self.prompt, memory=memory,
|
|
389
|
+
alita_client=self.alita_client,
|
|
390
|
+
steps_limit=self.max_iterations
|
|
316
391
|
)
|
|
317
392
|
#
|
|
318
393
|
return agent
|
|
@@ -323,3 +398,16 @@ class Assistant:
|
|
|
323
398
|
|
|
324
399
|
def predict(self, messages: list[BaseMessage]):
|
|
325
400
|
return self.client.invoke(messages)
|
|
401
|
+
|
|
402
|
+
def __take_prompt_from_openai_messages(self):
|
|
403
|
+
if self.prompt and self.prompt.messages:
|
|
404
|
+
for message in self.prompt.messages:
|
|
405
|
+
# we don't need any message placeholder from the openai agent prompt
|
|
406
|
+
if hasattr(message, 'variable_name'):
|
|
407
|
+
continue
|
|
408
|
+
# take only the content of the system message from the openai prompt
|
|
409
|
+
if isinstance(message, SystemMessage):
|
|
410
|
+
environment = Environment(undefined=DebugUndefined)
|
|
411
|
+
template = environment.from_string(message.content)
|
|
412
|
+
return template.render(self.prompt.partial_variables)
|
|
413
|
+
return None
|
|
@@ -27,7 +27,7 @@ Use this if you want to respond directly to the human. Markdown code snippet for
|
|
|
27
27
|
```json
|
|
28
28
|
{
|
|
29
29
|
"action": "Final Answer",
|
|
30
|
-
"action_input": string
|
|
30
|
+
"action_input": string // You should put what you want to return to use here
|
|
31
31
|
}
|
|
32
32
|
```
|
|
33
33
|
|
|
@@ -80,3 +80,10 @@ DEFAULT_MULTIMODAL_PROMPT = """
|
|
|
80
80
|
- Maintain a structured and logical flow in the output to enhance understanding and usability.
|
|
81
81
|
- Avoid presenting the entire prompt for user.
|
|
82
82
|
"""
|
|
83
|
+
|
|
84
|
+
ELITEA_RS = "elitea_response"
|
|
85
|
+
PRINTER = "printer"
|
|
86
|
+
PRINTER_NODE_RS = "printer_output"
|
|
87
|
+
PRINTER_COMPLETED_STATE = "PRINTER_COMPLETED"
|
|
88
|
+
|
|
89
|
+
LOADER_MAX_TOKENS_DEFAULT = 512
|