alita-sdk 0.3.263__py3-none-any.whl → 0.3.499__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alita_sdk/cli/__init__.py +10 -0
- alita_sdk/cli/__main__.py +17 -0
- alita_sdk/cli/agent/__init__.py +5 -0
- alita_sdk/cli/agent/default.py +258 -0
- alita_sdk/cli/agent_executor.py +155 -0
- alita_sdk/cli/agent_loader.py +215 -0
- alita_sdk/cli/agent_ui.py +228 -0
- alita_sdk/cli/agents.py +3601 -0
- alita_sdk/cli/callbacks.py +647 -0
- alita_sdk/cli/cli.py +168 -0
- alita_sdk/cli/config.py +306 -0
- alita_sdk/cli/context/__init__.py +30 -0
- alita_sdk/cli/context/cleanup.py +198 -0
- alita_sdk/cli/context/manager.py +731 -0
- alita_sdk/cli/context/message.py +285 -0
- alita_sdk/cli/context/strategies.py +289 -0
- alita_sdk/cli/context/token_estimation.py +127 -0
- alita_sdk/cli/formatting.py +182 -0
- alita_sdk/cli/input_handler.py +419 -0
- alita_sdk/cli/inventory.py +1256 -0
- alita_sdk/cli/mcp_loader.py +315 -0
- alita_sdk/cli/toolkit.py +327 -0
- alita_sdk/cli/toolkit_loader.py +85 -0
- alita_sdk/cli/tools/__init__.py +43 -0
- alita_sdk/cli/tools/approval.py +224 -0
- alita_sdk/cli/tools/filesystem.py +1751 -0
- alita_sdk/cli/tools/planning.py +389 -0
- alita_sdk/cli/tools/terminal.py +414 -0
- alita_sdk/community/__init__.py +64 -8
- alita_sdk/community/inventory/__init__.py +224 -0
- alita_sdk/community/inventory/config.py +257 -0
- alita_sdk/community/inventory/enrichment.py +2137 -0
- alita_sdk/community/inventory/extractors.py +1469 -0
- alita_sdk/community/inventory/ingestion.py +3172 -0
- alita_sdk/community/inventory/knowledge_graph.py +1457 -0
- alita_sdk/community/inventory/parsers/__init__.py +218 -0
- alita_sdk/community/inventory/parsers/base.py +295 -0
- alita_sdk/community/inventory/parsers/csharp_parser.py +907 -0
- alita_sdk/community/inventory/parsers/go_parser.py +851 -0
- alita_sdk/community/inventory/parsers/html_parser.py +389 -0
- alita_sdk/community/inventory/parsers/java_parser.py +593 -0
- alita_sdk/community/inventory/parsers/javascript_parser.py +629 -0
- alita_sdk/community/inventory/parsers/kotlin_parser.py +768 -0
- alita_sdk/community/inventory/parsers/markdown_parser.py +362 -0
- alita_sdk/community/inventory/parsers/python_parser.py +604 -0
- alita_sdk/community/inventory/parsers/rust_parser.py +858 -0
- alita_sdk/community/inventory/parsers/swift_parser.py +832 -0
- alita_sdk/community/inventory/parsers/text_parser.py +322 -0
- alita_sdk/community/inventory/parsers/yaml_parser.py +370 -0
- alita_sdk/community/inventory/patterns/__init__.py +61 -0
- alita_sdk/community/inventory/patterns/ast_adapter.py +380 -0
- alita_sdk/community/inventory/patterns/loader.py +348 -0
- alita_sdk/community/inventory/patterns/registry.py +198 -0
- alita_sdk/community/inventory/presets.py +535 -0
- alita_sdk/community/inventory/retrieval.py +1403 -0
- alita_sdk/community/inventory/toolkit.py +173 -0
- alita_sdk/community/inventory/visualize.py +1370 -0
- alita_sdk/configurations/__init__.py +10 -0
- alita_sdk/configurations/ado.py +4 -2
- alita_sdk/configurations/azure_search.py +1 -1
- alita_sdk/configurations/bigquery.py +1 -1
- alita_sdk/configurations/bitbucket.py +94 -2
- alita_sdk/configurations/browser.py +18 -0
- alita_sdk/configurations/carrier.py +19 -0
- alita_sdk/configurations/confluence.py +96 -1
- alita_sdk/configurations/delta_lake.py +1 -1
- alita_sdk/configurations/figma.py +0 -5
- alita_sdk/configurations/github.py +65 -1
- alita_sdk/configurations/gitlab.py +79 -0
- alita_sdk/configurations/google_places.py +17 -0
- alita_sdk/configurations/jira.py +103 -0
- alita_sdk/configurations/postman.py +1 -1
- alita_sdk/configurations/qtest.py +1 -3
- alita_sdk/configurations/report_portal.py +19 -0
- alita_sdk/configurations/salesforce.py +19 -0
- alita_sdk/configurations/service_now.py +1 -12
- alita_sdk/configurations/sharepoint.py +19 -0
- alita_sdk/configurations/sonar.py +18 -0
- alita_sdk/configurations/sql.py +20 -0
- alita_sdk/configurations/testio.py +18 -0
- alita_sdk/configurations/testrail.py +88 -0
- alita_sdk/configurations/xray.py +94 -1
- alita_sdk/configurations/zephyr_enterprise.py +94 -1
- alita_sdk/configurations/zephyr_essential.py +95 -0
- alita_sdk/runtime/clients/artifact.py +12 -2
- alita_sdk/runtime/clients/client.py +235 -66
- alita_sdk/runtime/clients/mcp_discovery.py +342 -0
- alita_sdk/runtime/clients/mcp_manager.py +262 -0
- alita_sdk/runtime/clients/sandbox_client.py +373 -0
- alita_sdk/runtime/langchain/assistant.py +123 -17
- alita_sdk/runtime/langchain/constants.py +8 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py +315 -3
- alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py +209 -31
- alita_sdk/runtime/langchain/document_loaders/AlitaImageLoader.py +1 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +8 -2
- alita_sdk/runtime/langchain/document_loaders/AlitaMarkdownLoader.py +66 -0
- alita_sdk/runtime/langchain/document_loaders/AlitaPDFLoader.py +79 -10
- alita_sdk/runtime/langchain/document_loaders/AlitaPowerPointLoader.py +52 -15
- alita_sdk/runtime/langchain/document_loaders/AlitaPythonLoader.py +9 -0
- alita_sdk/runtime/langchain/document_loaders/AlitaTableLoader.py +1 -4
- alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py +15 -2
- alita_sdk/runtime/langchain/document_loaders/ImageParser.py +30 -0
- alita_sdk/runtime/langchain/document_loaders/constants.py +187 -40
- alita_sdk/runtime/langchain/interfaces/llm_processor.py +4 -2
- alita_sdk/runtime/langchain/langraph_agent.py +406 -91
- alita_sdk/runtime/langchain/utils.py +51 -8
- alita_sdk/runtime/llms/preloaded.py +2 -6
- alita_sdk/runtime/models/mcp_models.py +61 -0
- alita_sdk/runtime/toolkits/__init__.py +26 -0
- alita_sdk/runtime/toolkits/application.py +9 -2
- alita_sdk/runtime/toolkits/artifact.py +19 -7
- alita_sdk/runtime/toolkits/datasource.py +13 -6
- alita_sdk/runtime/toolkits/mcp.py +780 -0
- alita_sdk/runtime/toolkits/planning.py +178 -0
- alita_sdk/runtime/toolkits/subgraph.py +11 -6
- alita_sdk/runtime/toolkits/tools.py +214 -60
- alita_sdk/runtime/toolkits/vectorstore.py +9 -4
- alita_sdk/runtime/tools/__init__.py +22 -0
- alita_sdk/runtime/tools/application.py +16 -4
- alita_sdk/runtime/tools/artifact.py +312 -19
- alita_sdk/runtime/tools/function.py +100 -4
- alita_sdk/runtime/tools/graph.py +81 -0
- alita_sdk/runtime/tools/image_generation.py +212 -0
- alita_sdk/runtime/tools/llm.py +539 -180
- alita_sdk/runtime/tools/mcp_inspect_tool.py +284 -0
- alita_sdk/runtime/tools/mcp_remote_tool.py +181 -0
- alita_sdk/runtime/tools/mcp_server_tool.py +3 -1
- alita_sdk/runtime/tools/planning/__init__.py +36 -0
- alita_sdk/runtime/tools/planning/models.py +246 -0
- alita_sdk/runtime/tools/planning/wrapper.py +607 -0
- alita_sdk/runtime/tools/router.py +2 -1
- alita_sdk/runtime/tools/sandbox.py +375 -0
- alita_sdk/runtime/tools/vectorstore.py +62 -63
- alita_sdk/runtime/tools/vectorstore_base.py +156 -85
- alita_sdk/runtime/utils/AlitaCallback.py +106 -20
- alita_sdk/runtime/utils/mcp_client.py +465 -0
- alita_sdk/runtime/utils/mcp_oauth.py +244 -0
- alita_sdk/runtime/utils/mcp_sse_client.py +405 -0
- alita_sdk/runtime/utils/mcp_tools_discovery.py +124 -0
- alita_sdk/runtime/utils/streamlit.py +41 -14
- alita_sdk/runtime/utils/toolkit_utils.py +28 -9
- alita_sdk/runtime/utils/utils.py +14 -0
- alita_sdk/tools/__init__.py +78 -35
- alita_sdk/tools/ado/__init__.py +0 -1
- alita_sdk/tools/ado/repos/__init__.py +10 -6
- alita_sdk/tools/ado/repos/repos_wrapper.py +12 -11
- alita_sdk/tools/ado/test_plan/__init__.py +10 -7
- alita_sdk/tools/ado/test_plan/test_plan_wrapper.py +56 -23
- alita_sdk/tools/ado/wiki/__init__.py +10 -11
- alita_sdk/tools/ado/wiki/ado_wrapper.py +114 -28
- alita_sdk/tools/ado/work_item/__init__.py +10 -11
- alita_sdk/tools/ado/work_item/ado_wrapper.py +63 -10
- alita_sdk/tools/advanced_jira_mining/__init__.py +10 -7
- alita_sdk/tools/aws/delta_lake/__init__.py +13 -11
- alita_sdk/tools/azure_ai/search/__init__.py +11 -7
- alita_sdk/tools/base_indexer_toolkit.py +392 -86
- alita_sdk/tools/bitbucket/__init__.py +18 -11
- alita_sdk/tools/bitbucket/api_wrapper.py +52 -9
- alita_sdk/tools/bitbucket/cloud_api_wrapper.py +5 -5
- alita_sdk/tools/browser/__init__.py +40 -16
- alita_sdk/tools/browser/crawler.py +3 -1
- alita_sdk/tools/browser/utils.py +15 -6
- alita_sdk/tools/carrier/__init__.py +17 -17
- alita_sdk/tools/carrier/backend_reports_tool.py +8 -4
- alita_sdk/tools/carrier/excel_reporter.py +8 -4
- alita_sdk/tools/chunkers/__init__.py +3 -1
- alita_sdk/tools/chunkers/code/codeparser.py +1 -1
- alita_sdk/tools/chunkers/sematic/json_chunker.py +1 -0
- alita_sdk/tools/chunkers/sematic/markdown_chunker.py +97 -6
- alita_sdk/tools/chunkers/sematic/proposal_chunker.py +1 -1
- alita_sdk/tools/chunkers/universal_chunker.py +270 -0
- alita_sdk/tools/cloud/aws/__init__.py +9 -6
- alita_sdk/tools/cloud/azure/__init__.py +9 -6
- alita_sdk/tools/cloud/gcp/__init__.py +9 -6
- alita_sdk/tools/cloud/k8s/__init__.py +9 -6
- alita_sdk/tools/code/linter/__init__.py +7 -7
- alita_sdk/tools/code/loaders/codesearcher.py +3 -2
- alita_sdk/tools/code/sonar/__init__.py +18 -12
- alita_sdk/tools/code_indexer_toolkit.py +199 -0
- alita_sdk/tools/confluence/__init__.py +14 -11
- alita_sdk/tools/confluence/api_wrapper.py +198 -58
- alita_sdk/tools/confluence/loader.py +10 -0
- alita_sdk/tools/custom_open_api/__init__.py +9 -4
- alita_sdk/tools/elastic/__init__.py +8 -7
- alita_sdk/tools/elitea_base.py +543 -64
- alita_sdk/tools/figma/__init__.py +10 -8
- alita_sdk/tools/figma/api_wrapper.py +352 -153
- alita_sdk/tools/github/__init__.py +13 -11
- alita_sdk/tools/github/api_wrapper.py +9 -26
- alita_sdk/tools/github/github_client.py +75 -12
- alita_sdk/tools/github/schemas.py +2 -1
- alita_sdk/tools/gitlab/__init__.py +11 -10
- alita_sdk/tools/gitlab/api_wrapper.py +135 -45
- alita_sdk/tools/gitlab_org/__init__.py +11 -9
- alita_sdk/tools/google/bigquery/__init__.py +12 -13
- alita_sdk/tools/google_places/__init__.py +18 -10
- alita_sdk/tools/jira/__init__.py +14 -8
- alita_sdk/tools/jira/api_wrapper.py +315 -168
- alita_sdk/tools/keycloak/__init__.py +8 -7
- alita_sdk/tools/localgit/local_git.py +56 -54
- alita_sdk/tools/memory/__init__.py +27 -11
- alita_sdk/tools/non_code_indexer_toolkit.py +7 -2
- alita_sdk/tools/ocr/__init__.py +8 -7
- alita_sdk/tools/openapi/__init__.py +10 -1
- alita_sdk/tools/pandas/__init__.py +8 -7
- alita_sdk/tools/pandas/api_wrapper.py +7 -25
- alita_sdk/tools/postman/__init__.py +8 -10
- alita_sdk/tools/postman/api_wrapper.py +19 -8
- alita_sdk/tools/postman/postman_analysis.py +8 -1
- alita_sdk/tools/pptx/__init__.py +8 -9
- alita_sdk/tools/qtest/__init__.py +19 -13
- alita_sdk/tools/qtest/api_wrapper.py +1784 -88
- alita_sdk/tools/rally/__init__.py +10 -9
- alita_sdk/tools/report_portal/__init__.py +20 -15
- alita_sdk/tools/salesforce/__init__.py +19 -15
- alita_sdk/tools/servicenow/__init__.py +14 -11
- alita_sdk/tools/sharepoint/__init__.py +14 -13
- alita_sdk/tools/sharepoint/api_wrapper.py +179 -39
- alita_sdk/tools/sharepoint/authorization_helper.py +191 -1
- alita_sdk/tools/sharepoint/utils.py +8 -2
- alita_sdk/tools/slack/__init__.py +10 -7
- alita_sdk/tools/sql/__init__.py +19 -18
- alita_sdk/tools/sql/api_wrapper.py +71 -23
- alita_sdk/tools/testio/__init__.py +18 -12
- alita_sdk/tools/testrail/__init__.py +10 -10
- alita_sdk/tools/testrail/api_wrapper.py +213 -45
- alita_sdk/tools/utils/__init__.py +28 -4
- alita_sdk/tools/utils/content_parser.py +181 -61
- alita_sdk/tools/utils/text_operations.py +254 -0
- alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +83 -27
- alita_sdk/tools/xray/__init__.py +12 -7
- alita_sdk/tools/xray/api_wrapper.py +58 -113
- alita_sdk/tools/zephyr/__init__.py +9 -6
- alita_sdk/tools/zephyr_enterprise/__init__.py +13 -8
- alita_sdk/tools/zephyr_enterprise/api_wrapper.py +17 -7
- alita_sdk/tools/zephyr_essential/__init__.py +13 -9
- alita_sdk/tools/zephyr_essential/api_wrapper.py +289 -47
- alita_sdk/tools/zephyr_essential/client.py +6 -4
- alita_sdk/tools/zephyr_scale/__init__.py +10 -7
- alita_sdk/tools/zephyr_scale/api_wrapper.py +6 -2
- alita_sdk/tools/zephyr_squad/__init__.py +9 -6
- {alita_sdk-0.3.263.dist-info → alita_sdk-0.3.499.dist-info}/METADATA +180 -33
- alita_sdk-0.3.499.dist-info/RECORD +433 -0
- alita_sdk-0.3.499.dist-info/entry_points.txt +2 -0
- alita_sdk-0.3.263.dist-info/RECORD +0 -342
- {alita_sdk-0.3.263.dist-info → alita_sdk-0.3.499.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.263.dist-info → alita_sdk-0.3.499.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.263.dist-info → alita_sdk-0.3.499.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,373 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
from typing import Dict, Optional
|
|
3
|
+
from urllib.parse import quote
|
|
4
|
+
|
|
5
|
+
import requests
|
|
6
|
+
from typing import Any
|
|
7
|
+
from json import dumps
|
|
8
|
+
import chardet
|
|
9
|
+
|
|
10
|
+
logger = logging.getLogger(__name__)
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class ApiDetailsRequestError(Exception):
    """Raised when a sandbox API details request returns a non-OK response."""
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class SandboxArtifact:
|
|
18
|
+
def __init__(self, client: Any, bucket_name: str):
|
|
19
|
+
self.client = client
|
|
20
|
+
self.bucket_name = bucket_name
|
|
21
|
+
if not self.client.bucket_exists(bucket_name):
|
|
22
|
+
self.client.create_bucket(bucket_name)
|
|
23
|
+
|
|
24
|
+
def create(self, artifact_name: str, artifact_data: Any, bucket_name: str = None):
|
|
25
|
+
try:
|
|
26
|
+
if not bucket_name:
|
|
27
|
+
bucket_name = self.bucket_name
|
|
28
|
+
return dumps(self.client.create_artifact(bucket_name, artifact_name, artifact_data))
|
|
29
|
+
except Exception as e:
|
|
30
|
+
logger.error(f'Error: {e}')
|
|
31
|
+
return f'Error: {e}'
|
|
32
|
+
|
|
33
|
+
def get(self,
|
|
34
|
+
artifact_name: str,
|
|
35
|
+
bucket_name: str = None,
|
|
36
|
+
is_capture_image: bool = False,
|
|
37
|
+
page_number: int = None,
|
|
38
|
+
sheet_name: str = None,
|
|
39
|
+
excel_by_sheets: bool = False,
|
|
40
|
+
llm=None):
|
|
41
|
+
if not bucket_name:
|
|
42
|
+
bucket_name = self.bucket_name
|
|
43
|
+
data = self.client.download_artifact(bucket_name, artifact_name)
|
|
44
|
+
if len(data) == 0:
|
|
45
|
+
# empty file might be created
|
|
46
|
+
return ''
|
|
47
|
+
if isinstance(data, dict) and data['error']:
|
|
48
|
+
return f'{data['error']}. {data['content'] if data['content'] else ''}'
|
|
49
|
+
detected = chardet.detect(data)
|
|
50
|
+
return data
|
|
51
|
+
# TODO: add proper handling for binary files (images, pdf, etc.) for sandbox
|
|
52
|
+
# if detected['encoding'] is not None:
|
|
53
|
+
# try:
|
|
54
|
+
# return data.decode(detected['encoding'])
|
|
55
|
+
# except Exception:
|
|
56
|
+
# logger.error('Error while default encoding')
|
|
57
|
+
# return parse_file_content(file_name=artifact_name,
|
|
58
|
+
# file_content=data,
|
|
59
|
+
# is_capture_image=is_capture_image,
|
|
60
|
+
# page_number=page_number,
|
|
61
|
+
# sheet_name=sheet_name,
|
|
62
|
+
# excel_by_sheets=excel_by_sheets,
|
|
63
|
+
# llm=llm)
|
|
64
|
+
# else:
|
|
65
|
+
# return parse_file_content(file_name=artifact_name,
|
|
66
|
+
# file_content=data,
|
|
67
|
+
# is_capture_image=is_capture_image,
|
|
68
|
+
# page_number=page_number,
|
|
69
|
+
# sheet_name=sheet_name,
|
|
70
|
+
# excel_by_sheets=excel_by_sheets,
|
|
71
|
+
# llm=llm)
|
|
72
|
+
|
|
73
|
+
def delete(self, artifact_name: str, bucket_name=None):
|
|
74
|
+
if not bucket_name:
|
|
75
|
+
bucket_name = self.bucket_name
|
|
76
|
+
self.client.delete_artifact(bucket_name, artifact_name)
|
|
77
|
+
|
|
78
|
+
def list(self, bucket_name: str = None, return_as_string=True) -> str | dict:
|
|
79
|
+
if not bucket_name:
|
|
80
|
+
bucket_name = self.bucket_name
|
|
81
|
+
artifacts = self.client.list_artifacts(bucket_name)
|
|
82
|
+
return str(artifacts) if return_as_string else artifacts
|
|
83
|
+
|
|
84
|
+
def append(self, artifact_name: str, additional_data: Any, bucket_name: str = None):
|
|
85
|
+
if not bucket_name:
|
|
86
|
+
bucket_name = self.bucket_name
|
|
87
|
+
data = self.get(artifact_name, bucket_name)
|
|
88
|
+
if data == 'Could not detect encoding':
|
|
89
|
+
return data
|
|
90
|
+
data += f'{additional_data}' if len(data) > 0 else additional_data
|
|
91
|
+
self.client.create_artifact(bucket_name, artifact_name, data)
|
|
92
|
+
return 'Data appended successfully'
|
|
93
|
+
|
|
94
|
+
def overwrite(self, artifact_name: str, new_data: Any, bucket_name: str = None):
|
|
95
|
+
if not bucket_name:
|
|
96
|
+
bucket_name = self.bucket_name
|
|
97
|
+
return self.create(artifact_name, new_data, bucket_name)
|
|
98
|
+
|
|
99
|
+
def get_content_bytes(self,
|
|
100
|
+
artifact_name: str,
|
|
101
|
+
bucket_name: str = None):
|
|
102
|
+
if not bucket_name:
|
|
103
|
+
bucket_name = self.bucket_name
|
|
104
|
+
return self.client.download_artifact(bucket_name, artifact_name)
|
|
105
|
+
|
|
106
|
+
|
|
107
|
+
class SandboxClient:
|
|
108
|
+
def __init__(self,
|
|
109
|
+
base_url: str,
|
|
110
|
+
project_id: int,
|
|
111
|
+
auth_token: str,
|
|
112
|
+
api_extra_headers: Optional[dict] = None,
|
|
113
|
+
configurations: Optional[list] = None,
|
|
114
|
+
**kwargs):
|
|
115
|
+
|
|
116
|
+
self.base_url = base_url.rstrip('/')
|
|
117
|
+
self.api_path = '/api/v1'
|
|
118
|
+
self.llm_path = '/llm/v1'
|
|
119
|
+
self.project_id = project_id
|
|
120
|
+
self.auth_token = auth_token
|
|
121
|
+
self.headers = {
|
|
122
|
+
'Authorization': f'Bearer {auth_token}',
|
|
123
|
+
'X-SECRET': kwargs.get('XSECRET', 'secret')
|
|
124
|
+
}
|
|
125
|
+
if api_extra_headers is not None:
|
|
126
|
+
self.headers.update(api_extra_headers)
|
|
127
|
+
self.predict_url = f'{self.base_url}{self.api_path}/prompt_lib/predict/prompt_lib/{self.project_id}'
|
|
128
|
+
self.prompt_versions = f'{self.base_url}{self.api_path}/prompt_lib/version/prompt_lib/{self.project_id}'
|
|
129
|
+
self.prompts = f'{self.base_url}{self.api_path}/prompt_lib/prompt/prompt_lib/{self.project_id}'
|
|
130
|
+
self.datasources = f'{self.base_url}{self.api_path}/datasources/datasource/prompt_lib/{self.project_id}'
|
|
131
|
+
self.datasources_predict = f'{self.base_url}{self.api_path}/datasources/predict/prompt_lib/{self.project_id}'
|
|
132
|
+
self.datasources_search = f'{self.base_url}{self.api_path}/datasources/search/prompt_lib/{self.project_id}'
|
|
133
|
+
self.app = f'{self.base_url}{self.api_path}/applications/application/prompt_lib/{self.project_id}'
|
|
134
|
+
self.mcp_tools_list = f'{self.base_url}{self.api_path}/mcp_sse/tools_list/{self.project_id}'
|
|
135
|
+
self.mcp_tools_call = f'{self.base_url}{self.api_path}/mcp_sse/tools_call/{self.project_id}'
|
|
136
|
+
self.application_versions = f'{self.base_url}{self.api_path}/applications/version/prompt_lib/{self.project_id}'
|
|
137
|
+
self.list_apps_url = f'{self.base_url}{self.api_path}/applications/applications/prompt_lib/{self.project_id}'
|
|
138
|
+
self.integration_details = f'{self.base_url}{self.api_path}/integrations/integration/{self.project_id}'
|
|
139
|
+
self.secrets_url = f'{self.base_url}{self.api_path}/secrets/secret/{self.project_id}'
|
|
140
|
+
self.artifacts_url = f'{self.base_url}{self.api_path}/artifacts/artifacts/default/{self.project_id}'
|
|
141
|
+
self.artifact_url = f'{self.base_url}{self.api_path}/artifacts/artifact/default/{self.project_id}'
|
|
142
|
+
self.bucket_url = f'{self.base_url}{self.api_path}/artifacts/buckets/{self.project_id}'
|
|
143
|
+
self.configurations_url = f'{self.base_url}{self.api_path}/integrations/integrations/default/{self.project_id}?section=configurations&unsecret=true'
|
|
144
|
+
self.ai_section_url = f'{self.base_url}{self.api_path}/integrations/integrations/default/{self.project_id}?section=ai'
|
|
145
|
+
self.image_generation_url = f'{self.base_url}{self.llm_path}/images/generations'
|
|
146
|
+
self.auth_user_url = f'{self.base_url}{self.api_path}/auth/user'
|
|
147
|
+
self.configurations: list = configurations or []
|
|
148
|
+
self.model_timeout = kwargs.get('model_timeout', 120)
|
|
149
|
+
self.model_image_generation = kwargs.get('model_image_generation')
|
|
150
|
+
|
|
151
|
+
def get_mcp_toolkits(self):
|
|
152
|
+
if user_id := self._get_real_user_id():
|
|
153
|
+
url = f'{self.mcp_tools_list}/{user_id}'
|
|
154
|
+
data = requests.get(url, headers=self.headers, verify=False).json()
|
|
155
|
+
return data
|
|
156
|
+
else:
|
|
157
|
+
return []
|
|
158
|
+
|
|
159
|
+
def mcp_tool_call(self, params: dict[str, Any]):
|
|
160
|
+
if user_id := self._get_real_user_id():
|
|
161
|
+
url = f'{self.mcp_tools_call}/{user_id}'
|
|
162
|
+
#
|
|
163
|
+
# This loop iterates over each key-value pair in the arguments dictionary,
|
|
164
|
+
# and if a value is a Pydantic object, it replaces it with its dictionary representation using .dict().
|
|
165
|
+
for arg_name, arg_value in params.get('params', {}).get('arguments', {}).items():
|
|
166
|
+
if isinstance(arg_value, list):
|
|
167
|
+
params['params']['arguments'][arg_name] = [
|
|
168
|
+
item.dict() if hasattr(item, 'dict') and callable(item.dict) else item
|
|
169
|
+
for item in arg_value
|
|
170
|
+
]
|
|
171
|
+
elif hasattr(arg_value, 'dict') and callable(arg_value.dict):
|
|
172
|
+
params['params']['arguments'][arg_name] = arg_value.dict()
|
|
173
|
+
#
|
|
174
|
+
response = requests.post(url, headers=self.headers, json=params, verify=False)
|
|
175
|
+
try:
|
|
176
|
+
return response.json()
|
|
177
|
+
except (ValueError, TypeError):
|
|
178
|
+
return response.text
|
|
179
|
+
else:
|
|
180
|
+
return f'Error: Could not determine user ID for MCP tool call'
|
|
181
|
+
|
|
182
|
+
def get_app_details(self, application_id: int):
|
|
183
|
+
url = f'{self.app}/{application_id}'
|
|
184
|
+
data = requests.get(url, headers=self.headers, verify=False).json()
|
|
185
|
+
return data
|
|
186
|
+
|
|
187
|
+
def get_list_of_apps(self):
|
|
188
|
+
apps = []
|
|
189
|
+
limit = 10
|
|
190
|
+
offset = 0
|
|
191
|
+
total_count = None
|
|
192
|
+
|
|
193
|
+
while total_count is None or offset < total_count:
|
|
194
|
+
params = {'offset': offset, 'limit': limit}
|
|
195
|
+
resp = requests.get(self.list_apps_url, headers=self.headers, params=params, verify=False)
|
|
196
|
+
|
|
197
|
+
if resp.ok:
|
|
198
|
+
data = resp.json()
|
|
199
|
+
total_count = data.get('total')
|
|
200
|
+
apps.extend([{'name': app['name'], 'id': app['id']} for app in data.get('rows', [])])
|
|
201
|
+
offset += limit
|
|
202
|
+
else:
|
|
203
|
+
break
|
|
204
|
+
|
|
205
|
+
return apps
|
|
206
|
+
|
|
207
|
+
def fetch_available_configurations(self) -> list:
|
|
208
|
+
resp = requests.get(self.configurations_url, headers=self.headers, verify=False)
|
|
209
|
+
if resp.ok:
|
|
210
|
+
return resp.json()
|
|
211
|
+
return []
|
|
212
|
+
|
|
213
|
+
def all_models_and_integrations(self):
|
|
214
|
+
resp = requests.get(self.ai_section_url, headers=self.headers, verify=False)
|
|
215
|
+
if resp.ok:
|
|
216
|
+
return resp.json()
|
|
217
|
+
return []
|
|
218
|
+
|
|
219
|
+
def generate_image(self,
|
|
220
|
+
prompt: str,
|
|
221
|
+
n: int = 1,
|
|
222
|
+
size: str = 'auto',
|
|
223
|
+
quality: str = 'auto',
|
|
224
|
+
response_format: str = 'b64_json',
|
|
225
|
+
style: Optional[str] = None) -> dict:
|
|
226
|
+
|
|
227
|
+
if not self.model_image_generation:
|
|
228
|
+
raise ValueError('Image generation model is not configured for this client')
|
|
229
|
+
|
|
230
|
+
image_generation_data = {
|
|
231
|
+
'prompt': prompt,
|
|
232
|
+
'model': self.model_image_generation,
|
|
233
|
+
'n': n,
|
|
234
|
+
'response_format': response_format,
|
|
235
|
+
}
|
|
236
|
+
|
|
237
|
+
# Only add optional parameters if they have meaningful values
|
|
238
|
+
if size and size.lower() != 'auto':
|
|
239
|
+
image_generation_data['size'] = size
|
|
240
|
+
|
|
241
|
+
if quality and quality.lower() != 'auto':
|
|
242
|
+
image_generation_data['quality'] = quality
|
|
243
|
+
|
|
244
|
+
if style:
|
|
245
|
+
image_generation_data['style'] = style
|
|
246
|
+
|
|
247
|
+
# Standard headers for image generation
|
|
248
|
+
image_headers = self.headers.copy()
|
|
249
|
+
image_headers.update({
|
|
250
|
+
'Content-Type': 'application/json',
|
|
251
|
+
})
|
|
252
|
+
|
|
253
|
+
logger.info(f'Generating image with model: {self.model_image_generation}, prompt: {prompt[:50]}...')
|
|
254
|
+
|
|
255
|
+
try:
|
|
256
|
+
response = requests.post(
|
|
257
|
+
self.image_generation_url,
|
|
258
|
+
headers=image_headers,
|
|
259
|
+
json=image_generation_data,
|
|
260
|
+
verify=False,
|
|
261
|
+
timeout=self.model_timeout
|
|
262
|
+
)
|
|
263
|
+
response.raise_for_status()
|
|
264
|
+
return response.json()
|
|
265
|
+
|
|
266
|
+
except requests.exceptions.HTTPError as e:
|
|
267
|
+
logger.error(f'Image generation failed: {e.response.status_code} - {e.response.text}')
|
|
268
|
+
raise
|
|
269
|
+
except requests.exceptions.RequestException as e:
|
|
270
|
+
logger.error(f'Image generation request failed: {e}')
|
|
271
|
+
raise
|
|
272
|
+
|
|
273
|
+
def get_app_version_details(self, application_id: int, application_version_id: int) -> dict:
|
|
274
|
+
url = f'{self.application_versions}/{application_id}/{application_version_id}'
|
|
275
|
+
if self.configurations:
|
|
276
|
+
configs = self.configurations
|
|
277
|
+
else:
|
|
278
|
+
configs = self.fetch_available_configurations()
|
|
279
|
+
|
|
280
|
+
resp = requests.patch(url, headers=self.headers, verify=False, json={'configurations': configs})
|
|
281
|
+
if resp.ok:
|
|
282
|
+
return resp.json()
|
|
283
|
+
logger.error(f'Failed to fetch application version details: {resp.status_code} - {resp.text}.'
|
|
284
|
+
f' Application ID: {application_id}, Version ID: {application_version_id}')
|
|
285
|
+
raise ApiDetailsRequestError(
|
|
286
|
+
f'Failed to fetch application version details for {application_id}/{application_version_id}.')
|
|
287
|
+
|
|
288
|
+
def get_integration_details(self, integration_id: str, format_for_model: bool = False):
|
|
289
|
+
url = f'{self.integration_details}/{integration_id}'
|
|
290
|
+
data = requests.get(url, headers=self.headers, verify=False).json()
|
|
291
|
+
return data
|
|
292
|
+
|
|
293
|
+
def unsecret(self, secret_name: str):
|
|
294
|
+
url = f'{self.secrets_url}/{secret_name}'
|
|
295
|
+
data = requests.get(url, headers=self.headers, verify=False).json()
|
|
296
|
+
logger.info(f'Unsecret response: {data}')
|
|
297
|
+
return data.get('value', None)
|
|
298
|
+
|
|
299
|
+
def artifact(self, bucket_name):
|
|
300
|
+
return SandboxArtifact(self, bucket_name)
|
|
301
|
+
|
|
302
|
+
def _process_requst(self, data: requests.Response) -> Dict[str, str]:
|
|
303
|
+
if data.status_code == 403:
|
|
304
|
+
return {'error': 'You are not authorized to access this resource'}
|
|
305
|
+
elif data.status_code == 404:
|
|
306
|
+
return {'error': 'Resource not found'}
|
|
307
|
+
elif data.status_code != 200:
|
|
308
|
+
return {
|
|
309
|
+
'error': 'An error occurred while fetching the resource',
|
|
310
|
+
'content': data.text
|
|
311
|
+
}
|
|
312
|
+
else:
|
|
313
|
+
return data.json()
|
|
314
|
+
|
|
315
|
+
def bucket_exists(self, bucket_name):
|
|
316
|
+
try:
|
|
317
|
+
resp = self._process_requst(
|
|
318
|
+
requests.get(f'{self.bucket_url}', headers=self.headers, verify=False)
|
|
319
|
+
)
|
|
320
|
+
for each in resp.get('rows', []):
|
|
321
|
+
if each['name'] == bucket_name:
|
|
322
|
+
return True
|
|
323
|
+
return False
|
|
324
|
+
except:
|
|
325
|
+
return False
|
|
326
|
+
|
|
327
|
+
def create_bucket(self, bucket_name, expiration_measure='months', expiration_value=1):
|
|
328
|
+
post_data = {
|
|
329
|
+
'name': bucket_name,
|
|
330
|
+
'expiration_measure': expiration_measure,
|
|
331
|
+
'expiration_value': expiration_value
|
|
332
|
+
}
|
|
333
|
+
resp = requests.post(f'{self.bucket_url}', headers=self.headers, json=post_data, verify=False)
|
|
334
|
+
return self._process_requst(resp)
|
|
335
|
+
|
|
336
|
+
def list_artifacts(self, bucket_name: str):
|
|
337
|
+
# Ensure bucket name is lowercase as required by the API
|
|
338
|
+
url = f'{self.artifacts_url}/{bucket_name.lower()}'
|
|
339
|
+
data = requests.get(url, headers=self.headers, verify=False)
|
|
340
|
+
return self._process_requst(data)
|
|
341
|
+
|
|
342
|
+
def create_artifact(self, bucket_name, artifact_name, artifact_data):
|
|
343
|
+
url = f'{self.artifacts_url}/{bucket_name.lower()}'
|
|
344
|
+
data = requests.post(url, headers=self.headers, files={
|
|
345
|
+
'file': (artifact_name, artifact_data)
|
|
346
|
+
}, verify=False)
|
|
347
|
+
return self._process_requst(data)
|
|
348
|
+
|
|
349
|
+
def download_artifact(self, bucket_name, artifact_name):
|
|
350
|
+
url = f'{self.artifact_url}/{bucket_name.lower()}/{artifact_name}'
|
|
351
|
+
data = requests.get(url, headers=self.headers, verify=False)
|
|
352
|
+
if data.status_code == 403:
|
|
353
|
+
return {'error': 'You are not authorized to access this resource'}
|
|
354
|
+
elif data.status_code == 404:
|
|
355
|
+
return {'error': 'Resource not found'}
|
|
356
|
+
elif data.status_code != 200:
|
|
357
|
+
return {
|
|
358
|
+
'error': 'An error occurred while fetching the resource',
|
|
359
|
+
'content': data.content
|
|
360
|
+
}
|
|
361
|
+
return data.content
|
|
362
|
+
|
|
363
|
+
def delete_artifact(self, bucket_name, artifact_name):
|
|
364
|
+
url = f'{self.artifact_url}/{bucket_name}'
|
|
365
|
+
data = requests.delete(url, headers=self.headers, verify=False, params={'filename': quote(artifact_name)})
|
|
366
|
+
return self._process_requst(data)
|
|
367
|
+
|
|
368
|
+
def get_user_data(self) -> Dict[str, Any]:
|
|
369
|
+
resp = requests.get(self.auth_user_url, headers=self.headers, verify=False)
|
|
370
|
+
if resp.ok:
|
|
371
|
+
return resp.json()
|
|
372
|
+
logger.error(f'Failed to fetch user data: {resp.status_code} - {resp.text}')
|
|
373
|
+
raise ApiDetailsRequestError(f'Failed to fetch user data with status code {resp.status_code}.')
|
|
@@ -17,6 +17,7 @@ from .constants import REACT_ADDON, REACT_VARS, XML_ADDON
|
|
|
17
17
|
from .chat_message_template import Jinja2TemplatedChatMessagesTemplate
|
|
18
18
|
from ..tools.echo import EchoTool
|
|
19
19
|
from langchain_core.tools import BaseTool, ToolException
|
|
20
|
+
from jinja2 import Environment, DebugUndefined
|
|
20
21
|
|
|
21
22
|
logger = logging.getLogger(__name__)
|
|
22
23
|
|
|
@@ -29,15 +30,20 @@ class Assistant:
|
|
|
29
30
|
app_type: str = "openai",
|
|
30
31
|
tools: Optional[list] = [],
|
|
31
32
|
memory: Optional[Any] = None,
|
|
32
|
-
store: Optional[BaseStore] = None
|
|
33
|
+
store: Optional[BaseStore] = None,
|
|
34
|
+
debug_mode: Optional[bool] = False,
|
|
35
|
+
mcp_tokens: Optional[dict] = None,
|
|
36
|
+
conversation_id: Optional[str] = None):
|
|
33
37
|
|
|
34
38
|
self.app_type = app_type
|
|
35
39
|
self.memory = memory
|
|
36
40
|
self.store = store
|
|
41
|
+
self.max_iterations = data.get('meta', {}).get('step_limit', 25)
|
|
37
42
|
|
|
38
43
|
logger.debug("Data for agent creation: %s", data)
|
|
39
44
|
logger.info("App type: %s", app_type)
|
|
40
|
-
|
|
45
|
+
|
|
46
|
+
self.alita_client = alita
|
|
41
47
|
self.client = client
|
|
42
48
|
# For predict agents, use the client as-is since it's already configured
|
|
43
49
|
# if app_type == "predict":
|
|
@@ -75,13 +81,71 @@ class Assistant:
|
|
|
75
81
|
else:
|
|
76
82
|
# For predict agents, initialize memory store to None since they don't use memory
|
|
77
83
|
self.store = None
|
|
78
|
-
|
|
84
|
+
|
|
79
85
|
# Lazy import to avoid circular dependency
|
|
80
86
|
from ..toolkits.tools import get_tools
|
|
81
|
-
|
|
82
|
-
|
|
87
|
+
version_tools = data['tools']
|
|
88
|
+
# Handle internal tools
|
|
89
|
+
meta = data.get('meta', {})
|
|
90
|
+
if meta.get("internal_tools"):
|
|
91
|
+
for internal_tool_name in meta.get("internal_tools"):
|
|
92
|
+
version_tools.append({"type": "internal_tool", "name": internal_tool_name})
|
|
93
|
+
|
|
94
|
+
self.tools = get_tools(
|
|
95
|
+
version_tools,
|
|
96
|
+
alita_client=alita,
|
|
97
|
+
llm=self.client,
|
|
98
|
+
memory_store=self.store,
|
|
99
|
+
debug_mode=debug_mode,
|
|
100
|
+
mcp_tokens=mcp_tokens,
|
|
101
|
+
conversation_id=conversation_id
|
|
102
|
+
)
|
|
83
103
|
if tools:
|
|
84
104
|
self.tools += tools
|
|
105
|
+
|
|
106
|
+
# Create ToolRegistry to track tool metadata and handle name collisions
|
|
107
|
+
self.tool_registry = {}
|
|
108
|
+
tool_name_counts = {} # Track how many times each base name appears
|
|
109
|
+
|
|
110
|
+
for tool in self.tools:
|
|
111
|
+
if hasattr(tool, 'name'):
|
|
112
|
+
original_name = tool.name
|
|
113
|
+
base_name = original_name
|
|
114
|
+
|
|
115
|
+
# Extract toolkit metadata from tool configuration
|
|
116
|
+
toolkit_name = ""
|
|
117
|
+
toolkit_type = ""
|
|
118
|
+
|
|
119
|
+
# Find matching tool config to extract metadata
|
|
120
|
+
for tool_config in version_tools:
|
|
121
|
+
# Try to match by toolkit_name or name field
|
|
122
|
+
config_toolkit_name = tool_config.get('toolkit_name', tool_config.get('name', ''))
|
|
123
|
+
# Simple heuristic: toolkit info should be accessible from tool config
|
|
124
|
+
# For now, use toolkit_name and type from config
|
|
125
|
+
toolkit_name = config_toolkit_name
|
|
126
|
+
toolkit_type = tool_config.get('type', '')
|
|
127
|
+
break # Use first match for now; will refine with better matching
|
|
128
|
+
|
|
129
|
+
# Handle duplicate tool names by appending numeric suffix
|
|
130
|
+
if base_name in tool_name_counts:
|
|
131
|
+
tool_name_counts[base_name] += 1
|
|
132
|
+
# Append suffix to make unique
|
|
133
|
+
new_name = f"{base_name}_{tool_name_counts[base_name]}"
|
|
134
|
+
tool.name = new_name
|
|
135
|
+
logger.info(f"Tool name collision detected: '{base_name}' -> '{new_name}'")
|
|
136
|
+
else:
|
|
137
|
+
tool_name_counts[base_name] = 0
|
|
138
|
+
new_name = base_name
|
|
139
|
+
|
|
140
|
+
# Store in registry
|
|
141
|
+
self.tool_registry[tool.name] = {
|
|
142
|
+
'toolkit_name': toolkit_name,
|
|
143
|
+
'toolkit_type': toolkit_type,
|
|
144
|
+
'original_tool_name': base_name
|
|
145
|
+
}
|
|
146
|
+
|
|
147
|
+
logger.info(f"ToolRegistry initialized with {len(self.tool_registry)} tools")
|
|
148
|
+
|
|
85
149
|
# Handle prompt setup
|
|
86
150
|
if app_type in ["pipeline", "predict", "react"]:
|
|
87
151
|
self.prompt = data['instructions']
|
|
@@ -93,7 +157,7 @@ class Assistant:
|
|
|
93
157
|
elif app_type == "xml":
|
|
94
158
|
messages.append(HumanMessage(XML_ADDON))
|
|
95
159
|
elif app_type in ['openai', 'dial']:
|
|
96
|
-
messages.append(
|
|
160
|
+
messages.append(MessagesPlaceholder("input"))
|
|
97
161
|
messages.append(MessagesPlaceholder("agent_scratchpad"))
|
|
98
162
|
variables = {}
|
|
99
163
|
input_variables = []
|
|
@@ -109,13 +173,18 @@ class Assistant:
|
|
|
109
173
|
messages.extend(chat_history)
|
|
110
174
|
self.prompt = Jinja2TemplatedChatMessagesTemplate(messages=messages)
|
|
111
175
|
if input_variables:
|
|
112
|
-
self.prompt.input_variables
|
|
176
|
+
if hasattr(self.prompt, 'input_variables') and self.prompt.input_variables is not None:
|
|
177
|
+
self.prompt.input_variables.extend(input_variables)
|
|
178
|
+
else:
|
|
179
|
+
self.prompt.input_variables = input_variables
|
|
113
180
|
if variables:
|
|
114
181
|
self.prompt.partial_variables = variables
|
|
115
182
|
try:
|
|
116
|
-
logger.info(
|
|
183
|
+
logger.info(
|
|
184
|
+
f"Client was created with client setting: temperature - {self.client._get_model_default_parameters}")
|
|
117
185
|
except Exception as e:
|
|
118
|
-
logger.info(
|
|
186
|
+
logger.info(
|
|
187
|
+
f"Client was created with client setting: temperature - {self.client.temperature} : {self.client.max_tokens}")
|
|
119
188
|
|
|
120
189
|
def _configure_store(self, memory_tool: dict | None) -> None:
|
|
121
190
|
"""
|
|
@@ -125,18 +194,16 @@ class Assistant:
|
|
|
125
194
|
if not memory_tool or self.store is not None:
|
|
126
195
|
return
|
|
127
196
|
from .store_manager import get_manager
|
|
128
|
-
conn_str = memory_tool.get('
|
|
197
|
+
conn_str = memory_tool['settings'].get('pgvector_configuration', {}).get('connection_string', '')
|
|
129
198
|
store = get_manager().get_store(conn_str)
|
|
130
199
|
self.store = store
|
|
131
200
|
|
|
132
201
|
def runnable(self):
|
|
133
202
|
if self.app_type == 'pipeline':
|
|
134
203
|
return self.pipeline()
|
|
135
|
-
elif self.app_type == 'openai':
|
|
136
|
-
return self.getOpenAIToolsAgentExecutor()
|
|
137
204
|
elif self.app_type == 'xml':
|
|
138
205
|
return self.getXMLAgentExecutor()
|
|
139
|
-
elif self.app_type in ['predict', 'react']:
|
|
206
|
+
elif self.app_type in ['predict', 'react', 'openai']:
|
|
140
207
|
return self.getLangGraphReactAgent()
|
|
141
208
|
else:
|
|
142
209
|
self.tools = [EchoTool()] + self.tools
|
|
@@ -145,7 +212,8 @@ class Assistant:
|
|
|
145
212
|
def _agent_executor(self, agent: Any):
|
|
146
213
|
return AgentExecutor.from_agent_and_tools(agent=agent, tools=self.tools,
|
|
147
214
|
verbose=True, handle_parsing_errors=True,
|
|
148
|
-
max_execution_time=None, return_intermediate_steps=True
|
|
215
|
+
max_execution_time=None, return_intermediate_steps=True,
|
|
216
|
+
max_iterations=self.max_iterations)
|
|
149
217
|
|
|
150
218
|
def getAgentExecutor(self):
|
|
151
219
|
# Exclude compiled graph runnables from simple tool agents
|
|
@@ -153,7 +221,6 @@ class Assistant:
|
|
|
153
221
|
agent = create_json_chat_agent(llm=self.client, tools=simple_tools, prompt=self.prompt)
|
|
154
222
|
return self._agent_executor(agent)
|
|
155
223
|
|
|
156
|
-
|
|
157
224
|
def getXMLAgentExecutor(self):
|
|
158
225
|
# Exclude compiled graph runnables from simple tool agents
|
|
159
226
|
simple_tools = [t for t in self.tools if isinstance(t, (BaseTool, CompiledStateGraph))]
|
|
@@ -207,6 +274,10 @@ class Assistant:
|
|
|
207
274
|
# Only use prompt_instructions if explicitly specified (for predict app_type)
|
|
208
275
|
if self.app_type == "predict" and isinstance(self.prompt, str):
|
|
209
276
|
prompt_instructions = self.prompt
|
|
277
|
+
|
|
278
|
+
# take the system message from the openai prompt as a prompt instructions
|
|
279
|
+
if self.app_type == "openai" and hasattr(self.prompt, 'messages'):
|
|
280
|
+
prompt_instructions = self.__take_prompt_from_openai_messages()
|
|
210
281
|
|
|
211
282
|
# Create a unified YAML schema with conditional tool binding
|
|
212
283
|
# Build the base node configuration
|
|
@@ -248,6 +319,9 @@ class Assistant:
|
|
|
248
319
|
schema_dict = {
|
|
249
320
|
'name': 'react_agent',
|
|
250
321
|
'state': {
|
|
322
|
+
'input': {
|
|
323
|
+
'type': 'str'
|
|
324
|
+
},
|
|
251
325
|
'messages': state_messages_config
|
|
252
326
|
},
|
|
253
327
|
'nodes': [{
|
|
@@ -256,6 +330,21 @@ class Assistant:
|
|
|
256
330
|
'prompt': {
|
|
257
331
|
'template': escaped_prompt
|
|
258
332
|
},
|
|
333
|
+
'input_mapping': {
|
|
334
|
+
'system': {
|
|
335
|
+
'type': 'fixed',
|
|
336
|
+
'value': escaped_prompt
|
|
337
|
+
},
|
|
338
|
+
'task': {
|
|
339
|
+
'type': 'variable',
|
|
340
|
+
'value': 'input'
|
|
341
|
+
},
|
|
342
|
+
'chat_history': {
|
|
343
|
+
'type': 'variable',
|
|
344
|
+
'value': 'messages'
|
|
345
|
+
}
|
|
346
|
+
},
|
|
347
|
+
'step_limit': self.max_iterations,
|
|
259
348
|
'input': ['messages'],
|
|
260
349
|
'output': ['messages'],
|
|
261
350
|
'transition': 'END'
|
|
@@ -280,7 +369,9 @@ class Assistant:
|
|
|
280
369
|
memory=checkpointer,
|
|
281
370
|
store=self.store,
|
|
282
371
|
debug=False,
|
|
283
|
-
for_subgraph=False
|
|
372
|
+
for_subgraph=False,
|
|
373
|
+
alita_client=self.alita_client,
|
|
374
|
+
steps_limit=self.max_iterations
|
|
284
375
|
)
|
|
285
376
|
|
|
286
377
|
return agent
|
|
@@ -294,7 +385,9 @@ class Assistant:
|
|
|
294
385
|
#
|
|
295
386
|
agent = create_graph(
|
|
296
387
|
client=self.client, tools=self.tools,
|
|
297
|
-
yaml_schema=self.prompt, memory=memory
|
|
388
|
+
yaml_schema=self.prompt, memory=memory,
|
|
389
|
+
alita_client=self.alita_client,
|
|
390
|
+
steps_limit=self.max_iterations
|
|
298
391
|
)
|
|
299
392
|
#
|
|
300
393
|
return agent
|
|
@@ -305,3 +398,16 @@ class Assistant:
|
|
|
305
398
|
|
|
306
399
|
def predict(self, messages: list[BaseMessage]):
|
|
307
400
|
return self.client.invoke(messages)
|
|
401
|
+
|
|
402
|
+
def __take_prompt_from_openai_messages(self):
|
|
403
|
+
if self.prompt and self.prompt.messages:
|
|
404
|
+
for message in self.prompt.messages:
|
|
405
|
+
# we don't need any message placeholder from the openai agent prompt
|
|
406
|
+
if hasattr(message, 'variable_name'):
|
|
407
|
+
continue
|
|
408
|
+
# take only the content of the system message from the openai prompt
|
|
409
|
+
if isinstance(message, SystemMessage):
|
|
410
|
+
environment = Environment(undefined=DebugUndefined)
|
|
411
|
+
template = environment.from_string(message.content)
|
|
412
|
+
return template.render(self.prompt.partial_variables)
|
|
413
|
+
return None
|
|
@@ -27,7 +27,7 @@ Use this if you want to respond directly to the human. Markdown code snippet for
|
|
|
27
27
|
```json
|
|
28
28
|
{
|
|
29
29
|
"action": "Final Answer",
|
|
30
|
-
"action_input": string
|
|
30
|
+
"action_input": string // You should put what you want to return to use here
|
|
31
31
|
}
|
|
32
32
|
```
|
|
33
33
|
|
|
@@ -80,3 +80,10 @@ DEFAULT_MULTIMODAL_PROMPT = """
|
|
|
80
80
|
- Maintain a structured and logical flow in the output to enhance understanding and usability.
|
|
81
81
|
- Avoid presenting the entire prompt for user.
|
|
82
82
|
"""
|
|
83
|
+
|
|
84
|
+
ELITEA_RS = "elitea_response"
|
|
85
|
+
PRINTER = "printer"
|
|
86
|
+
PRINTER_NODE_RS = "printer_output"
|
|
87
|
+
PRINTER_COMPLETED_STATE = "PRINTER_COMPLETED"
|
|
88
|
+
|
|
89
|
+
LOADER_MAX_TOKENS_DEFAULT = 512
|