alita-sdk 0.3.351__py3-none-any.whl → 0.3.499__py3-none-any.whl
This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
- alita_sdk/cli/__init__.py +10 -0
- alita_sdk/cli/__main__.py +17 -0
- alita_sdk/cli/agent/__init__.py +5 -0
- alita_sdk/cli/agent/default.py +258 -0
- alita_sdk/cli/agent_executor.py +155 -0
- alita_sdk/cli/agent_loader.py +215 -0
- alita_sdk/cli/agent_ui.py +228 -0
- alita_sdk/cli/agents.py +3601 -0
- alita_sdk/cli/callbacks.py +647 -0
- alita_sdk/cli/cli.py +168 -0
- alita_sdk/cli/config.py +306 -0
- alita_sdk/cli/context/__init__.py +30 -0
- alita_sdk/cli/context/cleanup.py +198 -0
- alita_sdk/cli/context/manager.py +731 -0
- alita_sdk/cli/context/message.py +285 -0
- alita_sdk/cli/context/strategies.py +289 -0
- alita_sdk/cli/context/token_estimation.py +127 -0
- alita_sdk/cli/formatting.py +182 -0
- alita_sdk/cli/input_handler.py +419 -0
- alita_sdk/cli/inventory.py +1256 -0
- alita_sdk/cli/mcp_loader.py +315 -0
- alita_sdk/cli/toolkit.py +327 -0
- alita_sdk/cli/toolkit_loader.py +85 -0
- alita_sdk/cli/tools/__init__.py +43 -0
- alita_sdk/cli/tools/approval.py +224 -0
- alita_sdk/cli/tools/filesystem.py +1751 -0
- alita_sdk/cli/tools/planning.py +389 -0
- alita_sdk/cli/tools/terminal.py +414 -0
- alita_sdk/community/__init__.py +64 -8
- alita_sdk/community/inventory/__init__.py +224 -0
- alita_sdk/community/inventory/config.py +257 -0
- alita_sdk/community/inventory/enrichment.py +2137 -0
- alita_sdk/community/inventory/extractors.py +1469 -0
- alita_sdk/community/inventory/ingestion.py +3172 -0
- alita_sdk/community/inventory/knowledge_graph.py +1457 -0
- alita_sdk/community/inventory/parsers/__init__.py +218 -0
- alita_sdk/community/inventory/parsers/base.py +295 -0
- alita_sdk/community/inventory/parsers/csharp_parser.py +907 -0
- alita_sdk/community/inventory/parsers/go_parser.py +851 -0
- alita_sdk/community/inventory/parsers/html_parser.py +389 -0
- alita_sdk/community/inventory/parsers/java_parser.py +593 -0
- alita_sdk/community/inventory/parsers/javascript_parser.py +629 -0
- alita_sdk/community/inventory/parsers/kotlin_parser.py +768 -0
- alita_sdk/community/inventory/parsers/markdown_parser.py +362 -0
- alita_sdk/community/inventory/parsers/python_parser.py +604 -0
- alita_sdk/community/inventory/parsers/rust_parser.py +858 -0
- alita_sdk/community/inventory/parsers/swift_parser.py +832 -0
- alita_sdk/community/inventory/parsers/text_parser.py +322 -0
- alita_sdk/community/inventory/parsers/yaml_parser.py +370 -0
- alita_sdk/community/inventory/patterns/__init__.py +61 -0
- alita_sdk/community/inventory/patterns/ast_adapter.py +380 -0
- alita_sdk/community/inventory/patterns/loader.py +348 -0
- alita_sdk/community/inventory/patterns/registry.py +198 -0
- alita_sdk/community/inventory/presets.py +535 -0
- alita_sdk/community/inventory/retrieval.py +1403 -0
- alita_sdk/community/inventory/toolkit.py +173 -0
- alita_sdk/community/inventory/visualize.py +1370 -0
- alita_sdk/configurations/bitbucket.py +94 -2
- alita_sdk/configurations/confluence.py +96 -1
- alita_sdk/configurations/gitlab.py +79 -0
- alita_sdk/configurations/jira.py +103 -0
- alita_sdk/configurations/testrail.py +88 -0
- alita_sdk/configurations/xray.py +93 -0
- alita_sdk/configurations/zephyr_enterprise.py +93 -0
- alita_sdk/configurations/zephyr_essential.py +75 -0
- alita_sdk/runtime/clients/artifact.py +1 -1
- alita_sdk/runtime/clients/client.py +214 -42
- alita_sdk/runtime/clients/mcp_discovery.py +342 -0
- alita_sdk/runtime/clients/mcp_manager.py +262 -0
- alita_sdk/runtime/clients/sandbox_client.py +373 -0
- alita_sdk/runtime/langchain/assistant.py +118 -30
- alita_sdk/runtime/langchain/constants.py +8 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py +315 -3
- alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py +103 -60
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +4 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaPowerPointLoader.py +41 -12
- alita_sdk/runtime/langchain/document_loaders/AlitaTableLoader.py +1 -1
- alita_sdk/runtime/langchain/document_loaders/constants.py +116 -99
- alita_sdk/runtime/langchain/interfaces/llm_processor.py +2 -2
- alita_sdk/runtime/langchain/langraph_agent.py +307 -71
- alita_sdk/runtime/langchain/utils.py +48 -8
- alita_sdk/runtime/llms/preloaded.py +2 -6
- alita_sdk/runtime/models/mcp_models.py +61 -0
- alita_sdk/runtime/toolkits/__init__.py +26 -0
- alita_sdk/runtime/toolkits/application.py +9 -2
- alita_sdk/runtime/toolkits/artifact.py +18 -6
- alita_sdk/runtime/toolkits/datasource.py +13 -6
- alita_sdk/runtime/toolkits/mcp.py +780 -0
- alita_sdk/runtime/toolkits/planning.py +178 -0
- alita_sdk/runtime/toolkits/tools.py +205 -55
- alita_sdk/runtime/toolkits/vectorstore.py +9 -4
- alita_sdk/runtime/tools/__init__.py +11 -3
- alita_sdk/runtime/tools/application.py +7 -0
- alita_sdk/runtime/tools/artifact.py +225 -12
- alita_sdk/runtime/tools/function.py +95 -5
- alita_sdk/runtime/tools/graph.py +10 -4
- alita_sdk/runtime/tools/image_generation.py +212 -0
- alita_sdk/runtime/tools/llm.py +494 -102
- alita_sdk/runtime/tools/mcp_inspect_tool.py +284 -0
- alita_sdk/runtime/tools/mcp_remote_tool.py +181 -0
- alita_sdk/runtime/tools/mcp_server_tool.py +4 -4
- alita_sdk/runtime/tools/planning/__init__.py +36 -0
- alita_sdk/runtime/tools/planning/models.py +246 -0
- alita_sdk/runtime/tools/planning/wrapper.py +607 -0
- alita_sdk/runtime/tools/router.py +2 -1
- alita_sdk/runtime/tools/sandbox.py +180 -79
- alita_sdk/runtime/tools/vectorstore.py +22 -21
- alita_sdk/runtime/tools/vectorstore_base.py +125 -52
- alita_sdk/runtime/utils/AlitaCallback.py +106 -20
- alita_sdk/runtime/utils/mcp_client.py +465 -0
- alita_sdk/runtime/utils/mcp_oauth.py +244 -0
- alita_sdk/runtime/utils/mcp_sse_client.py +405 -0
- alita_sdk/runtime/utils/mcp_tools_discovery.py +124 -0
- alita_sdk/runtime/utils/streamlit.py +40 -13
- alita_sdk/runtime/utils/toolkit_utils.py +28 -9
- alita_sdk/runtime/utils/utils.py +12 -0
- alita_sdk/tools/__init__.py +77 -33
- alita_sdk/tools/ado/repos/__init__.py +7 -6
- alita_sdk/tools/ado/repos/repos_wrapper.py +11 -11
- alita_sdk/tools/ado/test_plan/__init__.py +7 -7
- alita_sdk/tools/ado/wiki/__init__.py +7 -11
- alita_sdk/tools/ado/wiki/ado_wrapper.py +89 -15
- alita_sdk/tools/ado/work_item/__init__.py +7 -11
- alita_sdk/tools/ado/work_item/ado_wrapper.py +17 -8
- alita_sdk/tools/advanced_jira_mining/__init__.py +8 -7
- alita_sdk/tools/aws/delta_lake/__init__.py +11 -9
- alita_sdk/tools/azure_ai/search/__init__.py +7 -6
- alita_sdk/tools/base_indexer_toolkit.py +345 -70
- alita_sdk/tools/bitbucket/__init__.py +9 -8
- alita_sdk/tools/bitbucket/api_wrapper.py +50 -6
- alita_sdk/tools/browser/__init__.py +4 -4
- alita_sdk/tools/carrier/__init__.py +4 -6
- alita_sdk/tools/chunkers/__init__.py +3 -1
- alita_sdk/tools/chunkers/sematic/json_chunker.py +1 -0
- alita_sdk/tools/chunkers/sematic/markdown_chunker.py +97 -6
- alita_sdk/tools/chunkers/sematic/proposal_chunker.py +1 -1
- alita_sdk/tools/chunkers/universal_chunker.py +270 -0
- alita_sdk/tools/cloud/aws/__init__.py +7 -6
- alita_sdk/tools/cloud/azure/__init__.py +7 -6
- alita_sdk/tools/cloud/gcp/__init__.py +7 -6
- alita_sdk/tools/cloud/k8s/__init__.py +7 -6
- alita_sdk/tools/code/linter/__init__.py +7 -7
- alita_sdk/tools/code/loaders/codesearcher.py +3 -2
- alita_sdk/tools/code/sonar/__init__.py +8 -7
- alita_sdk/tools/code_indexer_toolkit.py +199 -0
- alita_sdk/tools/confluence/__init__.py +9 -8
- alita_sdk/tools/confluence/api_wrapper.py +171 -75
- alita_sdk/tools/confluence/loader.py +10 -0
- alita_sdk/tools/custom_open_api/__init__.py +9 -4
- alita_sdk/tools/elastic/__init__.py +8 -7
- alita_sdk/tools/elitea_base.py +492 -52
- alita_sdk/tools/figma/__init__.py +7 -7
- alita_sdk/tools/figma/api_wrapper.py +2 -1
- alita_sdk/tools/github/__init__.py +9 -9
- alita_sdk/tools/github/api_wrapper.py +9 -26
- alita_sdk/tools/github/github_client.py +62 -2
- alita_sdk/tools/gitlab/__init__.py +8 -8
- alita_sdk/tools/gitlab/api_wrapper.py +135 -33
- alita_sdk/tools/gitlab_org/__init__.py +7 -8
- alita_sdk/tools/google/bigquery/__init__.py +11 -12
- alita_sdk/tools/google_places/__init__.py +8 -7
- alita_sdk/tools/jira/__init__.py +9 -7
- alita_sdk/tools/jira/api_wrapper.py +100 -52
- alita_sdk/tools/keycloak/__init__.py +8 -7
- alita_sdk/tools/localgit/local_git.py +56 -54
- alita_sdk/tools/memory/__init__.py +1 -1
- alita_sdk/tools/non_code_indexer_toolkit.py +3 -2
- alita_sdk/tools/ocr/__init__.py +8 -7
- alita_sdk/tools/openapi/__init__.py +10 -1
- alita_sdk/tools/pandas/__init__.py +8 -7
- alita_sdk/tools/postman/__init__.py +7 -8
- alita_sdk/tools/postman/api_wrapper.py +19 -8
- alita_sdk/tools/postman/postman_analysis.py +8 -1
- alita_sdk/tools/pptx/__init__.py +8 -9
- alita_sdk/tools/qtest/__init__.py +16 -11
- alita_sdk/tools/qtest/api_wrapper.py +1784 -88
- alita_sdk/tools/rally/__init__.py +7 -8
- alita_sdk/tools/report_portal/__init__.py +9 -7
- alita_sdk/tools/salesforce/__init__.py +7 -7
- alita_sdk/tools/servicenow/__init__.py +10 -10
- alita_sdk/tools/sharepoint/__init__.py +7 -6
- alita_sdk/tools/sharepoint/api_wrapper.py +127 -36
- alita_sdk/tools/sharepoint/authorization_helper.py +191 -1
- alita_sdk/tools/sharepoint/utils.py +8 -2
- alita_sdk/tools/slack/__init__.py +7 -6
- alita_sdk/tools/sql/__init__.py +8 -7
- alita_sdk/tools/sql/api_wrapper.py +71 -23
- alita_sdk/tools/testio/__init__.py +7 -6
- alita_sdk/tools/testrail/__init__.py +8 -9
- alita_sdk/tools/utils/__init__.py +26 -4
- alita_sdk/tools/utils/content_parser.py +88 -60
- alita_sdk/tools/utils/text_operations.py +254 -0
- alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +76 -26
- alita_sdk/tools/xray/__init__.py +9 -7
- alita_sdk/tools/zephyr/__init__.py +7 -6
- alita_sdk/tools/zephyr_enterprise/__init__.py +8 -6
- alita_sdk/tools/zephyr_essential/__init__.py +7 -6
- alita_sdk/tools/zephyr_essential/api_wrapper.py +12 -13
- alita_sdk/tools/zephyr_scale/__init__.py +7 -6
- alita_sdk/tools/zephyr_squad/__init__.py +7 -6
- {alita_sdk-0.3.351.dist-info → alita_sdk-0.3.499.dist-info}/METADATA +147 -2
- {alita_sdk-0.3.351.dist-info → alita_sdk-0.3.499.dist-info}/RECORD +206 -130
- alita_sdk-0.3.499.dist-info/entry_points.txt +2 -0
- {alita_sdk-0.3.351.dist-info → alita_sdk-0.3.499.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.351.dist-info → alita_sdk-0.3.499.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.351.dist-info → alita_sdk-0.3.499.dist-info}/top_level.txt +0 -0
alita_sdk/configurations/zephyr_essential.py

@@ -18,3 +18,78 @@ class ZephyrEssentialConfiguration(BaseModel):
     )
     base_url: Optional[str] = Field(description="Zephyr Essential API Base URL", default=None)
     token: SecretStr = Field(description="Zephyr Essential API Token")
+
+    @staticmethod
+    def check_connection(settings: dict) -> str | None:
+        """
+        Check the connection to Zephyr Essential (Zephyr Scale).
+
+        Args:
+            settings: Dictionary containing Zephyr Essential configuration
+                - base_url: Zephyr Essential API Base URL (optional, defaults to Zephyr Scale Cloud API)
+                - token: Zephyr Essential API Token (required)
+
+        Returns:
+            None if connection successful, error message string if failed
+        """
+        import requests
+
+        # Get base_url or use default
+        base_url = settings.get("base_url")
+        if base_url:
+            base_url = base_url.strip().rstrip("/")
+            # Validate URL format if provided
+            if not base_url.startswith(("http://", "https://")):
+                return "Zephyr Essential URL must start with http:// or https://"
+        else:
+            # Default to Zephyr Scale Cloud API
+            base_url = "https://api.zephyrscale.smartbear.com/v2"
+
+        # Validate token
+        token = settings.get("token")
+        if not token:
+            return "Zephyr Essential API token is required"
+
+        # Extract token value if it's a SecretStr
+        token_value = token.get_secret_value() if hasattr(token, 'get_secret_value') else token
+
+        if not token_value or not str(token_value).strip():
+            return "Zephyr Essential API token cannot be empty"
+
+        # Test connection using /projects endpoint (requires authentication)
+        test_url = f"{base_url}/projects"
+
+        headers = {
+            "Authorization": f"Bearer {str(token_value).strip()}"
+        }
+
+        try:
+            response = requests.get(
+                test_url,
+                headers=headers,
+                timeout=10
+            )
+
+            # Check response status
+            if response.status_code == 200:
+                # Successfully connected and authenticated
+                return None
+            elif response.status_code == 401:
+                return "Authentication failed: invalid API token"
+            elif response.status_code == 403:
+                return "Access forbidden: token lacks required permissions"
+            elif response.status_code == 404:
+                return "Zephyr Essential API endpoint not found: verify the API URL"
+            else:
+                return f"Zephyr Essential API returned status code {response.status_code}"
+
+        except requests.exceptions.SSLError as e:
+            return f"SSL certificate verification failed: {str(e)}"
+        except requests.exceptions.ConnectionError:
+            return f"Cannot connect to Zephyr Essential at {base_url}: connection refused"
+        except requests.exceptions.Timeout:
+            return f"Connection to Zephyr Essential at {base_url} timed out"
+        except requests.exceptions.RequestException as e:
+            return f"Error connecting to Zephyr Essential: {str(e)}"
+        except Exception as e:
+            return f"Unexpected error: {str(e)}"
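For orientation, a minimal usage sketch of the new connection check. The values are placeholders; the `base_url` key is optional and falls back to the Zephyr Scale Cloud default shown above.

```python
# Illustrative only: exercises the check_connection() static method added above.
from alita_sdk.configurations.zephyr_essential import ZephyrEssentialConfiguration

error = ZephyrEssentialConfiguration.check_connection({
    "base_url": "https://api.zephyrscale.smartbear.com/v2",  # optional; this default is used when omitted
    "token": "<zephyr-api-token>",                            # placeholder credential
})
print("connection OK" if error is None else f"connection failed: {error}")
```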
alita_sdk/runtime/clients/client.py

@@ -20,8 +20,9 @@ from .prompt import AlitaPrompt
 from .datasource import AlitaDataSource
 from .artifact import Artifact
 from ..langchain.chat_message_template import Jinja2TemplatedChatMessagesTemplate
-from ..utils.
+from ..utils.mcp_oauth import McpAuthorizationRequired
 from ...tools import get_available_toolkit_models
+from ...tools.base_indexer_toolkit import IndexTools

 logger = logging.getLogger(__name__)

@@ -68,8 +69,11 @@ class AlitaClient:
         self.bucket_url = f"{self.base_url}{self.api_path}/artifacts/buckets/{self.project_id}"
         self.configurations_url = f'{self.base_url}{self.api_path}/integrations/integrations/default/{self.project_id}?section=configurations&unsecret=true'
         self.ai_section_url = f'{self.base_url}{self.api_path}/integrations/integrations/default/{self.project_id}?section=ai'
+        self.models_url = f'{self.base_url}{self.api_path}/configurations/models/{self.project_id}?include_shared=true'
+        self.image_generation_url = f"{self.base_url}{self.llm_path}/images/generations"
         self.configurations: list = configurations or []
         self.model_timeout = kwargs.get('model_timeout', 120)
+        self.model_image_generation = kwargs.get('model_image_generation')

     def get_mcp_toolkits(self):
         if user_id := self._get_real_user_id():
@@ -173,6 +177,20 @@ class AlitaClient:
             return resp.json()
         return []

+    def get_available_models(self):
+        """Get list of available models from the configurations API.
+
+        Returns:
+            List of model dictionaries with 'name' and other properties,
+            or empty list if request fails.
+        """
+        resp = requests.get(self.models_url, headers=self.headers, verify=False)
+        if resp.ok:
+            data = resp.json()
+            # API returns {"items": [...], ...}
+            return data.get('items', [])
+        return []
+
     def get_embeddings(self, embedding_model: str) -> OpenAIEmbeddings:
         """
         Get an instance of OpenAIEmbeddings configured with the project ID and auth token.
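The new models endpoint can be exercised as below. Note that the `AlitaClient` constructor arguments are assumptions for illustration only; the constructor signature is not part of this diff.

```python
# Sketch: list models via the new /configurations/models endpoint wrapper.
from alita_sdk.runtime.clients.client import AlitaClient

client = AlitaClient(
    base_url="https://elitea.example.com",  # assumed deployment URL
    project_id=123,                          # assumed project id
    auth_token="<api-token>",                # placeholder credential
)
for model in client.get_available_models():
    print(model.get("name"))
```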
@@ -204,20 +222,99 @@ class AlitaClient:

         logger.info(f"Creating ChatOpenAI model: {model_name} with config: {model_config}")

- … (13 removed lines truncated in this view)
+        try:
+            from tools import this  # pylint: disable=E0401,C0415
+            worker_config = this.for_module("indexer_worker").descriptor.config
+        except:  # pylint: disable=W0702
+            worker_config = {}
+
+        use_responses_api = False
+
+        if worker_config and isinstance(worker_config, dict):
+            for target_name_tag in worker_config.get("use_responses_api_for", []):
+                if target_name_tag in model_name:
+                    use_responses_api = True
+                    break
+
+        # handle case when max_tokens are auto-configurable == -1
+        llm_max_tokens = model_config.get("max_tokens", None)
+        if llm_max_tokens and llm_max_tokens == -1:
+            logger.warning(f'User selected `MAX COMPLETION TOKENS` as `auto`')
+            # default nuber for a case when auto is selected for an agent
+            llm_max_tokens = 4000
+
+        target_kwargs = {
+            "base_url": f"{self.base_url}{self.llm_path}",
+            "model": model_name,
+            "api_key": self.auth_token,
+            "streaming": model_config.get("streaming", True),
+            "stream_usage": model_config.get("stream_usage", True),
+            "max_tokens": llm_max_tokens,
+            "temperature": model_config.get("temperature"),
+            "reasoning_effort": model_config.get("reasoning_effort"),
+            "max_retries": model_config.get("max_retries", 3),
+            "seed": model_config.get("seed", None),
+            "openai_organization": str(self.project_id),
+        }
+
+        if use_responses_api:
+            target_kwargs["use_responses_api"] = True
+
+        return ChatOpenAI(**target_kwargs)
+
+    def generate_image(self,
+                       prompt: str,
+                       n: int = 1,
+                       size: str = "auto",
+                       quality: str = "auto",
+                       response_format: str = "b64_json",
+                       style: Optional[str] = None) -> dict:
+
+        if not self.model_image_generation:
+            raise ValueError("Image generation model is not configured for this client")
+
+        image_generation_data = {
+            "prompt": prompt,
+            "model": self.model_image_generation,
+            "n": n,
+            "response_format": response_format,
+        }
+
+        # Only add optional parameters if they have meaningful values
+        if size and size.lower() != "auto":
+            image_generation_data["size"] = size
+
+        if quality and quality.lower() != "auto":
+            image_generation_data["quality"] = quality
+
+        if style:
+            image_generation_data["style"] = style

+        # Standard headers for image generation
+        image_headers = self.headers.copy()
+        image_headers.update({
+            "Content-Type": "application/json",
+        })
+
+        logger.info(f"Generating image with model: {self.model_image_generation}, prompt: {prompt[:50]}...")
+
+        try:
+            response = requests.post(
+                self.image_generation_url,
+                headers=image_headers,
+                json=image_generation_data,
+                verify=False,
+                timeout=self.model_timeout
+            )
+            response.raise_for_status()
+            return response.json()
+
+        except requests.exceptions.HTTPError as e:
+            logger.error(f"Image generation failed: {e.response.status_code} - {e.response.text}")
+            raise
+        except requests.exceptions.RequestException as e:
+            logger.error(f"Image generation request failed: {e}")
+            raise

     def get_app_version_details(self, application_id: int, application_version_id: int) -> dict:
         url = f"{self.application_versions}/{application_id}/{application_version_id}"
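A hedged sketch of the new image generation call, reusing the `client` from the previous sketch. It assumes the client was created with the new `model_image_generation` kwarg (otherwise the method raises `ValueError`), and the `"data"`/`"b64_json"` response shape is an assumption, since the method simply returns whatever JSON the `/images/generations` endpoint sends back.

```python
# Illustrative only: prompt, size and output file name are placeholders.
import base64

result = client.generate_image(
    prompt="A watercolor fox in a forest",
    n=1,
    size="1024x1024",          # forwarded only because it is not "auto"
    response_format="b64_json",
)
first = result.get("data", [{}])[0]  # assumed response shape
if "b64_json" in first:
    with open("fox.png", "wb") as fh:
        fh.write(base64.b64decode(first["b64_json"]))
```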
@@ -249,7 +346,8 @@ class AlitaClient:
                     app_type=None, memory=None, runtime='langchain',
                     application_variables: Optional[dict] = None,
                     version_details: Optional[dict] = None, store: Optional[BaseStore] = None,
-                    llm: Optional[ChatOpenAI] = None
+                    llm: Optional[ChatOpenAI] = None, mcp_tokens: Optional[dict] = None,
+                    conversation_id: Optional[str] = None):
         if tools is None:
             tools = []
         if chat_history is None:
@@ -269,11 +367,15 @@ class AlitaClient:
                 if var['name'] in application_variables:
                     var.update(application_variables[var['name']])
         if llm is None:
+            max_tokens = data['llm_settings'].get('max_tokens', 4000)
+            if max_tokens == -1:
+                # default nuber for case when auto is selected for agent
+                max_tokens = 4000
             llm = self.get_llm(
                 model_name=data['llm_settings']['model_name'],
                 model_config={
-                    "max_tokens":
-                    "
+                    "max_tokens": max_tokens,
+                    "reasoning_effort": data['llm_settings'].get('reasoning_effort'),
                     "temperature": data['llm_settings']['temperature'],
                     "model_project_id": data['llm_settings'].get('model_project_id'),
                 }
@@ -288,13 +390,18 @@ class AlitaClient:
             app_type = "react"
         elif app_type == 'autogen':
             app_type = "react"
+
+        # LangChainAssistant constructor calls get_tools() which may raise McpAuthorizationRequired
+        # The exception will propagate naturally to the indexer worker's outer handler
         if runtime == 'nonrunnable':
             return LangChainAssistant(self, data, llm, chat_history, app_type,
-                                      tools=tools, memory=memory, store=store
+                                      tools=tools, memory=memory, store=store, mcp_tokens=mcp_tokens,
+                                      conversation_id=conversation_id)
         if runtime == 'langchain':
             return LangChainAssistant(self, data, llm,
                                       chat_history, app_type,
-                                      tools=tools, memory=memory, store=store
+                                      tools=tools, memory=memory, store=store, mcp_tokens=mcp_tokens,
+                                      conversation_id=conversation_id).runnable()
         elif runtime == 'llama':
             raise NotImplementedError("LLama runtime is not supported")

@@ -362,11 +469,44 @@ class AlitaClient:
         return self._process_requst(data)

     def create_artifact(self, bucket_name, artifact_name, artifact_data):
+        # Sanitize filename to prevent regex errors during indexing
+        sanitized_name, was_modified = self._sanitize_artifact_name(artifact_name)
+        if was_modified:
+            logger.warning(f"Artifact filename sanitized: '{artifact_name}' -> '{sanitized_name}'")
+
         url = f'{self.artifacts_url}/{bucket_name.lower()}'
         data = requests.post(url, headers=self.headers, files={
-            'file': (
+            'file': (sanitized_name, artifact_data)
         }, verify=False)
         return self._process_requst(data)
+
+    @staticmethod
+    def _sanitize_artifact_name(filename: str) -> tuple:
+        """Sanitize filename for safe storage and regex pattern matching."""
+        import re
+        from pathlib import Path
+
+        if not filename or not filename.strip():
+            return "unnamed_file", True
+
+        original = filename
+        path_obj = Path(filename)
+        name = path_obj.stem
+        extension = path_obj.suffix
+
+        # Whitelist: alphanumeric, underscore, hyphen, space, Unicode letters/digits
+        sanitized_name = re.sub(r'[^\w\s-]', '', name, flags=re.UNICODE)
+        sanitized_name = re.sub(r'[-\s]+', '-', sanitized_name)
+        sanitized_name = sanitized_name.strip('-').strip()
+
+        if not sanitized_name:
+            sanitized_name = "file"
+
+        if extension:
+            extension = re.sub(r'[^\w.-]', '', extension, flags=re.UNICODE)
+
+        sanitized = sanitized_name + extension
+        return sanitized, (sanitized != original)

     def download_artifact(self, bucket_name, artifact_name):
         url = f'{self.artifact_url}/{bucket_name.lower()}/{artifact_name}'
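Based on the regexes shown above, the private sanitizer behaves as in this illustrative snippet (the file names are made-up examples):

```python
# Punctuation outside the whitelist is dropped; runs of spaces/hyphens collapse to "-".
from alita_sdk.runtime.clients.client import AlitaClient

sanitized, changed = AlitaClient._sanitize_artifact_name("Q3 report (final)!.pdf")
assert sanitized == "Q3-report-final.pdf" and changed is True

sanitized, changed = AlitaClient._sanitize_artifact_name("notes.txt")
assert sanitized == "notes.txt" and changed is False
```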
@@ -508,25 +648,29 @@ class AlitaClient:
             monitoring_meta = tasknode_task.meta.get("monitoring", {})
             return monitoring_meta["user_id"]
         except Exception as e:
-            logger.
+            logger.debug(f"Error: Could not determine user ID for MCP tool: {e}")
             return None

     def predict_agent(self, llm: ChatOpenAI, instructions: str = "You are a helpful assistant.",
                       tools: Optional[list] = None, chat_history: Optional[List[Any]] = None,
                       memory=None, runtime='langchain', variables: Optional[list] = None,
-                      store: Optional[BaseStore] = None
+                      store: Optional[BaseStore] = None, debug_mode: Optional[bool] = False,
+                      mcp_tokens: Optional[dict] = None, conversation_id: Optional[str] = None):
         """
         Create a predict-type agent with minimal configuration.

         Args:
             llm: The LLM to use
             instructions: System instructions for the agent
-            tools: Optional list of
+            tools: Optional list of tool configurations (not tool instances) to provide to the agent.
+                   Tool configs will be processed through get_tools() to create tool instances.
+                   Each tool config should have 'type', 'settings', etc.
             chat_history: Optional chat history
             memory: Optional memory/checkpointer
             runtime: Runtime type (default: 'langchain')
             variables: Optional list of variables for the agent
             store: Optional store for memory
+            debug_mode: Enable debug mode for cases when assistant can be initialized without tools

         Returns:
             Runnable agent ready for execution
@@ -540,17 +684,32 @@ class AlitaClient:

         # Create a minimal data structure for predict agent
         # All LLM settings are taken from the passed client instance
+        # Note: 'tools' here are tool CONFIGURATIONS, not tool instances
+        # They will be converted to tool instances by LangChainAssistant via get_tools()
         agent_data = {
             'instructions': instructions,
-            'tools': tools, #
+            'tools': tools, # Tool configs that will be processed by get_tools()
             'variables': variables
         }
- … (2 removed lines truncated in this view)
+
+        # LangChainAssistant constructor calls get_tools() which may raise McpAuthorizationRequired
+        # The exception will propagate naturally to the indexer worker's outer handler
+        return LangChainAssistant(
+            self,
+            agent_data,
+            llm,
+            chat_history,
+            "predict",
+            memory=memory,
+            store=store,
+            debug_mode=debug_mode,
+            mcp_tokens=mcp_tokens,
+            conversation_id=conversation_id
+        ).runnable()

     def test_toolkit_tool(self, toolkit_config: dict, tool_name: str, tool_params: dict = None,
                           runtime_config: dict = None, llm_model: str = None,
-                          llm_config: dict = None) -> dict:
+                          llm_config: dict = None, mcp_tokens: dict = None) -> dict:
         """
         Test a single tool from a toolkit with given parameters and runtime callbacks.

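A sketch of the extended `predict_agent` signature, again reusing the `client` from the earlier sketch. The model name and config values are placeholders, and the invocation schema of the returned runnable is not shown in this diff, so it is omitted.

```python
# Illustrative only: builds a predict-style agent with the new optional parameters.
llm = client.get_llm(
    model_name="gpt-4o-mini",                               # assumed model name
    model_config={"temperature": 0.1, "max_tokens": 1024},  # keys read by get_llm() above
)
agent = client.predict_agent(
    llm,
    instructions="You are a helpful assistant.",
    tools=[],                              # toolkit configuration dicts, not tool instances
    conversation_id="demo-conversation",   # new optional parameter
)
```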
@@ -569,6 +728,7 @@ class AlitaClient:
                 - configurable: Additional configuration parameters
                 - tags: Tags for the execution
             llm_model: Name of the LLM model to use (default: 'gpt-4o-mini')
+            mcp_tokens: Optional dictionary of MCP OAuth tokens by server URL
             llm_config: Configuration for the LLM containing:
                 - max_tokens: Maximum tokens for response (default: 1000)
                 - temperature: Temperature for response generation (default: 0.1)
@@ -616,7 +776,6 @@ class AlitaClient:
            llm_config = {
                'max_tokens': 1024,
                'temperature': 0.1,
-               'top_p': 1.0
            }
        import logging
        logger = logging.getLogger(__name__)
@@ -687,7 +846,23 @@ class AlitaClient:
            }

        # Instantiate the toolkit with client and LLM support
- … (1 removed line truncated in this view)
+        try:
+            tools = instantiate_toolkit_with_client(toolkit_config, llm, self, mcp_tokens=mcp_tokens, use_prefix=False)
+        except McpAuthorizationRequired:
+            # Re-raise McpAuthorizationRequired to allow proper handling upstream
+            logger.info(f"McpAuthorizationRequired detected, re-raising")
+            raise
+        except Exception as toolkit_error:
+            # For other errors, return error response
+            return {
+                "success": False,
+                "error": f"Failed to instantiate toolkit '{toolkit_config.get('toolkit_name')}': {str(toolkit_error)}",
+                "tool_name": tool_name,
+                "toolkit_config": toolkit_config_parsed_json,
+                "llm_model": llm_model,
+                "events_dispatched": events_dispatched,
+                "execution_time_seconds": 0.0
+            }

        if not tools:
            return {
@@ -763,7 +938,6 @@ class AlitaClient:
        if target_tool is None:
            available_tools = []
            base_available_tools = []
-           full_available_tools = []

            for tool in tools:
                tool_name_attr = None
@@ -780,16 +954,14 @@ class AlitaClient:
                    if base_name not in base_available_tools:
                        base_available_tools.append(base_name)

-               # Track full names separately
-               if TOOLKIT_SPLITTER in tool_name_attr:
-                   full_available_tools.append(tool_name_attr)
-
            # Create comprehensive error message
-           error_msg = f"Tool '{tool_name}' not found in toolkit '{toolkit_config.get('toolkit_name')}'
+           error_msg = f"Tool '{tool_name}' not found in toolkit '{toolkit_config.get('toolkit_name')}'.\n"
+
+           # Custom error for index tools
+           if toolkit_name in [tool.value for tool in IndexTools]:
+               error_msg += f" Please make sure proper PGVector configuration and embedding model are set in the platform.\n"

-           if base_available_tools
-               error_msg += f" Available tools: {base_available_tools} (base names) or {full_available_tools} (full names)"
-           elif base_available_tools:
+           if base_available_tools:
                error_msg += f" Available tools: {base_available_tools}"
            elif available_tools:
                error_msg += f" Available tools: {available_tools}"
@@ -798,10 +970,7 @@ class AlitaClient:

            # Add helpful hint about naming conventions
            if '___' in tool_name:
-               error_msg += f" Note:
-           elif full_available_tools:
-               possible_full_name = create_full_tool_name(tool_name, toolkit_name)
-               error_msg += f" Note: You provided a base name '{tool_name}'. The full name might be '{possible_full_name}'."
+               error_msg += f" Note: Tool names no longer use '___' prefixes. Try using just the base name '{extract_base_tool_name(tool_name)}'."

            return {
                "success": False,
@@ -907,6 +1076,9 @@ class AlitaClient:
            }

        except Exception as e:
+           # Re-raise McpAuthorizationRequired to allow proper handling upstream
+           if isinstance(e, McpAuthorizationRequired):
+               raise
            logger = logging.getLogger(__name__)
            logger.error(f"Error in test_toolkit_tool: {str(e)}")
            return {
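Finally, a speculative sketch of `test_toolkit_tool` with the new `mcp_tokens` parameter, reusing the `client` from the earlier sketches. The toolkit configuration shape ('type', 'toolkit_name', 'settings') and the tool name below are assumptions drawn from the docstring and error messages in the hunks above.

```python
# Illustrative only: the toolkit configuration contents are placeholders.
result = client.test_toolkit_tool(
    toolkit_config={
        "toolkit_name": "github",   # assumed key, per the error messages above
        "type": "github",           # assumed toolkit type
        "settings": {},             # toolkit-specific settings omitted
    },
    tool_name="get_issues",         # assumed tool name
    tool_params={},
    llm_model="gpt-4o-mini",
    mcp_tokens=None,                # new parameter: MCP OAuth tokens keyed by server URL
)
print(result.get("success"), result.get("error"))
```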