alita-sdk 0.3.379__py3-none-any.whl → 0.3.627__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alita_sdk/cli/__init__.py +10 -0
- alita_sdk/cli/__main__.py +17 -0
- alita_sdk/cli/agent/__init__.py +5 -0
- alita_sdk/cli/agent/default.py +258 -0
- alita_sdk/cli/agent_executor.py +156 -0
- alita_sdk/cli/agent_loader.py +245 -0
- alita_sdk/cli/agent_ui.py +228 -0
- alita_sdk/cli/agents.py +3113 -0
- alita_sdk/cli/callbacks.py +647 -0
- alita_sdk/cli/cli.py +168 -0
- alita_sdk/cli/config.py +306 -0
- alita_sdk/cli/context/__init__.py +30 -0
- alita_sdk/cli/context/cleanup.py +198 -0
- alita_sdk/cli/context/manager.py +731 -0
- alita_sdk/cli/context/message.py +285 -0
- alita_sdk/cli/context/strategies.py +289 -0
- alita_sdk/cli/context/token_estimation.py +127 -0
- alita_sdk/cli/formatting.py +182 -0
- alita_sdk/cli/input_handler.py +419 -0
- alita_sdk/cli/inventory.py +1073 -0
- alita_sdk/cli/mcp_loader.py +315 -0
- alita_sdk/cli/testcases/__init__.py +94 -0
- alita_sdk/cli/testcases/data_generation.py +119 -0
- alita_sdk/cli/testcases/discovery.py +96 -0
- alita_sdk/cli/testcases/executor.py +84 -0
- alita_sdk/cli/testcases/logger.py +85 -0
- alita_sdk/cli/testcases/parser.py +172 -0
- alita_sdk/cli/testcases/prompts.py +91 -0
- alita_sdk/cli/testcases/reporting.py +125 -0
- alita_sdk/cli/testcases/setup.py +108 -0
- alita_sdk/cli/testcases/test_runner.py +282 -0
- alita_sdk/cli/testcases/utils.py +39 -0
- alita_sdk/cli/testcases/validation.py +90 -0
- alita_sdk/cli/testcases/workflow.py +196 -0
- alita_sdk/cli/toolkit.py +327 -0
- alita_sdk/cli/toolkit_loader.py +85 -0
- alita_sdk/cli/tools/__init__.py +43 -0
- alita_sdk/cli/tools/approval.py +224 -0
- alita_sdk/cli/tools/filesystem.py +1751 -0
- alita_sdk/cli/tools/planning.py +389 -0
- alita_sdk/cli/tools/terminal.py +414 -0
- alita_sdk/community/__init__.py +72 -12
- alita_sdk/community/inventory/__init__.py +236 -0
- alita_sdk/community/inventory/config.py +257 -0
- alita_sdk/community/inventory/enrichment.py +2137 -0
- alita_sdk/community/inventory/extractors.py +1469 -0
- alita_sdk/community/inventory/ingestion.py +3172 -0
- alita_sdk/community/inventory/knowledge_graph.py +1457 -0
- alita_sdk/community/inventory/parsers/__init__.py +218 -0
- alita_sdk/community/inventory/parsers/base.py +295 -0
- alita_sdk/community/inventory/parsers/csharp_parser.py +907 -0
- alita_sdk/community/inventory/parsers/go_parser.py +851 -0
- alita_sdk/community/inventory/parsers/html_parser.py +389 -0
- alita_sdk/community/inventory/parsers/java_parser.py +593 -0
- alita_sdk/community/inventory/parsers/javascript_parser.py +629 -0
- alita_sdk/community/inventory/parsers/kotlin_parser.py +768 -0
- alita_sdk/community/inventory/parsers/markdown_parser.py +362 -0
- alita_sdk/community/inventory/parsers/python_parser.py +604 -0
- alita_sdk/community/inventory/parsers/rust_parser.py +858 -0
- alita_sdk/community/inventory/parsers/swift_parser.py +832 -0
- alita_sdk/community/inventory/parsers/text_parser.py +322 -0
- alita_sdk/community/inventory/parsers/yaml_parser.py +370 -0
- alita_sdk/community/inventory/patterns/__init__.py +61 -0
- alita_sdk/community/inventory/patterns/ast_adapter.py +380 -0
- alita_sdk/community/inventory/patterns/loader.py +348 -0
- alita_sdk/community/inventory/patterns/registry.py +198 -0
- alita_sdk/community/inventory/presets.py +535 -0
- alita_sdk/community/inventory/retrieval.py +1403 -0
- alita_sdk/community/inventory/toolkit.py +173 -0
- alita_sdk/community/inventory/toolkit_utils.py +176 -0
- alita_sdk/community/inventory/visualize.py +1370 -0
- alita_sdk/configurations/__init__.py +1 -1
- alita_sdk/configurations/ado.py +141 -20
- alita_sdk/configurations/bitbucket.py +94 -2
- alita_sdk/configurations/confluence.py +130 -1
- alita_sdk/configurations/figma.py +76 -0
- alita_sdk/configurations/gitlab.py +91 -0
- alita_sdk/configurations/jira.py +103 -0
- alita_sdk/configurations/openapi.py +329 -0
- alita_sdk/configurations/qtest.py +72 -1
- alita_sdk/configurations/report_portal.py +96 -0
- alita_sdk/configurations/sharepoint.py +148 -0
- alita_sdk/configurations/testio.py +83 -0
- alita_sdk/configurations/testrail.py +88 -0
- alita_sdk/configurations/xray.py +93 -0
- alita_sdk/configurations/zephyr_enterprise.py +93 -0
- alita_sdk/configurations/zephyr_essential.py +75 -0
- alita_sdk/runtime/clients/artifact.py +3 -3
- alita_sdk/runtime/clients/client.py +388 -46
- alita_sdk/runtime/clients/mcp_discovery.py +342 -0
- alita_sdk/runtime/clients/mcp_manager.py +262 -0
- alita_sdk/runtime/clients/sandbox_client.py +8 -21
- alita_sdk/runtime/langchain/_constants_bkup.py +1318 -0
- alita_sdk/runtime/langchain/assistant.py +157 -39
- alita_sdk/runtime/langchain/constants.py +647 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py +315 -3
- alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py +103 -60
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLinesLoader.py +77 -0
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +10 -4
- alita_sdk/runtime/langchain/document_loaders/AlitaPowerPointLoader.py +226 -7
- alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py +5 -2
- alita_sdk/runtime/langchain/document_loaders/constants.py +40 -19
- alita_sdk/runtime/langchain/langraph_agent.py +405 -84
- alita_sdk/runtime/langchain/utils.py +106 -7
- alita_sdk/runtime/llms/preloaded.py +2 -6
- alita_sdk/runtime/models/mcp_models.py +61 -0
- alita_sdk/runtime/skills/__init__.py +91 -0
- alita_sdk/runtime/skills/callbacks.py +498 -0
- alita_sdk/runtime/skills/discovery.py +540 -0
- alita_sdk/runtime/skills/executor.py +610 -0
- alita_sdk/runtime/skills/input_builder.py +371 -0
- alita_sdk/runtime/skills/models.py +330 -0
- alita_sdk/runtime/skills/registry.py +355 -0
- alita_sdk/runtime/skills/skill_runner.py +330 -0
- alita_sdk/runtime/toolkits/__init__.py +31 -0
- alita_sdk/runtime/toolkits/application.py +29 -10
- alita_sdk/runtime/toolkits/artifact.py +20 -11
- alita_sdk/runtime/toolkits/datasource.py +13 -6
- alita_sdk/runtime/toolkits/mcp.py +783 -0
- alita_sdk/runtime/toolkits/mcp_config.py +1048 -0
- alita_sdk/runtime/toolkits/planning.py +178 -0
- alita_sdk/runtime/toolkits/skill_router.py +238 -0
- alita_sdk/runtime/toolkits/subgraph.py +251 -6
- alita_sdk/runtime/toolkits/tools.py +356 -69
- alita_sdk/runtime/toolkits/vectorstore.py +11 -5
- alita_sdk/runtime/tools/__init__.py +10 -3
- alita_sdk/runtime/tools/application.py +27 -6
- alita_sdk/runtime/tools/artifact.py +511 -28
- alita_sdk/runtime/tools/data_analysis.py +183 -0
- alita_sdk/runtime/tools/function.py +67 -35
- alita_sdk/runtime/tools/graph.py +10 -4
- alita_sdk/runtime/tools/image_generation.py +148 -46
- alita_sdk/runtime/tools/llm.py +1003 -128
- alita_sdk/runtime/tools/loop.py +3 -1
- alita_sdk/runtime/tools/loop_output.py +3 -1
- alita_sdk/runtime/tools/mcp_inspect_tool.py +284 -0
- alita_sdk/runtime/tools/mcp_remote_tool.py +181 -0
- alita_sdk/runtime/tools/mcp_server_tool.py +8 -5
- alita_sdk/runtime/tools/planning/__init__.py +36 -0
- alita_sdk/runtime/tools/planning/models.py +246 -0
- alita_sdk/runtime/tools/planning/wrapper.py +607 -0
- alita_sdk/runtime/tools/router.py +2 -4
- alita_sdk/runtime/tools/sandbox.py +65 -48
- alita_sdk/runtime/tools/skill_router.py +776 -0
- alita_sdk/runtime/tools/tool.py +3 -1
- alita_sdk/runtime/tools/vectorstore.py +9 -3
- alita_sdk/runtime/tools/vectorstore_base.py +70 -14
- alita_sdk/runtime/utils/AlitaCallback.py +137 -21
- alita_sdk/runtime/utils/constants.py +5 -1
- alita_sdk/runtime/utils/mcp_client.py +492 -0
- alita_sdk/runtime/utils/mcp_oauth.py +361 -0
- alita_sdk/runtime/utils/mcp_sse_client.py +434 -0
- alita_sdk/runtime/utils/mcp_tools_discovery.py +124 -0
- alita_sdk/runtime/utils/serialization.py +155 -0
- alita_sdk/runtime/utils/streamlit.py +40 -13
- alita_sdk/runtime/utils/toolkit_utils.py +30 -9
- alita_sdk/runtime/utils/utils.py +36 -0
- alita_sdk/tools/__init__.py +134 -35
- alita_sdk/tools/ado/repos/__init__.py +51 -32
- alita_sdk/tools/ado/repos/repos_wrapper.py +148 -89
- alita_sdk/tools/ado/test_plan/__init__.py +25 -9
- alita_sdk/tools/ado/test_plan/test_plan_wrapper.py +23 -1
- alita_sdk/tools/ado/utils.py +1 -18
- alita_sdk/tools/ado/wiki/__init__.py +25 -12
- alita_sdk/tools/ado/wiki/ado_wrapper.py +291 -22
- alita_sdk/tools/ado/work_item/__init__.py +26 -13
- alita_sdk/tools/ado/work_item/ado_wrapper.py +73 -11
- alita_sdk/tools/advanced_jira_mining/__init__.py +11 -8
- alita_sdk/tools/aws/delta_lake/__init__.py +13 -9
- alita_sdk/tools/aws/delta_lake/tool.py +5 -1
- alita_sdk/tools/azure_ai/search/__init__.py +11 -8
- alita_sdk/tools/azure_ai/search/api_wrapper.py +1 -1
- alita_sdk/tools/base/tool.py +5 -1
- alita_sdk/tools/base_indexer_toolkit.py +271 -84
- alita_sdk/tools/bitbucket/__init__.py +17 -11
- alita_sdk/tools/bitbucket/api_wrapper.py +59 -11
- alita_sdk/tools/bitbucket/cloud_api_wrapper.py +49 -35
- alita_sdk/tools/browser/__init__.py +5 -4
- alita_sdk/tools/carrier/__init__.py +5 -6
- alita_sdk/tools/carrier/backend_reports_tool.py +6 -6
- alita_sdk/tools/carrier/run_ui_test_tool.py +6 -6
- alita_sdk/tools/carrier/ui_reports_tool.py +5 -5
- alita_sdk/tools/chunkers/__init__.py +3 -1
- alita_sdk/tools/chunkers/code/treesitter/treesitter.py +37 -13
- alita_sdk/tools/chunkers/sematic/json_chunker.py +1 -0
- alita_sdk/tools/chunkers/sematic/markdown_chunker.py +97 -6
- alita_sdk/tools/chunkers/sematic/proposal_chunker.py +1 -1
- alita_sdk/tools/chunkers/universal_chunker.py +270 -0
- alita_sdk/tools/cloud/aws/__init__.py +10 -7
- alita_sdk/tools/cloud/azure/__init__.py +10 -7
- alita_sdk/tools/cloud/gcp/__init__.py +10 -7
- alita_sdk/tools/cloud/k8s/__init__.py +10 -7
- alita_sdk/tools/code/linter/__init__.py +10 -8
- alita_sdk/tools/code/loaders/codesearcher.py +3 -2
- alita_sdk/tools/code/sonar/__init__.py +11 -8
- alita_sdk/tools/code_indexer_toolkit.py +82 -22
- alita_sdk/tools/confluence/__init__.py +22 -16
- alita_sdk/tools/confluence/api_wrapper.py +107 -30
- alita_sdk/tools/confluence/loader.py +14 -2
- alita_sdk/tools/custom_open_api/__init__.py +12 -5
- alita_sdk/tools/elastic/__init__.py +11 -8
- alita_sdk/tools/elitea_base.py +493 -30
- alita_sdk/tools/figma/__init__.py +58 -11
- alita_sdk/tools/figma/api_wrapper.py +1235 -143
- alita_sdk/tools/figma/figma_client.py +73 -0
- alita_sdk/tools/figma/toon_tools.py +2748 -0
- alita_sdk/tools/github/__init__.py +14 -15
- alita_sdk/tools/github/github_client.py +224 -100
- alita_sdk/tools/github/graphql_client_wrapper.py +119 -33
- alita_sdk/tools/github/schemas.py +14 -5
- alita_sdk/tools/github/tool.py +5 -1
- alita_sdk/tools/github/tool_prompts.py +9 -22
- alita_sdk/tools/gitlab/__init__.py +16 -11
- alita_sdk/tools/gitlab/api_wrapper.py +218 -48
- alita_sdk/tools/gitlab_org/__init__.py +10 -9
- alita_sdk/tools/gitlab_org/api_wrapper.py +63 -64
- alita_sdk/tools/google/bigquery/__init__.py +13 -12
- alita_sdk/tools/google/bigquery/tool.py +5 -1
- alita_sdk/tools/google_places/__init__.py +11 -8
- alita_sdk/tools/google_places/api_wrapper.py +1 -1
- alita_sdk/tools/jira/__init__.py +17 -10
- alita_sdk/tools/jira/api_wrapper.py +92 -41
- alita_sdk/tools/keycloak/__init__.py +11 -8
- alita_sdk/tools/localgit/__init__.py +9 -3
- alita_sdk/tools/localgit/local_git.py +62 -54
- alita_sdk/tools/localgit/tool.py +5 -1
- alita_sdk/tools/memory/__init__.py +12 -4
- alita_sdk/tools/non_code_indexer_toolkit.py +1 -0
- alita_sdk/tools/ocr/__init__.py +11 -8
- alita_sdk/tools/openapi/__init__.py +491 -106
- alita_sdk/tools/openapi/api_wrapper.py +1368 -0
- alita_sdk/tools/openapi/tool.py +20 -0
- alita_sdk/tools/pandas/__init__.py +20 -12
- alita_sdk/tools/pandas/api_wrapper.py +38 -25
- alita_sdk/tools/pandas/dataframe/generator/base.py +3 -1
- alita_sdk/tools/postman/__init__.py +10 -9
- alita_sdk/tools/pptx/__init__.py +11 -10
- alita_sdk/tools/pptx/pptx_wrapper.py +1 -1
- alita_sdk/tools/qtest/__init__.py +31 -11
- alita_sdk/tools/qtest/api_wrapper.py +2135 -86
- alita_sdk/tools/rally/__init__.py +10 -9
- alita_sdk/tools/rally/api_wrapper.py +1 -1
- alita_sdk/tools/report_portal/__init__.py +12 -8
- alita_sdk/tools/salesforce/__init__.py +10 -8
- alita_sdk/tools/servicenow/__init__.py +17 -15
- alita_sdk/tools/servicenow/api_wrapper.py +1 -1
- alita_sdk/tools/sharepoint/__init__.py +10 -7
- alita_sdk/tools/sharepoint/api_wrapper.py +129 -38
- alita_sdk/tools/sharepoint/authorization_helper.py +191 -1
- alita_sdk/tools/sharepoint/utils.py +8 -2
- alita_sdk/tools/slack/__init__.py +10 -7
- alita_sdk/tools/slack/api_wrapper.py +2 -2
- alita_sdk/tools/sql/__init__.py +12 -9
- alita_sdk/tools/testio/__init__.py +10 -7
- alita_sdk/tools/testrail/__init__.py +11 -10
- alita_sdk/tools/testrail/api_wrapper.py +1 -1
- alita_sdk/tools/utils/__init__.py +9 -4
- alita_sdk/tools/utils/content_parser.py +103 -18
- alita_sdk/tools/utils/text_operations.py +410 -0
- alita_sdk/tools/utils/tool_prompts.py +79 -0
- alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +30 -13
- alita_sdk/tools/xray/__init__.py +13 -9
- alita_sdk/tools/yagmail/__init__.py +9 -3
- alita_sdk/tools/zephyr/__init__.py +10 -7
- alita_sdk/tools/zephyr_enterprise/__init__.py +11 -7
- alita_sdk/tools/zephyr_essential/__init__.py +10 -7
- alita_sdk/tools/zephyr_essential/api_wrapper.py +30 -13
- alita_sdk/tools/zephyr_essential/client.py +2 -2
- alita_sdk/tools/zephyr_scale/__init__.py +11 -8
- alita_sdk/tools/zephyr_scale/api_wrapper.py +2 -2
- alita_sdk/tools/zephyr_squad/__init__.py +10 -7
- {alita_sdk-0.3.379.dist-info → alita_sdk-0.3.627.dist-info}/METADATA +154 -8
- alita_sdk-0.3.627.dist-info/RECORD +468 -0
- alita_sdk-0.3.627.dist-info/entry_points.txt +2 -0
- alita_sdk-0.3.379.dist-info/RECORD +0 -360
- {alita_sdk-0.3.379.dist-info → alita_sdk-0.3.627.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.379.dist-info → alita_sdk-0.3.627.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.379.dist-info → alita_sdk-0.3.627.dist-info}/top_level.txt +0 -0
alita_sdk/runtime/clients/client.py

@@ -13,6 +13,7 @@ from langchain_core.messages import (
 from langchain_core.tools import ToolException
 from langgraph.store.base import BaseStore
 from langchain_openai import OpenAIEmbeddings, ChatOpenAI
+from langchain_anthropic import ChatAnthropic

 from ..langchain.assistant import Assistant as LangChainAssistant
 # from ..llamaindex.assistant import Assistant as LLamaAssistant
@@ -20,8 +21,9 @@ from .prompt import AlitaPrompt
 from .datasource import AlitaDataSource
 from .artifact import Artifact
 from ..langchain.chat_message_template import Jinja2TemplatedChatMessagesTemplate
-from ..utils.
-from ...tools import get_available_toolkit_models
+from ..utils.mcp_oauth import McpAuthorizationRequired
+from ...tools import get_available_toolkit_models, instantiate_toolkit
+from ...tools.base_indexer_toolkit import IndexTools

 logger = logging.getLogger(__name__)

@@ -42,6 +44,7 @@ class AlitaClient:
         self.base_url = base_url.rstrip('/')
         self.api_path = '/api/v1'
         self.llm_path = '/llm/v1'
+        self.allm_path = '/llm'
         self.project_id = project_id
         self.auth_token = auth_token
         self.headers = {
@@ -68,10 +71,15 @@ class AlitaClient:
         self.bucket_url = f"{self.base_url}{self.api_path}/artifacts/buckets/{self.project_id}"
         self.configurations_url = f'{self.base_url}{self.api_path}/integrations/integrations/default/{self.project_id}?section=configurations&unsecret=true'
         self.ai_section_url = f'{self.base_url}{self.api_path}/integrations/integrations/default/{self.project_id}?section=ai'
+        self.models_url = f'{self.base_url}{self.api_path}/configurations/models/{self.project_id}?include_shared=true'
         self.image_generation_url = f"{self.base_url}{self.llm_path}/images/generations"
         self.configurations: list = configurations or []
         self.model_timeout = kwargs.get('model_timeout', 120)
         self.model_image_generation = kwargs.get('model_image_generation')
+
+        # Cache for generated images to avoid token consumption
+        # This is used by image_generation and artifact toolkits to pass data via reference
+        self._generated_images_cache: Dict[str, Dict[str, Any]] = {}

     def get_mcp_toolkits(self):
         if user_id := self._get_real_user_id():
@@ -143,6 +151,19 @@ class AlitaClient:
         data = requests.get(url, headers=self.headers, verify=False).json()
         return data

+    def toolkit(self, toolkit_id: int):
+        url = f"{self.base_url}{self.api_path}/tool/prompt_lib/{self.project_id}/{toolkit_id}"
+        response = requests.get(url, headers=self.headers, verify=False)
+        if not response.ok:
+            raise ValueError(f"Failed to fetch toolkit {toolkit_id}: {response.text}")
+
+        tool_data = response.json()
+        if 'settings' not in tool_data:
+            tool_data['settings'] = {}
+        tool_data['settings']['alita'] = self
+
+        return instantiate_toolkit(tool_data)
+
     def get_list_of_apps(self):
         apps = []
         limit = 10
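A minimal usage sketch for the new AlitaClient.toolkit() helper shown above; the import path follows the file layout in this release, the base URL, project id, token and toolkit id are placeholders, and the constructor keywords are assumed from the attributes set in __init__:

from alita_sdk.runtime.clients.client import AlitaClient

client = AlitaClient(base_url="https://platform.example.com",   # placeholder
                     project_id=1,                              # placeholder
                     auth_token="<api-token>")                  # placeholder
# Fetches the toolkit definition from /tool/prompt_lib/..., injects this client
# into its settings, and returns whatever instantiate_toolkit() builds from it.
tools = client.toolkit(toolkit_id=42)                           # placeholder id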
@@ -175,6 +196,20 @@ class AlitaClient:
             return resp.json()
         return []

+    def get_available_models(self):
+        """Get list of available models from the configurations API.
+
+        Returns:
+            List of model dictionaries with 'name' and other properties,
+            or empty list if request fails.
+        """
+        resp = requests.get(self.models_url, headers=self.headers, verify=False)
+        if resp.ok:
+            data = resp.json()
+            # API returns {"items": [...], ...}
+            return data.get('items', [])
+        return []
+
     def get_embeddings(self, embedding_model: str) -> OpenAIEmbeddings:
         """
         Get an instance of OpenAIEmbeddings configured with the project ID and auth token.
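A short sketch of consuming the new get_available_models() helper; per its docstring each item is a model dictionary with at least a 'name' key, and the call returns an empty list when the request fails:

models = client.get_available_models()
model_names = [m.get('name') for m in models]  # e.g. pick a name to pass to get_llm()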
@@ -190,35 +225,104 @@ class AlitaClient:
             request_timeout=self.model_timeout
         )

-    def get_llm(self, model_name: str, model_config: dict)
+    def get_llm(self, model_name: str, model_config: dict):
         """
-        Get a ChatOpenAI model instance based on the model name and configuration.
+        Get a ChatOpenAI or ChatAnthropic model instance based on the model name and configuration.

         Args:
            model_name: Name of the model to retrieve
            model_config: Configuration parameters for the model

         Returns:
-            An instance of ChatOpenAI configured with the provided parameters.
+            An instance of ChatOpenAI or ChatAnthropic configured with the provided parameters.
         """
         if not model_name:
             raise ValueError("Model name must be provided")

-
+        # Determine if this is an Anthropic model
+        model_name_lower = model_name.lower()
+        is_anthropic = "anthropic" in model_name_lower or "claude" in model_name_lower

-
-
-
-
-
-
-
-
-
-
-
-
+        logger.info(f"Creating {'ChatAnthropic' if is_anthropic else 'ChatOpenAI'} model: {model_name} with config: {model_config}")
+
+        try:
+            from tools import this # pylint: disable=E0401,C0415
+            worker_config = this.for_module("indexer_worker").descriptor.config
+        except: # pylint: disable=W0702
+            worker_config = {}
+
+        use_responses_api = False
+
+        if worker_config and isinstance(worker_config, dict):
+            for target_name_tag in worker_config.get("use_responses_api_for", []):
+                if target_name_tag in model_name:
+                    use_responses_api = True
+                    break
+
+        # handle case when max_tokens are auto-configurable == -1 or None
+        llm_max_tokens = model_config.get("max_tokens", None)
+        if llm_max_tokens is None or llm_max_tokens == -1:
+            logger.warning(f'User selected `MAX COMPLETION TOKENS` as `auto` or value is None/missing')
+            # default number for a case when auto is selected for an agent
+            llm_max_tokens = 4000
+
+        if is_anthropic:
+            # ChatAnthropic configuration
+            # Anthropic requires max_tokens to be an integer, never None
+            target_kwargs = {
+                "base_url": f"{self.base_url}{self.allm_path}",
+                "model": model_name,
+                "api_key": self.auth_token,
+                "streaming": model_config.get("streaming", True),
+                "max_tokens": llm_max_tokens, # Always an integer now
+                "temperature": model_config.get("temperature"),
+                "max_retries": model_config.get("max_retries", 3),
+                "default_headers": {"openai-organization": str(self.project_id),
+                                    "Authorization": f"Bearer {self.auth_token}"},
+            }
+
+            # TODO": Check on ChatAnthropic client when they get "effort" support back
+            if model_config.get("reasoning_effort"):
+                if model_config["reasoning_effort"].lower() == "low":
+                    target_kwargs['thinking'] = {"type": "enabled", "budget_tokens": 2048}
+                    target_kwargs['temperature'] = 1
+                    target_kwargs["max_tokens"] = 2048 + target_kwargs["max_tokens"]
+                elif model_config["reasoning_effort"].lower() == "medium":
+                    target_kwargs['thinking'] = {"type": "enabled", "budget_tokens": 4096}
+                    target_kwargs['temperature'] = 1
+                    target_kwargs["max_tokens"] = 4096 + target_kwargs["max_tokens"]
+                elif model_config["reasoning_effort"].lower() == "high":
+                    target_kwargs['thinking'] = {"type": "enabled", "budget_tokens": 9092}
+                    target_kwargs['temperature'] = 1
+                    target_kwargs["max_tokens"] = 9092 + target_kwargs["max_tokens"]
+
+            # Add http_client if provided
+            if "http_client" in model_config:
+                target_kwargs["http_client"] = model_config["http_client"]
+
+            llm = ChatAnthropic(**target_kwargs)
+        else:
+            # ChatOpenAI configuration
+            target_kwargs = {
+                "base_url": f"{self.base_url}{self.llm_path}",
+                "model": model_name,
+                "api_key": self.auth_token,
+                "streaming": model_config.get("streaming", True),
+                "stream_usage": model_config.get("stream_usage", True),
+                "max_tokens": llm_max_tokens,
+                "temperature": model_config.get("temperature"),
+                "reasoning_effort": model_config.get("reasoning_effort"),
+                "max_retries": model_config.get("max_retries", 3),
+                "seed": model_config.get("seed", None),
+                "openai_organization": str(self.project_id),
+            }

+            if use_responses_api:
+                target_kwargs["use_responses_api"] = True
+
+            llm = ChatOpenAI(**target_kwargs)
+        return llm
+
     def generate_image(self,
                        prompt: str,
                        n: int = 1,
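A hedged usage sketch of the reworked get_llm() routing above; the model name is illustrative (any name containing "claude" or "anthropic" takes the ChatAnthropic branch, everything else goes to ChatOpenAI):

llm = client.get_llm(
    model_name="claude-sonnet",        # placeholder; "claude"/"anthropic" -> ChatAnthropic branch
    model_config={
        "max_tokens": -1,              # -1 or None falls back to the 4000-token default
        "temperature": 0.2,
        "reasoning_effort": "low",     # Anthropic branch: thinking enabled with a 2048-token budget
    },
)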
@@ -303,7 +407,9 @@ class AlitaClient:
                     app_type=None, memory=None, runtime='langchain',
                     application_variables: Optional[dict] = None,
                     version_details: Optional[dict] = None, store: Optional[BaseStore] = None,
-                    llm: Optional[ChatOpenAI] = None
+                    llm: Optional[ChatOpenAI] = None, mcp_tokens: Optional[dict] = None,
+                    conversation_id: Optional[str] = None, ignored_mcp_servers: Optional[list] = None,
+                    is_subgraph: bool = False):
         if tools is None:
             tools = []
         if chat_history is None:
@@ -323,11 +429,15 @@ class AlitaClient:
                 if var['name'] in application_variables:
                     var.update(application_variables[var['name']])
         if llm is None:
+            max_tokens = data['llm_settings'].get('max_tokens', 4000)
+            if max_tokens == -1:
+                # default nuber for case when auto is selected for agent
+                max_tokens = 4000
             llm = self.get_llm(
                 model_name=data['llm_settings']['model_name'],
                 model_config={
-                    "max_tokens":
-                    "
+                    "max_tokens": max_tokens,
+                    "reasoning_effort": data['llm_settings'].get('reasoning_effort'),
                     "temperature": data['llm_settings']['temperature'],
                     "model_project_id": data['llm_settings'].get('model_project_id'),
                 }
@@ -342,13 +452,20 @@ class AlitaClient:
             app_type = "react"
         elif app_type == 'autogen':
             app_type = "react"
+
+        # LangChainAssistant constructor calls get_tools() which may raise McpAuthorizationRequired
+        # The exception will propagate naturally to the indexer worker's outer handler
         if runtime == 'nonrunnable':
             return LangChainAssistant(self, data, llm, chat_history, app_type,
-                                      tools=tools, memory=memory, store=store
+                                      tools=tools, memory=memory, store=store, mcp_tokens=mcp_tokens,
+                                      conversation_id=conversation_id, ignored_mcp_servers=ignored_mcp_servers,
+                                      is_subgraph=is_subgraph)
         if runtime == 'langchain':
             return LangChainAssistant(self, data, llm,
                                       chat_history, app_type,
-                                      tools=tools, memory=memory, store=store
+                                      tools=tools, memory=memory, store=store, mcp_tokens=mcp_tokens,
+                                      conversation_id=conversation_id, ignored_mcp_servers=ignored_mcp_servers,
+                                      is_subgraph=is_subgraph).runnable()
         elif runtime == 'llama':
             raise NotImplementedError("LLama runtime is not supported")

@@ -416,11 +533,44 @@ class AlitaClient:
         return self._process_requst(data)

     def create_artifact(self, bucket_name, artifact_name, artifact_data):
+        # Sanitize filename to prevent regex errors during indexing
+        sanitized_name, was_modified = self._sanitize_artifact_name(artifact_name)
+        if was_modified:
+            logger.warning(f"Artifact filename sanitized: '{artifact_name}' -> '{sanitized_name}'")
+
         url = f'{self.artifacts_url}/{bucket_name.lower()}'
         data = requests.post(url, headers=self.headers, files={
-            'file': (
+            'file': (sanitized_name, artifact_data)
         }, verify=False)
         return self._process_requst(data)
+
+    @staticmethod
+    def _sanitize_artifact_name(filename: str) -> tuple:
+        """Sanitize filename for safe storage and regex pattern matching."""
+        import re
+        from pathlib import Path
+
+        if not filename or not filename.strip():
+            return "unnamed_file", True
+
+        original = filename
+        path_obj = Path(filename)
+        name = path_obj.stem
+        extension = path_obj.suffix
+
+        # Whitelist: alphanumeric, underscore, hyphen, space, Unicode letters/digits
+        sanitized_name = re.sub(r'[^\w\s-]', '', name, flags=re.UNICODE)
+        sanitized_name = re.sub(r'[-\s]+', '-', sanitized_name)
+        sanitized_name = sanitized_name.strip('-').strip()
+
+        if not sanitized_name:
+            sanitized_name = "file"
+
+        if extension:
+            extension = re.sub(r'[^\w.-]', '', extension, flags=re.UNICODE)
+
+        sanitized = sanitized_name + extension
+        return sanitized, (sanitized != original)

     def download_artifact(self, bucket_name, artifact_name):
         url = f'{self.artifact_url}/{bucket_name.lower()}/{artifact_name}'
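Based on the whitelist rules above, a name such as "report (final)!.pdf" should be stored as "report-final.pdf": the parentheses and exclamation mark are dropped, runs of whitespace collapse to a single hyphen, and the extension is preserved. A quick sketch of the expected behaviour:

name, changed = AlitaClient._sanitize_artifact_name("report (final)!.pdf")
# name == "report-final.pdf", changed is True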
@@ -562,25 +712,32 @@ class AlitaClient:
             monitoring_meta = tasknode_task.meta.get("monitoring", {})
             return monitoring_meta["user_id"]
         except Exception as e:
-            logger.
+            logger.debug(f"Error: Could not determine user ID for MCP tool: {e}")
         return None

     def predict_agent(self, llm: ChatOpenAI, instructions: str = "You are a helpful assistant.",
                       tools: Optional[list] = None, chat_history: Optional[List[Any]] = None,
                       memory=None, runtime='langchain', variables: Optional[list] = None,
-                      store: Optional[BaseStore] = None
+                      store: Optional[BaseStore] = None, debug_mode: Optional[bool] = False,
+                      mcp_tokens: Optional[dict] = None, conversation_id: Optional[str] = None,
+                      ignored_mcp_servers: Optional[list] = None, persona: Optional[str] = "generic"):
         """
         Create a predict-type agent with minimal configuration.

         Args:
            llm: The LLM to use
            instructions: System instructions for the agent
-            tools: Optional list of
+            tools: Optional list of tool configurations (not tool instances) to provide to the agent.
+                   Tool configs will be processed through get_tools() to create tool instances.
+                   Each tool config should have 'type', 'settings', etc.
            chat_history: Optional chat history
            memory: Optional memory/checkpointer
            runtime: Runtime type (default: 'langchain')
            variables: Optional list of variables for the agent
            store: Optional store for memory
+           debug_mode: Enable debug mode for cases when assistant can be initialized without tools
+           ignored_mcp_servers: Optional list of MCP server URLs to ignore (user chose to continue without auth)
+           persona: Default persona for chat: 'generic' or 'qa' (default: 'generic')

         Returns:
            Runnable agent ready for execution
@@ -594,17 +751,34 @@ class AlitaClient:

         # Create a minimal data structure for predict agent
         # All LLM settings are taken from the passed client instance
+        # Note: 'tools' here are tool CONFIGURATIONS, not tool instances
+        # They will be converted to tool instances by LangChainAssistant via get_tools()
         agent_data = {
             'instructions': instructions,
-            'tools': tools, #
+            'tools': tools, # Tool configs that will be processed by get_tools()
             'variables': variables
         }
-
-
+
+        # LangChainAssistant constructor calls get_tools() which may raise McpAuthorizationRequired
+        # The exception will propagate naturally to the indexer worker's outer handler
+        return LangChainAssistant(
+            self,
+            agent_data,
+            llm,
+            chat_history,
+            "predict",
+            memory=memory,
+            store=store,
+            debug_mode=debug_mode,
+            mcp_tokens=mcp_tokens,
+            conversation_id=conversation_id,
+            ignored_mcp_servers=ignored_mcp_servers,
+            persona=persona
+        ).runnable()

     def test_toolkit_tool(self, toolkit_config: dict, tool_name: str, tool_params: dict = None,
                           runtime_config: dict = None, llm_model: str = None,
-                          llm_config: dict = None) -> dict:
+                          llm_config: dict = None, mcp_tokens: dict = None) -> dict:
         """
         Test a single tool from a toolkit with given parameters and runtime callbacks.

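A minimal sketch of the extended predict_agent() signature, using only parameters documented in the diff; the instruction text is a placeholder and llm is assumed to come from get_llm():

agent = client.predict_agent(
    llm,
    instructions="Answer questions about the test results.",  # placeholder
    persona="qa",          # 'generic' or 'qa'
    debug_mode=True,       # allow initialization without tools
)
# Returns a runnable agent, i.e. LangChainAssistant(...).runnable()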
@@ -623,6 +797,7 @@ class AlitaClient:
                 - configurable: Additional configuration parameters
                 - tags: Tags for the execution
             llm_model: Name of the LLM model to use (default: 'gpt-4o-mini')
+            mcp_tokens: Optional dictionary of MCP OAuth tokens by server URL
             llm_config: Configuration for the LLM containing:
                 - max_tokens: Maximum tokens for response (default: 1000)
                 - temperature: Temperature for response generation (default: 0.1)
@@ -670,7 +845,6 @@ class AlitaClient:
             llm_config = {
                 'max_tokens': 1024,
                 'temperature': 0.1,
-                'top_p': 1.0
             }
         import logging
         logger = logging.getLogger(__name__)
@@ -741,7 +915,23 @@ class AlitaClient:
             }

         # Instantiate the toolkit with client and LLM support
-
+        try:
+            tools = instantiate_toolkit_with_client(toolkit_config, llm, self, mcp_tokens=mcp_tokens, use_prefix=False)
+        except McpAuthorizationRequired:
+            # Re-raise McpAuthorizationRequired to allow proper handling upstream
+            logger.info(f"McpAuthorizationRequired detected, re-raising")
+            raise
+        except Exception as toolkit_error:
+            # For other errors, return error response
+            return {
+                "success": False,
+                "error": f"Failed to instantiate toolkit '{toolkit_config.get('toolkit_name')}': {str(toolkit_error)}",
+                "tool_name": tool_name,
+                "toolkit_config": toolkit_config_parsed_json,
+                "llm_model": llm_model,
+                "events_dispatched": events_dispatched,
+                "execution_time_seconds": 0.0
+            }

         if not tools:
             return {
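A hedged sketch of calling test_toolkit_tool() with the new mcp_tokens argument; the toolkit config and tool name are hypothetical, and McpAuthorizationRequired is imported from the same module the diff itself uses:

from alita_sdk.runtime.utils.mcp_oauth import McpAuthorizationRequired

try:
    result = client.test_toolkit_tool(
        toolkit_config=toolkit_config,   # hypothetical toolkit config dict
        tool_name="some_tool",           # hypothetical tool name
        mcp_tokens=mcp_tokens,           # OAuth tokens keyed by canonical server URL
    )
except McpAuthorizationRequired:
    # The MCP server wants OAuth; run the auth flow and retry with fresh mcp_tokens.
    raise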
@@ -817,7 +1007,6 @@ class AlitaClient:
         if target_tool is None:
             available_tools = []
             base_available_tools = []
-            full_available_tools = []

             for tool in tools:
                 tool_name_attr = None
@@ -834,16 +1023,14 @@ class AlitaClient:
                 if base_name not in base_available_tools:
                     base_available_tools.append(base_name)

-                # Track full names separately
-                if TOOLKIT_SPLITTER in tool_name_attr:
-                    full_available_tools.append(tool_name_attr)
-
             # Create comprehensive error message
-            error_msg = f"Tool '{tool_name}' not found in toolkit '{toolkit_config.get('toolkit_name')}'
+            error_msg = f"Tool '{tool_name}' not found in toolkit '{toolkit_config.get('toolkit_name')}'.\n"

-
-
-
+            # Custom error for index tools
+            if toolkit_name in [tool.value for tool in IndexTools]:
+                error_msg += f" Please make sure proper PGVector configuration and embedding model are set in the platform.\n"
+
+            if base_available_tools:
                 error_msg += f" Available tools: {base_available_tools}"
             elif available_tools:
                 error_msg += f" Available tools: {available_tools}"
@@ -852,10 +1039,7 @@ class AlitaClient:

             # Add helpful hint about naming conventions
             if '___' in tool_name:
-                error_msg += f" Note:
-            elif full_available_tools:
-                possible_full_name = create_full_tool_name(tool_name, toolkit_name)
-                error_msg += f" Note: You provided a base name '{tool_name}'. The full name might be '{possible_full_name}'."
+                error_msg += f" Note: Tool names no longer use '___' prefixes. Try using just the base name '{extract_base_tool_name(tool_name)}'."

             return {
                 "success": False,
@@ -961,6 +1145,9 @@ class AlitaClient:
             }

         except Exception as e:
+            # Re-raise McpAuthorizationRequired to allow proper handling upstream
+            if isinstance(e, McpAuthorizationRequired):
+                raise
             logger = logging.getLogger(__name__)
             logger.error(f"Error in test_toolkit_tool: {str(e)}")
             return {
@@ -972,3 +1159,158 @@ class AlitaClient:
                 "events_dispatched": [],
                 "execution_time_seconds": 0.0
             }
+
+    def test_mcp_connection(self, toolkit_config: dict, mcp_tokens: dict = None) -> dict:
+        """
+        Test MCP server connection using protocol-level list_tools.
+
+        This method verifies MCP server connectivity and authentication by calling
+        the protocol-level tools/list JSON-RPC method (NOT executing a tool).
+        This is ideal for auth checks as it validates the connection without
+        requiring any tool execution.
+
+        Args:
+            toolkit_config: Configuration dictionary for the MCP toolkit containing:
+                - toolkit_name: Name of the toolkit
+                - settings: Dictionary with 'url', optional 'headers', 'session_id'
+            mcp_tokens: Optional dictionary of MCP OAuth tokens by server URL
+                Format: {canonical_url: {access_token: str, session_id: str}}
+
+        Returns:
+            Dictionary containing:
+            - success: Boolean indicating if the connection was successful
+            - tools: List of tool names available on the MCP server (if successful)
+            - tools_count: Number of tools discovered
+            - server_session_id: Session ID provided by the server (if any)
+            - error: Error message (if unsuccessful)
+            - toolkit_config: Original toolkit configuration
+
+        Raises:
+            McpAuthorizationRequired: If MCP server requires OAuth authorization
+
+        Example:
+            >>> config = {
+            ...     'toolkit_name': 'my-mcp-server',
+            ...     'type': 'mcp',
+            ...     'settings': {
+            ...         'url': 'https://mcp-server.example.com/mcp',
+            ...         'headers': {'X-Custom': 'value'}
+            ...     }
+            ... }
+            >>> result = client.test_mcp_connection(config)
+            >>> if result['success']:
+            ...     print(f"Connected! Found {result['tools_count']} tools")
+        """
+        import asyncio
+        import time
+        from ..utils.mcp_client import McpClient
+        from ..utils.mcp_oauth import canonical_resource
+
+        toolkit_name = toolkit_config.get('toolkit_name', 'unknown')
+        settings = toolkit_config.get('settings', {})
+
+        # Extract connection parameters
+        url = settings.get('url')
+        if not url:
+            return {
+                "success": False,
+                "error": "MCP toolkit configuration missing 'url' in settings",
+                "toolkit_config": toolkit_config,
+                "tools": [],
+                "tools_count": 0
+            }
+
+        headers = settings.get('headers') or {}
+        session_id = settings.get('session_id')
+
+        # Apply OAuth token if available
+        if mcp_tokens and url:
+            canonical_url = canonical_resource(url)
+            token_data = mcp_tokens.get(canonical_url)
+            if token_data:
+                if isinstance(token_data, dict):
+                    access_token = token_data.get('access_token')
+                    if not session_id:
+                        session_id = token_data.get('session_id')
+                else:
+                    # Backward compatibility: plain token string
+                    access_token = token_data
+
+                if access_token:
+                    headers = dict(headers)  # Copy to avoid mutating original
+                    headers.setdefault('Authorization', f'Bearer {access_token}')
+                    logger.info(f"[MCP Auth Check] Applied OAuth token for {canonical_url}")
+
+        logger.info(f"Testing MCP connection to '{toolkit_name}' at {url}")
+
+        start_time = time.time()
+
+        async def _test_connection():
+            client = McpClient(
+                url=url,
+                session_id=session_id,
+                headers=headers,
+                timeout=60  # Reasonable timeout for connection test
+            )
+
+            async with client:
+                # Initialize MCP protocol session
+                await client.initialize()
+                logger.info(f"[MCP Auth Check] Session initialized (transport={client.detected_transport})")
+
+                # Call protocol-level list_tools (tools/list JSON-RPC method)
+                tools = await client.list_tools()
+
+                return {
+                    "tools": tools,
+                    "server_session_id": client.server_session_id,
+                    "transport": client.detected_transport
+                }
+
+        try:
+            # Run async operation
+            try:
+                loop = asyncio.get_event_loop()
+                if loop.is_running():
+                    # If we're already in an async context, create a new task
+                    import concurrent.futures
+                    with concurrent.futures.ThreadPoolExecutor() as executor:
+                        future = executor.submit(asyncio.run, _test_connection())
+                        result = future.result(timeout=120)
+                else:
+                    result = loop.run_until_complete(_test_connection())
+            except RuntimeError:
+                # No event loop, create one
+                result = asyncio.run(_test_connection())
+
+            execution_time = time.time() - start_time
+
+            # Extract tool names for the response
+            tool_names = [tool.get('name', 'unknown') for tool in result.get('tools', [])]
+
+            logger.info(f"[MCP Auth Check] Connection successful to '{toolkit_name}': {len(tool_names)} tools in {execution_time:.3f}s")
+
+            return {
+                "success": True,
+                "tools": tool_names,
+                "tools_count": len(tool_names),
+                "server_session_id": result.get('server_session_id'),
+                "transport": result.get('transport'),
+                "toolkit_config": toolkit_config,
+                "execution_time_seconds": execution_time
+            }
+
+        except McpAuthorizationRequired:
+            # Re-raise to allow proper handling upstream
+            raise
+        except Exception as e:
+            execution_time = time.time() - start_time
+            logger.error(f"[MCP Auth Check] Connection failed to '{toolkit_name}': {str(e)}")
+            return {
+                "success": False,
+                "error": f"MCP connection failed: {str(e)}",
+                "toolkit_config": toolkit_config,
+                "tools": [],
+                "tools_count": 0,
+                "execution_time_seconds": execution_time
+            }