alita-sdk 0.3.257__py3-none-any.whl → 0.3.584__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of alita-sdk might be problematic.

Files changed (281)
  1. alita_sdk/cli/__init__.py +10 -0
  2. alita_sdk/cli/__main__.py +17 -0
  3. alita_sdk/cli/agent/__init__.py +5 -0
  4. alita_sdk/cli/agent/default.py +258 -0
  5. alita_sdk/cli/agent_executor.py +155 -0
  6. alita_sdk/cli/agent_loader.py +215 -0
  7. alita_sdk/cli/agent_ui.py +228 -0
  8. alita_sdk/cli/agents.py +3794 -0
  9. alita_sdk/cli/callbacks.py +647 -0
  10. alita_sdk/cli/cli.py +168 -0
  11. alita_sdk/cli/config.py +306 -0
  12. alita_sdk/cli/context/__init__.py +30 -0
  13. alita_sdk/cli/context/cleanup.py +198 -0
  14. alita_sdk/cli/context/manager.py +731 -0
  15. alita_sdk/cli/context/message.py +285 -0
  16. alita_sdk/cli/context/strategies.py +289 -0
  17. alita_sdk/cli/context/token_estimation.py +127 -0
  18. alita_sdk/cli/formatting.py +182 -0
  19. alita_sdk/cli/input_handler.py +419 -0
  20. alita_sdk/cli/inventory.py +1073 -0
  21. alita_sdk/cli/mcp_loader.py +315 -0
  22. alita_sdk/cli/toolkit.py +327 -0
  23. alita_sdk/cli/toolkit_loader.py +85 -0
  24. alita_sdk/cli/tools/__init__.py +43 -0
  25. alita_sdk/cli/tools/approval.py +224 -0
  26. alita_sdk/cli/tools/filesystem.py +1751 -0
  27. alita_sdk/cli/tools/planning.py +389 -0
  28. alita_sdk/cli/tools/terminal.py +414 -0
  29. alita_sdk/community/__init__.py +72 -12
  30. alita_sdk/community/inventory/__init__.py +236 -0
  31. alita_sdk/community/inventory/config.py +257 -0
  32. alita_sdk/community/inventory/enrichment.py +2137 -0
  33. alita_sdk/community/inventory/extractors.py +1469 -0
  34. alita_sdk/community/inventory/ingestion.py +3172 -0
  35. alita_sdk/community/inventory/knowledge_graph.py +1457 -0
  36. alita_sdk/community/inventory/parsers/__init__.py +218 -0
  37. alita_sdk/community/inventory/parsers/base.py +295 -0
  38. alita_sdk/community/inventory/parsers/csharp_parser.py +907 -0
  39. alita_sdk/community/inventory/parsers/go_parser.py +851 -0
  40. alita_sdk/community/inventory/parsers/html_parser.py +389 -0
  41. alita_sdk/community/inventory/parsers/java_parser.py +593 -0
  42. alita_sdk/community/inventory/parsers/javascript_parser.py +629 -0
  43. alita_sdk/community/inventory/parsers/kotlin_parser.py +768 -0
  44. alita_sdk/community/inventory/parsers/markdown_parser.py +362 -0
  45. alita_sdk/community/inventory/parsers/python_parser.py +604 -0
  46. alita_sdk/community/inventory/parsers/rust_parser.py +858 -0
  47. alita_sdk/community/inventory/parsers/swift_parser.py +832 -0
  48. alita_sdk/community/inventory/parsers/text_parser.py +322 -0
  49. alita_sdk/community/inventory/parsers/yaml_parser.py +370 -0
  50. alita_sdk/community/inventory/patterns/__init__.py +61 -0
  51. alita_sdk/community/inventory/patterns/ast_adapter.py +380 -0
  52. alita_sdk/community/inventory/patterns/loader.py +348 -0
  53. alita_sdk/community/inventory/patterns/registry.py +198 -0
  54. alita_sdk/community/inventory/presets.py +535 -0
  55. alita_sdk/community/inventory/retrieval.py +1403 -0
  56. alita_sdk/community/inventory/toolkit.py +173 -0
  57. alita_sdk/community/inventory/toolkit_utils.py +176 -0
  58. alita_sdk/community/inventory/visualize.py +1370 -0
  59. alita_sdk/configurations/__init__.py +11 -0
  60. alita_sdk/configurations/ado.py +148 -2
  61. alita_sdk/configurations/azure_search.py +1 -1
  62. alita_sdk/configurations/bigquery.py +1 -1
  63. alita_sdk/configurations/bitbucket.py +94 -2
  64. alita_sdk/configurations/browser.py +18 -0
  65. alita_sdk/configurations/carrier.py +19 -0
  66. alita_sdk/configurations/confluence.py +130 -1
  67. alita_sdk/configurations/delta_lake.py +1 -1
  68. alita_sdk/configurations/figma.py +76 -5
  69. alita_sdk/configurations/github.py +65 -1
  70. alita_sdk/configurations/gitlab.py +81 -0
  71. alita_sdk/configurations/google_places.py +17 -0
  72. alita_sdk/configurations/jira.py +103 -0
  73. alita_sdk/configurations/openapi.py +323 -0
  74. alita_sdk/configurations/postman.py +1 -1
  75. alita_sdk/configurations/qtest.py +72 -3
  76. alita_sdk/configurations/report_portal.py +115 -0
  77. alita_sdk/configurations/salesforce.py +19 -0
  78. alita_sdk/configurations/service_now.py +1 -12
  79. alita_sdk/configurations/sharepoint.py +167 -0
  80. alita_sdk/configurations/sonar.py +18 -0
  81. alita_sdk/configurations/sql.py +20 -0
  82. alita_sdk/configurations/testio.py +101 -0
  83. alita_sdk/configurations/testrail.py +88 -0
  84. alita_sdk/configurations/xray.py +94 -1
  85. alita_sdk/configurations/zephyr_enterprise.py +94 -1
  86. alita_sdk/configurations/zephyr_essential.py +95 -0
  87. alita_sdk/runtime/clients/artifact.py +21 -4
  88. alita_sdk/runtime/clients/client.py +458 -67
  89. alita_sdk/runtime/clients/mcp_discovery.py +342 -0
  90. alita_sdk/runtime/clients/mcp_manager.py +262 -0
  91. alita_sdk/runtime/clients/sandbox_client.py +352 -0
  92. alita_sdk/runtime/langchain/_constants_bkup.py +1318 -0
  93. alita_sdk/runtime/langchain/assistant.py +183 -43
  94. alita_sdk/runtime/langchain/constants.py +647 -1
  95. alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py +315 -3
  96. alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py +209 -31
  97. alita_sdk/runtime/langchain/document_loaders/AlitaImageLoader.py +1 -1
  98. alita_sdk/runtime/langchain/document_loaders/AlitaJSONLinesLoader.py +77 -0
  99. alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +10 -3
  100. alita_sdk/runtime/langchain/document_loaders/AlitaMarkdownLoader.py +66 -0
  101. alita_sdk/runtime/langchain/document_loaders/AlitaPDFLoader.py +79 -10
  102. alita_sdk/runtime/langchain/document_loaders/AlitaPowerPointLoader.py +52 -15
  103. alita_sdk/runtime/langchain/document_loaders/AlitaPythonLoader.py +9 -0
  104. alita_sdk/runtime/langchain/document_loaders/AlitaTableLoader.py +1 -4
  105. alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py +15 -2
  106. alita_sdk/runtime/langchain/document_loaders/ImageParser.py +30 -0
  107. alita_sdk/runtime/langchain/document_loaders/constants.py +189 -41
  108. alita_sdk/runtime/langchain/interfaces/llm_processor.py +4 -2
  109. alita_sdk/runtime/langchain/langraph_agent.py +493 -105
  110. alita_sdk/runtime/langchain/utils.py +118 -8
  111. alita_sdk/runtime/llms/preloaded.py +2 -6
  112. alita_sdk/runtime/models/mcp_models.py +61 -0
  113. alita_sdk/runtime/skills/__init__.py +91 -0
  114. alita_sdk/runtime/skills/callbacks.py +498 -0
  115. alita_sdk/runtime/skills/discovery.py +540 -0
  116. alita_sdk/runtime/skills/executor.py +610 -0
  117. alita_sdk/runtime/skills/input_builder.py +371 -0
  118. alita_sdk/runtime/skills/models.py +330 -0
  119. alita_sdk/runtime/skills/registry.py +355 -0
  120. alita_sdk/runtime/skills/skill_runner.py +330 -0
  121. alita_sdk/runtime/toolkits/__init__.py +28 -0
  122. alita_sdk/runtime/toolkits/application.py +14 -4
  123. alita_sdk/runtime/toolkits/artifact.py +25 -9
  124. alita_sdk/runtime/toolkits/datasource.py +13 -6
  125. alita_sdk/runtime/toolkits/mcp.py +782 -0
  126. alita_sdk/runtime/toolkits/planning.py +178 -0
  127. alita_sdk/runtime/toolkits/skill_router.py +238 -0
  128. alita_sdk/runtime/toolkits/subgraph.py +11 -6
  129. alita_sdk/runtime/toolkits/tools.py +314 -70
  130. alita_sdk/runtime/toolkits/vectorstore.py +11 -5
  131. alita_sdk/runtime/tools/__init__.py +24 -0
  132. alita_sdk/runtime/tools/application.py +16 -4
  133. alita_sdk/runtime/tools/artifact.py +367 -33
  134. alita_sdk/runtime/tools/data_analysis.py +183 -0
  135. alita_sdk/runtime/tools/function.py +100 -4
  136. alita_sdk/runtime/tools/graph.py +81 -0
  137. alita_sdk/runtime/tools/image_generation.py +218 -0
  138. alita_sdk/runtime/tools/llm.py +1032 -177
  139. alita_sdk/runtime/tools/loop.py +3 -1
  140. alita_sdk/runtime/tools/loop_output.py +3 -1
  141. alita_sdk/runtime/tools/mcp_inspect_tool.py +284 -0
  142. alita_sdk/runtime/tools/mcp_remote_tool.py +181 -0
  143. alita_sdk/runtime/tools/mcp_server_tool.py +3 -1
  144. alita_sdk/runtime/tools/planning/__init__.py +36 -0
  145. alita_sdk/runtime/tools/planning/models.py +246 -0
  146. alita_sdk/runtime/tools/planning/wrapper.py +607 -0
  147. alita_sdk/runtime/tools/router.py +2 -1
  148. alita_sdk/runtime/tools/sandbox.py +375 -0
  149. alita_sdk/runtime/tools/skill_router.py +776 -0
  150. alita_sdk/runtime/tools/tool.py +3 -1
  151. alita_sdk/runtime/tools/vectorstore.py +69 -65
  152. alita_sdk/runtime/tools/vectorstore_base.py +163 -90
  153. alita_sdk/runtime/utils/AlitaCallback.py +137 -21
  154. alita_sdk/runtime/utils/constants.py +5 -1
  155. alita_sdk/runtime/utils/mcp_client.py +492 -0
  156. alita_sdk/runtime/utils/mcp_oauth.py +361 -0
  157. alita_sdk/runtime/utils/mcp_sse_client.py +434 -0
  158. alita_sdk/runtime/utils/mcp_tools_discovery.py +124 -0
  159. alita_sdk/runtime/utils/streamlit.py +41 -14
  160. alita_sdk/runtime/utils/toolkit_utils.py +28 -9
  161. alita_sdk/runtime/utils/utils.py +48 -0
  162. alita_sdk/tools/__init__.py +135 -37
  163. alita_sdk/tools/ado/__init__.py +2 -2
  164. alita_sdk/tools/ado/repos/__init__.py +16 -19
  165. alita_sdk/tools/ado/repos/repos_wrapper.py +12 -20
  166. alita_sdk/tools/ado/test_plan/__init__.py +27 -8
  167. alita_sdk/tools/ado/test_plan/test_plan_wrapper.py +56 -28
  168. alita_sdk/tools/ado/wiki/__init__.py +28 -12
  169. alita_sdk/tools/ado/wiki/ado_wrapper.py +114 -40
  170. alita_sdk/tools/ado/work_item/__init__.py +28 -12
  171. alita_sdk/tools/ado/work_item/ado_wrapper.py +95 -11
  172. alita_sdk/tools/advanced_jira_mining/__init__.py +13 -8
  173. alita_sdk/tools/aws/delta_lake/__init__.py +15 -11
  174. alita_sdk/tools/aws/delta_lake/tool.py +5 -1
  175. alita_sdk/tools/azure_ai/search/__init__.py +14 -8
  176. alita_sdk/tools/base/tool.py +5 -1
  177. alita_sdk/tools/base_indexer_toolkit.py +454 -110
  178. alita_sdk/tools/bitbucket/__init__.py +28 -19
  179. alita_sdk/tools/bitbucket/api_wrapper.py +285 -27
  180. alita_sdk/tools/bitbucket/cloud_api_wrapper.py +5 -5
  181. alita_sdk/tools/browser/__init__.py +41 -16
  182. alita_sdk/tools/browser/crawler.py +3 -1
  183. alita_sdk/tools/browser/utils.py +15 -6
  184. alita_sdk/tools/carrier/__init__.py +18 -17
  185. alita_sdk/tools/carrier/backend_reports_tool.py +8 -4
  186. alita_sdk/tools/carrier/excel_reporter.py +8 -4
  187. alita_sdk/tools/chunkers/__init__.py +3 -1
  188. alita_sdk/tools/chunkers/code/codeparser.py +1 -1
  189. alita_sdk/tools/chunkers/sematic/json_chunker.py +2 -1
  190. alita_sdk/tools/chunkers/sematic/markdown_chunker.py +97 -6
  191. alita_sdk/tools/chunkers/sematic/proposal_chunker.py +1 -1
  192. alita_sdk/tools/chunkers/universal_chunker.py +270 -0
  193. alita_sdk/tools/cloud/aws/__init__.py +12 -7
  194. alita_sdk/tools/cloud/azure/__init__.py +12 -7
  195. alita_sdk/tools/cloud/gcp/__init__.py +12 -7
  196. alita_sdk/tools/cloud/k8s/__init__.py +12 -7
  197. alita_sdk/tools/code/linter/__init__.py +10 -8
  198. alita_sdk/tools/code/loaders/codesearcher.py +3 -2
  199. alita_sdk/tools/code/sonar/__init__.py +21 -13
  200. alita_sdk/tools/code_indexer_toolkit.py +199 -0
  201. alita_sdk/tools/confluence/__init__.py +22 -14
  202. alita_sdk/tools/confluence/api_wrapper.py +197 -58
  203. alita_sdk/tools/confluence/loader.py +14 -2
  204. alita_sdk/tools/custom_open_api/__init__.py +12 -5
  205. alita_sdk/tools/elastic/__init__.py +11 -8
  206. alita_sdk/tools/elitea_base.py +546 -64
  207. alita_sdk/tools/figma/__init__.py +60 -11
  208. alita_sdk/tools/figma/api_wrapper.py +1400 -167
  209. alita_sdk/tools/figma/figma_client.py +73 -0
  210. alita_sdk/tools/figma/toon_tools.py +2748 -0
  211. alita_sdk/tools/github/__init__.py +18 -17
  212. alita_sdk/tools/github/api_wrapper.py +9 -26
  213. alita_sdk/tools/github/github_client.py +81 -12
  214. alita_sdk/tools/github/schemas.py +2 -1
  215. alita_sdk/tools/github/tool.py +5 -1
  216. alita_sdk/tools/gitlab/__init__.py +19 -13
  217. alita_sdk/tools/gitlab/api_wrapper.py +256 -80
  218. alita_sdk/tools/gitlab_org/__init__.py +14 -10
  219. alita_sdk/tools/google/bigquery/__init__.py +14 -13
  220. alita_sdk/tools/google/bigquery/tool.py +5 -1
  221. alita_sdk/tools/google_places/__init__.py +21 -11
  222. alita_sdk/tools/jira/__init__.py +22 -11
  223. alita_sdk/tools/jira/api_wrapper.py +315 -168
  224. alita_sdk/tools/keycloak/__init__.py +11 -8
  225. alita_sdk/tools/localgit/__init__.py +9 -3
  226. alita_sdk/tools/localgit/local_git.py +62 -54
  227. alita_sdk/tools/localgit/tool.py +5 -1
  228. alita_sdk/tools/memory/__init__.py +38 -14
  229. alita_sdk/tools/non_code_indexer_toolkit.py +7 -2
  230. alita_sdk/tools/ocr/__init__.py +11 -8
  231. alita_sdk/tools/openapi/__init__.py +491 -106
  232. alita_sdk/tools/openapi/api_wrapper.py +1357 -0
  233. alita_sdk/tools/openapi/tool.py +20 -0
  234. alita_sdk/tools/pandas/__init__.py +20 -12
  235. alita_sdk/tools/pandas/api_wrapper.py +40 -45
  236. alita_sdk/tools/pandas/dataframe/generator/base.py +3 -1
  237. alita_sdk/tools/postman/__init__.py +11 -11
  238. alita_sdk/tools/postman/api_wrapper.py +19 -8
  239. alita_sdk/tools/postman/postman_analysis.py +8 -1
  240. alita_sdk/tools/pptx/__init__.py +11 -10
  241. alita_sdk/tools/qtest/__init__.py +22 -14
  242. alita_sdk/tools/qtest/api_wrapper.py +1784 -88
  243. alita_sdk/tools/rally/__init__.py +13 -10
  244. alita_sdk/tools/report_portal/__init__.py +23 -16
  245. alita_sdk/tools/salesforce/__init__.py +22 -16
  246. alita_sdk/tools/servicenow/__init__.py +21 -16
  247. alita_sdk/tools/servicenow/api_wrapper.py +1 -1
  248. alita_sdk/tools/sharepoint/__init__.py +17 -14
  249. alita_sdk/tools/sharepoint/api_wrapper.py +179 -39
  250. alita_sdk/tools/sharepoint/authorization_helper.py +191 -1
  251. alita_sdk/tools/sharepoint/utils.py +8 -2
  252. alita_sdk/tools/slack/__init__.py +13 -8
  253. alita_sdk/tools/sql/__init__.py +22 -19
  254. alita_sdk/tools/sql/api_wrapper.py +71 -23
  255. alita_sdk/tools/testio/__init__.py +21 -13
  256. alita_sdk/tools/testrail/__init__.py +13 -11
  257. alita_sdk/tools/testrail/api_wrapper.py +214 -46
  258. alita_sdk/tools/utils/__init__.py +28 -4
  259. alita_sdk/tools/utils/content_parser.py +241 -55
  260. alita_sdk/tools/utils/text_operations.py +254 -0
  261. alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +83 -27
  262. alita_sdk/tools/xray/__init__.py +18 -14
  263. alita_sdk/tools/xray/api_wrapper.py +58 -113
  264. alita_sdk/tools/yagmail/__init__.py +9 -3
  265. alita_sdk/tools/zephyr/__init__.py +12 -7
  266. alita_sdk/tools/zephyr_enterprise/__init__.py +16 -9
  267. alita_sdk/tools/zephyr_enterprise/api_wrapper.py +30 -15
  268. alita_sdk/tools/zephyr_essential/__init__.py +16 -10
  269. alita_sdk/tools/zephyr_essential/api_wrapper.py +297 -54
  270. alita_sdk/tools/zephyr_essential/client.py +6 -4
  271. alita_sdk/tools/zephyr_scale/__init__.py +13 -8
  272. alita_sdk/tools/zephyr_scale/api_wrapper.py +39 -31
  273. alita_sdk/tools/zephyr_squad/__init__.py +12 -7
  274. {alita_sdk-0.3.257.dist-info → alita_sdk-0.3.584.dist-info}/METADATA +184 -37
  275. alita_sdk-0.3.584.dist-info/RECORD +452 -0
  276. alita_sdk-0.3.584.dist-info/entry_points.txt +2 -0
  277. alita_sdk/tools/bitbucket/tools.py +0 -304
  278. alita_sdk-0.3.257.dist-info/RECORD +0 -343
  279. {alita_sdk-0.3.257.dist-info → alita_sdk-0.3.584.dist-info}/WHEEL +0 -0
  280. {alita_sdk-0.3.257.dist-info → alita_sdk-0.3.584.dist-info}/licenses/LICENSE +0 -0
  281. {alita_sdk-0.3.257.dist-info → alita_sdk-0.3.584.dist-info}/top_level.txt +0 -0
alita_sdk/runtime/clients/client.py

@@ -13,6 +13,7 @@ from langchain_core.messages import (
  from langchain_core.tools import ToolException
  from langgraph.store.base import BaseStore
  from langchain_openai import OpenAIEmbeddings, ChatOpenAI
+ from langchain_anthropic import ChatAnthropic
 
  from ..langchain.assistant import Assistant as LangChainAssistant
  # from ..llamaindex.assistant import Assistant as LLamaAssistant
@@ -20,7 +21,9 @@ from .prompt import AlitaPrompt
  from .datasource import AlitaDataSource
  from .artifact import Artifact
  from ..langchain.chat_message_template import Jinja2TemplatedChatMessagesTemplate
- from ...tools import get_available_toolkit_models
+ from ..utils.mcp_oauth import McpAuthorizationRequired
+ from ...tools import get_available_toolkit_models, instantiate_toolkit
+ from ...tools.base_indexer_toolkit import IndexTools
 
  logger = logging.getLogger(__name__)
 
@@ -41,6 +44,7 @@ class AlitaClient:
  self.base_url = base_url.rstrip('/')
  self.api_path = '/api/v1'
  self.llm_path = '/llm/v1'
+ self.allm_path = '/llm'
  self.project_id = project_id
  self.auth_token = auth_token
  self.headers = {
@@ -67,7 +71,15 @@ class AlitaClient:
  self.bucket_url = f"{self.base_url}{self.api_path}/artifacts/buckets/{self.project_id}"
  self.configurations_url = f'{self.base_url}{self.api_path}/integrations/integrations/default/{self.project_id}?section=configurations&unsecret=true'
  self.ai_section_url = f'{self.base_url}{self.api_path}/integrations/integrations/default/{self.project_id}?section=ai'
+ self.models_url = f'{self.base_url}{self.api_path}/configurations/models/{self.project_id}?include_shared=true'
+ self.image_generation_url = f"{self.base_url}{self.llm_path}/images/generations"
  self.configurations: list = configurations or []
+ self.model_timeout = kwargs.get('model_timeout', 120)
+ self.model_image_generation = kwargs.get('model_image_generation')
+
+ # Cache for generated images to avoid token consumption
+ # This is used by image_generation and artifact toolkits to pass data via reference
+ self._generated_images_cache: Dict[str, Dict[str, Any]] = {}
 
  def get_mcp_toolkits(self):
  if user_id := self._get_real_user_id():
@@ -80,8 +92,23 @@
  def mcp_tool_call(self, params: dict[str, Any]):
  if user_id := self._get_real_user_id():
  url = f"{self.mcp_tools_call}/{user_id}"
- data = requests.post(url, headers=self.headers, json=params, verify=False).json()
- return data
+ #
+ # This loop iterates over each key-value pair in the arguments dictionary,
+ # and if a value is a Pydantic object, it replaces it with its dictionary representation using .dict().
+ for arg_name, arg_value in params.get('params', {}).get('arguments', {}).items():
+ if isinstance(arg_value, list):
+ params['params']['arguments'][arg_name] = [
+ item.dict() if hasattr(item, "dict") and callable(item.dict) else item
+ for item in arg_value
+ ]
+ elif hasattr(arg_value, "dict") and callable(arg_value.dict):
+ params['params']['arguments'][arg_name] = arg_value.dict()
+ #
+ response = requests.post(url, headers=self.headers, json=params, verify=False)
+ try:
+ return response.json()
+ except (ValueError, TypeError):
+ return response.text
  else:
  return f"Error: Could not determine user ID for MCP tool call"
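The new body of `mcp_tool_call` normalizes tool arguments before POSTing them, because Pydantic model instances are not JSON-serializable as-is. A standalone sketch of the same normalization; the `Attachment` model and `normalize_arguments` helper are illustrative, not part of the SDK:

```python
from pydantic import BaseModel


class Attachment(BaseModel):
    # Hypothetical argument model, used only to illustrate the conversion.
    name: str
    url: str


def normalize_arguments(arguments: dict) -> dict:
    """Replace Pydantic objects (or lists of them) with plain dicts via .dict()."""
    for key, value in arguments.items():
        if isinstance(value, list):
            arguments[key] = [
                item.dict() if hasattr(item, "dict") and callable(item.dict) else item
                for item in value
            ]
        elif hasattr(value, "dict") and callable(value.dict):
            arguments[key] = value.dict()
    return arguments


args = {"files": [Attachment(name="spec.md", url="https://example.com/spec.md")], "limit": 5}
print(normalize_arguments(args))
# {'files': [{'name': 'spec.md', 'url': 'https://example.com/spec.md'}], 'limit': 5}
```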
@@ -124,6 +151,19 @@
  data = requests.get(url, headers=self.headers, verify=False).json()
  return data
 
+ def toolkit(self, toolkit_id: int):
+ url = f"{self.base_url}{self.api_path}/tool/prompt_lib/{self.project_id}/{toolkit_id}"
+ response = requests.get(url, headers=self.headers, verify=False)
+ if not response.ok:
+ raise ValueError(f"Failed to fetch toolkit {toolkit_id}: {response.text}")
+
+ tool_data = response.json()
+ if 'settings' not in tool_data:
+ tool_data['settings'] = {}
+ tool_data['settings']['alita'] = self
+
+ return instantiate_toolkit(tool_data)
+
  def get_list_of_apps(self):
  apps = []
  limit = 10
@@ -156,6 +196,20 @@
  return resp.json()
  return []
 
+ def get_available_models(self):
+ """Get list of available models from the configurations API.
+
+ Returns:
+ List of model dictionaries with 'name' and other properties,
+ or empty list if request fails.
+ """
+ resp = requests.get(self.models_url, headers=self.headers, verify=False)
+ if resp.ok:
+ data = resp.json()
+ # API returns {"items": [...], ...}
+ return data.get('items', [])
+ return []
+
  def get_embeddings(self, embedding_model: str) -> OpenAIEmbeddings:
  """
  Get an instance of OpenAIEmbeddings configured with the project ID and auth token.
@@ -168,38 +222,160 @@
  model=embedding_model,
  api_key=self.auth_token,
  openai_organization=str(self.project_id),
+ request_timeout=self.model_timeout
  )
 
- def get_llm(self, model_name: str, model_config: dict) -> ChatOpenAI:
+ def get_llm(self, model_name: str, model_config: dict):
  """
- Get a ChatOpenAI model instance based on the model name and configuration.
+ Get a ChatOpenAI or ChatAnthropic model instance based on the model name and configuration.
 
  Args:
  model_name: Name of the model to retrieve
  model_config: Configuration parameters for the model
 
  Returns:
- An instance of ChatOpenAI configured with the provided parameters.
+ An instance of ChatOpenAI or ChatAnthropic configured with the provided parameters.
  """
  if not model_name:
  raise ValueError("Model name must be provided")
 
- logger.info(f"Creating ChatOpenAI model: {model_name} with config: {model_config}")
+ # Determine if this is an Anthropic model
+ model_name_lower = model_name.lower()
+ is_anthropic = "anthropic" in model_name_lower or "claude" in model_name_lower
 
- return ChatOpenAI(
- base_url=f"{self.base_url}{self.llm_path}",
- model=model_name,
- api_key=self.auth_token,
- streaming=model_config.get("streaming", True),
- stream_usage=model_config.get("stream_usage", True),
- max_tokens=model_config.get("max_tokens", None),
- top_p=model_config.get("top_p"),
- temperature=model_config.get("temperature"),
- max_retries=model_config.get("max_retries", 3),
- seed=model_config.get("seed", None),
- openai_organization=str(self.project_id),
- )
+ logger.info(f"Creating {'ChatAnthropic' if is_anthropic else 'ChatOpenAI'} model: {model_name} with config: {model_config}")
+
+ try:
+ from tools import this # pylint: disable=E0401,C0415
+ worker_config = this.for_module("indexer_worker").descriptor.config
+ except: # pylint: disable=W0702
+ worker_config = {}
+
+ use_responses_api = False
+
+ if worker_config and isinstance(worker_config, dict):
+ for target_name_tag in worker_config.get("use_responses_api_for", []):
+ if target_name_tag in model_name:
+ use_responses_api = True
+ break
+
+ # handle case when max_tokens are auto-configurable == -1 or None
+ llm_max_tokens = model_config.get("max_tokens", None)
+ if llm_max_tokens is None or llm_max_tokens == -1:
+ logger.warning(f'User selected `MAX COMPLETION TOKENS` as `auto` or value is None/missing')
+ # default number for a case when auto is selected for an agent
+ llm_max_tokens = 4000
+
+ if is_anthropic:
+ # ChatAnthropic configuration
+ # Anthropic requires max_tokens to be an integer, never None
+ target_kwargs = {
+ "base_url": f"{self.base_url}{self.allm_path}",
+ "model": model_name,
+ "api_key": self.auth_token,
+ "streaming": model_config.get("streaming", True),
+ "max_tokens": llm_max_tokens, # Always an integer now
+ "temperature": model_config.get("temperature"),
+ "max_retries": model_config.get("max_retries", 3),
+ "default_headers": {"openai-organization": str(self.project_id),
+ "Authorization": f"Bearer {self.auth_token}"},
+ }
+
+ # TODO: Check on ChatAnthropic client when they get "effort" support back
+ if model_config.get("reasoning_effort"):
+ if model_config["reasoning_effort"].lower() == "low":
+ target_kwargs['thinking'] = {"type": "enabled", "budget_tokens": 2048}
+ target_kwargs['temperature'] = 1
+ target_kwargs["max_tokens"] = 2048 + target_kwargs["max_tokens"]
+ elif model_config["reasoning_effort"].lower() == "medium":
+ target_kwargs['thinking'] = {"type": "enabled", "budget_tokens": 4096}
+ target_kwargs['temperature'] = 1
+ target_kwargs["max_tokens"] = 4096 + target_kwargs["max_tokens"]
+ elif model_config["reasoning_effort"].lower() == "high":
+ target_kwargs['thinking'] = {"type": "enabled", "budget_tokens": 9092}
+ target_kwargs['temperature'] = 1
+ target_kwargs["max_tokens"] = 9092 + target_kwargs["max_tokens"]
+
+ # Add http_client if provided
+ if "http_client" in model_config:
+ target_kwargs["http_client"] = model_config["http_client"]
+
+ llm = ChatAnthropic(**target_kwargs)
+ else:
+ # ChatOpenAI configuration
+ target_kwargs = {
+ "base_url": f"{self.base_url}{self.llm_path}",
+ "model": model_name,
+ "api_key": self.auth_token,
+ "streaming": model_config.get("streaming", True),
+ "stream_usage": model_config.get("stream_usage", True),
+ "max_tokens": llm_max_tokens,
+ "temperature": model_config.get("temperature"),
+ "reasoning_effort": model_config.get("reasoning_effort"),
+ "max_retries": model_config.get("max_retries", 3),
+ "seed": model_config.get("seed", None),
+ "openai_organization": str(self.project_id),
+ }
+
+ if use_responses_api:
+ target_kwargs["use_responses_api"] = True
+
+ llm = ChatOpenAI(**target_kwargs)
+ return llm
+
+ def generate_image(self,
+ prompt: str,
+ n: int = 1,
+ size: str = "auto",
+ quality: str = "auto",
+ response_format: str = "b64_json",
+ style: Optional[str] = None) -> dict:
+
+ if not self.model_image_generation:
+ raise ValueError("Image generation model is not configured for this client")
+
+ image_generation_data = {
+ "prompt": prompt,
+ "model": self.model_image_generation,
+ "n": n,
+ "response_format": response_format,
+ }
+
+ # Only add optional parameters if they have meaningful values
+ if size and size.lower() != "auto":
+ image_generation_data["size"] = size
+
+ if quality and quality.lower() != "auto":
+ image_generation_data["quality"] = quality
 
+ if style:
+ image_generation_data["style"] = style
+
+ # Standard headers for image generation
+ image_headers = self.headers.copy()
+ image_headers.update({
+ "Content-Type": "application/json",
+ })
+
+ logger.info(f"Generating image with model: {self.model_image_generation}, prompt: {prompt[:50]}...")
+
+ try:
+ response = requests.post(
+ self.image_generation_url,
+ headers=image_headers,
+ json=image_generation_data,
+ verify=False,
+ timeout=self.model_timeout
+ )
+ response.raise_for_status()
+ return response.json()
+
+ except requests.exceptions.HTTPError as e:
+ logger.error(f"Image generation failed: {e.response.status_code} - {e.response.text}")
+ raise
+ except requests.exceptions.RequestException as e:
+ logger.error(f"Image generation request failed: {e}")
+ raise
 
  def get_app_version_details(self, application_id: int, application_version_id: int) -> dict:
  url = f"{self.application_versions}/{application_id}/{application_version_id}"
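With the rewritten `get_llm`, routing is decided by a substring check on the model name ("anthropic" or "claude" selects `ChatAnthropic`, anything else `ChatOpenAI`), a `max_tokens` of `-1`/`None` falls back to 4000, and `reasoning_effort` on Anthropic models adds a thinking budget on top of `max_tokens`. A hedged usage sketch; the constructor arguments, deployment URL and model names below are placeholders, not values prescribed by the SDK:

```python
# Assumes an AlitaClient is constructed as shown earlier in this diff.
client = AlitaClient(
    base_url="https://alita.example.com",   # placeholder deployment URL
    project_id=123,
    auth_token="<token>",
)

# Name contains "claude" -> ChatAnthropic. max_tokens=-1 ("auto") becomes 4000,
# then the "medium" reasoning effort adds a 4096-token thinking budget (8096 total).
anthropic_llm = client.get_llm(
    "claude-3-7-sonnet",
    {"temperature": 0.2, "max_tokens": -1, "reasoning_effort": "medium"},
)

# No "anthropic"/"claude" substring -> ChatOpenAI against the /llm/v1 proxy path.
openai_llm = client.get_llm("gpt-4o-mini", {"temperature": 0.2, "max_tokens": 2000})
```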
@@ -231,7 +407,8 @@
  app_type=None, memory=None, runtime='langchain',
  application_variables: Optional[dict] = None,
  version_details: Optional[dict] = None, store: Optional[BaseStore] = None,
- llm: Optional[ChatOpenAI] = None):
+ llm: Optional[ChatOpenAI] = None, mcp_tokens: Optional[dict] = None,
+ conversation_id: Optional[str] = None, ignored_mcp_servers: Optional[list] = None):
  if tools is None:
  tools = []
  if chat_history is None:
@@ -251,11 +428,15 @@
  if var['name'] in application_variables:
  var.update(application_variables[var['name']])
  if llm is None:
+ max_tokens = data['llm_settings'].get('max_tokens', 4000)
+ if max_tokens == -1:
+ # default number for case when auto is selected for agent
+ max_tokens = 4000
  llm = self.get_llm(
  model_name=data['llm_settings']['model_name'],
  model_config={
- "max_tokens": data['llm_settings']['max_tokens'],
- "top_p": data['llm_settings']['top_p'],
+ "max_tokens": max_tokens,
+ "reasoning_effort": data['llm_settings'].get('reasoning_effort'),
  "temperature": data['llm_settings']['temperature'],
  "model_project_id": data['llm_settings'].get('model_project_id'),
  }
@@ -270,13 +451,18 @@
  app_type = "react"
  elif app_type == 'autogen':
  app_type = "react"
+
+ # LangChainAssistant constructor calls get_tools() which may raise McpAuthorizationRequired
+ # The exception will propagate naturally to the indexer worker's outer handler
  if runtime == 'nonrunnable':
  return LangChainAssistant(self, data, llm, chat_history, app_type,
- tools=tools, memory=memory, store=store)
+ tools=tools, memory=memory, store=store, mcp_tokens=mcp_tokens,
+ conversation_id=conversation_id, ignored_mcp_servers=ignored_mcp_servers)
  if runtime == 'langchain':
  return LangChainAssistant(self, data, llm,
  chat_history, app_type,
- tools=tools, memory=memory, store=store).runnable()
+ tools=tools, memory=memory, store=store, mcp_tokens=mcp_tokens,
+ conversation_id=conversation_id, ignored_mcp_servers=ignored_mcp_servers).runnable()
  elif runtime == 'llama':
  raise NotImplementedError("LLama runtime is not supported")
 
@@ -338,16 +524,50 @@
  return self._process_requst(resp)
 
  def list_artifacts(self, bucket_name: str):
- url = f'{self.artifacts_url}/{bucket_name}'
+ # Ensure bucket name is lowercase as required by the API
+ url = f'{self.artifacts_url}/{bucket_name.lower()}'
  data = requests.get(url, headers=self.headers, verify=False)
  return self._process_requst(data)
 
  def create_artifact(self, bucket_name, artifact_name, artifact_data):
+ # Sanitize filename to prevent regex errors during indexing
+ sanitized_name, was_modified = self._sanitize_artifact_name(artifact_name)
+ if was_modified:
+ logger.warning(f"Artifact filename sanitized: '{artifact_name}' -> '{sanitized_name}'")
+
  url = f'{self.artifacts_url}/{bucket_name.lower()}'
  data = requests.post(url, headers=self.headers, files={
- 'file': (artifact_name, artifact_data)
+ 'file': (sanitized_name, artifact_data)
  }, verify=False)
  return self._process_requst(data)
+
+ @staticmethod
+ def _sanitize_artifact_name(filename: str) -> tuple:
+ """Sanitize filename for safe storage and regex pattern matching."""
+ import re
+ from pathlib import Path
+
+ if not filename or not filename.strip():
+ return "unnamed_file", True
+
+ original = filename
+ path_obj = Path(filename)
+ name = path_obj.stem
+ extension = path_obj.suffix
+
+ # Whitelist: alphanumeric, underscore, hyphen, space, Unicode letters/digits
+ sanitized_name = re.sub(r'[^\w\s-]', '', name, flags=re.UNICODE)
+ sanitized_name = re.sub(r'[-\s]+', '-', sanitized_name)
+ sanitized_name = sanitized_name.strip('-').strip()
+
+ if not sanitized_name:
+ sanitized_name = "file"
+
+ if extension:
+ extension = re.sub(r'[^\w.-]', '', extension, flags=re.UNICODE)
+
+ sanitized = sanitized_name + extension
+ return sanitized, (sanitized != original)
 
  def download_artifact(self, bucket_name, artifact_name):
  url = f'{self.artifact_url}/{bucket_name.lower()}/{artifact_name}'
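The `_sanitize_artifact_name` helper above is a whitelist pass: it keeps word characters, spaces and hyphens in the stem, collapses runs of spaces/hyphens into a single hyphen, and strips unsafe characters from the extension. A small illustration of the same regex steps, written as a standalone function that mirrors the logic rather than importing the SDK:

```python
import re
from pathlib import Path


def sanitize(filename: str) -> str:
    # Mirrors the whitelist steps from the diff above (simplified, no empty-name guard).
    path_obj = Path(filename)
    name, extension = path_obj.stem, path_obj.suffix
    name = re.sub(r'[^\w\s-]', '', name, flags=re.UNICODE)    # drop punctuation
    name = re.sub(r'[-\s]+', '-', name).strip('-').strip()    # collapse separators
    extension = re.sub(r'[^\w.-]', '', extension, flags=re.UNICODE)
    return (name or "file") + extension


print(sanitize("report (final) v2?.pdf"))   # report-final-v2.pdf
print(sanitize("données élèves.csv"))       # données-élèves.csv (Unicode letters kept)
```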
@@ -489,25 +709,32 @@
  monitoring_meta = tasknode_task.meta.get("monitoring", {})
  return monitoring_meta["user_id"]
  except Exception as e:
- logger.warning(f"Error: Could not determine user ID for MCP tool: {e}")
+ logger.debug(f"Error: Could not determine user ID for MCP tool: {e}")
  return None
 
  def predict_agent(self, llm: ChatOpenAI, instructions: str = "You are a helpful assistant.",
  tools: Optional[list] = None, chat_history: Optional[List[Any]] = None,
  memory=None, runtime='langchain', variables: Optional[list] = None,
- store: Optional[BaseStore] = None):
+ store: Optional[BaseStore] = None, debug_mode: Optional[bool] = False,
+ mcp_tokens: Optional[dict] = None, conversation_id: Optional[str] = None,
+ ignored_mcp_servers: Optional[list] = None, persona: Optional[str] = "generic"):
  """
  Create a predict-type agent with minimal configuration.
 
  Args:
  llm: The LLM to use
  instructions: System instructions for the agent
- tools: Optional list of tools to provide to the agent
+ tools: Optional list of tool configurations (not tool instances) to provide to the agent.
+ Tool configs will be processed through get_tools() to create tool instances.
+ Each tool config should have 'type', 'settings', etc.
  chat_history: Optional chat history
  memory: Optional memory/checkpointer
  runtime: Runtime type (default: 'langchain')
  variables: Optional list of variables for the agent
  store: Optional store for memory
+ debug_mode: Enable debug mode for cases when assistant can be initialized without tools
+ ignored_mcp_servers: Optional list of MCP server URLs to ignore (user chose to continue without auth)
+ persona: Default persona for chat: 'generic' or 'qa' (default: 'generic')
 
  Returns:
  Runnable agent ready for execution
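`predict_agent`, as reworked in this and the next hunk, now takes tool *configurations* (turned into tool instances inside `LangChainAssistant` via `get_tools()`), plus MCP-related context such as `mcp_tokens` and `ignored_mcp_servers`. A hedged call sketch; the toolkit type, settings and identifiers shown are illustrative placeholders:

```python
llm = client.get_llm("gpt-4o-mini", {"temperature": 0.1, "max_tokens": 2000})

agent = client.predict_agent(
    llm,
    instructions="You are a helpful assistant.",
    tools=[{
        # A tool CONFIG, not an instantiated tool; keys shown are illustrative.
        "type": "github",
        "toolkit_name": "my-github",
        "settings": {"...": "..."},
    }],
    conversation_id="demo-conversation",
    persona="generic",
)
# `agent` is the runnable returned by LangChainAssistant(...).runnable();
# the exact invocation payload depends on the LangGraph agent definition.
```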
@@ -521,17 +748,34 @@
 
  # Create a minimal data structure for predict agent
  # All LLM settings are taken from the passed client instance
+ # Note: 'tools' here are tool CONFIGURATIONS, not tool instances
+ # They will be converted to tool instances by LangChainAssistant via get_tools()
  agent_data = {
  'instructions': instructions,
- 'tools': tools, # Tools are handled separately in predict agents
+ 'tools': tools, # Tool configs that will be processed by get_tools()
  'variables': variables
  }
- return LangChainAssistant(self, agent_data, llm,
- chat_history, "predict", memory=memory, store=store).runnable()
+
+ # LangChainAssistant constructor calls get_tools() which may raise McpAuthorizationRequired
+ # The exception will propagate naturally to the indexer worker's outer handler
+ return LangChainAssistant(
+ self,
+ agent_data,
+ llm,
+ chat_history,
+ "predict",
+ memory=memory,
+ store=store,
+ debug_mode=debug_mode,
+ mcp_tokens=mcp_tokens,
+ conversation_id=conversation_id,
+ ignored_mcp_servers=ignored_mcp_servers,
+ persona=persona
+ ).runnable()
 
  def test_toolkit_tool(self, toolkit_config: dict, tool_name: str, tool_params: dict = None,
  runtime_config: dict = None, llm_model: str = None,
- llm_config: dict = None) -> dict:
+ llm_config: dict = None, mcp_tokens: dict = None) -> dict:
  """
  Test a single tool from a toolkit with given parameters and runtime callbacks.
 
@@ -550,6 +794,7 @@
  - configurable: Additional configuration parameters
  - tags: Tags for the execution
  llm_model: Name of the LLM model to use (default: 'gpt-4o-mini')
+ mcp_tokens: Optional dictionary of MCP OAuth tokens by server URL
  llm_config: Configuration for the LLM containing:
  - max_tokens: Maximum tokens for response (default: 1000)
  - temperature: Temperature for response generation (default: 0.1)
@@ -597,7 +842,6 @@
  llm_config = {
  'max_tokens': 1024,
  'temperature': 0.1,
- 'top_p': 1.0
  }
  import logging
  logger = logging.getLogger(__name__)
@@ -668,7 +912,23 @@
  }
 
  # Instantiate the toolkit with client and LLM support
- tools = instantiate_toolkit_with_client(toolkit_config, llm, self)
+ try:
+ tools = instantiate_toolkit_with_client(toolkit_config, llm, self, mcp_tokens=mcp_tokens, use_prefix=False)
+ except McpAuthorizationRequired:
+ # Re-raise McpAuthorizationRequired to allow proper handling upstream
+ logger.info(f"McpAuthorizationRequired detected, re-raising")
+ raise
+ except Exception as toolkit_error:
+ # For other errors, return error response
+ return {
+ "success": False,
+ "error": f"Failed to instantiate toolkit '{toolkit_config.get('toolkit_name')}': {str(toolkit_error)}",
+ "tool_name": tool_name,
+ "toolkit_config": toolkit_config_parsed_json,
+ "llm_model": llm_model,
+ "events_dispatched": events_dispatched,
+ "execution_time_seconds": 0.0
+ }
 
  if not tools:
  return {
@@ -744,7 +1004,6 @@
  if target_tool is None:
  available_tools = []
  base_available_tools = []
- full_available_tools = []
 
  for tool in tools:
  tool_name_attr = None
@@ -761,16 +1020,14 @@
  if base_name not in base_available_tools:
  base_available_tools.append(base_name)
 
- # Track full names separately
- if '___' in tool_name_attr:
- full_available_tools.append(tool_name_attr)
-
  # Create comprehensive error message
- error_msg = f"Tool '{tool_name}' not found in toolkit '{toolkit_config.get('toolkit_name')}'."
+ error_msg = f"Tool '{tool_name}' not found in toolkit '{toolkit_config.get('toolkit_name')}'.\n"
 
- if base_available_tools and full_available_tools:
- error_msg += f" Available tools: {base_available_tools} (base names) or {full_available_tools} (full names)"
- elif base_available_tools:
+ # Custom error for index tools
+ if toolkit_name in [tool.value for tool in IndexTools]:
+ error_msg += f" Please make sure proper PGVector configuration and embedding model are set in the platform.\n"
+
+ if base_available_tools:
  error_msg += f" Available tools: {base_available_tools}"
  elif available_tools:
  error_msg += f" Available tools: {available_tools}"
@@ -779,10 +1036,7 @@
 
  # Add helpful hint about naming conventions
  if '___' in tool_name:
- error_msg += f" Note: You provided a full name '{tool_name}'. Try using just the base name '{extract_base_tool_name(tool_name)}'."
- elif full_available_tools:
- possible_full_name = create_full_tool_name(tool_name, toolkit_name)
- error_msg += f" Note: You provided a base name '{tool_name}'. The full name might be '{possible_full_name}'."
+ error_msg += f" Note: Tool names no longer use '___' prefixes. Try using just the base name '{extract_base_tool_name(tool_name)}'."
 
  return {
  "success": False,
@@ -888,6 +1142,9 @@
  }
 
  except Exception as e:
+ # Re-raise McpAuthorizationRequired to allow proper handling upstream
+ if isinstance(e, McpAuthorizationRequired):
+ raise
  logger = logging.getLogger(__name__)
  logger.error(f"Error in test_toolkit_tool: {str(e)}")
  return {
@@ -900,23 +1157,157 @@
  "execution_time_seconds": 0.0
  }
 
- def _get_real_user_id(self) -> str:
- """Extract the real user ID from the auth token for MCP tool calls."""
+ def test_mcp_connection(self, toolkit_config: dict, mcp_tokens: dict = None) -> dict:
+ """
+ Test MCP server connection using protocol-level list_tools.
+
+ This method verifies MCP server connectivity and authentication by calling
+ the protocol-level tools/list JSON-RPC method (NOT executing a tool).
+ This is ideal for auth checks as it validates the connection without
+ requiring any tool execution.
+
+ Args:
+ toolkit_config: Configuration dictionary for the MCP toolkit containing:
+ - toolkit_name: Name of the toolkit
+ - settings: Dictionary with 'url', optional 'headers', 'session_id'
+ mcp_tokens: Optional dictionary of MCP OAuth tokens by server URL
+ Format: {canonical_url: {access_token: str, session_id: str}}
+
+ Returns:
+ Dictionary containing:
+ - success: Boolean indicating if the connection was successful
+ - tools: List of tool names available on the MCP server (if successful)
+ - tools_count: Number of tools discovered
+ - server_session_id: Session ID provided by the server (if any)
+ - error: Error message (if unsuccessful)
+ - toolkit_config: Original toolkit configuration
+
+ Raises:
+ McpAuthorizationRequired: If MCP server requires OAuth authorization
+
+ Example:
+ >>> config = {
+ ... 'toolkit_name': 'my-mcp-server',
+ ... 'type': 'mcp',
+ ... 'settings': {
+ ... 'url': 'https://mcp-server.example.com/mcp',
+ ... 'headers': {'X-Custom': 'value'}
+ ... }
+ ... }
+ >>> result = client.test_mcp_connection(config)
+ >>> if result['success']:
+ ... print(f"Connected! Found {result['tools_count']} tools")
+ """
+ import asyncio
+ import time
+ from ..utils.mcp_client import McpClient
+ from ..utils.mcp_oauth import canonical_resource
+
+ toolkit_name = toolkit_config.get('toolkit_name', 'unknown')
+ settings = toolkit_config.get('settings', {})
+
+ # Extract connection parameters
+ url = settings.get('url')
+ if not url:
+ return {
+ "success": False,
+ "error": "MCP toolkit configuration missing 'url' in settings",
+ "toolkit_config": toolkit_config,
+ "tools": [],
+ "tools_count": 0
+ }
+
+ headers = settings.get('headers') or {}
+ session_id = settings.get('session_id')
+
+ # Apply OAuth token if available
+ if mcp_tokens and url:
+ canonical_url = canonical_resource(url)
+ token_data = mcp_tokens.get(canonical_url)
+ if token_data:
+ if isinstance(token_data, dict):
+ access_token = token_data.get('access_token')
+ if not session_id:
+ session_id = token_data.get('session_id')
+ else:
+ # Backward compatibility: plain token string
+ access_token = token_data
+
+ if access_token:
+ headers = dict(headers) # Copy to avoid mutating original
+ headers.setdefault('Authorization', f'Bearer {access_token}')
+ logger.info(f"[MCP Auth Check] Applied OAuth token for {canonical_url}")
+
+ logger.info(f"Testing MCP connection to '{toolkit_name}' at {url}")
+
+ start_time = time.time()
+
+ async def _test_connection():
+ client = McpClient(
+ url=url,
+ session_id=session_id,
+ headers=headers,
+ timeout=60 # Reasonable timeout for connection test
+ )
+
+ async with client:
+ # Initialize MCP protocol session
+ await client.initialize()
+ logger.info(f"[MCP Auth Check] Session initialized (transport={client.detected_transport})")
+
+ # Call protocol-level list_tools (tools/list JSON-RPC method)
+ tools = await client.list_tools()
+
+ return {
+ "tools": tools,
+ "server_session_id": client.server_session_id,
+ "transport": client.detected_transport
+ }
+
  try:
- import base64
- import json
- # Assuming JWT token, extract user ID from payload
- # This is a basic implementation - adjust based on your token format
- token_parts = self.auth_token.split('.')
- if len(token_parts) >= 2:
- payload_part = token_parts[1]
- # Add padding if needed
- padding = len(payload_part) % 4
- if padding:
- payload_part += '=' * (4 - padding)
- payload = json.loads(base64.b64decode(payload_part))
- return payload.get('user_id') or payload.get('sub') or payload.get('uid')
- except Exception as e:
- logger.error(f"Error extracting user ID from token: {e}")
- return None
+ # Run async operation
+ try:
+ loop = asyncio.get_event_loop()
+ if loop.is_running():
+ # If we're already in an async context, create a new task
+ import concurrent.futures
+ with concurrent.futures.ThreadPoolExecutor() as executor:
+ future = executor.submit(asyncio.run, _test_connection())
+ result = future.result(timeout=120)
+ else:
+ result = loop.run_until_complete(_test_connection())
+ except RuntimeError:
+ # No event loop, create one
+ result = asyncio.run(_test_connection())
 
+ execution_time = time.time() - start_time
+
+ # Extract tool names for the response
+ tool_names = [tool.get('name', 'unknown') for tool in result.get('tools', [])]
+
+ logger.info(f"[MCP Auth Check] Connection successful to '{toolkit_name}': {len(tool_names)} tools in {execution_time:.3f}s")
+
+ return {
+ "success": True,
+ "tools": tool_names,
+ "tools_count": len(tool_names),
+ "server_session_id": result.get('server_session_id'),
+ "transport": result.get('transport'),
+ "toolkit_config": toolkit_config,
+ "execution_time_seconds": execution_time
+ }
+
+ except McpAuthorizationRequired:
+ # Re-raise to allow proper handling upstream
+ raise
+ except Exception as e:
+ execution_time = time.time() - start_time
+ logger.error(f"[MCP Auth Check] Connection failed to '{toolkit_name}': {str(e)}")
+ return {
+ "success": False,
+ "error": f"MCP connection failed: {str(e)}",
+ "toolkit_config": toolkit_config,
+ "tools": [],
+ "tools_count": 0,
+ "execution_time_seconds": execution_time
+ }
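`test_mcp_connection` has to call async MCP client code from synchronous callers, so it probes for a running event loop and, if one exists, pushes `asyncio.run` onto a worker thread instead. A stripped-down sketch of that dispatch pattern; the `probe` coroutine is a stand-in, not the SDK's MCP client:

```python
import asyncio
import concurrent.futures


async def probe() -> str:
    # Stand-in for the real MCP initialize + tools/list round-trip.
    await asyncio.sleep(0.1)
    return "ok"


def run_sync(coro):
    """Run a coroutine from synchronous code, whether or not a loop is already running."""
    try:
        loop = asyncio.get_event_loop()
        if loop.is_running():
            # Already inside an event loop: hand the coroutine to a worker thread.
            with concurrent.futures.ThreadPoolExecutor() as executor:
                return executor.submit(asyncio.run, coro).result(timeout=120)
        return loop.run_until_complete(coro)
    except RuntimeError:
        # No usable event loop in this thread: create one.
        return asyncio.run(coro)


print(run_sync(probe()))  # ok
```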