alita-sdk 0.3.351__py3-none-any.whl → 0.3.499__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (206) hide show
  1. alita_sdk/cli/__init__.py +10 -0
  2. alita_sdk/cli/__main__.py +17 -0
  3. alita_sdk/cli/agent/__init__.py +5 -0
  4. alita_sdk/cli/agent/default.py +258 -0
  5. alita_sdk/cli/agent_executor.py +155 -0
  6. alita_sdk/cli/agent_loader.py +215 -0
  7. alita_sdk/cli/agent_ui.py +228 -0
  8. alita_sdk/cli/agents.py +3601 -0
  9. alita_sdk/cli/callbacks.py +647 -0
  10. alita_sdk/cli/cli.py +168 -0
  11. alita_sdk/cli/config.py +306 -0
  12. alita_sdk/cli/context/__init__.py +30 -0
  13. alita_sdk/cli/context/cleanup.py +198 -0
  14. alita_sdk/cli/context/manager.py +731 -0
  15. alita_sdk/cli/context/message.py +285 -0
  16. alita_sdk/cli/context/strategies.py +289 -0
  17. alita_sdk/cli/context/token_estimation.py +127 -0
  18. alita_sdk/cli/formatting.py +182 -0
  19. alita_sdk/cli/input_handler.py +419 -0
  20. alita_sdk/cli/inventory.py +1256 -0
  21. alita_sdk/cli/mcp_loader.py +315 -0
  22. alita_sdk/cli/toolkit.py +327 -0
  23. alita_sdk/cli/toolkit_loader.py +85 -0
  24. alita_sdk/cli/tools/__init__.py +43 -0
  25. alita_sdk/cli/tools/approval.py +224 -0
  26. alita_sdk/cli/tools/filesystem.py +1751 -0
  27. alita_sdk/cli/tools/planning.py +389 -0
  28. alita_sdk/cli/tools/terminal.py +414 -0
  29. alita_sdk/community/__init__.py +64 -8
  30. alita_sdk/community/inventory/__init__.py +224 -0
  31. alita_sdk/community/inventory/config.py +257 -0
  32. alita_sdk/community/inventory/enrichment.py +2137 -0
  33. alita_sdk/community/inventory/extractors.py +1469 -0
  34. alita_sdk/community/inventory/ingestion.py +3172 -0
  35. alita_sdk/community/inventory/knowledge_graph.py +1457 -0
  36. alita_sdk/community/inventory/parsers/__init__.py +218 -0
  37. alita_sdk/community/inventory/parsers/base.py +295 -0
  38. alita_sdk/community/inventory/parsers/csharp_parser.py +907 -0
  39. alita_sdk/community/inventory/parsers/go_parser.py +851 -0
  40. alita_sdk/community/inventory/parsers/html_parser.py +389 -0
  41. alita_sdk/community/inventory/parsers/java_parser.py +593 -0
  42. alita_sdk/community/inventory/parsers/javascript_parser.py +629 -0
  43. alita_sdk/community/inventory/parsers/kotlin_parser.py +768 -0
  44. alita_sdk/community/inventory/parsers/markdown_parser.py +362 -0
  45. alita_sdk/community/inventory/parsers/python_parser.py +604 -0
  46. alita_sdk/community/inventory/parsers/rust_parser.py +858 -0
  47. alita_sdk/community/inventory/parsers/swift_parser.py +832 -0
  48. alita_sdk/community/inventory/parsers/text_parser.py +322 -0
  49. alita_sdk/community/inventory/parsers/yaml_parser.py +370 -0
  50. alita_sdk/community/inventory/patterns/__init__.py +61 -0
  51. alita_sdk/community/inventory/patterns/ast_adapter.py +380 -0
  52. alita_sdk/community/inventory/patterns/loader.py +348 -0
  53. alita_sdk/community/inventory/patterns/registry.py +198 -0
  54. alita_sdk/community/inventory/presets.py +535 -0
  55. alita_sdk/community/inventory/retrieval.py +1403 -0
  56. alita_sdk/community/inventory/toolkit.py +173 -0
  57. alita_sdk/community/inventory/visualize.py +1370 -0
  58. alita_sdk/configurations/bitbucket.py +94 -2
  59. alita_sdk/configurations/confluence.py +96 -1
  60. alita_sdk/configurations/gitlab.py +79 -0
  61. alita_sdk/configurations/jira.py +103 -0
  62. alita_sdk/configurations/testrail.py +88 -0
  63. alita_sdk/configurations/xray.py +93 -0
  64. alita_sdk/configurations/zephyr_enterprise.py +93 -0
  65. alita_sdk/configurations/zephyr_essential.py +75 -0
  66. alita_sdk/runtime/clients/artifact.py +1 -1
  67. alita_sdk/runtime/clients/client.py +214 -42
  68. alita_sdk/runtime/clients/mcp_discovery.py +342 -0
  69. alita_sdk/runtime/clients/mcp_manager.py +262 -0
  70. alita_sdk/runtime/clients/sandbox_client.py +373 -0
  71. alita_sdk/runtime/langchain/assistant.py +118 -30
  72. alita_sdk/runtime/langchain/constants.py +8 -1
  73. alita_sdk/runtime/langchain/document_loaders/AlitaDocxMammothLoader.py +315 -3
  74. alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py +103 -60
  75. alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +4 -1
  76. alita_sdk/runtime/langchain/document_loaders/AlitaPowerPointLoader.py +41 -12
  77. alita_sdk/runtime/langchain/document_loaders/AlitaTableLoader.py +1 -1
  78. alita_sdk/runtime/langchain/document_loaders/constants.py +116 -99
  79. alita_sdk/runtime/langchain/interfaces/llm_processor.py +2 -2
  80. alita_sdk/runtime/langchain/langraph_agent.py +307 -71
  81. alita_sdk/runtime/langchain/utils.py +48 -8
  82. alita_sdk/runtime/llms/preloaded.py +2 -6
  83. alita_sdk/runtime/models/mcp_models.py +61 -0
  84. alita_sdk/runtime/toolkits/__init__.py +26 -0
  85. alita_sdk/runtime/toolkits/application.py +9 -2
  86. alita_sdk/runtime/toolkits/artifact.py +18 -6
  87. alita_sdk/runtime/toolkits/datasource.py +13 -6
  88. alita_sdk/runtime/toolkits/mcp.py +780 -0
  89. alita_sdk/runtime/toolkits/planning.py +178 -0
  90. alita_sdk/runtime/toolkits/tools.py +205 -55
  91. alita_sdk/runtime/toolkits/vectorstore.py +9 -4
  92. alita_sdk/runtime/tools/__init__.py +11 -3
  93. alita_sdk/runtime/tools/application.py +7 -0
  94. alita_sdk/runtime/tools/artifact.py +225 -12
  95. alita_sdk/runtime/tools/function.py +95 -5
  96. alita_sdk/runtime/tools/graph.py +10 -4
  97. alita_sdk/runtime/tools/image_generation.py +212 -0
  98. alita_sdk/runtime/tools/llm.py +494 -102
  99. alita_sdk/runtime/tools/mcp_inspect_tool.py +284 -0
  100. alita_sdk/runtime/tools/mcp_remote_tool.py +181 -0
  101. alita_sdk/runtime/tools/mcp_server_tool.py +4 -4
  102. alita_sdk/runtime/tools/planning/__init__.py +36 -0
  103. alita_sdk/runtime/tools/planning/models.py +246 -0
  104. alita_sdk/runtime/tools/planning/wrapper.py +607 -0
  105. alita_sdk/runtime/tools/router.py +2 -1
  106. alita_sdk/runtime/tools/sandbox.py +180 -79
  107. alita_sdk/runtime/tools/vectorstore.py +22 -21
  108. alita_sdk/runtime/tools/vectorstore_base.py +125 -52
  109. alita_sdk/runtime/utils/AlitaCallback.py +106 -20
  110. alita_sdk/runtime/utils/mcp_client.py +465 -0
  111. alita_sdk/runtime/utils/mcp_oauth.py +244 -0
  112. alita_sdk/runtime/utils/mcp_sse_client.py +405 -0
  113. alita_sdk/runtime/utils/mcp_tools_discovery.py +124 -0
  114. alita_sdk/runtime/utils/streamlit.py +40 -13
  115. alita_sdk/runtime/utils/toolkit_utils.py +28 -9
  116. alita_sdk/runtime/utils/utils.py +12 -0
  117. alita_sdk/tools/__init__.py +77 -33
  118. alita_sdk/tools/ado/repos/__init__.py +7 -6
  119. alita_sdk/tools/ado/repos/repos_wrapper.py +11 -11
  120. alita_sdk/tools/ado/test_plan/__init__.py +7 -7
  121. alita_sdk/tools/ado/wiki/__init__.py +7 -11
  122. alita_sdk/tools/ado/wiki/ado_wrapper.py +89 -15
  123. alita_sdk/tools/ado/work_item/__init__.py +7 -11
  124. alita_sdk/tools/ado/work_item/ado_wrapper.py +17 -8
  125. alita_sdk/tools/advanced_jira_mining/__init__.py +8 -7
  126. alita_sdk/tools/aws/delta_lake/__init__.py +11 -9
  127. alita_sdk/tools/azure_ai/search/__init__.py +7 -6
  128. alita_sdk/tools/base_indexer_toolkit.py +345 -70
  129. alita_sdk/tools/bitbucket/__init__.py +9 -8
  130. alita_sdk/tools/bitbucket/api_wrapper.py +50 -6
  131. alita_sdk/tools/browser/__init__.py +4 -4
  132. alita_sdk/tools/carrier/__init__.py +4 -6
  133. alita_sdk/tools/chunkers/__init__.py +3 -1
  134. alita_sdk/tools/chunkers/sematic/json_chunker.py +1 -0
  135. alita_sdk/tools/chunkers/sematic/markdown_chunker.py +97 -6
  136. alita_sdk/tools/chunkers/sematic/proposal_chunker.py +1 -1
  137. alita_sdk/tools/chunkers/universal_chunker.py +270 -0
  138. alita_sdk/tools/cloud/aws/__init__.py +7 -6
  139. alita_sdk/tools/cloud/azure/__init__.py +7 -6
  140. alita_sdk/tools/cloud/gcp/__init__.py +7 -6
  141. alita_sdk/tools/cloud/k8s/__init__.py +7 -6
  142. alita_sdk/tools/code/linter/__init__.py +7 -7
  143. alita_sdk/tools/code/loaders/codesearcher.py +3 -2
  144. alita_sdk/tools/code/sonar/__init__.py +8 -7
  145. alita_sdk/tools/code_indexer_toolkit.py +199 -0
  146. alita_sdk/tools/confluence/__init__.py +9 -8
  147. alita_sdk/tools/confluence/api_wrapper.py +171 -75
  148. alita_sdk/tools/confluence/loader.py +10 -0
  149. alita_sdk/tools/custom_open_api/__init__.py +9 -4
  150. alita_sdk/tools/elastic/__init__.py +8 -7
  151. alita_sdk/tools/elitea_base.py +492 -52
  152. alita_sdk/tools/figma/__init__.py +7 -7
  153. alita_sdk/tools/figma/api_wrapper.py +2 -1
  154. alita_sdk/tools/github/__init__.py +9 -9
  155. alita_sdk/tools/github/api_wrapper.py +9 -26
  156. alita_sdk/tools/github/github_client.py +62 -2
  157. alita_sdk/tools/gitlab/__init__.py +8 -8
  158. alita_sdk/tools/gitlab/api_wrapper.py +135 -33
  159. alita_sdk/tools/gitlab_org/__init__.py +7 -8
  160. alita_sdk/tools/google/bigquery/__init__.py +11 -12
  161. alita_sdk/tools/google_places/__init__.py +8 -7
  162. alita_sdk/tools/jira/__init__.py +9 -7
  163. alita_sdk/tools/jira/api_wrapper.py +100 -52
  164. alita_sdk/tools/keycloak/__init__.py +8 -7
  165. alita_sdk/tools/localgit/local_git.py +56 -54
  166. alita_sdk/tools/memory/__init__.py +1 -1
  167. alita_sdk/tools/non_code_indexer_toolkit.py +3 -2
  168. alita_sdk/tools/ocr/__init__.py +8 -7
  169. alita_sdk/tools/openapi/__init__.py +10 -1
  170. alita_sdk/tools/pandas/__init__.py +8 -7
  171. alita_sdk/tools/postman/__init__.py +7 -8
  172. alita_sdk/tools/postman/api_wrapper.py +19 -8
  173. alita_sdk/tools/postman/postman_analysis.py +8 -1
  174. alita_sdk/tools/pptx/__init__.py +8 -9
  175. alita_sdk/tools/qtest/__init__.py +16 -11
  176. alita_sdk/tools/qtest/api_wrapper.py +1784 -88
  177. alita_sdk/tools/rally/__init__.py +7 -8
  178. alita_sdk/tools/report_portal/__init__.py +9 -7
  179. alita_sdk/tools/salesforce/__init__.py +7 -7
  180. alita_sdk/tools/servicenow/__init__.py +10 -10
  181. alita_sdk/tools/sharepoint/__init__.py +7 -6
  182. alita_sdk/tools/sharepoint/api_wrapper.py +127 -36
  183. alita_sdk/tools/sharepoint/authorization_helper.py +191 -1
  184. alita_sdk/tools/sharepoint/utils.py +8 -2
  185. alita_sdk/tools/slack/__init__.py +7 -6
  186. alita_sdk/tools/sql/__init__.py +8 -7
  187. alita_sdk/tools/sql/api_wrapper.py +71 -23
  188. alita_sdk/tools/testio/__init__.py +7 -6
  189. alita_sdk/tools/testrail/__init__.py +8 -9
  190. alita_sdk/tools/utils/__init__.py +26 -4
  191. alita_sdk/tools/utils/content_parser.py +88 -60
  192. alita_sdk/tools/utils/text_operations.py +254 -0
  193. alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +76 -26
  194. alita_sdk/tools/xray/__init__.py +9 -7
  195. alita_sdk/tools/zephyr/__init__.py +7 -6
  196. alita_sdk/tools/zephyr_enterprise/__init__.py +8 -6
  197. alita_sdk/tools/zephyr_essential/__init__.py +7 -6
  198. alita_sdk/tools/zephyr_essential/api_wrapper.py +12 -13
  199. alita_sdk/tools/zephyr_scale/__init__.py +7 -6
  200. alita_sdk/tools/zephyr_squad/__init__.py +7 -6
  201. {alita_sdk-0.3.351.dist-info → alita_sdk-0.3.499.dist-info}/METADATA +147 -2
  202. {alita_sdk-0.3.351.dist-info → alita_sdk-0.3.499.dist-info}/RECORD +206 -130
  203. alita_sdk-0.3.499.dist-info/entry_points.txt +2 -0
  204. {alita_sdk-0.3.351.dist-info → alita_sdk-0.3.499.dist-info}/WHEEL +0 -0
  205. {alita_sdk-0.3.351.dist-info → alita_sdk-0.3.499.dist-info}/licenses/LICENSE +0 -0
  206. {alita_sdk-0.3.351.dist-info → alita_sdk-0.3.499.dist-info}/top_level.txt +0 -0
@@ -1,7 +1,10 @@
1
1
  from datetime import datetime, timezone
2
+ from urllib.parse import unquote, urlparse, quote
2
3
 
3
4
  import jwt
4
5
  import requests
6
+ from botocore.response import get_response
7
+
5
8
 
6
9
  class SharepointAuthorizationHelper:
7
10
 
@@ -54,4 +57,191 @@ class SharepointAuthorizationHelper:
54
57
  except jwt.ExpiredSignatureError:
55
58
  return False
56
59
  except jwt.InvalidTokenError:
57
- return False
60
+ return False
61
+
62
+ def _validate_response(self, response, required_field, error_prefix=None):
63
+ if response.status_code != 200:
64
+ raise RuntimeError(f"{error_prefix or 'Request'} failed: {response.status_code} {response.text}")
65
+ json_data = response.json()
66
+ if required_field not in json_data:
67
+ raise KeyError(f"'{required_field}' missing in response")
68
+ return json_data[required_field]
69
+
70
+ def generate_token_and_site_id(self, site_url: str) -> tuple[str, str]:
71
+ try:
72
+ parsed = urlparse(site_url)
73
+ domain = parsed.hostname
74
+ site_path = parsed.path.strip('/')
75
+ if not domain or not site_path:
76
+ raise ValueError(f"site_url missing domain or site path: {site_url}")
77
+ app_name = domain.split('.')[0]
78
+ openid_config_url = f"https://login.microsoftonline.com/{app_name}.onmicrosoft.com/v2.0/.well-known/openid-configuration"
79
+ response = requests.get(openid_config_url)
80
+ token_url = self._validate_response(response, required_field="token_endpoint", error_prefix="OpenID config")
81
+ token_data = {
82
+ "grant_type": "client_credentials",
83
+ "client_id": self.client_id,
84
+ "client_secret": self.client_secret,
85
+ "scope": "https://graph.microsoft.com/.default"
86
+ }
87
+ token_response = requests.post(token_url, data=token_data)
88
+ access_token = self._validate_response(token_response, required_field="access_token", error_prefix="Token request")
89
+ graph_site_url = f"https://graph.microsoft.com/v1.0/sites/{domain}:/{site_path}"
90
+ headers = {"Authorization": f"Bearer {access_token}"}
91
+ site_response = requests.get(graph_site_url, headers=headers)
92
+ site_id = self._validate_response(site_response, required_field="id", error_prefix="Site info")
93
+ return access_token, site_id
94
+ except Exception as e:
95
+ raise RuntimeError(f"Error while obtaining access_token and site_id: {e}")
96
+
97
+ def get_files_list(self, site_url: str, folder_name: str = None, limit_files: int = 100):
98
+ if not site_url or not site_url.startswith("https://"):
99
+ raise ValueError(f"Invalid site_url format: {site_url}")
100
+ if limit_files is not None and (not isinstance(limit_files, int) or limit_files <= 0):
101
+ raise ValueError(f"limit_files must be a positive integer, got: {limit_files}")
102
+ try:
103
+ access_token, site_id = self.generate_token_and_site_id(site_url)
104
+ headers = {"Authorization": f"Bearer {access_token}"}
105
+ drives_url = f"https://graph.microsoft.com/v1.0/sites/{site_id}/drives"
106
+ drives_response = requests.get(drives_url, headers=headers)
107
+ drives = self._validate_response(drives_response, required_field="value", error_prefix="Drives request")
108
+ result = []
109
+ def _recurse_drive(drive_id, drive_path, parent_folder, limit_files):
110
+ # Escape folder_name for URL safety if present
111
+ if parent_folder:
112
+ safe_folder_name = quote(parent_folder.strip('/'), safe="/")
113
+ url = f"https://graph.microsoft.com/v1.0/sites/{site_id}/drives/{drive_id}/root:/{safe_folder_name}:/children?$top={limit_files}"
114
+ else:
115
+ url = f"https://graph.microsoft.com/v1.0/sites/{site_id}/drives/{drive_id}/root/children?$top={limit_files}"
116
+ response = requests.get(url, headers=headers)
117
+ if response.status_code != 200:
118
+ return []
119
+ files_json = response.json()
120
+ if "value" not in files_json:
121
+ return []
122
+ files = []
123
+ for file in files_json["value"]:
124
+ file_name = file.get('name', '')
125
+ # Build full path reflecting nested folders
126
+ if parent_folder:
127
+ full_path = '/' + '/'.join([drive_path.strip('/'), parent_folder.strip('/'), file_name.strip('/')])
128
+ else:
129
+ full_path = '/' + '/'.join([drive_path.strip('/'), file_name.strip('/')])
130
+ temp_props = {
131
+ 'Name': file_name,
132
+ 'Path': full_path,
133
+ 'Created': file.get('createdDateTime'),
134
+ 'Modified': file.get('lastModifiedDateTime'),
135
+ 'Link': file.get('webUrl'),
136
+ 'id': file.get('id')
137
+ }
138
+ if not all([temp_props['Name'], temp_props['Path'], temp_props['id']]):
139
+ continue # skip files with missing required fields
140
+ if 'folder' in file:
141
+ # Recursively extract files from this folder
142
+ inner_folder = parent_folder + '/' + file_name if parent_folder else file_name
143
+ inner_files = _recurse_drive(drive_id, drive_path, inner_folder, limit_files)
144
+ files.extend(inner_files)
145
+ else:
146
+ files.append(temp_props)
147
+ if limit_files is not None and len(result) + len(files) >= limit_files:
148
+ return files[:limit_files - len(result)]
149
+ return files
150
+ #
151
+ site_segments = [seg for seg in site_url.strip('/').split('/') if seg][-2:]
152
+ full_path_prefix = '/'.join(site_segments)
153
+ #
154
+ for drive in drives:
155
+ drive_id = drive.get("id")
156
+ drive_path = unquote(urlparse(drive.get("webUrl")).path) if drive.get("webUrl") else ""
157
+ if not drive_id:
158
+ continue # skip drives without id
159
+ #
160
+ sub_folder = folder_name
161
+ if folder_name:
162
+ folder_path = folder_name.strip('/')
163
+ expected_prefix = drive_path.strip('/')#f'{full_path_prefix}/{library_type}'
164
+ if folder_path.startswith(full_path_prefix):
165
+ if folder_path.startswith(expected_prefix):
166
+ sub_folder = folder_path.removeprefix(f'{expected_prefix}').strip('/')#target_folder_url = folder_path.removeprefix(f'{full_path_prefix}/')
167
+ else:
168
+ # ignore full path folder which is not targeted to current drive
169
+ continue
170
+ #
171
+ files = _recurse_drive(drive_id, drive_path, sub_folder, limit_files)
172
+ result.extend(files)
173
+ if limit_files is not None and len(result) >= limit_files:
174
+ return result[:limit_files]
175
+ return result
176
+ except Exception as e:
177
+ raise RuntimeError(f"Error in get_files_list: {e}")
178
+
179
+ def get_file_content(self, site_url: str, path: str):
180
+ try:
181
+ access_token, site_id = self.generate_token_and_site_id(site_url)
182
+ headers = {"Authorization": f"Bearer {access_token}"}
183
+ drives_url = f"https://graph.microsoft.com/v1.0/sites/{site_id}/drives"
184
+ drives_response = requests.get(drives_url, headers=headers)
185
+ drives = self._validate_response(drives_response, required_field="value", error_prefix="Drives request")
186
+ path = path.strip('/')
187
+ #
188
+ for drive in drives:
189
+ drive_path = unquote(urlparse(drive.get("webUrl")).path).strip('/')
190
+ if not drive_path or not path.startswith(drive_path):
191
+ continue
192
+ drive_id = drive.get("id")
193
+ if not drive_id:
194
+ continue
195
+ path = path.replace(drive_path, '').strip('/')
196
+ safe_path = quote(path, safe="")
197
+ url = f"https://graph.microsoft.com/v1.0/drives/{drive_id}/root:/{safe_path}:/content"
198
+ response = requests.get(url, headers=headers)
199
+ if response.status_code == 200:
200
+ return response.content
201
+ raise RuntimeError(f"File '{path}' not found in any private or shared documents.")
202
+ except Exception as e:
203
+ raise RuntimeError(f"Error in get_file_content: {e}")
204
+
205
+ def get_list_items(self, site_url: str, list_title: str, limit: int = 1000):
206
+ """Fallback Graph API method to read SharePoint list items by list title.
207
+
208
+ Returns a list of dictionaries representing list item fields.
209
+ """
210
+ if not site_url or not site_url.startswith("https://"):
211
+ raise ValueError(f"Invalid site_url format: {site_url}")
212
+ try:
213
+ access_token, site_id = self.generate_token_and_site_id(site_url)
214
+ headers = {"Authorization": f"Bearer {access_token}"}
215
+ lists_url = f"https://graph.microsoft.com/v1.0/sites/{site_id}/lists"
216
+ response = requests.get(lists_url, headers=headers)
217
+ if response.status_code != 200:
218
+ raise RuntimeError(f"Lists request failed: {response.status_code} {response.text}")
219
+ lists_json = response.json()
220
+ lists = lists_json.get("value", [])
221
+ target_list = None
222
+ normalized_title = list_title.strip().lower()
223
+ for lst in lists:
224
+ # displayName is the user-visible title. name can differ (internal name)
225
+ display_name = (lst.get("displayName") or lst.get("name") or '').strip().lower()
226
+ if display_name == normalized_title:
227
+ target_list = lst
228
+ break
229
+ if not target_list:
230
+ raise RuntimeError(f"List '{list_title}' not found via Graph API.")
231
+ list_id = target_list.get('id')
232
+ if not list_id:
233
+ raise RuntimeError(f"List '{list_title}' missing id field.")
234
+ items_url = f"https://graph.microsoft.com/v1.0/sites/{site_id}/lists/{list_id}/items?expand=fields&$top={limit}"
235
+ items_response = requests.get(items_url, headers=headers)
236
+ if items_response.status_code != 200:
237
+ raise RuntimeError(f"List items request failed: {items_response.status_code} {items_response.text}")
238
+ items_json = items_response.json()
239
+ values = items_json.get('value', [])
240
+ result = []
241
+ for item in values:
242
+ fields = item.get('fields', {})
243
+ if fields:
244
+ result.append(fields)
245
+ return result
246
+ except Exception as e:
247
+ raise RuntimeError(f"Error in get_list_items: {e}")
@@ -1,5 +1,7 @@
1
- from docx import Document
1
+ import re
2
2
  from io import BytesIO
3
+ from docx import Document
4
+
3
5
 
4
6
  def read_docx_from_bytes(file_content):
5
7
  """Read and return content from a .docx file using a byte stream."""
@@ -11,4 +13,8 @@ def read_docx_from_bytes(file_content):
11
13
  return '\n'.join(text)
12
14
  except Exception as e:
13
15
  print(f"Error reading .docx from bytes: {e}")
14
- return ""
16
+ return ""
17
+
18
+
19
+ def decode_sharepoint_string(s):
20
+ return re.sub(r'_x([0-9A-Fa-f]{4})_', lambda m: chr(int(m.group(1), 16)), s)
@@ -12,7 +12,7 @@ from pydantic import create_model, BaseModel, Field
12
12
  from ..base.tool import BaseAction
13
13
 
14
14
  from .api_wrapper import SlackApiWrapper
15
- from ..utils import TOOLKIT_SPLITTER, clean_string, get_max_toolkit_length, check_connection_response
15
+ from ..utils import clean_string, get_max_toolkit_length, check_connection_response
16
16
  from slack_sdk.errors import SlackApiError
17
17
  from slack_sdk import WebClient
18
18
 
@@ -28,12 +28,10 @@ def get_tools(tool):
28
28
 
29
29
  class SlackToolkit(BaseToolkit):
30
30
  tools: List[BaseTool] = []
31
- toolkit_max_length: int = 0
32
31
 
33
32
  @staticmethod
34
33
  def toolkit_config_schema() -> BaseModel:
35
34
  selected_tools = {x['name']: x['args_schema'].schema() for x in SlackApiWrapper.model_construct().get_available_tools()}
36
- SlackToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
37
35
 
38
36
  @check_connection_response
39
37
  def check_connection(self):
@@ -78,16 +76,19 @@ class SlackToolkit(BaseToolkit):
78
76
  **kwargs['slack_configuration'],
79
77
  }
80
78
  slack_api_wrapper = SlackApiWrapper(**wrapper_payload)
81
- prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
82
79
  available_tools = slack_api_wrapper.get_available_tools()
83
80
  tools = []
84
81
  for tool in available_tools:
85
82
  if selected_tools and tool["name"] not in selected_tools:
86
83
  continue
84
+ description = f"Slack Tool: {tool['description']}"
85
+ if toolkit_name:
86
+ description = f"{description}\nToolkit: {toolkit_name}"
87
+ description = description[:1000]
87
88
  tools.append(BaseAction(
88
89
  api_wrapper=slack_api_wrapper,
89
- name=prefix + tool["name"],
90
- description=f"Slack Tool: {tool['description']}",
90
+ name=tool["name"],
91
+ description=description,
91
92
  args_schema=tool["args_schema"],
92
93
  ))
93
94
  return cls(tools=tools)
@@ -7,7 +7,7 @@ from .api_wrapper import SQLApiWrapper
7
7
  from ..base.tool import BaseAction
8
8
  from .models import SQLDialect
9
9
  from ..elitea_base import filter_missconfigured_index_tools
10
- from ..utils import TOOLKIT_SPLITTER, clean_string, get_max_toolkit_length
10
+ from ..utils import clean_string, get_max_toolkit_length
11
11
  from ...configurations.sql import SqlConfiguration
12
12
 
13
13
  name = "sql"
@@ -24,17 +24,15 @@ def get_tools(tool):
24
24
 
25
25
  class SQLToolkit(BaseToolkit):
26
26
  tools: list[BaseTool] = []
27
- toolkit_max_length: int = 0
28
27
 
29
28
  @staticmethod
30
29
  def toolkit_config_schema() -> BaseModel:
31
30
  selected_tools = {x['name']: x['args_schema'].schema() for x in SQLApiWrapper.model_construct().get_available_tools()}
32
- SQLToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
33
31
  supported_dialects = (d.value for d in SQLDialect)
34
32
  return create_model(
35
33
  name,
36
34
  dialect=(Literal[tuple(supported_dialects)], Field(description="Database dialect (mysql or postgres)")),
37
- database_name=(str, Field(description="Database name", json_schema_extra={'toolkit_name': True, 'max_toolkit_length': SQLToolkit.toolkit_max_length})),
35
+ database_name=(str, Field(description="Database name")),
38
36
  sql_configuration=(SqlConfiguration, Field(description="SQL Configuration", json_schema_extra={'configuration_types': ['sql']})),
39
37
  selected_tools=(List[Literal[tuple(selected_tools)]], Field(default=[], json_schema_extra={'args_schemas': selected_tools})),
40
38
  __config__=ConfigDict(json_schema_extra=
@@ -56,16 +54,19 @@ class SQLToolkit(BaseToolkit):
56
54
  **kwargs.get('sql_configuration', {}),
57
55
  }
58
56
  sql_api_wrapper = SQLApiWrapper(**wrapper_payload)
59
- prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
60
57
  available_tools = sql_api_wrapper.get_available_tools()
61
58
  tools = []
62
59
  for tool in available_tools:
63
60
  if selected_tools and tool["name"] not in selected_tools:
64
61
  continue
62
+ description = f"{tool['description']}\nDatabase: {sql_api_wrapper.database_name}. Host: {sql_api_wrapper.host}"
63
+ if toolkit_name:
64
+ description = f"{description}\nToolkit: {toolkit_name}"
65
+ description = description[:1000]
65
66
  tools.append(BaseAction(
66
67
  api_wrapper=sql_api_wrapper,
67
- name=prefix + tool["name"],
68
- description=f"{tool['description']}\nDatabase: {sql_api_wrapper.database_name}. Host: {sql_api_wrapper.host}",
68
+ name=tool["name"],
69
+ description=description,
69
70
  args_schema=tool["args_schema"]
70
71
  ))
71
72
  return cls(tools=tools)
@@ -1,8 +1,9 @@
1
1
  import logging
2
- from typing import Optional, Any
2
+ from typing import Optional
3
3
 
4
- from pydantic import BaseModel, create_model, model_validator, Field, SecretStr
5
- from pydantic.fields import PrivateAttr
4
+ from langchain_core.tools import ToolException
5
+ from pydantic import create_model, SecretStr, model_validator
6
+ from pydantic.fields import PrivateAttr, Field
6
7
  from sqlalchemy import create_engine, text, inspect, Engine
7
8
  from sqlalchemy.orm import sessionmaker
8
9
 
@@ -27,7 +28,7 @@ class SQLApiWrapper(BaseToolApiWrapper):
27
28
  username: str
28
29
  password: SecretStr
29
30
  database_name: str
30
- _client: Optional[Engine] = PrivateAttr()
31
+ _client: Optional[Engine] = PrivateAttr(default=None)
31
32
 
32
33
  @model_validator(mode='before')
33
34
  @classmethod
@@ -35,27 +36,73 @@ class SQLApiWrapper(BaseToolApiWrapper):
35
36
  for field in SQLConfig.model_fields:
36
37
  if field not in values or not values[field]:
37
38
  raise ValueError(f"{field} is a required field and must be provided.")
38
-
39
- dialect = values['dialect']
40
- host = values['host']
41
- username = values['username']
42
- password = values['password']
43
- database_name = values['database_name']
44
- port = values['port']
45
-
46
- if dialect == SQLDialect.POSTGRES:
47
- connection_string = f'postgresql+psycopg2://{username}:{password}@{host}:{port}/{database_name}'
48
- elif dialect == SQLDialect.MYSQL:
49
- connection_string = f'mysql+pymysql://{username}:{password}@{host}:{port}/{database_name}'
50
- else:
51
- raise ValueError(f"Unsupported database type. Supported types are: {[e.value for e in SQLDialect]}")
52
-
53
- cls._client = create_engine(connection_string)
54
39
  return values
55
40
 
41
+ def _mask_password_in_error(self, error_message: str) -> str:
42
+ """Mask password in error messages, showing only last 4 characters."""
43
+ password_str = self.password.get_secret_value()
44
+ if len(password_str) <= 4:
45
+ masked_password = "****"
46
+ else:
47
+ masked_password = "****" + password_str[-4:]
48
+
49
+ # Replace all occurrences of the password, and any substring of the password that may appear in the error message
50
+ for part in [password_str, password_str.replace('@', ''), password_str.split('@')[-1]]:
51
+ if part and part in error_message:
52
+ error_message = error_message.replace(part, masked_password)
53
+ return error_message
54
+
55
+ @property
56
+ def client(self) -> Engine:
57
+ """Lazy property to create and return database engine with error handling."""
58
+ if self._client is None:
59
+ try:
60
+ dialect = self.dialect
61
+ host = self.host
62
+ username = self.username
63
+ password = self.password.get_secret_value()
64
+ database_name = self.database_name
65
+ port = self.port
66
+
67
+ if dialect == SQLDialect.POSTGRES:
68
+ connection_string = f'postgresql+psycopg2://{username}:{password}@{host}:{port}/{database_name}'
69
+ elif dialect == SQLDialect.MYSQL:
70
+ connection_string = f'mysql+pymysql://{username}:{password}@{host}:{port}/{database_name}'
71
+ else:
72
+ raise ValueError(f"Unsupported database type. Supported types are: {[e.value for e in SQLDialect]}")
73
+
74
+ self._client = create_engine(connection_string)
75
+
76
+ # Test the connection
77
+ with self._client.connect() as conn:
78
+ conn.execute(text("SELECT 1"))
79
+
80
+ except Exception as e:
81
+ error_message = str(e)
82
+ masked_error = self._mask_password_in_error(error_message)
83
+ logger.error(f"Database connection failed: {masked_error}")
84
+ raise ValueError(f"Database connection failed: {masked_error}")
85
+
86
+ return self._client
87
+
88
+ def _handle_database_errors(func):
89
+ """Decorator to catch exceptions and mask passwords in error messages."""
90
+
91
+ def wrapper(self, *args, **kwargs):
92
+ try:
93
+ return func(self, *args, **kwargs)
94
+ except Exception as e:
95
+ error_message = str(e)
96
+ masked_error = self._mask_password_in_error(error_message)
97
+ logger.error(f"Database operation failed in {func.__name__}: {masked_error}")
98
+ raise ToolException(masked_error)
99
+
100
+ return wrapper
101
+
102
+ @_handle_database_errors
56
103
  def execute_sql(self, sql_query: str):
57
104
  """Executes the provided SQL query on the configured database."""
58
- engine = self._client
105
+ engine = self.client
59
106
  maker_session = sessionmaker(bind=engine)
60
107
  session = maker_session()
61
108
  try:
@@ -76,9 +123,10 @@ class SQLApiWrapper(BaseToolApiWrapper):
76
123
  finally:
77
124
  session.close()
78
125
 
126
+ @_handle_database_errors
79
127
  def list_tables_and_columns(self):
80
128
  """Lists all tables and their columns in the configured database."""
81
- inspector = inspect(self._client)
129
+ inspector = inspect(self.client)
82
130
  data = {}
83
131
  tables = inspector.get_table_names()
84
132
  for table in tables:
@@ -109,4 +157,4 @@ class SQLApiWrapper(BaseToolApiWrapper):
109
157
  "description": self.list_tables_and_columns.__doc__,
110
158
  "args_schema": SQLNoInput,
111
159
  }
112
- ]
160
+ ]
@@ -6,7 +6,7 @@ from pydantic import create_model, BaseModel, ConfigDict, Field
6
6
  from .api_wrapper import TestIOApiWrapper
7
7
  from ..base.tool import BaseAction
8
8
  from ..elitea_base import filter_missconfigured_index_tools
9
- from ..utils import clean_string, TOOLKIT_SPLITTER, get_max_toolkit_length
9
+ from ..utils import clean_string, get_max_toolkit_length
10
10
  from ...configurations.testio import TestIOConfiguration
11
11
 
12
12
  name = "testio"
@@ -19,8 +19,6 @@ def get_tools(tool):
19
19
  ).get_tools()
20
20
 
21
21
 
22
- TOOLKIT_MAX_LENGTH = 25
23
-
24
22
  class TestIOToolkit(BaseToolkit):
25
23
  tools: list[BaseTool] = []
26
24
 
@@ -47,16 +45,19 @@ class TestIOToolkit(BaseToolkit):
47
45
  **kwargs.get('testio_configuration', {}),
48
46
  }
49
47
  testio_api_wrapper = TestIOApiWrapper(**wrapper_payload)
50
- prefix = clean_string(toolkit_name, TOOLKIT_MAX_LENGTH) + TOOLKIT_SPLITTER if toolkit_name else ''
51
48
  available_tools = testio_api_wrapper.get_available_tools()
52
49
  tools = []
53
50
  for tool in available_tools:
54
51
  if selected_tools and tool["name"] not in selected_tools:
55
52
  continue
53
+ description = tool["description"]
54
+ if toolkit_name:
55
+ description = f"Toolkit: {toolkit_name}\n{description}"
56
+ description = description[:1000]
56
57
  tools.append(BaseAction(
57
58
  api_wrapper=testio_api_wrapper,
58
- name=prefix + tool["name"],
59
- description=tool["description"],
59
+ name=tool["name"],
60
+ description=description,
60
61
  args_schema=tool["args_schema"]
61
62
  ))
62
63
  return cls(tools=tools)
@@ -7,7 +7,7 @@ import requests
7
7
  from .api_wrapper import TestrailAPIWrapper
8
8
  from ..base.tool import BaseAction
9
9
  from ..elitea_base import filter_missconfigured_index_tools
10
- from ..utils import clean_string, TOOLKIT_SPLITTER, get_max_toolkit_length, check_connection_response
10
+ from ..utils import clean_string, get_max_toolkit_length, check_connection_response
11
11
  from ...configurations.testrail import TestRailConfiguration
12
12
  from ...configurations.pgvector import PgVectorConfiguration
13
13
 
@@ -31,17 +31,12 @@ def get_tools(tool):
31
31
 
32
32
  class TestrailToolkit(BaseToolkit):
33
33
  tools: List[BaseTool] = []
34
- toolkit_max_length: int = 0
35
34
 
36
35
  @staticmethod
37
36
  def toolkit_config_schema() -> BaseModel:
38
37
  selected_tools = {x['name']: x['args_schema'].schema() for x in TestrailAPIWrapper.model_construct().get_available_tools()}
39
- TestrailToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
40
38
  m = create_model(
41
39
  name,
42
- name=(str, Field(description="Toolkit name", json_schema_extra={
43
- 'toolkit_name': True,
44
- "max_length": TestrailToolkit.toolkit_max_length})),
45
40
  testrail_configuration=(Optional[TestRailConfiguration], Field(description="TestRail Configuration", json_schema_extra={'configuration_types': ['testrail']})),
46
41
  pgvector_configuration=(Optional[PgVectorConfiguration], Field(default = None,
47
42
  description="PgVector Configuration", json_schema_extra={'configuration_types': ['pgvector']})),
@@ -79,17 +74,21 @@ class TestrailToolkit(BaseToolkit):
79
74
  **(kwargs.get('pgvector_configuration') or {}),
80
75
  }
81
76
  testrail_api_wrapper = TestrailAPIWrapper(**wrapper_payload)
82
- prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
83
77
  available_tools = testrail_api_wrapper.get_available_tools()
84
78
  tools = []
85
79
  for tool in available_tools:
86
80
  if selected_tools:
87
81
  if tool["name"] not in selected_tools:
88
82
  continue
83
+ description = tool["description"]
84
+ if toolkit_name:
85
+ description = f"Toolkit: {toolkit_name}\n{description}"
86
+ description = description + "\nTestrail instance: " + testrail_api_wrapper.url
87
+ description = description[:1000]
89
88
  tools.append(BaseAction(
90
89
  api_wrapper=testrail_api_wrapper,
91
- name=prefix + tool["name"],
92
- description=tool["description"] + "\nTestrail instance: " + testrail_api_wrapper.url,
90
+ name=tool["name"],
91
+ description=description,
93
92
  args_schema=tool["args_schema"]
94
93
  ))
95
94
  return cls(tools=tools)
@@ -7,6 +7,8 @@ import requests
7
7
  from pydantic import create_model, Field
8
8
 
9
9
 
10
+ # DEPRECATED: Tool names no longer use prefixes
11
+ # Kept for backward compatibility only
10
12
  TOOLKIT_SPLITTER = "___"
11
13
  TOOL_NAME_LIMIT = 64
12
14
 
@@ -22,10 +24,13 @@ def clean_string(s: str, max_length: int = 0):
22
24
 
23
25
 
24
26
def get_max_toolkit_length(selected_tools: Any):
    """DEPRECATED: tool names no longer carry a toolkit-name prefix.

    Historically this computed how many characters of the toolkit name could
    be prepended to the longest tool name without exceeding TOOL_NAME_LIMIT.
    Prefixing was removed, so the input no longer matters.

    Args:
        selected_tools: Ignored; accepted only so existing call sites keep
            working unchanged.

    Returns:
        int: A fixed default toolkit-name length (50), kept for backward
        compatibility.
    """
    import warnings  # local import: only needed on this deprecated path

    # DeprecationWarning is hidden by default outside __main__, so existing
    # callers are unaffected but test suites / -W users get a heads-up.
    warnings.warn(
        "get_max_toolkit_length is deprecated; tool names no longer use "
        "toolkit prefixes.",
        DeprecationWarning,
        stacklevel=2,
    )
    return 50
29
34
 
30
35
 
31
36
  def parse_list(list_str: str = None) -> List[str]:
@@ -97,3 +102,20 @@ def check_connection_response(check_fun):
97
102
  else:
98
103
  return f"Service Unreachable: return code {response.status_code}"
99
104
  return _wrapper
105
+
106
+
107
+ def make_json_serializable(obj):
108
+ if isinstance(obj, BaseModel):
109
+ return obj.model_dump()
110
+ if isinstance(obj, dict):
111
+ return {k: make_json_serializable(v) for k, v in obj.items()}
112
+ if isinstance(obj, list):
113
+ return [make_json_serializable(i) for i in obj]
114
+ if isinstance(obj, bool):
115
+ return bool(obj)
116
+ if isinstance(obj, (str, int, float)) or obj is None:
117
+ return obj
118
+ # Fallback: handle objects that look like booleans but were not caught above
119
+ if str(obj) in ("True", "False"):
120
+ return str(obj) == "True"
121
+ return str(obj)