alita-sdk 0.3.462__py3-none-any.whl → 0.3.627__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (261)
  1. alita_sdk/cli/agent/__init__.py +5 -0
  2. alita_sdk/cli/agent/default.py +258 -0
  3. alita_sdk/cli/agent_executor.py +15 -3
  4. alita_sdk/cli/agent_loader.py +56 -8
  5. alita_sdk/cli/agent_ui.py +93 -31
  6. alita_sdk/cli/agents.py +2274 -230
  7. alita_sdk/cli/callbacks.py +96 -25
  8. alita_sdk/cli/cli.py +10 -1
  9. alita_sdk/cli/config.py +162 -9
  10. alita_sdk/cli/context/__init__.py +30 -0
  11. alita_sdk/cli/context/cleanup.py +198 -0
  12. alita_sdk/cli/context/manager.py +731 -0
  13. alita_sdk/cli/context/message.py +285 -0
  14. alita_sdk/cli/context/strategies.py +289 -0
  15. alita_sdk/cli/context/token_estimation.py +127 -0
  16. alita_sdk/cli/input_handler.py +419 -0
  17. alita_sdk/cli/inventory.py +1073 -0
  18. alita_sdk/cli/testcases/__init__.py +94 -0
  19. alita_sdk/cli/testcases/data_generation.py +119 -0
  20. alita_sdk/cli/testcases/discovery.py +96 -0
  21. alita_sdk/cli/testcases/executor.py +84 -0
  22. alita_sdk/cli/testcases/logger.py +85 -0
  23. alita_sdk/cli/testcases/parser.py +172 -0
  24. alita_sdk/cli/testcases/prompts.py +91 -0
  25. alita_sdk/cli/testcases/reporting.py +125 -0
  26. alita_sdk/cli/testcases/setup.py +108 -0
  27. alita_sdk/cli/testcases/test_runner.py +282 -0
  28. alita_sdk/cli/testcases/utils.py +39 -0
  29. alita_sdk/cli/testcases/validation.py +90 -0
  30. alita_sdk/cli/testcases/workflow.py +196 -0
  31. alita_sdk/cli/toolkit.py +14 -17
  32. alita_sdk/cli/toolkit_loader.py +35 -5
  33. alita_sdk/cli/tools/__init__.py +36 -2
  34. alita_sdk/cli/tools/approval.py +224 -0
  35. alita_sdk/cli/tools/filesystem.py +910 -64
  36. alita_sdk/cli/tools/planning.py +389 -0
  37. alita_sdk/cli/tools/terminal.py +414 -0
  38. alita_sdk/community/__init__.py +72 -12
  39. alita_sdk/community/inventory/__init__.py +236 -0
  40. alita_sdk/community/inventory/config.py +257 -0
  41. alita_sdk/community/inventory/enrichment.py +2137 -0
  42. alita_sdk/community/inventory/extractors.py +1469 -0
  43. alita_sdk/community/inventory/ingestion.py +3172 -0
  44. alita_sdk/community/inventory/knowledge_graph.py +1457 -0
  45. alita_sdk/community/inventory/parsers/__init__.py +218 -0
  46. alita_sdk/community/inventory/parsers/base.py +295 -0
  47. alita_sdk/community/inventory/parsers/csharp_parser.py +907 -0
  48. alita_sdk/community/inventory/parsers/go_parser.py +851 -0
  49. alita_sdk/community/inventory/parsers/html_parser.py +389 -0
  50. alita_sdk/community/inventory/parsers/java_parser.py +593 -0
  51. alita_sdk/community/inventory/parsers/javascript_parser.py +629 -0
  52. alita_sdk/community/inventory/parsers/kotlin_parser.py +768 -0
  53. alita_sdk/community/inventory/parsers/markdown_parser.py +362 -0
  54. alita_sdk/community/inventory/parsers/python_parser.py +604 -0
  55. alita_sdk/community/inventory/parsers/rust_parser.py +858 -0
  56. alita_sdk/community/inventory/parsers/swift_parser.py +832 -0
  57. alita_sdk/community/inventory/parsers/text_parser.py +322 -0
  58. alita_sdk/community/inventory/parsers/yaml_parser.py +370 -0
  59. alita_sdk/community/inventory/patterns/__init__.py +61 -0
  60. alita_sdk/community/inventory/patterns/ast_adapter.py +380 -0
  61. alita_sdk/community/inventory/patterns/loader.py +348 -0
  62. alita_sdk/community/inventory/patterns/registry.py +198 -0
  63. alita_sdk/community/inventory/presets.py +535 -0
  64. alita_sdk/community/inventory/retrieval.py +1403 -0
  65. alita_sdk/community/inventory/toolkit.py +173 -0
  66. alita_sdk/community/inventory/toolkit_utils.py +176 -0
  67. alita_sdk/community/inventory/visualize.py +1370 -0
  68. alita_sdk/configurations/__init__.py +1 -1
  69. alita_sdk/configurations/ado.py +141 -20
  70. alita_sdk/configurations/bitbucket.py +0 -3
  71. alita_sdk/configurations/confluence.py +76 -42
  72. alita_sdk/configurations/figma.py +76 -0
  73. alita_sdk/configurations/gitlab.py +17 -5
  74. alita_sdk/configurations/openapi.py +329 -0
  75. alita_sdk/configurations/qtest.py +72 -1
  76. alita_sdk/configurations/report_portal.py +96 -0
  77. alita_sdk/configurations/sharepoint.py +148 -0
  78. alita_sdk/configurations/testio.py +83 -0
  79. alita_sdk/runtime/clients/artifact.py +3 -3
  80. alita_sdk/runtime/clients/client.py +353 -48
  81. alita_sdk/runtime/clients/sandbox_client.py +0 -21
  82. alita_sdk/runtime/langchain/_constants_bkup.py +1318 -0
  83. alita_sdk/runtime/langchain/assistant.py +123 -26
  84. alita_sdk/runtime/langchain/constants.py +642 -1
  85. alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py +103 -60
  86. alita_sdk/runtime/langchain/document_loaders/AlitaJSONLinesLoader.py +77 -0
  87. alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +6 -3
  88. alita_sdk/runtime/langchain/document_loaders/AlitaPowerPointLoader.py +226 -7
  89. alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py +5 -2
  90. alita_sdk/runtime/langchain/document_loaders/constants.py +12 -7
  91. alita_sdk/runtime/langchain/langraph_agent.py +279 -73
  92. alita_sdk/runtime/langchain/utils.py +82 -15
  93. alita_sdk/runtime/llms/preloaded.py +2 -6
  94. alita_sdk/runtime/skills/__init__.py +91 -0
  95. alita_sdk/runtime/skills/callbacks.py +498 -0
  96. alita_sdk/runtime/skills/discovery.py +540 -0
  97. alita_sdk/runtime/skills/executor.py +610 -0
  98. alita_sdk/runtime/skills/input_builder.py +371 -0
  99. alita_sdk/runtime/skills/models.py +330 -0
  100. alita_sdk/runtime/skills/registry.py +355 -0
  101. alita_sdk/runtime/skills/skill_runner.py +330 -0
  102. alita_sdk/runtime/toolkits/__init__.py +7 -0
  103. alita_sdk/runtime/toolkits/application.py +21 -9
  104. alita_sdk/runtime/toolkits/artifact.py +15 -5
  105. alita_sdk/runtime/toolkits/datasource.py +13 -6
  106. alita_sdk/runtime/toolkits/mcp.py +139 -251
  107. alita_sdk/runtime/toolkits/mcp_config.py +1048 -0
  108. alita_sdk/runtime/toolkits/planning.py +178 -0
  109. alita_sdk/runtime/toolkits/skill_router.py +238 -0
  110. alita_sdk/runtime/toolkits/subgraph.py +251 -6
  111. alita_sdk/runtime/toolkits/tools.py +238 -32
  112. alita_sdk/runtime/toolkits/vectorstore.py +11 -5
  113. alita_sdk/runtime/tools/__init__.py +3 -1
  114. alita_sdk/runtime/tools/application.py +20 -6
  115. alita_sdk/runtime/tools/artifact.py +511 -28
  116. alita_sdk/runtime/tools/data_analysis.py +183 -0
  117. alita_sdk/runtime/tools/function.py +43 -15
  118. alita_sdk/runtime/tools/image_generation.py +50 -44
  119. alita_sdk/runtime/tools/llm.py +852 -67
  120. alita_sdk/runtime/tools/loop.py +3 -1
  121. alita_sdk/runtime/tools/loop_output.py +3 -1
  122. alita_sdk/runtime/tools/mcp_remote_tool.py +25 -10
  123. alita_sdk/runtime/tools/mcp_server_tool.py +7 -6
  124. alita_sdk/runtime/tools/planning/__init__.py +36 -0
  125. alita_sdk/runtime/tools/planning/models.py +246 -0
  126. alita_sdk/runtime/tools/planning/wrapper.py +607 -0
  127. alita_sdk/runtime/tools/router.py +2 -4
  128. alita_sdk/runtime/tools/sandbox.py +9 -6
  129. alita_sdk/runtime/tools/skill_router.py +776 -0
  130. alita_sdk/runtime/tools/tool.py +3 -1
  131. alita_sdk/runtime/tools/vectorstore.py +7 -2
  132. alita_sdk/runtime/tools/vectorstore_base.py +51 -11
  133. alita_sdk/runtime/utils/AlitaCallback.py +137 -21
  134. alita_sdk/runtime/utils/constants.py +5 -1
  135. alita_sdk/runtime/utils/mcp_client.py +492 -0
  136. alita_sdk/runtime/utils/mcp_oauth.py +202 -5
  137. alita_sdk/runtime/utils/mcp_sse_client.py +36 -7
  138. alita_sdk/runtime/utils/mcp_tools_discovery.py +124 -0
  139. alita_sdk/runtime/utils/serialization.py +155 -0
  140. alita_sdk/runtime/utils/streamlit.py +6 -10
  141. alita_sdk/runtime/utils/toolkit_utils.py +16 -5
  142. alita_sdk/runtime/utils/utils.py +36 -0
  143. alita_sdk/tools/__init__.py +113 -29
  144. alita_sdk/tools/ado/repos/__init__.py +51 -33
  145. alita_sdk/tools/ado/repos/repos_wrapper.py +148 -89
  146. alita_sdk/tools/ado/test_plan/__init__.py +25 -9
  147. alita_sdk/tools/ado/test_plan/test_plan_wrapper.py +23 -1
  148. alita_sdk/tools/ado/utils.py +1 -18
  149. alita_sdk/tools/ado/wiki/__init__.py +25 -8
  150. alita_sdk/tools/ado/wiki/ado_wrapper.py +291 -22
  151. alita_sdk/tools/ado/work_item/__init__.py +26 -9
  152. alita_sdk/tools/ado/work_item/ado_wrapper.py +56 -3
  153. alita_sdk/tools/advanced_jira_mining/__init__.py +11 -8
  154. alita_sdk/tools/aws/delta_lake/__init__.py +13 -9
  155. alita_sdk/tools/aws/delta_lake/tool.py +5 -1
  156. alita_sdk/tools/azure_ai/search/__init__.py +11 -8
  157. alita_sdk/tools/azure_ai/search/api_wrapper.py +1 -1
  158. alita_sdk/tools/base/tool.py +5 -1
  159. alita_sdk/tools/base_indexer_toolkit.py +170 -45
  160. alita_sdk/tools/bitbucket/__init__.py +17 -12
  161. alita_sdk/tools/bitbucket/api_wrapper.py +59 -11
  162. alita_sdk/tools/bitbucket/cloud_api_wrapper.py +49 -35
  163. alita_sdk/tools/browser/__init__.py +5 -4
  164. alita_sdk/tools/carrier/__init__.py +5 -6
  165. alita_sdk/tools/carrier/backend_reports_tool.py +6 -6
  166. alita_sdk/tools/carrier/run_ui_test_tool.py +6 -6
  167. alita_sdk/tools/carrier/ui_reports_tool.py +5 -5
  168. alita_sdk/tools/chunkers/__init__.py +3 -1
  169. alita_sdk/tools/chunkers/code/treesitter/treesitter.py +37 -13
  170. alita_sdk/tools/chunkers/sematic/json_chunker.py +1 -0
  171. alita_sdk/tools/chunkers/sematic/markdown_chunker.py +97 -6
  172. alita_sdk/tools/chunkers/universal_chunker.py +270 -0
  173. alita_sdk/tools/cloud/aws/__init__.py +10 -7
  174. alita_sdk/tools/cloud/azure/__init__.py +10 -7
  175. alita_sdk/tools/cloud/gcp/__init__.py +10 -7
  176. alita_sdk/tools/cloud/k8s/__init__.py +10 -7
  177. alita_sdk/tools/code/linter/__init__.py +10 -8
  178. alita_sdk/tools/code/loaders/codesearcher.py +3 -2
  179. alita_sdk/tools/code/sonar/__init__.py +10 -7
  180. alita_sdk/tools/code_indexer_toolkit.py +73 -23
  181. alita_sdk/tools/confluence/__init__.py +21 -15
  182. alita_sdk/tools/confluence/api_wrapper.py +78 -23
  183. alita_sdk/tools/confluence/loader.py +4 -2
  184. alita_sdk/tools/custom_open_api/__init__.py +12 -5
  185. alita_sdk/tools/elastic/__init__.py +11 -8
  186. alita_sdk/tools/elitea_base.py +493 -30
  187. alita_sdk/tools/figma/__init__.py +58 -11
  188. alita_sdk/tools/figma/api_wrapper.py +1235 -143
  189. alita_sdk/tools/figma/figma_client.py +73 -0
  190. alita_sdk/tools/figma/toon_tools.py +2748 -0
  191. alita_sdk/tools/github/__init__.py +13 -14
  192. alita_sdk/tools/github/github_client.py +224 -100
  193. alita_sdk/tools/github/graphql_client_wrapper.py +119 -33
  194. alita_sdk/tools/github/schemas.py +14 -5
  195. alita_sdk/tools/github/tool.py +5 -1
  196. alita_sdk/tools/github/tool_prompts.py +9 -22
  197. alita_sdk/tools/gitlab/__init__.py +15 -11
  198. alita_sdk/tools/gitlab/api_wrapper.py +207 -41
  199. alita_sdk/tools/gitlab_org/__init__.py +10 -8
  200. alita_sdk/tools/gitlab_org/api_wrapper.py +63 -64
  201. alita_sdk/tools/google/bigquery/__init__.py +13 -12
  202. alita_sdk/tools/google/bigquery/tool.py +5 -1
  203. alita_sdk/tools/google_places/__init__.py +10 -8
  204. alita_sdk/tools/google_places/api_wrapper.py +1 -1
  205. alita_sdk/tools/jira/__init__.py +17 -11
  206. alita_sdk/tools/jira/api_wrapper.py +91 -40
  207. alita_sdk/tools/keycloak/__init__.py +11 -8
  208. alita_sdk/tools/localgit/__init__.py +9 -3
  209. alita_sdk/tools/localgit/local_git.py +62 -54
  210. alita_sdk/tools/localgit/tool.py +5 -1
  211. alita_sdk/tools/memory/__init__.py +11 -3
  212. alita_sdk/tools/non_code_indexer_toolkit.py +1 -0
  213. alita_sdk/tools/ocr/__init__.py +11 -8
  214. alita_sdk/tools/openapi/__init__.py +490 -114
  215. alita_sdk/tools/openapi/api_wrapper.py +1368 -0
  216. alita_sdk/tools/openapi/tool.py +20 -0
  217. alita_sdk/tools/pandas/__init__.py +20 -12
  218. alita_sdk/tools/pandas/api_wrapper.py +38 -25
  219. alita_sdk/tools/pandas/dataframe/generator/base.py +3 -1
  220. alita_sdk/tools/postman/__init__.py +11 -11
  221. alita_sdk/tools/pptx/__init__.py +10 -9
  222. alita_sdk/tools/pptx/pptx_wrapper.py +1 -1
  223. alita_sdk/tools/qtest/__init__.py +30 -10
  224. alita_sdk/tools/qtest/api_wrapper.py +430 -13
  225. alita_sdk/tools/rally/__init__.py +10 -8
  226. alita_sdk/tools/rally/api_wrapper.py +1 -1
  227. alita_sdk/tools/report_portal/__init__.py +12 -9
  228. alita_sdk/tools/salesforce/__init__.py +10 -9
  229. alita_sdk/tools/servicenow/__init__.py +17 -14
  230. alita_sdk/tools/servicenow/api_wrapper.py +1 -1
  231. alita_sdk/tools/sharepoint/__init__.py +10 -8
  232. alita_sdk/tools/sharepoint/api_wrapper.py +4 -4
  233. alita_sdk/tools/slack/__init__.py +10 -8
  234. alita_sdk/tools/slack/api_wrapper.py +2 -2
  235. alita_sdk/tools/sql/__init__.py +11 -9
  236. alita_sdk/tools/testio/__init__.py +10 -8
  237. alita_sdk/tools/testrail/__init__.py +11 -8
  238. alita_sdk/tools/testrail/api_wrapper.py +1 -1
  239. alita_sdk/tools/utils/__init__.py +9 -4
  240. alita_sdk/tools/utils/content_parser.py +77 -3
  241. alita_sdk/tools/utils/text_operations.py +410 -0
  242. alita_sdk/tools/utils/tool_prompts.py +79 -0
  243. alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +17 -13
  244. alita_sdk/tools/xray/__init__.py +12 -9
  245. alita_sdk/tools/yagmail/__init__.py +9 -3
  246. alita_sdk/tools/zephyr/__init__.py +9 -7
  247. alita_sdk/tools/zephyr_enterprise/__init__.py +11 -8
  248. alita_sdk/tools/zephyr_essential/__init__.py +10 -8
  249. alita_sdk/tools/zephyr_essential/api_wrapper.py +30 -13
  250. alita_sdk/tools/zephyr_essential/client.py +2 -2
  251. alita_sdk/tools/zephyr_scale/__init__.py +11 -9
  252. alita_sdk/tools/zephyr_scale/api_wrapper.py +2 -2
  253. alita_sdk/tools/zephyr_squad/__init__.py +10 -8
  254. {alita_sdk-0.3.462.dist-info → alita_sdk-0.3.627.dist-info}/METADATA +147 -7
  255. alita_sdk-0.3.627.dist-info/RECORD +468 -0
  256. alita_sdk-0.3.627.dist-info/entry_points.txt +2 -0
  257. alita_sdk-0.3.462.dist-info/RECORD +0 -384
  258. alita_sdk-0.3.462.dist-info/entry_points.txt +0 -2
  259. {alita_sdk-0.3.462.dist-info → alita_sdk-0.3.627.dist-info}/WHEEL +0 -0
  260. {alita_sdk-0.3.462.dist-info → alita_sdk-0.3.627.dist-info}/licenses/LICENSE +0 -0
  261. {alita_sdk-0.3.462.dist-info → alita_sdk-0.3.627.dist-info}/top_level.txt +0 -0
@@ -13,6 +13,8 @@ from pydantic.fields import PrivateAttr
 
 from ..code_indexer_toolkit import CodeIndexerToolkit
 from ..utils.available_tools_decorator import extend_with_parent_available_tools
+from ..elitea_base import extend_with_file_operations, BaseCodeToolApiWrapper
+from ..utils.tool_prompts import EDIT_FILE_DESCRIPTION, UPDATE_FILE_PROMPT_NO_PATH
 
 logger = logging.getLogger(__name__)
 
@@ -37,9 +39,7 @@ CreateFileModel = create_model(
 UpdateFileModel = create_model(
     "UpdateFileModel",
     file_path=(str, Field(description="The path of the file")),
-    update_query=(str, Field(description="Contains the file contents required to be updated. "
-                                         "The old file contents is wrapped in OLD <<<< and >>>> OLD. "
-                                         "The new file contents is wrapped in NEW <<<< and >>>> NEW")),
+    update_query=(str, Field(description=UPDATE_FILE_PROMPT_NO_PATH)),
     branch=(str, Field(description="The branch to update the file in")),
 )
 
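For context, the inline description removed above documented the OLD/NEW marker convention that update_query uses; the wording now lives in UPDATE_FILE_PROMPT_NO_PATH in alita_sdk/tools/utils/tool_prompts.py. A minimal illustration of that convention (the values below are made up):

    # Hypothetical update_query following the documented marker format:
    # old content wrapped in OLD <<<< ... >>>> OLD, new content in NEW <<<< ... >>>> NEW.
    update_query = (
        "OLD <<<<\n"
        "version = '0.3.462'\n"
        ">>>> OLD\n"
        "NEW <<<<\n"
        "version = '0.3.627'\n"
        ">>>> NEW"
    )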
@@ -56,7 +56,7 @@ SetActiveBranchModel = create_model(
 
 ListBranchesInRepoModel = create_model(
     "ListBranchesInRepoModel",
-    limit=(Optional[int], Field(default=20, description="Maximum number of branches to return. If not provided, all branches will be returned.")),
+    limit=(Optional[int], Field(default=20, description="Maximum number of branches to return. If not provided, all branches will be returned.", gt=0)),
     branch_wildcard=(Optional[str], Field(default=None, description="Wildcard pattern to filter branches by name. If not provided, all branches will be returned."))
 )
 
@@ -93,7 +93,7 @@ DeleteFileModel = create_model(
     "DeleteFileModel",
     file_path=(str, Field(description="The path of the file")),
     branch=(str, Field(description="The branch to delete the file from")),
-    commit_message=(str, Field(default=None, description="Commit message for deleting the file. Optional.")),
+    commit_message=(Optional[str], Field(default=None, description="Commit message for deleting the file. Optional.")),
 )
 
 AppendFileModel = create_model(
@@ -123,6 +123,12 @@ class BitbucketAPIWrapper(CodeIndexerToolkit):
 
     _bitbucket: Any = PrivateAttr()
     _active_branch: Any = PrivateAttr()
+
+    # Import file operation methods from BaseCodeToolApiWrapper
+    read_file_chunk = BaseCodeToolApiWrapper.read_file_chunk
+    read_multiple_files = BaseCodeToolApiWrapper.read_multiple_files
+    search_file = BaseCodeToolApiWrapper.search_file
+    edit_file = BaseCodeToolApiWrapper.edit_file
     url: str = ''
     project: str = ''
     """The key of the project this repo belongs to"""
@@ -263,11 +269,20 @@ class BitbucketAPIWrapper(CodeIndexerToolkit):
             >>>> NEW
             branch(str): branch name (by default: active_branch)
         Returns:
-            str: A success or failure message
+            str | ToolException: A success message or a ToolException on failure.
         """
         try:
-            result = self._bitbucket.update_file(file_path=file_path, update_query=update_query, branch=branch)
-            return result if isinstance(result, ToolException) else f"File has been updated: {file_path}."
+            # Use the shared edit_file logic from BaseCodeToolApiWrapper, operating on
+            # this wrapper instance, which provides _read_file and _write_file.
+            result = self.edit_file(
+                file_path=file_path,
+                branch=branch,
+                file_query=update_query,
+            )
+            return result
+        except ToolException as e:
+            # Pass through ToolExceptions as-is so callers can handle them uniformly.
+            return e
         except Exception as e:
             return ToolException(f"File was not updated due to error: {str(e)}")
 
@@ -360,12 +375,15 @@ class BitbucketAPIWrapper(CodeIndexerToolkit):
     # except Exception as e:
     #     raise ToolException(f"Can't extract file commit hash (`{file_path}`) due to error:\n{str(e)}")
 
-    def _read_file(self, file_path: str, branch: str) -> str:
+    def _read_file(self, file_path: str, branch: str, **kwargs) -> str:
         """
-        Reads a file from the gitlab repo
+        Reads a file from the bitbucket repo with optional partial read support.
+
         Parameters:
             file_path(str): the file path
             branch(str): branch name (by default: active_branch)
+            **kwargs: Additional parameters (offset, limit, head, tail) - currently ignored,
+                partial read handled client-side by base class methods
         Returns:
             str: The file decoded as a string
         """
@@ -399,8 +417,38 @@ class BitbucketAPIWrapper(CodeIndexerToolkit):
             return self._read_file(file_path, branch)
         except Exception as e:
             return f"Failed to read file {file_path}: {str(e)}"
+
+    def _write_file(
+        self,
+        file_path: str,
+        content: str,
+        branch: str = None,
+        commit_message: str = None,
+    ) -> str:
+        """Write content to a file (create or update) via the underlying Bitbucket client.
+
+        This delegates to the low-level BitbucketServerApi/BitbucketCloudApi `_write_file`
+        implementations, so all backend-specific commit behavior (server vs cloud) is
+        centralized there. Used by BaseCodeToolApiWrapper.edit_file.
+        """
+        branch = branch or self._active_branch
+        try:
+            # Delegate actual write/commit to the underlying API wrapper, which
+            # implements _write_file(file_path, content, branch, commit_message).
+            self._bitbucket._write_file(
+                file_path=file_path,
+                content=content,
+                branch=branch,
+                commit_message=commit_message or f"Update {file_path}",
+            )
+            return f"Update {file_path}"
+        except ToolException:
+            raise
+        except Exception as e:
+            raise ToolException(f"Unable to write file {file_path} on branch {branch}: {str(e)}")
 
     @extend_with_parent_available_tools
+    @extend_with_file_operations
     def get_available_tools(self):
         return [
             {
@@ -442,7 +490,7 @@ class BitbucketAPIWrapper(CodeIndexerToolkit):
             {
                 "name": "update_file",
                 "ref": self.update_file,
-                "description": self.update_file.__doc__ or "Update the contents of a file in the repository.",
+                "description": EDIT_FILE_DESCRIPTION,
                 "args_schema": UpdateFileModel,
             },
             {
@@ -8,7 +8,7 @@ from typing import TYPE_CHECKING, Any, Dict, List
 from atlassian.bitbucket import Bitbucket, Cloud
 from langchain_core.tools import ToolException
 from requests import Response
-from ..ado.utils import extract_old_new_pairs
+from ..utils.text_operations import parse_old_new_markers
 
 logger = logging.getLogger(__name__)
 logging.basicConfig(level=logging.DEBUG)
@@ -142,32 +142,28 @@ class BitbucketServerApi(BitbucketApiAbstract):
             filename=file_path
         )
 
-    def update_file(self, file_path: str, update_query: str, branch: str) -> str:
-        file_content = self.get_file(file_path=file_path, branch=branch)
-        updated_file_content = file_content
-        for old, new in extract_old_new_pairs(update_query):
-            if not old.strip():
-                continue
-            updated_file_content = updated_file_content.replace(old, new)
+    def _write_file(self, file_path: str, content: str, branch: str, commit_message: str) -> str:
+        """Write updated file content to Bitbucket Server.
 
-        if file_content == updated_file_content:
+        it creates a new commit on the given branch that edits the existing file.
+        """
+        # Get the latest commit on the branch (used as source_commit_id)
+        source_commit_generator = self.api_client.get_commits(project_key=self.project, repository_slug=self.repository,
+                                                              hash_newest=branch, limit=1)
+        source_commit = next(source_commit_generator, None)
+        if not source_commit:
             raise ToolException(
-                "File content was not updated because old content was not found or empty. "
-                "It may be helpful to use the read_file action to get "
-                "the current file contents."
+                f"Unable to determine latest commit on branch '{branch}' for repository '{self.repository}'."
             )
 
-        source_commit_generator = self.api_client.get_commits(project_key=self.project, repository_slug=self.repository,
-                                                              hash_newest=branch, limit=1)
-        source_commit = next(source_commit_generator)
         return self.api_client.update_file(
             project_key=self.project,
             repository_slug=self.repository,
-            content=updated_file_content,
-            message=f"Update {file_path}",
+            content=content,
+            message=commit_message or f"Update {file_path}",
             branch=branch,
             filename=file_path,
-            source_commit_id=source_commit['id']
+            source_commit_id=source_commit['id'],
         )
 
     def get_pull_request_commits(self, pr_id: str) -> List[Dict[str, Any]]:
@@ -294,7 +290,37 @@ class BitbucketCloudApi(BitbucketApiAbstract):
             return None
 
     def get_file(self, file_path: str, branch: str) -> str:
-        return self.repository.get(path=f'src/{branch}/{file_path}')
+        """Fetch a file's content from Bitbucket Cloud and return it as text.
+
+        Uses the 'get' endpoint with advanced_mode to get a rich response object.
+        """
+        try:
+            file_response = self.repository.get(
+                path=f"src/{branch}/{file_path}",
+                advanced_mode=True,
+            )
+
+            # Prefer HTTP status when available
+            status = getattr(file_response, "status_code", None)
+            if status is not None and status != 200:
+                raise ToolException(
+                    f"Failed to retrieve text from file '{file_path}' from branch '{branch}': "
+                    f"HTTP {status}"
+                )
+
+            # Safely extract text content
+            file_text = getattr(file_response, "text", None)
+            if not isinstance(file_text, str) or not file_text:
+                raise ToolException(
+                    f"File '{file_path}' from branch '{branch}' is empty or could not be retrieved."
+                )
+
+            return file_text
+        except Exception as e:
+            # Network/transport or client-level failure
+            raise ToolException(
+                f"Failed to retrieve text from file '{file_path}' from branch '{branch}': {e}"
+            )
 
     def get_files_list(self, file_path: str, branch: str) -> list:
         files_list = []
@@ -315,22 +341,10 @@
         return self.repository.post(path='src', data=form_data, files={},
                                     headers={'Content-Type': 'application/x-www-form-urlencoded'})
 
-    def update_file(self, file_path: str, update_query: str, branch: str) -> ToolException | str:
-
-        file_content = self.get_file(file_path=file_path, branch=branch)
-        updated_file_content = file_content
-        for old, new in extract_old_new_pairs(file_query=update_query):
-            if not old.strip():
-                continue
-            updated_file_content = updated_file_content.replace(old, new)
-
-        if file_content == updated_file_content:
-            return ToolException(
-                "File content was not updated because old content was not found or empty. "
-                "It may be helpful to use the read_file action to get "
-                "the current file contents."
-            )
-        return self.create_file(file_path, updated_file_content, branch)
+    def _write_file(self, file_path: str, content: str, branch: str, commit_message: str) -> str:
+        """Write updated file content to Bitbucket Cloud.
+        """
+        return self.create_file(file_path=file_path, file_contents=content, branch=branch)
 
     def get_pull_request_commits(self, pr_id: str) -> List[Dict[str, Any]]:
         """
@@ -8,7 +8,7 @@ from langchain_community.utilities.wikipedia import WikipediaAPIWrapper
 from .google_search_rag import GoogleSearchResults
 from .crawler import SingleURLCrawler, MultiURLCrawler, GetHTMLContent, GetPDFContent
 from .wiki import WikipediaQueryRun
-from ..utils import get_max_toolkit_length, clean_string, TOOLKIT_SPLITTER
+from ..utils import get_max_toolkit_length, clean_string
 from ...configurations.browser import BrowserConfiguration
 from logging import getLogger
 
@@ -42,7 +42,6 @@ class BrowserToolkit(BaseToolkit):
             'google': GoogleSearchResults.__pydantic_fields__['args_schema'].default.schema(),
             'wiki': WikipediaQueryRun.__pydantic_fields__['args_schema'].default.schema()
         }
-        BrowserToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
 
     def validate_google_fields(cls, values):
         if 'google' in values.get('selected_tools', []):
@@ -90,7 +89,6 @@ class BrowserToolkit(BaseToolkit):
         }
 
         tools = []
-        prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
         if not selected_tools:
             selected_tools = [
                 'single_url_crawler',
@@ -127,7 +125,10 @@
 
             # Only add the tool if it was successfully created
             if tool_entry is not None:
-                tool_entry.name = f"{prefix}{tool_entry.name}"
+                if toolkit_name:
+                    tool_entry.description = f"{tool_entry.description}\nToolkit: {toolkit_name}"
+                tool_entry.description = tool_entry.description[:1000]
+                tool_entry.metadata = {"toolkit_name": toolkit_name, "toolkit_type": name}
                 tools.append(tool_entry)
         return cls(tools=tools)
 
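The same pattern recurs in the carrier toolkit below: tool names are no longer prefixed with the toolkit name, and toolkit identity travels in the description and metadata instead. A runnable sketch of the new tagging logic with made-up values (the old separator came from TOOLKIT_SPLITTER in alita_sdk/tools/utils):

    from types import SimpleNamespace

    # Illustrative only: mirrors the tagging applied above.
    tool_entry = SimpleNamespace(name="single_url_crawler",
                                 description="Crawl a single URL.", metadata=None)
    toolkit_name, name = "my_browser", "browser"
    if toolkit_name:
        tool_entry.description = f"{tool_entry.description}\nToolkit: {toolkit_name}"
    tool_entry.description = tool_entry.description[:1000]  # keep descriptions bounded
    tool_entry.metadata = {"toolkit_name": toolkit_name, "toolkit_type": name}
    print(tool_entry.name, tool_entry.metadata)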
@@ -7,7 +7,7 @@ from functools import lru_cache
 from .api_wrapper import CarrierAPIWrapper
 from .tools import __all__
 from ..elitea_base import filter_missconfigured_index_tools
-from ..utils import clean_string, TOOLKIT_SPLITTER, get_max_toolkit_length
+from ..utils import clean_string, get_max_toolkit_length
 from ...configurations.carrier import CarrierConfiguration
 
 logger = logging.getLogger(__name__)
@@ -17,7 +17,6 @@ name = 'carrier'
 
 class AlitaCarrierToolkit(BaseToolkit):
     tools: List[BaseTool] = []
-    toolkit_max_length: int = 100
 
     @classmethod
     @lru_cache(maxsize=32)
@@ -26,7 +25,6 @@ class AlitaCarrierToolkit(BaseToolkit):
         for t in __all__:
             default = t['tool'].__pydantic_fields__['args_schema'].default
             selected_tools[t['name']] = default.schema() if default else default
-        cls.toolkit_max_length = get_max_toolkit_length(selected_tools)
         return create_model(
             name,
             project_id=(Optional[str], Field(None, description="Optional project ID for scoped operations")),
@@ -70,15 +68,16 @@ class AlitaCarrierToolkit(BaseToolkit):
             logger.exception(f"[AlitaCarrierToolkit] Error initializing CarrierAPIWrapper: {e}")
             raise ValueError(f"CarrierAPIWrapper initialization error: {e}")
 
-        prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
-
         tools = []
         for tool_def in __all__:
             if selected_tools and tool_def['name'] not in selected_tools:
                 continue
             try:
                 tool_instance = tool_def['tool'](api_wrapper=carrier_api_wrapper)
-                tool_instance.name = prefix + tool_instance.name
+                if toolkit_name:
+                    tool_instance.description = f"{tool_instance.description}\nToolkit: {toolkit_name}"
+                tool_instance.description = tool_instance.description[:1000]
+                tool_instance.metadata = {"toolkit_name": toolkit_name, "toolkit_type": name}
                 tools.append(tool_instance)
                 logger.info(f"[AlitaCarrierToolkit] Successfully initialized tool '{tool_instance.name}'")
             except Exception as e:
@@ -4,7 +4,7 @@ import json
 import zipfile
 from itertools import islice
 import traceback
-from typing import Type
+from typing import Optional, Type
 from langchain_core.tools import BaseTool, ToolException
 from pydantic.fields import Field
 from pydantic import create_model, BaseModel
@@ -145,12 +145,12 @@ class CreateExcelReportTool(BaseTool):
     description: str = "Create excel report by report ID from Carrier."
     args_schema: Type[BaseModel] = create_model(
         "CreateExcelReportInput",
-        report_id=(str, Field(default=None, description="Report ID to retrieve")),
-        bucket=(str, Field(default=None, description="Bucket with jtl/log file")),
-        file_name=(str, Field(default=None, description="File name for .jtl or .log report")),
+        report_id=(Optional[str], Field(default=None, description="Report ID to retrieve")),
+        bucket=(Optional[str], Field(default=None, description="Bucket with jtl/log file")),
+        file_name=(Optional[str], Field(default=None, description="File name for .jtl or .log report")),
         **{
-            "think_time": (str, Field(default=None, description="Think time parameter")),
-            "pct": (str, Field(default=None, description="Percentile parameter")),
+            "think_time": (Optional[str], Field(default=None, description="Think time parameter")),
+            "pct": (Optional[str], Field(default=None, description="Percentile parameter")),
             "tp_threshold": (int, Field(default=None, description="Throughput threshold")),
             "rt_threshold": (int, Field(default=None, description="Response time threshold")),
             "er_threshold": (int, Field(default=None, description="Error rate threshold")),
@@ -1,7 +1,7 @@
 import logging
 import json
 import traceback
-from typing import Type
+from typing import Optional, Type
 from langchain_core.tools import BaseTool, ToolException
 from pydantic.fields import Field
 from pydantic import create_model, BaseModel
@@ -23,11 +23,11 @@ class RunUITestTool(BaseTool):
         "RunUITestInput",
         test_id=(str, Field(default="", description="Test ID to execute")),
         test_name=(str, Field(default="", description="Test name to execute")),
-        cpu_quota=(str, Field(default=None, description="CPU quota for the test runner")),
-        memory_quota=(str, Field(default=None, description="Memory quota for the test runner")),
-        cloud_settings=(str, Field(default=None, description="Cloud settings name for the test runner")),
-        custom_cmd=(str, Field(default=None, description="Custom command to run with the test")),
-        loops=(str, Field(default=None, description="Number of loops to run the test")),
+        cpu_quota=(Optional[str], Field(default=None, description="CPU quota for the test runner")),
+        memory_quota=(Optional[str], Field(default=None, description="Memory quota for the test runner")),
+        cloud_settings=(Optional[str], Field(default=None, description="Cloud settings name for the test runner")),
+        custom_cmd=(Optional[str], Field(default=None, description="Custom command to run with the test")),
+        loops=(Optional[str], Field(default=None, description="Number of loops to run the test")),
         proceed_with_defaults=(bool, Field(default=False, description="Proceed with default configuration. True ONLY when user directly wants to run the test with default parameters." \
             " If cpu_quota, memory_quota, cloud_settings, custom_cmd, or loops are provided, proceed_with_defaults must be False")),
     )
@@ -2,7 +2,7 @@ import logging
 import json
 import traceback
 import datetime
-from typing import Type
+from typing import Optional, Type
 from langchain_core.tools import BaseTool, ToolException
 from pydantic.fields import Field
 from pydantic import create_model, BaseModel
@@ -19,9 +19,9 @@ class GetUIReportsTool(BaseTool):
         report_id=(str, Field(description="UI Report id to retrieve")),
         current_date=(str, Field(default=datetime.datetime.now().strftime("%Y-%m-%d"), description="Current date in YYYY-MM-DD format (auto-filled)")),
         **{
-            "name": (str, Field(default=None, description="Optional. Filter reports by name (case-insensitive, partial match)")),
-            "start_time": (str, Field(default=None, description="Start date/time for filtering reports (YYYY-MM-DD or ISO format)")),
-            "end_time": (str, Field(default=None, description="End date/time for filtering reports (YYYY-MM-DD or ISO format)")),
+            "name": (Optional[str], Field(default=None, description="Optional. Filter reports by name (case-insensitive, partial match)")),
+            "start_time": (Optional[str], Field(default=None, description="Start date/time for filtering reports (YYYY-MM-DD or ISO format)")),
+            "end_time": (Optional[str], Field(default=None, description="End date/time for filtering reports (YYYY-MM-DD or ISO format)")),
         }
     )
 
@@ -178,7 +178,7 @@ class GetUITestsTool(BaseTool):
     args_schema: Type[BaseModel] = create_model(
         "GetUITestsInput",
         **{
-            "name": (str, Field(default=None, description="Optional. Filter tests by name (case-insensitive, partial match)")),
+            "name": (Optional[str], Field(default=None, description="Optional. Filter tests by name (case-insensitive, partial match)")),
             "include_schedules": (bool, Field(default=False, description="Optional. Include test schedules in the response")),
             "include_config": (bool, Field(default=False, description="Optional. Include detailed configuration in the response")),
         }
@@ -3,6 +3,7 @@ from .sematic.statistical_chunker import statistical_chunker
 from .sematic.markdown_chunker import markdown_chunker
 from .sematic.proposal_chunker import proposal_chunker
 from .sematic.json_chunker import json_chunker
+from .universal_chunker import universal_chunker, chunk_single_document, get_file_type
 from .models import StatisticalChunkerConfig, MarkdownChunkerConfig, ProposalChunkerConfig
 
 __all__ = {
@@ -10,7 +11,8 @@ __all__ = {
     'statistical': statistical_chunker,
     'markdown': markdown_chunker,
     'proposal': proposal_chunker,
-    'json': json_chunker
+    'json': json_chunker,
+    'universal': universal_chunker,
 }
 
 __confluence_chunkers__ = {
@@ -40,25 +40,49 @@ class Treesitter(ABC):
         return TreesitterRegistry.create_treesitter(language)
 
     def parse(self, file_bytes: bytes) -> list[TreesitterMethodNode]:
-        """
-        Parses the given file bytes and extracts method nodes.
+        """Parses the given file bytes and extracts method nodes.
+
+        If no nodes matching the configured ``method_declaration_identifier`` are
+        found, a single fallback node spanning the entire file is returned so
+        that callers always receive at least one ``TreesitterMethodNode``.
 
         Args:
             file_bytes (bytes): The content of the file to be parsed.
 
         Returns:
-            list[TreesitterMethodNode]: A list of TreesitterMethodNode objects representing the methods in the file.
+            list[TreesitterMethodNode]: A list of TreesitterMethodNode objects
+                representing the methods in the file, or a single fallback node
+                covering the whole file when no methods are detected.
         """
         self.tree = self.parser.parse(file_bytes)
-        result = []
         methods = self._query_all_methods(self.tree.root_node)
-        for method in methods:
-            method_name = self._query_method_name(method["method"])
-            doc_comment = method["doc_comment"]
-            result.append(
-                TreesitterMethodNode(method_name, doc_comment, None, method["method"])
+
+        # Normal path: at least one method node was found.
+        if methods:
+            result: list[TreesitterMethodNode] = []
+            for method in methods:
+                method_name = self._query_method_name(method["method"])
+                doc_comment = method["doc_comment"]
+                result.append(
+                    TreesitterMethodNode(
+                        method_name, doc_comment, None, method["method"]
+                    )
+                )
+            return result
+
+        # Fallback path: no method nodes were found. Return a single node that
+        # spans the entire file so that callers can still index/summarize the
+        # content even when the language-specific patterns do not match.
+        full_source = file_bytes.decode(errors="replace")
+        fallback_node = self.tree.root_node
+        return [
+            TreesitterMethodNode(
+                name=None,
+                doc_comment=None,
+                method_source_code=full_source,
+                node=fallback_node,
             )
-        return result
+        ]
 
     def _query_all_methods(
         self,
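The shape of this change, reduced to a small illustrative sketch (hypothetical helper names, not the SDK's API): when the language-specific query finds no methods, the parser now degrades to a single whole-file node instead of returning an empty list.

    def extract_methods(source: str, find_methods) -> list[dict]:
        methods = find_methods(source)  # stand-in for the language-specific tree-sitter query
        if methods:
            return methods
        # Whole-file fallback so downstream chunking/indexing always gets one node.
        return [{"name": None, "source": source}]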
@@ -71,7 +95,8 @@
             node (tree_sitter.Node): The root node to start the query from.
 
         Returns:
-            list: A list of dictionaries, each containing a method node and its associated doc comment (if any).
+            list: A list of dictionaries, each containing a method node and its
+                associated doc comment (if any).
         """
         methods = []
         if node.type == self.method_declaration_identifier:
@@ -88,8 +113,7 @@
         return methods
 
     def _query_method_name(self, node: tree_sitter.Node):
-        """
-        Queries the method name from the given syntax tree node.
+        """Queries the method name from the given syntax tree node.
 
         Args:
             node (tree_sitter.Node): The syntax tree node to query.
@@ -17,6 +17,7 @@ def json_chunker(file_content_generator: Generator[Document, None, None], config
         for chunk in chunks:
             metadata = doc.metadata.copy()
             metadata['chunk_id'] = chunk_id
+            metadata['method_name'] = 'json'
             chunk_id += 1
             yield Document(page_content=json.dumps(chunk), metadata=metadata)
     except Exception as e: