alita-sdk 0.3.554__py3-none-any.whl → 0.3.603__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This is a potentially problematic release.


This version of alita-sdk might be problematic; see the package registry page for more details.

Files changed (116)
  1. alita_sdk/cli/agent_executor.py +2 -1
  2. alita_sdk/cli/agent_loader.py +34 -4
  3. alita_sdk/cli/agents.py +433 -203
  4. alita_sdk/configurations/openapi.py +227 -15
  5. alita_sdk/runtime/clients/client.py +4 -2
  6. alita_sdk/runtime/langchain/_constants_bkup.py +1318 -0
  7. alita_sdk/runtime/langchain/assistant.py +61 -11
  8. alita_sdk/runtime/langchain/constants.py +419 -171
  9. alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +4 -2
  10. alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py +5 -2
  11. alita_sdk/runtime/langchain/langraph_agent.py +106 -21
  12. alita_sdk/runtime/langchain/utils.py +30 -14
  13. alita_sdk/runtime/toolkits/__init__.py +3 -0
  14. alita_sdk/runtime/toolkits/artifact.py +2 -1
  15. alita_sdk/runtime/toolkits/mcp.py +6 -3
  16. alita_sdk/runtime/toolkits/mcp_config.py +1048 -0
  17. alita_sdk/runtime/toolkits/skill_router.py +2 -2
  18. alita_sdk/runtime/toolkits/tools.py +64 -2
  19. alita_sdk/runtime/toolkits/vectorstore.py +1 -1
  20. alita_sdk/runtime/tools/artifact.py +15 -0
  21. alita_sdk/runtime/tools/data_analysis.py +183 -0
  22. alita_sdk/runtime/tools/llm.py +30 -11
  23. alita_sdk/runtime/tools/mcp_server_tool.py +6 -3
  24. alita_sdk/runtime/tools/router.py +2 -4
  25. alita_sdk/runtime/tools/sandbox.py +9 -6
  26. alita_sdk/runtime/utils/constants.py +5 -1
  27. alita_sdk/runtime/utils/mcp_client.py +1 -1
  28. alita_sdk/runtime/utils/mcp_sse_client.py +1 -1
  29. alita_sdk/runtime/utils/toolkit_utils.py +2 -0
  30. alita_sdk/tools/__init__.py +3 -1
  31. alita_sdk/tools/ado/repos/__init__.py +26 -8
  32. alita_sdk/tools/ado/repos/repos_wrapper.py +78 -52
  33. alita_sdk/tools/ado/test_plan/__init__.py +3 -2
  34. alita_sdk/tools/ado/test_plan/test_plan_wrapper.py +23 -1
  35. alita_sdk/tools/ado/utils.py +1 -18
  36. alita_sdk/tools/ado/wiki/__init__.py +2 -1
  37. alita_sdk/tools/ado/wiki/ado_wrapper.py +23 -1
  38. alita_sdk/tools/ado/work_item/__init__.py +3 -2
  39. alita_sdk/tools/ado/work_item/ado_wrapper.py +23 -1
  40. alita_sdk/tools/advanced_jira_mining/__init__.py +2 -1
  41. alita_sdk/tools/aws/delta_lake/__init__.py +2 -1
  42. alita_sdk/tools/azure_ai/search/__init__.py +2 -1
  43. alita_sdk/tools/azure_ai/search/api_wrapper.py +1 -1
  44. alita_sdk/tools/base_indexer_toolkit.py +15 -6
  45. alita_sdk/tools/bitbucket/__init__.py +2 -1
  46. alita_sdk/tools/bitbucket/api_wrapper.py +1 -1
  47. alita_sdk/tools/bitbucket/cloud_api_wrapper.py +3 -3
  48. alita_sdk/tools/browser/__init__.py +1 -1
  49. alita_sdk/tools/carrier/__init__.py +1 -1
  50. alita_sdk/tools/chunkers/code/treesitter/treesitter.py +37 -13
  51. alita_sdk/tools/cloud/aws/__init__.py +2 -1
  52. alita_sdk/tools/cloud/azure/__init__.py +2 -1
  53. alita_sdk/tools/cloud/gcp/__init__.py +2 -1
  54. alita_sdk/tools/cloud/k8s/__init__.py +2 -1
  55. alita_sdk/tools/code/linter/__init__.py +2 -1
  56. alita_sdk/tools/code/sonar/__init__.py +2 -1
  57. alita_sdk/tools/code_indexer_toolkit.py +19 -2
  58. alita_sdk/tools/confluence/__init__.py +7 -6
  59. alita_sdk/tools/confluence/api_wrapper.py +2 -2
  60. alita_sdk/tools/custom_open_api/__init__.py +2 -1
  61. alita_sdk/tools/elastic/__init__.py +2 -1
  62. alita_sdk/tools/elitea_base.py +28 -9
  63. alita_sdk/tools/figma/__init__.py +52 -6
  64. alita_sdk/tools/figma/api_wrapper.py +1158 -123
  65. alita_sdk/tools/figma/figma_client.py +73 -0
  66. alita_sdk/tools/figma/toon_tools.py +2748 -0
  67. alita_sdk/tools/github/__init__.py +2 -1
  68. alita_sdk/tools/github/github_client.py +69 -97
  69. alita_sdk/tools/github/schemas.py +4 -4
  70. alita_sdk/tools/gitlab/__init__.py +2 -1
  71. alita_sdk/tools/gitlab/api_wrapper.py +118 -38
  72. alita_sdk/tools/gitlab_org/__init__.py +2 -1
  73. alita_sdk/tools/gitlab_org/api_wrapper.py +60 -62
  74. alita_sdk/tools/google/bigquery/__init__.py +2 -1
  75. alita_sdk/tools/google_places/__init__.py +2 -1
  76. alita_sdk/tools/jira/__init__.py +2 -1
  77. alita_sdk/tools/keycloak/__init__.py +2 -1
  78. alita_sdk/tools/localgit/__init__.py +2 -1
  79. alita_sdk/tools/memory/__init__.py +1 -1
  80. alita_sdk/tools/ocr/__init__.py +2 -1
  81. alita_sdk/tools/openapi/__init__.py +227 -15
  82. alita_sdk/tools/openapi/api_wrapper.py +1287 -802
  83. alita_sdk/tools/pandas/__init__.py +11 -5
  84. alita_sdk/tools/pandas/api_wrapper.py +38 -25
  85. alita_sdk/tools/postman/__init__.py +2 -1
  86. alita_sdk/tools/pptx/__init__.py +2 -1
  87. alita_sdk/tools/qtest/__init__.py +21 -2
  88. alita_sdk/tools/qtest/api_wrapper.py +430 -13
  89. alita_sdk/tools/rally/__init__.py +2 -1
  90. alita_sdk/tools/rally/api_wrapper.py +1 -1
  91. alita_sdk/tools/report_portal/__init__.py +2 -1
  92. alita_sdk/tools/salesforce/__init__.py +2 -1
  93. alita_sdk/tools/servicenow/__init__.py +2 -1
  94. alita_sdk/tools/sharepoint/__init__.py +2 -1
  95. alita_sdk/tools/sharepoint/api_wrapper.py +2 -2
  96. alita_sdk/tools/slack/__init__.py +3 -2
  97. alita_sdk/tools/slack/api_wrapper.py +2 -2
  98. alita_sdk/tools/sql/__init__.py +3 -2
  99. alita_sdk/tools/testio/__init__.py +2 -1
  100. alita_sdk/tools/testrail/__init__.py +2 -1
  101. alita_sdk/tools/utils/content_parser.py +77 -3
  102. alita_sdk/tools/utils/text_operations.py +163 -71
  103. alita_sdk/tools/xray/__init__.py +3 -2
  104. alita_sdk/tools/yagmail/__init__.py +2 -1
  105. alita_sdk/tools/zephyr/__init__.py +2 -1
  106. alita_sdk/tools/zephyr_enterprise/__init__.py +2 -1
  107. alita_sdk/tools/zephyr_essential/__init__.py +2 -1
  108. alita_sdk/tools/zephyr_scale/__init__.py +3 -2
  109. alita_sdk/tools/zephyr_scale/api_wrapper.py +2 -2
  110. alita_sdk/tools/zephyr_squad/__init__.py +2 -1
  111. {alita_sdk-0.3.554.dist-info → alita_sdk-0.3.603.dist-info}/METADATA +7 -6
  112. {alita_sdk-0.3.554.dist-info → alita_sdk-0.3.603.dist-info}/RECORD +116 -111
  113. {alita_sdk-0.3.554.dist-info → alita_sdk-0.3.603.dist-info}/WHEEL +0 -0
  114. {alita_sdk-0.3.554.dist-info → alita_sdk-0.3.603.dist-info}/entry_points.txt +0 -0
  115. {alita_sdk-0.3.554.dist-info → alita_sdk-0.3.603.dist-info}/licenses/LICENSE +0 -0
  116. {alita_sdk-0.3.554.dist-info → alita_sdk-0.3.603.dist-info}/top_level.txt +0 -0
@@ -40,25 +40,49 @@ class Treesitter(ABC):
40
40
  return TreesitterRegistry.create_treesitter(language)
41
41
 
42
42
  def parse(self, file_bytes: bytes) -> list[TreesitterMethodNode]:
43
- """
44
- Parses the given file bytes and extracts method nodes.
43
+ """Parses the given file bytes and extracts method nodes.
44
+
45
+ If no nodes matching the configured ``method_declaration_identifier`` are
46
+ found, a single fallback node spanning the entire file is returned so
47
+ that callers always receive at least one ``TreesitterMethodNode``.
45
48
 
46
49
  Args:
47
50
  file_bytes (bytes): The content of the file to be parsed.
48
51
 
49
52
  Returns:
50
- list[TreesitterMethodNode]: A list of TreesitterMethodNode objects representing the methods in the file.
53
+ list[TreesitterMethodNode]: A list of TreesitterMethodNode objects
54
+ representing the methods in the file, or a single fallback node
55
+ covering the whole file when no methods are detected.
51
56
  """
52
57
  self.tree = self.parser.parse(file_bytes)
53
- result = []
54
58
  methods = self._query_all_methods(self.tree.root_node)
55
- for method in methods:
56
- method_name = self._query_method_name(method["method"])
57
- doc_comment = method["doc_comment"]
58
- result.append(
59
- TreesitterMethodNode(method_name, doc_comment, None, method["method"])
59
+
60
+ # Normal path: at least one method node was found.
61
+ if methods:
62
+ result: list[TreesitterMethodNode] = []
63
+ for method in methods:
64
+ method_name = self._query_method_name(method["method"])
65
+ doc_comment = method["doc_comment"]
66
+ result.append(
67
+ TreesitterMethodNode(
68
+ method_name, doc_comment, None, method["method"]
69
+ )
70
+ )
71
+ return result
72
+
73
+ # Fallback path: no method nodes were found. Return a single node that
74
+ # spans the entire file so that callers can still index/summarize the
75
+ # content even when the language-specific patterns do not match.
76
+ full_source = file_bytes.decode(errors="replace")
77
+ fallback_node = self.tree.root_node
78
+ return [
79
+ TreesitterMethodNode(
80
+ name=None,
81
+ doc_comment=None,
82
+ method_source_code=full_source,
83
+ node=fallback_node,
60
84
  )
61
- return result
85
+ ]
62
86
 
63
87
  def _query_all_methods(
64
88
  self,
@@ -71,7 +95,8 @@ class Treesitter(ABC):
71
95
  node (tree_sitter.Node): The root node to start the query from.
72
96
 
73
97
  Returns:
74
- list: A list of dictionaries, each containing a method node and its associated doc comment (if any).
98
+ list: A list of dictionaries, each containing a method node and its
99
+ associated doc comment (if any).
75
100
  """
76
101
  methods = []
77
102
  if node.type == self.method_declaration_identifier:
@@ -88,8 +113,7 @@ class Treesitter(ABC):
88
113
  return methods
89
114
 
90
115
  def _query_method_name(self, node: tree_sitter.Node):
91
- """
92
- Queries the method name from the given syntax tree node.
116
+ """Queries the method name from the given syntax tree node.
93
117
 
94
118
  Args:
95
119
  node (tree_sitter.Node): The syntax tree node to query.
@@ -7,6 +7,7 @@ from .api_wrapper import AWSToolConfig
7
7
  from ...base.tool import BaseAction
8
8
  from ...elitea_base import filter_missconfigured_index_tools
9
9
  from ...utils import clean_string, get_max_toolkit_length
10
+ from ....runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META
10
11
 
11
12
  name = "aws"
12
13
 
@@ -64,7 +65,7 @@ class AWSToolkit(BaseToolkit):
64
65
  name=tool["name"],
65
66
  description=description,
66
67
  args_schema=tool["args_schema"],
67
- metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
68
+ metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
68
69
  ))
69
70
  return cls(tools=tools)
70
71
 
@@ -7,6 +7,7 @@ from .api_wrapper import AzureApiWrapper
7
7
  from ...base.tool import BaseAction
8
8
  from ...elitea_base import filter_missconfigured_index_tools
9
9
  from ...utils import clean_string, get_max_toolkit_length
10
+ from ....runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META
10
11
 
11
12
  name = "azure"
12
13
 
@@ -57,7 +58,7 @@ class AzureToolkit(BaseToolkit):
57
58
  name=tool["name"],
58
59
  description=description,
59
60
  args_schema=tool["args_schema"],
60
- metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
61
+ metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
61
62
  ))
62
63
  return cls(tools=tools)
63
64
 
@@ -7,6 +7,7 @@ from .api_wrapper import GCPApiWrapper
7
7
  from ...base.tool import BaseAction
8
8
  from ...elitea_base import filter_missconfigured_index_tools
9
9
  from ...utils import clean_string, get_max_toolkit_length
10
+ from ....runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META
10
11
 
11
12
  name = "gcp"
12
13
 
@@ -51,7 +52,7 @@ class GCPToolkit(BaseToolkit):
51
52
  name=tool["name"],
52
53
  description=description,
53
54
  args_schema=tool["args_schema"],
54
- metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
55
+ metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
55
56
  ))
56
57
  return cls(tools=tools)
57
58
 
@@ -7,6 +7,7 @@ from .api_wrapper import KubernetesApiWrapper
7
7
  from ...base.tool import BaseAction
8
8
  from ...elitea_base import filter_missconfigured_index_tools
9
9
  from ...utils import clean_string, get_max_toolkit_length
10
+ from ....runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META
10
11
 
11
12
  name = "kubernetes"
12
13
 
@@ -61,7 +62,7 @@ class KubernetesToolkit(BaseToolkit):
61
62
  name=tool["name"],
62
63
  description=description,
63
64
  args_schema=tool["args_schema"],
64
- metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
65
+ metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
65
66
  ))
66
67
  return cls(tools=tools)
67
68
 
@@ -6,6 +6,7 @@ from pydantic import BaseModel, create_model, Field
6
6
  from .api_wrapper import PythonLinter
7
7
  from ...base.tool import BaseAction
8
8
  from ...utils import clean_string, get_max_toolkit_length
9
+ from ....runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META
9
10
 
10
11
  name = "python_linter"
11
12
 
@@ -49,7 +50,7 @@ class PythonLinterToolkit(BaseToolkit):
49
50
  name=tool["name"],
50
51
  description=description,
51
52
  args_schema=tool["args_schema"],
52
- metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
53
+ metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
53
54
  ))
54
55
  return cls(tools=tools)
55
56
 
@@ -7,6 +7,7 @@ from ...base.tool import BaseAction
7
7
  from ...elitea_base import filter_missconfigured_index_tools
8
8
  from ...utils import clean_string, get_max_toolkit_length
9
9
  from ....configurations.sonar import SonarConfiguration
10
+ from ....runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META
10
11
 
11
12
  name = "sonar"
12
13
 
@@ -65,7 +66,7 @@ class SonarToolkit(BaseToolkit):
65
66
  name=tool["name"],
66
67
  description=description,
67
68
  args_schema=tool["args_schema"],
68
- metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
69
+ metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
69
70
  ))
70
71
  return cls(tools=tools)
71
72
 
@@ -38,12 +38,14 @@ class CodeIndexerToolkit(BaseIndexerToolkit):
38
38
  branch: Optional[str] = None,
39
39
  whitelist: Optional[List[str]] = None,
40
40
  blacklist: Optional[List[str]] = None,
41
+ chunking_config: Optional[dict] = None,
41
42
  **kwargs) -> Generator[Document, None, None]:
42
43
  """Index repository files in the vector store using code parsing."""
43
44
  yield from self.loader(
44
45
  branch=branch,
45
46
  whitelist=whitelist,
46
- blacklist=blacklist
47
+ blacklist=blacklist,
48
+ chunking_config=chunking_config
47
49
  )
48
50
 
49
51
  def _extend_data(self, documents: Generator[Document, None, None]):
@@ -67,7 +69,8 @@ class CodeIndexerToolkit(BaseIndexerToolkit):
67
69
  branch: Optional[str] = None,
68
70
  whitelist: Optional[List[str]] = None,
69
71
  blacklist: Optional[List[str]] = None,
70
- chunked: bool = True) -> Generator[Document, None, None]:
72
+ chunked: bool = True,
73
+ chunking_config: Optional[dict] = None) -> Generator[Document, None, None]:
71
74
  """
72
75
  Generates Documents from files in a branch, respecting whitelist and blacklist patterns.
73
76
 
@@ -77,6 +80,7 @@ class CodeIndexerToolkit(BaseIndexerToolkit):
77
80
  - blacklist (Optional[List[str]]): File extensions or paths to exclude. Defaults to no exclusions if None.
78
81
  - chunked (bool): If True (default), applies universal chunker based on file type.
79
82
  If False, returns raw Documents without chunking.
83
+ - chunking_config (Optional[dict]): Chunking configuration by file extension
80
84
 
81
85
  Returns:
82
86
  - generator: Yields Documents from files matching the whitelist but not the blacklist.
@@ -101,6 +105,19 @@ class CodeIndexerToolkit(BaseIndexerToolkit):
101
105
  """
102
106
  import hashlib
103
107
 
108
+ # Auto-include extensions from chunking_config if whitelist is specified
109
+ # This allows chunking config to work without manually adding extensions to whitelist
110
+ if chunking_config and whitelist:
111
+ for ext_pattern in chunking_config.keys():
112
+ # Normalize extension pattern (both ".cbl" and "*.cbl" should work)
113
+ normalized = ext_pattern if ext_pattern.startswith('*') else f'*{ext_pattern}'
114
+ if normalized not in whitelist:
115
+ whitelist.append(normalized)
116
+ self._log_tool_event(
117
+ message=f"Auto-included extension '{normalized}' from chunking_config",
118
+ tool_name="loader"
119
+ )
120
+
104
121
  _files = self.__handle_get_files("", self.__get_branch(branch))
105
122
  self._log_tool_event(message="Listing files in branch", tool_name="loader")
106
123
  logger.info(f"Files in branch: {_files}")
@@ -10,6 +10,7 @@ from ..utils import clean_string, get_max_toolkit_length, parse_list, check_conn
10
10
  from ...configurations.confluence import ConfluenceConfiguration
11
11
  from ...configurations.pgvector import PgVectorConfiguration
12
12
  import requests
13
+ from ...runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META
13
14
 
14
15
  name = "confluence"
15
16
 
@@ -70,16 +71,16 @@ class ConfluenceToolkit(BaseToolkit):
70
71
  name,
71
72
  space=(str, Field(description="Space")),
72
73
  cloud=(bool, Field(description="Hosting Option", json_schema_extra={'configuration': True})),
73
- limit=(int, Field(description="Pages limit per request", default=5)),
74
+ limit=(int, Field(description="Pages limit per request", default=5, gt=0)),
74
75
  labels=(Optional[str], Field(
75
76
  description="List of comma separated labels used for labeling of agent's created or updated entities",
76
77
  default=None,
77
78
  examples="alita,elitea;another-label"
78
79
  )),
79
- max_pages=(int, Field(description="Max total pages", default=10)),
80
- number_of_retries=(int, Field(description="Number of retries", default=2)),
81
- min_retry_seconds=(int, Field(description="Min retry, sec", default=10)),
82
- max_retry_seconds=(int, Field(description="Max retry, sec", default=60)),
80
+ max_pages=(int, Field(description="Max total pages", default=10, gt=0)),
81
+ number_of_retries=(int, Field(description="Number of retries", default=2, ge=0)),
82
+ min_retry_seconds=(int, Field(description="Min retry, sec", default=10, ge=0)),
83
+ max_retry_seconds=(int, Field(description="Max retry, sec", default=60, ge=0)),
83
84
  # optional field for custom headers as dictionary
84
85
  custom_headers=(Optional[dict], Field(description="Custom headers for API requests", default={})),
85
86
  confluence_configuration=(ConfluenceConfiguration, Field(description="Confluence Configuration", json_schema_extra={'configuration_types': ['confluence']})),
@@ -131,7 +132,7 @@ class ConfluenceToolkit(BaseToolkit):
131
132
  name=tool["name"],
132
133
  description=description,
133
134
  args_schema=tool["args_schema"],
134
- metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
135
+ metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
135
136
  ))
136
137
  return cls(tools=tools)
137
138
 
@@ -1730,8 +1730,8 @@ class ConfluenceAPIWrapper(NonCodeIndexerToolkit):
1730
1730
  "page_ids": (Optional[List[str]], Field(description="List of page IDs to retrieve.", default=None)),
1731
1731
  "label": (Optional[str], Field(description="Label to filter pages.", default=None)),
1732
1732
  "cql": (Optional[str], Field(description="CQL query to filter pages.", default=None)),
1733
- "limit": (Optional[int], Field(description="Limit the number of results.", default=10)),
1734
- "max_pages": (Optional[int], Field(description="Maximum number of pages to retrieve.", default=1000)),
1733
+ "limit": (Optional[int], Field(description="Limit the number of results.", default=10, gt=0)),
1734
+ "max_pages": (Optional[int], Field(description="Maximum number of pages to retrieve.", default=1000, gt=0)),
1735
1735
  "include_restricted_content": (Optional[bool], Field(description="Include restricted content.", default=False)),
1736
1736
  "include_archived_content": (Optional[bool], Field(description="Include archived content.", default=False)),
1737
1737
  "include_attachments": (Optional[bool], Field(description="Include attachments.", default=False)),
@@ -6,6 +6,7 @@ from pydantic import create_model, BaseModel, ConfigDict, Field
6
6
  from .api_wrapper import OpenApiWrapper
7
7
  from ..base.tool import BaseAction
8
8
  from ..utils import clean_string
9
+ from ...runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META
9
10
 
10
11
  name = "openapi"
11
12
 
@@ -57,7 +58,7 @@ class OpenApiToolkit(BaseToolkit):
57
58
  name=tool["name"],
58
59
  description=description,
59
60
  args_schema=tool["args_schema"],
60
- metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
61
+ metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
61
62
  ))
62
63
  return cls(tools=tools)
63
64
 
@@ -6,6 +6,7 @@ from pydantic import BaseModel, ConfigDict, create_model, Field, SecretStr
6
6
  from .api_wrapper import ELITEAElasticApiWrapper
7
7
  from ..base.tool import BaseAction
8
8
  from ..utils import clean_string, get_max_toolkit_length
9
+ from ...runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META
9
10
 
10
11
  name = "elastic"
11
12
 
@@ -58,7 +59,7 @@ class ElasticToolkit(BaseToolkit):
58
59
  name=tool["name"],
59
60
  description=description,
60
61
  args_schema=tool["args_schema"],
61
- metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
62
+ metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
62
63
  ))
63
64
  return cls(tools=tools)
64
65
 
@@ -842,7 +842,7 @@ class BaseCodeToolApiWrapper(BaseVectorStoreToolApiWrapper):
842
842
  Raises:
843
843
  ToolException: If file is not text-editable or edit fails
844
844
  """
845
- from .utils.text_operations import parse_old_new_markers, is_text_editable
845
+ from .utils.text_operations import parse_old_new_markers, is_text_editable, try_apply_edit
846
846
  from langchain_core.callbacks import dispatch_custom_event
847
847
 
848
848
  # Validate file is text-editable
@@ -863,26 +863,45 @@ class BaseCodeToolApiWrapper(BaseVectorStoreToolApiWrapper):
863
863
  # Read current file content
864
864
  try:
865
865
  current_content = self._read_file(file_path, branch)
866
+ if not isinstance(current_content, str):
867
+ # If current_content is a ToolException or any non-str, raise or return it
868
+ raise current_content if isinstance(current_content, Exception) else ToolException(str(current_content))
866
869
  except Exception as e:
867
870
  raise ToolException(f"Failed to read file {file_path}: {e}")
868
871
 
869
- # Apply all edits
872
+ # Apply all edits (with tolerant fallback)
870
873
  updated_content = current_content
874
+ fallbacks_used = 0
875
+ edits_applied = 0
871
876
  for old_text, new_text in edits:
872
877
  if not old_text.strip():
873
878
  continue
874
879
 
875
- if old_text not in updated_content:
880
+ new_updated, used_fallback = try_apply_edit(
881
+ content=updated_content,
882
+ old_text=old_text,
883
+ new_text=new_text,
884
+ file_path=file_path,
885
+ )
886
+
887
+ if new_updated == updated_content:
888
+ # No change applied for this pair (exact nor fallback)
876
889
  logger.warning(
877
- f"Old content not found in {file_path}. "
878
- f"Looking for: {old_text[:100]}..."
890
+ "Old content not found or could not be safely matched in %s. Snippet: %s...",
891
+ file_path,
892
+ old_text[:100].replace("\n", "\\n"),
879
893
  )
880
894
  continue
881
-
882
- updated_content = updated_content.replace(old_text, new_text)
895
+
896
+ # A replacement was applied
897
+ edits_applied += 1
898
+ if used_fallback:
899
+ fallbacks_used += 1
900
+
901
+ updated_content = new_updated
883
902
 
884
903
  # Check if any changes were made
885
- if current_content == updated_content:
904
+ if current_content == updated_content or edits_applied == 0:
886
905
  return (
887
906
  f"No changes made to {file_path}. "
888
907
  "Old content was not found or is empty. "
@@ -908,7 +927,7 @@ class BaseCodeToolApiWrapper(BaseVectorStoreToolApiWrapper):
908
927
  "tool_name": "edit_file",
909
928
  "toolkit": self.__class__.__name__,
910
929
  "operation_type": "modify",
911
- "edits_applied": len(edits)
930
+ "edits_applied": edits_applied,
912
931
  })
913
932
  except Exception as e:
914
933
  logger.warning(f"Failed to dispatch file_modified event: {e}")
@@ -1,14 +1,20 @@
1
- from typing import List, Literal, Optional
1
+ from typing import Dict, List, Literal, Optional
2
2
 
3
3
  from langchain_core.tools import BaseTool, BaseToolkit
4
4
  from pydantic import BaseModel, ConfigDict, Field, create_model
5
5
 
6
6
  from ..base.tool import BaseAction
7
- from .api_wrapper import FigmaApiWrapper, GLOBAL_LIMIT
7
+ from .api_wrapper import (
8
+ FigmaApiWrapper,
9
+ GLOBAL_LIMIT,
10
+ DEFAULT_FIGMA_IMAGES_PROMPT,
11
+ DEFAULT_FIGMA_SUMMARY_PROMPT,
12
+ DEFAULT_NUMBER_OF_THREADS,
13
+ )
8
14
  from ..elitea_base import filter_missconfigured_index_tools
9
- from ..utils import clean_string, get_max_toolkit_length
10
15
  from ...configurations.figma import FigmaConfiguration
11
16
  from ...configurations.pgvector import PgVectorConfiguration
17
+ from ...runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META
12
18
 
13
19
  name = "figma"
14
20
 
@@ -28,7 +34,14 @@ def get_tools(tool):
28
34
  collection_name=str(tool['toolkit_name']),
29
35
  doctype='doc',
30
36
  embedding_model=tool['settings'].get('embedding_model'),
31
- vectorstore_type="PGVector"
37
+ vectorstore_type="PGVector",
38
+ # figma summary/image prompt settings (toolkit-level)
39
+ # TODO disabled until new requirements
40
+ # apply_images_prompt=tool["settings"].get("apply_images_prompt"),
41
+ # images_prompt=tool["settings"].get("images_prompt"),
42
+ # apply_summary_prompt=tool["settings"].get("apply_summary_prompt"),
43
+ # summary_prompt=tool["settings"].get("summary_prompt"),
44
+ # number_of_threads=tool["settings"].get("number_of_threads"),
32
45
  )
33
46
  .get_tools()
34
47
  )
@@ -45,7 +58,40 @@ class FigmaToolkit(BaseToolkit):
45
58
  }
46
59
  return create_model(
47
60
  name,
48
- global_limit=(Optional[int], Field(description="Global limit", default=GLOBAL_LIMIT)),
61
+ # TODO disabled until new requirements
62
+ # apply_images_prompt=(Optional[bool], Field(
63
+ # description="Enable advanced image processing instructions for Figma image nodes.",
64
+ # default=True,
65
+ # )),
66
+ # images_prompt=(Optional[Dict[str, str]], Field(
67
+ # description=(
68
+ # "Instruction for how to analyze image-based nodes "
69
+ # "(screenshots, diagrams, etc.) during Figma file retrieving. "
70
+ # "Must contain a single 'prompt' key with the text."
71
+ # ),
72
+ # default=DEFAULT_FIGMA_IMAGES_PROMPT,
73
+ # )),
74
+ # apply_summary_prompt=(Optional[bool], Field(
75
+ # description="Enable LLM-based summarization over loaded Figma data.",
76
+ # default=True,
77
+ # )),
78
+ # summary_prompt=(Optional[Dict[str, str]], Field(
79
+ # description=(
80
+ # "Instruction for the LLM on how to summarize loaded Figma data. "
81
+ # "Must contain a single 'prompt' key with the text."
82
+ # ),
83
+ # default=DEFAULT_FIGMA_SUMMARY_PROMPT,
84
+ # )),
85
+ number_of_threads=(Optional[int], Field(
86
+ description=(
87
+ "Number of worker threads to use when downloading and processing Figma images. "
88
+ f"Valid values are from 1 to 5. Default is {DEFAULT_NUMBER_OF_THREADS}."
89
+ ),
90
+ default=DEFAULT_NUMBER_OF_THREADS,
91
+ ge=1,
92
+ le=5,
93
+ )),
94
+ global_limit=(Optional[int], Field(description="Global limit", default=GLOBAL_LIMIT, gt=0)),
49
95
  global_regexp=(Optional[str], Field(description="Global regex pattern", default=None)),
50
96
  selected_tools=(
51
97
  List[Literal[tuple(selected_tools)]],
@@ -98,7 +144,7 @@ class FigmaToolkit(BaseToolkit):
98
144
  name=tool["name"],
99
145
  description=description,
100
146
  args_schema=tool["args_schema"],
101
- metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
147
+ metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
102
148
  )
103
149
  )
104
150
  return cls(tools=tools)