alita-sdk 0.3.532__py3-none-any.whl → 0.3.602__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of alita-sdk might be problematic.

Files changed (137)
  1. alita_sdk/cli/agent_executor.py +2 -1
  2. alita_sdk/cli/agent_loader.py +34 -4
  3. alita_sdk/cli/agents.py +433 -203
  4. alita_sdk/community/__init__.py +8 -4
  5. alita_sdk/configurations/__init__.py +1 -0
  6. alita_sdk/configurations/openapi.py +323 -0
  7. alita_sdk/runtime/clients/client.py +165 -7
  8. alita_sdk/runtime/langchain/_constants_bkup.py +1318 -0
  9. alita_sdk/runtime/langchain/assistant.py +61 -11
  10. alita_sdk/runtime/langchain/constants.py +419 -171
  11. alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +4 -2
  12. alita_sdk/runtime/langchain/document_loaders/AlitaTextLoader.py +5 -2
  13. alita_sdk/runtime/langchain/langraph_agent.py +108 -23
  14. alita_sdk/runtime/langchain/utils.py +76 -14
  15. alita_sdk/runtime/skills/__init__.py +91 -0
  16. alita_sdk/runtime/skills/callbacks.py +498 -0
  17. alita_sdk/runtime/skills/discovery.py +540 -0
  18. alita_sdk/runtime/skills/executor.py +610 -0
  19. alita_sdk/runtime/skills/input_builder.py +371 -0
  20. alita_sdk/runtime/skills/models.py +330 -0
  21. alita_sdk/runtime/skills/registry.py +355 -0
  22. alita_sdk/runtime/skills/skill_runner.py +330 -0
  23. alita_sdk/runtime/toolkits/__init__.py +5 -0
  24. alita_sdk/runtime/toolkits/artifact.py +2 -1
  25. alita_sdk/runtime/toolkits/mcp.py +6 -3
  26. alita_sdk/runtime/toolkits/mcp_config.py +1048 -0
  27. alita_sdk/runtime/toolkits/skill_router.py +238 -0
  28. alita_sdk/runtime/toolkits/tools.py +139 -10
  29. alita_sdk/runtime/toolkits/vectorstore.py +1 -1
  30. alita_sdk/runtime/tools/__init__.py +3 -1
  31. alita_sdk/runtime/tools/artifact.py +15 -0
  32. alita_sdk/runtime/tools/data_analysis.py +183 -0
  33. alita_sdk/runtime/tools/llm.py +260 -73
  34. alita_sdk/runtime/tools/loop.py +3 -1
  35. alita_sdk/runtime/tools/loop_output.py +3 -1
  36. alita_sdk/runtime/tools/mcp_server_tool.py +6 -3
  37. alita_sdk/runtime/tools/router.py +2 -4
  38. alita_sdk/runtime/tools/sandbox.py +9 -6
  39. alita_sdk/runtime/tools/skill_router.py +776 -0
  40. alita_sdk/runtime/tools/tool.py +3 -1
  41. alita_sdk/runtime/tools/vectorstore.py +7 -2
  42. alita_sdk/runtime/tools/vectorstore_base.py +7 -2
  43. alita_sdk/runtime/utils/constants.py +5 -1
  44. alita_sdk/runtime/utils/mcp_client.py +1 -1
  45. alita_sdk/runtime/utils/mcp_sse_client.py +1 -1
  46. alita_sdk/runtime/utils/toolkit_utils.py +2 -0
  47. alita_sdk/tools/__init__.py +44 -2
  48. alita_sdk/tools/ado/repos/__init__.py +26 -8
  49. alita_sdk/tools/ado/repos/repos_wrapper.py +78 -52
  50. alita_sdk/tools/ado/test_plan/__init__.py +3 -2
  51. alita_sdk/tools/ado/test_plan/test_plan_wrapper.py +23 -1
  52. alita_sdk/tools/ado/utils.py +1 -18
  53. alita_sdk/tools/ado/wiki/__init__.py +2 -1
  54. alita_sdk/tools/ado/wiki/ado_wrapper.py +23 -1
  55. alita_sdk/tools/ado/work_item/__init__.py +3 -2
  56. alita_sdk/tools/ado/work_item/ado_wrapper.py +56 -3
  57. alita_sdk/tools/advanced_jira_mining/__init__.py +2 -1
  58. alita_sdk/tools/aws/delta_lake/__init__.py +2 -1
  59. alita_sdk/tools/azure_ai/search/__init__.py +2 -1
  60. alita_sdk/tools/azure_ai/search/api_wrapper.py +1 -1
  61. alita_sdk/tools/base_indexer_toolkit.py +51 -30
  62. alita_sdk/tools/bitbucket/__init__.py +2 -1
  63. alita_sdk/tools/bitbucket/api_wrapper.py +1 -1
  64. alita_sdk/tools/bitbucket/cloud_api_wrapper.py +3 -3
  65. alita_sdk/tools/browser/__init__.py +1 -1
  66. alita_sdk/tools/carrier/__init__.py +1 -1
  67. alita_sdk/tools/chunkers/code/treesitter/treesitter.py +37 -13
  68. alita_sdk/tools/cloud/aws/__init__.py +2 -1
  69. alita_sdk/tools/cloud/azure/__init__.py +2 -1
  70. alita_sdk/tools/cloud/gcp/__init__.py +2 -1
  71. alita_sdk/tools/cloud/k8s/__init__.py +2 -1
  72. alita_sdk/tools/code/linter/__init__.py +2 -1
  73. alita_sdk/tools/code/sonar/__init__.py +2 -1
  74. alita_sdk/tools/code_indexer_toolkit.py +19 -2
  75. alita_sdk/tools/confluence/__init__.py +7 -6
  76. alita_sdk/tools/confluence/api_wrapper.py +7 -8
  77. alita_sdk/tools/confluence/loader.py +4 -2
  78. alita_sdk/tools/custom_open_api/__init__.py +2 -1
  79. alita_sdk/tools/elastic/__init__.py +2 -1
  80. alita_sdk/tools/elitea_base.py +28 -9
  81. alita_sdk/tools/figma/__init__.py +52 -6
  82. alita_sdk/tools/figma/api_wrapper.py +1158 -123
  83. alita_sdk/tools/figma/figma_client.py +73 -0
  84. alita_sdk/tools/figma/toon_tools.py +2748 -0
  85. alita_sdk/tools/github/__init__.py +2 -1
  86. alita_sdk/tools/github/github_client.py +56 -92
  87. alita_sdk/tools/github/schemas.py +4 -4
  88. alita_sdk/tools/gitlab/__init__.py +2 -1
  89. alita_sdk/tools/gitlab/api_wrapper.py +118 -38
  90. alita_sdk/tools/gitlab_org/__init__.py +2 -1
  91. alita_sdk/tools/gitlab_org/api_wrapper.py +60 -62
  92. alita_sdk/tools/google/bigquery/__init__.py +2 -1
  93. alita_sdk/tools/google_places/__init__.py +2 -1
  94. alita_sdk/tools/jira/__init__.py +2 -1
  95. alita_sdk/tools/keycloak/__init__.py +2 -1
  96. alita_sdk/tools/localgit/__init__.py +2 -1
  97. alita_sdk/tools/memory/__init__.py +1 -1
  98. alita_sdk/tools/ocr/__init__.py +2 -1
  99. alita_sdk/tools/openapi/__init__.py +490 -118
  100. alita_sdk/tools/openapi/api_wrapper.py +1368 -0
  101. alita_sdk/tools/openapi/tool.py +20 -0
  102. alita_sdk/tools/pandas/__init__.py +11 -5
  103. alita_sdk/tools/pandas/api_wrapper.py +38 -25
  104. alita_sdk/tools/pandas/dataframe/generator/base.py +3 -1
  105. alita_sdk/tools/postman/__init__.py +2 -1
  106. alita_sdk/tools/pptx/__init__.py +2 -1
  107. alita_sdk/tools/qtest/__init__.py +21 -2
  108. alita_sdk/tools/qtest/api_wrapper.py +430 -13
  109. alita_sdk/tools/rally/__init__.py +2 -1
  110. alita_sdk/tools/rally/api_wrapper.py +1 -1
  111. alita_sdk/tools/report_portal/__init__.py +2 -1
  112. alita_sdk/tools/salesforce/__init__.py +2 -1
  113. alita_sdk/tools/servicenow/__init__.py +11 -10
  114. alita_sdk/tools/servicenow/api_wrapper.py +1 -1
  115. alita_sdk/tools/sharepoint/__init__.py +2 -1
  116. alita_sdk/tools/sharepoint/api_wrapper.py +2 -2
  117. alita_sdk/tools/slack/__init__.py +3 -2
  118. alita_sdk/tools/slack/api_wrapper.py +2 -2
  119. alita_sdk/tools/sql/__init__.py +3 -2
  120. alita_sdk/tools/testio/__init__.py +2 -1
  121. alita_sdk/tools/testrail/__init__.py +2 -1
  122. alita_sdk/tools/utils/content_parser.py +77 -3
  123. alita_sdk/tools/utils/text_operations.py +163 -71
  124. alita_sdk/tools/xray/__init__.py +3 -2
  125. alita_sdk/tools/yagmail/__init__.py +2 -1
  126. alita_sdk/tools/zephyr/__init__.py +2 -1
  127. alita_sdk/tools/zephyr_enterprise/__init__.py +2 -1
  128. alita_sdk/tools/zephyr_essential/__init__.py +2 -1
  129. alita_sdk/tools/zephyr_scale/__init__.py +3 -2
  130. alita_sdk/tools/zephyr_scale/api_wrapper.py +2 -2
  131. alita_sdk/tools/zephyr_squad/__init__.py +2 -1
  132. {alita_sdk-0.3.532.dist-info → alita_sdk-0.3.602.dist-info}/METADATA +7 -6
  133. {alita_sdk-0.3.532.dist-info → alita_sdk-0.3.602.dist-info}/RECORD +137 -119
  134. {alita_sdk-0.3.532.dist-info → alita_sdk-0.3.602.dist-info}/WHEEL +0 -0
  135. {alita_sdk-0.3.532.dist-info → alita_sdk-0.3.602.dist-info}/entry_points.txt +0 -0
  136. {alita_sdk-0.3.532.dist-info → alita_sdk-0.3.602.dist-info}/licenses/LICENSE +0 -0
  137. {alita_sdk-0.3.532.dist-info → alita_sdk-0.3.602.dist-info}/top_level.txt +0 -0
alita_sdk/tools/slack/__init__.py

@@ -15,6 +15,7 @@ from .api_wrapper import SlackApiWrapper
 from ..utils import clean_string, get_max_toolkit_length, check_connection_response
 from slack_sdk.errors import SlackApiError
 from slack_sdk import WebClient
+from ...runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META
 
 name = "slack"
 
@@ -85,12 +86,12 @@ class SlackToolkit(BaseToolkit):
             if toolkit_name:
                 description = f"{description}\nToolkit: {toolkit_name}"
             description = description[:1000]
-            tools.append(BaseAction(
+            tools.append(BaseAction(
                 api_wrapper=slack_api_wrapper,
                 name=tool["name"],
                 description=description,
                 args_schema=tool["args_schema"],
-                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
+                metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
             ))
         return cls(tools=tools)
 
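This metadata change repeats across most of the toolkits in this release: the ad-hoc "toolkit_name" key is replaced by shared constants, and the tool name is now always recorded even when no toolkit name is set. A minimal sketch of the resulting behavior, assuming the constants are plain string keys (their actual values live in alita_sdk/runtime/utils/constants.py and are not shown in this diff):

    # Hypothetical stand-ins; the real values are defined in
    # alita_sdk/runtime/utils/constants.py and are not part of this diff.
    TOOLKIT_NAME_META = "toolkit_name"
    TOOLKIT_TYPE_META = "toolkit_type"
    TOOL_NAME_META = "tool_name"

    name = "slack"  # module-level toolkit type, as in the file above

    def build_metadata(toolkit_name, tool_name):
        # Mirrors the conditional used across the toolkits in this release:
        # with a toolkit name we record name, type, and tool; without one,
        # only the tool name is kept.
        if toolkit_name:
            return {TOOLKIT_NAME_META: toolkit_name,
                    TOOLKIT_TYPE_META: name,
                    TOOL_NAME_META: tool_name}
        return {TOOL_NAME_META: tool_name}

    print(build_metadata("my_slack", "send_message"))
    # {'toolkit_name': 'my_slack', 'toolkit_type': 'slack', 'tool_name': 'send_message'}
    print(build_metadata(None, "send_message"))
    # {'tool_name': 'send_message'}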
alita_sdk/tools/slack/api_wrapper.py

@@ -17,8 +17,8 @@ SendMessageModel = create_model(
 
 ReadMessagesModel = create_model(
     "ReadMessagesModel",
-    channel_id=(Optional[str], Field(default=None,description="Channel ID, user ID, or conversation ID to read messages from. (like C12345678 for public channels, D12345678 for DMs)")),
-    limit=(int, Field(default=10, description="The number of messages to fetch (default is 10)."))
+    channel_id=(Optional[str], Field(default=None,description="Channel ID, user ID, or conversation ID to read messages from. (like C12345678 for public channels, D12345678 for DMs)")),
+    limit=(int, Field(default=10, description="The number of messages to fetch (default is 10).", gt=0))
 )
 
 CreateChannelModel = create_model(
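Several schemas in this release add gt=0 to count-like integer fields, so zero and negative values are rejected at validation time instead of reaching the API. A short illustration with pydantic v2's create_model (the model name below is hypothetical):

    from pydantic import Field, ValidationError, create_model

    ReadMessagesDemo = create_model(
        "ReadMessagesDemo",
        limit=(int, Field(default=10, description="Messages to fetch.", gt=0)),
    )

    print(ReadMessagesDemo().limit)         # 10 -- the default still applies
    print(ReadMessagesDemo(limit=5).limit)  # 5

    try:
        ReadMessagesDemo(limit=0)           # rejected: 0 is not greater than 0
    except ValidationError as exc:
        print(exc.errors()[0]["type"])      # 'greater_than' (pydantic v2)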
alita_sdk/tools/sql/__init__.py

@@ -9,6 +9,7 @@ from .models import SQLDialect
 from ..elitea_base import filter_missconfigured_index_tools
 from ..utils import clean_string, get_max_toolkit_length
 from ...configurations.sql import SqlConfiguration
+from ...runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META
 
 name = "sql"
 
@@ -31,7 +32,7 @@ class SQLToolkit(BaseToolkit):
         supported_dialects = (d.value for d in SQLDialect)
         return create_model(
             name,
-            dialect=(Literal[tuple(supported_dialects)], Field(description="Database dialect (mysql or postgres)")),
+            dialect=(Literal[tuple(supported_dialects)], Field(default=SQLDialect.POSTGRES.value, description="Database dialect (mysql or postgres)")),
             database_name=(str, Field(description="Database name")),
             sql_configuration=(SqlConfiguration, Field(description="SQL Configuration", json_schema_extra={'configuration_types': ['sql']})),
             selected_tools=(List[Literal[tuple(selected_tools)]], Field(default=[], json_schema_extra={'args_schemas': selected_tools})),
@@ -68,7 +69,7 @@ class SQLToolkit(BaseToolkit):
                 name=tool["name"],
                 description=description,
                 args_schema=tool["args_schema"],
-                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
+                metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
             ))
         return cls(tools=tools)
 
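The dialect field now carries a default, so a configuration that omits it validates as postgres instead of failing. A sketch of the Literal-plus-default combination, assuming SQLDialect is a str-valued enum with mysql and postgres members (the real enum in alita_sdk/tools/sql/models.py is not shown in this diff):

    from enum import Enum
    from typing import Literal

    from pydantic import Field, create_model

    # Hypothetical stand-in for alita_sdk.tools.sql.models.SQLDialect.
    class SQLDialect(str, Enum):
        MYSQL = "mysql"
        POSTGRES = "postgres"

    supported = tuple(d.value for d in SQLDialect)
    Config = create_model(
        "Config",
        dialect=(Literal[supported], Field(default=SQLDialect.POSTGRES.value,
                                           description="Database dialect")),
    )

    print(Config().dialect)                 # 'postgres' -- omitting dialect now works
    print(Config(dialect="mysql").dialect)  # 'mysql'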
alita_sdk/tools/testio/__init__.py

@@ -8,6 +8,7 @@ from ..base.tool import BaseAction
 from ..elitea_base import filter_missconfigured_index_tools
 from ..utils import clean_string, get_max_toolkit_length
 from ...configurations.testio import TestIOConfiguration
+from ...runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META
 
 name = "testio"
 
@@ -59,7 +60,7 @@ class TestIOToolkit(BaseToolkit):
                 name=tool["name"],
                 description=description,
                 args_schema=tool["args_schema"],
-                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
+                metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
             ))
         return cls(tools=tools)
 
alita_sdk/tools/testrail/__init__.py

@@ -10,6 +10,7 @@ from ..elitea_base import filter_missconfigured_index_tools
 from ..utils import clean_string, get_max_toolkit_length, check_connection_response
 from ...configurations.testrail import TestRailConfiguration
 from ...configurations.pgvector import PgVectorConfiguration
+from ...runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META
 
 name = "testrail"
 
@@ -90,7 +91,7 @@ class TestrailToolkit(BaseToolkit):
                 name=tool["name"],
                 description=description,
                 args_schema=tool["args_schema"],
-                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
+                metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
             ))
         return cls(tools=tools)
 
alita_sdk/tools/utils/content_parser.py

@@ -109,7 +109,15 @@ def parse_file_content(file_name=None, file_content=None, is_capture_image: bool
                                    loader_extra_config=loader_kwargs,
                                    llm=llm)
     except Exception as e:
-        return ToolException(f"Error reading file ({file_name or file_path}) content. Make sure these types are supported: {str(e)}")
+        # Surface full underlying error message (including nested causes) so that
+        # JSONDecodeError or other specific issues are not hidden behind
+        # generic RuntimeError messages from loaders.
+        root_msg = str(e)
+        if getattr(e, "__cause__", None):
+            root_msg = f"{root_msg} | Cause: {e.__cause__}"
+        return ToolException(
+            f"Error reading file ({file_name or file_path}) content. Make sure these types are supported: {root_msg}"
+        )
 
 def load_file_docs(file_name=None, file_content=None, is_capture_image: bool = False, page_number: int = None,
                    sheet_name: str = None, llm=None, file_path: str = None, excel_by_sheets: bool = False) -> List[Document] | ToolException:
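The new error path appends e.__cause__ because str(e) on a wrapping exception shows only the outer message; the chained original error (populated by `raise ... from ...`) stays hidden. A standalone illustration of the mechanism (not SDK code):

    import json

    def load(payload: str):
        try:
            return json.loads(payload)
        except json.JSONDecodeError as inner:
            # 'raise ... from inner' stores the original error in __cause__
            raise RuntimeError("loader failed") from inner

    try:
        load("{not json")
    except Exception as e:
        msg = str(e)                      # 'loader failed' -- cause is invisible
        if getattr(e, "__cause__", None):
            msg = f"{msg} | Cause: {e.__cause__}"
        print(msg)
        # loader failed | Cause: Expecting property name enclosed in double quotes: ...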
@@ -130,7 +138,38 @@ def load_file_docs(file_name=None, file_content=None, is_capture_image: bool = F
 
 def get_loader_kwargs(loader_object, file_name=None, file_content=None, is_capture_image: bool = False, page_number: int = None,
                       sheet_name: str = None, llm=None, file_path: str = None, excel_by_sheets: bool = False, prompt=None):
-    loader_kwargs = deepcopy(loader_object['kwargs'])
+    """Build loader kwargs safely without deepcopying non-picklable objects like LLMs.
+
+    We avoid copying keys that are going to be overridden by this function anyway
+    (file_path, file_content, file_name, extract_images, llm, page_number,
+    sheet_name, excel_by_sheets, prompt, row_content, json_documents) to
+    prevent errors such as `cannot pickle '_thread.RLock' object` when an LLM
+    or client with internal locks is stored in the original kwargs.
+    """
+    if not loader_object:
+        raise ToolException("Loader configuration is missing.")
+
+    original_kwargs = loader_object.get("kwargs", {}) or {}
+
+    # Keys that will be overwritten below – skip them when copying
+    overridden_keys = {
+        "file_path",
+        "file_content",
+        "file_name",
+        "extract_images",
+        "llm",
+        "page_number",
+        "sheet_name",
+        "excel_by_sheets",
+        "prompt",
+        "row_content",
+        "json_documents",
+    }
+
+    # Build a safe shallow copy without overridden keys to avoid deepcopy
+    # of potentially non-picklable objects (e.g., llm with internal RLock).
+    loader_kwargs = {k: v for k, v in original_kwargs.items() if k not in overridden_keys}
+
     loader_kwargs.update({
         "file_path": file_path,
         "file_content": file_content,
@@ -212,6 +251,41 @@ def load_content_from_bytes(file_content: bytes, extension: str = None, loader_e
         if temp_file_path and os.path.exists(temp_file_path):
             os.remove(temp_file_path)
 
+
+def _load_content_from_bytes_with_prompt(file_content: bytes, extension: str = None, loader_extra_config: dict = None, llm = None, prompt: str = image_processing_prompt) -> str:
+    """Internal helper that behaves like load_content_from_bytes but also propagates prompt.
+
+    This keeps the public load_content_from_bytes API unchanged while allowing newer
+    code paths to pass an explicit prompt through to the loader.
+    """
+    temp_file_path = None
+    try:
+        with tempfile.NamedTemporaryFile(mode='w+b', delete=False, suffix=extension or '') as temp_file:
+            temp_file.write(file_content)
+            temp_file.flush()
+            temp_file_path = temp_file.name
+
+        # Use prepare_loader so that prompt and other kwargs are handled consistently
+        loader = prepare_loader(
+            file_name=None,
+            file_content=None,
+            is_capture_image=loader_extra_config.get('extract_images') if loader_extra_config else False,
+            page_number=loader_extra_config.get('page_number') if loader_extra_config else None,
+            sheet_name=loader_extra_config.get('sheet_name') if loader_extra_config else None,
+            llm=llm or (loader_extra_config.get('llm') if loader_extra_config else None),
+            file_path=temp_file_path,
+            excel_by_sheets=loader_extra_config.get('excel_by_sheets') if loader_extra_config else False,
+            prompt=prompt or (loader_extra_config.get('prompt') if loader_extra_config else image_processing_prompt),
+        )
+
+        documents = loader.load()
+        page_contents = [doc.page_content for doc in documents]
+        return "\n".join(page_contents)
+    finally:
+        if temp_file_path and os.path.exists(temp_file_path):
+            os.remove(temp_file_path)
+
+
 def process_document_by_type(content, extension_source: str, document: Document = None, llm = None, chunking_config=None) \
         -> Generator[Document, None, None]:
     """Process the content of a file based on its type using a configured loader cosidering the origin document."""
@@ -338,4 +412,4 @@ def file_extension_by_chunker(chunker_name: str) -> str | None:
         return ".xml"
     if name == "csv":
         return ".csv"
-    return None
+    return None
alita_sdk/tools/utils/text_operations.py

@@ -24,81 +24,50 @@ TEXT_EDITABLE_EXTENSIONS = {
 
 
 def parse_old_new_markers(file_query: str) -> List[Tuple[str, str]]:
-    """
-    Parse OLD/NEW marker-based edit instructions.
-
-    Extracts pairs of old and new content from a file query using markers:
+    """Parse OLD/NEW marker-based edit instructions.
+
+    Extracts pairs of old and new content from a file query using markers in
+    a minimal, regex-based way without additional line splitting logic.
+
+    Supported forms (OLD/NEW blocks must appear in pairs):
     - OLD <<<< ... >>>> OLD
-    - NEW <<<< ... >>>> NEW
-
+      NEW <<<< ... >>>> NEW
+    - If no such pairs are found, we also accept the slightly incorrect
+      "<<<" form as a fallback:
+      OLD <<< ... >>> OLD
+      NEW <<< ... >>> NEW
+
     Args:
-        file_query: String containing marked old and new content sections
-
+        file_query: String containing marked old and new content sections.
+
     Returns:
-        List of tuples (old_content, new_content) for each edit pair
-
-    Example:
-        >>> query = '''
-        ... OLD <<<<
-        ... Hello World
-        ... >>>> OLD
-        ... NEW <<<<
-        ... Hello Mars
-        ... >>>> NEW
-        ... '''
-        >>> parse_old_new_markers(query)
-        [('Hello World', 'Hello Mars')]
+        List of (old_content, new_content) tuples, where each content string
+        is the raw inner block (with leading/trailing whitespace stripped),
+        but otherwise unmodified.
     """
-    # Split the file content by lines
-    code_lines = file_query.split("\n")
-
-    # Initialize lists to hold the contents of OLD and NEW sections
-    old_contents = []
-    new_contents = []
-
-    # Initialize variables to track whether the current line is within an OLD or NEW section
-    in_old_section = False
-    in_new_section = False
-
-    # Temporary storage for the current section's content
-    current_section_content = []
-
-    # Iterate through each line in the file content
-    for line in code_lines:
-        # Check for OLD section start
-        if "OLD <<<" in line:
-            in_old_section = True
-            current_section_content = []  # Reset current section content
-            continue  # Skip the line with the marker
-
-        # Check for OLD section end
-        if ">>>> OLD" in line:
-            in_old_section = False
-            old_contents.append("\n".join(current_section_content).strip())  # Add the captured content
-            current_section_content = []  # Reset current section content
-            continue  # Skip the line with the marker
-
-        # Check for NEW section start
-        if "NEW <<<" in line:
-            in_new_section = True
-            current_section_content = []  # Reset current section content
-            continue  # Skip the line with the marker
-
-        # Check for NEW section end
-        if ">>>> NEW" in line:
-            in_new_section = False
-            new_contents.append("\n".join(current_section_content).strip())  # Add the captured content
-            current_section_content = []  # Reset current section content
-            continue  # Skip the line with the marker
-
-        # If currently in an OLD or NEW section, add the line to the current section content
-        if in_old_section or in_new_section:
-            current_section_content.append(line)
-
-    # Pair the OLD and NEW contents
-    paired_contents = list(zip(old_contents, new_contents))
-
-    return paired_contents
+    # Primary pattern: correct 4-< markers
+    pattern_primary = re.compile(
+        r"OLD <<<<(\s*.*?\s*)>>>> OLD"   # OLD block
+        r"\s*"                           # optional whitespace between OLD/NEW
+        r"NEW <<<<(\s*.*?\s*)>>>> NEW",  # NEW block
+        re.DOTALL,
+    )
+
+    matches = pattern_primary.findall(file_query)
+
+    # Fallback pattern: accept 3-< markers if no proper 4-< markers found
+    if not matches:
+        pattern_fallback = re.compile(
+            r"OLD <<<(\s*.*?\s*)>>>> OLD"   # OLD block (3 < and 4 > to support previous version)
+            r"\s*"                          # optional whitespace between OLD/NEW
+            r"NEW <<<(\s*.*?\s*)>>>> NEW",  # NEW block (3 < and 4 > to support previous version)
+            re.DOTALL,
+        )
+        matches = pattern_fallback.findall(file_query)
+
+    # Preserve block content exactly as captured so Stage 1 can use exact
+    # substring replacement (including indentation and trailing spaces).
+    return [(old_block, new_block) for old_block, new_block in matches]
 
 
 def is_text_editable(filename: str) -> bool:
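A quick check of how the new primary pattern behaves on well-formed input (the regex is the one from the diff; the sample query is illustrative):

    import re

    pattern_primary = re.compile(
        r"OLD <<<<(\s*.*?\s*)>>>> OLD\s*NEW <<<<(\s*.*?\s*)>>>> NEW",
        re.DOTALL,
    )

    query = "\n".join([
        "OLD <<<<",
        "Hello World",
        ">>>> OLD",
        "NEW <<<<",
        "Hello Mars",
        ">>>> NEW",
    ])

    print(pattern_primary.findall(query))
    # [('\nHello World\n', '\nHello Mars\n')] -- raw inner blocks, newlines kept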
@@ -252,3 +221,126 @@ def search_in_content(
         })
 
     return matches
+
+
+def _normalize_for_match(text: str) -> str:
+    """Normalize text for tolerant OLD/NEW matching.
+
+    - Split into lines
+    - Replace common Unicode spaces with regular spaces
+    - Strip leading/trailing whitespace per line
+    - Collapse internal whitespace runs to a single space
+    - Join with '\n'
+    """
+    lines = text.splitlines()
+    norm_lines = []
+    for line in lines:
+        # Normalize common Unicode spaces to regular space
+        line = line.replace("\u00A0", " ").replace("\u2009", " ")
+        # Strip outer whitespace
+        line = line.strip()
+        # Collapse internal whitespace
+        line = re.sub(r"\s+", " ", line)
+        norm_lines.append(line)
+    return "\n".join(norm_lines)
+
+
+def try_apply_edit(
+    content: str,
+    old_text: str,
+    new_text: str,
+    file_path: Optional[str] = None,
+) -> Tuple[str, bool]:
+    """Apply a single OLD/NEW edit with a tolerant fallback.
+
+    This helper is used by edit_file to apply one (old_text, new_text) pair:
+
+    1. First tries exact substring replacement (old_text in content).
+    2. If that fails, performs a tolerant, line-based match:
+       - Builds a logical OLD sequence without empty/whitespace-only lines
+       - Scans content while skipping empty/whitespace-only lines
+       - Compares using `_normalize_for_match` so minor spacing differences
+         don't break the match
+       - If exactly one such region is found, replaces that region with new_text
+       - If zero or multiple regions are found, no change is applied
+
+    Args:
+        content: Current file content
+        old_text: OLD block extracted from markers
+        new_text: NEW block extracted from markers
+        file_path: Optional path for logging context
+
+    Returns:
+        (updated_content, used_fallback)
+    """
+    # Stage 1: exact match
+    if old_text in content:
+        return content.replace(old_text, new_text), False
+
+    # Stage 2: tolerant match
+    if not old_text.strip() or not content:
+        return content, False
+
+    # Logical OLD: drop empty/whitespace-only lines
+    old_lines_raw = old_text.splitlines()
+    old_lines = [l for l in old_lines_raw if l.strip()]
+    if not old_lines:
+        return content, False
+
+    # Precompute normalized OLD (joined by '\n')
+    norm_old = _normalize_for_match("\n".join(old_lines))
+
+    content_lines = content.splitlines(keepends=True)
+    total = len(content_lines)
+    candidates: list[tuple[int, int, str]] = []  # (start_idx, end_idx, block)
+
+    # Scan content for regions whose non-empty, normalized lines match norm_old
+    for start in range(total):
+        idx = start
+        collected_non_empty: list[str] = []
+        window_lines: list[str] = []
+
+        while idx < total and len(collected_non_empty) < len(old_lines):
+            line = content_lines[idx]
+            window_lines.append(line)
+            if line.strip():
+                collected_non_empty.append(line)
+            idx += 1
+
+        if len(collected_non_empty) < len(old_lines):
+            # Not enough non-empty lines from this start; no more windows possible
+            break
+
+        # Compare normalized non-empty content lines to normalized OLD
+        candidate_norm = _normalize_for_match("".join(collected_non_empty))
+        if candidate_norm == norm_old:
+            block = "".join(window_lines)
+            candidates.append((start, idx, block))
+
+    if not candidates:
+        logger.warning(
+            "Fallback match: normalized OLD block not found in %s.",
+            file_path or "<unknown>",
+        )
+        return content, False
+
+    if len(candidates) > 1:
+        logger.warning(
+            "Fallback match: multiple candidate regions for OLD block in %s; "
+            "no change applied to avoid ambiguity.",
+            file_path or "<unknown>",
+        )
+        return content, False
+
+    start_idx, end_idx, candidate_block = candidates[0]
+    updated = content.replace(candidate_block, new_text, 1)
+
+    logger.info(
+        "Fallback match: applied tolerant OLD/NEW replacement in %s around lines %d-%d",
+        file_path or "<unknown>",
+        start_idx + 1,
+        start_idx + len(old_lines),
+    )
+
+    return updated, True
+
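The two-stage strategy can be seen in isolation: exact substring replacement first, then a whitespace-tolerant comparison. A compact sketch reimplementing only the normalization step, to show why Stage 2 succeeds where Stage 1 fails (illustrative code, not the SDK functions themselves):

    import re

    def normalize(text: str) -> str:
        # Per line: unify common Unicode spaces, strip ends, collapse runs.
        out = []
        for line in text.splitlines():
            line = line.replace("\u00A0", " ").strip()
            out.append(re.sub(r"\s+", " ", line))
        return "\n".join(out)

    content = "def add(a,  b):\n    return  a + b\n"
    old = "def add(a, b):\n    return a + b"  # spacing differs from content

    # Stage 1: exact substring match fails because of the doubled spaces.
    print(old in content)  # False
    # Stage 2: normalized comparison still identifies the region.
    print(normalize(old) == normalize(content.rstrip("\n")))  # True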
alita_sdk/tools/xray/__init__.py

@@ -11,6 +11,7 @@ from ..elitea_base import filter_missconfigured_index_tools
 from ..utils import clean_string, get_max_toolkit_length
 from ...configurations.pgvector import PgVectorConfiguration
 from ...configurations.xray import XrayConfiguration
+from ...runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META
 
 name = "xray_cloud"
 
@@ -40,7 +41,7 @@ class XrayToolkit(BaseToolkit):
         selected_tools = {x['name']: x['args_schema'].schema() for x in XrayApiWrapper.model_construct().get_available_tools()}
         return create_model(
             name,
-            limit=(Optional[int], Field(description="Limit", default=100)),
+            limit=(Optional[int], Field(description="Limit", default=100, gt=0)),
             xray_configuration=(XrayConfiguration, Field(description="Xray Configuration", json_schema_extra={'configuration_types': ['xray']})),
             pgvector_configuration=(Optional[PgVectorConfiguration], Field(default=None,
                                                                            description="PgVector Configuration",
@@ -89,7 +90,7 @@ class XrayToolkit(BaseToolkit):
                 name=tool["name"],
                 description=description,
                 args_schema=tool["args_schema"],
-                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
+                metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
             ))
         return cls(tools=tools)
 
alita_sdk/tools/yagmail/__init__.py

@@ -6,6 +6,7 @@ from pydantic import create_model, BaseModel, Field, SecretStr
 
 from .yagmail_wrapper import YagmailWrapper, SMTP_SERVER
 from ..base.tool import BaseAction
+from ...runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META
 
 name = "yagmail"
 
@@ -53,7 +54,7 @@ class AlitaYagmailToolkit(BaseToolkit):
                 name=tool["name"],
                 description=description,
                 args_schema=tool["args_schema"],
-                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
+                metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
             ))
         return cls(tools=tools)
 
alita_sdk/tools/zephyr/__init__.py

@@ -9,6 +9,7 @@ from ..base.tool import BaseAction
 from .api_wrapper import ZephyrV1ApiWrapper
 from ..elitea_base import filter_missconfigured_index_tools
 from ..utils import clean_string, get_max_toolkit_length
+from ...runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META
 
 name = "zephyr"
 
@@ -62,7 +63,7 @@ class ZephyrToolkit(BaseToolkit):
                 name=tool["name"],
                 description=description,
                 args_schema=tool["args_schema"],
-                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
+                metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
             ))
         return cls(tools=tools)
 
alita_sdk/tools/zephyr_enterprise/__init__.py

@@ -8,6 +8,7 @@ from ..elitea_base import filter_missconfigured_index_tools
 from ..utils import clean_string, get_max_toolkit_length
 from ...configurations.pgvector import PgVectorConfiguration
 from ...configurations.zephyr_enterprise import ZephyrEnterpriseConfiguration
+from ...runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META
 
 name = "zephyr_enterprise"
 
@@ -80,7 +81,7 @@ class ZephyrEnterpriseToolkit(BaseToolkit):
                 name=tool["name"],
                 description=description,
                 args_schema=tool["args_schema"],
-                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
+                metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
             ))
         return cls(tools=tools)
 
alita_sdk/tools/zephyr_essential/__init__.py

@@ -9,6 +9,7 @@ from ..elitea_base import filter_missconfigured_index_tools
 from ..utils import clean_string, get_max_toolkit_length
 from ...configurations.pgvector import PgVectorConfiguration
 from ...configurations.zephyr_essential import ZephyrEssentialConfiguration
+from ...runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META
 
 name = "zephyr_essential"
 
@@ -75,7 +76,7 @@ class ZephyrEssentialToolkit(BaseToolkit):
                 name=tool["name"],
                 description=description,
                 args_schema=tool["args_schema"],
-                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
+                metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
             ))
         return cls(tools=tools)
 
alita_sdk/tools/zephyr_scale/__init__.py

@@ -10,6 +10,7 @@ from ..elitea_base import filter_missconfigured_index_tools
 from ..utils import clean_string, get_max_toolkit_length
 from ...configurations.pgvector import PgVectorConfiguration
 from ...configurations.zephyr import ZephyrConfiguration
+from ...runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META
 
 name = "zephyr_scale"
 
@@ -38,7 +39,7 @@ class ZephyrScaleToolkit(BaseToolkit):
         selected_tools = {x['name']: x['args_schema'].schema() for x in ZephyrScaleApiWrapper.model_construct().get_available_tools()}
         return create_model(
             name,
-            max_results=(int, Field(default=100, description="Results count to show")),
+            max_results=(int, Field(default=100, description="Results count to show", gt=0)),
             zephyr_configuration=(ZephyrConfiguration, Field(description="Zephyr Configuration",
                                                              json_schema_extra={'configuration_types': ['zephyr']})),
             pgvector_configuration=(Optional[PgVectorConfiguration], Field(default=None, description="PgVector Configuration",
@@ -88,7 +89,7 @@ class ZephyrScaleToolkit(BaseToolkit):
                 name=tool["name"],
                 description=description,
                 args_schema=tool["args_schema"],
-                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
+                metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
             ))
         return cls(tools=tools)
 
alita_sdk/tools/zephyr_scale/api_wrapper.py

@@ -177,13 +177,13 @@ ZephyrSearchTestCases = create_model(
     "ZephyrSearchTestCases",
     project_key=(str, Field(description="Jira project key filter")),
     search_term=(Optional[str], Field(description="Optional search term to filter test cases", default=None)),
-    max_results=(Optional[int], Field(description="Maximum number of results to query from the API", default=1000)),
+    max_results=(Optional[int], Field(description="Maximum number of results to query from the API", default=1000, gt=0)),
    start_at=(Optional[int], Field(description="Zero-indexed starting position", default=0)),
     order_by=(Optional[str], Field(description="Field to order results by", default="name")),
     order_direction=(Optional[str], Field(description="Order direction", default="ASC")),
     archived=(Optional[bool], Field(description="Include archived test cases", default=False)),
     fields=(Optional[List[str]], Field(description="Fields to include in the response (default: key, name). Regular fields include key, name, id, labels, folder, etc. Custom fields can be included in the following ways: Individual custom fields via customFields.field_name format, All custom fields via customFields in the fields list", default=["key", "name"])),
-    limit_results=(Optional[int], Field(description="Maximum number of filtered results to return", default=10)),
+    limit_results=(Optional[int], Field(description="Maximum number of filtered results to return", default=10, gt=0)),
     folder_id=(Optional[str], Field(description="Filter test cases by folder ID", default=None)),
     folder_name=(Optional[str], Field(description="Filter test cases by folder name (full or partial)", default=None)),
     exact_folder_match=(Optional[bool], Field(description="Whether to match the folder name exactly or allow partial matches", default=False)),
alita_sdk/tools/zephyr_squad/__init__.py

@@ -7,6 +7,7 @@ from .api_wrapper import ZephyrSquadApiWrapper
 from ..base.tool import BaseAction
 from ..elitea_base import filter_missconfigured_index_tools
 from ..utils import clean_string, get_max_toolkit_length
+from ...runtime.utils.constants import TOOLKIT_NAME_META, TOOL_NAME_META, TOOLKIT_TYPE_META
 
 name = "zephyr_squad"
 
@@ -56,7 +57,7 @@ class ZephyrSquadToolkit(BaseToolkit):
                 name=tool["name"],
                 description=description,
                 args_schema=tool["args_schema"],
-                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
+                metadata={TOOLKIT_NAME_META: toolkit_name, TOOLKIT_TYPE_META: name, TOOL_NAME_META: tool["name"]} if toolkit_name else {TOOL_NAME_META: tool["name"]}
             ))
         return cls(tools=tools)
 
{alita_sdk-0.3.532.dist-info → alita_sdk-0.3.602.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: alita_sdk
-Version: 0.3.532
+Version: 0.3.602
 Summary: SDK for building langchain agents using resources from Alita
 Author-email: Artem Rozumenko <artyom.rozumenko@gmail.com>, Mikalai Biazruchka <mikalai_biazruchka@epam.com>, Roman Mitusov <roman_mitusov@epam.com>, Ivan Krakhmaliuk <lifedj27@gmail.com>, Artem Dubrovskiy <ad13box@gmail.com>
 License-Expression: Apache-2.0
@@ -68,12 +68,13 @@ Requires-Dist: pytesseract==0.3.13; extra == "runtime"
 Requires-Dist: markdown==3.5.1; extra == "runtime"
 Requires-Dist: beautifulsoup4==4.12.2; extra == "runtime"
 Requires-Dist: charset_normalizer==3.3.2; extra == "runtime"
-Requires-Dist: opentelemetry-exporter-otlp-proto-grpc==1.25.0; extra == "runtime"
-Requires-Dist: opentelemetry_api==1.25.0; extra == "runtime"
-Requires-Dist: opentelemetry_instrumentation==0.46b0; extra == "runtime"
-Requires-Dist: grpcio_status==1.63.0rc1; extra == "runtime"
-Requires-Dist: protobuf==4.25.7; extra == "runtime"
+Requires-Dist: opentelemetry-exporter-otlp-proto-grpc>=1.25.0; extra == "runtime"
+Requires-Dist: opentelemetry_api>=1.25.0; extra == "runtime"
+Requires-Dist: opentelemetry_instrumentation>=0.46b0; extra == "runtime"
+Requires-Dist: grpcio_status>=1.63.0rc1; extra == "runtime"
+Requires-Dist: protobuf>=4.25.7; extra == "runtime"
 Requires-Dist: langchain-sandbox>=0.0.6; extra == "runtime"
+Requires-Dist: langchain-mcp-adapters<0.2.0,>=0.1.14; extra == "runtime"
 Provides-Extra: tools
 Requires-Dist: dulwich==0.21.6; extra == "tools"
 Requires-Dist: paramiko==3.3.1; extra == "tools"
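The dependency changes relax several exact pins (==) to minimum versions (>=) and add a bounded range for langchain-mcp-adapters. A sketch of how these PEP 440 specifiers resolve, using the packaging library (the probe versions below are arbitrary examples):

    from packaging.specifiers import SpecifierSet

    print("4.25.7" in SpecifierSet("==4.25.7"))  # True  -- old pin: exactly this version
    print("5.29.0" in SpecifierSet("==4.25.7"))  # False
    print("5.29.0" in SpecifierSet(">=4.25.7"))  # True  -- new floor: newer versions allowed

    mcp = SpecifierSet(">=0.1.14,<0.2.0")        # bounded range for langchain-mcp-adapters
    print("0.1.14" in mcp, "0.2.0" in mcp)       # True False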