alita-sdk 0.3.497__py3-none-any.whl → 0.3.516__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of alita-sdk might be problematic.
- alita_sdk/cli/inventory.py +12 -195
- alita_sdk/community/inventory/__init__.py +12 -0
- alita_sdk/community/inventory/toolkit.py +9 -5
- alita_sdk/community/inventory/toolkit_utils.py +176 -0
- alita_sdk/configurations/ado.py +144 -0
- alita_sdk/configurations/confluence.py +76 -42
- alita_sdk/configurations/figma.py +76 -0
- alita_sdk/configurations/gitlab.py +2 -0
- alita_sdk/configurations/qtest.py +72 -1
- alita_sdk/configurations/report_portal.py +96 -0
- alita_sdk/configurations/sharepoint.py +148 -0
- alita_sdk/configurations/testio.py +83 -0
- alita_sdk/runtime/clients/artifact.py +2 -2
- alita_sdk/runtime/clients/client.py +24 -19
- alita_sdk/runtime/clients/sandbox_client.py +14 -0
- alita_sdk/runtime/langchain/assistant.py +64 -23
- alita_sdk/runtime/langchain/constants.py +270 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLinesLoader.py +77 -0
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +2 -1
- alita_sdk/runtime/langchain/document_loaders/constants.py +2 -1
- alita_sdk/runtime/langchain/langraph_agent.py +8 -9
- alita_sdk/runtime/langchain/utils.py +6 -1
- alita_sdk/runtime/toolkits/artifact.py +14 -5
- alita_sdk/runtime/toolkits/datasource.py +13 -6
- alita_sdk/runtime/toolkits/mcp.py +26 -157
- alita_sdk/runtime/toolkits/planning.py +10 -5
- alita_sdk/runtime/toolkits/tools.py +23 -7
- alita_sdk/runtime/toolkits/vectorstore.py +11 -5
- alita_sdk/runtime/tools/artifact.py +139 -6
- alita_sdk/runtime/tools/llm.py +20 -10
- alita_sdk/runtime/tools/mcp_remote_tool.py +2 -3
- alita_sdk/runtime/tools/mcp_server_tool.py +2 -4
- alita_sdk/runtime/utils/AlitaCallback.py +30 -1
- alita_sdk/runtime/utils/mcp_client.py +33 -6
- alita_sdk/runtime/utils/mcp_oauth.py +125 -8
- alita_sdk/runtime/utils/mcp_sse_client.py +35 -6
- alita_sdk/runtime/utils/utils.py +2 -0
- alita_sdk/tools/__init__.py +15 -0
- alita_sdk/tools/ado/repos/__init__.py +10 -12
- alita_sdk/tools/ado/test_plan/__init__.py +23 -8
- alita_sdk/tools/ado/wiki/__init__.py +24 -8
- alita_sdk/tools/ado/wiki/ado_wrapper.py +21 -7
- alita_sdk/tools/ado/work_item/__init__.py +24 -8
- alita_sdk/tools/advanced_jira_mining/__init__.py +10 -8
- alita_sdk/tools/aws/delta_lake/__init__.py +12 -9
- alita_sdk/tools/aws/delta_lake/tool.py +5 -1
- alita_sdk/tools/azure_ai/search/__init__.py +9 -7
- alita_sdk/tools/base/tool.py +5 -1
- alita_sdk/tools/base_indexer_toolkit.py +25 -0
- alita_sdk/tools/bitbucket/__init__.py +14 -10
- alita_sdk/tools/bitbucket/api_wrapper.py +50 -2
- alita_sdk/tools/browser/__init__.py +5 -4
- alita_sdk/tools/carrier/__init__.py +5 -6
- alita_sdk/tools/cloud/aws/__init__.py +9 -7
- alita_sdk/tools/cloud/azure/__init__.py +9 -7
- alita_sdk/tools/cloud/gcp/__init__.py +9 -7
- alita_sdk/tools/cloud/k8s/__init__.py +9 -7
- alita_sdk/tools/code/linter/__init__.py +9 -8
- alita_sdk/tools/code/sonar/__init__.py +9 -7
- alita_sdk/tools/confluence/__init__.py +15 -10
- alita_sdk/tools/custom_open_api/__init__.py +11 -5
- alita_sdk/tools/elastic/__init__.py +10 -8
- alita_sdk/tools/elitea_base.py +387 -9
- alita_sdk/tools/figma/__init__.py +8 -7
- alita_sdk/tools/github/__init__.py +12 -14
- alita_sdk/tools/github/github_client.py +68 -2
- alita_sdk/tools/github/tool.py +5 -1
- alita_sdk/tools/gitlab/__init__.py +14 -11
- alita_sdk/tools/gitlab/api_wrapper.py +81 -1
- alita_sdk/tools/gitlab_org/__init__.py +9 -8
- alita_sdk/tools/google/bigquery/__init__.py +12 -12
- alita_sdk/tools/google/bigquery/tool.py +5 -1
- alita_sdk/tools/google_places/__init__.py +9 -8
- alita_sdk/tools/jira/__init__.py +15 -10
- alita_sdk/tools/keycloak/__init__.py +10 -8
- alita_sdk/tools/localgit/__init__.py +8 -3
- alita_sdk/tools/localgit/local_git.py +62 -54
- alita_sdk/tools/localgit/tool.py +5 -1
- alita_sdk/tools/memory/__init__.py +11 -3
- alita_sdk/tools/ocr/__init__.py +10 -8
- alita_sdk/tools/openapi/__init__.py +6 -2
- alita_sdk/tools/pandas/__init__.py +9 -7
- alita_sdk/tools/postman/__init__.py +10 -11
- alita_sdk/tools/pptx/__init__.py +9 -9
- alita_sdk/tools/qtest/__init__.py +9 -8
- alita_sdk/tools/rally/__init__.py +9 -8
- alita_sdk/tools/report_portal/__init__.py +11 -9
- alita_sdk/tools/salesforce/__init__.py +9 -9
- alita_sdk/tools/servicenow/__init__.py +10 -8
- alita_sdk/tools/sharepoint/__init__.py +9 -8
- alita_sdk/tools/slack/__init__.py +8 -7
- alita_sdk/tools/sql/__init__.py +9 -8
- alita_sdk/tools/testio/__init__.py +9 -8
- alita_sdk/tools/testrail/__init__.py +10 -8
- alita_sdk/tools/utils/__init__.py +9 -4
- alita_sdk/tools/utils/text_operations.py +254 -0
- alita_sdk/tools/xray/__init__.py +10 -8
- alita_sdk/tools/yagmail/__init__.py +8 -3
- alita_sdk/tools/zephyr/__init__.py +8 -7
- alita_sdk/tools/zephyr_enterprise/__init__.py +10 -8
- alita_sdk/tools/zephyr_essential/__init__.py +9 -8
- alita_sdk/tools/zephyr_scale/__init__.py +9 -8
- alita_sdk/tools/zephyr_squad/__init__.py +9 -8
- {alita_sdk-0.3.497.dist-info → alita_sdk-0.3.516.dist-info}/METADATA +1 -1
- {alita_sdk-0.3.497.dist-info → alita_sdk-0.3.516.dist-info}/RECORD +109 -106
- {alita_sdk-0.3.497.dist-info → alita_sdk-0.3.516.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.497.dist-info → alita_sdk-0.3.516.dist-info}/entry_points.txt +0 -0
- {alita_sdk-0.3.497.dist-info → alita_sdk-0.3.516.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.497.dist-info → alita_sdk-0.3.516.dist-info}/top_level.txt +0 -0
alita_sdk/tools/localgit/local_git.py
CHANGED

@@ -8,7 +8,8 @@ from git import Repo
 from pydantic import BaseModel, Field, create_model, model_validator
 from langchain_core.tools import ToolException
 
-from ..elitea_base import BaseToolApiWrapper
+from ..elitea_base import BaseToolApiWrapper, extend_with_file_operations, BaseCodeToolApiWrapper
+from ..utils.text_operations import parse_old_new_markers
 
 logger = logging.getLogger(__name__)
 CREATE_FILE_PROMPT = """Create new file in your local repository."""
@@ -110,6 +111,12 @@ class LocalGit(BaseToolApiWrapper):
     repo_url: str = None
     commit_sha: str = None
     path_pattern: str = '**/*.py'
+
+    # Import file operation methods from BaseCodeToolApiWrapper
+    read_file_chunk = BaseCodeToolApiWrapper.read_file_chunk
+    read_multiple_files = BaseCodeToolApiWrapper.read_multiple_files
+    search_file = BaseCodeToolApiWrapper.search_file
+    edit_file = BaseCodeToolApiWrapper.edit_file
 
     @model_validator(mode='before')
     @classmethod
@@ -128,58 +135,6 @@ class LocalGit(BaseToolApiWrapper):
         repo.head.reset(commit=commit_sha, working_tree=True)
         return values
 
-    def extract_old_new_pairs(self, file_query):
-        # Split the file content by lines
-        code_lines = file_query.split("\n")
-
-        # Initialize lists to hold the contents of OLD and NEW sections
-        old_contents = []
-        new_contents = []
-
-        # Initialize variables to track whether the current line is within an OLD or NEW section
-        in_old_section = False
-        in_new_section = False
-
-        # Temporary storage for the current section's content
-        current_section_content = []
-
-        # Iterate through each line in the file content
-        for line in code_lines:
-            # Check for OLD section start
-            if "OLD <<<" in line:
-                in_old_section = True
-                current_section_content = []  # Reset current section content
-                continue  # Skip the line with the marker
-
-            # Check for OLD section end
-            if ">>>> OLD" in line:
-                in_old_section = False
-                old_contents.append("\n".join(current_section_content).strip())  # Add the captured content
-                current_section_content = []  # Reset current section content
-                continue  # Skip the line with the marker
-
-            # Check for NEW section start
-            if "NEW <<<" in line:
-                in_new_section = True
-                current_section_content = []  # Reset current section content
-                continue  # Skip the line with the marker
-
-            # Check for NEW section end
-            if ">>>> NEW" in line:
-                in_new_section = False
-                new_contents.append("\n".join(current_section_content).strip())  # Add the captured content
-                current_section_content = []  # Reset current section content
-                continue  # Skip the line with the marker
-
-            # If currently in an OLD or NEW section, add the line to the current section content
-            if in_old_section or in_new_section:
-                current_section_content.append(line)
-
-        # Pair the OLD and NEW contents
-        paired_contents = list(zip(old_contents, new_contents))
-
-        return paired_contents
-
     def checkout_commit(self, commit_sha: str) -> str:
         """ Checkout specific commit from repository """
         try:
@@ -233,6 +188,58 @@ class LocalGit(BaseToolApiWrapper):
                 return f.read()
         else:
             return "File '{}' cannot be read because it is not existed".format(file_path)
+
+    def _read_file(self, file_path: str, branch: str = None, **kwargs) -> str:
+        """
+        Read a file from the repository with optional partial read support.
+
+        Parameters:
+            file_path: the file path (relative to repo root)
+            branch: branch name (not used for local git, always reads from working dir)
+            **kwargs: Additional parameters (offset, limit, head, tail) - currently ignored,
+                      partial read handled client-side by base class methods
+
+        Returns:
+            File content as string
+        """
+        return self.read_file(file_path)
+
+    def _write_file(
+        self,
+        file_path: str,
+        content: str,
+        branch: str = None,
+        commit_message: str = None
+    ) -> str:
+        """
+        Write content to a file (create or update).
+
+        Parameters:
+            file_path: Path to the file (relative to repo root)
+            content: New file content
+            branch: Branch name (not used for local git)
+            commit_message: Commit message (not used - files are written without commit)
+
+        Returns:
+            Success message
+        """
+        try:
+            full_path = os.path.normpath(os.path.join(self.repo.working_dir, file_path))
+
+            # Ensure directory exists
+            os.makedirs(os.path.dirname(full_path), exist_ok=True)
+
+            # Write the file
+            with open(full_path, 'w') as f:
+                f.write(content)
+
+            # Determine if file was created or updated
+            if os.path.exists(full_path):
+                return f"Updated file {file_path}"
+            else:
+                return f"Created file {file_path}"
+        except Exception as e:
+            raise ToolException(f"Unable to write file {file_path}: {str(e)}")
 
     def update_file_content_by_lines(self, file_path: str, start_line_index: int, end_line_index: int,
                                      new_content: str) -> str:
@@ -314,7 +321,7 @@ class LocalGit(BaseToolApiWrapper):
         file_path = os.path.normpath(os.path.join(self.repo.working_dir, file_path))
         file_content = self.read_file(file_path)
         updated_file_content = file_content
-        for old, new in
+        for old, new in parse_old_new_markers(file_query):  # Use shared utility
             if not old.strip():
                 continue
             updated_file_content = updated_file_content.replace(old, new)
@@ -332,6 +339,7 @@ class LocalGit(BaseToolApiWrapper):
         except Exception as e:
             return "Unable to update file due to error:\n" + str(e)
 
+    @extend_with_file_operations
     def get_available_tools(self):
         return [
             {
alita_sdk/tools/localgit/tool.py
CHANGED

@@ -29,6 +29,10 @@ class LocalGitAction(BaseTool):
     ) -> str:
         """Use the GitHub API to run an operation."""
         try:
-
+            # Strip numeric suffix added for deduplication (_2, _3, etc.)
+            # to get the original tool name that exists in the wrapper
+            import re
+            mode = re.sub(r'_\d+$', '', self.mode) if self.mode else self.mode
+            return self.api_wrapper.run(mode, *args, **kwargs)
         except Exception as e:
             return f"Error: {format_exc()}"
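
LocalGitAction.run now strips a trailing numeric deduplication suffix (_2, _3, ...) from self.mode before dispatching to the wrapper, so deduplicated tool names still resolve to the original tool. A small sketch of that behavior (the sample tool names are illustrative):

import re

# Sketch of the new mode resolution in LocalGitAction.run.
def resolve_mode(mode: str) -> str:
    # Drop a trailing _2, _3, ... deduplication suffix so the original tool
    # name registered in the wrapper is used.
    return re.sub(r'_\d+$', '', mode) if mode else mode

assert resolve_mode("create_file_2") == "create_file"  # deduplicated name
assert resolve_mode("create_file") == "create_file"    # no suffix, unchanged
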
alita_sdk/tools/memory/__init__.py
CHANGED

@@ -80,13 +80,14 @@ class MemoryToolkit(BaseToolkit):
         )
 
     @classmethod
-    def get_toolkit(cls, namespace: str, store=None, **kwargs):
+    def get_toolkit(cls, namespace: str, store=None, toolkit_name: str = None, **kwargs):
         """
         Get toolkit with memory tools.
 
         Args:
             namespace: Memory namespace
             store: PostgresStore instance (imported dynamically)
+            toolkit_name: Optional toolkit name for metadata
             **kwargs: Additional arguments
         """
         try:
@@ -109,10 +110,17 @@ class MemoryToolkit(BaseToolkit):
         if store is not None and not isinstance(store, PostgresStore):
             raise TypeError(f"Expected PostgresStore, got {type(store)}")
 
-
+        tools = [
             create_manage_memory_tool(namespace=namespace, store=store),
             create_search_memory_tool(namespace=namespace, store=store)
-        ]
+        ]
+
+        # Add metadata to tools if toolkit_name is provided
+        if toolkit_name:
+            for tool in tools:
+                tool.metadata = {"toolkit_name": toolkit_name}
+
+        return cls(tools=tools)
 
     def get_tools(self):
         return self.tools
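
MemoryToolkit.get_toolkit now accepts an optional toolkit_name and stamps it into each tool's metadata instead of renaming the tools. A rough sketch of the effect on a LangChain tool, using a stand-in @tool instead of the real memory tool factories (names below are made up):

# Rough sketch: how toolkit_name metadata lands on a LangChain tool and can be
# read back later (e.g. by callbacks).
from langchain_core.tools import tool

@tool
def manage_memory(note: str) -> str:
    """Store a note in memory (stand-in for the real memory tool)."""
    return f"stored: {note}"

toolkit_name = "team_memory"  # hypothetical toolkit name
manage_memory.metadata = {"toolkit_name": toolkit_name}

assert manage_memory.metadata["toolkit_name"] == "team_memory"
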
alita_sdk/tools/ocr/__init__.py
CHANGED

@@ -5,7 +5,7 @@ from pydantic import create_model, BaseModel, ConfigDict, Field
 
 from .api_wrapper import OCRApiWrapper
 from ..base.tool import BaseAction
-from ..utils import clean_string,
+from ..utils import clean_string, get_max_toolkit_length
 
 name = "ocr"
 
@@ -23,15 +23,13 @@ def get_tools(tool):
 
 class OCRToolkit(BaseToolkit):
     tools: list[BaseTool] = []
-    toolkit_max_length: int = 0
 
     @staticmethod
     def toolkit_config_schema() -> BaseModel:
         selected_tools = {x['name']: x['args_schema'].schema() for x in OCRApiWrapper.model_construct().get_available_tools()}
-        OCRToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         return create_model(
             name,
-            artifacts_folder=(str, Field(description="Folder path containing artifacts to process", json_schema_extra={'toolkit_name': True
+            artifacts_folder=(str, Field(description="Folder path containing artifacts to process", json_schema_extra={'toolkit_name': True})),
             tesseract_settings=(dict, Field(description="Settings for Tesseract OCR processing", default={})),
             structured_output=(bool, Field(description="Whether to return structured JSON output", default=False)),
             expected_fields=(dict, Field(description="Expected fields for structured output", default={})),
@@ -47,17 +45,21 @@ class OCRToolkit(BaseToolkit):
         if selected_tools is None:
             selected_tools = []
         ocr_api_wrapper = OCRApiWrapper(**kwargs)
-        prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
         available_tools = ocr_api_wrapper.get_available_tools()
         tools = []
         for tool in available_tools:
             if selected_tools and tool["name"] not in selected_tools:
                 continue
+            description = tool["description"]
+            if toolkit_name:
+                description = f"Toolkit: {toolkit_name}\n{description}"
+            description = description[:1000]
             tools.append(BaseAction(
                 api_wrapper=ocr_api_wrapper,
-                name=
-                description=
-                args_schema=tool["args_schema"]
+                name=tool["name"],
+                description=description,
+                args_schema=tool["args_schema"],
+                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
             ))
         return cls(tools=tools)
 
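
The OCR toolkit (and the other toolkits updated in this diff) drops the clean_string/TOOLKIT_SPLITTER name prefixing and the max_length bookkeeping; instead each tool keeps its plain name, its description gains a "Toolkit: <name>" line and is capped at 1000 characters, and metadata={"toolkit_name": ...} is attached. A condensed sketch of that shared pattern (the tool dict and names are placeholders, not a real OCRApiWrapper tool):

from typing import Optional

# Condensed sketch of the description pattern now shared by these toolkits.
def build_description(tool: dict, toolkit_name: Optional[str]) -> str:
    description = tool["description"]
    if toolkit_name:
        description = f"Toolkit: {toolkit_name}\n{description}"
    return description[:1000]  # keep the LLM-facing description bounded

tool = {"name": "recognize_text", "description": "Run OCR over artifacts."}
print(build_description(tool, "my_ocr"))
# Toolkit: my_ocr
# Run OCR over artifacts.
metadata = {"toolkit_name": "my_ocr"}  # attached to each tool action as well
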
alita_sdk/tools/openapi/__init__.py
CHANGED

@@ -96,7 +96,8 @@ class AlitaOpenAPIToolkit(BaseToolkit):
     @classmethod
     def get_toolkit(cls, openapi_spec: str | dict,
                     selected_tools: list[dict] | None = None,
-                    headers: Optional[Dict[str, str]] = None
+                    headers: Optional[Dict[str, str]] = None,
+                    toolkit_name: Optional[str] = None):
         if selected_tools is not None:
             tools_set = set([i if not isinstance(i, dict) else i.get('name') for i in selected_tools])
         else:
@@ -124,7 +125,10 @@ class AlitaOpenAPIToolkit(BaseToolkit):
                 tool = c.operations[i]
                 if not isinstance(tool, Operation):
                     raise ToolException(f"Operation {i} is not an instance of Operation class.")
-
+                api_tool = create_api_tool(i, tool)
+                if toolkit_name:
+                    api_tool.metadata = {"toolkit_name": toolkit_name}
+                tools.append(api_tool)
             except ToolException:
                 raise
             except Exception as e:
alita_sdk/tools/pandas/__init__.py
CHANGED

@@ -5,7 +5,7 @@ from pydantic import BaseModel, ConfigDict, create_model, Field
 
 from .api_wrapper import PandasWrapper
 from ..base.tool import BaseAction
-from ..utils import clean_string,
+from ..utils import clean_string, get_max_toolkit_length
 
 name = "pandas"
 
@@ -21,12 +21,10 @@ def get_tools(tool):
 
 class PandasToolkit(BaseToolkit):
     tools: List[BaseTool] = []
-    toolkit_max_length: int = 0
 
     @staticmethod
     def toolkit_config_schema() -> BaseModel:
         selected_tools = {x['name']: x['args_schema'].schema() for x in PandasWrapper.model_construct().get_available_tools()}
-        PandasToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         return create_model(
             name,
             bucket_name=(str, Field(default=None, title="Bucket name", description="Bucket where the content file is stored")),
@@ -41,17 +39,21 @@ class PandasToolkit(BaseToolkit):
         if selected_tools is None:
             selected_tools = []
         csv_tool_api_wrapper = PandasWrapper(**kwargs)
-        prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
         available_tools = csv_tool_api_wrapper.get_available_tools()
         tools = []
         for tool in available_tools:
             if selected_tools and tool["name"] not in selected_tools:
                 continue
+            description = tool["description"]
+            if toolkit_name:
+                description = f"Toolkit: {toolkit_name}\n{description}"
+            description = description[:1000]
             tools.append(BaseAction(
                 api_wrapper=csv_tool_api_wrapper,
-                name=
-                description=
-                args_schema=tool["args_schema"]
+                name=tool["name"],
+                description=description,
+                args_schema=tool["args_schema"],
+                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
             ))
         return cls(tools=tools)
 
alita_sdk/tools/postman/__init__.py
CHANGED

@@ -6,7 +6,7 @@ from pydantic import create_model, BaseModel, ConfigDict, Field, field_validator
 from ..base.tool import BaseAction
 
 from .api_wrapper import PostmanApiWrapper
-from ..utils import clean_string, get_max_toolkit_length,
+from ..utils import clean_string, get_max_toolkit_length, check_connection_response
 from ...configurations.postman import PostmanConfiguration
 
 name = "postman"
@@ -43,14 +43,11 @@ def get_tools(tool):
 
 class PostmanToolkit(BaseToolkit):
     tools: List[BaseTool] = []
-    toolkit_max_length: int = 0
 
     @staticmethod
     def toolkit_config_schema() -> BaseModel:
         selected_tools = {x['name']: x['args_schema'].schema(
         ) for x in PostmanApiWrapper.model_construct().get_available_tools()}
-        PostmanToolkit.toolkit_max_length = get_max_toolkit_length(
-            selected_tools)
         m = create_model(
             name,
             postman_configuration=(Optional[PostmanConfiguration], Field(description="Postman Configuration",
@@ -62,8 +59,7 @@ class PostmanToolkit(BaseToolkit):
             selected_tools=(List[Literal[tuple(selected_tools)]], Field(
                 default=[], json_schema_extra={'args_schemas': selected_tools})),
             __config__=ConfigDict(json_schema_extra={'metadata': {
-                "label": "Postman", "icon_url": "postman.svg"
-                "max_length": PostmanToolkit.toolkit_max_length,}})
+                "label": "Postman", "icon_url": "postman.svg"}})
         )
 
     @check_connection_response
@@ -90,20 +86,23 @@ class PostmanToolkit(BaseToolkit):
             **kwargs['postman_configuration'],
         }
         postman_api_wrapper = PostmanApiWrapper(**wrapper_payload)
-        prefix = clean_string(str(toolkit_name), cls.toolkit_max_length) + \
-            TOOLKIT_SPLITTER if toolkit_name else ''
         available_tools = postman_api_wrapper.get_available_tools()
         tools = []
         for tool in available_tools:
             if selected_tools:
                 if tool["name"] not in selected_tools:
                     continue
+            description = f"{tool['description']}\nAPI URL: {postman_api_wrapper.base_url}"
+            if toolkit_name:
+                description = f"{description}\nToolkit: {toolkit_name}"
+            description = description[:1000]
             tools.append(PostmanAction(
                 api_wrapper=postman_api_wrapper,
-                name=
+                name=tool["name"],
                 mode=tool["mode"],
-                description=
-                args_schema=tool["args_schema"]
+                description=description,
+                args_schema=tool["args_schema"],
+                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
             ))
         return cls(tools=tools)
 
alita_sdk/tools/pptx/__init__.py
CHANGED

@@ -7,7 +7,7 @@ from pydantic import create_model, BaseModel, ConfigDict, Field
 from .pptx_wrapper import PPTXWrapper
 
 from ..base.tool import BaseAction
-from ..utils import clean_string,
+from ..utils import clean_string, get_max_toolkit_length
 
 logger = logging.getLogger(__name__)
 
@@ -27,8 +27,6 @@ def get_tools(tool):
     ).get_tools()
 
 
-TOOLKIT_MAX_LENGTH = 25
-
 class PPTXToolkit(BaseToolkit):
     """
     PowerPoint (PPTX) manipulation toolkit for Alita.
@@ -51,7 +49,6 @@ class PPTXToolkit(BaseToolkit):
             'metadata': {
                 "label": "PPTX",
                 "icon_url": "pptx.svg",
-                "max_length": TOOLKIT_MAX_LENGTH,
                 "categories": ["office"],
                 "extra_categories": ["presentation", "office automation", "document"]
             }
@@ -75,19 +72,22 @@ class PPTXToolkit(BaseToolkit):
             selected_tools = []
 
         pptx_api_wrapper = PPTXWrapper(**kwargs)
-        prefix = clean_string(toolkit_name, TOOLKIT_MAX_LENGTH) + TOOLKIT_SPLITTER if toolkit_name else ''
         available_tools = pptx_api_wrapper.get_available_tools()
         tools = []
 
         for tool in available_tools:
             if selected_tools and tool["name"] not in selected_tools:
                 continue
-
+            description = tool["description"]
+            if toolkit_name:
+                description = f"Toolkit: {toolkit_name}\n{description}"
+            description = description[:1000]
             tools.append(BaseAction(
                 api_wrapper=pptx_api_wrapper,
-                name=
-                description=
-                args_schema=tool["args_schema"]
+                name=tool["name"],
+                description=description,
+                args_schema=tool["args_schema"],
+                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
             ))
 
         return cls(tools=tools)
alita_sdk/tools/qtest/__init__.py
CHANGED

@@ -7,7 +7,7 @@ from pydantic import create_model, BaseModel, ConfigDict, Field, SecretStr
 from .api_wrapper import QtestApiWrapper
 from .tool import QtestAction
 from ..elitea_base import filter_missconfigured_index_tools
-from ..utils import clean_string, get_max_toolkit_length,
+from ..utils import clean_string, get_max_toolkit_length, check_connection_response
 from ...configurations.qtest import QtestConfiguration
 
 name = "qtest"
@@ -27,12 +27,10 @@ def get_tools(tool):
 
 class QtestToolkit(BaseToolkit):
     tools: List[BaseTool] = []
-    toolkit_max_length: int = 0
 
     @staticmethod
     def toolkit_config_schema() -> BaseModel:
         selected_tools = {x['name']: x['args_schema'].schema() for x in QtestApiWrapper.model_construct().get_available_tools()}
-        QtestToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         m = create_model(
             name,
             qtest_configuration=(QtestConfiguration, Field(description="QTest API token", json_schema_extra={
@@ -44,7 +42,6 @@ class QtestToolkit(BaseToolkit):
             selected_tools=(List[Literal[tuple(selected_tools)]],
                             Field(default=[], json_schema_extra={'args_schemas': selected_tools})),
             __config__=ConfigDict(json_schema_extra={'metadata': {"label": "QTest", "icon_url": "qtest.svg",
-                                                                  "max_length": QtestToolkit.toolkit_max_length,
                                                                   "categories": ["test management"],
                                                                   "extra_categories": ["quality assurance",
                                                                                        "test case management",
@@ -76,19 +73,23 @@ class QtestToolkit(BaseToolkit):
             **kwargs['qtest_configuration'],
         }
         qtest_api_wrapper = QtestApiWrapper(**wrapper_payload)
-        prefix = clean_string(str(toolkit_name), cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
         available_tools = qtest_api_wrapper.get_available_tools()
         tools = []
         for tool in available_tools:
             if selected_tools:
                 if tool["name"] not in selected_tools:
                     continue
+            description = f"{tool['description']}\nUrl: {qtest_api_wrapper.base_url}. Project id: {qtest_api_wrapper.qtest_project_id}"
+            if toolkit_name:
+                description = f"{description}\nToolkit: {toolkit_name}"
+            description = description[:1000]
             tools.append(QtestAction(
                 api_wrapper=qtest_api_wrapper,
-                name=
+                name=tool["name"],
                 mode=tool["mode"],
-                description=
-                args_schema=tool["args_schema"]
+                description=description,
+                args_schema=tool["args_schema"],
+                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
             ))
         return cls(tools=tools)
 
alita_sdk/tools/rally/__init__.py
CHANGED

@@ -5,7 +5,7 @@ from .api_wrapper import RallyApiWrapper
 from langchain_core.tools import BaseTool
 from ..base.tool import BaseAction
 from ..elitea_base import filter_missconfigured_index_tools
-from ..utils import clean_string,
+from ..utils import clean_string, get_max_toolkit_length
 from ...configurations.rally import RallyConfiguration
 
 name = "rally"
@@ -21,12 +21,10 @@ def get_tools(tool):
 
 class RallyToolkit(BaseToolkit):
     tools: List[BaseTool] = []
-    toolkit_max_length: int = 0
 
     @staticmethod
     def toolkit_config_schema() -> BaseModel:
         selected_tools = {x['name']: x['args_schema'].schema() for x in RallyApiWrapper.model_construct().get_available_tools()}
-        RallyToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         return create_model(
             name,
             rally_configuration=(RallyConfiguration, Field(description="Rally configuration", json_schema_extra={'configuration_types': ['rally']})),
@@ -37,7 +35,6 @@ class RallyToolkit(BaseToolkit):
             'metadata': {
                 "label": "Rally",
                 "icon_url": "rally.svg",
-                "max_length": RallyToolkit.toolkit_max_length,
                 "categories": ["project management"],
                 "extra_categories": ["agile management", "test management", "scrum", "kanban"]
             }
@@ -54,18 +51,22 @@ class RallyToolkit(BaseToolkit):
             **kwargs.get('rally_configuration'),
         }
         rally_api_wrapper = RallyApiWrapper(**wrapper_payload)
-        prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
         available_tools = rally_api_wrapper.get_available_tools()
         tools = []
         for tool in available_tools:
             if selected_tools:
                 if tool["name"] not in selected_tools:
                     continue
+            description = f"{tool['description']}\nWorkspace: {rally_api_wrapper.workspace}. Project: {rally_api_wrapper.project}"
+            if toolkit_name:
+                description = f"{description}\nToolkit: {toolkit_name}"
+            description = description[:1000]
             tools.append(BaseAction(
                 api_wrapper=rally_api_wrapper,
-                name=
-                description=
-                args_schema=tool["args_schema"]
+                name=tool["name"],
+                description=description,
+                args_schema=tool["args_schema"],
+                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
             ))
         return cls(tools=tools)
 
alita_sdk/tools/report_portal/__init__.py
CHANGED

@@ -7,7 +7,7 @@ from pydantic import create_model, BaseModel, ConfigDict, Field
 from .api_wrapper import ReportPortalApiWrapper
 from ..base.tool import BaseAction
 from ..elitea_base import filter_missconfigured_index_tools
-from ..utils import clean_string,
+from ..utils import clean_string, get_max_toolkit_length
 from ...configurations.report_portal import ReportPortalConfiguration
 
 name = "report_portal"
@@ -21,19 +21,16 @@ def get_tools(tool):
 
 
 class ReportPortalToolkit(BaseToolkit):
-    tools:
-    toolkit_max_length: int = 0
+    tools: List[BaseTool] = []
 
     @staticmethod
     def toolkit_config_schema() -> BaseModel:
         selected_tools = {x['name']: x['args_schema'].schema() for x in ReportPortalApiWrapper.model_construct().get_available_tools()}
-        ReportPortalToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         return create_model(
             name,
             report_portal_configuration=(ReportPortalConfiguration, Field(description="Report Portal Configuration", json_schema_extra={'configuration_types': ['report_portal']})),
             selected_tools=(List[Literal[tuple(selected_tools)]], Field(default=[], json_schema_extra={'args_schemas': selected_tools})),
             __config__=ConfigDict(json_schema_extra={'metadata': {"label": "Report Portal", "icon_url": "reportportal-icon.svg",
-                                                                  "max_length": ReportPortalToolkit.toolkit_max_length,
                                                                   "categories": ["testing"],
                                                                   "extra_categories": ["test reporting", "test automation"]}})
         )
@@ -48,17 +45,22 @@ class ReportPortalToolkit(BaseToolkit):
             **kwargs.get('report_portal_configuration', {}),
         }
         report_portal_api_wrapper = ReportPortalApiWrapper(**wrapper_payload)
-        prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
         available_tools = report_portal_api_wrapper.get_available_tools()
         tools = []
         for tool in available_tools:
             if selected_tools and tool["name"] not in selected_tools:
                 continue
+            description = tool['description']
+            if toolkit_name:
+                description = f"Toolkit: {toolkit_name}\n{description}"
+            description = f"{description}\nReport portal configuration: 'url - {report_portal_api_wrapper.endpoint}, project - {report_portal_api_wrapper.project}'"
+            description = description[:1000]
             tools.append(BaseAction(
                 api_wrapper=report_portal_api_wrapper,
-                name=
-                description=
-                args_schema=tool["args_schema"]
+                name=tool["name"],
+                description=description,
+                args_schema=tool["args_schema"],
+                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
             ))
         return cls(tools=tools)
 