alita-sdk 0.3.486__py3-none-any.whl → 0.3.515__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of alita-sdk might be problematic.
- alita_sdk/cli/agent_loader.py +27 -6
- alita_sdk/cli/agents.py +10 -1
- alita_sdk/cli/inventory.py +12 -195
- alita_sdk/cli/tools/filesystem.py +95 -9
- alita_sdk/community/inventory/__init__.py +12 -0
- alita_sdk/community/inventory/toolkit.py +9 -5
- alita_sdk/community/inventory/toolkit_utils.py +176 -0
- alita_sdk/configurations/ado.py +144 -0
- alita_sdk/configurations/confluence.py +76 -42
- alita_sdk/configurations/figma.py +76 -0
- alita_sdk/configurations/gitlab.py +2 -0
- alita_sdk/configurations/qtest.py +72 -1
- alita_sdk/configurations/report_portal.py +96 -0
- alita_sdk/configurations/sharepoint.py +148 -0
- alita_sdk/configurations/testio.py +83 -0
- alita_sdk/runtime/clients/artifact.py +2 -2
- alita_sdk/runtime/clients/client.py +64 -40
- alita_sdk/runtime/clients/sandbox_client.py +14 -0
- alita_sdk/runtime/langchain/assistant.py +48 -2
- alita_sdk/runtime/langchain/constants.py +3 -1
- alita_sdk/runtime/langchain/document_loaders/AlitaExcelLoader.py +103 -60
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLinesLoader.py +77 -0
- alita_sdk/runtime/langchain/document_loaders/AlitaJSONLoader.py +2 -1
- alita_sdk/runtime/langchain/document_loaders/constants.py +12 -7
- alita_sdk/runtime/langchain/langraph_agent.py +10 -10
- alita_sdk/runtime/langchain/utils.py +6 -1
- alita_sdk/runtime/toolkits/artifact.py +14 -5
- alita_sdk/runtime/toolkits/datasource.py +13 -6
- alita_sdk/runtime/toolkits/mcp.py +94 -219
- alita_sdk/runtime/toolkits/planning.py +13 -6
- alita_sdk/runtime/toolkits/tools.py +60 -25
- alita_sdk/runtime/toolkits/vectorstore.py +11 -5
- alita_sdk/runtime/tools/artifact.py +185 -23
- alita_sdk/runtime/tools/function.py +2 -1
- alita_sdk/runtime/tools/llm.py +155 -34
- alita_sdk/runtime/tools/mcp_remote_tool.py +25 -10
- alita_sdk/runtime/tools/mcp_server_tool.py +2 -4
- alita_sdk/runtime/tools/vectorstore_base.py +3 -3
- alita_sdk/runtime/utils/AlitaCallback.py +136 -21
- alita_sdk/runtime/utils/mcp_client.py +492 -0
- alita_sdk/runtime/utils/mcp_oauth.py +125 -8
- alita_sdk/runtime/utils/mcp_sse_client.py +35 -6
- alita_sdk/runtime/utils/mcp_tools_discovery.py +124 -0
- alita_sdk/runtime/utils/toolkit_utils.py +7 -13
- alita_sdk/runtime/utils/utils.py +2 -0
- alita_sdk/tools/__init__.py +15 -0
- alita_sdk/tools/ado/repos/__init__.py +10 -12
- alita_sdk/tools/ado/test_plan/__init__.py +23 -8
- alita_sdk/tools/ado/wiki/__init__.py +24 -8
- alita_sdk/tools/ado/wiki/ado_wrapper.py +21 -7
- alita_sdk/tools/ado/work_item/__init__.py +24 -8
- alita_sdk/tools/advanced_jira_mining/__init__.py +10 -8
- alita_sdk/tools/aws/delta_lake/__init__.py +12 -9
- alita_sdk/tools/aws/delta_lake/tool.py +5 -1
- alita_sdk/tools/azure_ai/search/__init__.py +9 -7
- alita_sdk/tools/base/tool.py +5 -1
- alita_sdk/tools/base_indexer_toolkit.py +26 -1
- alita_sdk/tools/bitbucket/__init__.py +14 -10
- alita_sdk/tools/bitbucket/api_wrapper.py +50 -2
- alita_sdk/tools/browser/__init__.py +5 -4
- alita_sdk/tools/carrier/__init__.py +5 -6
- alita_sdk/tools/chunkers/sematic/json_chunker.py +1 -0
- alita_sdk/tools/chunkers/sematic/markdown_chunker.py +2 -0
- alita_sdk/tools/chunkers/universal_chunker.py +1 -0
- alita_sdk/tools/cloud/aws/__init__.py +9 -7
- alita_sdk/tools/cloud/azure/__init__.py +9 -7
- alita_sdk/tools/cloud/gcp/__init__.py +9 -7
- alita_sdk/tools/cloud/k8s/__init__.py +9 -7
- alita_sdk/tools/code/linter/__init__.py +9 -8
- alita_sdk/tools/code/loaders/codesearcher.py +3 -2
- alita_sdk/tools/code/sonar/__init__.py +9 -7
- alita_sdk/tools/confluence/__init__.py +15 -10
- alita_sdk/tools/confluence/api_wrapper.py +63 -14
- alita_sdk/tools/custom_open_api/__init__.py +11 -5
- alita_sdk/tools/elastic/__init__.py +10 -8
- alita_sdk/tools/elitea_base.py +387 -9
- alita_sdk/tools/figma/__init__.py +8 -7
- alita_sdk/tools/github/__init__.py +12 -14
- alita_sdk/tools/github/github_client.py +68 -2
- alita_sdk/tools/github/tool.py +5 -1
- alita_sdk/tools/gitlab/__init__.py +14 -11
- alita_sdk/tools/gitlab/api_wrapper.py +81 -1
- alita_sdk/tools/gitlab_org/__init__.py +9 -8
- alita_sdk/tools/google/bigquery/__init__.py +12 -12
- alita_sdk/tools/google/bigquery/tool.py +5 -1
- alita_sdk/tools/google_places/__init__.py +9 -8
- alita_sdk/tools/jira/__init__.py +15 -10
- alita_sdk/tools/keycloak/__init__.py +10 -8
- alita_sdk/tools/localgit/__init__.py +8 -3
- alita_sdk/tools/localgit/local_git.py +62 -54
- alita_sdk/tools/localgit/tool.py +5 -1
- alita_sdk/tools/memory/__init__.py +11 -3
- alita_sdk/tools/ocr/__init__.py +10 -8
- alita_sdk/tools/openapi/__init__.py +6 -2
- alita_sdk/tools/pandas/__init__.py +9 -7
- alita_sdk/tools/postman/__init__.py +10 -11
- alita_sdk/tools/pptx/__init__.py +9 -9
- alita_sdk/tools/qtest/__init__.py +9 -8
- alita_sdk/tools/rally/__init__.py +9 -8
- alita_sdk/tools/report_portal/__init__.py +11 -9
- alita_sdk/tools/salesforce/__init__.py +9 -9
- alita_sdk/tools/servicenow/__init__.py +10 -8
- alita_sdk/tools/sharepoint/__init__.py +9 -8
- alita_sdk/tools/sharepoint/api_wrapper.py +2 -2
- alita_sdk/tools/slack/__init__.py +8 -7
- alita_sdk/tools/sql/__init__.py +9 -8
- alita_sdk/tools/testio/__init__.py +9 -8
- alita_sdk/tools/testrail/__init__.py +10 -8
- alita_sdk/tools/utils/__init__.py +9 -4
- alita_sdk/tools/utils/text_operations.py +254 -0
- alita_sdk/tools/vector_adapters/VectorStoreAdapter.py +16 -18
- alita_sdk/tools/xray/__init__.py +10 -8
- alita_sdk/tools/yagmail/__init__.py +8 -3
- alita_sdk/tools/zephyr/__init__.py +8 -7
- alita_sdk/tools/zephyr_enterprise/__init__.py +10 -8
- alita_sdk/tools/zephyr_essential/__init__.py +9 -8
- alita_sdk/tools/zephyr_scale/__init__.py +9 -8
- alita_sdk/tools/zephyr_squad/__init__.py +9 -8
- {alita_sdk-0.3.486.dist-info → alita_sdk-0.3.515.dist-info}/METADATA +1 -1
- {alita_sdk-0.3.486.dist-info → alita_sdk-0.3.515.dist-info}/RECORD +124 -119
- {alita_sdk-0.3.486.dist-info → alita_sdk-0.3.515.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.486.dist-info → alita_sdk-0.3.515.dist-info}/entry_points.txt +0 -0
- {alita_sdk-0.3.486.dist-info → alita_sdk-0.3.515.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.486.dist-info → alita_sdk-0.3.515.dist-info}/top_level.txt +0 -0
alita_sdk/tools/utils/text_operations.py
ADDED

@@ -0,0 +1,254 @@
"""
Shared text operations utilities for file manipulation across toolkits.

Provides common functionality for:
- Parsing OLD/NEW marker-based edits
- Text file validation
- Line-based slicing and partial reads
- Content searching with context
"""
import re
import logging
from typing import List, Tuple, Dict, Optional

logger = logging.getLogger(__name__)

# Text file extensions that support editing
TEXT_EDITABLE_EXTENSIONS = {
    '.md', '.txt', '.csv', '.json', '.xml', '.html',
    '.yaml', '.yml', '.ini', '.conf', '.log', '.sh',
    '.py', '.js', '.ts', '.jsx', '.tsx', '.java', '.go',
    '.rb', '.php', '.c', '.cpp', '.h', '.hpp', '.cs',
    '.sql', '.r', '.m', '.swift', '.kt', '.rs', '.scala'
}


def parse_old_new_markers(file_query: str) -> List[Tuple[str, str]]:
    """
    Parse OLD/NEW marker-based edit instructions.

    Extracts pairs of old and new content from a file query using markers:
    - OLD <<<< ... >>>> OLD
    - NEW <<<< ... >>>> NEW

    Args:
        file_query: String containing marked old and new content sections

    Returns:
        List of tuples (old_content, new_content) for each edit pair

    Example:
        >>> query = '''
        ... OLD <<<<
        ... Hello World
        ... >>>> OLD
        ... NEW <<<<
        ... Hello Mars
        ... >>>> NEW
        ... '''
        >>> parse_old_new_markers(query)
        [('Hello World', 'Hello Mars')]
    """
    # Split the file content by lines
    code_lines = file_query.split("\n")

    # Initialize lists to hold the contents of OLD and NEW sections
    old_contents = []
    new_contents = []

    # Initialize variables to track whether the current line is within an OLD or NEW section
    in_old_section = False
    in_new_section = False

    # Temporary storage for the current section's content
    current_section_content = []

    # Iterate through each line in the file content
    for line in code_lines:
        # Check for OLD section start
        if "OLD <<<" in line:
            in_old_section = True
            current_section_content = []  # Reset current section content
            continue  # Skip the line with the marker

        # Check for OLD section end
        if ">>>> OLD" in line:
            in_old_section = False
            old_contents.append("\n".join(current_section_content).strip())  # Add the captured content
            current_section_content = []  # Reset current section content
            continue  # Skip the line with the marker

        # Check for NEW section start
        if "NEW <<<" in line:
            in_new_section = True
            current_section_content = []  # Reset current section content
            continue  # Skip the line with the marker

        # Check for NEW section end
        if ">>>> NEW" in line:
            in_new_section = False
            new_contents.append("\n".join(current_section_content).strip())  # Add the captured content
            current_section_content = []  # Reset current section content
            continue  # Skip the line with the marker

        # If currently in an OLD or NEW section, add the line to the current section content
        if in_old_section or in_new_section:
            current_section_content.append(line)

    # Pair the OLD and NEW contents
    paired_contents = list(zip(old_contents, new_contents))

    return paired_contents


def is_text_editable(filename: str) -> bool:
    """
    Check if a file is editable as text based on its extension.

    Args:
        filename: Name or path of the file to check

    Returns:
        True if file extension is in the text-editable whitelist

    Example:
        >>> is_text_editable("config.json")
        True
        >>> is_text_editable("image.png")
        False
    """
    from pathlib import Path
    ext = Path(filename).suffix.lower()
    return ext in TEXT_EDITABLE_EXTENSIONS


def apply_line_slice(
    content: str,
    offset: Optional[int] = None,
    limit: Optional[int] = None,
    head: Optional[int] = None,
    tail: Optional[int] = None
) -> str:
    """
    Apply line-based slicing to text content.

    Supports multiple modes:
    - offset + limit: Read from line `offset` for `limit` lines (1-indexed)
    - head: Read only first N lines
    - tail: Read only last N lines
    - No params: Return full content

    Args:
        content: Text content to slice
        offset: Starting line number (1-indexed, inclusive)
        limit: Number of lines to read from offset
        head: Return only first N lines
        tail: Return only last N lines

    Returns:
        Sliced content as string

    Example:
        >>> text = "line1\\nline2\\nline3\\nline4\\nline5"
        >>> apply_line_slice(text, offset=2, limit=2)
        'line2\\nline3'
        >>> apply_line_slice(text, head=2)
        'line1\\nline2'
        >>> apply_line_slice(text, tail=2)
        'line4\\nline5'
    """
    if not content:
        return content

    lines = content.splitlines(keepends=True)

    # Head mode: first N lines
    if head is not None:
        return ''.join(lines[:head])

    # Tail mode: last N lines
    if tail is not None:
        return ''.join(lines[-tail:] if tail > 0 else lines)

    # Offset + limit mode: slice from offset for limit lines
    if offset is not None:
        start_idx = max(0, offset - 1)  # Convert 1-indexed to 0-indexed
        if limit is not None:
            end_idx = start_idx + limit
            return ''.join(lines[start_idx:end_idx])
        else:
            return ''.join(lines[start_idx:])

    # No slicing parameters: return full content
    return content


def search_in_content(
    content: str,
    pattern: str,
    is_regex: bool = True,
    context_lines: int = 2
) -> List[Dict[str, any]]:
    """
    Search for pattern in content with context lines.

    Args:
        content: Text content to search
        pattern: Search pattern (regex if is_regex=True, else literal string)
        is_regex: Whether to treat pattern as regex (default True)
        context_lines: Number of lines before/after match to include (default 2)

    Returns:
        List of match dictionaries with keys:
        - line_number: 1-indexed line number of match
        - line_content: The matching line
        - match_text: The actual matched text
        - context_before: List of lines before match
        - context_after: List of lines after match

    Example:
        >>> text = "line1\\nHello World\\nline3"
        >>> matches = search_in_content(text, "Hello", is_regex=False)
        >>> matches[0]['line_number']
        2
        >>> matches[0]['match_text']
        'Hello'
    """
    if not content:
        return []

    lines = content.splitlines()
    matches = []

    # Compile regex pattern or escape for literal search
    if is_regex:
        try:
            regex = re.compile(pattern, re.IGNORECASE)
        except re.error as e:
            logger.warning(f"Invalid regex pattern '{pattern}': {e}")
            return []
    else:
        regex = re.compile(re.escape(pattern), re.IGNORECASE)

    # Search each line
    for line_idx, line in enumerate(lines):
        match = regex.search(line)
        if match:
            line_number = line_idx + 1  # Convert to 1-indexed

            # Get context lines
            context_start = max(0, line_idx - context_lines)
            context_end = min(len(lines), line_idx + context_lines + 1)

            context_before = lines[context_start:line_idx]
            context_after = lines[line_idx + 1:context_end]

            matches.append({
                'line_number': line_number,
                'line_content': line,
                'match_text': match.group(0),
                'context_before': context_before,
                'context_after': context_after,
            })

    return matches
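Taken together, the helpers above compose into a simple edit-apply-inspect flow. A minimal driver sketch (the import path follows the file's location; the substring-replace loop is an illustration of mine, not how the toolkits themselves apply edits):

from alita_sdk.tools.utils.text_operations import (
    apply_line_slice, is_text_editable, parse_old_new_markers, search_in_content,
)

content = "alpha\nbeta\ngamma\n"
edit_query = """
OLD <<<<
beta
>>>> OLD
NEW <<<<
BETA
>>>> NEW
"""

if is_text_editable("notes.txt"):
    # Apply each (old, new) pair as a plain substring replacement,
    # purely for illustration.
    for old, new in parse_old_new_markers(edit_query):
        content = content.replace(old, new)

print(apply_line_slice(content, head=2))                    # -> "alpha\nBETA\n"
print(search_in_content(content, "gam")[0]["line_number"])  # -> 3
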
alita_sdk/tools/vector_adapters/VectorStoreAdapter.py
CHANGED

@@ -31,8 +31,8 @@ class VectorStoreAdapter(ABC):
         pass

     @abstractmethod
-    def clean_collection(self, vectorstore_wrapper, index_name: str = ''):
-        """Clean the vectorstore collection by deleting all indexed data."""
+    def clean_collection(self, vectorstore_wrapper, index_name: str = '', including_index_meta: bool = False):
+        """Clean the vectorstore collection by deleting all indexed data. If including_index_meta is True, skip the index_meta records."""
         pass

     @abstractmethod

@@ -132,24 +132,22 @@ class PGVectorAdapter(VectorStoreAdapter):
             logger.error(f"Failed to get indexed IDs from PGVector: {str(e)}")
             return []

-    def clean_collection(self, vectorstore_wrapper, index_name: str = ''):
-        """Clean the vectorstore collection by deleting all indexed data."""
-        # This logic deletes all data from the vectorstore collection without removal of collection.
-        # Collection itself remains available for future indexing.
+    def clean_collection(self, vectorstore_wrapper, index_name: str = '', including_index_meta: bool = False):
+        """Clean the vectorstore collection by deleting all indexed data. If including_index_meta is True, skip the index_meta records."""
         from sqlalchemy.orm import Session
         from sqlalchemy import func, or_
-
         store = vectorstore_wrapper.vectorstore
         with Session(store.session_maker.bind) as session:
-            [nine removed lines; their content was not captured in the source diff]
+            if including_index_meta:
+                session.query(store.EmbeddingStore).filter(
+                    func.jsonb_extract_path_text(store.EmbeddingStore.cmetadata, 'collection') == index_name
+                ).delete(synchronize_session=False)
+            else:
+                session.query(store.EmbeddingStore).filter(
+                    func.jsonb_extract_path_text(store.EmbeddingStore.cmetadata, 'collection') == index_name,
+                    or_(func.jsonb_extract_path_text(store.EmbeddingStore.cmetadata, 'type').is_(None),
+                        func.jsonb_extract_path_text(store.EmbeddingStore.cmetadata, 'type') != IndexerKeywords.INDEX_META_TYPE.value)
+                ).delete(synchronize_session=False)
             session.commit()

     def is_vectorstore_type(self, vectorstore) -> bool:

@@ -340,8 +338,8 @@ class ChromaAdapter(VectorStoreAdapter):
             logger.error(f"Failed to get indexed IDs from Chroma: {str(e)}")
             return []

-    def clean_collection(self, vectorstore_wrapper, index_name: str = ''):
-        """Clean the vectorstore collection by deleting all indexed data."""
+    def clean_collection(self, vectorstore_wrapper, index_name: str = '', including_index_meta: bool = False):
+        """Clean the vectorstore collection by deleting all indexed data. including_index_meta is ignored."""
        vectorstore_wrapper.vectorstore.delete(ids=self.get_indexed_ids(vectorstore_wrapper, index_name))

     def get_indexed_data(self, vectorstore_wrapper):
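The practical effect of the new including_index_meta flag is visible in the PGVector filter above: when the flag is True, the delete matches every row whose cmetadata 'collection' equals index_name; when it is False, rows whose cmetadata 'type' equals IndexerKeywords.INDEX_META_TYPE survive the cleanup. A minimal caller sketch (the wrapper function and its arguments are hypothetical; only clean_collection's signature comes from this diff):

def clean_before_reindex(adapter, vectorstore_wrapper, index_name: str, full_reset: bool = False):
    # full_reset=True also removes the index_meta bookkeeping rows on PGVector;
    # ChromaAdapter ignores the flag and always deletes all indexed IDs.
    adapter.clean_collection(vectorstore_wrapper, index_name, including_index_meta=full_reset)
    # ... re-add documents here ...
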
alita_sdk/tools/xray/__init__.py
CHANGED
@@ -8,7 +8,7 @@ from pydantic import create_model, BaseModel, Field
 from .api_wrapper import XrayApiWrapper
 from ..base.tool import BaseAction
 from ..elitea_base import filter_missconfigured_index_tools
-from ..utils import clean_string, get_max_toolkit_length
+from ..utils import clean_string, get_max_toolkit_length
 from ...configurations.pgvector import PgVectorConfiguration
 from ...configurations.xray import XrayConfiguration

@@ -34,12 +34,10 @@ def get_tools(tool):

 class XrayToolkit(BaseToolkit):
     tools: List[BaseTool] = []
-    toolkit_max_length: int = 0

     @staticmethod
     def toolkit_config_schema() -> BaseModel:
         selected_tools = {x['name']: x['args_schema'].schema() for x in XrayApiWrapper.model_construct().get_available_tools()}
-        XrayToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         return create_model(
             name,
             limit=(Optional[int], Field(description="Limit", default=100)),

@@ -56,7 +54,6 @@ class XrayToolkit(BaseToolkit):
             {
                 'metadata': {
                     "label": "XRAY cloud", "icon_url": "xray.svg",
-                    "max_length": XrayToolkit.toolkit_max_length,
                     "categories": ["test management"],
                     "extra_categories": ["test automation", "test case management", "test planning"]
                 }

@@ -76,18 +73,23 @@ class XrayToolkit(BaseToolkit):
             **(kwargs.get('pgvector_configuration') or {}),
         }
         xray_api_wrapper = XrayApiWrapper(**wrapper_payload)
-        prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
         available_tools = xray_api_wrapper.get_available_tools()
         tools = []
         for tool in available_tools:
             if selected_tools:
                 if tool["name"] not in selected_tools:
                     continue
+            description = tool["description"]
+            if toolkit_name:
+                description = f"Toolkit: {toolkit_name}\n{description}"
+            description = description + "\nXray instance: " + xray_api_wrapper.base_url
+            description = description[:1000]
             tools.append(BaseAction(
                 api_wrapper=xray_api_wrapper,
-                name=
-                description=
-                args_schema=tool["args_schema"]
+                name=tool["name"],
+                description=description,
+                args_schema=tool["args_schema"],
+                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
             ))
         return cls(tools=tools)

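The same refactor repeats in the yagmail and zephyr* toolkits that follow: the clean_string(...) + TOOLKIT_SPLITTER name prefix is dropped, and toolkit identity moves into the tool description and a metadata field instead. Distilled from the added lines, the shared pattern is roughly this (the helper name and instance_note parameter are mine; only the logic inside comes from the diffs):

def build_tool_description(tool: dict, toolkit_name: str | None, instance_note: str | None = None) -> str:
    """Mirror of the description logic added in each toolkit's get_toolkit."""
    description = tool["description"]
    if toolkit_name:
        description = f"Toolkit: {toolkit_name}\n{description}"
    if instance_note:
        # e.g. "Xray instance: " + base_url in XrayToolkit, and the analogous
        # "Zephyr Enterprise instance: " line in ZephyrEnterpriseToolkit.
        description = description + "\n" + instance_note
    return description[:1000]  # every toolkit caps the description at 1000 characters

Since the name prefix is gone, tools are now registered under their bare names, and consumers that need the owning toolkit can read it from the new metadata={"toolkit_name": ...} field instead.
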
alita_sdk/tools/yagmail/__init__.py
CHANGED

@@ -34,7 +34,7 @@ class AlitaYagmailToolkit(BaseToolkit):
         )

     @classmethod
-    def get_toolkit(cls, selected_tools: list[str] | None = None, **kwargs):
+    def get_toolkit(cls, selected_tools: list[str] | None = None, toolkit_name: Optional[str] = None, **kwargs):
         if selected_tools is None:
             selected_tools = []
         yagmail_wrapper = YagmailWrapper(**kwargs)

@@ -44,11 +44,16 @@ class AlitaYagmailToolkit(BaseToolkit):
             if selected_tools:
                 if tool["name"] not in selected_tools:
                     continue
+            description = tool["description"]
+            if toolkit_name:
+                description = f"Toolkit: {toolkit_name}\n{description}"
+            description = description[:1000]
             tools.append(BaseAction(
                 api_wrapper=yagmail_wrapper,
                 name=tool["name"],
-                description=
-                args_schema=tool["args_schema"]
+                description=description,
+                args_schema=tool["args_schema"],
+                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
             ))
         return cls(tools=tools)

alita_sdk/tools/zephyr/__init__.py
CHANGED

@@ -8,7 +8,7 @@ from pydantic import create_model, BaseModel, Field, SecretStr
 from ..base.tool import BaseAction
 from .api_wrapper import ZephyrV1ApiWrapper
 from ..elitea_base import filter_missconfigured_index_tools
-from ..utils import clean_string,
+from ..utils import clean_string, get_max_toolkit_length

 name = "zephyr"

@@ -23,12 +23,10 @@ def get_tools(tool):

 class ZephyrToolkit(BaseToolkit):
     tools: List[BaseTool] = []
-    toolkit_max_length: int = 0

     @staticmethod
     def toolkit_config_schema() -> BaseModel:
         selected_tools = {x['name']: x['args_schema'].schema() for x in ZephyrV1ApiWrapper.model_construct().get_available_tools()}
-        ZephyrToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         return create_model(
             name,
             base_url=(str, Field(description="Base URL")),

@@ -40,7 +38,6 @@ class ZephyrToolkit(BaseToolkit):
             {
                 'metadata': {
                     "label": "Zephyr", "icon_url": "zephyr.svg", "hidden": True,
-                    "max_length": ZephyrToolkit.toolkit_max_length,
                     "categories": ["test management"],
                     "extra_categories": ["test automation", "test case management", "test planning"]
                 }}}

@@ -50,18 +47,22 @@ class ZephyrToolkit(BaseToolkit):
     @filter_missconfigured_index_tools
     def get_toolkit(cls, selected_tools: list[str] | None = None, toolkit_name: Optional[str] = None, **kwargs):
         zephyr_api_wrapper = ZephyrV1ApiWrapper(**kwargs)
-        prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
         available_tools = zephyr_api_wrapper.get_available_tools()
         tools = []
         for tool in available_tools:
             if selected_tools:
                 if tool["name"] not in selected_tools:
                     continue
+            description = tool["description"]
+            if toolkit_name:
+                description = f"Toolkit: {toolkit_name}\n{description}"
+            description = description[:1000]
             tools.append(BaseAction(
                 api_wrapper=zephyr_api_wrapper,
                 name=tool["name"],
-                description=
-                args_schema=tool["args_schema"]
+                description=description,
+                args_schema=tool["args_schema"],
+                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
             ))
         return cls(tools=tools)

alita_sdk/tools/zephyr_enterprise/__init__.py
CHANGED

@@ -5,7 +5,7 @@ from typing import List, Literal, Optional
 from .api_wrapper import ZephyrApiWrapper
 from ..base.tool import BaseAction
 from ..elitea_base import filter_missconfigured_index_tools
-from ..utils import clean_string, get_max_toolkit_length
+from ..utils import clean_string, get_max_toolkit_length
 from ...configurations.pgvector import PgVectorConfiguration
 from ...configurations.zephyr_enterprise import ZephyrEnterpriseConfiguration

@@ -28,13 +28,11 @@ def get_tools(tool):

 class ZephyrEnterpriseToolkit(BaseToolkit):
     tools: List[BaseTool] = []
-    toolkit_max_length: int = 0

     @staticmethod
     def toolkit_config_schema() -> BaseModel:
         selected_tools = {x['name']: x['args_schema'].schema() for x in
                           ZephyrApiWrapper.model_construct().get_available_tools()}
-        ZephyrEnterpriseToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         return create_model(
             name,
             zephyr_configuration=(ZephyrEnterpriseConfiguration, Field(description="Zephyr Configuration", json_schema_extra={'configuration_types': ['zephyr_enterprise']})),

@@ -49,7 +47,6 @@ class ZephyrEnterpriseToolkit(BaseToolkit):
         __config__=ConfigDict(json_schema_extra={
             'metadata': {
                 "label": "Zephyr Enterprise", "icon_url": "zephyr.svg",
-                "max_length": ZephyrEnterpriseToolkit.toolkit_max_length,
                 "categories": ["test management"],
                 "extra_categories": ["test automation", "test case management", "test planning"]
             }})

@@ -68,17 +65,22 @@ class ZephyrEnterpriseToolkit(BaseToolkit):
             **(kwargs.get('embedding_configuration') or {}),
         }
         zephyr_api_wrapper = ZephyrApiWrapper(**wrapper_payload)
-        prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
         available_tools = zephyr_api_wrapper.get_available_tools()
         tools = []
         for tool in available_tools:
             if selected_tools and tool["name"] not in selected_tools:
                 continue
+            description = tool["description"]
+            if toolkit_name:
+                description = f"Toolkit: {toolkit_name}\n{description}"
+            description = description + "\nZephyr Enterprise instance: " + zephyr_api_wrapper.base_url
+            description = description[:1000]
             tools.append(BaseAction(
                 api_wrapper=zephyr_api_wrapper,
-                name=
-                description=
-                args_schema=tool["args_schema"]
+                name=tool["name"],
+                description=description,
+                args_schema=tool["args_schema"],
+                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
             ))
         return cls(tools=tools)

alita_sdk/tools/zephyr_essential/__init__.py
CHANGED

@@ -6,7 +6,7 @@ from pydantic import create_model, BaseModel, Field
 from .api_wrapper import ZephyrEssentialApiWrapper
 from ..base.tool import BaseAction
 from ..elitea_base import filter_missconfigured_index_tools
-from ..utils import clean_string,
+from ..utils import clean_string, get_max_toolkit_length
 from ...configurations.pgvector import PgVectorConfiguration
 from ...configurations.zephyr_essential import ZephyrEssentialConfiguration

@@ -29,12 +29,10 @@ def get_tools(tool):

 class ZephyrEssentialToolkit(BaseToolkit):
     tools: List[BaseTool] = []
-    toolkit_max_length: int = 0

     @staticmethod
     def toolkit_config_schema() -> BaseModel:
         selected_tools = {x['name']: x['args_schema'].schema() for x in ZephyrEssentialApiWrapper.model_construct().get_available_tools()}
-        ZephyrEssentialToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         return create_model(
             name,
             zephyr_essential_configuration=(ZephyrEssentialConfiguration, Field(description="Zephyr Essential Configuration", json_schema_extra={'configuration_types': ['zephyr_essential']})),

@@ -46,7 +44,6 @@ class ZephyrEssentialToolkit(BaseToolkit):
         # embedder settings
         embedding_model=(Optional[str], Field(default=None, description="Embedding configuration.", json_schema_extra={'configuration_model': 'embedding'})),
         __config__={'json_schema_extra': {'metadata': {"label": "Zephyr Essential", "icon_url": "zephyr.svg",
-            "max_length": ZephyrEssentialToolkit.toolkit_max_length,
             "categories": ["test management"],
             "extra_categories": ["test automation", "test case management", "test planning"]
         }}}

@@ -63,18 +60,22 @@ class ZephyrEssentialToolkit(BaseToolkit):
             **(kwargs.get('pgvector_configuration') or {}),
         }
         zephyr_api_wrapper = ZephyrEssentialApiWrapper(**wrapper_payload)
-        prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
         available_tools = zephyr_api_wrapper.get_available_tools()
         tools = []
         for tool in available_tools:
             if selected_tools:
                 if tool["name"] not in selected_tools:
                     continue
+            description = tool["description"]
+            if toolkit_name:
+                description = f"Toolkit: {toolkit_name}\n{description}"
+            description = description[:1000]
             tools.append(BaseAction(
                 api_wrapper=zephyr_api_wrapper,
-                name=
-                description=
-                args_schema=tool["args_schema"]
+                name=tool["name"],
+                description=description,
+                args_schema=tool["args_schema"],
+                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
             ))
         return cls(tools=tools)

alita_sdk/tools/zephyr_scale/__init__.py
CHANGED

@@ -7,7 +7,7 @@ from pydantic import create_model, BaseModel, Field
 from .api_wrapper import ZephyrScaleApiWrapper
 from ..base.tool import BaseAction
 from ..elitea_base import filter_missconfigured_index_tools
-from ..utils import clean_string, get_max_toolkit_length
+from ..utils import clean_string, get_max_toolkit_length
 from ...configurations.pgvector import PgVectorConfiguration
 from ...configurations.zephyr import ZephyrConfiguration

@@ -32,12 +32,10 @@ def get_tools(tool):

 class ZephyrScaleToolkit(BaseToolkit):
     tools: List[BaseTool] = []
-    toolkit_max_length: int = 0

     @staticmethod
     def toolkit_config_schema() -> BaseModel:
         selected_tools = {x['name']: x['args_schema'].schema() for x in ZephyrScaleApiWrapper.model_construct().get_available_tools()}
-        ZephyrScaleToolkit.toolkit_max_length = get_max_toolkit_length(selected_tools)
         return create_model(
             name,
             max_results=(int, Field(default=100, description="Results count to show")),

@@ -56,7 +54,6 @@ class ZephyrScaleToolkit(BaseToolkit):
             'metadata': {
                 "label": "Zephyr Scale",
                 "icon_url": "zephyr.svg",
-                "max_length": ZephyrScaleToolkit.toolkit_max_length,
                 "categories": ["test management"],
                 "extra_categories": ["test automation", "test case management", "test planning"],
             }

@@ -76,18 +73,22 @@ class ZephyrScaleToolkit(BaseToolkit):
             **(kwargs.get('pgvector_configuration') or {}),
         }
         zephyr_wrapper = ZephyrScaleApiWrapper(**wrapper_payload)
-        prefix = clean_string(toolkit_name, cls.toolkit_max_length) + TOOLKIT_SPLITTER if toolkit_name else ''
         available_tools = zephyr_wrapper.get_available_tools()
         tools = []
         for tool in available_tools:
             if selected_tools:
                 if tool["name"] not in selected_tools:
                     continue
+            description = tool["description"]
+            if toolkit_name:
+                description = f"Toolkit: {toolkit_name}\n{description}"
+            description = description[:1000]
             tools.append(BaseAction(
                 api_wrapper=zephyr_wrapper,
-                name=
-                description=
-                args_schema=tool["args_schema"]
+                name=tool["name"],
+                description=description,
+                args_schema=tool["args_schema"],
+                metadata={"toolkit_name": toolkit_name} if toolkit_name else {}
             ))
         return cls(tools=tools)