alita-sdk 0.3.204__py3-none-any.whl → 0.3.206__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alita_sdk/runtime/clients/client.py +45 -5
- alita_sdk/runtime/langchain/assistant.py +22 -21
- alita_sdk/runtime/langchain/interfaces/llm_processor.py +1 -4
- alita_sdk/runtime/toolkits/application.py +5 -10
- alita_sdk/runtime/toolkits/tools.py +0 -1
- alita_sdk/runtime/tools/vectorstore.py +157 -13
- alita_sdk/runtime/utils/streamlit.py +33 -30
- alita_sdk/runtime/utils/utils.py +5 -0
- alita_sdk/tools/__init__.py +4 -0
- alita_sdk/tools/ado/repos/repos_wrapper.py +20 -13
- alita_sdk/tools/aws/__init__.py +7 -0
- alita_sdk/tools/aws/delta_lake/__init__.py +136 -0
- alita_sdk/tools/aws/delta_lake/api_wrapper.py +220 -0
- alita_sdk/tools/aws/delta_lake/schemas.py +20 -0
- alita_sdk/tools/aws/delta_lake/tool.py +35 -0
- alita_sdk/tools/bitbucket/api_wrapper.py +5 -5
- alita_sdk/tools/bitbucket/cloud_api_wrapper.py +54 -29
- alita_sdk/tools/elitea_base.py +55 -5
- alita_sdk/tools/gitlab/__init__.py +22 -10
- alita_sdk/tools/gitlab/api_wrapper.py +278 -253
- alita_sdk/tools/gitlab/tools.py +354 -376
- alita_sdk/tools/google/__init__.py +7 -0
- alita_sdk/tools/google/bigquery/__init__.py +154 -0
- alita_sdk/tools/google/bigquery/api_wrapper.py +502 -0
- alita_sdk/tools/google/bigquery/schemas.py +102 -0
- alita_sdk/tools/google/bigquery/tool.py +34 -0
- alita_sdk/tools/llm/llm_utils.py +0 -6
- alita_sdk/tools/openapi/__init__.py +14 -3
- alita_sdk/tools/sharepoint/__init__.py +2 -1
- alita_sdk/tools/sharepoint/api_wrapper.py +71 -7
- alita_sdk/tools/testrail/__init__.py +9 -1
- alita_sdk/tools/testrail/api_wrapper.py +154 -5
- alita_sdk/tools/utils/content_parser.py +77 -13
- alita_sdk/tools/zephyr_scale/api_wrapper.py +271 -22
- {alita_sdk-0.3.204.dist-info → alita_sdk-0.3.206.dist-info}/METADATA +3 -1
- {alita_sdk-0.3.204.dist-info → alita_sdk-0.3.206.dist-info}/RECORD +39 -30
- alita_sdk/runtime/llms/alita.py +0 -259
- {alita_sdk-0.3.204.dist-info → alita_sdk-0.3.206.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.204.dist-info → alita_sdk-0.3.206.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.204.dist-info → alita_sdk-0.3.206.dist-info}/top_level.txt +0 -0
alita_sdk/tools/utils/content_parser.py

@@ -1,3 +1,5 @@
+import re
+
 from docx import Document
 from io import BytesIO
 import pandas as pd
@@ -8,8 +10,53 @@ import io
 import pymupdf
 from langchain_core.tools import ToolException
 from transformers import BlipProcessor, BlipForConditionalGeneration
+from langchain_core.messages import HumanMessage
+
+from ...runtime.langchain.tools.utils import bytes_to_base64
+
+image_processing_prompt='''
+You are an AI model designed for analyzing images. Your task is to accurately describe the content of the given image. Depending on the type of image, follow these specific instructions:
+
+If the image is a diagram (e.g., chart, table, pie chart, bar graph, etc.):
+
+Identify the type of diagram.
+Extract all numerical values, labels, axis titles, headings, legends, and any other textual elements.
+Describe the relationships or trends between the data, if visible.
+If the image is a screenshot:
+
+Describe what is shown in the screenshot.
+If it is a software interface, identify the program or website name (if visible).
+List the key interface elements (e.g., buttons, menus, text fields, images, headers).
+If there is text, extract it.
+If the screenshot shows a conversation, describe the participants, the content of the messages, and timestamps (if visible).
+If the image is a photograph:
+
+Describe the main objects, people, animals, or elements visible in the photo.
+Specify the setting (e.g., indoors, outdoors, nature, urban area).
+If possible, identify the actions being performed by people or objects in the photo.
+If the image is an illustration or drawing:
 
-def parse_file_content(file_name, file_content, is_capture_image: bool = False, page_number: int = None, sheet_name: str = None):
+Describe the style of the illustration (e.g., realistic, cartoonish, abstract).
+Identify the main elements, their colors, and the composition of the image.
+If there is text, extract it.
+If the image contains text:
+
+Extract all text from the image.
+Specify the format of the text (e.g., heading, paragraph, list).
+If the image is a mixed type (e.g., a diagram within a screenshot):
+
+Identify all types of content present in the image.
+Perform an analysis for each type of content separately, following the relevant instructions above.
+If the image does not fit into any of the above categories:
+
+Provide a detailed description of what is shown in the image.
+Highlight any visible details that could help in understanding the image.
+Be as precise and thorough as possible in your responses. If something is unclear or illegible, state that explicitly.
+'''
+
+IMAGE_EXTENSIONS = ['jpg', 'jpeg', 'png', 'gif', 'bmp', 'tiff', 'webp', 'svg']
+
+def parse_file_content(file_name, file_content, is_capture_image: bool = False, page_number: int = None, sheet_name: str = None, llm=None):
     if file_name.endswith('.txt'):
         return parse_txt(file_content)
     elif file_name.endswith('.docx'):
@@ -17,9 +64,12 @@ def parse_file_content(file_name, file_content, is_capture_image: bool = False,
     elif file_name.endswith('.xlsx') or file_name.endswith('.xls'):
         return parse_excel(file_content, sheet_name)
     elif file_name.endswith('.pdf'):
-        return parse_pdf(file_content, page_number, is_capture_image)
+        return parse_pdf(file_content, page_number, is_capture_image, llm)
     elif file_name.endswith('.pptx'):
-        return parse_pptx(file_content, page_number, is_capture_image)
+        return parse_pptx(file_content, page_number, is_capture_image, llm)
+    elif any(file_name.lower().endswith(f".{ext}") for ext in IMAGE_EXTENSIONS):
+        match = re.search(r'\.([a-zA-Z0-9]+)$', file_name)
+        return __perform_llm_prediction_for_image(llm, file_content, match.group(1), image_processing_prompt)
     else:
         return ToolException(
             "Not supported type of files entered. Supported types are TXT, DOCX, PDF, PPTX, XLSX and XLS only.")
@@ -49,28 +99,28 @@ def parse_sheet(excel_file, sheet_name):
     df.fillna('', inplace=True)
     return df.to_string()
 
-def parse_pdf(file_content, page_number, is_capture_image):
+def parse_pdf(file_content, page_number, is_capture_image, llm):
     with pymupdf.open(stream=file_content, filetype="pdf") as report:
         text_content = ''
         if page_number is not None:
             page = report.load_page(page_number - 1)
-            text_content += read_pdf_page(report, page, page_number, is_capture_image)
+            text_content += read_pdf_page(report, page, page_number, is_capture_image, llm)
         else:
            for index, page in enumerate(report, start=1):
-                text_content += read_pdf_page(report, page, index, is_capture_image)
+                text_content += read_pdf_page(report, page, index, is_capture_image, llm)
         return text_content
 
-def parse_pptx(file_content, page_number, is_capture_image):
+def parse_pptx(file_content, page_number, is_capture_image, llm=None):
     prs = Presentation(io.BytesIO(file_content))
     text_content = ''
     if page_number is not None:
-        text_content += read_pptx_slide(prs.slides[page_number - 1], page_number, is_capture_image)
+        text_content += read_pptx_slide(prs.slides[page_number - 1], page_number, is_capture_image, llm)
     else:
         for index, slide in enumerate(prs.slides, start=1):
-            text_content += read_pptx_slide(slide, index, is_capture_image)
+            text_content += read_pptx_slide(slide, index, is_capture_image, llm)
     return text_content
 
-def read_pdf_page(report, page, index, is_capture_images):
+def read_pdf_page(report, page, index, is_capture_images, llm=None):
     text_content = f'Page: {index}\n'
     text_content += page.get_text()
     if is_capture_images:
@@ -79,7 +129,7 @@ def read_pdf_page(report, page, index, is_capture_images):
             xref = img[0]
             base_image = report.extract_image(xref)
             img_bytes = base_image["image"]
-            text_content +=
+            text_content += __perform_llm_prediction_for_image(llm, img_bytes)
     return text_content
 
 def read_docx_from_bytes(file_content):
@@ -94,14 +144,14 @@ def read_docx_from_bytes(file_content):
         print(f"Error reading .docx from bytes: {e}")
         return ""
 
-def read_pptx_slide(slide, index, is_capture_image):
+def read_pptx_slide(slide, index, is_capture_image, llm):
     text_content = f'Slide: {index}\n'
     for shape in slide.shapes:
         if hasattr(shape, "text"):
             text_content += shape.text + "\n"
         elif is_capture_image and shape.shape_type == MSO_SHAPE_TYPE.PICTURE:
             try:
-                caption =
+                caption = __perform_llm_prediction_for_image(llm, shape.image.blob)
             except:
                 caption = "\n[Picture: unknown]\n"
             text_content += caption
@@ -113,3 +163,17 @@ def describe_image(image):
     inputs = processor(image, return_tensors="pt")
     out = model.generate(**inputs)
     return "\n[Picture: " + processor.decode(out[0], skip_special_tokens=True) + "]\n"
+
+def __perform_llm_prediction_for_image(llm, image: bytes, image_format='png', prompt=image_processing_prompt) -> str:
+    base64_string = bytes_to_base64(image)
+    result = llm.invoke([
+        HumanMessage(
+            content=[
+                {"type": "text", "text": prompt},
+                {
+                    "type": "image_url",
+                    "image_url": {"url": f"data:image/{image_format};base64,{base64_string}"},
+                },
+            ])
+    ])
+    return f"\n[Image description: {result.content}]\n"
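Taken together, this file's changes replace the local BLIP captioning path with an LLM round trip for standalone image files and for pictures embedded in PDF/PPTX documents. A minimal usage sketch, assuming a vision-capable LangChain chat model (ChatOpenAI is one option, not something the SDK mandates) and a hypothetical input file:

```python
# Sketch of the new image flow; `llm` is an assumption: any chat model that
# accepts multimodal HumanMessage content will do.
from langchain_openai import ChatOpenAI
from alita_sdk.tools.utils.content_parser import parse_file_content

llm = ChatOpenAI(model="gpt-4o")  # assumed vision-capable model

with open("diagram.png", "rb") as f:  # hypothetical input file
    data = f.read()

# Image extensions are routed to __perform_llm_prediction_for_image with
# image_processing_prompt; PDF/PPTX parsing threads `llm` through so embedded
# pictures get the same treatment when is_capture_image=True.
print(parse_file_content("diagram.png", data, is_capture_image=True, llm=llm))
```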
alita_sdk/tools/zephyr_scale/api_wrapper.py

@@ -1,5 +1,6 @@
 import json
 import logging
+import re
 from typing import Any, Optional, List, Dict, Tuple, Union
 
 from pydantic import model_validator, BaseModel, SecretStr
@@ -7,7 +8,12 @@ from langchain_core.tools import ToolException
 from pydantic import create_model, PrivateAttr
 from pydantic.fields import Field
 
-from ..elitea_base import
+from ..elitea_base import BaseVectorStoreToolApiWrapper, BaseIndexParams, extend_with_vector_tools
+from langchain_core.documents import Document
+try:
+    from alita_sdk.runtime.langchain.interfaces.llm_processor import get_embeddings
+except ImportError:
+    from alita_sdk.langchain.interfaces.llm_processor import get_embeddings
 
 logger = logging.getLogger(__name__)
 
@@ -164,7 +170,7 @@ ZephyrCreateTestScript = create_model(
 
 ZephyrSearchTestCases = create_model(
     "ZephyrSearchTestCases",
-
+    project_key=(str, Field(description="Jira project key filter")),
     search_term=(Optional[str], Field(description="Optional search term to filter test cases", default=None)),
     max_results=(Optional[int], Field(description="Maximum number of results to query from the API", default=1000)),
     start_at=(Optional[int], Field(description="Zero-indexed starting position", default=0)),
@@ -242,8 +248,37 @@ ZephyrUpdateTestSteps = create_model(
     steps_updates=(str, Field(description="JSON string representing the test steps to update. Format: [{\"index\": 0, \"description\": \"Updated step description\", \"testData\": \"Updated test data\", \"expectedResult\": \"Updated expected result\"}]"))
 )
 
+# Schema for indexing Zephyr scale data into vector store
+indexData = create_model(
+    "indexData",
+    __base__=BaseIndexParams,
+    project_key=(str, Field(description="Jira project key filter")),
+    jql=(str, Field(description="""JQL-like query for searching test cases.
+
+Supported fields:
+- folder: exact folder name (e.g., folder = "Login Tests")
+- folderPath: full folder path (e.g., folderPath = "Root/Subfolder")
+- label: one or more labels (e.g., label in ("Smoke", "Critical"))
+- text: full-text search in name/description (e.g., text ~ "login")
+- customFields: JSON string with key-value pairs to filter by custom fields
+- steps: search within test steps (e.g., steps ~ "click submit")
+- orderBy: sort field (e.g., orderBy = "name")
+- orderDirection: ASC or DESC (e.g., orderDirection = "DESC")
+- limit: maximum number of results (e.g., limit = 100)
+- includeSubfolders: whether to include subfolders (e.g., includeSubfolders = false)
+- exactFolderMatch: match folder name exactly (e.g., exactFolderMatch = true)
+
+Example:
+'folder = "Authentication" AND label in ("Smoke", "Critical") AND text ~ "login" AND orderBy = "name" AND orderDirection = "ASC"'
+""")),
+    progress_step=(Optional[int], Field(default=None, ge=0, le=100,
+                                        description="Optional step size for progress reporting during indexing")),
+    clean_index=(Optional[bool], Field(default=False,
+                                       description="Optional flag to enforce clean existing index before indexing new data")),
+)
 
-class ZephyrScaleApiWrapper(BaseToolApiWrapper):
+
+class ZephyrScaleApiWrapper(BaseVectorStoreToolApiWrapper):
     # url for a Zephyr server
     base_url: Optional[str] = ""
     # auth with Jira token (cloud & server)
@@ -260,6 +295,14 @@ class ZephyrScaleApiWrapper(BaseToolApiWrapper):
     _is_cloud: bool = False
     _api: Any = PrivateAttr()
 
+    llm: Any = None
+
+    connection_string: Optional[SecretStr] = None
+    collection_name: Optional[str] = None
+    embedding_model: Optional[str] = "HuggingFaceEmbeddings"
+    embedding_model_params: Optional[Dict[str, Any]] = {"model_name": "sentence-transformers/all-MiniLM-L6-v2"}
+    vectorstore_type: Optional[str] = "PGVector"
+
     class Config:
         arbitrary_types_allowed = True
 
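These new fields wire the wrapper into the shared vector-store machinery. A hedged construction sketch; the auth fields are elided because they are not part of this hunk, and the URL, connection string, and collection name below are hypothetical:

```python
# Sketch: enabling indexing on the wrapper. PGVector is the default
# vectorstore_type; embeddings default to HuggingFaceEmbeddings with
# sentence-transformers/all-MiniLM-L6-v2.
wrapper = ZephyrScaleApiWrapper(
    base_url="https://zephyr.example.com",  # hypothetical server URL
    # ... Jira/Zephyr auth fields as required by the wrapper ...
    llm=llm,  # assumption: a LangChain chat model, as in the earlier sketch
    connection_string="postgresql+psycopg://user:pass@host:5432/vectors",  # hypothetical DSN
    collection_name="zephyr_smoke_suite",  # hypothetical collection
)
```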
@@ -342,6 +385,8 @@ class ZephyrScaleApiWrapper(BaseToolApiWrapper):
         try:
             test_case_steps = self._api.test_cases.get_test_steps(test_case_key, **kwargs)
             steps_list = [str(step) for step in test_case_steps]
+            if kwargs['return_list']:
+                return steps_list
             all_steps_concatenated = '\n'.join(steps_list)
         except Exception as e:
             return ToolException(f"Unable to extract test case steps from test case with key: {test_case_key}:\n{str(e)}")
@@ -403,7 +448,8 @@ class ZephyrScaleApiWrapper(BaseToolApiWrapper):
                     maxResults: Optional[int] = 10,
                     startAt: Optional[int] = 0,
                     projectKey: Optional[str] = None,
-                    folderType: Optional[str] = None
+                    folderType: Optional[str] = None,
+                    return_as_list: bool = False
                     ):
         """Retrieves all folders. Query parameters can be used to filter the results: maxResults, startAt, projectKey, folderType"""
 
@@ -411,7 +457,7 @@ class ZephyrScaleApiWrapper(BaseToolApiWrapper):
         for folder in self._api.folders.get_folders(maxResults=maxResults, startAt=startAt,
                                                     projectKey=projectKey, folderType=folderType):
             folders_str.append(folder)
-        return f"Extracted folders: {folders_str}"
+        return folders_str if return_as_list else f"Extracted folders: {folders_str}"
 
     def update_test_case(self, test_case_key: str, test_case_id: int, name: str, project_id: int, priority_id: int, status_id: int, **kwargs) -> str:
         """Updates an existing test case.
@@ -452,7 +498,7 @@ class ZephyrScaleApiWrapper(BaseToolApiWrapper):
         except Exception as e:
             return ToolException(f"Unable to update test case with key: {test_case_key}:\n{str(e)}")
 
-    def get_links(self, test_case_key: str) -> str:
+    def get_links(self, test_case_key: str, **kwargs) -> str:
         """Returns links for a test case with specified key
 
         Args:
@@ -461,6 +507,8 @@ class ZephyrScaleApiWrapper(BaseToolApiWrapper):
 
         try:
             links = self._api.test_cases.get_links(test_case_key)
+            if kwargs['return_only_links']:
+                return links
             return f"Links for test case `{test_case_key}`: {str(links)}"
         except Exception as e:
             return ToolException(f"Unable to get links for test case with key: {test_case_key}:\n{str(e)}")
@@ -495,8 +543,32 @@ class ZephyrScaleApiWrapper(BaseToolApiWrapper):
             return f"Web link created for test case `{test_case_key}` with URL `{url}`: {str(web_link_response)}"
         except Exception as e:
             return ToolException(f"Unable to create web link for test case with key: {test_case_key}:\n{str(e)}")
-
-    def
+
+    def _get_last_version(self, test_case_key: str, step:int = 10):
+        max_iterations = 50
+        count = 0
+        start_at = 0
+        last_version = None
+
+        while count < max_iterations:
+            count+=1
+            last_versions = self.get_versions(test_case_key=test_case_key, maxResults=step, startAt=start_at, return_as_list=True)
+            last_versions_count = len(last_versions)
+
+            if last_versions_count == 0:
+                break
+
+            last_version = last_versions[-1]
+            start_at+=last_versions_count
+
+        if last_version:
+            match = re.search(r'/versions/(\d+)', last_version["self"])
+            version_number = match.group(1)
+            return self.get_version(test_case_key=test_case_key, version=version_number, return_as_object=True)
+        return None
+
+
+    def get_versions(self, test_case_key: str, maxResults: Optional[int] = 10, startAt: Optional[int] = 0, return_as_list: bool = False) -> str|list:
         """Returns all test case versions for a test case with specified key. Response is ordered by most recent first.
 
         Args:
@@ -507,26 +579,30 @@ class ZephyrScaleApiWrapper(BaseToolApiWrapper):
 
         try:
             versions = self._api.test_cases.get_versions(test_case_key, maxResults=maxResults, startAt=startAt)
+            if return_as_list:
+                return [version for version in versions]
             versions_list = [str(version) for version in versions]
             all_versions = '\n'.join(versions_list)
             return f"Versions for test case `{test_case_key}`: {all_versions}"
         except Exception as e:
             return ToolException(f"Unable to get versions for test case with key: {test_case_key}:\n{str(e)}")
 
-    def get_version(self, test_case_key: str, version: str) -> str:
+    def get_version(self, test_case_key: str, version: str, return_as_object: bool = False) -> str|dict:
         """Retrieves a specific version of a test case"""
 
         try:
             version_data = self._api.test_cases.get_version(test_case_key, version)
-            return f"Version {version} of test case `{test_case_key}`: {str(version_data)}"
+            return version_data if return_as_object else f"Version {version} of test case `{test_case_key}`: {str(version_data)}"
         except Exception as e:
             return ToolException(f"Unable to get version {version} for test case with key: {test_case_key}:\n{str(e)}")
 
-    def get_test_script(self, test_case_key: str) -> str:
+    def get_test_script(self, test_case_key: str, return_only_script:bool = False) -> str:
         """Returns the test script for the given test case"""
 
         try:
             test_script = self._api.test_cases.get_test_script(test_case_key)
+            if return_only_script:
+                return test_script
             return f"Test script for test case `{test_case_key}`: {str(test_script)}"
         except Exception as e:
             return ToolException(f"Unable to get test script for test case with key: {test_case_key}:\n{str(e)}")
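Note the recurring pattern in these hunks: each reader method gains an opt-in flag (return_list, return_only_links, return_as_list, return_as_object, return_only_script) so that internal callers such as _get_last_version and the indexing pipeline get raw objects, while the agent-facing default stays a formatted string. A hedged sketch, assuming `wrapper` is a configured ZephyrScaleApiWrapper and the test case key is hypothetical:

```python
# Default: human-readable string for the agent.
summary = wrapper.get_version("PROJ-T1", "3")
# e.g. "Version 3 of test case `PROJ-T1`: {...}"

# Opt-in: raw dict for programmatic use, as the new indexing code does.
raw = wrapper.get_version("PROJ-T1", "3", return_as_object=True)
```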
@@ -949,7 +1025,7 @@ class ZephyrScaleApiWrapper(BaseToolApiWrapper):
 
         return result_cases, message
 
-    def search_test_cases(self,
+    def search_test_cases(self, project_key: str, search_term: Optional[str] = None,
                           max_results: Optional[int] = 1000, start_at: Optional[int] = 0,
                           order_by: Optional[str] = "name", order_direction: Optional[str] = "ASC",
                           archived: Optional[bool] = False, fields: Optional[List[str]] = ["key", "name"],
@@ -957,7 +1033,8 @@ class ZephyrScaleApiWrapper(BaseToolApiWrapper):
                           folder_name: Optional[str] = None, exact_folder_match: Optional[bool] = False,
                           folder_path: Optional[str] = None, include_subfolders: Optional[bool] = True,
                           labels: Optional[List[str]] = None, custom_fields: Optional[str] = None,
-                          steps_search: Optional[str] = None, include_steps: Optional[bool] = False
+                          steps_search: Optional[str] = None, include_steps: Optional[bool] = False,
+                          return_raw: bool = False) -> Union[str, List[dict]]:
         """Searches for test cases using custom search API.
 
         Args:
@@ -987,7 +1064,7 @@ class ZephyrScaleApiWrapper(BaseToolApiWrapper):
             # If we have folder_name or folder_path, we need to get folders and build the folder hierarchy
             if folder_name or folder_path:
                 # Get all folders in the project
-                all_folders = self._get_folders(
+                all_folders = self._get_folders(project_key, "TEST_CASE", 1000)
 
                 # Build folder hierarchy
                 folder_hierarchy = self._build_folder_hierarchy(all_folders)
@@ -1014,7 +1091,7 @@ class ZephyrScaleApiWrapper(BaseToolApiWrapper):
 
             # Prepare parameters for the API call
             params = {
-                "projectKey":
+                "projectKey": project_key,
                 "maxResults": max_results,
                 "startAt": start_at
             }
@@ -1040,7 +1117,7 @@ class ZephyrScaleApiWrapper(BaseToolApiWrapper):
             # If we have multiple target folder IDs, get test cases from each folder
             if target_folder_ids and include_subfolders:
                 all_test_cases = self._get_test_cases_from_folders(
-
+                    project_key, target_folder_ids, max_results, start_at, params
                 )
             else:
                 # Just use the standard params (which might include a single folder_id)
@@ -1077,14 +1154,179 @@ class ZephyrScaleApiWrapper(BaseToolApiWrapper):
                 "custom fields": custom_fields,
                 "steps containing": steps_search
             }
-
-
-
-            return
-
+
+            if return_raw:
+                return filtered_cases
+            return self._finalize_test_case_results(filtered_cases, fields, search_criteria)
+
         except Exception as e:
             return ToolException(f"Error searching test cases: {str(e)}")
 
+    def _finalize_test_case_results(self, filtered_cases: List[dict], fields: List[str], search_criteria: dict) -> str:
+        result_cases, message = self._format_test_case_results(filtered_cases, fields, search_criteria)
+        return f"{message}: {json.dumps(result_cases, indent=2)}"
+
+    def _search_test_cases_by_jql(self, project_key: str, jql: str):
+        try:
+            parsed = self._parse_jql(jql)
+
+            return self.search_test_cases(
+                project_key=project_key,
+                search_term=parsed.get("text"),
+                order_by=parsed.get("orderBy", "name"),
+                order_direction=parsed.get("orderDirection", "ASC"),
+                limit_results=parsed.get("limit"),
+                folder_name=parsed.get("folder"),
+                folder_path=parsed.get("folderPath"),
+                exact_folder_match=parsed.get("exactFolderMatch", False),
+                include_subfolders=parsed.get("includeSubfolders", True),
+                labels=parsed.get("label"),
+                custom_fields=parsed.get("customFields"),
+                steps_search=parsed.get("steps"),
+                include_steps=True,
+                return_raw=True
+            )
+        except Exception as e:
+            return ToolException(f"Error searching test cases by JQL: {str(e)}")
+
+    def _parse_jql(self, jql: str) -> dict:
+        import re
+        result = {}
+
+        # Match string equality: field = "value"
+        for match in re.findall(r'(\w+)\s*=\s*"([^"]*)"', jql):
+            result[match[0]] = match[1]
+
+        # Match text search: field ~ "value"
+        for match in re.findall(r'(\w+)\s*~\s*"([^"]*)"', jql):
+            result[match[0]] = match[1]
+
+        # Match list: field in ("a", "b", ...)
+        for match in re.findall(r'(\w+)\s*in\s*\(\s*([^)]+?)\s*\)', jql):
+            values = [v.strip().strip('"') for v in match[1].split(",")]
+            result[match[0]] = values
+
+        # Match number/bool: field = 123 or field = true/false
+        for match in re.findall(r'(\w+)\s*=\s*([^\s"()]+)', jql):
+            key, raw_val = match
+            if key in result:
+                continue
+            if raw_val.lower() == "true":
+                result[key] = True
+            elif raw_val.lower() == "false":
+                result[key] = False
+            elif raw_val.isdigit():
+                result[key] = int(raw_val)
+            else:
+                try:
+                    result[key] = float(raw_val)
+                except ValueError:
+                    result[key] = raw_val
+
+        return result
+
+    def index_data(self, project_key: str,
+                   jql: str,
+                   collection_suffix: str = '',
+                   progress_step: int = None,
+                   clean_index: bool = False) -> str:
+        """
+        Search test cases using a JQL-like query with explicit project_key.
+
+        Example:
+        jql = 'folder = "Authentication" AND label in ("Smoke", "Critical") AND text ~ "login"'
+        """
+        docs = self._base_loader(project_key, jql)
+        embedding = get_embeddings(self.embedding_model, self.embedding_model_params)
+        vs = self._init_vector_store(collection_suffix, embeddings=embedding)
+        return vs.index_documents(docs, progress_step=progress_step, clean_index=clean_index)
+
+    def _base_loader(self, project_key: str, jql: str):
+        test_cases_docs = self._get_test_cases_docs(project_key, jql)
+        folders_docs = self._get_folders_docs(project_key)
+        return test_cases_docs + folders_docs
+
+    def _get_all_folders(self, project_key: str, folder_type:str, step: int = 10):
+        max_iterations = 50
+        count = 0
+        all_folders = []
+        start_at = 0
+        while count < max_iterations:
+            count+=1
+            new_folders = self.get_folders(projectKey=project_key, folderType=folder_type, maxResults=step, startAt=start_at, return_as_list=True)
+
+            if new_folders and len(new_folders) > 0:
+                all_folders.extend(new_folders)
+                start_at+=step
+            else:
+                break
+        return all_folders
+
+    def _get_folders_docs(self, project_key: str):
+        folder_types = ['TEST_CASE', 'TEST_PLAN', 'TEST_CYCLE']
+        folders = []
+        for folder_type in folder_types:
+            try:
+                folders.extend(self._get_all_folders(project_key, folder_type=folder_type, step=100))
+            except Exception as e:
+                raise ToolException(f"Unable to extract folders for folder type '{folder_type}': {e}")
+        page_content = {}
+        docs: List[Document] = []
+        for folder in folders:
+            page_content['name'] = folder['name']
+            metadata = {}
+            for key, value in folder.items():
+                if value is not None:
+                    metadata[key] = value
+            page_content['type'] = "FOLDER"
+            docs.append(Document(page_content=json.dumps(page_content), metadata=metadata))
+        return docs
+
+    def _get_test_cases_docs(self, project_key: str, jql: str):
+        try:
+            test_cases = self._search_test_cases_by_jql(project_key, jql)
+        except Exception as e:
+            raise ToolException(f"Unable to extract test cases: {e}")
+
+        docs: List[Document] = []
+        for case in test_cases:
+            last_version = self._get_last_version(case['key'], step=100)
+            metadata = {
+                k: v for k, v in case.items()
+                if isinstance(v, (str, int, float, bool, list, dict))
+            }
+            if last_version and isinstance(last_version, dict) and 'createdOn' in last_version:
+                metadata['updated_at'] = last_version['createdOn']
+            else:
+                metadata['updated_at'] = case['createdOn']
+
+            case['type'] = "TEST_CASE"
+
+            docs.append(Document(page_content=json.dumps(case), metadata=metadata))
+        return docs
+
+    def _process_document(self, document: Document) -> Document:
+        try:
+            base_data = json.loads(document.page_content)
+
+            if base_data['type'] and base_data['type'] == "TEST_CASE":
+                additional_content = self._process_test_case(base_data)
+                base_data['test_case_content'] = additional_content
+
+            document.page_content = json.dumps(base_data)
+            return document
+        except json.JSONDecodeError as e:
+            raise ToolException(f"Failed to decode JSON from document: {e}")
+
+    def _process_test_case(self, case):
+        steps = self.get_test_steps(case['key'], return_list=True)
+        script = self.get_test_script(case['key'], return_only_script=True)
+        additional_content = {
+            "steps": "" if isinstance(steps, ToolException) else steps,
+            "script": "" if isinstance(script, ToolException) else script,
+        }
+        return additional_content
+
     def get_tests_recursive(self, project_key: str = None, folder_id: str = None, maxResults: Optional[int] = 100, startAt: Optional[int] = 0):
         """Retrieves all test cases recursively from a folder and all its subfolders.
 
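For reference, _parse_jql is a set of regex passes, not a full JQL grammar. Applying the four patterns above to the docstring's example query would yield roughly the following (worked by hand from the regexes, not from running the SDK):

```python
jql = 'folder = "Authentication" AND label in ("Smoke", "Critical") AND text ~ "login" AND orderBy = "name" AND orderDirection = "ASC"'

# Pass 1 (field = "value"):  folder, orderBy, orderDirection
# Pass 2 (field ~ "value"):  text
# Pass 3 (field in (...)):   label -> ["Smoke", "Critical"]
# Pass 4 (bare numbers/booleans) matches nothing here; already-seen keys are skipped.
#
# Expected result:
# {"folder": "Authentication", "orderBy": "name", "orderDirection": "ASC",
#  "text": "login", "label": ["Smoke", "Critical"]}
```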
@@ -1341,7 +1583,8 @@ class ZephyrScaleApiWrapper(BaseToolApiWrapper):
             return f"Test steps updated for test case `{test_case_key}`: {len(updates)} step(s) modified"
         except Exception as e:
             return ToolException(f"Error updating test steps for test case {test_case_key}: {str(e)}")
-
+
+    @extend_with_vector_tools
     def get_available_tools(self):
         return [
             {
@@ -1463,5 +1706,11 @@ class ZephyrScaleApiWrapper(BaseToolApiWrapper):
                 "description": self.get_tests_by_folder_path.__doc__,
                 "args_schema": ZephyrGetTestsByFolderPath,
                 "ref": self.get_tests_by_folder_path,
+            },
+            {
+                "name": "index_data",
+                "ref": self.index_data,
+                "description": self.index_data.__doc__,
+                "args_schema": indexData,
             }
         ]
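With the @extend_with_vector_tools decorator and the index_data registration above, the indexing flow is callable like any other toolkit tool. A hedged sketch of what a tool invocation resolves to (argument values are hypothetical; the exact dispatch path depends on the runtime):

```python
# Sketch: direct call equivalent to invoking the registered "index_data" tool.
result = wrapper.index_data(
    project_key="PROJ",                                  # hypothetical key
    jql='folder = "Authentication" AND text ~ "login"',
    collection_suffix="auth",                            # hypothetical suffix
    progress_step=10,
    clean_index=False,
)
print(result)  # status string returned by vs.index_documents(...)
```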
{alita_sdk-0.3.204.dist-info → alita_sdk-0.3.206.dist-info}/METADATA

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: alita_sdk
-Version: 0.3.204
+Version: 0.3.206
 Summary: SDK for building langchain agents using resources from Alita
 Author-email: Artem Rozumenko <artyom.rozumenko@gmail.com>, Mikalai Biazruchka <mikalai_biazruchka@epam.com>, Roman Mitusov <roman_mitusov@epam.com>, Ivan Krakhmaliuk <lifedjik@gmail.com>, Artem Dubrovskiy <ad13box@gmail.com>
 License-Expression: Apache-2.0
@@ -125,6 +125,8 @@ Requires-Dist: yarl==1.17.1; extra == "tools"
 Requires-Dist: langmem==0.0.27; extra == "tools"
 Requires-Dist: textract-py3==2.1.1; extra == "tools"
 Requires-Dist: slack_sdk==3.35.0; extra == "tools"
+Requires-Dist: deltalake==1.0.2; extra == "tools"
+Requires-Dist: google_cloud_bigquery==3.34.0; extra == "tools"
 Provides-Extra: community
 Requires-Dist: retry-extended==0.2.3; extra == "community"
 Requires-Dist: pyobjtojson==0.3; extra == "community"