alita-sdk 0.3.162__py3-none-any.whl → 0.3.164__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- alita_sdk/runtime/langchain/assistant.py +2 -2
- alita_sdk/runtime/langchain/store_manager.py +22 -1
- alita_sdk/runtime/toolkits/tools.py +1 -1
- alita_sdk/tools/__init__.py +7 -1
- alita_sdk/tools/carrier/api_wrapper.py +76 -4
- alita_sdk/tools/carrier/backend_reports_tool.py +31 -12
- alita_sdk/tools/carrier/backend_tests_tool.py +14 -8
- alita_sdk/tools/carrier/cancel_ui_test_tool.py +178 -0
- alita_sdk/tools/carrier/carrier_sdk.py +99 -15
- alita_sdk/tools/carrier/create_ui_excel_report_tool.py +473 -0
- alita_sdk/tools/carrier/create_ui_test_tool.py +199 -0
- alita_sdk/tools/carrier/lighthouse_excel_reporter.py +155 -0
- alita_sdk/tools/carrier/run_ui_test_tool.py +394 -0
- alita_sdk/tools/carrier/tools.py +11 -1
- alita_sdk/tools/carrier/ui_reports_tool.py +6 -2
- alita_sdk/tools/carrier/update_ui_test_schedule_tool.py +278 -0
- alita_sdk/tools/memory/__init__.py +7 -0
- alita_sdk/tools/postman/__init__.py +7 -0
- alita_sdk/tools/postman/api_wrapper.py +335 -0
- alita_sdk/tools/zephyr_squad/__init__.py +62 -0
- alita_sdk/tools/zephyr_squad/api_wrapper.py +135 -0
- alita_sdk/tools/zephyr_squad/zephyr_squad_cloud_client.py +79 -0
- {alita_sdk-0.3.162.dist-info → alita_sdk-0.3.164.dist-info}/METADATA +4 -3
- {alita_sdk-0.3.162.dist-info → alita_sdk-0.3.164.dist-info}/RECORD +27 -18
- {alita_sdk-0.3.162.dist-info → alita_sdk-0.3.164.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.162.dist-info → alita_sdk-0.3.164.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.162.dist-info → alita_sdk-0.3.164.dist-info}/top_level.txt +0 -0
alita_sdk/runtime/langchain/assistant.py
CHANGED
```diff
@@ -61,8 +61,8 @@ class Assistant:
                              "Review toolkits configuration or use pipeline as master agent.")
 
         # configure memory store if memory tool is defined
-
-
+        memory_tool = next((tool for tool in data['tools'] if tool['type'] == 'memory'), None)
+        self._configure_store(memory_tool)
 
         # Lazy import to avoid circular dependency
         from ..toolkits.tools import get_tools
```
alita_sdk/runtime/langchain/store_manager.py
CHANGED
```diff
@@ -1,6 +1,8 @@
 import threading
 import atexit
 import logging
+from urllib.parse import urlparse, unquote
+
 from psycopg import Connection
 from langgraph.store.postgres import PostgresStore
 
@@ -18,11 +20,30 @@ class StoreManager:
             cls._instance._stores = {}
         return cls._instance
 
+    def _parse_connection_string(self, conn_str: str) -> dict:
+        """
+        Parse the connection string from SQLAlchemy style to args dict.
+        """
+        if conn_str.startswith("postgresql+psycopg://"):
+            url = conn_str[len("postgresql+psycopg://"):]
+
+        parsed = urlparse(f"//{url}")
+
+        return {
+            "user": unquote(parsed.username) if parsed.username else None,
+            "password": unquote(parsed.password) if parsed.password else None,
+            "host": parsed.hostname,
+            "port": parsed.port,
+            "dbname": parsed.path.lstrip("/") if parsed.path else None
+        }
+
     def get_store(self, conn_str: str) -> PostgresStore:
         store = self._stores.get(conn_str)
         if store is None:
             logger.info(f"Creating new PostgresStore for connection: {conn_str}")
-
+            conn_params = self._parse_connection_string(conn_str)
+            conn_params.update({'autocommit': True, 'prepare_threshold': 0})
+            conn = Connection.connect(**conn_params)
             store = PostgresStore(conn)
             store.setup()
             self._stores[conn_str] = store
```
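The parser above assumes the SQLAlchemy-style `postgresql+psycopg://` scheme. A minimal standalone sketch of the same idea (the function name and sample DSN below are invented for illustration), runnable with just the standard library:

```python
from urllib.parse import urlparse, unquote

def parse_sqlalchemy_dsn(conn_str: str) -> dict:
    """Turn 'postgresql+psycopg://user:pass@host:5432/db' into psycopg connection kwargs."""
    prefix = "postgresql+psycopg://"
    url = conn_str[len(prefix):] if conn_str.startswith(prefix) else conn_str
    parsed = urlparse(f"//{url}")  # the leading '//' makes urlparse treat the rest as a netloc
    return {
        "user": unquote(parsed.username) if parsed.username else None,
        "password": unquote(parsed.password) if parsed.password else None,
        "host": parsed.hostname,
        "port": parsed.port,
        "dbname": parsed.path.lstrip("/") or None,
    }

print(parse_sqlalchemy_dsn("postgresql+psycopg://alita:s%40cret@db.local:5432/memory"))
# {'user': 'alita', 'password': 's@cret', 'host': 'db.local', 'port': 5432, 'dbname': 'memory'}
```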
alita_sdk/runtime/toolkits/tools.py
CHANGED
```diff
@@ -95,7 +95,7 @@ def get_tools(tools_list: list, alita_client, llm, memory_store: BaseStore = Non
     # Add community tools
     tools += community_tools(tools_list, alita_client, llm)
     # Add alita tools
-    tools += alita_tools(tools_list, alita_client, llm)
+    tools += alita_tools(tools_list, alita_client, llm, memory_store)
     # Add MCP tools
     tools += _mcp_tools(tools_list, alita_client)
 
```
alita_sdk/tools/__init__.py
CHANGED
```diff
@@ -1,5 +1,8 @@
 import logging
 from importlib import import_module
+from typing import Optional
+
+from langgraph.store.base import BaseStore
 
 logger = logging.getLogger(__name__)
 
@@ -74,13 +77,15 @@ _safe_import_tool('carrier', 'carrier', 'get_tools', 'AlitaCarrierToolkit')
 _safe_import_tool('ocr', 'ocr', 'get_tools', 'OCRToolkit')
 _safe_import_tool('pptx', 'pptx', 'get_tools', 'PPTXToolkit')
 _safe_import_tool('postman', 'postman', 'get_tools', 'PostmanToolkit')
+_safe_import_tool('memory', 'memory', 'get_tools', 'MemoryToolkit')
+_safe_import_tool('zephyr_squad', 'zephyr_squad', 'get_tools', 'ZephyrSquadToolkit')
 
 # Log import summary
 available_count = len(AVAILABLE_TOOLS)
 total_attempted = len(AVAILABLE_TOOLS) + len(FAILED_IMPORTS)
 logger.info(f"Tool imports completed: {available_count}/{total_attempted} successful")
 
-def get_tools(tools_list, alita, llm, *args, **kwargs):
+def get_tools(tools_list, alita, llm, store: Optional[BaseStore] = None, *args, **kwargs):
     tools = []
     for tool in tools_list:
         # validate tool name syntax - it cannot be started with _
@@ -90,6 +95,7 @@ def get_tools(tools_list, alita, llm, *args, **kwargs):
 
         tool['settings']['alita'] = alita
         tool['settings']['llm'] = llm
+        tool['settings']['store'] = store
         tool_type = tool['type']
 
         # Check if tool is available and has get_tools function
```
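A compact sketch of how the new optional `store` argument would flow into each tool's settings dict (the toolkit entry, stand-in store class, and function name below are invented for illustration; the real dispatch lives in `alita_sdk.tools.get_tools`):

```python
from typing import Any, Dict, List, Optional

class FakeStore:
    """Stand-in for langgraph.store.base.BaseStore, just for this sketch."""

def get_tools_sketch(tools_list: List[Dict[str, Any]], alita, llm,
                     store: Optional[FakeStore] = None) -> List[Dict[str, Any]]:
    prepared = []
    for tool in tools_list:
        settings = dict(tool.get("settings", {}))
        settings["alita"] = alita
        settings["llm"] = llm
        settings["store"] = store  # the field newly threaded through in 0.3.164
        prepared.append({"type": tool["type"], "settings": settings})
    return prepared

result = get_tools_sketch([{"type": "memory", "settings": {}}],
                          alita="alita-client", llm="llm", store=FakeStore())
print(sorted(result[0]["settings"]))  # ['alita', 'llm', 'store']
```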
alita_sdk/tools/carrier/api_wrapper.py
CHANGED
```diff
@@ -67,6 +67,10 @@ class CarrierAPIWrapper(BaseModel):
     def run_test(self, test_id: str, json_body):
         return self._client.run_test(test_id, json_body)
 
+    def run_ui_test(self, test_id: str, json_body):
+        """Run a UI test with the given test ID and JSON body."""
+        return self._client.run_ui_test(test_id, json_body)
+
     def get_engagements_list(self) -> List[Dict[str, Any]]:
         return self._client.get_engagements_list()
 
@@ -76,17 +80,21 @@ class CarrierAPIWrapper(BaseModel):
     def get_report_file_log(self, bucket: str, file_name: str):
         return self._client.get_report_file_log(bucket, file_name)
 
-    def
-        return self._client.
+    def upload_file(self, bucket_name: str, file_name: str):
+        return self._client.upload_file(bucket_name, file_name)
 
     def get_ui_reports_list(self) -> List[Dict[str, Any]]:
         """Get list of UI test reports from the Carrier platform."""
-        return self._client.
+        return self._client.get_ui_reports_list()
 
     def get_ui_tests_list(self) -> List[Dict[str, Any]]:
         """Get list of UI tests from the Carrier platform."""
         return self._client.get_ui_tests_list()
 
+    def get_locations(self) -> Dict[str, Any]:
+        """Get list of available locations/cloud settings from the Carrier platform."""
+        return self._client.get_locations()
+
     def get_ui_report_links(self, uid: str) -> list:
         """Get all unique file_names for a given UI report UID, ending with .html, without #index=*, and only unique values."""
         endpoint = f"api/v1/ui_performance/results/{self.project_id}/{uid}?sort=loop&order=asc"
@@ -120,4 +128,68 @@ class CarrierAPIWrapper(BaseModel):
             return [prefix + name for name in sorted_names]
         except Exception as e:
             logger.error(f"Failed to fetch UI report links: {e}")
-            return []
+            return []
+
+    def update_ui_test(self, test_id: str, json_body) -> Dict[str, Any]:
+        """Update UI test configuration and schedule."""
+        return self._client.update_ui_test(test_id, json_body)
+
+    def get_ui_test_details(self, test_id: str) -> Dict[str, Any]:
+        """Get detailed UI test configuration by test ID."""
+        return self._client.get_ui_test_details(test_id)
+
+    def create_ui_test(self, json_body: Dict[str, Any]) -> Dict[str, Any]:
+        """Create a new UI test."""
+        return self._client.create_ui_test(json_body)
+
+    def cancel_ui_test(self, test_id: str) -> Dict[str, Any]:
+        """Cancel a UI test by setting its status to Canceled."""
+        return self._client.cancel_ui_test(test_id)
+
+    def get_ui_report_json_files(self, uid: str) -> list:
+        """Get all JSON file names for a given UI report UID for Excel processing."""
+        endpoint = f"api/v1/ui_performance/results/{self.project_id}/{uid}?sort=loop&order=asc"
+        try:
+            response = self._client.request('get', endpoint)
+            file_names = set()
+
+            def clean_file_name(file_name):
+                # Extract JSON files only and clean the names
+                if file_name.endswith('.json'):
+                    return file_name
+                return None
+
+            # If the response is a dict with lists as values, flatten all file_names from all values
+            if isinstance(response, dict):
+                for value in response.values():
+                    if isinstance(value, list):
+                        for item in value:
+                            file_name = item.get("file_name")
+                            if file_name:
+                                clean_name = clean_file_name(file_name)
+                                if clean_name:
+                                    file_names.add(clean_name)
+            elif isinstance(response, list):
+                for item in response:
+                    file_name = item.get("file_name")
+                    if file_name:
+                        clean_name = clean_file_name(file_name)
+                        if clean_name:
+                            file_names.add(clean_name)
+
+            sorted_names = sorted(file_names)
+            prefix = f"https://platform.getcarrier.io/api/v1/artifacts/artifact/default/{self.project_id}/reports/"
+            return [prefix + name for name in sorted_names]
+        except Exception as e:
+            logger.error(f"Failed to fetch UI report JSON files: {e}")
+            return []
+
+    def download_ui_report_json(self, bucket: str, file_name: str) -> str:
+        """Download UI report JSON file content."""
+        endpoint = f"api/v1/artifacts/artifact/default/{self.project_id}/{bucket}/{file_name}"
+        try:
+            response = self._client.request('get', endpoint)
+            return response
+        except Exception as e:
+            logger.error(f"Failed to download UI report JSON: {e}")
+            return None
```
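The JSON-file discovery above tolerates two response shapes (a dict of lists or a flat list) and keeps only unique `*.json` names. A standalone sketch of that flattening step (the helper name and sample payload are invented):

```python
def collect_json_file_names(response) -> list:
    """Flatten a dict-of-lists or list response and keep unique *.json file names."""
    names = set()
    items = []
    if isinstance(response, dict):
        for value in response.values():
            if isinstance(value, list):
                items.extend(value)
    elif isinstance(response, list):
        items = response
    for item in items:
        file_name = item.get("file_name")
        if file_name and file_name.endswith(".json"):
            names.add(file_name)
    return sorted(names)

sample = {"loop_1": [{"file_name": "report_1.json"}, {"file_name": "report_1.html"}],
          "loop_2": [{"file_name": "report_2.json"}, {"file_name": "report_1.json"}]}
print(collect_json_file_names(sample))  # ['report_1.json', 'report_2.json']
```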
alita_sdk/tools/carrier/backend_reports_tool.py
CHANGED
```diff
@@ -1,6 +1,8 @@
 import logging
 from datetime import datetime
 import json
+import zipfile
+from itertools import islice
 import traceback
 from typing import Type
 from langchain_core.tools import BaseTool, ToolException
@@ -78,14 +80,31 @@ class GetReportByIDTool(BaseTool):
 
     def _run(self, report_id: str):
         try:
-
-
-
-
-
-
-
-
+            report, test_log_file_path, errors_log_file_path = self.api_wrapper.get_report_file_name(report_id)
+            try:
+                with open(errors_log_file_path, mode='r') as f:
+                    # Use islice to read up to 100 lines
+                    errors = list(islice(f, 100))
+                report["errors_log"] = errors
+                # Archive with errors log file path
+                zip_file_path = f'/tmp/{report["build_id"]}_error_log_archive.zip'
+
+                # Create zip archive
+                with zipfile.ZipFile(zip_file_path, 'w', zipfile.ZIP_DEFLATED) as zipf:
+                    arcname = os.path.basename(errors_log_file_path)
+                    zipf.write(errors_log_file_path, arcname)
+
+                bucket_name = report["name"].replace("_", "").replace(" ", "").lower()
+                self.api_wrapper.upload_file(bucket_name, zip_file_path)
+                report["link_to_errors_file"] = f"{self.api_wrapper.url.rstrip('/')}/api/v1/artifacts/artifact/default/" \
+                                                f"{self.api_wrapper.project_id}/{bucket_name}/" \
+                                                f"{zip_file_path.replace('/tmp/', '')}"
+            except Exception as e:
+                logger.error(e)
+                report["errors_log"] = []
+                report["link_to_errors_file"] = "link is not available"
+
+            return json.dumps(report)
         except Exception:
             stacktrace = traceback.format_exc()
             logger.error(f"Error downloading reports: {stacktrace}")
@@ -164,17 +183,17 @@ class CreateExcelReportTool(BaseTool):
 
     def _process_report_by_id(self, report_id, parameters):
         """Process report using report ID."""
-        report,
+        report, test_log_file_path, errors_log_file_path = self.api_wrapper.get_report_file_name(report_id)
         carrier_report = f"{self.api_wrapper.url.rstrip('/')}/-/performance/backend/results?result_id={report_id}"
         lg_type = report.get("lg_type")
         excel_report_file_name = f'/tmp/reports_test_results_{report["build_id"]}_excel_report.xlsx'
         bucket_name = report["name"].replace("_", "").replace(" ", "").lower()
 
-        result_stats_j = self._parse_report(
+        result_stats_j = self._parse_report(test_log_file_path, lg_type, parameters["think_time"], is_absolute_file_path=True)
         calc_thr_j = self._calculate_thresholds(result_stats_j, parameters)
 
         return self._generate_and_upload_report(
-            result_stats_j, carrier_report, calc_thr_j, parameters, excel_report_file_name, bucket_name,
+            result_stats_j, carrier_report, calc_thr_j, parameters, excel_report_file_name, bucket_name, test_log_file_path
        )
 
     def _process_report_by_file(self, bucket, file_name, parameters):
@@ -233,7 +252,7 @@ class CreateExcelReportTool(BaseTool):
         excel_reporter_object.prepare_headers_and_titles()
         excel_reporter_object.write_to_excel(result_stats_j, carrier_report, calc_thr_j, parameters["pct"])
 
-        self.api_wrapper.
+        self.api_wrapper.upload_file(bucket_name, excel_report_file_name)
 
         # Clean up
         self._cleanup(file_path, excel_report_file_name)
```
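The error-log handling above caps what gets embedded in the report at 100 lines while still shipping the full file as a zip archive. A standalone sketch of that pattern (paths and the helper name are invented; it only needs a writable /tmp):

```python
import os
import zipfile
from itertools import islice

def head_and_zip(log_path: str, zip_path: str, max_lines: int = 100):
    """Read at most `max_lines` lines of a log, then archive the whole file."""
    with open(log_path, mode="r") as f:
        head = list(islice(f, max_lines))  # stops early, so huge logs stay cheap
    with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zipf:
        zipf.write(log_path, arcname=os.path.basename(log_path))
    return head

# Write a small demo log so the sketch runs end to end.
with open("/tmp/errors_demo.log", "w") as f:
    f.write("\n".join(f"error line {i}" for i in range(250)))

print(len(head_and_zip("/tmp/errors_demo.log", "/tmp/errors_demo.zip")))  # 100
```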
alita_sdk/tools/carrier/backend_tests_tool.py
CHANGED
```diff
@@ -80,22 +80,28 @@ class RunTestByIDTool(BaseTool):
     description: str = "Execute test plan from the Carrier platform."
     args_schema: Type[BaseModel] = create_model(
         "RunTestByIdInput",
-        test_id=(str, Field(default=
+        test_id=(str, Field(default=None, description="Test id to execute")),
+        name=(str, Field(default=None, description="Test name to execute")),
         test_parameters=(dict, Field(default=None, description="Test parameters to override")),
     )
 
-    def _run(self, test_id
+    def _run(self, test_id=None, name=None, test_parameters=None):
         try:
+            if not test_id and not name:
+                return {"message": "Please provide test id or test name to start"}
+
             # Fetch test data
             tests = self.api_wrapper.get_tests_list()
-
-
-
-
-
+
+            # Find the test data based on test_id or name
+            test_data = next(
+                (test for test in tests if
+                 (test_id and str(test["id"]) == test_id) or (name and str(test["name"]) == name)),
+                None
+            )
 
             if not test_data:
-                raise ValueError(f"Test with id {test_id} not found.")
+                raise ValueError(f"Test with id {test_id} or name {name} not found.")
 
             # Default test parameters
             default_test_parameters = test_data.get("test_parameters", [])
```
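The lookup above now accepts either a test id or a test name. A tiny self-contained version of that selection logic (sample data invented):

```python
def find_test(tests, test_id=None, name=None):
    """Return the first test matching the given id or name, else None."""
    return next(
        (t for t in tests
         if (test_id and str(t["id"]) == str(test_id)) or (name and t["name"] == name)),
        None,
    )

tests = [{"id": 1, "name": "login_flow"}, {"id": 2, "name": "checkout"}]
print(find_test(tests, test_id="2"))        # {'id': 2, 'name': 'checkout'}
print(find_test(tests, name="login_flow"))  # {'id': 1, 'name': 'login_flow'}
print(find_test(tests))                     # None
```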
alita_sdk/tools/carrier/cancel_ui_test_tool.py
ADDED
```diff
@@ -0,0 +1,178 @@
+import logging
+import re
+import traceback
+from typing import Type
+from langchain_core.tools import BaseTool, ToolException
+from pydantic.fields import Field
+from pydantic import create_model, BaseModel
+from .api_wrapper import CarrierAPIWrapper
+
+logger = logging.getLogger(__name__)
+
+
+class CancelUITestTool(BaseTool):
+    api_wrapper: CarrierAPIWrapper = Field(..., description="Carrier API Wrapper instance")
+    name: str = "cancel_ui_test"
+    description: str = "Cancel a UI test or show available tests to cancel in the Carrier platform."
+    args_schema: Type[BaseModel] = create_model(
+        "CancelUITestInput",
+        **{
+            "message": (str, Field(description="User input message (e.g., 'Cancel UI test' or 'Cancel UI test 12345')")),
+        }
+    )
+
+    def _run(self, message: str):
+        try:
+            # Parse the message to extract test ID if provided
+            test_id = self._extract_test_id(message)
+
+            if test_id:
+                # User provided a specific test ID to cancel
+                return self._cancel_specific_test(test_id)
+            else:
+                # User didn't provide ID, show available tests to cancel
+                return self._show_cancelable_tests()
+
+        except Exception as e:
+            logger.error(f"Error in cancel UI test: {e}")
+            raise ToolException(f"Failed to process cancel UI test request: {str(e)}")
+
+    def _extract_test_id(self, message: str) -> str:
+        """Extract test ID from user message if present."""
+        # Look for patterns like "Cancel UI test 12345" or "cancel ui test 12345"
+        match = re.search(r'cancel\s+ui\s+test\s+(\d+)', message.lower())
+        if match:
+            return match.group(1)
+        return ""
+
+    def _show_cancelable_tests(self) -> str:
+        """Show list of tests that can be canceled."""
+        try:
+            # Get all UI reports/tests
+            reports = self.api_wrapper.get_ui_reports_list()
+
+            if not reports:
+                return "❌ **No UI tests found.**"
+
+            # Filter tests that can be canceled (not in final states)
+            final_states = {"Canceled", "Finished", "Failed"}
+            cancelable_tests = []
+
+            for report in reports:
+                test_status = report.get("test_status", {})
+                status = test_status.get("status", "Unknown")
+
+                # Check if status is not in final states
+                if status not in final_states:
+                    cancelable_tests.append({
+                        "id": report.get("id"),
+                        "name": report.get("name", "Unknown"),
+                        "status": status,
+                        "percentage": test_status.get("percentage", 0),
+                        "description": test_status.get("description", "")
+                    })
+
+            if not cancelable_tests:
+                return """# ℹ️ No Tests Available for Cancellation
+
+All UI tests are already in final states (Canceled, Finished, or Failed).
+
+## 🔍 To cancel a specific test:
+Use the command: `Cancel UI test <test_id>`
+
+Example: `Cancel UI test 12345`"""
+
+            # Build the response message
+            response = """# 🚫 UI Tests Available for Cancellation
+
+The following tests are currently running and can be canceled:
+
+## 📋 Active Tests:
+"""
+
+            for test in cancelable_tests:
+                response += f"""
+### 🔸 Test ID: `{test['id']}`
+- **Name:** `{test['name']}`
+- **Status:** `{test['status']}`
+- **Progress:** {test['percentage']}%
+- **Description:** {test['description']}
+"""
+
+            response += """
+## 🚫 To cancel a specific test:
+Use the command: `Cancel UI test <test_id>`
+
+Example: `Cancel UI test 12345`"""
+
+            return response
+
+        except Exception as e:
+            logger.error(f"Error fetching cancelable tests: {e}")
+            return f"❌ **Error fetching tests:** {str(e)}"
+
+    def _cancel_specific_test(self, test_id: str) -> str:
+        """Cancel a specific UI test by ID."""
+        try:
+            # First, get the current status of the test
+            reports = self.api_wrapper.get_ui_reports_list()
+            target_test = None
+
+            for report in reports:
+                if str(report.get("id")) == test_id:
+                    target_test = report
+                    break
+
+            if not target_test:
+                return f"❌ **Test with ID `{test_id}` not found.**"
+
+            # Check if test can be canceled
+            test_status = target_test.get("test_status", {})
+            current_status = test_status.get("status", "Unknown")
+            final_states = {"Canceled", "Finished", "Failed"}
+
+            if current_status in final_states:
+                return f"""# ❌ Cannot Cancel Test
+
+## Test Information:
+- **Test ID:** `{test_id}`
+- **Name:** `{target_test.get('name', 'Unknown')}`
+- **Current Status:** `{current_status}`
+
+## 🚫 Reason:
+This test cannot be canceled because it is already in a final state (`{current_status}`).
+
+Only tests with status **not** in `Canceled`, `Finished`, or `Failed` can be canceled."""
+
+            # Attempt to cancel the test
+            try:
+                cancel_response = self.api_wrapper.cancel_ui_test(test_id)
+
+                return f"""# ✅ UI Test Canceled Successfully!
+
+## Test Information:
+- **Test ID:** `{test_id}`
+- **Name:** `{target_test.get('name', 'Unknown')}`
+- **Previous Status:** `{current_status}`
+- **New Status:** `Canceled`
+
+## 🎯 Result:
+The test has been successfully canceled and will stop executing."""
+
+            except Exception as cancel_error:
+                logger.error(f"Error canceling test {test_id}: {cancel_error}")
+                return f"""# ❌ Failed to Cancel Test
+
+## Test Information:
+- **Test ID:** `{test_id}`
+- **Name:** `{target_test.get('name', 'Unknown')}`
+- **Current Status:** `{current_status}`
+
+## 🚫 Error:
+{str(cancel_error)}
+
+Please check the test ID and try again."""
+
+        except Exception as e:
+            logger.error(f"Error canceling specific test {test_id}: {e}")
+            return f"❌ **Error processing cancellation for test `{test_id}`: {str(e)}**"
```
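The tool decides between "cancel this test" and "list cancelable tests" with a single case-insensitive regex. A quick standalone check of that extraction (example messages invented):

```python
import re

def extract_test_id(message: str) -> str:
    """Pull a numeric test ID out of messages like 'Cancel UI test 12345'."""
    match = re.search(r"cancel\s+ui\s+test\s+(\d+)", message.lower())
    return match.group(1) if match else ""

print(extract_test_id("Cancel UI test 12345"))  # '12345'
print(extract_test_id("cancel ui test"))        # ''  -> the tool lists cancelable tests instead
```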
alita_sdk/tools/carrier/carrier_sdk.py
CHANGED
```diff
@@ -85,6 +85,11 @@ class CarrierClient(BaseModel):
         endpoint = f"api/v1/backend_performance/test/{self.credentials.project_id}/{test_id}"
         return self.request('post', endpoint, json=json_body).get("result_id", "")
 
+    def run_ui_test(self, test_id: str, json_body):
+        """Run a UI test with the given test ID and JSON body."""
+        endpoint = f"api/v1/ui_performance/test/{self.credentials.project_id}/{test_id}"
+        return self.request('post', endpoint, json=json_body).get("result_id", "")
+
     def get_engagements_list(self) -> List[Dict[str, Any]]:
         endpoint = f"api/v1/engagements/engagements/{self.credentials.project_id}"
         return self.request('get', endpoint).get("items", [])
@@ -123,15 +128,17 @@ class CarrierClient(BaseModel):
         for file_name in file_list:
             if file_name.startswith(report_archive_prefix) and "excel_report" not in file_name:
                 report_files_list.append(file_name)
-
+        test_log_file_path, errors_log_file_path = self.download_and_merge_reports(report_files_list, lg_type, bucket_name, extract_to)
 
-        return report_info,
+        return report_info, test_log_file_path, errors_log_file_path
 
-    def download_and_merge_reports(self, report_files_list: list, lg_type: str, bucket: str, extract_to: str = "/tmp")
+    def download_and_merge_reports(self, report_files_list: list, lg_type: str, bucket: str, extract_to: str = "/tmp"):
         if lg_type == "jmeter":
             summary_log_file_path = f"summary_{bucket}_jmeter.jtl"
+            error_log_file_path = f"error_{bucket}_jmeter.log"
         else:
             summary_log_file_path = f"summary_{bucket}_simulation.log"
+            error_log_file_path = f"error_{bucket}_simulation.log"
         extracted_reports = []
         for each in report_files_list:
             endpoint = f"api/v1/artifacts/artifact/{self.credentials.project_id}/{bucket}/{each}"
@@ -153,10 +160,21 @@ class CarrierClient(BaseModel):
             os.remove(local_file_path)
             extracted_reports.append(extract_dir)
 
-        # get files from extract_dirs and merge to summary_log_file_path
+        # get files from extract_dirs and merge to summary_log_file_path and error_log_file_path
         self.merge_log_files(summary_log_file_path, extracted_reports, lg_type)
+        try:
+            self.merge_error_files(error_log_file_path, extracted_reports)
+        except Exception as e:
+            logger.error(f"Failed to merge errors log: {e}")
+
+        # Clean up
+        for each in extracted_reports:
+            try:
+                shutil.rmtree(each)
+            except Exception as e:
+                logger.error(e)
 
-        return summary_log_file_path
+        return summary_log_file_path, error_log_file_path
 
     def merge_log_files(self, summary_file, extracted_reports, lg_type):
         with open(summary_file, mode='w') as summary:
@@ -173,11 +191,14 @@ class CarrierClient(BaseModel):
                 else:
                     # Skip the first line (header) for subsequent files
                     summary.writelines(lines[1:])
-
-
-
-
-
+
+    def merge_error_files(self, error_file, extracted_reports):
+        with open(error_file, mode='w') as summary_errors:
+            for i, log_file in enumerate(extracted_reports):
+                report_file = f"{log_file}/simulation-errors.log"
+                with open(report_file, mode='r') as f:
+                    lines = f.readlines()
+                    summary_errors.writelines(lines)
 
     def get_report_file_log(self, bucket: str, file_name: str):
         bucket_endpoint = f"api/v1/artifacts/artifact/default/{self.credentials.project_id}/{bucket}/{file_name}"
@@ -190,10 +211,10 @@ class CarrierClient(BaseModel):
             f.write(response.content)
         return file_path
 
-    def
+    def upload_file(self, bucket_name: str, file_name: str):
         upload_url = f'api/v1/artifacts/artifacts/{self.credentials.project_id}/{bucket_name}'
         full_url = f"{self.credentials.url.rstrip('/')}/{upload_url.lstrip('/')}"
-        files = {'file': open(
+        files = {'file': open(file_name, 'rb')}
         headers = {'Authorization': f'bearer {self.credentials.token}'}
         s3_config = {'integration_id': 1, 'is_local': False}
         requests.post(full_url, params=s3_config, allow_redirects=True, files=files, headers=headers)
@@ -204,6 +225,69 @@ class CarrierClient(BaseModel):
         return self.request('get', endpoint).get("rows", [])
 
     def get_ui_reports_list(self) -> List[Dict[str, Any]]:
-
-
-
+        """Get list of UI test reports from the Carrier platform."""
+        endpoint = f"api/v1/ui_performance/reports/{self.credentials.project_id}"
+        return self.request('get', endpoint).get("rows", [])
+
+    def get_locations(self) -> Dict[str, Any]:
+        """Get list of available locations/cloud settings from the Carrier platform."""
+        endpoint = f"api/v1/shared/locations/{self.credentials.project_id}"
+        return self.request('get', endpoint)
+
+    def update_ui_test(self, test_id: str, json_body) -> Dict[str, Any]:
+        """Update UI test configuration and schedule."""
+        endpoint = f"api/v1/ui_performance/test/{self.credentials.project_id}/{test_id}"
+        return self.request('put', endpoint, json=json_body)
+
+    def get_ui_test_details(self, test_id: str) -> Dict[str, Any]:
+        """Get detailed UI test configuration by test ID."""
+        endpoint = f"api/v1/ui_performance/test/{self.credentials.project_id}/{test_id}"
+        return self.request('get', endpoint)
+
+    def create_ui_test(self, json_body: Dict[str, Any]) -> Dict[str, Any]:
+        """Create a new UI test."""
+        endpoint = f"api/v1/ui_performance/tests/{self.credentials.project_id}"
+
+        # Print full JSON POST body for debugging
+        print("=" * 60)
+        print("DEBUG: Full JSON POST body for create_ui_test:")
+        print("=" * 60)
+        print(json.dumps(json_body, indent=2))
+        print("=" * 60)
+
+        # Use multipart/form-data with data field containing the JSON body
+        form_data = {'data': json.dumps(json_body)}
+
+        # Temporarily remove Content-Type header to let requests set it for multipart
+        original_headers = self.session.headers.copy()
+        if 'Content-Type' in self.session.headers:
+            del self.session.headers['Content-Type']
+
+        try:
+            full_url = f"{self.credentials.url.rstrip('/')}/{endpoint.lstrip('/')}"
+            response = self.session.post(full_url, data=form_data)
+            response.raise_for_status()
+            return response.json()
+        except requests.HTTPError as http_err:
+            logger.error(f"HTTP {response.status_code} error: {response.text[:500]}")
+            raise CarrierAPIError(f"Request to {full_url} failed with status {response.status_code}")
+        except json.JSONDecodeError:
+            logger.error(f"Response was not valid JSON. Body:\n{response.text[:500]}")
+            raise CarrierAPIError("Server returned non-JSON response")
+        finally:
+            # Restore original headers
+            self.session.headers.update(original_headers)
+
+    def cancel_ui_test(self, test_id: str) -> Dict[str, Any]:
+        """Cancel a UI test by setting its status to Canceled."""
+        endpoint = f"api/v1/ui_performance/report_status/{self.credentials.project_id}/{test_id}"
+
+        cancel_body = {
+            "test_status": {
+                "status": "Canceled",
+                "percentage": 100,
+                "description": "Test was canceled"
+            }
+        }
+
+        return self.request('put', endpoint, json=cancel_body)
```