alita-sdk 0.3.162__py3-none-any.whl → 0.3.164__py3-none-any.whl
This diff shows the content of publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- alita_sdk/runtime/langchain/assistant.py +2 -2
- alita_sdk/runtime/langchain/store_manager.py +22 -1
- alita_sdk/runtime/toolkits/tools.py +1 -1
- alita_sdk/tools/__init__.py +7 -1
- alita_sdk/tools/carrier/api_wrapper.py +76 -4
- alita_sdk/tools/carrier/backend_reports_tool.py +31 -12
- alita_sdk/tools/carrier/backend_tests_tool.py +14 -8
- alita_sdk/tools/carrier/cancel_ui_test_tool.py +178 -0
- alita_sdk/tools/carrier/carrier_sdk.py +99 -15
- alita_sdk/tools/carrier/create_ui_excel_report_tool.py +473 -0
- alita_sdk/tools/carrier/create_ui_test_tool.py +199 -0
- alita_sdk/tools/carrier/lighthouse_excel_reporter.py +155 -0
- alita_sdk/tools/carrier/run_ui_test_tool.py +394 -0
- alita_sdk/tools/carrier/tools.py +11 -1
- alita_sdk/tools/carrier/ui_reports_tool.py +6 -2
- alita_sdk/tools/carrier/update_ui_test_schedule_tool.py +278 -0
- alita_sdk/tools/memory/__init__.py +7 -0
- alita_sdk/tools/postman/__init__.py +7 -0
- alita_sdk/tools/postman/api_wrapper.py +335 -0
- alita_sdk/tools/zephyr_squad/__init__.py +62 -0
- alita_sdk/tools/zephyr_squad/api_wrapper.py +135 -0
- alita_sdk/tools/zephyr_squad/zephyr_squad_cloud_client.py +79 -0
- {alita_sdk-0.3.162.dist-info → alita_sdk-0.3.164.dist-info}/METADATA +4 -3
- {alita_sdk-0.3.162.dist-info → alita_sdk-0.3.164.dist-info}/RECORD +27 -18
- {alita_sdk-0.3.162.dist-info → alita_sdk-0.3.164.dist-info}/WHEEL +0 -0
- {alita_sdk-0.3.162.dist-info → alita_sdk-0.3.164.dist-info}/licenses/LICENSE +0 -0
- {alita_sdk-0.3.162.dist-info → alita_sdk-0.3.164.dist-info}/top_level.txt +0 -0
alita_sdk/tools/carrier/create_ui_excel_report_tool.py (new file)
@@ -0,0 +1,473 @@
````python
import logging
import json
import traceback
import tempfile
import os
from datetime import datetime
from typing import Type
from langchain_core.tools import BaseTool, ToolException
from pydantic.fields import Field
from pydantic import create_model, BaseModel
from .api_wrapper import CarrierAPIWrapper


logger = logging.getLogger(__name__)


class CreateUIExcelReportTool(BaseTool):
    api_wrapper: CarrierAPIWrapper = Field(..., description="Carrier API Wrapper instance")
    name: str = "create_excel_report_ui"
    description: str = "Create Excel report from UI test results JSON files from the Carrier platform."
    args_schema: Type[BaseModel] = create_model(
        "CreateUIExcelReportInput",
        report_id=(str, Field(default="", description="UI Report ID to generate Excel report for")),
    )

    def _run(self, report_id: str = ""):
        try:
            # Check if report_id is provided
            if not report_id or report_id.strip() == "":
                return self._missing_input_response()

            # Get UI reports list and find the specific report
            ui_reports = self.api_wrapper.get_ui_reports_list()

            # Find the report by ID
            target_report = None
            for report in ui_reports:
                if str(report.get("id")) == str(report_id):
                    target_report = report
                    break

            if not target_report:
                return self._show_available_reports_message()

            return self._process_ui_report(target_report, report_id)

        except Exception:
            stacktrace = traceback.format_exc()
            logger.error(f"Error creating UI Excel report: {stacktrace}")
            raise ToolException(stacktrace)

    def _missing_input_response(self):
        """Response when report_id is missing."""
        return "Please provide a report ID to generate an Excel report from your UI test."

    def _show_available_reports_message(self):
        """Show available reports when no matching report_id is found."""
        try:
            ui_reports = self.api_wrapper.get_ui_reports_list()

            if not ui_reports:
                return "❌ **No UI test reports found.**"

            message = ["# ❌ No report found for the specified report ID\n"]
            message.append("## Available Report IDs:")

            for report in ui_reports[:10]:  # Show first 10 reports
                report_id = report.get("id")
                report_name = report.get("name", "Unnamed Report")
                test_status = report.get("test_status", "Unknown")
                start_time = report.get("start_time", "")

                message.append(f"- **Report ID: {report_id}** - {report_name} ({test_status}) - {start_time}")

            if len(ui_reports) > 10:
                message.append(f"... and {len(ui_reports) - 10} more reports")

            message.append("\n## 💡 Example:")
            message.append("```")
            message.append(f"report_id: {ui_reports[0].get('id') if ui_reports else 'YOUR_REPORT_ID'}")
            message.append("```")
            return "\n".join(message)

        except Exception:
            return "❌ **Error retrieving available report IDs. Please check your report_id and try again.**"

    def _process_ui_report(self, report, report_id):
        """Process a single UI report and generate an Excel file."""
        try:
            # Get the UID from the report (similar to get_ui_report_by_id logic)
            uid = report.get("uid")
            if not uid:
                return f"❌ **No UID found for report {report_id}. Cannot process this report.**"

            # Create Excel reporter instance
            current_date = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
            excel_file_name = f'/tmp/ui_report_{report_id}_{current_date}.xlsx'

            excel_reporter = LighthouseExcelReporter(excel_file_name)

            processed_files = 0

            # Get report links using the same method as GetUIReportByIDTool
            report_links = self.api_wrapper.get_ui_report_links(uid)

            if not report_links:
                return f"❌ **No report links found for report {report_id}.**"

            # Process each report link by converting HTML to JSON
            for html_url in report_links:
                try:
                    # Convert HTML URL to JSON URL by replacing .html with .json
                    json_url = html_url.replace('.html', '.json')

                    # Extract file name from URL for worksheet naming
                    json_file_name = json_url.split('/')[-1]

                    # Download JSON content directly from the converted URL
                    # Extract bucket and file name from the URL structure
                    # URL format: https://platform.getcarrier.io/api/v1/artifacts/artifact/default/{project_id}/reports/{file_name}
                    url_parts = json_url.split('/')
                    if len(url_parts) >= 2:
                        bucket = url_parts[-2]  # "reports"
                        file_name = url_parts[-1]  # actual file name
                    else:
                        bucket = "reports"
                        file_name = json_file_name

                    json_content = self.api_wrapper.download_ui_report_json(bucket, file_name)

                    if json_content:
                        # Create worksheet name from JSON file name
                        worksheet_name = self._create_worksheet_name(json_file_name)

                        # Process JSON and add to Excel
                        excel_reporter.add_json_report(json_content, worksheet_name)
                        processed_files += 1

                except Exception as e:
                    logger.error(f"Error processing JSON file from {html_url}: {e}")
                    continue

            if processed_files == 0:
                return f"❌ **No JSON files could be processed for report {report_id}.**"

            # Finalize Excel file
            excel_reporter.finalize()

            # Upload to Carrier artifacts
            report_name = report.get("name", f"report_{report_id}")
            bucket_name = report_name.replace("_", "").replace(" ", "").lower()
            excel_file_basename = os.path.basename(excel_file_name)

            self.api_wrapper.upload_file(bucket_name, excel_file_name)

            # Clean up temporary file
            if os.path.exists(excel_file_name):
                os.remove(excel_file_name)

            download_link = f"{self.api_wrapper.url.rstrip('/')}/api/v1/artifacts/artifact/default/{self.api_wrapper.project_id}/{bucket_name}/{excel_file_basename}"

            return f"""# ✅ UI Excel Report Generated Successfully!

## Report Information:
- **Report ID:** `{report_id}`
- **Report Name:** `{report.get("name", "N/A")}`
- **JSON Files Processed:** `{processed_files}`
- **Excel File:** `{excel_file_basename}`
- **Bucket:** `{bucket_name}`

## 📥 Download Link:
[Download Excel Report]({download_link})

## 🎯 What's included:
- Multiple worksheets for each JSON report file
- Lighthouse performance metrics formatted for analysis
- Conditional formatting for easy identification of performance issues"""

        except Exception as e:
            logger.error(f"Error processing UI report: {e}")
            raise ToolException(f"Error processing UI report: {e}")

    def _create_worksheet_name(self, json_file_name):
        """Create a valid worksheet name from a JSON file name."""
        # Remove .json extension
        name = json_file_name.replace('.json', '')

        # Replace : with _ as specified in requirements
        name = name.replace(':', '_')

        # Excel worksheet names have limitations:
        # max 31 characters, no special characters except underscore
        name = name.replace('/', '_').replace('\\', '_').replace('[', '_').replace(']', '_')
        name = name.replace('*', '_').replace('?', '_').replace(':', '_')

        # Extract only the timestamp part (remove everything after the time)
        # Expected format: "24Jun2025_02_14_37_user-flow.re" -> "24Jun2025_02_14_37"
        parts = name.split('_')
        if len(parts) >= 4:
            # Keep first 4 parts which should be: date + 3 time parts
            # Example: ["24Jun2025", "02", "14", "37", "user-flow.re"] -> ["24Jun2025", "02", "14", "37"]
            timestamp_parts = parts[:4]
            name = '_'.join(timestamp_parts)

        # Ensure it's within Excel's 31 character limit
        if len(name) > 31:
            name = name[:31]

        return name


class LighthouseExcelReporter:
    """Excel reporter for Lighthouse UI test results."""

    def __init__(self, output_file):
        """Initialize the Excel reporter."""
        self.output_file = output_file
        self.workbook = None
        self.worksheets = {}

        # Import required libraries
        try:
            import pandas as pd
            from openpyxl import Workbook
            from openpyxl.styles import Alignment, PatternFill, Border, Side
            from openpyxl.formatting.rule import CellIsRule
            from openpyxl.utils import get_column_letter

            self.pd = pd
            self.Workbook = Workbook
            self.Alignment = Alignment
            self.PatternFill = PatternFill
            self.Border = Border
            self.Side = Side
            self.CellIsRule = CellIsRule
            self.get_column_letter = get_column_letter

            self.workbook = Workbook()
            # Remove default sheet
            if self.workbook.worksheets:
                self.workbook.remove(self.workbook.active)

        except ImportError as e:
            raise ToolException(f"Required libraries not available: {e}")

    def add_json_report(self, json_content, worksheet_name):
        """Add a JSON report as a new worksheet."""
        try:
            # Parse JSON content
            if isinstance(json_content, str):
                data = json.loads(json_content)
            else:
                data = json_content

            # Process Lighthouse data similar to the reference file
            data_rows = self._process_lighthouse_data(data)

            if not data_rows:
                logger.warning(f"No data extracted from JSON for worksheet {worksheet_name}")
                return

            # Create DataFrame
            df = self.pd.DataFrame(data_rows)

            if df.empty:
                logger.warning(f"Empty DataFrame for worksheet {worksheet_name}")
                return

            # Create pivot table
            df_pivot = df.pivot_table(index="Step name", columns="Audit", values="Numeric Value", aggfunc='mean')
            df_pivot = df_pivot.fillna('')

            # Add worksheet
            ws = self.workbook.create_sheet(title=worksheet_name)

            # Write data to worksheet
            self._write_dataframe_to_worksheet(df_pivot, ws)

            # Apply formatting
            self._apply_formatting(ws, df_pivot)

            logger.info(f"Added worksheet: {worksheet_name}")

        except Exception as e:
            logger.error(f"Error processing JSON report for worksheet {worksheet_name}: {e}")

    def _process_lighthouse_data(self, data):
        """Process Lighthouse JSON data similar to the reference implementation."""
        from urllib.parse import urlparse
        from collections import OrderedDict
        import re

        # Performance audits to extract (from reference file)
        performance_audits = [
            'first-contentful-paint',
            'speed-index',
            'interactive',
            'total-blocking-time',
            'largest-contentful-paint',
            'cumulative-layout-shift',
            'network-requests',
            'bootup-time',
            'interaction-to-next-paint',
            'server-response-time',
        ]

        # Audit naming mappings (from reference file)
        sec_audits = ['first-contentful-paint', 'interactive', 'largest-contentful-paint', 'mainthread-work-breakdown', 'network-requests', 'speed-index', 'javaScript-execution-time']
        ms_audits = ['interaction-to-next-paint', 'total-blocking-time', 'time-to-first-byte']

        rename_audits = {
            'bootup-time': 'javaScript-execution-time',
            'server-response-time': 'time-to-first-byte'
        }

        def extract_application_name(url):
            parsed_url = urlparse(url)
            hostname_parts = parsed_url.hostname.split('.') if parsed_url.hostname else []
            application_name = hostname_parts[0] if len(hostname_parts) > 1 else '3rd-party'
            return application_name

        data_rows = []
        step_order = OrderedDict()

        # Process steps from the data
        steps = data.get('steps', [])
        if not steps:
            # If no steps, treat the entire data as a single step
            steps = [{'name': 'main_report', 'lhr': data}]

        for index, step in enumerate(steps):
            step_name = step.get('name', f'step_{index}')
            step_order[step_name] = index
            lhr_data = step.get('lhr', {})

            url = lhr_data.get('finalDisplayedUrl', '3rd-party')
            application_name = extract_application_name(url)

            performance_score = lhr_data.get('categories', {}).get('performance', {}).get('score')
            performance_score = performance_score * 100 if performance_score is not None else None

            for audit in performance_audits:
                audit_result = lhr_data.get('audits', {}).get(audit, {})
                numeric_value = audit_result.get('displayValue')

                if numeric_value is not None:
                    numeric_value = re.sub(r'[a-zA-Z\s]', '', str(numeric_value))
                    if numeric_value:
                        numeric_value = numeric_value.replace(',', '')
                        try:
                            numeric_value = float(numeric_value)
                        except ValueError:
                            numeric_value = None
                    else:
                        numeric_value = None

                audit_display_name = rename_audits.get(audit, audit)
                if audit_display_name in sec_audits:
                    audit_display_name += ", sec"
                elif audit_display_name in ms_audits:
                    audit_display_name += ", ms"

                data_row = {
                    "Step name": step_name,
                    "Performance Score": performance_score,
                    "Audit": audit_display_name,
                    "Numeric Value": numeric_value
                }
                data_rows.append(data_row)

        return data_rows

    def _write_dataframe_to_worksheet(self, df, ws):
        """Write pandas DataFrame to Excel worksheet."""
        # Write headers
        ws.cell(row=1, column=1, value="Step name")
        for col_idx, col_name in enumerate(df.columns, 2):
            ws.cell(row=1, column=col_idx, value=col_name)

        # Write data
        for row_idx, (index, row) in enumerate(df.iterrows(), 2):
            ws.cell(row=row_idx, column=1, value=index)
            for col_idx, value in enumerate(row, 2):
                ws.cell(row=row_idx, column=col_idx, value=value if value != '' else None)

    def _apply_formatting(self, ws, df):
        """Apply Excel formatting to the worksheet."""
        # Apply header formatting
        header_fill = self.PatternFill(start_color="7FD5D8", end_color="7FD5D8", fill_type="solid")
        for cell in ws[1]:
            cell.fill = header_fill

        # Set alignment for 'Step name' column
        for row in ws.iter_rows(min_row=2, min_col=1, max_col=1):
            for cell in row:
                cell.alignment = self.Alignment(horizontal='left')

        # Apply conditional formatting
        for col_index, col_name in enumerate(df.columns, 2):
            column_letter = self.get_column_letter(col_index)

            if col_name in ["cumulative-layout-shift"]:
                self._apply_conditional_formatting(ws, column_letter, [0.1, 0.25], ["AFF2C9", "FFE699", "F7A9A9"])
            elif col_name in ["first-contentful-paint, sec"]:
                self._apply_conditional_formatting(ws, column_letter, [1.8, 3], ["AFF2C9", "FFE699", "F7A9A9"])
            elif col_name in ["largest-contentful-paint, sec"]:
                self._apply_conditional_formatting(ws, column_letter, [2.5, 4], ["AFF2C9", "FFE699", "F7A9A9"])

        # Apply borders
        self._apply_borders(ws)

        # Auto-adjust column widths
        self._auto_adjust_column_width(ws)

    def _apply_conditional_formatting(self, ws, column_letter, thresholds, colors):
        """Apply conditional formatting to a column."""
        ws.conditional_formatting.add(
            f'{column_letter}2:{column_letter}{ws.max_row}',
            self.CellIsRule(
                operator='lessThanOrEqual',
                formula=[str(thresholds[0])],
                stopIfTrue=True,
                fill=self.PatternFill(start_color=colors[0], end_color=colors[0], fill_type="solid")
            )
        )
        ws.conditional_formatting.add(
            f'{column_letter}2:{column_letter}{ws.max_row}',
            self.CellIsRule(
                operator='between',
                formula=[str(thresholds[0]+0.0001), str(thresholds[1])],
                stopIfTrue=True,
                fill=self.PatternFill(start_color=colors[1], end_color=colors[1], fill_type="solid")
            )
        )
        ws.conditional_formatting.add(
            f'{column_letter}2:{column_letter}{ws.max_row}',
            self.CellIsRule(
                operator='greaterThanOrEqual',
                formula=[str(thresholds[1]+0.0001)],
                stopIfTrue=True,
                fill=self.PatternFill(start_color=colors[2], end_color=colors[2], fill_type="solid")
            )
        )

    def _apply_borders(self, ws):
        """Apply borders to all data cells."""
        thin_border = self.Border(
            left=self.Side(style='thin'),
            right=self.Side(style='thin'),
            top=self.Side(style='thin'),
            bottom=self.Side(style='thin')
        )
        for row in ws.iter_rows(min_row=2, min_col=2, max_col=ws.max_column, max_row=ws.max_row):
            for cell in row:
                cell.border = thin_border

    def _auto_adjust_column_width(self, ws):
        """Auto-adjust column widths."""
        for col in ws.columns:
            max_length = 0
            column = col[0].column_letter
            for cell in col:
                try:
                    if len(str(cell.value)) > max_length:
                        max_length = len(str(cell.value))
                except Exception:
                    pass
            adjusted_width = (max_length + 2)
            ws.column_dimensions[column].width = adjusted_width

    def finalize(self):
        """Finalize and save the Excel file."""
        if self.workbook and self.workbook.worksheets:
            self.workbook.save(self.output_file)
        else:
            raise ToolException("No worksheets were created")
````
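For orientation, here is a minimal sketch of how this tool could be instantiated and invoked. The `CarrierAPIWrapper` constructor fields shown are assumptions (the tool itself only reads `api_wrapper.url` and `api_wrapper.project_id`); check `api_wrapper.py` in this release for the actual fields.

```python
# Hypothetical wiring -- field names other than `url` and `project_id`
# (which CreateUIExcelReportTool reads directly) are assumptions.
from alita_sdk.tools.carrier.api_wrapper import CarrierAPIWrapper
from alita_sdk.tools.carrier.create_ui_excel_report_tool import CreateUIExcelReportTool

api_wrapper = CarrierAPIWrapper(
    url="https://platform.getcarrier.io",  # read via api_wrapper.url
    project_id="123",                      # read via api_wrapper.project_id
    private_token="...",                   # assumed auth field
)

tool = CreateUIExcelReportTool(api_wrapper=api_wrapper)

# An empty report_id returns the _missing_input_response() prompt; a valid ID
# downloads the report's JSON artifacts, builds the .xlsx, and uploads it.
print(tool.run({"report_id": "42"}))
```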
alita_sdk/tools/carrier/create_ui_test_tool.py (new file)
@@ -0,0 +1,199 @@
````python
import logging
import json
import traceback
from typing import Type
from langchain_core.tools import BaseTool, ToolException
from pydantic.fields import Field
from pydantic import create_model, BaseModel
from .api_wrapper import CarrierAPIWrapper

logger = logging.getLogger(__name__)


class CreateUITestTool(BaseTool):
    api_wrapper: CarrierAPIWrapper = Field(..., description="Carrier API Wrapper instance")
    name: str = "create_ui_test"
    description: str = "Create a new UI test in the Carrier platform."
    args_schema: Type[BaseModel] = create_model(
        "CreateUITestInput",
        **{
            "name": (str, Field(default="", description="Test name")),
            "test_type": (str, Field(default="", description="Test type")),
            "env_type": (str, Field(default="", description="Environment type")),
            "entrypoint": (str, Field(default="", description="Entry point file (e.g., my_test.js)")),
            "runner": (str, Field(default="", description="Test runner type")),
            "repo": (str, Field(default="", description="Git repository URL")),
            "branch": (str, Field(default="", description="Git branch name")),
            "username": (str, Field(default="", description="Git username")),
            "password": (str, Field(default="", description="Git password")),
            "cpu_quota": (int, Field(default=2, description="CPU quota (cores)")),
            "memory_quota": (int, Field(default=5, description="Memory quota (GB)")),
            "custom_cmd": (str, Field(default="", description="Optional custom command")),
            "parallel_runners": (int, Field(default=1, description="Number of parallel runners")),
            "loops": (int, Field(default=1, description="Number of loops")),
            "aggregation": (str, Field(default="max", description="Aggregation method (max, min, avg)")),
        }
    )

    def _run(self, **kwargs):
        try:
            # Check if all required parameters are provided
            required_params = ["name", "test_type", "env_type", "entrypoint", "runner", "repo", "branch", "username", "password"]
            missing_params = []

            for param in required_params:
                if not kwargs.get(param) or kwargs.get(param).strip() == "":
                    missing_params.append(param)

            if missing_params:
                return self._missing_parameters_response(missing_params)

            # Create the UI test
            return self._create_ui_test(kwargs)

        except Exception:
            stacktrace = traceback.format_exc()
            logger.error(f"Error creating UI test: {stacktrace}")
            raise ToolException(stacktrace)

    def _missing_parameters_response(self, missing_params=None):
        """Response when required parameters are missing."""
        available_runners = [
            "Lighthouse-NPM_V12",
            "Lighthouse-Nodejs",
            "Lighthouse-NPM",
            "Lighthouse-NPM_V11",
            "Sitespeed (Browsertime)",
            "Sitespeed (New Entrypoint BETA)",
            "Sitespeed (New Version BETA)",
            "Sitespeed V36"
        ]

        message = [
            "# 📝 Create UI Test - Required Parameters",
            "",
            "To create a new UI test, please provide the following parameters:",
            "",
            "## 🔴 Required Parameters:",
            "- **name**: Test name (e.g., 'My UI Test')",
            "- **test_type**: Test type (e.g., 'performance')",
            "- **env_type**: Environment type (e.g., 'staging')",
            "- **entrypoint**: Entry point file (e.g., 'my_test.js')",
            "- **runner**: Test runner (see available options below)",
            "- **repo**: Git repository URL (e.g., 'https://github.com/user/repo.git')",
            "- **branch**: Git branch name (e.g., 'main')",
            "- **username**: Git username",
            "- **password**: Git password",
            "",
            "## 🟡 Optional Parameters:",
            "- **cpu_quota**: CPU quota in cores (default: 2)",
            "- **memory_quota**: Memory quota in GB (default: 5)",
            "- **custom_cmd**: Optional custom command (e.g., '--login=\"qwerty\"')",
            "- **parallel_runners**: Number of parallel runners (default: 1)",
            "- **loops**: Number of loops (default: 1)",
            "",
            "## 🚀 Available Runners:",
        ]

        for runner in available_runners:
            message.append(f"- {runner}")

        message.extend([
            "",
            "## 💡 Example:",
            "```",
            "name: 'My Performance Test'",
            "test_type: 'performance'",
            "env_type: 'staging'",
            "entrypoint: 'lighthouse_test.js'",
            "runner: 'Lighthouse-NPM_V12'",
            "repo: 'https://github.com/mycompany/tests.git'",
            "branch: 'main'",
            "username: 'myusername'",
            "password: 'mypassword'",
            "```",
            "",
            "**Note:** Aggregation method is automatically set to 'max'."
        ])

        if missing_params:
            message.insert(2, f"❌ **Missing parameters:** {', '.join(missing_params)}")
            message.insert(3, "")

        return {
            "message": "\n".join(message)
        }

    def _create_ui_test(self, params):
        """Create UI test using the provided parameters."""
        try:
            # Construct the POST body
            post_body = {
                "common_params": {
                    "name": params["name"],
                    "test_type": params["test_type"],
                    "env_type": params["env_type"],
                    "entrypoint": params["entrypoint"],
                    "runner": params["runner"],
                    "source": {
                        "name": "git_https",
                        "repo": params["repo"],
                        "branch": params["branch"],
                        "username": params["username"],
                        "password": params["password"]
                    },
                    "env_vars": {
                        "cpu_quota": params.get("cpu_quota", 2),
                        "memory_quota": params.get("memory_quota", 5),
                        "cloud_settings": {}
                    },
                    "parallel_runners": params.get("parallel_runners", 1),
                    "cc_env_vars": {},
                    "location": "default",
                    "loops": params.get("loops", 1),
                    "aggregation": params.get("aggregation", "max")
                },
                "test_parameters": [],
                "integrations": {},
                "schedules": [],
                "run_test": False
            }

            # Add custom_cmd if provided
            if params.get("custom_cmd") and params["custom_cmd"].strip():
                post_body["common_params"]["env_vars"]["custom_cmd"] = params["custom_cmd"]

            # Make the API call to create the UI test using the API wrapper
            response = self.api_wrapper.create_ui_test(post_body)

            if response:
                test_id = response.get("id") if isinstance(response, dict) else "Unknown"

                return f"""# ✅ UI Test Created Successfully!

## Test Information:
- **Test ID:** `{test_id}`
- **Name:** `{params['name']}`
- **Type:** `{params['test_type']}`
- **Environment:** `{params['env_type']}`
- **Runner:** `{params['runner']}`
- **Repository:** `{params['repo']}`
- **Branch:** `{params['branch']}`
- **Entry Point:** `{params['entrypoint']}`

## Configuration:
- **CPU Quota:** {params.get('cpu_quota', 2)} cores
- **Memory Quota:** {params.get('memory_quota', 5)} GB
- **Parallel Runners:** {params.get('parallel_runners', 1)}
- **Loops:** {params.get('loops', 1)}
- **Aggregation:** {params.get('aggregation', 'max')}
{f"- **Custom Command:** `{params['custom_cmd']}`" if params.get('custom_cmd') else ""}

## 🎯 Next Steps:
- Your UI test has been created and is ready to run
- You can execute it using the UI test runner tools
- Configure schedules and integrations as needed"""
            else:
                return "❌ **Failed to create UI test. Please check your parameters and try again.**"

        except Exception as e:
            logger.error(f"Error creating UI test: {e}")
            raise ToolException(f"Failed to create UI test: {str(e)}")
````
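Likewise, a hedged invocation sketch for the test-creation tool: the parameter values below are the placeholders from the tool's own help message, and `api_wrapper` is assumed to be configured as in the previous sketch.

```python
from alita_sdk.tools.carrier.create_ui_test_tool import CreateUITestTool

tool = CreateUITestTool(api_wrapper=api_wrapper)

# All nine required fields must be non-empty; otherwise the tool returns
# its _missing_parameters_response() help text instead of creating a test.
result = tool.run({
    "name": "My Performance Test",
    "test_type": "performance",
    "env_type": "staging",
    "entrypoint": "lighthouse_test.js",
    "runner": "Lighthouse-NPM_V12",
    "repo": "https://github.com/mycompany/tests.git",
    "branch": "main",
    "username": "myusername",
    "password": "mypassword",
    # Optional: cpu_quota, memory_quota, custom_cmd, parallel_runners,
    # loops, aggregation (defaults: 2, 5, "", 1, 1, "max")
})
print(result)
```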