alita-sdk 0.3.161__py3-none-any.whl → 0.3.163__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,155 @@
1
+ import json
2
+ import pandas as pd
3
+ from urllib.parse import urlparse
4
+ from collections import OrderedDict
5
+ import re
6
+ from openpyxl.styles import Alignment, PatternFill, Border, Side
7
+ from openpyxl.formatting.rule import CellIsRule
8
+ from openpyxl.utils import get_column_letter
9
+
10
# Path to the Lighthouse JSON report produced by a user-flow run.
report_path = 'user-flow.report.json'

# Audits to extract from the report (raw Lighthouse audit ids).
performance_audits = [
    'first-contentful-paint',
    'speed-index',
    'interactive',
    'total-blocking-time',
    'largest-contentful-paint',
    'cumulative-layout-shift',
    'network-requests',
    'bootup-time',
    'interaction-to-next-paint',
    'server-response-time',
]

# Create lists for audits to be appended with ", sec" and ", ms".
# NOTE: entries here are the *renamed* audit names (see rename_audits below),
# e.g. 'javaScript-execution-time' rather than 'bootup-time'.
sec_audits = ['first-contentful-paint', 'interactive', 'largest-contentful-paint', 'mainthread-work-breakdown', 'network-requests', 'speed-index', 'javaScript-execution-time']
ms_audits = ['interaction-to-next-paint', 'total-blocking-time', 'time-to-first-byte']

# Rename mappings: raw Lighthouse audit id -> human-friendly display name.
rename_audits = {
    'bootup-time': 'javaScript-execution-time',
    'server-response-time': 'time-to-first-byte'
}
36
+
37
def extract_application_name(url):
    """Derive an application name from *url*.

    Returns the first label of the hostname (e.g. 'app' for
    'https://app.example.com'); falls back to '3rd-party' when the URL
    has no hostname or the hostname consists of a single label.
    """
    hostname = urlparse(url).hostname
    labels = hostname.split('.') if hostname else []
    if len(labels) > 1:
        return labels[0]
    return '3rd-party'
42
+
43
def apply_excel_conditional_formatting(ws, column_letter, thresholds, colors):
    """Apply a three-band (good / warn / poor) fill to one metric column.

    Values <= thresholds[0] get colors[0]; values in
    (thresholds[0], thresholds[1]] get colors[1]; values above
    thresholds[1] get colors[2]. Rows 2..max_row are covered; the
    +0.0001 offsets keep the bands from overlapping at the boundaries.
    """
    low, high = thresholds[0], thresholds[1]
    cell_range = f'{column_letter}2:{column_letter}{ws.max_row}'
    rules = [
        ('lessThanOrEqual', [str(low)], colors[0]),
        ('between', [str(low + 0.0001), str(high)], colors[1]),
        ('greaterThanOrEqual', [str(high + 0.0001)], colors[2]),
    ]
    for operator, formula, color in rules:
        fill = PatternFill(start_color=color, end_color=color, fill_type="solid")
        ws.conditional_formatting.add(
            cell_range,
            CellIsRule(operator=operator, formula=formula, stopIfTrue=True, fill=fill))
50
+
51
def apply_all_borders(ws):
    """Draw a thin border around every data cell (row 2+, column B+)."""
    thin = Side(style='thin')
    border = Border(left=thin, right=thin, top=thin, bottom=thin)
    for data_row in ws.iter_rows(min_row=2, min_col=2, max_col=ws.max_column, max_row=ws.max_row):
        for cell in data_row:
            cell.border = border
59
+
60
def auto_adjust_column_width(ws):
    """Resize every column to fit its longest rendered value.

    Width is the longest str() representation in the column plus 2
    characters of padding. Fixes the original bug where non-string
    values (numbers, dates) raised ``TypeError`` on ``len(cell.value)``
    and were silently ignored by a bare ``except``, so numeric columns
    were never widened. ``None`` cells are skipped (matching the
    original's effective behavior).
    """
    for col in ws.columns:
        column = col[0].column_letter  # column name, e.g. 'A'
        max_length = 0
        for cell in col:
            if cell.value is None:
                continue
            # Measure the displayed text, whatever the underlying type.
            max_length = max(max_length, len(str(cell.value)))
        ws.column_dimensions[column].width = max_length + 2
72
+
73
try:
    # Load the Lighthouse user-flow report (one entry per flow step).
    with open(report_path, 'r', encoding='utf-8') as file:
        data = json.load(file)

    data_rows = []
    # Remember first-seen order of steps so the pivoted table keeps it.
    step_order = OrderedDict()

    for index, step in enumerate(data.get('steps', [])):
        step_name = step.get('name', 'unknown_step')
        step_order[step_name] = index
        lhr_data = step.get('lhr', {})
        url = lhr_data.get('finalDisplayedUrl', '3rd-party')
        # NOTE(review): application_name is computed but never used below —
        # confirm whether it was meant to appear in the output rows.
        application_name = extract_application_name(url)

        # Lighthouse score is 0..1; scale to the familiar 0..100.
        performance_score = lhr_data.get('categories', {}).get('performance', {}).get('score')
        performance_score = performance_score * 100 if performance_score is not None else None

        for audit in performance_audits:
            audit_result = lhr_data.get('audits', {}).get(audit, {})
            numeric_value = audit_result.get('displayValue')

            if numeric_value is not None:
                # Strip unit letters and whitespace (e.g. "1.2 s" -> "1.2"),
                # then thousands separators, leaving a parseable number.
                numeric_value = re.sub(r'[a-zA-Z\s]', '', numeric_value)
                if numeric_value:
                    numeric_value = numeric_value.replace(',', '')
                    numeric_value = float(numeric_value)
                else:
                    numeric_value = None

            # Rename raw audit ids and tag with a unit suffix for display.
            audit_display_name = rename_audits.get(audit, audit)
            if audit_display_name in sec_audits:
                audit_display_name += ", sec"
            elif audit_display_name in ms_audits:
                audit_display_name += ", ms"

            data_row = {"Step name": step_name,
                        "Performance Score": performance_score,
                        "Audit": audit_display_name,
                        "Numeric Value": numeric_value
                        }
            data_rows.append(data_row)

    # One row per step, one column per audit; 'mean' collapses duplicates.
    df = pd.DataFrame(data_rows)
    df = df.pivot_table(index="Step name", columns="Audit", values="Numeric Value", aggfunc='mean')
    df = df.fillna('')
    # Restore original step order (pivot_table sorts the index).
    df = df.reindex(step_order.keys())

    # Save DataFrame to Excel using openpyxl for styling
    writer = pd.ExcelWriter("output.xlsx", engine='openpyxl')
    df.to_excel(writer, index=True)
    workbook = writer.book
    worksheet = writer.sheets['Sheet1']

    # Apply styles: teal fill on the header row.
    header_fill = PatternFill(start_color="7FD5D8", end_color="7FD5D8", fill_type="solid")
    for cell in worksheet[1]:  # Apply styles to header row
        cell.fill = header_fill

    # Set alignment for 'Step name' column
    for row in worksheet.iter_rows(min_row=2, min_col=1, max_col=1):
        for cell in row:
            cell.alignment = Alignment(horizontal='left')

    # Apply conditional formatting using Core Web Vitals-style thresholds
    # (green / yellow / red bands).
    for col_index, col_name in enumerate(df.columns, 2):  # Start from 2 to account for index column
        column_letter = get_column_letter(col_index)
        if col_name in ["cumulative-layout-shift"]:
            apply_excel_conditional_formatting(worksheet, column_letter, [0.1, 0.25], ["AFF2C9", "FFE699", "F7A9A9"])
        elif col_name in ["first-contentful-paint, sec"]:
            apply_excel_conditional_formatting(worksheet, column_letter, [1.8, 3], ["AFF2C9", "FFE699", "F7A9A9"])
        elif col_name in ["largest-contentful-paint, sec"]:
            apply_excel_conditional_formatting(worksheet, column_letter, [2.5, 4], ["AFF2C9", "FFE699", "F7A9A9"])

    # Apply all borders to the data cells
    apply_all_borders(worksheet)

    # Auto-adjust column widths
    auto_adjust_column_width(worksheet)

    writer.close()  # Correct method to finalize and save the file

except Exception as e:
    print(f"An error occurred: {e}")
@@ -0,0 +1,394 @@
1
+ import logging
2
+ import json
3
+ import traceback
4
+ from typing import Type
5
+ from langchain_core.tools import BaseTool, ToolException
6
+ from pydantic.fields import Field
7
+ from pydantic import create_model, BaseModel
8
+ from .api_wrapper import CarrierAPIWrapper
9
+
10
+
11
# Module-level logger for this tool.
logger = logging.getLogger(__name__)
12
+
13
+
14
class RunUITestTool(BaseTool):
    """LangChain tool that starts a UI test on the Carrier platform.

    The tool locates a test by id or name (exact match first, then
    substring match), optionally shows the test's configurable
    parameters, and launches a run via ``CarrierAPIWrapper.run_ui_test``.
    """

    api_wrapper: CarrierAPIWrapper = Field(..., description="Carrier API Wrapper instance")
    name: str = "run_ui_test"
    description: str = ("Run and execute UI tests from the Carrier platform. Use this tool when user wants to run, execute, or start a UI test. "
                        "Provide either test ID or test name, or leave empty to see available tests. "
                        "Optionally provide custom parameters like loops, cpu_quota, memory_quota, cloud_settings, or custom_cmd.")
    args_schema: Type[BaseModel] = create_model(
        "RunUITestInput",
        test_id=(str, Field(default="", description="Test ID to execute")),
        test_name=(str, Field(default="", description="Test name to execute")),
        loops=(int, Field(default=None, description="Number of loops to run the test")),
        cpu_quota=(str, Field(default=None, description="CPU quota for the test runner")),
        memory_quota=(str, Field(default=None, description="Memory quota for the test runner")),
        cloud_settings=(str, Field(default=None, description="Cloud settings name for the test runner")),
        custom_cmd=(str, Field(default=None, description="Custom command to run with the test")),
    )

    def _run(self, test_id: str = "", test_name: str = "", loops: int = None,
             cpu_quota: str = None, memory_quota: str = None,
             cloud_settings: str = None, custom_cmd: str = None):
        """Resolve the requested test and either describe it or start it.

        Without any custom parameters, returns an informational message
        listing the test's defaults and the available runners. With at
        least one custom parameter, posts a run request and returns the
        result link. Raises ToolException (with the traceback) on error.
        """
        try:
            # Check if neither test_id nor test_name is provided
            if (not test_id or test_id.strip() == "") and (not test_name or test_name.strip() == ""):
                return self._missing_input_response()

            # Check if user wants to see the list of tests (can be in test_name field)
            if test_name.lower() in ["show me the list of ui tests", "list ui tests", "show ui tests"]:
                return self._show_ui_tests_list()
            # Get UI tests list only when we need to search for and run a test
            ui_tests = self.api_wrapper.get_ui_tests_list()

            # Find the test by ID or name
            test_data = None
            ui_test_id = None

            # Try to find by ID first (if test_id is provided and numeric)
            if test_id and test_id.strip() and test_id.isdigit():
                test_id_int = int(test_id)
                for test in ui_tests:
                    if test.get("id") == test_id_int:
                        test_data = test
                        ui_test_id = test_id_int
                        break

            # If not found by ID, try to find by name (if test_name is provided)
            if not test_data and test_name and test_name.strip():
                for test in ui_tests:
                    if test.get("name", "").lower() == test_name.lower():
                        test_data = test
                        ui_test_id = test.get("id")
                        break

                # If exact match not found, try partial match
                if not test_data:
                    for test in ui_tests:
                        if test_name.lower() in test.get("name", "").lower():
                            test_data = test
                            ui_test_id = test.get("id")
                            break
            # If still not found and test_id was provided but not numeric, try as name
            if not test_data and test_id and test_id.strip() and not test_id.isdigit():
                for test in ui_tests:
                    if test.get("name", "").lower() == test_id.lower():
                        test_data = test
                        ui_test_id = test.get("id")
                        break

                # If exact match not found, try partial match
                if not test_data:
                    for test in ui_tests:
                        if test_id.lower() in test.get("name", "").lower():
                            test_data = test
                            ui_test_id = test.get("id")
                            break

            # Nothing matched: report what was searched and what exists.
            if not test_data:
                available_tests = []
                for test in ui_tests:
                    available_tests.append(f"ID: {test.get('id')}, Name: {test.get('name')}")

                search_criteria = []
                if test_id:
                    search_criteria.append(f"ID: {test_id}")
                if test_name:
                    search_criteria.append(f"Name: {test_name}")

                return f"Test not found for {' or '.join(search_criteria)}. Available UI tests:\n" + "\n".join(available_tests)

            # Check if custom parameters are provided
            has_custom_params = any([loops is not None, cpu_quota is not None, memory_quota is not None,
                                     cloud_settings is not None, custom_cmd is not None])

            # If no custom parameters provided, show info message with default values and available options
            if not has_custom_params:
                return self._show_test_parameter_info(test_data, ui_test_id)

            # Get detailed test configuration for the POST request
            test_details = self._get_ui_test_details(ui_test_id)

            if not test_details:
                return f"Could not retrieve test details for test ID {ui_test_id}."

            # Prepare POST request data with custom parameters
            post_data = self._prepare_post_data_with_custom_params(test_details, loops, cpu_quota, memory_quota, cloud_settings, custom_cmd)

            # Execute the UI test
            result_id = self.api_wrapper.run_ui_test(str(ui_test_id), post_data)

            return f"UI test started successfully. Result ID: {result_id}. " \
                   f"Link to report: {self.api_wrapper.url.rstrip('/')}/-/performance/ui/results?result_id={result_id}"

        except Exception:
            stacktrace = traceback.format_exc()
            logger.error(f"Error running UI test: {stacktrace}")
            raise ToolException(stacktrace)

    def _show_ui_tests_list(self):
        """Show the list of available UI tests."""
        try:
            ui_tests = self.api_wrapper.get_ui_tests_list()

            if not ui_tests:
                return "No UI tests found."

            test_list = ["Available UI Tests:"]
            for test in ui_tests:
                test_list.append(f"- ID: {test.get('id')}, Name: {test.get('name')}, Runner: {test.get('runner')}")

            return "\n".join(test_list)

        except Exception:
            stacktrace = traceback.format_exc()
            logger.error(f"Error fetching UI tests list: {stacktrace}")
            raise ToolException(stacktrace)

    def _get_ui_test_details(self, test_id: int):
        """Get detailed test configuration from the UI tests list.

        Returns the matching test dict, or None when the id is unknown
        or the list cannot be fetched (errors are logged, not raised).
        """
        try:
            ui_tests = self.api_wrapper.get_ui_tests_list()

            for test in ui_tests:
                if test.get("id") == test_id:
                    return test

            return None

        except Exception:
            stacktrace = traceback.format_exc()
            logger.error(f"Error getting UI test details: {stacktrace}")
            return None

    def _show_test_parameter_info(self, test_data, test_id):
        """Show information about test parameters that can be changed.

        Builds a human-readable message with the test's current
        defaults and the runners available from ``get_locations()``.
        """
        try:
            # Get current default values from test data
            env_vars = test_data.get("env_vars", {})

            info_message = []
            info_message.append(f"Test '{test_data.get('name')}' (ID: {test_id}) found!")
            info_message.append("\nCurrent default parameters:")
            info_message.append(f"- loops: 1 (default override)")
            info_message.append(f"- cpu_quota: {env_vars.get('cpu_quota', 'Not set')}")
            info_message.append(f"- memory_quota: {env_vars.get('memory_quota', 'Not set')}")
            info_message.append(f"- cloud_settings: {env_vars.get('cloud_settings', 'Not set')}")
            info_message.append(f"- custom_cmd: {env_vars.get('custom_cmd', 'Not set')}")
            # Always try to get and display available cloud settings - this is critical information
            info_message.append("\n" + "="*60)
            info_message.append("🏃 AVAILABLE RUNNERS - CHOOSE ONE FOR cloud_settings:")
            info_message.append("="*60)

            try:
                locations_data = self.api_wrapper.get_locations()
                if not locations_data:
                    info_message.append("⚠️ Could not fetch locations data - API returned empty response")
                else:
                    cloud_regions = locations_data.get("cloud_regions", [])
                    public_regions = locations_data.get("public_regions", [])
                    project_regions = locations_data.get("project_regions", [])
                    # Add public regions information (these are the most commonly used)
                    info_message.append("\n🌐 PUBLIC REGIONS (use these names):")
                    if public_regions:
                        for region in public_regions:
                            info_message.append(f"  ✅ '{region}'")
                    else:
                        info_message.append("  ❌ No public regions available")

                    # Add project regions information
                    if project_regions:
                        info_message.append("\n🏢 PROJECT REGIONS (use these names):")
                        for region in project_regions:
                            info_message.append(f"  ✅ '{region}'")

                    # Add cloud regions information
                    if cloud_regions:
                        info_message.append("\n☁️ CLOUD REGIONS (advanced - use full names):")
                        for region in cloud_regions:
                            region_name = region.get("name", "Unknown")
                            info_message.append(f"  ✅ '{region_name}'")

            except Exception as e:
                # Runner list is best-effort: surface the failure in the
                # message instead of aborting the whole info response.
                logger.error(f"Error fetching locations: {e}")
                info_message.append("❌ ERROR: Could not fetch available runners!")
                info_message.append(f"   Reason: {str(e)}")
                info_message.append("   Please check your API connection and try again.")

            info_message.append("="*60)
            info_message.append("\n📋 HOW TO USE:")
            info_message.append("To run this test with custom parameters, specify the values you want to change.")
            info_message.append("\n💡 EXAMPLES:")
            info_message.append("  • Use default runner: cloud_settings='default'")
            info_message.append("  • Change loops: loops=5")
            info_message.append("  • Change resources: cpu_quota='2', memory_quota='4Gi'")
            info_message.append("  • Full example: loops=3, cloud_settings='default', cpu_quota='2'")
            info_message.append("\n📝 RUNNER TYPES:")
            info_message.append("  • Public regions: Use empty cloud_settings {}, location = runner name")
            info_message.append("  • Project regions: Use empty cloud_settings {}, location = runner name")
            info_message.append("  • Cloud regions: Use full cloud configuration object")

            return "\n".join(info_message)

        except Exception:
            stacktrace = traceback.format_exc()
            logger.error(f"Error showing test parameter info: {stacktrace}")
            raise ToolException(stacktrace)

    def _prepare_post_data_with_custom_params(self, test_data, loops=None, cpu_quota=None,
                                              memory_quota=None, cloud_settings=None, custom_cmd=None):
        """Prepare POST request data with custom parameters.

        Merges the stored test configuration with any user-supplied
        overrides. ``cloud_settings`` may name a public/project region
        (empty settings object + location set to the runner name) or a
        cloud region (full settings object from ``get_locations()``).
        """
        try:
            # Extract values from the test data
            test_parameters = test_data.get("test_parameters", [])
            env_vars = test_data.get("env_vars", {})
            integrations = test_data.get("integrations", {})
            location = test_data.get("location", "")
            parallel_runners = test_data.get("parallel_runners", 1)
            aggregation = test_data.get("aggregation", "max")

            # Extract reporter email for integrations
            reporter_email = integrations.get("reporters", {}).get("reporter_email", {})

            # Extract S3 integration
            s3_integration = integrations.get("system", {}).get("s3_integration", {})

            # Find specific test parameters by name
            def find_param_by_name(params, name):
                for param in params:
                    if param.get("name") == name:
                        return param
                return {}

            # Handle cloud_settings parameter and location
            final_cloud_settings = env_vars.get("cloud_settings")
            final_location = location  # Use original location as default

            if cloud_settings:
                try:
                    locations_data = self.api_wrapper.get_locations()
                    cloud_regions = locations_data.get("cloud_regions", [])
                    public_regions = locations_data.get("public_regions", [])
                    project_regions = locations_data.get("project_regions", [])

                    # Check if it's a public region first
                    if cloud_settings in public_regions:
                        # For public regions, use empty cloud_settings and set location to the runner name
                        final_cloud_settings = {}
                        final_location = cloud_settings
                    # Check if it's a project region
                    elif cloud_settings in project_regions:
                        # For project regions, use empty cloud_settings and set location to the runner name
                        final_cloud_settings = {}
                        final_location = cloud_settings
                    else:
                        # Try to find exact match in cloud regions
                        found_match = False
                        for region in cloud_regions:
                            if region.get("name", "").lower() == cloud_settings.lower():
                                # Get the complete cloud_settings object and add the missing fields
                                region_cloud_settings = region.get("cloud_settings", {})
                                # Add the additional fields that are expected in the POST request.
                                # NOTE(review): instance_type/ec2_instance_type are hard-coded
                                # defaults mirroring the reference request — confirm they suit
                                # all cloud regions.
                                region_cloud_settings.update({
                                    "instance_type": "on-demand",
                                    "ec2_instance_type": "t2.xlarge",
                                    "cpu_cores_limit": int(cpu_quota) if cpu_quota else env_vars.get("cpu_quota", 1),
                                    "memory_limit": int(memory_quota) if memory_quota else env_vars.get("memory_quota", 8),
                                    "concurrency": 1
                                })
                                final_cloud_settings = region_cloud_settings
                                final_location = location  # Keep original location for cloud regions
                                found_match = True
                                break

                        # If no exact match in cloud regions, try partial match
                        if not found_match:
                            for region in cloud_regions:
                                if cloud_settings.lower() in region.get("name", "").lower():
                                    region_cloud_settings = region.get("cloud_settings", {})
                                    region_cloud_settings.update({
                                        "instance_type": "on-demand",
                                        "ec2_instance_type": "t2.xlarge",
                                        "cpu_cores_limit": int(cpu_quota) if cpu_quota else env_vars.get("cpu_quota", 1),
                                        "memory_limit": int(memory_quota) if memory_quota else env_vars.get("memory_quota", 8),
                                        "concurrency": 1
                                    })
                                    final_cloud_settings = region_cloud_settings
                                    final_location = location  # Keep original location for cloud regions
                                    found_match = True
                                    break

                        # If still no match found, treat as public region fallback
                        if not found_match:
                            final_cloud_settings = {}
                            final_location = cloud_settings

                except Exception as e:
                    logger.error(f"Error processing cloud_settings: {e}")
                    # Use the provided value as-is if we can't process it
                    final_cloud_settings = cloud_settings
                    final_location = location

            # Prepare the POST request body with custom parameters.
            # NOTE(review): "name"/"test_type"/"env_type" receive the whole
            # parameter dict from find_param_by_name, not just its value —
            # confirm the API expects the full parameter object here.
            post_data = {
                "common_params": {
                    "name": find_param_by_name(test_parameters, "test_name"),
                    "test_type": find_param_by_name(test_parameters, "test_type"),
                    "env_type": find_param_by_name(test_parameters, "env_type"),
                    "env_vars": {
                        "cpu_quota": cpu_quota if cpu_quota is not None else env_vars.get("cpu_quota"),
                        "memory_quota": memory_quota if memory_quota is not None else env_vars.get("memory_quota"),
                        "cloud_settings": final_cloud_settings,
                        "ENV": "prod",  # Override as per reference code
                        "custom_cmd": custom_cmd if custom_cmd is not None else env_vars.get("custom_cmd", "")
                    },
                    "parallel_runners": parallel_runners,
                    "location": final_location
                },
                "test_parameters": test_parameters,
                "integrations": {
                    "reporters": {
                        "reporter_email": {
                            "id": reporter_email.get("id"),
                            "is_local": reporter_email.get("is_local"),
                            "project_id": reporter_email.get("project_id"),
                            "recipients": reporter_email.get("recipients")
                        }
                    },
                    "system": {
                        "s3_integration": {
                            "integration_id": s3_integration.get("integration_id"),
                            "is_local": s3_integration.get("is_local")
                        }
                    }
                },
                "loops": loops if loops is not None else 1,  # Use custom loops or default to 1
                "aggregation": aggregation
            }

            return post_data

        except Exception:
            stacktrace = traceback.format_exc()
            logger.error(f"Error preparing POST data with custom parameters: {stacktrace}")
            raise ToolException(stacktrace)

    def _missing_input_response(self):
        """Response when required input is missing."""
        try:
            available_tests = self._show_ui_tests_list()
            return {
                "message": "Please provide test ID or test name of your UI test.",
                "parameters": {
                    "test_id": None,
                    "test_name": None,
                },
                "available_tests": available_tests
            }
        except Exception:
            # Even the test list failed; still prompt for input.
            return {
                "message": "Please provide test ID or test name of your UI test.",
                "parameters": {
                    "test_id": None,
                    "test_name": None,
                },
                "available_tests": "Error fetching available tests."
            }
@@ -3,6 +3,11 @@ from .tickets_tool import FetchTicketsTool, CreateTicketTool
3
3
  from .backend_reports_tool import GetReportsTool, GetReportByIDTool, CreateExcelReportTool
4
4
  from .backend_tests_tool import GetTestsTool, GetTestByIDTool, RunTestByIDTool
5
5
  from .ui_reports_tool import GetUIReportsTool, GetUIReportByIDTool, GetUITestsTool
6
+ from .run_ui_test_tool import RunUITestTool
7
+ from .update_ui_test_schedule_tool import UpdateUITestScheduleTool
8
+ from .create_ui_excel_report_tool import CreateUIExcelReportTool
9
+ from .create_ui_test_tool import CreateUITestTool
10
+ from .cancel_ui_test_tool import CancelUITestTool
6
11
 
7
12
  __all__ = [
8
13
  {"name": "get_ticket_list", "tool": FetchTicketsTool},
@@ -15,5 +20,10 @@ __all__ = [
15
20
  {"name": "run_test_by_id", "tool": RunTestByIDTool},
16
21
  {"name": "get_ui_reports", "tool": GetUIReportsTool},
17
22
  {"name": "get_ui_report_by_id", "tool": GetUIReportByIDTool},
18
- {"name": "get_ui_tests", "tool": GetUITestsTool}
23
+ {"name": "get_ui_tests", "tool": GetUITestsTool},
24
+ {"name": "run_ui_test", "tool": RunUITestTool},
25
+ {"name": "update_ui_test_schedule", "tool": UpdateUITestScheduleTool},
26
+ {"name": "create_ui_excel_report", "tool": CreateUIExcelReportTool},
27
+ {"name": "create_ui_test", "tool": CreateUITestTool},
28
+ {"name": "cancel_ui_test", "tool": CancelUITestTool}
19
29
  ]
@@ -13,7 +13,7 @@ logger = logging.getLogger("carrier_ui_reports_tool")
13
13
  class GetUIReportsTool(BaseTool):
14
14
  api_wrapper: CarrierAPIWrapper = Field(..., description="Carrier API Wrapper instance")
15
15
  name: str = "get_ui_reports"
16
- description: str = "Get list of UI test reports from the Carrier platform. Optionally filter by tag and time range."
16
+ description: str = "Get list of UI test reports from the Carrier platform. Optionally filter by time range."
17
17
  args_schema: Type[BaseModel] = create_model(
18
18
  "GetUIReportsInput",
19
19
  report_id=(str, Field(description="UI Report id to retrieve")),
@@ -199,13 +199,17 @@ class GetUITestsTool(BaseTool):
199
199
  # Extract relevant fields for cleaner output
200
200
  base_fields = {
201
201
  "id", "name", "browser", "loops", "aggregation", "parallel_runners",
202
- "location", "entrypoint", "runner", "test_uid", "job_type"
202
+ "location", "entrypoint", "runner", "job_type"
203
203
  }
204
204
 
205
205
  result_tests = []
206
206
  for test in filtered_tests:
207
207
  trimmed = {k: test[k] for k in base_fields if k in test}
208
208
 
209
+ # Add test_uid separately with a clear label to avoid confusion with id
210
+ if "test_uid" in test:
211
+ trimmed["test_uid"] = test["test_uid"]
212
+
209
213
  # Include test parameters if available
210
214
  if "test_parameters" in test:
211
215
  trimmed["test_parameters"] = [