@bbearai/mcp-server 0.6.0 → 0.7.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.js +984 -37
- package/package.json +2 -1
- package/src/index.ts +1081 -38
package/src/index.ts
CHANGED
|
@@ -145,7 +145,7 @@ const tools = [
|
|
|
145
145
|
},
|
|
146
146
|
status: {
|
|
147
147
|
type: 'string',
|
|
148
|
-
enum: ['new', 'triaging', 'confirmed', 'in_progress', 'fixed', '
|
|
148
|
+
enum: ['new', 'triaging', 'confirmed', 'in_progress', 'fixed', 'ready_to_test', 'verified', 'resolved', 'reviewed', 'closed', 'wont_fix', 'duplicate'],
|
|
149
149
|
description: 'The new status for the report',
|
|
150
150
|
},
|
|
151
151
|
resolution_notes: {
|
|
@@ -158,7 +158,47 @@ const tools = [
|
|
|
158
158
|
},
|
|
159
159
|
{
|
|
160
160
|
name: 'get_report_context',
|
|
161
|
-
description: 'Get the full debugging context for a report including console logs, network requests, and navigation history',
|
|
161
|
+
description: 'Get the full debugging context for a report including console logs, network requests, and navigation history. Use compact=true for app_context summary only (no console/network/navigation).',
|
|
162
|
+
inputSchema: {
|
|
163
|
+
type: 'object' as const,
|
|
164
|
+
properties: {
|
|
165
|
+
report_id: {
|
|
166
|
+
type: 'string',
|
|
167
|
+
description: 'The UUID of the report',
|
|
168
|
+
},
|
|
169
|
+
compact: {
|
|
170
|
+
type: 'boolean',
|
|
171
|
+
description: 'Compact mode: returns app_context only, skips console logs, network requests, and navigation history. (default: false)',
|
|
172
|
+
},
|
|
173
|
+
},
|
|
174
|
+
required: ['report_id'],
|
|
175
|
+
},
|
|
176
|
+
},
|
|
177
|
+
{
|
|
178
|
+
name: 'add_report_comment',
|
|
179
|
+
description: 'Add a comment/note to a bug report thread without changing its status. Use this for follow-up questions, investigation notes, or developer-tester communication.',
|
|
180
|
+
inputSchema: {
|
|
181
|
+
type: 'object' as const,
|
|
182
|
+
properties: {
|
|
183
|
+
report_id: {
|
|
184
|
+
type: 'string',
|
|
185
|
+
description: 'The UUID of the report to comment on',
|
|
186
|
+
},
|
|
187
|
+
message: {
|
|
188
|
+
type: 'string',
|
|
189
|
+
description: 'The comment/note content',
|
|
190
|
+
},
|
|
191
|
+
author: {
|
|
192
|
+
type: 'string',
|
|
193
|
+
description: 'Optional author name (defaults to "Claude Code")',
|
|
194
|
+
},
|
|
195
|
+
},
|
|
196
|
+
required: ['report_id', 'message'],
|
|
197
|
+
},
|
|
198
|
+
},
|
|
199
|
+
{
|
|
200
|
+
name: 'get_report_comments',
|
|
201
|
+
description: 'Get all comments/notes on a bug report in chronological order. Returns the full discussion thread.',
|
|
162
202
|
inputSchema: {
|
|
163
203
|
type: 'object' as const,
|
|
164
204
|
properties: {
|
|
@@ -170,6 +210,25 @@ const tools = [
|
|
|
170
210
|
required: ['report_id'],
|
|
171
211
|
},
|
|
172
212
|
},
|
|
213
|
+
{
|
|
214
|
+
name: 'resolve_conversations',
|
|
215
|
+
description: 'Mark one or more discussion threads as resolved (closed) or reopen them. Use this to close conversations after issues are addressed.',
|
|
216
|
+
inputSchema: {
|
|
217
|
+
type: 'object' as const,
|
|
218
|
+
properties: {
|
|
219
|
+
thread_ids: {
|
|
220
|
+
type: 'array',
|
|
221
|
+
items: { type: 'string' },
|
|
222
|
+
description: 'UUIDs of the discussion threads to resolve/reopen',
|
|
223
|
+
},
|
|
224
|
+
resolved: {
|
|
225
|
+
type: 'boolean',
|
|
226
|
+
description: 'true to mark as resolved (default), false to reopen',
|
|
227
|
+
},
|
|
228
|
+
},
|
|
229
|
+
required: ['thread_ids'],
|
|
230
|
+
},
|
|
231
|
+
},
|
|
173
232
|
{
|
|
174
233
|
name: 'get_project_info',
|
|
175
234
|
description: 'Get project information including QA tracks, test case counts, and common bug patterns',
|
|
@@ -325,7 +384,7 @@ const tools = [
|
|
|
325
384
|
},
|
|
326
385
|
{
|
|
327
386
|
name: 'list_test_cases',
|
|
328
|
-
description: 'List all test cases in the project. Returns test_key, title, target_route, and other metadata. Use this to see existing tests before updating them.',
|
|
387
|
+
description: 'List all test cases in the project. Returns test_key, title, target_route, and other metadata. Use this to see existing tests before updating them. Use compact=true for id, test_key, title, and priority only (saves tokens).',
|
|
329
388
|
inputSchema: {
|
|
330
389
|
type: 'object' as const,
|
|
331
390
|
properties: {
|
|
@@ -350,6 +409,10 @@ const tools = [
|
|
|
350
409
|
type: 'number',
|
|
351
410
|
description: 'Offset for pagination (default 0)',
|
|
352
411
|
},
|
|
412
|
+
compact: {
|
|
413
|
+
type: 'boolean',
|
|
414
|
+
description: 'Compact mode: returns id, test_key, title, and priority only. (default: false)',
|
|
415
|
+
},
|
|
353
416
|
},
|
|
354
417
|
},
|
|
355
418
|
},
|
|
@@ -458,7 +521,7 @@ const tools = [
|
|
|
458
521
|
},
|
|
459
522
|
notify_tester: {
|
|
460
523
|
type: 'boolean',
|
|
461
|
-
description: '
|
|
524
|
+
description: 'Notify the original tester about the fix with a message and verification task. Default: true. Set to false for silent resolve.',
|
|
462
525
|
},
|
|
463
526
|
},
|
|
464
527
|
required: ['report_id', 'commit_sha'],
|
|
@@ -700,17 +763,17 @@ const tools = [
|
|
|
700
763
|
},
|
|
701
764
|
{
|
|
702
765
|
name: 'get_coverage_matrix',
|
|
703
|
-
description: 'Get a comprehensive Route × Track coverage matrix showing test counts, pass rates, and execution data. Use this for a complete view of test coverage.',
|
|
766
|
+
description: 'Get a comprehensive Route × Track coverage matrix showing test counts, pass rates, and execution data. Use this for a complete view of test coverage. Execution data and bug counts are opt-in to save tokens.',
|
|
704
767
|
inputSchema: {
|
|
705
768
|
type: 'object' as const,
|
|
706
769
|
properties: {
|
|
707
770
|
include_execution_data: {
|
|
708
771
|
type: 'boolean',
|
|
709
|
-
description: 'Include pass/fail rates and last execution times (default: true
|
|
772
|
+
description: 'Include pass/fail rates and last execution times (default: false). Set true when you need execution history.',
|
|
710
773
|
},
|
|
711
774
|
include_bug_counts: {
|
|
712
775
|
type: 'boolean',
|
|
713
|
-
description: 'Include open/critical bug counts per route (default: true
|
|
776
|
+
description: 'Include open/critical bug counts per route (default: false). Set true when you need bug context.',
|
|
714
777
|
},
|
|
715
778
|
},
|
|
716
779
|
},
|
|
@@ -944,6 +1007,11 @@ const tools = [
|
|
|
944
1007
|
enum: ['ios', 'android', 'web'],
|
|
945
1008
|
description: 'Filter by platform support',
|
|
946
1009
|
},
|
|
1010
|
+
role: {
|
|
1011
|
+
type: 'string',
|
|
1012
|
+
enum: ['tester', 'feedback'],
|
|
1013
|
+
description: 'Filter by role: "tester" for QA testers, "feedback" for feedback-only users (default: all)',
|
|
1014
|
+
},
|
|
947
1015
|
},
|
|
948
1016
|
},
|
|
949
1017
|
},
|
|
@@ -1032,6 +1100,21 @@ const tools = [
|
|
|
1032
1100
|
required: ['tester_id', 'test_case_ids'],
|
|
1033
1101
|
},
|
|
1034
1102
|
},
|
|
1103
|
+
{
|
|
1104
|
+
name: 'unassign_tests',
|
|
1105
|
+
description: 'Remove one or more test assignments by assignment ID. Preserves the test case and its history — only the assignment link is deleted. Use list_test_assignments first to find assignment IDs. Max 50 per call.',
|
|
1106
|
+
inputSchema: {
|
|
1107
|
+
type: 'object' as const,
|
|
1108
|
+
properties: {
|
|
1109
|
+
assignment_ids: {
|
|
1110
|
+
type: 'array',
|
|
1111
|
+
items: { type: 'string' },
|
|
1112
|
+
description: 'Array of test assignment UUIDs to remove (required, max 50)',
|
|
1113
|
+
},
|
|
1114
|
+
},
|
|
1115
|
+
required: ['assignment_ids'],
|
|
1116
|
+
},
|
|
1117
|
+
},
|
|
1035
1118
|
{
|
|
1036
1119
|
name: 'get_tester_workload',
|
|
1037
1120
|
description: 'View a specific tester\'s current workload — assignment counts by status and recent assignments.',
|
|
@@ -1074,6 +1157,11 @@ const tools = [
|
|
|
1074
1157
|
type: 'string',
|
|
1075
1158
|
description: 'Optional notes about the tester',
|
|
1076
1159
|
},
|
|
1160
|
+
role: {
|
|
1161
|
+
type: 'string',
|
|
1162
|
+
enum: ['tester', 'feedback'],
|
|
1163
|
+
description: 'Role: "tester" for QA testers (default), "feedback" for feedback-only users',
|
|
1164
|
+
},
|
|
1077
1165
|
},
|
|
1078
1166
|
required: ['name', 'email'],
|
|
1079
1167
|
},
|
|
@@ -1176,7 +1264,7 @@ const tools = [
|
|
|
1176
1264
|
},
|
|
1177
1265
|
{
|
|
1178
1266
|
name: 'export_test_results',
|
|
1179
|
-
description: 'Export test results for a specific test run as structured JSON — includes every assignment, tester, result, and duration.',
|
|
1267
|
+
description: 'Export test results for a specific test run as structured JSON — includes every assignment, tester, result, and duration. Use compact=true for summary only (no assignments array). Use limit to cap assignments returned.',
|
|
1180
1268
|
inputSchema: {
|
|
1181
1269
|
type: 'object' as const,
|
|
1182
1270
|
properties: {
|
|
@@ -1184,6 +1272,14 @@ const tools = [
|
|
|
1184
1272
|
type: 'string',
|
|
1185
1273
|
description: 'UUID of the test run to export (required)',
|
|
1186
1274
|
},
|
|
1275
|
+
compact: {
|
|
1276
|
+
type: 'boolean',
|
|
1277
|
+
description: 'Compact mode: returns test run info + summary only, no assignments array. (default: false)',
|
|
1278
|
+
},
|
|
1279
|
+
limit: {
|
|
1280
|
+
type: 'number',
|
|
1281
|
+
description: 'Max assignments to return in full mode (default: 100, max: 500). Ignored when compact=true.',
|
|
1282
|
+
},
|
|
1187
1283
|
},
|
|
1188
1284
|
required: ['test_run_id'],
|
|
1189
1285
|
},
|
|
@@ -1232,8 +1328,677 @@ const tools = [
|
|
|
1232
1328
|
properties: {},
|
|
1233
1329
|
},
|
|
1234
1330
|
},
|
|
1331
|
+
// === TEST EXECUTION INTELLIGENCE ===
|
|
1332
|
+
{
|
|
1333
|
+
name: 'get_test_impact',
|
|
1334
|
+
description: 'Given changed files, identify which test cases are affected by mapping file paths to test case target routes.',
|
|
1335
|
+
inputSchema: {
|
|
1336
|
+
type: 'object' as const,
|
|
1337
|
+
properties: {
|
|
1338
|
+
changed_files: {
|
|
1339
|
+
type: 'array',
|
|
1340
|
+
items: { type: 'string' },
|
|
1341
|
+
description: 'List of changed file paths (relative to project root)',
|
|
1342
|
+
},
|
|
1343
|
+
},
|
|
1344
|
+
required: ['changed_files'],
|
|
1345
|
+
},
|
|
1346
|
+
},
|
|
1347
|
+
{
|
|
1348
|
+
name: 'get_flaky_tests',
|
|
1349
|
+
description: 'Analyze test run history to identify tests with intermittent failure rates above a threshold.',
|
|
1350
|
+
inputSchema: {
|
|
1351
|
+
type: 'object' as const,
|
|
1352
|
+
properties: {
|
|
1353
|
+
threshold: {
|
|
1354
|
+
type: 'number',
|
|
1355
|
+
description: 'Minimum flakiness rate to report (0-100, default: 5)',
|
|
1356
|
+
},
|
|
1357
|
+
limit: {
|
|
1358
|
+
type: 'number',
|
|
1359
|
+
description: 'Maximum results to return (default: 20)',
|
|
1360
|
+
},
|
|
1361
|
+
},
|
|
1362
|
+
},
|
|
1363
|
+
},
|
|
1364
|
+
{
|
|
1365
|
+
name: 'assess_test_quality',
|
|
1366
|
+
description: 'Analyze test case steps for weak patterns: vague assertions, missing edge cases, no negative testing, generic descriptions.',
|
|
1367
|
+
inputSchema: {
|
|
1368
|
+
type: 'object' as const,
|
|
1369
|
+
properties: {
|
|
1370
|
+
test_case_ids: {
|
|
1371
|
+
type: 'array',
|
|
1372
|
+
items: { type: 'string' },
|
|
1373
|
+
description: 'Specific test case IDs to assess. If omitted, assesses recent test cases.',
|
|
1374
|
+
},
|
|
1375
|
+
limit: {
|
|
1376
|
+
type: 'number',
|
|
1377
|
+
description: 'Maximum test cases to assess (default: 20)',
|
|
1378
|
+
},
|
|
1379
|
+
},
|
|
1380
|
+
},
|
|
1381
|
+
},
|
|
1382
|
+
{
|
|
1383
|
+
name: 'get_test_execution_summary',
|
|
1384
|
+
description: 'Aggregate test execution metrics: pass rate, completion rate, most-failed tests, fastest/slowest tests.',
|
|
1385
|
+
inputSchema: {
|
|
1386
|
+
type: 'object' as const,
|
|
1387
|
+
properties: {
|
|
1388
|
+
days: {
|
|
1389
|
+
type: 'number',
|
|
1390
|
+
description: 'Number of days to analyze (default: 30)',
|
|
1391
|
+
},
|
|
1392
|
+
},
|
|
1393
|
+
},
|
|
1394
|
+
},
|
|
1395
|
+
{
|
|
1396
|
+
name: 'check_test_freshness',
|
|
1397
|
+
description: 'Identify test cases that have not been updated since their target code was modified.',
|
|
1398
|
+
inputSchema: {
|
|
1399
|
+
type: 'object' as const,
|
|
1400
|
+
properties: {
|
|
1401
|
+
limit: {
|
|
1402
|
+
type: 'number',
|
|
1403
|
+
description: 'Maximum results to return (default: 20)',
|
|
1404
|
+
},
|
|
1405
|
+
},
|
|
1406
|
+
},
|
|
1407
|
+
},
|
|
1408
|
+
{
|
|
1409
|
+
name: 'get_untested_changes',
|
|
1410
|
+
description: 'Given recent commits or changed files, find code changes with no corresponding test coverage in BugBear.',
|
|
1411
|
+
inputSchema: {
|
|
1412
|
+
type: 'object' as const,
|
|
1413
|
+
properties: {
|
|
1414
|
+
changed_files: {
|
|
1415
|
+
type: 'array',
|
|
1416
|
+
items: { type: 'string' },
|
|
1417
|
+
description: 'List of changed file paths. If omitted, uses git diff against main.',
|
|
1418
|
+
},
|
|
1419
|
+
},
|
|
1420
|
+
},
|
|
1421
|
+
},
|
|
1422
|
+
// === AUTO-MONITORING TOOLS ===
|
|
1423
|
+
{
|
|
1424
|
+
name: 'get_auto_detected_issues',
|
|
1425
|
+
description: 'Get auto-detected monitoring issues grouped by error fingerprint. Shows recurring crashes, API failures, and rage clicks with frequency and user impact.',
|
|
1426
|
+
inputSchema: {
|
|
1427
|
+
type: 'object' as const,
|
|
1428
|
+
properties: {
|
|
1429
|
+
source: {
|
|
1430
|
+
type: 'string',
|
|
1431
|
+
enum: ['auto_crash', 'auto_api', 'auto_rage_click'],
|
|
1432
|
+
description: 'Filter by source type',
|
|
1433
|
+
},
|
|
1434
|
+
min_occurrences: {
|
|
1435
|
+
type: 'number',
|
|
1436
|
+
description: 'Min occurrence count (default: 1)',
|
|
1437
|
+
},
|
|
1438
|
+
since: {
|
|
1439
|
+
type: 'string',
|
|
1440
|
+
description: 'ISO date — only issues after this date (default: 7 days ago)',
|
|
1441
|
+
},
|
|
1442
|
+
limit: {
|
|
1443
|
+
type: 'number',
|
|
1444
|
+
description: 'Max results (default: 20)',
|
|
1445
|
+
},
|
|
1446
|
+
compact: {
|
|
1447
|
+
type: 'boolean',
|
|
1448
|
+
description: 'Compact mode: fingerprint, source, count only',
|
|
1449
|
+
},
|
|
1450
|
+
},
|
|
1451
|
+
},
|
|
1452
|
+
},
|
|
1453
|
+
{
|
|
1454
|
+
name: 'generate_tests_from_errors',
|
|
1455
|
+
description: 'Suggest QA test cases from auto-detected error patterns. Returns structured suggestions — does NOT auto-create test cases.',
|
|
1456
|
+
inputSchema: {
|
|
1457
|
+
type: 'object' as const,
|
|
1458
|
+
properties: {
|
|
1459
|
+
report_ids: {
|
|
1460
|
+
type: 'array',
|
|
1461
|
+
items: { type: 'string' },
|
|
1462
|
+
description: 'Specific report IDs. If omitted, uses top uncovered errors.',
|
|
1463
|
+
},
|
|
1464
|
+
limit: {
|
|
1465
|
+
type: 'number',
|
|
1466
|
+
description: 'Max suggestions (default: 5)',
|
|
1467
|
+
},
|
|
1468
|
+
},
|
|
1469
|
+
},
|
|
1470
|
+
},
|
|
1235
1471
|
];
|
|
1236
1472
|
|
|
1473
|
+
// === TEST EXECUTION INTELLIGENCE ===
|
|
1474
|
+
|
|
1475
|
+
async function getTestImpact(args: { changed_files: string[] }) {
|
|
1476
|
+
const projectId = requireProject();
|
|
1477
|
+
const changedFiles = args.changed_files || [];
|
|
1478
|
+
|
|
1479
|
+
if (changedFiles.length === 0) {
|
|
1480
|
+
return { affectedTests: [], message: 'No changed files provided.' };
|
|
1481
|
+
}
|
|
1482
|
+
|
|
1483
|
+
// Get all test cases for the project with their target routes
|
|
1484
|
+
const { data: testCases, error } = await supabase
|
|
1485
|
+
.from('test_cases')
|
|
1486
|
+
.select('id, title, target_route, qa_track, priority')
|
|
1487
|
+
.eq('project_id', projectId);
|
|
1488
|
+
|
|
1489
|
+
if (error) return { error: error.message };
|
|
1490
|
+
if (!testCases || testCases.length === 0) {
|
|
1491
|
+
return { affectedTests: [], message: 'No test cases found for this project.' };
|
|
1492
|
+
}
|
|
1493
|
+
|
|
1494
|
+
// Map changed files to affected test cases
|
|
1495
|
+
const affected: Array<{ testId: string; title: string; targetRoute: string; matchedFiles: string[]; qaTrack: string }> = [];
|
|
1496
|
+
|
|
1497
|
+
for (const tc of testCases) {
|
|
1498
|
+
const route = tc.target_route || '';
|
|
1499
|
+
const matchedFiles = changedFiles.filter(f => {
|
|
1500
|
+
// Match file path to route (e.g., src/app/api/tasks/route.ts -> /api/tasks)
|
|
1501
|
+
const normalized = f.replace(/\\/g, '/');
|
|
1502
|
+
const routeParts = route.split('/').filter(Boolean);
|
|
1503
|
+
return routeParts.some((part: string) => normalized.includes(part)) || normalized.includes(route.replace(/\//g, '/'));
|
|
1504
|
+
});
|
|
1505
|
+
|
|
1506
|
+
if (matchedFiles.length > 0) {
|
|
1507
|
+
affected.push({
|
|
1508
|
+
testId: tc.id,
|
|
1509
|
+
title: tc.title,
|
|
1510
|
+
targetRoute: route,
|
|
1511
|
+
matchedFiles,
|
|
1512
|
+
qaTrack: tc.qa_track,
|
|
1513
|
+
});
|
|
1514
|
+
}
|
|
1515
|
+
}
|
|
1516
|
+
|
|
1517
|
+
return {
|
|
1518
|
+
affectedTests: affected,
|
|
1519
|
+
totalTestCases: testCases.length,
|
|
1520
|
+
affectedCount: affected.length,
|
|
1521
|
+
changedFileCount: changedFiles.length,
|
|
1522
|
+
};
|
|
1523
|
+
}
|
|
1524
|
+
|
|
1525
|
+
async function getFlakyTests(args: { threshold?: number; limit?: number }) {
|
|
1526
|
+
const projectId = requireProject();
|
|
1527
|
+
const threshold = args.threshold || 5;
|
|
1528
|
+
const limit = args.limit || 20;
|
|
1529
|
+
|
|
1530
|
+
// Get test results grouped by test case
|
|
1531
|
+
const { data: results, error } = await supabase
|
|
1532
|
+
.from('test_results')
|
|
1533
|
+
.select('test_case_id, status, test_cases!inner(title, target_route, qa_track)')
|
|
1534
|
+
.eq('test_cases.project_id', projectId)
|
|
1535
|
+
.order('created_at', { ascending: false })
|
|
1536
|
+
.limit(5000);
|
|
1537
|
+
|
|
1538
|
+
if (error) return { error: error.message };
|
|
1539
|
+
if (!results || results.length === 0) {
|
|
1540
|
+
return { flakyTests: [], message: 'No test results found.' };
|
|
1541
|
+
}
|
|
1542
|
+
|
|
1543
|
+
// Group by test case and calculate flakiness
|
|
1544
|
+
const testStats: Record<string, { passes: number; fails: number; total: number; title: string; route: string; track: string }> = {};
|
|
1545
|
+
|
|
1546
|
+
for (const r of results) {
|
|
1547
|
+
const id = r.test_case_id;
|
|
1548
|
+
if (!testStats[id]) {
|
|
1549
|
+
const tc = r.test_cases as any;
|
|
1550
|
+
testStats[id] = { passes: 0, fails: 0, total: 0, title: tc?.title || '', route: tc?.target_route || '', track: tc?.qa_track || '' };
|
|
1551
|
+
}
|
|
1552
|
+
testStats[id].total++;
|
|
1553
|
+
if (r.status === 'pass') testStats[id].passes++;
|
|
1554
|
+
else if (r.status === 'fail') testStats[id].fails++;
|
|
1555
|
+
}
|
|
1556
|
+
|
|
1557
|
+
// Find flaky tests (have both passes and fails, with fail rate above threshold)
|
|
1558
|
+
const flaky = Object.entries(testStats)
|
|
1559
|
+
.filter(([, stats]) => {
|
|
1560
|
+
if (stats.total < 3) return false; // Need enough data
|
|
1561
|
+
const failRate = (stats.fails / stats.total) * 100;
|
|
1562
|
+
const passRate = (stats.passes / stats.total) * 100;
|
|
1563
|
+
return failRate >= threshold && passRate > 0; // Has both passes and fails
|
|
1564
|
+
})
|
|
1565
|
+
.map(([id, stats]) => ({
|
|
1566
|
+
testCaseId: id,
|
|
1567
|
+
title: stats.title,
|
|
1568
|
+
targetRoute: stats.route,
|
|
1569
|
+
qaTrack: stats.track,
|
|
1570
|
+
totalRuns: stats.total,
|
|
1571
|
+
failRate: Math.round((stats.fails / stats.total) * 100),
|
|
1572
|
+
passRate: Math.round((stats.passes / stats.total) * 100),
|
|
1573
|
+
}))
|
|
1574
|
+
.sort((a, b) => b.failRate - a.failRate)
|
|
1575
|
+
.slice(0, limit);
|
|
1576
|
+
|
|
1577
|
+
return {
|
|
1578
|
+
flakyTests: flaky,
|
|
1579
|
+
totalAnalyzed: Object.keys(testStats).length,
|
|
1580
|
+
flakyCount: flaky.length,
|
|
1581
|
+
threshold,
|
|
1582
|
+
};
|
|
1583
|
+
}
|
|
1584
|
+
|
|
1585
|
+
async function assessTestQuality(args: { test_case_ids?: string[]; limit?: number }) {
|
|
1586
|
+
const projectId = requireProject();
|
|
1587
|
+
const limit = args.limit || 20;
|
|
1588
|
+
|
|
1589
|
+
let query = supabase
|
|
1590
|
+
.from('test_cases')
|
|
1591
|
+
.select('id, title, steps, target_route, qa_track, priority')
|
|
1592
|
+
.eq('project_id', projectId)
|
|
1593
|
+
.limit(limit);
|
|
1594
|
+
|
|
1595
|
+
if (args.test_case_ids && args.test_case_ids.length > 0) {
|
|
1596
|
+
query = query.in('id', args.test_case_ids);
|
|
1597
|
+
}
|
|
1598
|
+
|
|
1599
|
+
const { data: testCases, error } = await query;
|
|
1600
|
+
if (error) return { error: error.message };
|
|
1601
|
+
if (!testCases || testCases.length === 0) {
|
|
1602
|
+
return { assessments: [], message: 'No test cases found.' };
|
|
1603
|
+
}
|
|
1604
|
+
|
|
1605
|
+
const assessments = testCases.map(tc => {
|
|
1606
|
+
const issues: string[] = [];
|
|
1607
|
+
const steps = tc.steps || [];
|
|
1608
|
+
|
|
1609
|
+
// Check for weak patterns
|
|
1610
|
+
if (steps.length < 2) {
|
|
1611
|
+
issues.push('Too few steps — test may not cover the full flow');
|
|
1612
|
+
}
|
|
1613
|
+
|
|
1614
|
+
const allStepsText = steps.map((s: any) => (typeof s === 'string' ? s : s.action || s.description || '')).join(' ');
|
|
1615
|
+
|
|
1616
|
+
// Vague assertions
|
|
1617
|
+
if (/should work|looks good|is correct|verify it works/i.test(allStepsText)) {
|
|
1618
|
+
issues.push('Vague assertions detected — use specific expected outcomes');
|
|
1619
|
+
}
|
|
1620
|
+
|
|
1621
|
+
// Missing edge cases
|
|
1622
|
+
if (!/error|invalid|empty|missing|unauthorized|forbidden|404|500/i.test(allStepsText)) {
|
|
1623
|
+
issues.push('No negative/error test cases — add edge case testing');
|
|
1624
|
+
}
|
|
1625
|
+
|
|
1626
|
+
// Generic descriptions
|
|
1627
|
+
if (/test the|check the|verify the/i.test(tc.title) && tc.title.length < 30) {
|
|
1628
|
+
issues.push('Generic test title — be more specific about what is being tested');
|
|
1629
|
+
}
|
|
1630
|
+
|
|
1631
|
+
// No specific UI elements referenced
|
|
1632
|
+
if (!/button|input|form|modal|dropdown|select|click|type|enter|submit/i.test(allStepsText)) {
|
|
1633
|
+
issues.push('No specific UI elements referenced — steps may be too abstract');
|
|
1634
|
+
}
|
|
1635
|
+
|
|
1636
|
+
const quality = issues.length === 0 ? 'good' : issues.length <= 2 ? 'needs-improvement' : 'poor';
|
|
1637
|
+
|
|
1638
|
+
return {
|
|
1639
|
+
testCaseId: tc.id,
|
|
1640
|
+
title: tc.title,
|
|
1641
|
+
targetRoute: tc.target_route,
|
|
1642
|
+
stepCount: steps.length,
|
|
1643
|
+
quality,
|
|
1644
|
+
issues,
|
|
1645
|
+
};
|
|
1646
|
+
});
|
|
1647
|
+
|
|
1648
|
+
const qualityCounts = {
|
|
1649
|
+
good: assessments.filter(a => a.quality === 'good').length,
|
|
1650
|
+
needsImprovement: assessments.filter(a => a.quality === 'needs-improvement').length,
|
|
1651
|
+
poor: assessments.filter(a => a.quality === 'poor').length,
|
|
1652
|
+
};
|
|
1653
|
+
|
|
1654
|
+
return {
|
|
1655
|
+
assessments,
|
|
1656
|
+
summary: qualityCounts,
|
|
1657
|
+
totalAssessed: assessments.length,
|
|
1658
|
+
};
|
|
1659
|
+
}
|
|
1660
|
+
|
|
1661
|
+
async function getTestExecutionSummary(args: { days?: number }) {
|
|
1662
|
+
const projectId = requireProject();
|
|
1663
|
+
const days = args.days || 30;
|
|
1664
|
+
const since = new Date(Date.now() - days * 24 * 60 * 60 * 1000).toISOString();
|
|
1665
|
+
|
|
1666
|
+
// Get test results
|
|
1667
|
+
const { data: results, error } = await supabase
|
|
1668
|
+
.from('test_results')
|
|
1669
|
+
.select('test_case_id, status, duration_ms, created_at, test_cases!inner(title, target_route)')
|
|
1670
|
+
.eq('test_cases.project_id', projectId)
|
|
1671
|
+
.gte('created_at', since)
|
|
1672
|
+
.order('created_at', { ascending: false });
|
|
1673
|
+
|
|
1674
|
+
if (error) return { error: error.message };
|
|
1675
|
+
if (!results || results.length === 0) {
|
|
1676
|
+
return { message: `No test results found in the last ${days} days.` };
|
|
1677
|
+
}
|
|
1678
|
+
|
|
1679
|
+
const totalRuns = results.length;
|
|
1680
|
+
const passed = results.filter(r => r.status === 'pass').length;
|
|
1681
|
+
const failed = results.filter(r => r.status === 'fail').length;
|
|
1682
|
+
const blocked = results.filter(r => r.status === 'blocked').length;
|
|
1683
|
+
|
|
1684
|
+
// Most failed tests
|
|
1685
|
+
const failCounts: Record<string, { count: number; title: string; route: string }> = {};
|
|
1686
|
+
for (const r of results.filter(r => r.status === 'fail')) {
|
|
1687
|
+
const id = r.test_case_id;
|
|
1688
|
+
const tc = r.test_cases as any;
|
|
1689
|
+
if (!failCounts[id]) {
|
|
1690
|
+
failCounts[id] = { count: 0, title: tc?.title || '', route: tc?.target_route || '' };
|
|
1691
|
+
}
|
|
1692
|
+
failCounts[id].count++;
|
|
1693
|
+
}
|
|
1694
|
+
const mostFailed = Object.entries(failCounts)
|
|
1695
|
+
.sort((a, b) => b[1].count - a[1].count)
|
|
1696
|
+
.slice(0, 5)
|
|
1697
|
+
.map(([id, data]) => ({ testCaseId: id, ...data }));
|
|
1698
|
+
|
|
1699
|
+
// Duration stats
|
|
1700
|
+
const durations = results.filter(r => r.duration_ms).map(r => r.duration_ms as number);
|
|
1701
|
+
const avgDuration = durations.length > 0 ? Math.round(durations.reduce((a, b) => a + b, 0) / durations.length) : 0;
|
|
1702
|
+
const maxDuration = durations.length > 0 ? Math.max(...durations) : 0;
|
|
1703
|
+
|
|
1704
|
+
return {
|
|
1705
|
+
period: `${days} days`,
|
|
1706
|
+
totalRuns,
|
|
1707
|
+
passRate: Math.round((passed / totalRuns) * 100),
|
|
1708
|
+
failRate: Math.round((failed / totalRuns) * 100),
|
|
1709
|
+
blockedCount: blocked,
|
|
1710
|
+
averageDurationMs: avgDuration,
|
|
1711
|
+
maxDurationMs: maxDuration,
|
|
1712
|
+
mostFailed,
|
|
1713
|
+
uniqueTestsCovered: new Set(results.map(r => r.test_case_id)).size,
|
|
1714
|
+
};
|
|
1715
|
+
}
|
|
1716
|
+
|
|
1717
|
+
async function checkTestFreshness(args: { limit?: number }) {
|
|
1718
|
+
const projectId = requireProject();
|
|
1719
|
+
const limit = args.limit || 20;
|
|
1720
|
+
|
|
1721
|
+
// Get test cases with their last update and last result
|
|
1722
|
+
const { data: testCases, error } = await supabase
|
|
1723
|
+
.from('test_cases')
|
|
1724
|
+
.select('id, title, target_route, updated_at, created_at')
|
|
1725
|
+
.eq('project_id', projectId)
|
|
1726
|
+
.order('updated_at', { ascending: true })
|
|
1727
|
+
.limit(limit);
|
|
1728
|
+
|
|
1729
|
+
if (error) return { error: error.message };
|
|
1730
|
+
if (!testCases || testCases.length === 0) {
|
|
1731
|
+
return { staleTests: [], message: 'No test cases found.' };
|
|
1732
|
+
}
|
|
1733
|
+
|
|
1734
|
+
const thirtyDaysAgo = new Date(Date.now() - 30 * 24 * 60 * 60 * 1000).toISOString();
|
|
1735
|
+
|
|
1736
|
+
const stale = testCases
|
|
1737
|
+
.filter(tc => tc.updated_at < thirtyDaysAgo)
|
|
1738
|
+
.map(tc => ({
|
|
1739
|
+
testCaseId: tc.id,
|
|
1740
|
+
title: tc.title,
|
|
1741
|
+
targetRoute: tc.target_route,
|
|
1742
|
+
lastUpdated: tc.updated_at,
|
|
1743
|
+
daysSinceUpdate: Math.round((Date.now() - new Date(tc.updated_at).getTime()) / (24 * 60 * 60 * 1000)),
|
|
1744
|
+
}));
|
|
1745
|
+
|
|
1746
|
+
return {
|
|
1747
|
+
staleTests: stale,
|
|
1748
|
+
totalTestCases: testCases.length,
|
|
1749
|
+
staleCount: stale.length,
|
|
1750
|
+
stalenessThreshold: '30 days',
|
|
1751
|
+
};
|
|
1752
|
+
}
|
|
1753
|
+
|
|
1754
|
+
async function getUntestedChanges(args: { changed_files?: string[] }) {
|
|
1755
|
+
const projectId = requireProject();
|
|
1756
|
+
|
|
1757
|
+
// Get all test cases to understand what's covered
|
|
1758
|
+
const { data: testCases, error } = await supabase
|
|
1759
|
+
.from('test_cases')
|
|
1760
|
+
.select('id, title, target_route')
|
|
1761
|
+
.eq('project_id', projectId);
|
|
1762
|
+
|
|
1763
|
+
if (error) return { error: error.message };
|
|
1764
|
+
|
|
1765
|
+
const coveredRoutes = new Set((testCases || []).map(tc => tc.target_route).filter(Boolean));
|
|
1766
|
+
|
|
1767
|
+
// If changed_files provided, check coverage
|
|
1768
|
+
const changedFiles = args.changed_files || [];
|
|
1769
|
+
|
|
1770
|
+
if (changedFiles.length === 0) {
|
|
1771
|
+
return {
|
|
1772
|
+
message: 'No changed files provided. Pass changed_files to check coverage.',
|
|
1773
|
+
totalCoveredRoutes: coveredRoutes.size,
|
|
1774
|
+
};
|
|
1775
|
+
}
|
|
1776
|
+
|
|
1777
|
+
// Map changed files to routes and check coverage
|
|
1778
|
+
const untested: Array<{ file: string; inferredRoute: string; reason: string }> = [];
|
|
1779
|
+
|
|
1780
|
+
for (const file of changedFiles) {
|
|
1781
|
+
const normalized = file.replace(/\\/g, '/');
|
|
1782
|
+
|
|
1783
|
+
// Extract route-like path from file
|
|
1784
|
+
let inferredRoute = '';
|
|
1785
|
+
|
|
1786
|
+
// Next.js app router: app/api/tasks/route.ts -> /api/tasks
|
|
1787
|
+
const appRouterMatch = normalized.match(/app\/(api\/[^/]+(?:\/[^/]+)*?)\/route\.\w+$/);
|
|
1788
|
+
if (appRouterMatch) {
|
|
1789
|
+
inferredRoute = '/' + appRouterMatch[1];
|
|
1790
|
+
}
|
|
1791
|
+
|
|
1792
|
+
// Pages router: pages/api/tasks.ts -> /api/tasks
|
|
1793
|
+
const pagesMatch = normalized.match(/pages\/(api\/[^.]+)\.\w+$/);
|
|
1794
|
+
if (!inferredRoute && pagesMatch) {
|
|
1795
|
+
inferredRoute = '/' + pagesMatch[1];
|
|
1796
|
+
}
|
|
1797
|
+
|
|
1798
|
+
// Component files
|
|
1799
|
+
const componentMatch = normalized.match(/(?:components|screens|pages)\/([^.]+)\.\w+$/);
|
|
1800
|
+
if (!inferredRoute && componentMatch) {
|
|
1801
|
+
inferredRoute = '/' + componentMatch[1].replace(/\\/g, '/');
|
|
1802
|
+
}
|
|
1803
|
+
|
|
1804
|
+
if (inferredRoute && !coveredRoutes.has(inferredRoute)) {
|
|
1805
|
+
untested.push({
|
|
1806
|
+
file,
|
|
1807
|
+
inferredRoute,
|
|
1808
|
+
reason: 'No test cases cover this route',
|
|
1809
|
+
});
|
|
1810
|
+
} else if (!inferredRoute) {
|
|
1811
|
+
// Can't map to a route — flag as potentially untested
|
|
1812
|
+
untested.push({
|
|
1813
|
+
file,
|
|
1814
|
+
inferredRoute: 'unknown',
|
|
1815
|
+
reason: 'Could not map file to a testable route',
|
|
1816
|
+
});
|
|
1817
|
+
}
|
|
1818
|
+
}
|
|
1819
|
+
|
|
1820
|
+
return {
|
|
1821
|
+
untestedChanges: untested,
|
|
1822
|
+
changedFileCount: changedFiles.length,
|
|
1823
|
+
untestedCount: untested.length,
|
|
1824
|
+
coveredRoutes: coveredRoutes.size,
|
|
1825
|
+
};
|
|
1826
|
+
}
|
|
1827
|
+
|
|
1828
|
+
// === AUTO-MONITORING HANDLERS ===
|
|
1829
|
+
|
|
1830
|
+
async function getAutoDetectedIssues(args: {
|
|
1831
|
+
source?: string;
|
|
1832
|
+
min_occurrences?: number;
|
|
1833
|
+
since?: string;
|
|
1834
|
+
limit?: number;
|
|
1835
|
+
compact?: boolean;
|
|
1836
|
+
}) {
|
|
1837
|
+
const projectId = requireProject();
|
|
1838
|
+
const since = args.since || new Date(Date.now() - 7 * 24 * 60 * 60 * 1000).toISOString();
|
|
1839
|
+
const limit = args.limit || 20;
|
|
1840
|
+
|
|
1841
|
+
let query = supabase
|
|
1842
|
+
.from('reports')
|
|
1843
|
+
.select('id, error_fingerprint, report_source, title, severity, reporter_id, sentry_event_id, created_at, app_context')
|
|
1844
|
+
.eq('project_id', projectId)
|
|
1845
|
+
.neq('report_source', 'manual')
|
|
1846
|
+
.not('error_fingerprint', 'is', null)
|
|
1847
|
+
.gte('created_at', since)
|
|
1848
|
+
.order('created_at', { ascending: false });
|
|
1849
|
+
|
|
1850
|
+
if (args.source) {
|
|
1851
|
+
query = query.eq('report_source', args.source);
|
|
1852
|
+
}
|
|
1853
|
+
|
|
1854
|
+
const { data, error } = await query;
|
|
1855
|
+
if (error) return { error: error.message };
|
|
1856
|
+
if (!data || data.length === 0) return { issues: [], total: 0 };
|
|
1857
|
+
|
|
1858
|
+
// Group by fingerprint
|
|
1859
|
+
const grouped = new Map<string, typeof data>();
|
|
1860
|
+
for (const report of data) {
|
|
1861
|
+
const fp = report.error_fingerprint!;
|
|
1862
|
+
if (!grouped.has(fp)) grouped.set(fp, []);
|
|
1863
|
+
grouped.get(fp)!.push(report);
|
|
1864
|
+
}
|
|
1865
|
+
|
|
1866
|
+
// Build issue summaries
|
|
1867
|
+
const issues = Array.from(grouped.entries())
|
|
1868
|
+
.map(([fingerprint, reports]) => {
|
|
1869
|
+
const uniqueReporters = new Set(reports.map(r => r.reporter_id));
|
|
1870
|
+
const sorted = reports.sort((a, b) => new Date(a.created_at).getTime() - new Date(b.created_at).getTime());
|
|
1871
|
+
const first = sorted[0];
|
|
1872
|
+
const last = sorted[sorted.length - 1];
|
|
1873
|
+
const route = (first.app_context as any)?.currentRoute || 'unknown';
|
|
1874
|
+
|
|
1875
|
+
return {
|
|
1876
|
+
fingerprint,
|
|
1877
|
+
source: first.report_source,
|
|
1878
|
+
message: first.title,
|
|
1879
|
+
route,
|
|
1880
|
+
occurrence_count: reports.length,
|
|
1881
|
+
affected_users: uniqueReporters.size,
|
|
1882
|
+
first_seen: first.created_at,
|
|
1883
|
+
last_seen: last.created_at,
|
|
1884
|
+
severity: first.severity,
|
|
1885
|
+
has_sentry_link: reports.some(r => r.sentry_event_id != null),
|
|
1886
|
+
sample_report_id: first.id,
|
|
1887
|
+
};
|
|
1888
|
+
})
|
|
1889
|
+
.filter(issue => issue.occurrence_count >= (args.min_occurrences || 1))
|
|
1890
|
+
.sort((a, b) => b.occurrence_count - a.occurrence_count)
|
|
1891
|
+
.slice(0, limit);
|
|
1892
|
+
|
|
1893
|
+
if (args.compact) {
|
|
1894
|
+
return {
|
|
1895
|
+
issues: issues.map(i => ({
|
|
1896
|
+
fingerprint: i.fingerprint,
|
|
1897
|
+
source: i.source,
|
|
1898
|
+
count: i.occurrence_count,
|
|
1899
|
+
users: i.affected_users,
|
|
1900
|
+
severity: i.severity,
|
|
1901
|
+
})),
|
|
1902
|
+
total: issues.length,
|
|
1903
|
+
};
|
|
1904
|
+
}
|
|
1905
|
+
|
|
1906
|
+
return { issues, total: issues.length };
|
|
1907
|
+
}
|
|
1908
|
+
|
|
1909
|
+
async function generateTestsFromErrors(args: {
|
|
1910
|
+
report_ids?: string[];
|
|
1911
|
+
limit?: number;
|
|
1912
|
+
}) {
|
|
1913
|
+
const projectId = requireProject();
|
|
1914
|
+
const limit = args.limit || 5;
|
|
1915
|
+
|
|
1916
|
+
let reports;
|
|
1917
|
+
if (args.report_ids?.length) {
|
|
1918
|
+
// Validate all UUIDs
|
|
1919
|
+
for (const id of args.report_ids) {
|
|
1920
|
+
if (!isValidUUID(id)) {
|
|
1921
|
+
return { error: `Invalid report_id format: ${id}` };
|
|
1922
|
+
}
|
|
1923
|
+
}
|
|
1924
|
+
|
|
1925
|
+
const { data, error } = await supabase
|
|
1926
|
+
.from('reports')
|
|
1927
|
+
.select('id, title, report_source, severity, app_context, error_fingerprint, description')
|
|
1928
|
+
.eq('project_id', projectId)
|
|
1929
|
+
.in('id', args.report_ids);
|
|
1930
|
+
if (error) return { error: error.message };
|
|
1931
|
+
reports = data;
|
|
1932
|
+
} else {
|
|
1933
|
+
// Get top uncovered auto-detected errors
|
|
1934
|
+
const { data, error } = await supabase
|
|
1935
|
+
.from('reports')
|
|
1936
|
+
.select('id, title, report_source, severity, app_context, error_fingerprint, description')
|
|
1937
|
+
.eq('project_id', projectId)
|
|
1938
|
+
.neq('report_source', 'manual')
|
|
1939
|
+
.not('error_fingerprint', 'is', null)
|
|
1940
|
+
.order('created_at', { ascending: false })
|
|
1941
|
+
.limit(50);
|
|
1942
|
+
if (error) return { error: error.message };
|
|
1943
|
+
|
|
1944
|
+
// Deduplicate by fingerprint, keep first occurrence
|
|
1945
|
+
const seen = new Set<string>();
|
|
1946
|
+
reports = (data || []).filter(r => {
|
|
1947
|
+
if (!r.error_fingerprint || seen.has(r.error_fingerprint)) return false;
|
|
1948
|
+
seen.add(r.error_fingerprint);
|
|
1949
|
+
return true;
|
|
1950
|
+
}).slice(0, limit);
|
|
1951
|
+
}
|
|
1952
|
+
|
|
1953
|
+
if (!reports?.length) return { suggestions: [] };
|
|
1954
|
+
|
|
1955
|
+
const suggestions = reports.map(report => {
|
|
1956
|
+
const route = (report.app_context as any)?.currentRoute || '/unknown';
|
|
1957
|
+
const source = report.report_source;
|
|
1958
|
+
const priority = report.severity === 'critical' ? 'P1' : report.severity === 'high' ? 'P1' : 'P2';
|
|
1959
|
+
|
|
1960
|
+
let suggestedSteps: string[];
|
|
1961
|
+
if (source === 'auto_crash') {
|
|
1962
|
+
suggestedSteps = [
|
|
1963
|
+
`Navigate to ${route}`,
|
|
1964
|
+
'Reproduce the action that triggered the crash',
|
|
1965
|
+
'Verify the page does not throw an unhandled error',
|
|
1966
|
+
'Verify error boundary displays a user-friendly message if error occurs',
|
|
1967
|
+
];
|
|
1968
|
+
} else if (source === 'auto_api') {
|
|
1969
|
+
const statusCode = (report.app_context as any)?.custom?.statusCode || 'error';
|
|
1970
|
+
const method = (report.app_context as any)?.custom?.requestMethod || 'API';
|
|
1971
|
+
suggestedSteps = [
|
|
1972
|
+
`Navigate to ${route}`,
|
|
1973
|
+
`Trigger the ${method} request that returned ${statusCode}`,
|
|
1974
|
+
'Verify the request succeeds or displays an appropriate error message',
|
|
1975
|
+
'Verify no data corruption occurs on failure',
|
|
1976
|
+
];
|
|
1977
|
+
} else {
|
|
1978
|
+
// rage_click or sentry_sync
|
|
1979
|
+
const target = (report.app_context as any)?.custom?.targetSelector || 'the element';
|
|
1980
|
+
suggestedSteps = [
|
|
1981
|
+
`Navigate to ${route}`,
|
|
1982
|
+
`Click on ${target}`,
|
|
1983
|
+
'Verify the element responds to interaction',
|
|
1984
|
+
'Verify loading state is shown if action takes time',
|
|
1985
|
+
];
|
|
1986
|
+
}
|
|
1987
|
+
|
|
1988
|
+
return {
|
|
1989
|
+
title: `Test: ${report.title?.replace('[Auto] ', '') || 'Auto-detected issue'}`,
|
|
1990
|
+
track: source === 'auto_crash' ? 'Stability' : source === 'auto_api' ? 'API' : 'UX',
|
|
1991
|
+
priority,
|
|
1992
|
+
rationale: `Auto-detected ${source?.replace('auto_', '')} on ${route}`,
|
|
1993
|
+
suggested_steps: suggestedSteps,
|
|
1994
|
+
source_report_id: report.id,
|
|
1995
|
+
route,
|
|
1996
|
+
};
|
|
1997
|
+
});
|
|
1998
|
+
|
|
1999
|
+
return { suggestions };
|
|
2000
|
+
}
|
|
2001
|
+
|
|
1237
2002
|
// === Project management handlers ===
|
|
1238
2003
|
|
|
1239
2004
|
async function listProjects() {
|
|
@@ -1358,7 +2123,7 @@ async function getReport(args: { report_id: string }) {
|
|
|
1358
2123
|
app_context: data.app_context,
|
|
1359
2124
|
device_info: data.device_info,
|
|
1360
2125
|
navigation_history: data.navigation_history,
|
|
1361
|
-
|
|
2126
|
+
screenshot_urls: data.screenshot_urls,
|
|
1362
2127
|
created_at: data.created_at,
|
|
1363
2128
|
reporter: data.tester ? {
|
|
1364
2129
|
name: data.tester.name,
|
|
@@ -1444,14 +2209,14 @@ async function updateReportStatus(args: {
|
|
|
1444
2209
|
return { success: true, message: `Report status updated to ${args.status}` };
|
|
1445
2210
|
}
|
|
1446
2211
|
|
|
1447
|
-
async function getReportContext(args: { report_id: string }) {
|
|
2212
|
+
async function getReportContext(args: { report_id: string; compact?: boolean }) {
|
|
1448
2213
|
if (!isValidUUID(args.report_id)) {
|
|
1449
2214
|
return { error: 'Invalid report_id format' };
|
|
1450
2215
|
}
|
|
1451
2216
|
|
|
1452
2217
|
const { data, error } = await supabase
|
|
1453
2218
|
.from('reports')
|
|
1454
|
-
.select('app_context, device_info, navigation_history, enhanced_context')
|
|
2219
|
+
.select('app_context, device_info, navigation_history, enhanced_context, screenshot_urls')
|
|
1455
2220
|
.eq('id', args.report_id)
|
|
1456
2221
|
.eq('project_id', currentProjectId) // Security: ensure report belongs to this project
|
|
1457
2222
|
.single();
|
|
@@ -1460,16 +2225,119 @@ async function getReportContext(args: { report_id: string }) {
|
|
|
1460
2225
|
return { error: error.message };
|
|
1461
2226
|
}
|
|
1462
2227
|
|
|
2228
|
+
// Compact: return app_context only (skip console/network/navigation)
|
|
2229
|
+
if (args.compact === true) {
|
|
2230
|
+
return {
|
|
2231
|
+
context: {
|
|
2232
|
+
app_context: data.app_context,
|
|
2233
|
+
screenshot_urls: data.screenshot_urls,
|
|
2234
|
+
},
|
|
2235
|
+
};
|
|
2236
|
+
}
|
|
2237
|
+
|
|
1463
2238
|
return {
|
|
1464
2239
|
context: {
|
|
1465
2240
|
app_context: data.app_context,
|
|
1466
2241
|
device_info: data.device_info,
|
|
1467
2242
|
navigation_history: data.navigation_history,
|
|
1468
2243
|
enhanced_context: data.enhanced_context || {},
|
|
2244
|
+
screenshot_urls: data.screenshot_urls,
|
|
1469
2245
|
},
|
|
1470
2246
|
};
|
|
1471
2247
|
}
|
|
1472
2248
|
|
|
2249
|
+
async function addReportComment(args: { report_id: string; message: string; author?: string }) {
|
|
2250
|
+
if (!isValidUUID(args.report_id)) return { error: 'Invalid report_id format' };
|
|
2251
|
+
if (!args.message?.trim()) return { error: 'Message is required' };
|
|
2252
|
+
|
|
2253
|
+
// Verify report exists
|
|
2254
|
+
const { data: report } = await supabase
|
|
2255
|
+
.from('reports').select('id').eq('id', args.report_id).eq('project_id', currentProjectId).single();
|
|
2256
|
+
if (!report) return { error: 'Report not found' };
|
|
2257
|
+
|
|
2258
|
+
// Find or create a discussion thread for this report
|
|
2259
|
+
const { data: existingThread } = await supabase
|
|
2260
|
+
.from('discussion_threads').select('id')
|
|
2261
|
+
.eq('project_id', currentProjectId).eq('report_id', args.report_id).eq('thread_type', 'report')
|
|
2262
|
+
.limit(1).single();
|
|
2263
|
+
|
|
2264
|
+
let threadId: string;
|
|
2265
|
+
if (existingThread) {
|
|
2266
|
+
threadId = existingThread.id;
|
|
2267
|
+
} else {
|
|
2268
|
+
const newId = crypto.randomUUID();
|
|
2269
|
+
const { error: threadErr } = await supabase
|
|
2270
|
+
.from('discussion_threads').insert({
|
|
2271
|
+
id: newId, project_id: currentProjectId, report_id: args.report_id,
|
|
2272
|
+
thread_type: 'report', subject: 'Bug Report Discussion', audience: 'all',
|
|
2273
|
+
priority: 'normal', created_by_admin: true, last_message_at: new Date().toISOString(),
|
|
2274
|
+
});
|
|
2275
|
+
if (threadErr) return { error: `Failed to create thread: ${threadErr.message}` };
|
|
2276
|
+
threadId = newId;
|
|
2277
|
+
}
|
|
2278
|
+
|
|
2279
|
+
const { data: msg, error: msgErr } = await supabase
|
|
2280
|
+
.from('discussion_messages').insert({
|
|
2281
|
+
thread_id: threadId, sender_type: 'admin', sender_name: args.author || 'Claude Code', content: args.message.trim(), content_type: 'text',
|
|
2282
|
+
}).select('id, content, created_at').single();
|
|
2283
|
+
|
|
2284
|
+
if (msgErr) return { error: `Failed to add comment: ${msgErr.message}` };
|
|
2285
|
+
return { success: true, comment: { id: msg.id, thread_id: threadId, content: msg.content, author: args.author || 'Claude Code', created_at: msg.created_at }, message: 'Comment added to report' };
|
|
2286
|
+
}
|
|
2287
|
+
|
|
2288
|
+
async function getReportComments(args: { report_id: string }) {
|
|
2289
|
+
if (!isValidUUID(args.report_id)) return { error: 'Invalid report_id format' };
|
|
2290
|
+
|
|
2291
|
+
const { data: threads } = await supabase
|
|
2292
|
+
.from('discussion_threads').select('id')
|
|
2293
|
+
.eq('project_id', currentProjectId).eq('report_id', args.report_id).order('created_at', { ascending: true });
|
|
2294
|
+
|
|
2295
|
+
if (!threads || threads.length === 0) return { comments: [], total: 0, message: 'No comments on this report' };
|
|
2296
|
+
|
|
2297
|
+
const { data: messages, error } = await supabase
|
|
2298
|
+
.from('discussion_messages').select('id, thread_id, sender_type, content, content_type, created_at, attachments')
|
|
2299
|
+
.in('thread_id', threads.map(t => t.id)).order('created_at', { ascending: true });
|
|
2300
|
+
|
|
2301
|
+
if (error) return { error: error.message };
|
|
2302
|
+
return { comments: (messages || []).map(m => ({ id: m.id, sender_type: m.sender_type, content: m.content, created_at: m.created_at, attachments: m.attachments })), total: (messages || []).length };
|
|
2303
|
+
}
|
|
2304
|
+
|
|
2305
|
+
async function resolveConversations(args: { thread_ids: string[]; resolved?: boolean }) {
|
|
2306
|
+
if (!args.thread_ids || args.thread_ids.length === 0) {
|
|
2307
|
+
return { error: 'At least one thread_id is required' };
|
|
2308
|
+
}
|
|
2309
|
+
if (args.thread_ids.length > 50) {
|
|
2310
|
+
return { error: 'Maximum 50 threads per request' };
|
|
2311
|
+
}
|
|
2312
|
+
for (const id of args.thread_ids) {
|
|
2313
|
+
if (!isValidUUID(id)) return { error: `Invalid thread_id format: ${id}` };
|
|
2314
|
+
}
|
|
2315
|
+
|
|
2316
|
+
const resolved = args.resolved !== false;
|
|
2317
|
+
|
|
2318
|
+
const { data, error } = await supabase
|
|
2319
|
+
.from('discussion_threads')
|
|
2320
|
+
.update({ is_resolved: resolved })
|
|
2321
|
+
.eq('project_id', currentProjectId)
|
|
2322
|
+
.in('id', args.thread_ids)
|
|
2323
|
+
.select('id, subject, is_resolved');
|
|
2324
|
+
|
|
2325
|
+
if (error) return { error: error.message };
|
|
2326
|
+
|
|
2327
|
+
const updated = data || [];
|
|
2328
|
+
const updatedIds = new Set(updated.map((t: any) => t.id));
|
|
2329
|
+
const notFound = args.thread_ids.filter(id => !updatedIds.has(id));
|
|
2330
|
+
|
|
2331
|
+
return {
|
|
2332
|
+
success: true,
|
|
2333
|
+
updatedCount: updated.length,
|
|
2334
|
+
resolved,
|
|
2335
|
+
notFound: notFound.length > 0 ? notFound : undefined,
|
|
2336
|
+
threads: updated.map((t: any) => ({ id: t.id, subject: t.subject, is_resolved: t.is_resolved })),
|
|
2337
|
+
message: `${resolved ? 'Resolved' : 'Reopened'} ${updated.length} conversation(s).${notFound.length > 0 ? ` ${notFound.length} not found.` : ''}`,
|
|
2338
|
+
};
|
|
2339
|
+
}
|
|
2340
|
+
|
|
1473
2341
|
async function getProjectInfo() {
|
|
1474
2342
|
// Get project details
|
|
1475
2343
|
const { data: project, error: projectError } = await supabase
|
|
@@ -1810,6 +2678,7 @@ async function listTestCases(args: {
|
|
|
1810
2678
|
missing_target_route?: boolean;
|
|
1811
2679
|
limit?: number;
|
|
1812
2680
|
offset?: number;
|
|
2681
|
+
compact?: boolean;
|
|
1813
2682
|
}) {
|
|
1814
2683
|
let query = supabase
|
|
1815
2684
|
.from('test_cases')
|
|
@@ -1856,6 +2725,20 @@ async function listTestCases(args: {
|
|
|
1856
2725
|
);
|
|
1857
2726
|
}
|
|
1858
2727
|
|
|
2728
|
+
// Compact: return minimal fields only
|
|
2729
|
+
if (args.compact === true) {
|
|
2730
|
+
return {
|
|
2731
|
+
count: testCases.length,
|
|
2732
|
+
testCases: testCases.map((tc: any) => ({
|
|
2733
|
+
id: tc.id,
|
|
2734
|
+
testKey: tc.test_key,
|
|
2735
|
+
title: tc.title,
|
|
2736
|
+
priority: tc.priority,
|
|
2737
|
+
})),
|
|
2738
|
+
pagination: { limit, offset, hasMore: testCases.length === limit },
|
|
2739
|
+
};
|
|
2740
|
+
}
|
|
2741
|
+
|
|
1859
2742
|
return {
|
|
1860
2743
|
count: testCases.length,
|
|
1861
2744
|
testCases: testCases.map((tc: any) => ({
|
|
@@ -2467,8 +3350,8 @@ async function getCoverageMatrix(args: {
|
|
|
2467
3350
|
include_execution_data?: boolean;
|
|
2468
3351
|
include_bug_counts?: boolean;
|
|
2469
3352
|
}) {
|
|
2470
|
-
const includeExecution = args.include_execution_data
|
|
2471
|
-
const includeBugs = args.include_bug_counts
|
|
3353
|
+
const includeExecution = args.include_execution_data === true;
|
|
3354
|
+
const includeBugs = args.include_bug_counts === true;
|
|
2472
3355
|
|
|
2473
3356
|
// Get tracks
|
|
2474
3357
|
const { data: tracks } = await supabase
|
|
@@ -4167,7 +5050,7 @@ async function markFixedWithCommit(args: {
|
|
|
4167
5050
|
status: 'resolved',
|
|
4168
5051
|
resolved_at: new Date().toISOString(),
|
|
4169
5052
|
resolution_notes: args.resolution_notes || `Fixed in commit ${args.commit_sha.slice(0, 7)}`,
|
|
4170
|
-
notify_tester: args.notify_tester
|
|
5053
|
+
notify_tester: args.notify_tester !== false, // Default: notify tester. Pass false to silently resolve.
|
|
4171
5054
|
code_context: {
|
|
4172
5055
|
...existingContext,
|
|
4173
5056
|
fix: {
|
|
@@ -4190,7 +5073,8 @@ async function markFixedWithCommit(args: {
|
|
|
4190
5073
|
return { error: error.message };
|
|
4191
5074
|
}
|
|
4192
5075
|
|
|
4193
|
-
const
|
|
5076
|
+
const notifyTester = args.notify_tester !== false;
|
|
5077
|
+
const notificationStatus = notifyTester
|
|
4194
5078
|
? 'The original tester will be notified and assigned a verification task.'
|
|
4195
5079
|
: 'No notification sent (silent resolve). A verification task was created.';
|
|
4196
5080
|
|
|
@@ -4199,7 +5083,7 @@ async function markFixedWithCommit(args: {
|
|
|
4199
5083
|
message: `Bug marked as fixed in commit ${args.commit_sha.slice(0, 7)}. ${notificationStatus}`,
|
|
4200
5084
|
report_id: args.report_id,
|
|
4201
5085
|
commit: args.commit_sha,
|
|
4202
|
-
tester_notified:
|
|
5086
|
+
tester_notified: notifyTester,
|
|
4203
5087
|
next_steps: [
|
|
4204
5088
|
'Consider running create_regression_test to prevent this bug from recurring',
|
|
4205
5089
|
'Push your changes to trigger CI/CD',
|
|
@@ -5039,16 +5923,20 @@ Which files or areas would you like me to analyze?`;
|
|
|
5039
5923
|
async function listTesters(args: {
|
|
5040
5924
|
status?: string;
|
|
5041
5925
|
platform?: string;
|
|
5926
|
+
role?: string;
|
|
5042
5927
|
}) {
|
|
5043
5928
|
let query = supabase
|
|
5044
5929
|
.from('testers')
|
|
5045
|
-
.select('id, name, email, status, platforms, tier, assigned_count, completed_count, notes, created_at')
|
|
5930
|
+
.select('id, name, email, status, platforms, tier, assigned_count, completed_count, notes, role, created_at')
|
|
5046
5931
|
.eq('project_id', currentProjectId)
|
|
5047
5932
|
.order('name', { ascending: true });
|
|
5048
5933
|
|
|
5049
5934
|
if (args.status) {
|
|
5050
5935
|
query = query.eq('status', args.status);
|
|
5051
5936
|
}
|
|
5937
|
+
if (args.role) {
|
|
5938
|
+
query = query.eq('role', args.role);
|
|
5939
|
+
}
|
|
5052
5940
|
|
|
5053
5941
|
const { data, error } = await query;
|
|
5054
5942
|
|
|
@@ -5077,6 +5965,7 @@ async function listTesters(args: {
|
|
|
5077
5965
|
assignedCount: t.assigned_count,
|
|
5078
5966
|
completedCount: t.completed_count,
|
|
5079
5967
|
notes: t.notes,
|
|
5968
|
+
role: t.role,
|
|
5080
5969
|
})),
|
|
5081
5970
|
};
|
|
5082
5971
|
}
|
|
@@ -5321,6 +6210,22 @@ async function assignTests(args: {
|
|
|
5321
6210
|
status: 'pending',
|
|
5322
6211
|
}));
|
|
5323
6212
|
|
|
6213
|
+
  // Helper: after assignments change, sync the test run's total_tests counter
  // so the run's reported size always matches the actual assignment rows.
  // Closes over `args` of the enclosing assignTests call.
  async function syncRunCounter() {
    // No-op when the assignments are not attached to a test run.
    if (!args.test_run_id) return;
    // head:true with count:'exact' fetches only the row count, not the rows.
    const { count } = await supabase
      .from('test_assignments')
      .select('id', { count: 'exact', head: true })
      .eq('test_run_id', args.test_run_id)
      .eq('project_id', currentProjectId);
    // Only write back when a count was actually returned.
    if (count !== null) {
      await supabase
        .from('test_runs')
        .update({ total_tests: count })
        .eq('id', args.test_run_id);
    }
  }
|
|
6228
|
+
|
|
5324
6229
|
// Insert — use upsert-like approach: insert and handle conflicts
|
|
5325
6230
|
const { data: inserted, error: insertErr } = await supabase
|
|
5326
6231
|
.from('test_assignments')
|
|
@@ -5349,6 +6254,8 @@ async function assignTests(args: {
|
|
|
5349
6254
|
}
|
|
5350
6255
|
}
|
|
5351
6256
|
|
|
6257
|
+
await syncRunCounter();
|
|
6258
|
+
|
|
5352
6259
|
return {
|
|
5353
6260
|
success: true,
|
|
5354
6261
|
created: created.length,
|
|
@@ -5361,6 +6268,8 @@ async function assignTests(args: {
|
|
|
5361
6268
|
return { error: insertErr.message };
|
|
5362
6269
|
}
|
|
5363
6270
|
|
|
6271
|
+
await syncRunCounter();
|
|
6272
|
+
|
|
5364
6273
|
return {
|
|
5365
6274
|
success: true,
|
|
5366
6275
|
created: (inserted || []).length,
|
|
@@ -5371,6 +6280,82 @@ async function assignTests(args: {
|
|
|
5371
6280
|
};
|
|
5372
6281
|
}
|
|
5373
6282
|
|
|
6283
|
+
async function unassignTests(args: {
|
|
6284
|
+
assignment_ids: string[];
|
|
6285
|
+
}) {
|
|
6286
|
+
if (!args.assignment_ids || args.assignment_ids.length === 0) {
|
|
6287
|
+
return { error: 'At least one assignment_id is required' };
|
|
6288
|
+
}
|
|
6289
|
+
if (args.assignment_ids.length > 50) {
|
|
6290
|
+
return { error: 'Maximum 50 assignments per unassign batch' };
|
|
6291
|
+
}
|
|
6292
|
+
const invalidIds = args.assignment_ids.filter(id => !isValidUUID(id));
|
|
6293
|
+
if (invalidIds.length > 0) {
|
|
6294
|
+
return { error: `Invalid UUID(s): ${invalidIds.join(', ')}` };
|
|
6295
|
+
}
|
|
6296
|
+
|
|
6297
|
+
// Verify assignments exist and belong to this project
|
|
6298
|
+
const { data: existing, error: lookupErr } = await supabase
|
|
6299
|
+
.from('test_assignments')
|
|
6300
|
+
.select('id, test_run_id, test_case:test_cases(test_key, title), tester:testers(name)')
|
|
6301
|
+
.eq('project_id', currentProjectId)
|
|
6302
|
+
.in('id', args.assignment_ids);
|
|
6303
|
+
|
|
6304
|
+
if (lookupErr) return { error: lookupErr.message };
|
|
6305
|
+
|
|
6306
|
+
if (!existing || existing.length === 0) {
|
|
6307
|
+
return { error: 'No matching assignments found in this project' };
|
|
6308
|
+
}
|
|
6309
|
+
|
|
6310
|
+
const foundIds = new Set(existing.map((a: any) => a.id));
|
|
6311
|
+
const notFound = args.assignment_ids.filter(id => !foundIds.has(id));
|
|
6312
|
+
|
|
6313
|
+
// Delete the assignments
|
|
6314
|
+
const { error: deleteErr } = await supabase
|
|
6315
|
+
.from('test_assignments')
|
|
6316
|
+
.delete()
|
|
6317
|
+
.eq('project_id', currentProjectId)
|
|
6318
|
+
.in('id', args.assignment_ids);
|
|
6319
|
+
|
|
6320
|
+
if (deleteErr) return { error: deleteErr.message };
|
|
6321
|
+
|
|
6322
|
+
// Sync run counters for any affected test runs
|
|
6323
|
+
const affectedRunIds = [...new Set(existing.filter((a: any) => a.test_run_id).map((a: any) => a.test_run_id))];
|
|
6324
|
+
for (const runId of affectedRunIds) {
|
|
6325
|
+
const { count } = await supabase
|
|
6326
|
+
.from('test_assignments')
|
|
6327
|
+
.select('id', { count: 'exact', head: true })
|
|
6328
|
+
.eq('test_run_id', runId)
|
|
6329
|
+
.eq('project_id', currentProjectId);
|
|
6330
|
+
if (count !== null) {
|
|
6331
|
+
await supabase.from('test_runs').update({ total_tests: count }).eq('id', runId);
|
|
6332
|
+
}
|
|
6333
|
+
}
|
|
6334
|
+
|
|
6335
|
+
const deleted = existing.map((a: Record<string, unknown>) => {
|
|
6336
|
+
const tc = a.test_case as Record<string, string> | null;
|
|
6337
|
+
const tester = a.tester as Record<string, string> | null;
|
|
6338
|
+
return {
|
|
6339
|
+
id: a.id as string,
|
|
6340
|
+
testKey: tc?.test_key || null,
|
|
6341
|
+
testTitle: tc?.title || null,
|
|
6342
|
+
testerName: tester?.name || null,
|
|
6343
|
+
};
|
|
6344
|
+
});
|
|
6345
|
+
|
|
6346
|
+
const firstKey = deleted[0]?.testKey;
|
|
6347
|
+
|
|
6348
|
+
return {
|
|
6349
|
+
success: true,
|
|
6350
|
+
deletedCount: existing.length,
|
|
6351
|
+
deleted,
|
|
6352
|
+
notFound: notFound.length > 0 ? notFound : undefined,
|
|
6353
|
+
message: existing.length === 1
|
|
6354
|
+
? `Removed 1 assignment${firstKey ? ` (${firstKey})` : ''}`
|
|
6355
|
+
: `Removed ${existing.length} assignment(s)`,
|
|
6356
|
+
};
|
|
6357
|
+
}
|
|
6358
|
+
|
|
5374
6359
|
async function getTesterWorkload(args: {
|
|
5375
6360
|
tester_id: string;
|
|
5376
6361
|
}) {
|
|
@@ -5459,6 +6444,7 @@ async function createTester(args: {
|
|
|
5459
6444
|
platforms?: string[];
|
|
5460
6445
|
tier?: number;
|
|
5461
6446
|
notes?: string;
|
|
6447
|
+
role?: string;
|
|
5462
6448
|
}) {
|
|
5463
6449
|
if (!args.name || args.name.trim().length === 0) {
|
|
5464
6450
|
return { error: 'Tester name is required' };
|
|
@@ -5489,8 +6475,9 @@ async function createTester(args: {
|
|
|
5489
6475
|
tier: args.tier ?? 1,
|
|
5490
6476
|
notes: args.notes?.trim() || null,
|
|
5491
6477
|
status: 'active',
|
|
6478
|
+
role: args.role || 'tester',
|
|
5492
6479
|
})
|
|
5493
|
-
.select('id, name, email, status, platforms, tier, notes, created_at')
|
|
6480
|
+
.select('id, name, email, status, platforms, tier, notes, role, created_at')
|
|
5494
6481
|
.single();
|
|
5495
6482
|
|
|
5496
6483
|
if (error) {
|
|
@@ -5510,6 +6497,7 @@ async function createTester(args: {
|
|
|
5510
6497
|
platforms: data.platforms,
|
|
5511
6498
|
tier: data.tier,
|
|
5512
6499
|
notes: data.notes,
|
|
6500
|
+
role: data.role,
|
|
5513
6501
|
createdAt: data.created_at,
|
|
5514
6502
|
},
|
|
5515
6503
|
message: `Tester "${data.name}" added to the project. Use assign_tests to give them test cases.`,
|
|
@@ -5796,6 +6784,8 @@ async function getTesterLeaderboard(args: {
|
|
|
5796
6784
|
|
|
5797
6785
|
async function exportTestResults(args: {
|
|
5798
6786
|
test_run_id: string;
|
|
6787
|
+
compact?: boolean;
|
|
6788
|
+
limit?: number;
|
|
5799
6789
|
}) {
|
|
5800
6790
|
if (!isValidUUID(args.test_run_id)) {
|
|
5801
6791
|
return { error: 'Invalid test_run_id format' };
|
|
@@ -5844,27 +6834,42 @@ async function exportTestResults(args: {
|
|
|
5844
6834
|
const passCount = all.filter(a => a.status === 'passed').length;
|
|
5845
6835
|
const failCount = all.filter(a => a.status === 'failed').length;
|
|
5846
6836
|
|
|
6837
|
+
const testRunInfo = {
|
|
6838
|
+
id: run.id,
|
|
6839
|
+
name: run.name,
|
|
6840
|
+
description: run.description,
|
|
6841
|
+
status: run.status,
|
|
6842
|
+
startedAt: run.started_at,
|
|
6843
|
+
completedAt: run.completed_at,
|
|
6844
|
+
createdAt: run.created_at,
|
|
6845
|
+
};
|
|
6846
|
+
|
|
6847
|
+
const summaryInfo = {
|
|
6848
|
+
totalAssignments: all.length,
|
|
6849
|
+
passed: passCount,
|
|
6850
|
+
failed: failCount,
|
|
6851
|
+
blocked: all.filter(a => a.status === 'blocked').length,
|
|
6852
|
+
skipped: all.filter(a => a.status === 'skipped').length,
|
|
6853
|
+
pending: all.filter(a => a.status === 'pending').length,
|
|
6854
|
+
inProgress: all.filter(a => a.status === 'in_progress').length,
|
|
6855
|
+
passRate: all.length > 0 ? Math.round((passCount / all.length) * 100) : 0,
|
|
6856
|
+
};
|
|
6857
|
+
|
|
6858
|
+
// Compact: return test run info + summary only, no assignments array
|
|
6859
|
+
if (args.compact === true) {
|
|
6860
|
+
return { testRun: testRunInfo, summary: summaryInfo };
|
|
6861
|
+
}
|
|
6862
|
+
|
|
6863
|
+
// Apply limit (default: 100, max: 500)
|
|
6864
|
+
const assignmentLimit = Math.min(Math.max(args.limit ?? 100, 1), 500);
|
|
6865
|
+
const limitedAssignments = all.slice(0, assignmentLimit);
|
|
6866
|
+
|
|
5847
6867
|
return {
|
|
5848
|
-
testRun:
|
|
5849
|
-
|
|
5850
|
-
|
|
5851
|
-
|
|
5852
|
-
|
|
5853
|
-
startedAt: run.started_at,
|
|
5854
|
-
completedAt: run.completed_at,
|
|
5855
|
-
createdAt: run.created_at,
|
|
5856
|
-
},
|
|
5857
|
-
summary: {
|
|
5858
|
-
totalAssignments: all.length,
|
|
5859
|
-
passed: passCount,
|
|
5860
|
-
failed: failCount,
|
|
5861
|
-
blocked: all.filter(a => a.status === 'blocked').length,
|
|
5862
|
-
skipped: all.filter(a => a.status === 'skipped').length,
|
|
5863
|
-
pending: all.filter(a => a.status === 'pending').length,
|
|
5864
|
-
inProgress: all.filter(a => a.status === 'in_progress').length,
|
|
5865
|
-
passRate: all.length > 0 ? Math.round((passCount / all.length) * 100) : 0,
|
|
5866
|
-
},
|
|
5867
|
-
assignments: all.map((a: any) => ({
|
|
6868
|
+
testRun: testRunInfo,
|
|
6869
|
+
summary: summaryInfo,
|
|
6870
|
+
assignmentsReturned: limitedAssignments.length,
|
|
6871
|
+
assignmentsTotal: all.length,
|
|
6872
|
+
assignments: limitedAssignments.map((a: any) => ({
|
|
5868
6873
|
id: a.id,
|
|
5869
6874
|
status: a.status,
|
|
5870
6875
|
assignedAt: a.assigned_at,
|
|
@@ -5999,6 +7004,15 @@ async function main() {
|
|
|
5999
7004
|
case 'get_report_context':
|
|
6000
7005
|
result = await getReportContext(args as any);
|
|
6001
7006
|
break;
|
|
7007
|
+
case 'add_report_comment':
|
|
7008
|
+
result = await addReportComment(args as any);
|
|
7009
|
+
break;
|
|
7010
|
+
case 'get_report_comments':
|
|
7011
|
+
result = await getReportComments(args as any);
|
|
7012
|
+
break;
|
|
7013
|
+
case 'resolve_conversations':
|
|
7014
|
+
result = await resolveConversations(args as any);
|
|
7015
|
+
break;
|
|
6002
7016
|
case 'get_project_info':
|
|
6003
7017
|
result = await getProjectInfo();
|
|
6004
7018
|
break;
|
|
@@ -6112,6 +7126,9 @@ async function main() {
|
|
|
6112
7126
|
case 'assign_tests':
|
|
6113
7127
|
result = await assignTests(args as any);
|
|
6114
7128
|
break;
|
|
7129
|
+
case 'unassign_tests':
|
|
7130
|
+
result = await unassignTests(args as any);
|
|
7131
|
+
break;
|
|
6115
7132
|
case 'get_tester_workload':
|
|
6116
7133
|
result = await getTesterWorkload(args as any);
|
|
6117
7134
|
break;
|
|
@@ -6147,6 +7164,32 @@ async function main() {
|
|
|
6147
7164
|
case 'get_current_project':
|
|
6148
7165
|
result = getCurrentProject();
|
|
6149
7166
|
break;
|
|
7167
|
+
// === TEST EXECUTION INTELLIGENCE ===
|
|
7168
|
+
case 'get_test_impact':
|
|
7169
|
+
result = await getTestImpact(args as any);
|
|
7170
|
+
break;
|
|
7171
|
+
case 'get_flaky_tests':
|
|
7172
|
+
result = await getFlakyTests(args as any);
|
|
7173
|
+
break;
|
|
7174
|
+
case 'assess_test_quality':
|
|
7175
|
+
result = await assessTestQuality(args as any);
|
|
7176
|
+
break;
|
|
7177
|
+
case 'get_test_execution_summary':
|
|
7178
|
+
result = await getTestExecutionSummary(args as any);
|
|
7179
|
+
break;
|
|
7180
|
+
case 'check_test_freshness':
|
|
7181
|
+
result = await checkTestFreshness(args as any);
|
|
7182
|
+
break;
|
|
7183
|
+
case 'get_untested_changes':
|
|
7184
|
+
result = await getUntestedChanges(args as any);
|
|
7185
|
+
break;
|
|
7186
|
+
// === AUTO-MONITORING TOOLS ===
|
|
7187
|
+
case 'get_auto_detected_issues':
|
|
7188
|
+
result = await getAutoDetectedIssues(args as any);
|
|
7189
|
+
break;
|
|
7190
|
+
case 'generate_tests_from_errors':
|
|
7191
|
+
result = await generateTestsFromErrors(args as any);
|
|
7192
|
+
break;
|
|
6150
7193
|
default:
|
|
6151
7194
|
return {
|
|
6152
7195
|
content: [{ type: 'text', text: `Unknown tool: ${name}` }],
|