@bbearai/mcp-server 0.6.0 → 0.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.js +930 -37
- package/package.json +2 -1
- package/src/index.ts +1023 -38
package/dist/index.js
CHANGED
|
@@ -128,7 +128,7 @@ const tools = [
|
|
|
128
128
|
},
|
|
129
129
|
status: {
|
|
130
130
|
type: 'string',
|
|
131
|
-
enum: ['new', 'triaging', 'confirmed', 'in_progress', 'fixed', '
|
|
131
|
+
enum: ['new', 'triaging', 'confirmed', 'in_progress', 'fixed', 'ready_to_test', 'verified', 'resolved', 'reviewed', 'closed', 'wont_fix', 'duplicate'],
|
|
132
132
|
description: 'The new status for the report',
|
|
133
133
|
},
|
|
134
134
|
resolution_notes: {
|
|
@@ -141,7 +141,47 @@ const tools = [
|
|
|
141
141
|
},
|
|
142
142
|
{
|
|
143
143
|
name: 'get_report_context',
|
|
144
|
-
description: 'Get the full debugging context for a report including console logs, network requests, and navigation history',
|
|
144
|
+
description: 'Get the full debugging context for a report including console logs, network requests, and navigation history. Use compact=true for app_context summary only (no console/network/navigation).',
|
|
145
|
+
inputSchema: {
|
|
146
|
+
type: 'object',
|
|
147
|
+
properties: {
|
|
148
|
+
report_id: {
|
|
149
|
+
type: 'string',
|
|
150
|
+
description: 'The UUID of the report',
|
|
151
|
+
},
|
|
152
|
+
compact: {
|
|
153
|
+
type: 'boolean',
|
|
154
|
+
description: 'Compact mode: returns app_context only, skips console logs, network requests, and navigation history. (default: false)',
|
|
155
|
+
},
|
|
156
|
+
},
|
|
157
|
+
required: ['report_id'],
|
|
158
|
+
},
|
|
159
|
+
},
|
|
160
|
+
{
|
|
161
|
+
name: 'add_report_comment',
|
|
162
|
+
description: 'Add a comment/note to a bug report thread without changing its status. Use this for follow-up questions, investigation notes, or developer-tester communication.',
|
|
163
|
+
inputSchema: {
|
|
164
|
+
type: 'object',
|
|
165
|
+
properties: {
|
|
166
|
+
report_id: {
|
|
167
|
+
type: 'string',
|
|
168
|
+
description: 'The UUID of the report to comment on',
|
|
169
|
+
},
|
|
170
|
+
message: {
|
|
171
|
+
type: 'string',
|
|
172
|
+
description: 'The comment/note content',
|
|
173
|
+
},
|
|
174
|
+
author: {
|
|
175
|
+
type: 'string',
|
|
176
|
+
description: 'Optional author name (defaults to "Claude Code")',
|
|
177
|
+
},
|
|
178
|
+
},
|
|
179
|
+
required: ['report_id', 'message'],
|
|
180
|
+
},
|
|
181
|
+
},
|
|
182
|
+
{
|
|
183
|
+
name: 'get_report_comments',
|
|
184
|
+
description: 'Get all comments/notes on a bug report in chronological order. Returns the full discussion thread.',
|
|
145
185
|
inputSchema: {
|
|
146
186
|
type: 'object',
|
|
147
187
|
properties: {
|
|
@@ -308,7 +348,7 @@ const tools = [
|
|
|
308
348
|
},
|
|
309
349
|
{
|
|
310
350
|
name: 'list_test_cases',
|
|
311
|
-
description: 'List all test cases in the project. Returns test_key, title, target_route, and other metadata. Use this to see existing tests before updating them.',
|
|
351
|
+
description: 'List all test cases in the project. Returns test_key, title, target_route, and other metadata. Use this to see existing tests before updating them. Use compact=true for id, test_key, title, and priority only (saves tokens).',
|
|
312
352
|
inputSchema: {
|
|
313
353
|
type: 'object',
|
|
314
354
|
properties: {
|
|
@@ -333,6 +373,10 @@ const tools = [
|
|
|
333
373
|
type: 'number',
|
|
334
374
|
description: 'Offset for pagination (default 0)',
|
|
335
375
|
},
|
|
376
|
+
compact: {
|
|
377
|
+
type: 'boolean',
|
|
378
|
+
description: 'Compact mode: returns id, test_key, title, and priority only. (default: false)',
|
|
379
|
+
},
|
|
336
380
|
},
|
|
337
381
|
},
|
|
338
382
|
},
|
|
@@ -441,7 +485,7 @@ const tools = [
|
|
|
441
485
|
},
|
|
442
486
|
notify_tester: {
|
|
443
487
|
type: 'boolean',
|
|
444
|
-
description: '
|
|
488
|
+
description: 'Notify the original tester about the fix with a message and verification task. Default: true. Set to false for silent resolve.',
|
|
445
489
|
},
|
|
446
490
|
},
|
|
447
491
|
required: ['report_id', 'commit_sha'],
|
|
@@ -683,17 +727,17 @@ const tools = [
|
|
|
683
727
|
},
|
|
684
728
|
{
|
|
685
729
|
name: 'get_coverage_matrix',
|
|
686
|
-
description: 'Get a comprehensive Route × Track coverage matrix showing test counts, pass rates, and execution data. Use this for a complete view of test coverage.',
|
|
730
|
+
description: 'Get a comprehensive Route × Track coverage matrix showing test counts, pass rates, and execution data. Use this for a complete view of test coverage. Execution data and bug counts are opt-in to save tokens.',
|
|
687
731
|
inputSchema: {
|
|
688
732
|
type: 'object',
|
|
689
733
|
properties: {
|
|
690
734
|
include_execution_data: {
|
|
691
735
|
type: 'boolean',
|
|
692
|
-
description: 'Include pass/fail rates and last execution times (default: true
|
|
736
|
+
description: 'Include pass/fail rates and last execution times (default: false). Set true when you need execution history.',
|
|
693
737
|
},
|
|
694
738
|
include_bug_counts: {
|
|
695
739
|
type: 'boolean',
|
|
696
|
-
description: 'Include open/critical bug counts per route (default: true
|
|
740
|
+
description: 'Include open/critical bug counts per route (default: false). Set true when you need bug context.',
|
|
697
741
|
},
|
|
698
742
|
},
|
|
699
743
|
},
|
|
@@ -927,6 +971,11 @@ const tools = [
|
|
|
927
971
|
enum: ['ios', 'android', 'web'],
|
|
928
972
|
description: 'Filter by platform support',
|
|
929
973
|
},
|
|
974
|
+
role: {
|
|
975
|
+
type: 'string',
|
|
976
|
+
enum: ['tester', 'feedback'],
|
|
977
|
+
description: 'Filter by role: "tester" for QA testers, "feedback" for feedback-only users (default: all)',
|
|
978
|
+
},
|
|
930
979
|
},
|
|
931
980
|
},
|
|
932
981
|
},
|
|
@@ -1015,6 +1064,21 @@ const tools = [
|
|
|
1015
1064
|
required: ['tester_id', 'test_case_ids'],
|
|
1016
1065
|
},
|
|
1017
1066
|
},
|
|
1067
|
+
{
|
|
1068
|
+
name: 'unassign_tests',
|
|
1069
|
+
description: 'Remove one or more test assignments by assignment ID. Preserves the test case and its history — only the assignment link is deleted. Use list_test_assignments first to find assignment IDs. Max 50 per call.',
|
|
1070
|
+
inputSchema: {
|
|
1071
|
+
type: 'object',
|
|
1072
|
+
properties: {
|
|
1073
|
+
assignment_ids: {
|
|
1074
|
+
type: 'array',
|
|
1075
|
+
items: { type: 'string' },
|
|
1076
|
+
description: 'Array of test assignment UUIDs to remove (required, max 50)',
|
|
1077
|
+
},
|
|
1078
|
+
},
|
|
1079
|
+
required: ['assignment_ids'],
|
|
1080
|
+
},
|
|
1081
|
+
},
|
|
1018
1082
|
{
|
|
1019
1083
|
name: 'get_tester_workload',
|
|
1020
1084
|
description: 'View a specific tester\'s current workload — assignment counts by status and recent assignments.',
|
|
@@ -1057,6 +1121,11 @@ const tools = [
|
|
|
1057
1121
|
type: 'string',
|
|
1058
1122
|
description: 'Optional notes about the tester',
|
|
1059
1123
|
},
|
|
1124
|
+
role: {
|
|
1125
|
+
type: 'string',
|
|
1126
|
+
enum: ['tester', 'feedback'],
|
|
1127
|
+
description: 'Role: "tester" for QA testers (default), "feedback" for feedback-only users',
|
|
1128
|
+
},
|
|
1060
1129
|
},
|
|
1061
1130
|
required: ['name', 'email'],
|
|
1062
1131
|
},
|
|
@@ -1159,7 +1228,7 @@ const tools = [
|
|
|
1159
1228
|
},
|
|
1160
1229
|
{
|
|
1161
1230
|
name: 'export_test_results',
|
|
1162
|
-
description: 'Export test results for a specific test run as structured JSON — includes every assignment, tester, result, and duration.',
|
|
1231
|
+
description: 'Export test results for a specific test run as structured JSON — includes every assignment, tester, result, and duration. Use compact=true for summary only (no assignments array). Use limit to cap assignments returned.',
|
|
1163
1232
|
inputSchema: {
|
|
1164
1233
|
type: 'object',
|
|
1165
1234
|
properties: {
|
|
@@ -1167,6 +1236,14 @@ const tools = [
|
|
|
1167
1236
|
type: 'string',
|
|
1168
1237
|
description: 'UUID of the test run to export (required)',
|
|
1169
1238
|
},
|
|
1239
|
+
compact: {
|
|
1240
|
+
type: 'boolean',
|
|
1241
|
+
description: 'Compact mode: returns test run info + summary only, no assignments array. (default: false)',
|
|
1242
|
+
},
|
|
1243
|
+
limit: {
|
|
1244
|
+
type: 'number',
|
|
1245
|
+
description: 'Max assignments to return in full mode (default: 100, max: 500). Ignored when compact=true.',
|
|
1246
|
+
},
|
|
1170
1247
|
},
|
|
1171
1248
|
required: ['test_run_id'],
|
|
1172
1249
|
},
|
|
@@ -1215,7 +1292,610 @@ const tools = [
|
|
|
1215
1292
|
properties: {},
|
|
1216
1293
|
},
|
|
1217
1294
|
},
|
|
1295
|
+
// === TEST EXECUTION INTELLIGENCE ===
|
|
1296
|
+
{
|
|
1297
|
+
name: 'get_test_impact',
|
|
1298
|
+
description: 'Given changed files, identify which test cases are affected by mapping file paths to test case target routes.',
|
|
1299
|
+
inputSchema: {
|
|
1300
|
+
type: 'object',
|
|
1301
|
+
properties: {
|
|
1302
|
+
changed_files: {
|
|
1303
|
+
type: 'array',
|
|
1304
|
+
items: { type: 'string' },
|
|
1305
|
+
description: 'List of changed file paths (relative to project root)',
|
|
1306
|
+
},
|
|
1307
|
+
},
|
|
1308
|
+
required: ['changed_files'],
|
|
1309
|
+
},
|
|
1310
|
+
},
|
|
1311
|
+
{
|
|
1312
|
+
name: 'get_flaky_tests',
|
|
1313
|
+
description: 'Analyze test run history to identify tests with intermittent failure rates above a threshold.',
|
|
1314
|
+
inputSchema: {
|
|
1315
|
+
type: 'object',
|
|
1316
|
+
properties: {
|
|
1317
|
+
threshold: {
|
|
1318
|
+
type: 'number',
|
|
1319
|
+
description: 'Minimum flakiness rate to report (0-100, default: 5)',
|
|
1320
|
+
},
|
|
1321
|
+
limit: {
|
|
1322
|
+
type: 'number',
|
|
1323
|
+
description: 'Maximum results to return (default: 20)',
|
|
1324
|
+
},
|
|
1325
|
+
},
|
|
1326
|
+
},
|
|
1327
|
+
},
|
|
1328
|
+
{
|
|
1329
|
+
name: 'assess_test_quality',
|
|
1330
|
+
description: 'Analyze test case steps for weak patterns: vague assertions, missing edge cases, no negative testing, generic descriptions.',
|
|
1331
|
+
inputSchema: {
|
|
1332
|
+
type: 'object',
|
|
1333
|
+
properties: {
|
|
1334
|
+
test_case_ids: {
|
|
1335
|
+
type: 'array',
|
|
1336
|
+
items: { type: 'string' },
|
|
1337
|
+
description: 'Specific test case IDs to assess. If omitted, assesses recent test cases.',
|
|
1338
|
+
},
|
|
1339
|
+
limit: {
|
|
1340
|
+
type: 'number',
|
|
1341
|
+
description: 'Maximum test cases to assess (default: 20)',
|
|
1342
|
+
},
|
|
1343
|
+
},
|
|
1344
|
+
},
|
|
1345
|
+
},
|
|
1346
|
+
{
|
|
1347
|
+
name: 'get_test_execution_summary',
|
|
1348
|
+
description: 'Aggregate test execution metrics: pass rate, completion rate, most-failed tests, fastest/slowest tests.',
|
|
1349
|
+
inputSchema: {
|
|
1350
|
+
type: 'object',
|
|
1351
|
+
properties: {
|
|
1352
|
+
days: {
|
|
1353
|
+
type: 'number',
|
|
1354
|
+
description: 'Number of days to analyze (default: 30)',
|
|
1355
|
+
},
|
|
1356
|
+
},
|
|
1357
|
+
},
|
|
1358
|
+
},
|
|
1359
|
+
{
|
|
1360
|
+
name: 'check_test_freshness',
|
|
1361
|
+
description: 'Identify test cases that have not been updated since their target code was modified.',
|
|
1362
|
+
inputSchema: {
|
|
1363
|
+
type: 'object',
|
|
1364
|
+
properties: {
|
|
1365
|
+
limit: {
|
|
1366
|
+
type: 'number',
|
|
1367
|
+
description: 'Maximum results to return (default: 20)',
|
|
1368
|
+
},
|
|
1369
|
+
},
|
|
1370
|
+
},
|
|
1371
|
+
},
|
|
1372
|
+
{
|
|
1373
|
+
name: 'get_untested_changes',
|
|
1374
|
+
description: 'Given recent commits or changed files, find code changes with no corresponding test coverage in BugBear.',
|
|
1375
|
+
inputSchema: {
|
|
1376
|
+
type: 'object',
|
|
1377
|
+
properties: {
|
|
1378
|
+
changed_files: {
|
|
1379
|
+
type: 'array',
|
|
1380
|
+
items: { type: 'string' },
|
|
1381
|
+
description: 'List of changed file paths. If omitted, uses git diff against main.',
|
|
1382
|
+
},
|
|
1383
|
+
},
|
|
1384
|
+
},
|
|
1385
|
+
},
|
|
1386
|
+
// === AUTO-MONITORING TOOLS ===
|
|
1387
|
+
{
|
|
1388
|
+
name: 'get_auto_detected_issues',
|
|
1389
|
+
description: 'Get auto-detected monitoring issues grouped by error fingerprint. Shows recurring crashes, API failures, and rage clicks with frequency and user impact.',
|
|
1390
|
+
inputSchema: {
|
|
1391
|
+
type: 'object',
|
|
1392
|
+
properties: {
|
|
1393
|
+
source: {
|
|
1394
|
+
type: 'string',
|
|
1395
|
+
enum: ['auto_crash', 'auto_api', 'auto_rage_click'],
|
|
1396
|
+
description: 'Filter by source type',
|
|
1397
|
+
},
|
|
1398
|
+
min_occurrences: {
|
|
1399
|
+
type: 'number',
|
|
1400
|
+
description: 'Min occurrence count (default: 1)',
|
|
1401
|
+
},
|
|
1402
|
+
since: {
|
|
1403
|
+
type: 'string',
|
|
1404
|
+
description: 'ISO date — only issues after this date (default: 7 days ago)',
|
|
1405
|
+
},
|
|
1406
|
+
limit: {
|
|
1407
|
+
type: 'number',
|
|
1408
|
+
description: 'Max results (default: 20)',
|
|
1409
|
+
},
|
|
1410
|
+
compact: {
|
|
1411
|
+
type: 'boolean',
|
|
1412
|
+
description: 'Compact mode: fingerprint, source, count only',
|
|
1413
|
+
},
|
|
1414
|
+
},
|
|
1415
|
+
},
|
|
1416
|
+
},
|
|
1417
|
+
{
|
|
1418
|
+
name: 'generate_tests_from_errors',
|
|
1419
|
+
description: 'Suggest QA test cases from auto-detected error patterns. Returns structured suggestions — does NOT auto-create test cases.',
|
|
1420
|
+
inputSchema: {
|
|
1421
|
+
type: 'object',
|
|
1422
|
+
properties: {
|
|
1423
|
+
report_ids: {
|
|
1424
|
+
type: 'array',
|
|
1425
|
+
items: { type: 'string' },
|
|
1426
|
+
description: 'Specific report IDs. If omitted, uses top uncovered errors.',
|
|
1427
|
+
},
|
|
1428
|
+
limit: {
|
|
1429
|
+
type: 'number',
|
|
1430
|
+
description: 'Max suggestions (default: 5)',
|
|
1431
|
+
},
|
|
1432
|
+
},
|
|
1433
|
+
},
|
|
1434
|
+
},
|
|
1218
1435
|
];
|
|
1436
|
+
// === TEST EXECUTION INTELLIGENCE ===
|
|
1437
|
+
async function getTestImpact(args) {
|
|
1438
|
+
const projectId = requireProject();
|
|
1439
|
+
const changedFiles = args.changed_files || [];
|
|
1440
|
+
if (changedFiles.length === 0) {
|
|
1441
|
+
return { affectedTests: [], message: 'No changed files provided.' };
|
|
1442
|
+
}
|
|
1443
|
+
// Get all test cases for the project with their target routes
|
|
1444
|
+
const { data: testCases, error } = await supabase
|
|
1445
|
+
.from('test_cases')
|
|
1446
|
+
.select('id, title, target_route, qa_track, priority')
|
|
1447
|
+
.eq('project_id', projectId);
|
|
1448
|
+
if (error)
|
|
1449
|
+
return { error: error.message };
|
|
1450
|
+
if (!testCases || testCases.length === 0) {
|
|
1451
|
+
return { affectedTests: [], message: 'No test cases found for this project.' };
|
|
1452
|
+
}
|
|
1453
|
+
// Map changed files to affected test cases
|
|
1454
|
+
const affected = [];
|
|
1455
|
+
for (const tc of testCases) {
|
|
1456
|
+
const route = tc.target_route || '';
|
|
1457
|
+
const matchedFiles = changedFiles.filter(f => {
|
|
1458
|
+
// Match file path to route (e.g., src/app/api/tasks/route.ts -> /api/tasks)
|
|
1459
|
+
const normalized = f.replace(/\\/g, '/');
|
|
1460
|
+
const routeParts = route.split('/').filter(Boolean);
|
|
1461
|
+
return routeParts.some((part) => normalized.includes(part)) || normalized.includes(route.replace(/\//g, '/'));
|
|
1462
|
+
});
|
|
1463
|
+
if (matchedFiles.length > 0) {
|
|
1464
|
+
affected.push({
|
|
1465
|
+
testId: tc.id,
|
|
1466
|
+
title: tc.title,
|
|
1467
|
+
targetRoute: route,
|
|
1468
|
+
matchedFiles,
|
|
1469
|
+
qaTrack: tc.qa_track,
|
|
1470
|
+
});
|
|
1471
|
+
}
|
|
1472
|
+
}
|
|
1473
|
+
return {
|
|
1474
|
+
affectedTests: affected,
|
|
1475
|
+
totalTestCases: testCases.length,
|
|
1476
|
+
affectedCount: affected.length,
|
|
1477
|
+
changedFileCount: changedFiles.length,
|
|
1478
|
+
};
|
|
1479
|
+
}
|
|
1480
|
+
async function getFlakyTests(args) {
|
|
1481
|
+
const projectId = requireProject();
|
|
1482
|
+
const threshold = args.threshold || 5;
|
|
1483
|
+
const limit = args.limit || 20;
|
|
1484
|
+
// Get test results grouped by test case
|
|
1485
|
+
const { data: results, error } = await supabase
|
|
1486
|
+
.from('test_results')
|
|
1487
|
+
.select('test_case_id, status, test_cases!inner(title, target_route, qa_track)')
|
|
1488
|
+
.eq('test_cases.project_id', projectId)
|
|
1489
|
+
.order('created_at', { ascending: false })
|
|
1490
|
+
.limit(5000);
|
|
1491
|
+
if (error)
|
|
1492
|
+
return { error: error.message };
|
|
1493
|
+
if (!results || results.length === 0) {
|
|
1494
|
+
return { flakyTests: [], message: 'No test results found.' };
|
|
1495
|
+
}
|
|
1496
|
+
// Group by test case and calculate flakiness
|
|
1497
|
+
const testStats = {};
|
|
1498
|
+
for (const r of results) {
|
|
1499
|
+
const id = r.test_case_id;
|
|
1500
|
+
if (!testStats[id]) {
|
|
1501
|
+
const tc = r.test_cases;
|
|
1502
|
+
testStats[id] = { passes: 0, fails: 0, total: 0, title: tc?.title || '', route: tc?.target_route || '', track: tc?.qa_track || '' };
|
|
1503
|
+
}
|
|
1504
|
+
testStats[id].total++;
|
|
1505
|
+
if (r.status === 'pass')
|
|
1506
|
+
testStats[id].passes++;
|
|
1507
|
+
else if (r.status === 'fail')
|
|
1508
|
+
testStats[id].fails++;
|
|
1509
|
+
}
|
|
1510
|
+
// Find flaky tests (have both passes and fails, with fail rate above threshold)
|
|
1511
|
+
const flaky = Object.entries(testStats)
|
|
1512
|
+
.filter(([, stats]) => {
|
|
1513
|
+
if (stats.total < 3)
|
|
1514
|
+
return false; // Need enough data
|
|
1515
|
+
const failRate = (stats.fails / stats.total) * 100;
|
|
1516
|
+
const passRate = (stats.passes / stats.total) * 100;
|
|
1517
|
+
return failRate >= threshold && passRate > 0; // Has both passes and fails
|
|
1518
|
+
})
|
|
1519
|
+
.map(([id, stats]) => ({
|
|
1520
|
+
testCaseId: id,
|
|
1521
|
+
title: stats.title,
|
|
1522
|
+
targetRoute: stats.route,
|
|
1523
|
+
qaTrack: stats.track,
|
|
1524
|
+
totalRuns: stats.total,
|
|
1525
|
+
failRate: Math.round((stats.fails / stats.total) * 100),
|
|
1526
|
+
passRate: Math.round((stats.passes / stats.total) * 100),
|
|
1527
|
+
}))
|
|
1528
|
+
.sort((a, b) => b.failRate - a.failRate)
|
|
1529
|
+
.slice(0, limit);
|
|
1530
|
+
return {
|
|
1531
|
+
flakyTests: flaky,
|
|
1532
|
+
totalAnalyzed: Object.keys(testStats).length,
|
|
1533
|
+
flakyCount: flaky.length,
|
|
1534
|
+
threshold,
|
|
1535
|
+
};
|
|
1536
|
+
}
|
|
1537
|
+
async function assessTestQuality(args) {
|
|
1538
|
+
const projectId = requireProject();
|
|
1539
|
+
const limit = args.limit || 20;
|
|
1540
|
+
let query = supabase
|
|
1541
|
+
.from('test_cases')
|
|
1542
|
+
.select('id, title, steps, target_route, qa_track, priority')
|
|
1543
|
+
.eq('project_id', projectId)
|
|
1544
|
+
.limit(limit);
|
|
1545
|
+
if (args.test_case_ids && args.test_case_ids.length > 0) {
|
|
1546
|
+
query = query.in('id', args.test_case_ids);
|
|
1547
|
+
}
|
|
1548
|
+
const { data: testCases, error } = await query;
|
|
1549
|
+
if (error)
|
|
1550
|
+
return { error: error.message };
|
|
1551
|
+
if (!testCases || testCases.length === 0) {
|
|
1552
|
+
return { assessments: [], message: 'No test cases found.' };
|
|
1553
|
+
}
|
|
1554
|
+
const assessments = testCases.map(tc => {
|
|
1555
|
+
const issues = [];
|
|
1556
|
+
const steps = tc.steps || [];
|
|
1557
|
+
// Check for weak patterns
|
|
1558
|
+
if (steps.length < 2) {
|
|
1559
|
+
issues.push('Too few steps — test may not cover the full flow');
|
|
1560
|
+
}
|
|
1561
|
+
const allStepsText = steps.map((s) => (typeof s === 'string' ? s : s.action || s.description || '')).join(' ');
|
|
1562
|
+
// Vague assertions
|
|
1563
|
+
if (/should work|looks good|is correct|verify it works/i.test(allStepsText)) {
|
|
1564
|
+
issues.push('Vague assertions detected — use specific expected outcomes');
|
|
1565
|
+
}
|
|
1566
|
+
// Missing edge cases
|
|
1567
|
+
if (!/error|invalid|empty|missing|unauthorized|forbidden|404|500/i.test(allStepsText)) {
|
|
1568
|
+
issues.push('No negative/error test cases — add edge case testing');
|
|
1569
|
+
}
|
|
1570
|
+
// Generic descriptions
|
|
1571
|
+
if (/test the|check the|verify the/i.test(tc.title) && tc.title.length < 30) {
|
|
1572
|
+
issues.push('Generic test title — be more specific about what is being tested');
|
|
1573
|
+
}
|
|
1574
|
+
// No specific UI elements referenced
|
|
1575
|
+
if (!/button|input|form|modal|dropdown|select|click|type|enter|submit/i.test(allStepsText)) {
|
|
1576
|
+
issues.push('No specific UI elements referenced — steps may be too abstract');
|
|
1577
|
+
}
|
|
1578
|
+
const quality = issues.length === 0 ? 'good' : issues.length <= 2 ? 'needs-improvement' : 'poor';
|
|
1579
|
+
return {
|
|
1580
|
+
testCaseId: tc.id,
|
|
1581
|
+
title: tc.title,
|
|
1582
|
+
targetRoute: tc.target_route,
|
|
1583
|
+
stepCount: steps.length,
|
|
1584
|
+
quality,
|
|
1585
|
+
issues,
|
|
1586
|
+
};
|
|
1587
|
+
});
|
|
1588
|
+
const qualityCounts = {
|
|
1589
|
+
good: assessments.filter(a => a.quality === 'good').length,
|
|
1590
|
+
needsImprovement: assessments.filter(a => a.quality === 'needs-improvement').length,
|
|
1591
|
+
poor: assessments.filter(a => a.quality === 'poor').length,
|
|
1592
|
+
};
|
|
1593
|
+
return {
|
|
1594
|
+
assessments,
|
|
1595
|
+
summary: qualityCounts,
|
|
1596
|
+
totalAssessed: assessments.length,
|
|
1597
|
+
};
|
|
1598
|
+
}
|
|
1599
|
+
async function getTestExecutionSummary(args) {
|
|
1600
|
+
const projectId = requireProject();
|
|
1601
|
+
const days = args.days || 30;
|
|
1602
|
+
const since = new Date(Date.now() - days * 24 * 60 * 60 * 1000).toISOString();
|
|
1603
|
+
// Get test results
|
|
1604
|
+
const { data: results, error } = await supabase
|
|
1605
|
+
.from('test_results')
|
|
1606
|
+
.select('test_case_id, status, duration_ms, created_at, test_cases!inner(title, target_route)')
|
|
1607
|
+
.eq('test_cases.project_id', projectId)
|
|
1608
|
+
.gte('created_at', since)
|
|
1609
|
+
.order('created_at', { ascending: false });
|
|
1610
|
+
if (error)
|
|
1611
|
+
return { error: error.message };
|
|
1612
|
+
if (!results || results.length === 0) {
|
|
1613
|
+
return { message: `No test results found in the last ${days} days.` };
|
|
1614
|
+
}
|
|
1615
|
+
const totalRuns = results.length;
|
|
1616
|
+
const passed = results.filter(r => r.status === 'pass').length;
|
|
1617
|
+
const failed = results.filter(r => r.status === 'fail').length;
|
|
1618
|
+
const blocked = results.filter(r => r.status === 'blocked').length;
|
|
1619
|
+
// Most failed tests
|
|
1620
|
+
const failCounts = {};
|
|
1621
|
+
for (const r of results.filter(r => r.status === 'fail')) {
|
|
1622
|
+
const id = r.test_case_id;
|
|
1623
|
+
const tc = r.test_cases;
|
|
1624
|
+
if (!failCounts[id]) {
|
|
1625
|
+
failCounts[id] = { count: 0, title: tc?.title || '', route: tc?.target_route || '' };
|
|
1626
|
+
}
|
|
1627
|
+
failCounts[id].count++;
|
|
1628
|
+
}
|
|
1629
|
+
const mostFailed = Object.entries(failCounts)
|
|
1630
|
+
.sort((a, b) => b[1].count - a[1].count)
|
|
1631
|
+
.slice(0, 5)
|
|
1632
|
+
.map(([id, data]) => ({ testCaseId: id, ...data }));
|
|
1633
|
+
// Duration stats
|
|
1634
|
+
const durations = results.filter(r => r.duration_ms).map(r => r.duration_ms);
|
|
1635
|
+
const avgDuration = durations.length > 0 ? Math.round(durations.reduce((a, b) => a + b, 0) / durations.length) : 0;
|
|
1636
|
+
const maxDuration = durations.length > 0 ? Math.max(...durations) : 0;
|
|
1637
|
+
return {
|
|
1638
|
+
period: `${days} days`,
|
|
1639
|
+
totalRuns,
|
|
1640
|
+
passRate: Math.round((passed / totalRuns) * 100),
|
|
1641
|
+
failRate: Math.round((failed / totalRuns) * 100),
|
|
1642
|
+
blockedCount: blocked,
|
|
1643
|
+
averageDurationMs: avgDuration,
|
|
1644
|
+
maxDurationMs: maxDuration,
|
|
1645
|
+
mostFailed,
|
|
1646
|
+
uniqueTestsCovered: new Set(results.map(r => r.test_case_id)).size,
|
|
1647
|
+
};
|
|
1648
|
+
}
|
|
1649
|
+
async function checkTestFreshness(args) {
|
|
1650
|
+
const projectId = requireProject();
|
|
1651
|
+
const limit = args.limit || 20;
|
|
1652
|
+
// Get test cases with their last update and last result
|
|
1653
|
+
const { data: testCases, error } = await supabase
|
|
1654
|
+
.from('test_cases')
|
|
1655
|
+
.select('id, title, target_route, updated_at, created_at')
|
|
1656
|
+
.eq('project_id', projectId)
|
|
1657
|
+
.order('updated_at', { ascending: true })
|
|
1658
|
+
.limit(limit);
|
|
1659
|
+
if (error)
|
|
1660
|
+
return { error: error.message };
|
|
1661
|
+
if (!testCases || testCases.length === 0) {
|
|
1662
|
+
return { staleTests: [], message: 'No test cases found.' };
|
|
1663
|
+
}
|
|
1664
|
+
const thirtyDaysAgo = new Date(Date.now() - 30 * 24 * 60 * 60 * 1000).toISOString();
|
|
1665
|
+
const stale = testCases
|
|
1666
|
+
.filter(tc => tc.updated_at < thirtyDaysAgo)
|
|
1667
|
+
.map(tc => ({
|
|
1668
|
+
testCaseId: tc.id,
|
|
1669
|
+
title: tc.title,
|
|
1670
|
+
targetRoute: tc.target_route,
|
|
1671
|
+
lastUpdated: tc.updated_at,
|
|
1672
|
+
daysSinceUpdate: Math.round((Date.now() - new Date(tc.updated_at).getTime()) / (24 * 60 * 60 * 1000)),
|
|
1673
|
+
}));
|
|
1674
|
+
return {
|
|
1675
|
+
staleTests: stale,
|
|
1676
|
+
totalTestCases: testCases.length,
|
|
1677
|
+
staleCount: stale.length,
|
|
1678
|
+
stalenessThreshold: '30 days',
|
|
1679
|
+
};
|
|
1680
|
+
}
|
|
1681
|
+
async function getUntestedChanges(args) {
|
|
1682
|
+
const projectId = requireProject();
|
|
1683
|
+
// Get all test cases to understand what's covered
|
|
1684
|
+
const { data: testCases, error } = await supabase
|
|
1685
|
+
.from('test_cases')
|
|
1686
|
+
.select('id, title, target_route')
|
|
1687
|
+
.eq('project_id', projectId);
|
|
1688
|
+
if (error)
|
|
1689
|
+
return { error: error.message };
|
|
1690
|
+
const coveredRoutes = new Set((testCases || []).map(tc => tc.target_route).filter(Boolean));
|
|
1691
|
+
// If changed_files provided, check coverage
|
|
1692
|
+
const changedFiles = args.changed_files || [];
|
|
1693
|
+
if (changedFiles.length === 0) {
|
|
1694
|
+
return {
|
|
1695
|
+
message: 'No changed files provided. Pass changed_files to check coverage.',
|
|
1696
|
+
totalCoveredRoutes: coveredRoutes.size,
|
|
1697
|
+
};
|
|
1698
|
+
}
|
|
1699
|
+
// Map changed files to routes and check coverage
|
|
1700
|
+
const untested = [];
|
|
1701
|
+
for (const file of changedFiles) {
|
|
1702
|
+
const normalized = file.replace(/\\/g, '/');
|
|
1703
|
+
// Extract route-like path from file
|
|
1704
|
+
let inferredRoute = '';
|
|
1705
|
+
// Next.js app router: app/api/tasks/route.ts -> /api/tasks
|
|
1706
|
+
const appRouterMatch = normalized.match(/app\/(api\/[^/]+(?:\/[^/]+)*?)\/route\.\w+$/);
|
|
1707
|
+
if (appRouterMatch) {
|
|
1708
|
+
inferredRoute = '/' + appRouterMatch[1];
|
|
1709
|
+
}
|
|
1710
|
+
// Pages router: pages/api/tasks.ts -> /api/tasks
|
|
1711
|
+
const pagesMatch = normalized.match(/pages\/(api\/[^.]+)\.\w+$/);
|
|
1712
|
+
if (!inferredRoute && pagesMatch) {
|
|
1713
|
+
inferredRoute = '/' + pagesMatch[1];
|
|
1714
|
+
}
|
|
1715
|
+
// Component files
|
|
1716
|
+
const componentMatch = normalized.match(/(?:components|screens|pages)\/([^.]+)\.\w+$/);
|
|
1717
|
+
if (!inferredRoute && componentMatch) {
|
|
1718
|
+
inferredRoute = '/' + componentMatch[1].replace(/\\/g, '/');
|
|
1719
|
+
}
|
|
1720
|
+
if (inferredRoute && !coveredRoutes.has(inferredRoute)) {
|
|
1721
|
+
untested.push({
|
|
1722
|
+
file,
|
|
1723
|
+
inferredRoute,
|
|
1724
|
+
reason: 'No test cases cover this route',
|
|
1725
|
+
});
|
|
1726
|
+
}
|
|
1727
|
+
else if (!inferredRoute) {
|
|
1728
|
+
// Can't map to a route — flag as potentially untested
|
|
1729
|
+
untested.push({
|
|
1730
|
+
file,
|
|
1731
|
+
inferredRoute: 'unknown',
|
|
1732
|
+
reason: 'Could not map file to a testable route',
|
|
1733
|
+
});
|
|
1734
|
+
}
|
|
1735
|
+
}
|
|
1736
|
+
return {
|
|
1737
|
+
untestedChanges: untested,
|
|
1738
|
+
changedFileCount: changedFiles.length,
|
|
1739
|
+
untestedCount: untested.length,
|
|
1740
|
+
coveredRoutes: coveredRoutes.size,
|
|
1741
|
+
};
|
|
1742
|
+
}
|
|
1743
|
+
// === AUTO-MONITORING HANDLERS ===
|
|
1744
|
+
async function getAutoDetectedIssues(args) {
|
|
1745
|
+
const projectId = requireProject();
|
|
1746
|
+
const since = args.since || new Date(Date.now() - 7 * 24 * 60 * 60 * 1000).toISOString();
|
|
1747
|
+
const limit = args.limit || 20;
|
|
1748
|
+
let query = supabase
|
|
1749
|
+
.from('reports')
|
|
1750
|
+
.select('id, error_fingerprint, report_source, title, severity, reporter_id, sentry_event_id, created_at, app_context')
|
|
1751
|
+
.eq('project_id', projectId)
|
|
1752
|
+
.neq('report_source', 'manual')
|
|
1753
|
+
.not('error_fingerprint', 'is', null)
|
|
1754
|
+
.gte('created_at', since)
|
|
1755
|
+
.order('created_at', { ascending: false });
|
|
1756
|
+
if (args.source) {
|
|
1757
|
+
query = query.eq('report_source', args.source);
|
|
1758
|
+
}
|
|
1759
|
+
const { data, error } = await query;
|
|
1760
|
+
if (error)
|
|
1761
|
+
return { error: error.message };
|
|
1762
|
+
if (!data || data.length === 0)
|
|
1763
|
+
return { issues: [], total: 0 };
|
|
1764
|
+
// Group by fingerprint
|
|
1765
|
+
const grouped = new Map();
|
|
1766
|
+
for (const report of data) {
|
|
1767
|
+
const fp = report.error_fingerprint;
|
|
1768
|
+
if (!grouped.has(fp))
|
|
1769
|
+
grouped.set(fp, []);
|
|
1770
|
+
grouped.get(fp).push(report);
|
|
1771
|
+
}
|
|
1772
|
+
// Build issue summaries
|
|
1773
|
+
const issues = Array.from(grouped.entries())
|
|
1774
|
+
.map(([fingerprint, reports]) => {
|
|
1775
|
+
const uniqueReporters = new Set(reports.map(r => r.reporter_id));
|
|
1776
|
+
const sorted = reports.sort((a, b) => new Date(a.created_at).getTime() - new Date(b.created_at).getTime());
|
|
1777
|
+
const first = sorted[0];
|
|
1778
|
+
const last = sorted[sorted.length - 1];
|
|
1779
|
+
const route = first.app_context?.currentRoute || 'unknown';
|
|
1780
|
+
return {
|
|
1781
|
+
fingerprint,
|
|
1782
|
+
source: first.report_source,
|
|
1783
|
+
message: first.title,
|
|
1784
|
+
route,
|
|
1785
|
+
occurrence_count: reports.length,
|
|
1786
|
+
affected_users: uniqueReporters.size,
|
|
1787
|
+
first_seen: first.created_at,
|
|
1788
|
+
last_seen: last.created_at,
|
|
1789
|
+
severity: first.severity,
|
|
1790
|
+
has_sentry_link: reports.some(r => r.sentry_event_id != null),
|
|
1791
|
+
sample_report_id: first.id,
|
|
1792
|
+
};
|
|
1793
|
+
})
|
|
1794
|
+
.filter(issue => issue.occurrence_count >= (args.min_occurrences || 1))
|
|
1795
|
+
.sort((a, b) => b.occurrence_count - a.occurrence_count)
|
|
1796
|
+
.slice(0, limit);
|
|
1797
|
+
if (args.compact) {
|
|
1798
|
+
return {
|
|
1799
|
+
issues: issues.map(i => ({
|
|
1800
|
+
fingerprint: i.fingerprint,
|
|
1801
|
+
source: i.source,
|
|
1802
|
+
count: i.occurrence_count,
|
|
1803
|
+
users: i.affected_users,
|
|
1804
|
+
severity: i.severity,
|
|
1805
|
+
})),
|
|
1806
|
+
total: issues.length,
|
|
1807
|
+
};
|
|
1808
|
+
}
|
|
1809
|
+
return { issues, total: issues.length };
|
|
1810
|
+
}
|
|
1811
|
+
/**
 * Propose manual test-case suggestions derived from auto-detected error reports.
 *
 * Two modes:
 *  - args.report_ids given: build a suggestion for each of those reports
 *    (all IDs must be valid UUIDs and belong to the current project).
 *  - otherwise: scan the 50 most recent non-manual reports that carry an
 *    error_fingerprint, deduplicate by fingerprint (keeping the newest),
 *    and suggest tests for the top `args.limit` (default 5).
 *
 * @param {{ report_ids?: string[], limit?: number }} args
 * @returns {Promise<{ suggestions: object[] } | { error: string }>}
 */
async function generateTestsFromErrors(args) {
    const projectId = requireProject();
    const maxSuggestions = args.limit || 5;
    const REPORT_FIELDS = 'id, title, report_source, severity, app_context, error_fingerprint, description';
    let sourceReports;
    if (args.report_ids?.length) {
        // Reject the whole batch on the first malformed UUID.
        const badId = args.report_ids.find((id) => !isValidUUID(id));
        if (badId !== undefined) {
            return { error: `Invalid report_id format: ${badId}` };
        }
        const { data, error } = await supabase
            .from('reports')
            .select(REPORT_FIELDS)
            .eq('project_id', projectId)
            .in('id', args.report_ids);
        if (error) {
            return { error: error.message };
        }
        sourceReports = data;
    }
    else {
        // No explicit IDs: pull recent auto-detected reports with a fingerprint.
        const { data, error } = await supabase
            .from('reports')
            .select(REPORT_FIELDS)
            .eq('project_id', projectId)
            .neq('report_source', 'manual')
            .not('error_fingerprint', 'is', null)
            .order('created_at', { ascending: false })
            .limit(50);
        if (error) {
            return { error: error.message };
        }
        // Keep only the first (most recent) report per fingerprint.
        const seenFingerprints = new Set();
        sourceReports = (data || [])
            .filter((r) => {
                if (!r.error_fingerprint || seenFingerprints.has(r.error_fingerprint)) {
                    return false;
                }
                seenFingerprints.add(r.error_fingerprint);
                return true;
            })
            .slice(0, maxSuggestions);
    }
    if (!sourceReports?.length) {
        return { suggestions: [] };
    }
    // Build the suggested reproduction/verification steps for one report,
    // shaped by how the issue was detected.
    const stepsFor = (report, route, source) => {
        if (source === 'auto_crash') {
            return [
                `Navigate to ${route}`,
                'Reproduce the action that triggered the crash',
                'Verify the page does not throw an unhandled error',
                'Verify error boundary displays a user-friendly message if error occurs',
            ];
        }
        if (source === 'auto_api') {
            const statusCode = report.app_context?.custom?.statusCode || 'error';
            const method = report.app_context?.custom?.requestMethod || 'API';
            return [
                `Navigate to ${route}`,
                `Trigger the ${method} request that returned ${statusCode}`,
                'Verify the request succeeds or displays an appropriate error message',
                'Verify no data corruption occurs on failure',
            ];
        }
        // rage_click or sentry_sync
        const target = report.app_context?.custom?.targetSelector || 'the element';
        return [
            `Navigate to ${route}`,
            `Click on ${target}`,
            'Verify the element responds to interaction',
            'Verify loading state is shown if action takes time',
        ];
    };
    const suggestions = sourceReports.map((report) => {
        const route = report.app_context?.currentRoute || '/unknown';
        const source = report.report_source;
        const priority = report.severity === 'critical' ? 'P1' : report.severity === 'high' ? 'P1' : 'P2';
        return {
            title: `Test: ${report.title?.replace('[Auto] ', '') || 'Auto-detected issue'}`,
            track: source === 'auto_crash' ? 'Stability' : source === 'auto_api' ? 'API' : 'UX',
            priority,
            rationale: `Auto-detected ${source?.replace('auto_', '')} on ${route}`,
            suggested_steps: stepsFor(report, route, source),
            source_report_id: report.id,
            route,
        };
    });
    return { suggestions };
}
|
|
1219
1899
|
// === Project management handlers ===
|
|
1220
1900
|
async function listProjects() {
|
|
1221
1901
|
const { data, error } = await supabase
|
|
@@ -1320,7 +2000,7 @@ async function getReport(args) {
|
|
|
1320
2000
|
app_context: data.app_context,
|
|
1321
2001
|
device_info: data.device_info,
|
|
1322
2002
|
navigation_history: data.navigation_history,
|
|
1323
|
-
|
|
2003
|
+
screenshot_urls: data.screenshot_urls,
|
|
1324
2004
|
created_at: data.created_at,
|
|
1325
2005
|
reporter: data.tester ? {
|
|
1326
2006
|
name: data.tester.name,
|
|
@@ -1395,22 +2075,86 @@ async function getReportContext(args) {
|
|
|
1395
2075
|
}
|
|
1396
2076
|
const { data, error } = await supabase
|
|
1397
2077
|
.from('reports')
|
|
1398
|
-
.select('app_context, device_info, navigation_history, enhanced_context')
|
|
2078
|
+
.select('app_context, device_info, navigation_history, enhanced_context, screenshot_urls')
|
|
1399
2079
|
.eq('id', args.report_id)
|
|
1400
2080
|
.eq('project_id', currentProjectId) // Security: ensure report belongs to this project
|
|
1401
2081
|
.single();
|
|
1402
2082
|
if (error) {
|
|
1403
2083
|
return { error: error.message };
|
|
1404
2084
|
}
|
|
2085
|
+
// Compact: return app_context only (skip console/network/navigation)
|
|
2086
|
+
if (args.compact === true) {
|
|
2087
|
+
return {
|
|
2088
|
+
context: {
|
|
2089
|
+
app_context: data.app_context,
|
|
2090
|
+
screenshot_urls: data.screenshot_urls,
|
|
2091
|
+
},
|
|
2092
|
+
};
|
|
2093
|
+
}
|
|
1405
2094
|
return {
|
|
1406
2095
|
context: {
|
|
1407
2096
|
app_context: data.app_context,
|
|
1408
2097
|
device_info: data.device_info,
|
|
1409
2098
|
navigation_history: data.navigation_history,
|
|
1410
2099
|
enhanced_context: data.enhanced_context || {},
|
|
2100
|
+
screenshot_urls: data.screenshot_urls,
|
|
1411
2101
|
},
|
|
1412
2102
|
};
|
|
1413
2103
|
}
|
|
2104
|
+
/**
 * Add a comment to a bug report's discussion thread without changing the
 * report's status. Ensures a 'report'-type discussion thread exists for the
 * report (creating one on first use), then inserts the message into it.
 *
 * @param {{ report_id: string, message: string, author?: string }} args
 * @returns {Promise<object>} { success, comment, message } on success, or
 *   { error } on validation failure / DB error.
 */
async function addReportComment(args) {
    if (!isValidUUID(args.report_id))
        return { error: 'Invalid report_id format' };
    if (!args.message?.trim())
        return { error: 'Message is required' };
    // Verify report exists (and belongs to the current project — the
    // project_id filter doubles as an access check).
    const { data: report } = await supabase
        .from('reports').select('id').eq('id', args.report_id).eq('project_id', currentProjectId).single();
    if (!report)
        return { error: 'Report not found' };
    // Find or create a discussion thread for this report.
    // NOTE: .single() errors when no row matches, but the error is
    // intentionally ignored — existingThread is simply undefined then.
    const { data: existingThread } = await supabase
        .from('discussion_threads').select('id')
        .eq('project_id', currentProjectId).eq('report_id', args.report_id).eq('thread_type', 'report')
        .limit(1).single();
    let threadId;
    if (existingThread) {
        threadId = existingThread.id;
    }
    else {
        // Generate the thread id client-side so we can reference it for the
        // message insert without re-querying.
        const newId = crypto.randomUUID();
        const { error: threadErr } = await supabase
            .from('discussion_threads').insert({
            id: newId, project_id: currentProjectId, report_id: args.report_id,
            thread_type: 'report', subject: 'Bug Report Discussion', audience: 'all',
            priority: 'normal', created_by_admin: true, last_message_at: new Date().toISOString(),
        });
        if (threadErr)
            return { error: `Failed to create thread: ${threadErr.message}` };
        threadId = newId;
    }
    // Insert the comment as an admin-sent text message on the thread.
    const { data: msg, error: msgErr } = await supabase
        .from('discussion_messages').insert({
        thread_id: threadId, sender_type: 'admin', sender_name: args.author || 'Claude Code', content: args.message.trim(), content_type: 'text',
    }).select('id, content, created_at').single();
    if (msgErr)
        return { error: `Failed to add comment: ${msgErr.message}` };
    return { success: true, comment: { id: msg.id, thread_id: threadId, content: msg.content, author: args.author || 'Claude Code', created_at: msg.created_at }, message: 'Comment added to report' };
}
|
|
2143
|
+
/**
 * Fetch every comment posted on a report's discussion threads, oldest first.
 *
 * Looks up all discussion threads attached to the report within the current
 * project, then returns their messages in chronological order.
 *
 * @param {{ report_id: string }} args
 * @returns {Promise<object>} { comments, total } (plus a message when the
 *   report has no threads), or { error } on failure.
 */
async function getReportComments(args) {
    if (!isValidUUID(args.report_id)) {
        return { error: 'Invalid report_id format' };
    }
    // All discussion threads attached to this report, oldest first.
    const threadLookup = await supabase
        .from('discussion_threads')
        .select('id')
        .eq('project_id', currentProjectId)
        .eq('report_id', args.report_id)
        .order('created_at', { ascending: true });
    const threadRows = threadLookup.data;
    if (!threadRows || threadRows.length === 0) {
        return { comments: [], total: 0, message: 'No comments on this report' };
    }
    const threadIds = threadRows.map((t) => t.id);
    const { data: rows, error } = await supabase
        .from('discussion_messages')
        .select('id, thread_id, sender_type, content, content_type, created_at, attachments')
        .in('thread_id', threadIds)
        .order('created_at', { ascending: true });
    if (error) {
        return { error: error.message };
    }
    // Project each message row down to the public comment shape.
    const comments = (rows || []).map(({ id, sender_type, content, created_at, attachments }) => ({
        id,
        sender_type,
        content,
        created_at,
        attachments,
    }));
    return { comments, total: comments.length };
}
|
|
1414
2158
|
async function getProjectInfo() {
|
|
1415
2159
|
// Get project details
|
|
1416
2160
|
const { data: project, error: projectError } = await supabase
|
|
@@ -1727,6 +2471,19 @@ async function listTestCases(args) {
|
|
|
1727
2471
|
if (args.track) {
|
|
1728
2472
|
testCases = testCases.filter((tc) => tc.track?.name?.toLowerCase().includes(args.track.toLowerCase()));
|
|
1729
2473
|
}
|
|
2474
|
+
// Compact: return minimal fields only
|
|
2475
|
+
if (args.compact === true) {
|
|
2476
|
+
return {
|
|
2477
|
+
count: testCases.length,
|
|
2478
|
+
testCases: testCases.map((tc) => ({
|
|
2479
|
+
id: tc.id,
|
|
2480
|
+
testKey: tc.test_key,
|
|
2481
|
+
title: tc.title,
|
|
2482
|
+
priority: tc.priority,
|
|
2483
|
+
})),
|
|
2484
|
+
pagination: { limit, offset, hasMore: testCases.length === limit },
|
|
2485
|
+
};
|
|
2486
|
+
}
|
|
1730
2487
|
return {
|
|
1731
2488
|
count: testCases.length,
|
|
1732
2489
|
testCases: testCases.map((tc) => ({
|
|
@@ -2236,8 +2993,8 @@ async function getRegressions(args) {
|
|
|
2236
2993
|
};
|
|
2237
2994
|
}
|
|
2238
2995
|
async function getCoverageMatrix(args) {
|
|
2239
|
-
const includeExecution = args.include_execution_data
|
|
2240
|
-
const includeBugs = args.include_bug_counts
|
|
2996
|
+
const includeExecution = args.include_execution_data === true;
|
|
2997
|
+
const includeBugs = args.include_bug_counts === true;
|
|
2241
2998
|
// Get tracks
|
|
2242
2999
|
const { data: tracks } = await supabase
|
|
2243
3000
|
.from('qa_tracks')
|
|
@@ -3690,7 +4447,7 @@ async function markFixedWithCommit(args) {
|
|
|
3690
4447
|
status: 'resolved',
|
|
3691
4448
|
resolved_at: new Date().toISOString(),
|
|
3692
4449
|
resolution_notes: args.resolution_notes || `Fixed in commit ${args.commit_sha.slice(0, 7)}`,
|
|
3693
|
-
notify_tester: args.notify_tester
|
|
4450
|
+
notify_tester: args.notify_tester !== false, // Default: notify tester. Pass false to silently resolve.
|
|
3694
4451
|
code_context: {
|
|
3695
4452
|
...existingContext,
|
|
3696
4453
|
fix: {
|
|
@@ -3710,7 +4467,8 @@ async function markFixedWithCommit(args) {
|
|
|
3710
4467
|
if (error) {
|
|
3711
4468
|
return { error: error.message };
|
|
3712
4469
|
}
|
|
3713
|
-
const
|
|
4470
|
+
const notifyTester = args.notify_tester !== false;
|
|
4471
|
+
const notificationStatus = notifyTester
|
|
3714
4472
|
? 'The original tester will be notified and assigned a verification task.'
|
|
3715
4473
|
: 'No notification sent (silent resolve). A verification task was created.';
|
|
3716
4474
|
return {
|
|
@@ -3718,7 +4476,7 @@ async function markFixedWithCommit(args) {
|
|
|
3718
4476
|
message: `Bug marked as fixed in commit ${args.commit_sha.slice(0, 7)}. ${notificationStatus}`,
|
|
3719
4477
|
report_id: args.report_id,
|
|
3720
4478
|
commit: args.commit_sha,
|
|
3721
|
-
tester_notified:
|
|
4479
|
+
tester_notified: notifyTester,
|
|
3722
4480
|
next_steps: [
|
|
3723
4481
|
'Consider running create_regression_test to prevent this bug from recurring',
|
|
3724
4482
|
'Push your changes to trigger CI/CD',
|
|
@@ -4443,12 +5201,15 @@ Which files or areas would you like me to analyze?`;
|
|
|
4443
5201
|
async function listTesters(args) {
|
|
4444
5202
|
let query = supabase
|
|
4445
5203
|
.from('testers')
|
|
4446
|
-
.select('id, name, email, status, platforms, tier, assigned_count, completed_count, notes, created_at')
|
|
5204
|
+
.select('id, name, email, status, platforms, tier, assigned_count, completed_count, notes, role, created_at')
|
|
4447
5205
|
.eq('project_id', currentProjectId)
|
|
4448
5206
|
.order('name', { ascending: true });
|
|
4449
5207
|
if (args.status) {
|
|
4450
5208
|
query = query.eq('status', args.status);
|
|
4451
5209
|
}
|
|
5210
|
+
if (args.role) {
|
|
5211
|
+
query = query.eq('role', args.role);
|
|
5212
|
+
}
|
|
4452
5213
|
const { data, error } = await query;
|
|
4453
5214
|
if (error) {
|
|
4454
5215
|
return { error: error.message };
|
|
@@ -4470,6 +5231,7 @@ async function listTesters(args) {
|
|
|
4470
5231
|
assignedCount: t.assigned_count,
|
|
4471
5232
|
completedCount: t.completed_count,
|
|
4472
5233
|
notes: t.notes,
|
|
5234
|
+
role: t.role,
|
|
4473
5235
|
})),
|
|
4474
5236
|
};
|
|
4475
5237
|
}
|
|
@@ -4671,6 +5433,22 @@ async function assignTests(args) {
|
|
|
4671
5433
|
test_run_id: args.test_run_id || null,
|
|
4672
5434
|
status: 'pending',
|
|
4673
5435
|
}));
|
|
5436
|
+
// Helper: after assignments change, sync the test run's total_tests counter
|
|
5437
|
+
async function syncRunCounter() {
|
|
5438
|
+
if (!args.test_run_id)
|
|
5439
|
+
return;
|
|
5440
|
+
const { count } = await supabase
|
|
5441
|
+
.from('test_assignments')
|
|
5442
|
+
.select('id', { count: 'exact', head: true })
|
|
5443
|
+
.eq('test_run_id', args.test_run_id)
|
|
5444
|
+
.eq('project_id', currentProjectId);
|
|
5445
|
+
if (count !== null) {
|
|
5446
|
+
await supabase
|
|
5447
|
+
.from('test_runs')
|
|
5448
|
+
.update({ total_tests: count })
|
|
5449
|
+
.eq('id', args.test_run_id);
|
|
5450
|
+
}
|
|
5451
|
+
}
|
|
4674
5452
|
// Insert — use upsert-like approach: insert and handle conflicts
|
|
4675
5453
|
const { data: inserted, error: insertErr } = await supabase
|
|
4676
5454
|
.from('test_assignments')
|
|
@@ -4696,6 +5474,7 @@ async function assignTests(args) {
|
|
|
4696
5474
|
created.push(single);
|
|
4697
5475
|
}
|
|
4698
5476
|
}
|
|
5477
|
+
await syncRunCounter();
|
|
4699
5478
|
return {
|
|
4700
5479
|
success: true,
|
|
4701
5480
|
created: created.length,
|
|
@@ -4707,6 +5486,7 @@ async function assignTests(args) {
|
|
|
4707
5486
|
}
|
|
4708
5487
|
return { error: insertErr.message };
|
|
4709
5488
|
}
|
|
5489
|
+
await syncRunCounter();
|
|
4710
5490
|
return {
|
|
4711
5491
|
success: true,
|
|
4712
5492
|
created: (inserted || []).length,
|
|
@@ -4716,6 +5496,71 @@ async function assignTests(args) {
|
|
|
4716
5496
|
message: `Assigned ${(inserted || []).length} test(s) to ${tester.name}.`,
|
|
4717
5497
|
};
|
|
4718
5498
|
}
|
|
5499
|
+
/**
 * Remove up to 50 test assignments from the current project and resync the
 * total_tests counter on any affected test runs.
 *
 * @param {{ assignment_ids: string[] }} args
 * @returns {Promise<object>} { success, deletedCount, deleted, notFound?,
 *   message } on success, or { error } on validation/DB failure.
 */
async function unassignTests(args) {
    if (!args.assignment_ids || args.assignment_ids.length === 0) {
        return { error: 'At least one assignment_id is required' };
    }
    if (args.assignment_ids.length > 50) {
        return { error: 'Maximum 50 assignments per unassign batch' };
    }
    // Validate every id up front; report all malformed ones at once.
    const invalidIds = args.assignment_ids.filter(id => !isValidUUID(id));
    if (invalidIds.length > 0) {
        return { error: `Invalid UUID(s): ${invalidIds.join(', ')}` };
    }
    // Verify assignments exist and belong to this project. Also capture
    // test_run_id (for counter resync) and joined test/tester display fields
    // (for the response) before the rows are deleted.
    const { data: existing, error: lookupErr } = await supabase
        .from('test_assignments')
        .select('id, test_run_id, test_case:test_cases(test_key, title), tester:testers(name)')
        .eq('project_id', currentProjectId)
        .in('id', args.assignment_ids);
    if (lookupErr)
        return { error: lookupErr.message };
    if (!existing || existing.length === 0) {
        return { error: 'No matching assignments found in this project' };
    }
    // IDs the caller passed that did not match anything in this project;
    // they are reported back rather than treated as an error.
    const foundIds = new Set(existing.map((a) => a.id));
    const notFound = args.assignment_ids.filter(id => !foundIds.has(id));
    // Delete the assignments
    const { error: deleteErr } = await supabase
        .from('test_assignments')
        .delete()
        .eq('project_id', currentProjectId)
        .in('id', args.assignment_ids);
    if (deleteErr)
        return { error: deleteErr.message };
    // Sync run counters for any affected test runs: re-count the remaining
    // assignments per run (head:true fetches only the count, no rows) and
    // write the fresh total back. Runs sequentially, one run at a time.
    const affectedRunIds = [...new Set(existing.filter((a) => a.test_run_id).map((a) => a.test_run_id))];
    for (const runId of affectedRunIds) {
        const { count } = await supabase
            .from('test_assignments')
            .select('id', { count: 'exact', head: true })
            .eq('test_run_id', runId)
            .eq('project_id', currentProjectId);
        // count can be null if the count query failed; skip the update then.
        if (count !== null) {
            await supabase.from('test_runs').update({ total_tests: count }).eq('id', runId);
        }
    }
    // Shape the deleted rows for the response, tolerating missing joins.
    const deleted = existing.map((a) => {
        const tc = a.test_case;
        const tester = a.tester;
        return {
            id: a.id,
            testKey: tc?.test_key || null,
            testTitle: tc?.title || null,
            testerName: tester?.name || null,
        };
    });
    const firstKey = deleted[0]?.testKey;
    return {
        success: true,
        deletedCount: existing.length,
        deleted,
        notFound: notFound.length > 0 ? notFound : undefined,
        message: existing.length === 1
            ? `Removed 1 assignment${firstKey ? ` (${firstKey})` : ''}`
            : `Removed ${existing.length} assignment(s)`,
    };
}
|
|
4719
5564
|
async function getTesterWorkload(args) {
|
|
4720
5565
|
if (!isValidUUID(args.tester_id)) {
|
|
4721
5566
|
return { error: 'Invalid tester_id format' };
|
|
@@ -4815,8 +5660,9 @@ async function createTester(args) {
|
|
|
4815
5660
|
tier: args.tier ?? 1,
|
|
4816
5661
|
notes: args.notes?.trim() || null,
|
|
4817
5662
|
status: 'active',
|
|
5663
|
+
role: args.role || 'tester',
|
|
4818
5664
|
})
|
|
4819
|
-
.select('id, name, email, status, platforms, tier, notes, created_at')
|
|
5665
|
+
.select('id, name, email, status, platforms, tier, notes, role, created_at')
|
|
4820
5666
|
.single();
|
|
4821
5667
|
if (error) {
|
|
4822
5668
|
if (error.message.includes('duplicate') || error.message.includes('unique')) {
|
|
@@ -4834,6 +5680,7 @@ async function createTester(args) {
|
|
|
4834
5680
|
platforms: data.platforms,
|
|
4835
5681
|
tier: data.tier,
|
|
4836
5682
|
notes: data.notes,
|
|
5683
|
+
role: data.role,
|
|
4837
5684
|
createdAt: data.created_at,
|
|
4838
5685
|
},
|
|
4839
5686
|
message: `Tester "${data.name}" added to the project. Use assign_tests to give them test cases.`,
|
|
@@ -5124,27 +5971,38 @@ async function exportTestResults(args) {
|
|
|
5124
5971
|
const all = assignments || [];
|
|
5125
5972
|
const passCount = all.filter(a => a.status === 'passed').length;
|
|
5126
5973
|
const failCount = all.filter(a => a.status === 'failed').length;
|
|
5974
|
+
const testRunInfo = {
|
|
5975
|
+
id: run.id,
|
|
5976
|
+
name: run.name,
|
|
5977
|
+
description: run.description,
|
|
5978
|
+
status: run.status,
|
|
5979
|
+
startedAt: run.started_at,
|
|
5980
|
+
completedAt: run.completed_at,
|
|
5981
|
+
createdAt: run.created_at,
|
|
5982
|
+
};
|
|
5983
|
+
const summaryInfo = {
|
|
5984
|
+
totalAssignments: all.length,
|
|
5985
|
+
passed: passCount,
|
|
5986
|
+
failed: failCount,
|
|
5987
|
+
blocked: all.filter(a => a.status === 'blocked').length,
|
|
5988
|
+
skipped: all.filter(a => a.status === 'skipped').length,
|
|
5989
|
+
pending: all.filter(a => a.status === 'pending').length,
|
|
5990
|
+
inProgress: all.filter(a => a.status === 'in_progress').length,
|
|
5991
|
+
passRate: all.length > 0 ? Math.round((passCount / all.length) * 100) : 0,
|
|
5992
|
+
};
|
|
5993
|
+
// Compact: return test run info + summary only, no assignments array
|
|
5994
|
+
if (args.compact === true) {
|
|
5995
|
+
return { testRun: testRunInfo, summary: summaryInfo };
|
|
5996
|
+
}
|
|
5997
|
+
// Apply limit (default: 100, max: 500)
|
|
5998
|
+
const assignmentLimit = Math.min(Math.max(args.limit ?? 100, 1), 500);
|
|
5999
|
+
const limitedAssignments = all.slice(0, assignmentLimit);
|
|
5127
6000
|
return {
|
|
5128
|
-
testRun:
|
|
5129
|
-
|
|
5130
|
-
|
|
5131
|
-
|
|
5132
|
-
|
|
5133
|
-
startedAt: run.started_at,
|
|
5134
|
-
completedAt: run.completed_at,
|
|
5135
|
-
createdAt: run.created_at,
|
|
5136
|
-
},
|
|
5137
|
-
summary: {
|
|
5138
|
-
totalAssignments: all.length,
|
|
5139
|
-
passed: passCount,
|
|
5140
|
-
failed: failCount,
|
|
5141
|
-
blocked: all.filter(a => a.status === 'blocked').length,
|
|
5142
|
-
skipped: all.filter(a => a.status === 'skipped').length,
|
|
5143
|
-
pending: all.filter(a => a.status === 'pending').length,
|
|
5144
|
-
inProgress: all.filter(a => a.status === 'in_progress').length,
|
|
5145
|
-
passRate: all.length > 0 ? Math.round((passCount / all.length) * 100) : 0,
|
|
5146
|
-
},
|
|
5147
|
-
assignments: all.map((a) => ({
|
|
6001
|
+
testRun: testRunInfo,
|
|
6002
|
+
summary: summaryInfo,
|
|
6003
|
+
assignmentsReturned: limitedAssignments.length,
|
|
6004
|
+
assignmentsTotal: all.length,
|
|
6005
|
+
assignments: limitedAssignments.map((a) => ({
|
|
5148
6006
|
id: a.id,
|
|
5149
6007
|
status: a.status,
|
|
5150
6008
|
assignedAt: a.assigned_at,
|
|
@@ -5258,6 +6116,12 @@ async function main() {
|
|
|
5258
6116
|
case 'get_report_context':
|
|
5259
6117
|
result = await getReportContext(args);
|
|
5260
6118
|
break;
|
|
6119
|
+
case 'add_report_comment':
|
|
6120
|
+
result = await addReportComment(args);
|
|
6121
|
+
break;
|
|
6122
|
+
case 'get_report_comments':
|
|
6123
|
+
result = await getReportComments(args);
|
|
6124
|
+
break;
|
|
5261
6125
|
case 'get_project_info':
|
|
5262
6126
|
result = await getProjectInfo();
|
|
5263
6127
|
break;
|
|
@@ -5371,6 +6235,9 @@ async function main() {
|
|
|
5371
6235
|
case 'assign_tests':
|
|
5372
6236
|
result = await assignTests(args);
|
|
5373
6237
|
break;
|
|
6238
|
+
case 'unassign_tests':
|
|
6239
|
+
result = await unassignTests(args);
|
|
6240
|
+
break;
|
|
5374
6241
|
case 'get_tester_workload':
|
|
5375
6242
|
result = await getTesterWorkload(args);
|
|
5376
6243
|
break;
|
|
@@ -5406,6 +6273,32 @@ async function main() {
|
|
|
5406
6273
|
case 'get_current_project':
|
|
5407
6274
|
result = getCurrentProject();
|
|
5408
6275
|
break;
|
|
6276
|
+
// === TEST EXECUTION INTELLIGENCE ===
|
|
6277
|
+
case 'get_test_impact':
|
|
6278
|
+
result = await getTestImpact(args);
|
|
6279
|
+
break;
|
|
6280
|
+
case 'get_flaky_tests':
|
|
6281
|
+
result = await getFlakyTests(args);
|
|
6282
|
+
break;
|
|
6283
|
+
case 'assess_test_quality':
|
|
6284
|
+
result = await assessTestQuality(args);
|
|
6285
|
+
break;
|
|
6286
|
+
case 'get_test_execution_summary':
|
|
6287
|
+
result = await getTestExecutionSummary(args);
|
|
6288
|
+
break;
|
|
6289
|
+
case 'check_test_freshness':
|
|
6290
|
+
result = await checkTestFreshness(args);
|
|
6291
|
+
break;
|
|
6292
|
+
case 'get_untested_changes':
|
|
6293
|
+
result = await getUntestedChanges(args);
|
|
6294
|
+
break;
|
|
6295
|
+
// === AUTO-MONITORING TOOLS ===
|
|
6296
|
+
case 'get_auto_detected_issues':
|
|
6297
|
+
result = await getAutoDetectedIssues(args);
|
|
6298
|
+
break;
|
|
6299
|
+
case 'generate_tests_from_errors':
|
|
6300
|
+
result = await generateTestsFromErrors(args);
|
|
6301
|
+
break;
|
|
5409
6302
|
default:
|
|
5410
6303
|
return {
|
|
5411
6304
|
content: [{ type: 'text', text: `Unknown tool: ${name}` }],
|