@bbearai/mcp-server 0.6.0 → 0.7.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.js +984 -37
- package/package.json +2 -1
- package/src/index.ts +1081 -38
package/dist/index.js
CHANGED
|
@@ -128,7 +128,7 @@ const tools = [
|
|
|
128
128
|
},
|
|
129
129
|
status: {
|
|
130
130
|
type: 'string',
|
|
131
|
-
enum: ['new', 'triaging', 'confirmed', 'in_progress', 'fixed', '
|
|
131
|
+
enum: ['new', 'triaging', 'confirmed', 'in_progress', 'fixed', 'ready_to_test', 'verified', 'resolved', 'reviewed', 'closed', 'wont_fix', 'duplicate'],
|
|
132
132
|
description: 'The new status for the report',
|
|
133
133
|
},
|
|
134
134
|
resolution_notes: {
|
|
@@ -141,7 +141,47 @@ const tools = [
|
|
|
141
141
|
},
|
|
142
142
|
{
|
|
143
143
|
name: 'get_report_context',
|
|
144
|
-
description: 'Get the full debugging context for a report including console logs, network requests, and navigation history',
|
|
144
|
+
description: 'Get the full debugging context for a report including console logs, network requests, and navigation history. Use compact=true for app_context summary only (no console/network/navigation).',
|
|
145
|
+
inputSchema: {
|
|
146
|
+
type: 'object',
|
|
147
|
+
properties: {
|
|
148
|
+
report_id: {
|
|
149
|
+
type: 'string',
|
|
150
|
+
description: 'The UUID of the report',
|
|
151
|
+
},
|
|
152
|
+
compact: {
|
|
153
|
+
type: 'boolean',
|
|
154
|
+
description: 'Compact mode: returns app_context only, skips console logs, network requests, and navigation history. (default: false)',
|
|
155
|
+
},
|
|
156
|
+
},
|
|
157
|
+
required: ['report_id'],
|
|
158
|
+
},
|
|
159
|
+
},
|
|
160
|
+
{
|
|
161
|
+
name: 'add_report_comment',
|
|
162
|
+
description: 'Add a comment/note to a bug report thread without changing its status. Use this for follow-up questions, investigation notes, or developer-tester communication.',
|
|
163
|
+
inputSchema: {
|
|
164
|
+
type: 'object',
|
|
165
|
+
properties: {
|
|
166
|
+
report_id: {
|
|
167
|
+
type: 'string',
|
|
168
|
+
description: 'The UUID of the report to comment on',
|
|
169
|
+
},
|
|
170
|
+
message: {
|
|
171
|
+
type: 'string',
|
|
172
|
+
description: 'The comment/note content',
|
|
173
|
+
},
|
|
174
|
+
author: {
|
|
175
|
+
type: 'string',
|
|
176
|
+
description: 'Optional author name (defaults to "Claude Code")',
|
|
177
|
+
},
|
|
178
|
+
},
|
|
179
|
+
required: ['report_id', 'message'],
|
|
180
|
+
},
|
|
181
|
+
},
|
|
182
|
+
{
|
|
183
|
+
name: 'get_report_comments',
|
|
184
|
+
description: 'Get all comments/notes on a bug report in chronological order. Returns the full discussion thread.',
|
|
145
185
|
inputSchema: {
|
|
146
186
|
type: 'object',
|
|
147
187
|
properties: {
|
|
@@ -153,6 +193,25 @@ const tools = [
|
|
|
153
193
|
required: ['report_id'],
|
|
154
194
|
},
|
|
155
195
|
},
|
|
196
|
+
{
|
|
197
|
+
name: 'resolve_conversations',
|
|
198
|
+
description: 'Mark one or more discussion threads as resolved (closed) or reopen them. Use this to close conversations after issues are addressed.',
|
|
199
|
+
inputSchema: {
|
|
200
|
+
type: 'object',
|
|
201
|
+
properties: {
|
|
202
|
+
thread_ids: {
|
|
203
|
+
type: 'array',
|
|
204
|
+
items: { type: 'string' },
|
|
205
|
+
description: 'UUIDs of the discussion threads to resolve/reopen',
|
|
206
|
+
},
|
|
207
|
+
resolved: {
|
|
208
|
+
type: 'boolean',
|
|
209
|
+
description: 'true to mark as resolved (default), false to reopen',
|
|
210
|
+
},
|
|
211
|
+
},
|
|
212
|
+
required: ['thread_ids'],
|
|
213
|
+
},
|
|
214
|
+
},
|
|
156
215
|
{
|
|
157
216
|
name: 'get_project_info',
|
|
158
217
|
description: 'Get project information including QA tracks, test case counts, and common bug patterns',
|
|
@@ -308,7 +367,7 @@ const tools = [
|
|
|
308
367
|
},
|
|
309
368
|
{
|
|
310
369
|
name: 'list_test_cases',
|
|
311
|
-
description: 'List all test cases in the project. Returns test_key, title, target_route, and other metadata. Use this to see existing tests before updating them.',
|
|
370
|
+
description: 'List all test cases in the project. Returns test_key, title, target_route, and other metadata. Use this to see existing tests before updating them. Use compact=true for id, test_key, title, and priority only (saves tokens).',
|
|
312
371
|
inputSchema: {
|
|
313
372
|
type: 'object',
|
|
314
373
|
properties: {
|
|
@@ -333,6 +392,10 @@ const tools = [
|
|
|
333
392
|
type: 'number',
|
|
334
393
|
description: 'Offset for pagination (default 0)',
|
|
335
394
|
},
|
|
395
|
+
compact: {
|
|
396
|
+
type: 'boolean',
|
|
397
|
+
description: 'Compact mode: returns id, test_key, title, and priority only. (default: false)',
|
|
398
|
+
},
|
|
336
399
|
},
|
|
337
400
|
},
|
|
338
401
|
},
|
|
@@ -441,7 +504,7 @@ const tools = [
|
|
|
441
504
|
},
|
|
442
505
|
notify_tester: {
|
|
443
506
|
type: 'boolean',
|
|
444
|
-
description: '
|
|
507
|
+
description: 'Notify the original tester about the fix with a message and verification task. Default: true. Set to false for silent resolve.',
|
|
445
508
|
},
|
|
446
509
|
},
|
|
447
510
|
required: ['report_id', 'commit_sha'],
|
|
@@ -683,17 +746,17 @@ const tools = [
|
|
|
683
746
|
},
|
|
684
747
|
{
|
|
685
748
|
name: 'get_coverage_matrix',
|
|
686
|
-
description: 'Get a comprehensive Route × Track coverage matrix showing test counts, pass rates, and execution data. Use this for a complete view of test coverage.',
|
|
749
|
+
description: 'Get a comprehensive Route × Track coverage matrix showing test counts, pass rates, and execution data. Use this for a complete view of test coverage. Execution data and bug counts are opt-in to save tokens.',
|
|
687
750
|
inputSchema: {
|
|
688
751
|
type: 'object',
|
|
689
752
|
properties: {
|
|
690
753
|
include_execution_data: {
|
|
691
754
|
type: 'boolean',
|
|
692
|
-
description: 'Include pass/fail rates and last execution times (default: true
|
|
755
|
+
description: 'Include pass/fail rates and last execution times (default: false). Set true when you need execution history.',
|
|
693
756
|
},
|
|
694
757
|
include_bug_counts: {
|
|
695
758
|
type: 'boolean',
|
|
696
|
-
description: 'Include open/critical bug counts per route (default: true
|
|
759
|
+
description: 'Include open/critical bug counts per route (default: false). Set true when you need bug context.',
|
|
697
760
|
},
|
|
698
761
|
},
|
|
699
762
|
},
|
|
@@ -927,6 +990,11 @@ const tools = [
|
|
|
927
990
|
enum: ['ios', 'android', 'web'],
|
|
928
991
|
description: 'Filter by platform support',
|
|
929
992
|
},
|
|
993
|
+
role: {
|
|
994
|
+
type: 'string',
|
|
995
|
+
enum: ['tester', 'feedback'],
|
|
996
|
+
description: 'Filter by role: "tester" for QA testers, "feedback" for feedback-only users (default: all)',
|
|
997
|
+
},
|
|
930
998
|
},
|
|
931
999
|
},
|
|
932
1000
|
},
|
|
@@ -1015,6 +1083,21 @@ const tools = [
|
|
|
1015
1083
|
required: ['tester_id', 'test_case_ids'],
|
|
1016
1084
|
},
|
|
1017
1085
|
},
|
|
1086
|
+
{
|
|
1087
|
+
name: 'unassign_tests',
|
|
1088
|
+
description: 'Remove one or more test assignments by assignment ID. Preserves the test case and its history — only the assignment link is deleted. Use list_test_assignments first to find assignment IDs. Max 50 per call.',
|
|
1089
|
+
inputSchema: {
|
|
1090
|
+
type: 'object',
|
|
1091
|
+
properties: {
|
|
1092
|
+
assignment_ids: {
|
|
1093
|
+
type: 'array',
|
|
1094
|
+
items: { type: 'string' },
|
|
1095
|
+
description: 'Array of test assignment UUIDs to remove (required, max 50)',
|
|
1096
|
+
},
|
|
1097
|
+
},
|
|
1098
|
+
required: ['assignment_ids'],
|
|
1099
|
+
},
|
|
1100
|
+
},
|
|
1018
1101
|
{
|
|
1019
1102
|
name: 'get_tester_workload',
|
|
1020
1103
|
description: 'View a specific tester\'s current workload — assignment counts by status and recent assignments.',
|
|
@@ -1057,6 +1140,11 @@ const tools = [
|
|
|
1057
1140
|
type: 'string',
|
|
1058
1141
|
description: 'Optional notes about the tester',
|
|
1059
1142
|
},
|
|
1143
|
+
role: {
|
|
1144
|
+
type: 'string',
|
|
1145
|
+
enum: ['tester', 'feedback'],
|
|
1146
|
+
description: 'Role: "tester" for QA testers (default), "feedback" for feedback-only users',
|
|
1147
|
+
},
|
|
1060
1148
|
},
|
|
1061
1149
|
required: ['name', 'email'],
|
|
1062
1150
|
},
|
|
@@ -1159,7 +1247,7 @@ const tools = [
|
|
|
1159
1247
|
},
|
|
1160
1248
|
{
|
|
1161
1249
|
name: 'export_test_results',
|
|
1162
|
-
description: 'Export test results for a specific test run as structured JSON — includes every assignment, tester, result, and duration.',
|
|
1250
|
+
description: 'Export test results for a specific test run as structured JSON — includes every assignment, tester, result, and duration. Use compact=true for summary only (no assignments array). Use limit to cap assignments returned.',
|
|
1163
1251
|
inputSchema: {
|
|
1164
1252
|
type: 'object',
|
|
1165
1253
|
properties: {
|
|
@@ -1167,6 +1255,14 @@ const tools = [
|
|
|
1167
1255
|
type: 'string',
|
|
1168
1256
|
description: 'UUID of the test run to export (required)',
|
|
1169
1257
|
},
|
|
1258
|
+
compact: {
|
|
1259
|
+
type: 'boolean',
|
|
1260
|
+
description: 'Compact mode: returns test run info + summary only, no assignments array. (default: false)',
|
|
1261
|
+
},
|
|
1262
|
+
limit: {
|
|
1263
|
+
type: 'number',
|
|
1264
|
+
description: 'Max assignments to return in full mode (default: 100, max: 500). Ignored when compact=true.',
|
|
1265
|
+
},
|
|
1170
1266
|
},
|
|
1171
1267
|
required: ['test_run_id'],
|
|
1172
1268
|
},
|
|
@@ -1215,7 +1311,610 @@ const tools = [
|
|
|
1215
1311
|
properties: {},
|
|
1216
1312
|
},
|
|
1217
1313
|
},
|
|
1314
|
+
// === TEST EXECUTION INTELLIGENCE ===
|
|
1315
|
+
{
|
|
1316
|
+
name: 'get_test_impact',
|
|
1317
|
+
description: 'Given changed files, identify which test cases are affected by mapping file paths to test case target routes.',
|
|
1318
|
+
inputSchema: {
|
|
1319
|
+
type: 'object',
|
|
1320
|
+
properties: {
|
|
1321
|
+
changed_files: {
|
|
1322
|
+
type: 'array',
|
|
1323
|
+
items: { type: 'string' },
|
|
1324
|
+
description: 'List of changed file paths (relative to project root)',
|
|
1325
|
+
},
|
|
1326
|
+
},
|
|
1327
|
+
required: ['changed_files'],
|
|
1328
|
+
},
|
|
1329
|
+
},
|
|
1330
|
+
{
|
|
1331
|
+
name: 'get_flaky_tests',
|
|
1332
|
+
description: 'Analyze test run history to identify tests with intermittent failure rates above a threshold.',
|
|
1333
|
+
inputSchema: {
|
|
1334
|
+
type: 'object',
|
|
1335
|
+
properties: {
|
|
1336
|
+
threshold: {
|
|
1337
|
+
type: 'number',
|
|
1338
|
+
description: 'Minimum flakiness rate to report (0-100, default: 5)',
|
|
1339
|
+
},
|
|
1340
|
+
limit: {
|
|
1341
|
+
type: 'number',
|
|
1342
|
+
description: 'Maximum results to return (default: 20)',
|
|
1343
|
+
},
|
|
1344
|
+
},
|
|
1345
|
+
},
|
|
1346
|
+
},
|
|
1347
|
+
{
|
|
1348
|
+
name: 'assess_test_quality',
|
|
1349
|
+
description: 'Analyze test case steps for weak patterns: vague assertions, missing edge cases, no negative testing, generic descriptions.',
|
|
1350
|
+
inputSchema: {
|
|
1351
|
+
type: 'object',
|
|
1352
|
+
properties: {
|
|
1353
|
+
test_case_ids: {
|
|
1354
|
+
type: 'array',
|
|
1355
|
+
items: { type: 'string' },
|
|
1356
|
+
description: 'Specific test case IDs to assess. If omitted, assesses recent test cases.',
|
|
1357
|
+
},
|
|
1358
|
+
limit: {
|
|
1359
|
+
type: 'number',
|
|
1360
|
+
description: 'Maximum test cases to assess (default: 20)',
|
|
1361
|
+
},
|
|
1362
|
+
},
|
|
1363
|
+
},
|
|
1364
|
+
},
|
|
1365
|
+
{
|
|
1366
|
+
name: 'get_test_execution_summary',
|
|
1367
|
+
description: 'Aggregate test execution metrics: pass rate, completion rate, most-failed tests, fastest/slowest tests.',
|
|
1368
|
+
inputSchema: {
|
|
1369
|
+
type: 'object',
|
|
1370
|
+
properties: {
|
|
1371
|
+
days: {
|
|
1372
|
+
type: 'number',
|
|
1373
|
+
description: 'Number of days to analyze (default: 30)',
|
|
1374
|
+
},
|
|
1375
|
+
},
|
|
1376
|
+
},
|
|
1377
|
+
},
|
|
1378
|
+
{
|
|
1379
|
+
name: 'check_test_freshness',
|
|
1380
|
+
description: 'Identify test cases that have not been updated since their target code was modified.',
|
|
1381
|
+
inputSchema: {
|
|
1382
|
+
type: 'object',
|
|
1383
|
+
properties: {
|
|
1384
|
+
limit: {
|
|
1385
|
+
type: 'number',
|
|
1386
|
+
description: 'Maximum results to return (default: 20)',
|
|
1387
|
+
},
|
|
1388
|
+
},
|
|
1389
|
+
},
|
|
1390
|
+
},
|
|
1391
|
+
{
|
|
1392
|
+
name: 'get_untested_changes',
|
|
1393
|
+
description: 'Given recent commits or changed files, find code changes with no corresponding test coverage in BugBear.',
|
|
1394
|
+
inputSchema: {
|
|
1395
|
+
type: 'object',
|
|
1396
|
+
properties: {
|
|
1397
|
+
changed_files: {
|
|
1398
|
+
type: 'array',
|
|
1399
|
+
items: { type: 'string' },
|
|
1400
|
+
description: 'List of changed file paths. If omitted, uses git diff against main.',
|
|
1401
|
+
},
|
|
1402
|
+
},
|
|
1403
|
+
},
|
|
1404
|
+
},
|
|
1405
|
+
// === AUTO-MONITORING TOOLS ===
|
|
1406
|
+
{
|
|
1407
|
+
name: 'get_auto_detected_issues',
|
|
1408
|
+
description: 'Get auto-detected monitoring issues grouped by error fingerprint. Shows recurring crashes, API failures, and rage clicks with frequency and user impact.',
|
|
1409
|
+
inputSchema: {
|
|
1410
|
+
type: 'object',
|
|
1411
|
+
properties: {
|
|
1412
|
+
source: {
|
|
1413
|
+
type: 'string',
|
|
1414
|
+
enum: ['auto_crash', 'auto_api', 'auto_rage_click'],
|
|
1415
|
+
description: 'Filter by source type',
|
|
1416
|
+
},
|
|
1417
|
+
min_occurrences: {
|
|
1418
|
+
type: 'number',
|
|
1419
|
+
description: 'Min occurrence count (default: 1)',
|
|
1420
|
+
},
|
|
1421
|
+
since: {
|
|
1422
|
+
type: 'string',
|
|
1423
|
+
description: 'ISO date — only issues after this date (default: 7 days ago)',
|
|
1424
|
+
},
|
|
1425
|
+
limit: {
|
|
1426
|
+
type: 'number',
|
|
1427
|
+
description: 'Max results (default: 20)',
|
|
1428
|
+
},
|
|
1429
|
+
compact: {
|
|
1430
|
+
type: 'boolean',
|
|
1431
|
+
description: 'Compact mode: fingerprint, source, count only',
|
|
1432
|
+
},
|
|
1433
|
+
},
|
|
1434
|
+
},
|
|
1435
|
+
},
|
|
1436
|
+
{
|
|
1437
|
+
name: 'generate_tests_from_errors',
|
|
1438
|
+
description: 'Suggest QA test cases from auto-detected error patterns. Returns structured suggestions — does NOT auto-create test cases.',
|
|
1439
|
+
inputSchema: {
|
|
1440
|
+
type: 'object',
|
|
1441
|
+
properties: {
|
|
1442
|
+
report_ids: {
|
|
1443
|
+
type: 'array',
|
|
1444
|
+
items: { type: 'string' },
|
|
1445
|
+
description: 'Specific report IDs. If omitted, uses top uncovered errors.',
|
|
1446
|
+
},
|
|
1447
|
+
limit: {
|
|
1448
|
+
type: 'number',
|
|
1449
|
+
description: 'Max suggestions (default: 5)',
|
|
1450
|
+
},
|
|
1451
|
+
},
|
|
1452
|
+
},
|
|
1453
|
+
},
|
|
1218
1454
|
];
|
|
1455
|
+
// === TEST EXECUTION INTELLIGENCE ===
|
|
1456
|
+
async function getTestImpact(args) {
|
|
1457
|
+
const projectId = requireProject();
|
|
1458
|
+
const changedFiles = args.changed_files || [];
|
|
1459
|
+
if (changedFiles.length === 0) {
|
|
1460
|
+
return { affectedTests: [], message: 'No changed files provided.' };
|
|
1461
|
+
}
|
|
1462
|
+
// Get all test cases for the project with their target routes
|
|
1463
|
+
const { data: testCases, error } = await supabase
|
|
1464
|
+
.from('test_cases')
|
|
1465
|
+
.select('id, title, target_route, qa_track, priority')
|
|
1466
|
+
.eq('project_id', projectId);
|
|
1467
|
+
if (error)
|
|
1468
|
+
return { error: error.message };
|
|
1469
|
+
if (!testCases || testCases.length === 0) {
|
|
1470
|
+
return { affectedTests: [], message: 'No test cases found for this project.' };
|
|
1471
|
+
}
|
|
1472
|
+
// Map changed files to affected test cases
|
|
1473
|
+
const affected = [];
|
|
1474
|
+
for (const tc of testCases) {
|
|
1475
|
+
const route = tc.target_route || '';
|
|
1476
|
+
const matchedFiles = changedFiles.filter(f => {
|
|
1477
|
+
// Match file path to route (e.g., src/app/api/tasks/route.ts -> /api/tasks)
|
|
1478
|
+
const normalized = f.replace(/\\/g, '/');
|
|
1479
|
+
const routeParts = route.split('/').filter(Boolean);
|
|
1480
|
+
return routeParts.some((part) => normalized.includes(part)) || normalized.includes(route.replace(/\//g, '/'));
|
|
1481
|
+
});
|
|
1482
|
+
if (matchedFiles.length > 0) {
|
|
1483
|
+
affected.push({
|
|
1484
|
+
testId: tc.id,
|
|
1485
|
+
title: tc.title,
|
|
1486
|
+
targetRoute: route,
|
|
1487
|
+
matchedFiles,
|
|
1488
|
+
qaTrack: tc.qa_track,
|
|
1489
|
+
});
|
|
1490
|
+
}
|
|
1491
|
+
}
|
|
1492
|
+
return {
|
|
1493
|
+
affectedTests: affected,
|
|
1494
|
+
totalTestCases: testCases.length,
|
|
1495
|
+
affectedCount: affected.length,
|
|
1496
|
+
changedFileCount: changedFiles.length,
|
|
1497
|
+
};
|
|
1498
|
+
}
|
|
1499
|
+
async function getFlakyTests(args) {
|
|
1500
|
+
const projectId = requireProject();
|
|
1501
|
+
const threshold = args.threshold || 5;
|
|
1502
|
+
const limit = args.limit || 20;
|
|
1503
|
+
// Get test results grouped by test case
|
|
1504
|
+
const { data: results, error } = await supabase
|
|
1505
|
+
.from('test_results')
|
|
1506
|
+
.select('test_case_id, status, test_cases!inner(title, target_route, qa_track)')
|
|
1507
|
+
.eq('test_cases.project_id', projectId)
|
|
1508
|
+
.order('created_at', { ascending: false })
|
|
1509
|
+
.limit(5000);
|
|
1510
|
+
if (error)
|
|
1511
|
+
return { error: error.message };
|
|
1512
|
+
if (!results || results.length === 0) {
|
|
1513
|
+
return { flakyTests: [], message: 'No test results found.' };
|
|
1514
|
+
}
|
|
1515
|
+
// Group by test case and calculate flakiness
|
|
1516
|
+
const testStats = {};
|
|
1517
|
+
for (const r of results) {
|
|
1518
|
+
const id = r.test_case_id;
|
|
1519
|
+
if (!testStats[id]) {
|
|
1520
|
+
const tc = r.test_cases;
|
|
1521
|
+
testStats[id] = { passes: 0, fails: 0, total: 0, title: tc?.title || '', route: tc?.target_route || '', track: tc?.qa_track || '' };
|
|
1522
|
+
}
|
|
1523
|
+
testStats[id].total++;
|
|
1524
|
+
if (r.status === 'pass')
|
|
1525
|
+
testStats[id].passes++;
|
|
1526
|
+
else if (r.status === 'fail')
|
|
1527
|
+
testStats[id].fails++;
|
|
1528
|
+
}
|
|
1529
|
+
// Find flaky tests (have both passes and fails, with fail rate above threshold)
|
|
1530
|
+
const flaky = Object.entries(testStats)
|
|
1531
|
+
.filter(([, stats]) => {
|
|
1532
|
+
if (stats.total < 3)
|
|
1533
|
+
return false; // Need enough data
|
|
1534
|
+
const failRate = (stats.fails / stats.total) * 100;
|
|
1535
|
+
const passRate = (stats.passes / stats.total) * 100;
|
|
1536
|
+
return failRate >= threshold && passRate > 0; // Has both passes and fails
|
|
1537
|
+
})
|
|
1538
|
+
.map(([id, stats]) => ({
|
|
1539
|
+
testCaseId: id,
|
|
1540
|
+
title: stats.title,
|
|
1541
|
+
targetRoute: stats.route,
|
|
1542
|
+
qaTrack: stats.track,
|
|
1543
|
+
totalRuns: stats.total,
|
|
1544
|
+
failRate: Math.round((stats.fails / stats.total) * 100),
|
|
1545
|
+
passRate: Math.round((stats.passes / stats.total) * 100),
|
|
1546
|
+
}))
|
|
1547
|
+
.sort((a, b) => b.failRate - a.failRate)
|
|
1548
|
+
.slice(0, limit);
|
|
1549
|
+
return {
|
|
1550
|
+
flakyTests: flaky,
|
|
1551
|
+
totalAnalyzed: Object.keys(testStats).length,
|
|
1552
|
+
flakyCount: flaky.length,
|
|
1553
|
+
threshold,
|
|
1554
|
+
};
|
|
1555
|
+
}
|
|
1556
|
+
async function assessTestQuality(args) {
|
|
1557
|
+
const projectId = requireProject();
|
|
1558
|
+
const limit = args.limit || 20;
|
|
1559
|
+
let query = supabase
|
|
1560
|
+
.from('test_cases')
|
|
1561
|
+
.select('id, title, steps, target_route, qa_track, priority')
|
|
1562
|
+
.eq('project_id', projectId)
|
|
1563
|
+
.limit(limit);
|
|
1564
|
+
if (args.test_case_ids && args.test_case_ids.length > 0) {
|
|
1565
|
+
query = query.in('id', args.test_case_ids);
|
|
1566
|
+
}
|
|
1567
|
+
const { data: testCases, error } = await query;
|
|
1568
|
+
if (error)
|
|
1569
|
+
return { error: error.message };
|
|
1570
|
+
if (!testCases || testCases.length === 0) {
|
|
1571
|
+
return { assessments: [], message: 'No test cases found.' };
|
|
1572
|
+
}
|
|
1573
|
+
const assessments = testCases.map(tc => {
|
|
1574
|
+
const issues = [];
|
|
1575
|
+
const steps = tc.steps || [];
|
|
1576
|
+
// Check for weak patterns
|
|
1577
|
+
if (steps.length < 2) {
|
|
1578
|
+
issues.push('Too few steps — test may not cover the full flow');
|
|
1579
|
+
}
|
|
1580
|
+
const allStepsText = steps.map((s) => (typeof s === 'string' ? s : s.action || s.description || '')).join(' ');
|
|
1581
|
+
// Vague assertions
|
|
1582
|
+
if (/should work|looks good|is correct|verify it works/i.test(allStepsText)) {
|
|
1583
|
+
issues.push('Vague assertions detected — use specific expected outcomes');
|
|
1584
|
+
}
|
|
1585
|
+
// Missing edge cases
|
|
1586
|
+
if (!/error|invalid|empty|missing|unauthorized|forbidden|404|500/i.test(allStepsText)) {
|
|
1587
|
+
issues.push('No negative/error test cases — add edge case testing');
|
|
1588
|
+
}
|
|
1589
|
+
// Generic descriptions
|
|
1590
|
+
if (/test the|check the|verify the/i.test(tc.title) && tc.title.length < 30) {
|
|
1591
|
+
issues.push('Generic test title — be more specific about what is being tested');
|
|
1592
|
+
}
|
|
1593
|
+
// No specific UI elements referenced
|
|
1594
|
+
if (!/button|input|form|modal|dropdown|select|click|type|enter|submit/i.test(allStepsText)) {
|
|
1595
|
+
issues.push('No specific UI elements referenced — steps may be too abstract');
|
|
1596
|
+
}
|
|
1597
|
+
const quality = issues.length === 0 ? 'good' : issues.length <= 2 ? 'needs-improvement' : 'poor';
|
|
1598
|
+
return {
|
|
1599
|
+
testCaseId: tc.id,
|
|
1600
|
+
title: tc.title,
|
|
1601
|
+
targetRoute: tc.target_route,
|
|
1602
|
+
stepCount: steps.length,
|
|
1603
|
+
quality,
|
|
1604
|
+
issues,
|
|
1605
|
+
};
|
|
1606
|
+
});
|
|
1607
|
+
const qualityCounts = {
|
|
1608
|
+
good: assessments.filter(a => a.quality === 'good').length,
|
|
1609
|
+
needsImprovement: assessments.filter(a => a.quality === 'needs-improvement').length,
|
|
1610
|
+
poor: assessments.filter(a => a.quality === 'poor').length,
|
|
1611
|
+
};
|
|
1612
|
+
return {
|
|
1613
|
+
assessments,
|
|
1614
|
+
summary: qualityCounts,
|
|
1615
|
+
totalAssessed: assessments.length,
|
|
1616
|
+
};
|
|
1617
|
+
}
|
|
1618
|
+
async function getTestExecutionSummary(args) {
|
|
1619
|
+
const projectId = requireProject();
|
|
1620
|
+
const days = args.days || 30;
|
|
1621
|
+
const since = new Date(Date.now() - days * 24 * 60 * 60 * 1000).toISOString();
|
|
1622
|
+
// Get test results
|
|
1623
|
+
const { data: results, error } = await supabase
|
|
1624
|
+
.from('test_results')
|
|
1625
|
+
.select('test_case_id, status, duration_ms, created_at, test_cases!inner(title, target_route)')
|
|
1626
|
+
.eq('test_cases.project_id', projectId)
|
|
1627
|
+
.gte('created_at', since)
|
|
1628
|
+
.order('created_at', { ascending: false });
|
|
1629
|
+
if (error)
|
|
1630
|
+
return { error: error.message };
|
|
1631
|
+
if (!results || results.length === 0) {
|
|
1632
|
+
return { message: `No test results found in the last ${days} days.` };
|
|
1633
|
+
}
|
|
1634
|
+
const totalRuns = results.length;
|
|
1635
|
+
const passed = results.filter(r => r.status === 'pass').length;
|
|
1636
|
+
const failed = results.filter(r => r.status === 'fail').length;
|
|
1637
|
+
const blocked = results.filter(r => r.status === 'blocked').length;
|
|
1638
|
+
// Most failed tests
|
|
1639
|
+
const failCounts = {};
|
|
1640
|
+
for (const r of results.filter(r => r.status === 'fail')) {
|
|
1641
|
+
const id = r.test_case_id;
|
|
1642
|
+
const tc = r.test_cases;
|
|
1643
|
+
if (!failCounts[id]) {
|
|
1644
|
+
failCounts[id] = { count: 0, title: tc?.title || '', route: tc?.target_route || '' };
|
|
1645
|
+
}
|
|
1646
|
+
failCounts[id].count++;
|
|
1647
|
+
}
|
|
1648
|
+
const mostFailed = Object.entries(failCounts)
|
|
1649
|
+
.sort((a, b) => b[1].count - a[1].count)
|
|
1650
|
+
.slice(0, 5)
|
|
1651
|
+
.map(([id, data]) => ({ testCaseId: id, ...data }));
|
|
1652
|
+
// Duration stats
|
|
1653
|
+
const durations = results.filter(r => r.duration_ms).map(r => r.duration_ms);
|
|
1654
|
+
const avgDuration = durations.length > 0 ? Math.round(durations.reduce((a, b) => a + b, 0) / durations.length) : 0;
|
|
1655
|
+
const maxDuration = durations.length > 0 ? Math.max(...durations) : 0;
|
|
1656
|
+
return {
|
|
1657
|
+
period: `${days} days`,
|
|
1658
|
+
totalRuns,
|
|
1659
|
+
passRate: Math.round((passed / totalRuns) * 100),
|
|
1660
|
+
failRate: Math.round((failed / totalRuns) * 100),
|
|
1661
|
+
blockedCount: blocked,
|
|
1662
|
+
averageDurationMs: avgDuration,
|
|
1663
|
+
maxDurationMs: maxDuration,
|
|
1664
|
+
mostFailed,
|
|
1665
|
+
uniqueTestsCovered: new Set(results.map(r => r.test_case_id)).size,
|
|
1666
|
+
};
|
|
1667
|
+
}
|
|
1668
|
+
async function checkTestFreshness(args) {
|
|
1669
|
+
const projectId = requireProject();
|
|
1670
|
+
const limit = args.limit || 20;
|
|
1671
|
+
// Get test cases with their last update and last result
|
|
1672
|
+
const { data: testCases, error } = await supabase
|
|
1673
|
+
.from('test_cases')
|
|
1674
|
+
.select('id, title, target_route, updated_at, created_at')
|
|
1675
|
+
.eq('project_id', projectId)
|
|
1676
|
+
.order('updated_at', { ascending: true })
|
|
1677
|
+
.limit(limit);
|
|
1678
|
+
if (error)
|
|
1679
|
+
return { error: error.message };
|
|
1680
|
+
if (!testCases || testCases.length === 0) {
|
|
1681
|
+
return { staleTests: [], message: 'No test cases found.' };
|
|
1682
|
+
}
|
|
1683
|
+
const thirtyDaysAgo = new Date(Date.now() - 30 * 24 * 60 * 60 * 1000).toISOString();
|
|
1684
|
+
const stale = testCases
|
|
1685
|
+
.filter(tc => tc.updated_at < thirtyDaysAgo)
|
|
1686
|
+
.map(tc => ({
|
|
1687
|
+
testCaseId: tc.id,
|
|
1688
|
+
title: tc.title,
|
|
1689
|
+
targetRoute: tc.target_route,
|
|
1690
|
+
lastUpdated: tc.updated_at,
|
|
1691
|
+
daysSinceUpdate: Math.round((Date.now() - new Date(tc.updated_at).getTime()) / (24 * 60 * 60 * 1000)),
|
|
1692
|
+
}));
|
|
1693
|
+
return {
|
|
1694
|
+
staleTests: stale,
|
|
1695
|
+
totalTestCases: testCases.length,
|
|
1696
|
+
staleCount: stale.length,
|
|
1697
|
+
stalenessThreshold: '30 days',
|
|
1698
|
+
};
|
|
1699
|
+
}
|
|
1700
|
+
async function getUntestedChanges(args) {
|
|
1701
|
+
const projectId = requireProject();
|
|
1702
|
+
// Get all test cases to understand what's covered
|
|
1703
|
+
const { data: testCases, error } = await supabase
|
|
1704
|
+
.from('test_cases')
|
|
1705
|
+
.select('id, title, target_route')
|
|
1706
|
+
.eq('project_id', projectId);
|
|
1707
|
+
if (error)
|
|
1708
|
+
return { error: error.message };
|
|
1709
|
+
const coveredRoutes = new Set((testCases || []).map(tc => tc.target_route).filter(Boolean));
|
|
1710
|
+
// If changed_files provided, check coverage
|
|
1711
|
+
const changedFiles = args.changed_files || [];
|
|
1712
|
+
if (changedFiles.length === 0) {
|
|
1713
|
+
return {
|
|
1714
|
+
message: 'No changed files provided. Pass changed_files to check coverage.',
|
|
1715
|
+
totalCoveredRoutes: coveredRoutes.size,
|
|
1716
|
+
};
|
|
1717
|
+
}
|
|
1718
|
+
// Map changed files to routes and check coverage
|
|
1719
|
+
const untested = [];
|
|
1720
|
+
for (const file of changedFiles) {
|
|
1721
|
+
const normalized = file.replace(/\\/g, '/');
|
|
1722
|
+
// Extract route-like path from file
|
|
1723
|
+
let inferredRoute = '';
|
|
1724
|
+
// Next.js app router: app/api/tasks/route.ts -> /api/tasks
|
|
1725
|
+
const appRouterMatch = normalized.match(/app\/(api\/[^/]+(?:\/[^/]+)*?)\/route\.\w+$/);
|
|
1726
|
+
if (appRouterMatch) {
|
|
1727
|
+
inferredRoute = '/' + appRouterMatch[1];
|
|
1728
|
+
}
|
|
1729
|
+
// Pages router: pages/api/tasks.ts -> /api/tasks
|
|
1730
|
+
const pagesMatch = normalized.match(/pages\/(api\/[^.]+)\.\w+$/);
|
|
1731
|
+
if (!inferredRoute && pagesMatch) {
|
|
1732
|
+
inferredRoute = '/' + pagesMatch[1];
|
|
1733
|
+
}
|
|
1734
|
+
// Component files
|
|
1735
|
+
const componentMatch = normalized.match(/(?:components|screens|pages)\/([^.]+)\.\w+$/);
|
|
1736
|
+
if (!inferredRoute && componentMatch) {
|
|
1737
|
+
inferredRoute = '/' + componentMatch[1].replace(/\\/g, '/');
|
|
1738
|
+
}
|
|
1739
|
+
if (inferredRoute && !coveredRoutes.has(inferredRoute)) {
|
|
1740
|
+
untested.push({
|
|
1741
|
+
file,
|
|
1742
|
+
inferredRoute,
|
|
1743
|
+
reason: 'No test cases cover this route',
|
|
1744
|
+
});
|
|
1745
|
+
}
|
|
1746
|
+
else if (!inferredRoute) {
|
|
1747
|
+
// Can't map to a route — flag as potentially untested
|
|
1748
|
+
untested.push({
|
|
1749
|
+
file,
|
|
1750
|
+
inferredRoute: 'unknown',
|
|
1751
|
+
reason: 'Could not map file to a testable route',
|
|
1752
|
+
});
|
|
1753
|
+
}
|
|
1754
|
+
}
|
|
1755
|
+
return {
|
|
1756
|
+
untestedChanges: untested,
|
|
1757
|
+
changedFileCount: changedFiles.length,
|
|
1758
|
+
untestedCount: untested.length,
|
|
1759
|
+
coveredRoutes: coveredRoutes.size,
|
|
1760
|
+
};
|
|
1761
|
+
}
|
|
1762
|
+
// === AUTO-MONITORING HANDLERS ===
|
|
1763
|
+
async function getAutoDetectedIssues(args) {
|
|
1764
|
+
const projectId = requireProject();
|
|
1765
|
+
const since = args.since || new Date(Date.now() - 7 * 24 * 60 * 60 * 1000).toISOString();
|
|
1766
|
+
const limit = args.limit || 20;
|
|
1767
|
+
let query = supabase
|
|
1768
|
+
.from('reports')
|
|
1769
|
+
.select('id, error_fingerprint, report_source, title, severity, reporter_id, sentry_event_id, created_at, app_context')
|
|
1770
|
+
.eq('project_id', projectId)
|
|
1771
|
+
.neq('report_source', 'manual')
|
|
1772
|
+
.not('error_fingerprint', 'is', null)
|
|
1773
|
+
.gte('created_at', since)
|
|
1774
|
+
.order('created_at', { ascending: false });
|
|
1775
|
+
if (args.source) {
|
|
1776
|
+
query = query.eq('report_source', args.source);
|
|
1777
|
+
}
|
|
1778
|
+
const { data, error } = await query;
|
|
1779
|
+
if (error)
|
|
1780
|
+
return { error: error.message };
|
|
1781
|
+
if (!data || data.length === 0)
|
|
1782
|
+
return { issues: [], total: 0 };
|
|
1783
|
+
// Group by fingerprint
|
|
1784
|
+
const grouped = new Map();
|
|
1785
|
+
for (const report of data) {
|
|
1786
|
+
const fp = report.error_fingerprint;
|
|
1787
|
+
if (!grouped.has(fp))
|
|
1788
|
+
grouped.set(fp, []);
|
|
1789
|
+
grouped.get(fp).push(report);
|
|
1790
|
+
}
|
|
1791
|
+
// Build issue summaries
|
|
1792
|
+
const issues = Array.from(grouped.entries())
|
|
1793
|
+
.map(([fingerprint, reports]) => {
|
|
1794
|
+
const uniqueReporters = new Set(reports.map(r => r.reporter_id));
|
|
1795
|
+
const sorted = reports.sort((a, b) => new Date(a.created_at).getTime() - new Date(b.created_at).getTime());
|
|
1796
|
+
const first = sorted[0];
|
|
1797
|
+
const last = sorted[sorted.length - 1];
|
|
1798
|
+
const route = first.app_context?.currentRoute || 'unknown';
|
|
1799
|
+
return {
|
|
1800
|
+
fingerprint,
|
|
1801
|
+
source: first.report_source,
|
|
1802
|
+
message: first.title,
|
|
1803
|
+
route,
|
|
1804
|
+
occurrence_count: reports.length,
|
|
1805
|
+
affected_users: uniqueReporters.size,
|
|
1806
|
+
first_seen: first.created_at,
|
|
1807
|
+
last_seen: last.created_at,
|
|
1808
|
+
severity: first.severity,
|
|
1809
|
+
has_sentry_link: reports.some(r => r.sentry_event_id != null),
|
|
1810
|
+
sample_report_id: first.id,
|
|
1811
|
+
};
|
|
1812
|
+
})
|
|
1813
|
+
.filter(issue => issue.occurrence_count >= (args.min_occurrences || 1))
|
|
1814
|
+
.sort((a, b) => b.occurrence_count - a.occurrence_count)
|
|
1815
|
+
.slice(0, limit);
|
|
1816
|
+
if (args.compact) {
|
|
1817
|
+
return {
|
|
1818
|
+
issues: issues.map(i => ({
|
|
1819
|
+
fingerprint: i.fingerprint,
|
|
1820
|
+
source: i.source,
|
|
1821
|
+
count: i.occurrence_count,
|
|
1822
|
+
users: i.affected_users,
|
|
1823
|
+
severity: i.severity,
|
|
1824
|
+
})),
|
|
1825
|
+
total: issues.length,
|
|
1826
|
+
};
|
|
1827
|
+
}
|
|
1828
|
+
return { issues, total: issues.length };
|
|
1829
|
+
}
|
|
1830
|
+
/**
 * Generate suggested regression/test-case stubs from auto-detected error reports.
 *
 * @param {object} args
 * @param {string[]} [args.report_ids] - Explicit report UUIDs to build suggestions
 *   from. When omitted, the most recent auto-detected (non-manual), fingerprinted
 *   reports are used, deduplicated by fingerprint.
 * @param {number} [args.limit=5] - Max suggestions when auto-selecting reports.
 * @returns {Promise<{suggestions: object[]} | {error: string}>}
 */
async function generateTestsFromErrors(args) {
    const projectId = requireProject();
    const limit = args.limit || 5;
    let reports;
    if (args.report_ids?.length) {
        // Validate all UUIDs up front so we never hit the DB with bad input.
        for (const id of args.report_ids) {
            if (!isValidUUID(id)) {
                return { error: `Invalid report_id format: ${id}` };
            }
        }
        const { data, error } = await supabase
            .from('reports')
            .select('id, title, report_source, severity, app_context, error_fingerprint, description')
            .eq('project_id', projectId)
            .in('id', args.report_ids);
        if (error)
            return { error: error.message };
        reports = data;
    }
    else {
        // Get top uncovered auto-detected errors
        const { data, error } = await supabase
            .from('reports')
            .select('id, title, report_source, severity, app_context, error_fingerprint, description')
            .eq('project_id', projectId)
            .neq('report_source', 'manual')
            .not('error_fingerprint', 'is', null)
            .order('created_at', { ascending: false })
            .limit(50);
        if (error)
            return { error: error.message };
        // Deduplicate by fingerprint, keep first occurrence
        const seen = new Set();
        reports = (data || []).filter(r => {
            if (!r.error_fingerprint || seen.has(r.error_fingerprint))
                return false;
            seen.add(r.error_fingerprint);
            return true;
        }).slice(0, limit);
    }
    if (!reports?.length)
        return { suggestions: [] };
    const suggestions = reports.map(report => {
        const route = report.app_context?.currentRoute || '/unknown';
        const source = report.report_source;
        // critical and high both map to P1; everything else is P2.
        const priority = (report.severity === 'critical' || report.severity === 'high') ? 'P1' : 'P2';
        let suggestedSteps;
        if (source === 'auto_crash') {
            suggestedSteps = [
                `Navigate to ${route}`,
                'Reproduce the action that triggered the crash',
                'Verify the page does not throw an unhandled error',
                'Verify error boundary displays a user-friendly message if error occurs',
            ];
        }
        else if (source === 'auto_api') {
            const statusCode = report.app_context?.custom?.statusCode || 'error';
            const method = report.app_context?.custom?.requestMethod || 'API';
            suggestedSteps = [
                `Navigate to ${route}`,
                `Trigger the ${method} request that returned ${statusCode}`,
                'Verify the request succeeds or displays an appropriate error message',
                'Verify no data corruption occurs on failure',
            ];
        }
        else {
            // rage_click or sentry_sync
            const target = report.app_context?.custom?.targetSelector || 'the element';
            suggestedSteps = [
                `Navigate to ${route}`,
                `Click on ${target}`,
                'Verify the element responds to interaction',
                'Verify loading state is shown if action takes time',
            ];
        }
        return {
            title: `Test: ${report.title?.replace('[Auto] ', '') || 'Auto-detected issue'}`,
            track: source === 'auto_crash' ? 'Stability' : source === 'auto_api' ? 'API' : 'UX',
            priority,
            rationale: `Auto-detected ${source?.replace('auto_', '')} on ${route}`,
            suggested_steps: suggestedSteps,
            source_report_id: report.id,
            route,
        };
    });
    return { suggestions };
}
|
|
1219
1918
|
// === Project management handlers ===
|
|
1220
1919
|
async function listProjects() {
|
|
1221
1920
|
const { data, error } = await supabase
|
|
@@ -1320,7 +2019,7 @@ async function getReport(args) {
|
|
|
1320
2019
|
app_context: data.app_context,
|
|
1321
2020
|
device_info: data.device_info,
|
|
1322
2021
|
navigation_history: data.navigation_history,
|
|
1323
|
-
|
|
2022
|
+
screenshot_urls: data.screenshot_urls,
|
|
1324
2023
|
created_at: data.created_at,
|
|
1325
2024
|
reporter: data.tester ? {
|
|
1326
2025
|
name: data.tester.name,
|
|
@@ -1395,22 +2094,118 @@ async function getReportContext(args) {
|
|
|
1395
2094
|
}
|
|
1396
2095
|
const { data, error } = await supabase
|
|
1397
2096
|
.from('reports')
|
|
1398
|
-
.select('app_context, device_info, navigation_history, enhanced_context')
|
|
2097
|
+
.select('app_context, device_info, navigation_history, enhanced_context, screenshot_urls')
|
|
1399
2098
|
.eq('id', args.report_id)
|
|
1400
2099
|
.eq('project_id', currentProjectId) // Security: ensure report belongs to this project
|
|
1401
2100
|
.single();
|
|
1402
2101
|
if (error) {
|
|
1403
2102
|
return { error: error.message };
|
|
1404
2103
|
}
|
|
2104
|
+
// Compact: return app_context only (skip console/network/navigation)
|
|
2105
|
+
if (args.compact === true) {
|
|
2106
|
+
return {
|
|
2107
|
+
context: {
|
|
2108
|
+
app_context: data.app_context,
|
|
2109
|
+
screenshot_urls: data.screenshot_urls,
|
|
2110
|
+
},
|
|
2111
|
+
};
|
|
2112
|
+
}
|
|
1405
2113
|
return {
|
|
1406
2114
|
context: {
|
|
1407
2115
|
app_context: data.app_context,
|
|
1408
2116
|
device_info: data.device_info,
|
|
1409
2117
|
navigation_history: data.navigation_history,
|
|
1410
2118
|
enhanced_context: data.enhanced_context || {},
|
|
2119
|
+
screenshot_urls: data.screenshot_urls,
|
|
1411
2120
|
},
|
|
1412
2121
|
};
|
|
1413
2122
|
}
|
|
2123
|
+
/**
 * Add a comment to a report's discussion thread without touching the report's
 * status. Lazily creates a 'report'-type discussion thread when one does not
 * exist yet for this report.
 *
 * @param {object} args
 * @param {string} args.report_id - UUID of the report to comment on.
 * @param {string} args.message - Comment body (must be non-blank).
 * @param {string} [args.author] - Display name for the sender (defaults to 'Claude Code').
 * @returns {Promise<{success: true, comment: object, message: string} | {error: string}>}
 */
async function addReportComment(args) {
    if (!isValidUUID(args.report_id))
        return { error: 'Invalid report_id format' };
    const body = args.message?.trim();
    if (!body)
        return { error: 'Message is required' };
    // Confirm the report exists within the current project before writing anything.
    const { data: report } = await supabase
        .from('reports')
        .select('id')
        .eq('id', args.report_id)
        .eq('project_id', currentProjectId)
        .single();
    if (!report)
        return { error: 'Report not found' };
    // Resolve the discussion thread: reuse an existing one or create a new one.
    const { data: existingThread } = await supabase
        .from('discussion_threads')
        .select('id')
        .eq('project_id', currentProjectId)
        .eq('report_id', args.report_id)
        .eq('thread_type', 'report')
        .limit(1)
        .single();
    let threadId = existingThread?.id;
    if (!threadId) {
        const freshId = crypto.randomUUID();
        const { error: threadErr } = await supabase
            .from('discussion_threads')
            .insert({
                id: freshId,
                project_id: currentProjectId,
                report_id: args.report_id,
                thread_type: 'report',
                subject: 'Bug Report Discussion',
                audience: 'all',
                priority: 'normal',
                created_by_admin: true,
                last_message_at: new Date().toISOString(),
            });
        if (threadErr)
            return { error: `Failed to create thread: ${threadErr.message}` };
        threadId = freshId;
    }
    const authorName = args.author || 'Claude Code';
    const { data: msg, error: msgErr } = await supabase
        .from('discussion_messages')
        .insert({
            thread_id: threadId,
            sender_type: 'admin',
            sender_name: authorName,
            content: body,
            content_type: 'text',
        })
        .select('id, content, created_at')
        .single();
    if (msgErr)
        return { error: `Failed to add comment: ${msgErr.message}` };
    return {
        success: true,
        comment: {
            id: msg.id,
            thread_id: threadId,
            content: msg.content,
            author: authorName,
            created_at: msg.created_at,
        },
        message: 'Comment added to report',
    };
}
|
|
2162
|
+
/**
 * Fetch all discussion messages attached to a report, across every thread
 * linked to it, ordered oldest-first.
 *
 * @param {object} args
 * @param {string} args.report_id - UUID of the report.
 * @returns {Promise<{comments: object[], total: number, message?: string} | {error: string}>}
 */
async function getReportComments(args) {
    if (!isValidUUID(args.report_id))
        return { error: 'Invalid report_id format' };
    // Surface thread-lookup failures instead of silently treating a DB error
    // as "no comments" (previously the error was discarded).
    const { data: threads, error: threadErr } = await supabase
        .from('discussion_threads').select('id')
        .eq('project_id', currentProjectId).eq('report_id', args.report_id).order('created_at', { ascending: true });
    if (threadErr)
        return { error: threadErr.message };
    if (!threads || threads.length === 0)
        return { comments: [], total: 0, message: 'No comments on this report' };
    const { data: messages, error } = await supabase
        .from('discussion_messages').select('id, thread_id, sender_type, content, content_type, created_at, attachments')
        .in('thread_id', threads.map(t => t.id)).order('created_at', { ascending: true });
    if (error)
        return { error: error.message };
    return {
        comments: (messages || []).map(m => ({
            id: m.id,
            sender_type: m.sender_type,
            content: m.content,
            created_at: m.created_at,
            attachments: m.attachments,
        })),
        total: (messages || []).length,
    };
}
|
|
2177
|
+
/**
 * Mark a batch of discussion threads as resolved (or reopen them).
 *
 * @param {object} args
 * @param {string[]} args.thread_ids - 1..50 thread UUIDs to update.
 * @param {boolean} [args.resolved=true] - Pass false to reopen instead.
 * @returns {Promise<object>} Summary with updated count, per-thread state,
 *   and any IDs that were not found in this project.
 */
async function resolveConversations(args) {
    const ids = args.thread_ids;
    if (!ids || ids.length === 0) {
        return { error: 'At least one thread_id is required' };
    }
    if (ids.length > 50) {
        return { error: 'Maximum 50 threads per request' };
    }
    const badId = ids.find(id => !isValidUUID(id));
    if (badId !== undefined) {
        return { error: `Invalid thread_id format: ${badId}` };
    }
    // Default is "resolve"; only an explicit false reopens.
    const resolved = args.resolved !== false;
    const { data, error } = await supabase
        .from('discussion_threads')
        .update({ is_resolved: resolved })
        .eq('project_id', currentProjectId)
        .in('id', ids)
        .select('id, subject, is_resolved');
    if (error)
        return { error: error.message };
    const updated = data || [];
    const touched = new Set(updated.map((t) => t.id));
    const notFound = ids.filter(id => !touched.has(id));
    return {
        success: true,
        updatedCount: updated.length,
        resolved,
        notFound: notFound.length > 0 ? notFound : undefined,
        threads: updated.map((t) => ({ id: t.id, subject: t.subject, is_resolved: t.is_resolved })),
        message: `${resolved ? 'Resolved' : 'Reopened'} ${updated.length} conversation(s).${notFound.length > 0 ? ` ${notFound.length} not found.` : ''}`,
    };
}
|
|
1414
2209
|
async function getProjectInfo() {
|
|
1415
2210
|
// Get project details
|
|
1416
2211
|
const { data: project, error: projectError } = await supabase
|
|
@@ -1727,6 +2522,19 @@ async function listTestCases(args) {
|
|
|
1727
2522
|
if (args.track) {
|
|
1728
2523
|
testCases = testCases.filter((tc) => tc.track?.name?.toLowerCase().includes(args.track.toLowerCase()));
|
|
1729
2524
|
}
|
|
2525
|
+
// Compact: return minimal fields only
|
|
2526
|
+
if (args.compact === true) {
|
|
2527
|
+
return {
|
|
2528
|
+
count: testCases.length,
|
|
2529
|
+
testCases: testCases.map((tc) => ({
|
|
2530
|
+
id: tc.id,
|
|
2531
|
+
testKey: tc.test_key,
|
|
2532
|
+
title: tc.title,
|
|
2533
|
+
priority: tc.priority,
|
|
2534
|
+
})),
|
|
2535
|
+
pagination: { limit, offset, hasMore: testCases.length === limit },
|
|
2536
|
+
};
|
|
2537
|
+
}
|
|
1730
2538
|
return {
|
|
1731
2539
|
count: testCases.length,
|
|
1732
2540
|
testCases: testCases.map((tc) => ({
|
|
@@ -2236,8 +3044,8 @@ async function getRegressions(args) {
|
|
|
2236
3044
|
};
|
|
2237
3045
|
}
|
|
2238
3046
|
async function getCoverageMatrix(args) {
|
|
2239
|
-
const includeExecution = args.include_execution_data
|
|
2240
|
-
const includeBugs = args.include_bug_counts
|
|
3047
|
+
const includeExecution = args.include_execution_data === true;
|
|
3048
|
+
const includeBugs = args.include_bug_counts === true;
|
|
2241
3049
|
// Get tracks
|
|
2242
3050
|
const { data: tracks } = await supabase
|
|
2243
3051
|
.from('qa_tracks')
|
|
@@ -3690,7 +4498,7 @@ async function markFixedWithCommit(args) {
|
|
|
3690
4498
|
status: 'resolved',
|
|
3691
4499
|
resolved_at: new Date().toISOString(),
|
|
3692
4500
|
resolution_notes: args.resolution_notes || `Fixed in commit ${args.commit_sha.slice(0, 7)}`,
|
|
3693
|
-
notify_tester: args.notify_tester
|
|
4501
|
+
notify_tester: args.notify_tester !== false, // Default: notify tester. Pass false to silently resolve.
|
|
3694
4502
|
code_context: {
|
|
3695
4503
|
...existingContext,
|
|
3696
4504
|
fix: {
|
|
@@ -3710,7 +4518,8 @@ async function markFixedWithCommit(args) {
|
|
|
3710
4518
|
if (error) {
|
|
3711
4519
|
return { error: error.message };
|
|
3712
4520
|
}
|
|
3713
|
-
const
|
|
4521
|
+
const notifyTester = args.notify_tester !== false;
|
|
4522
|
+
const notificationStatus = notifyTester
|
|
3714
4523
|
? 'The original tester will be notified and assigned a verification task.'
|
|
3715
4524
|
: 'No notification sent (silent resolve). A verification task was created.';
|
|
3716
4525
|
return {
|
|
@@ -3718,7 +4527,7 @@ async function markFixedWithCommit(args) {
|
|
|
3718
4527
|
message: `Bug marked as fixed in commit ${args.commit_sha.slice(0, 7)}. ${notificationStatus}`,
|
|
3719
4528
|
report_id: args.report_id,
|
|
3720
4529
|
commit: args.commit_sha,
|
|
3721
|
-
tester_notified:
|
|
4530
|
+
tester_notified: notifyTester,
|
|
3722
4531
|
next_steps: [
|
|
3723
4532
|
'Consider running create_regression_test to prevent this bug from recurring',
|
|
3724
4533
|
'Push your changes to trigger CI/CD',
|
|
@@ -4443,12 +5252,15 @@ Which files or areas would you like me to analyze?`;
|
|
|
4443
5252
|
async function listTesters(args) {
|
|
4444
5253
|
let query = supabase
|
|
4445
5254
|
.from('testers')
|
|
4446
|
-
.select('id, name, email, status, platforms, tier, assigned_count, completed_count, notes, created_at')
|
|
5255
|
+
.select('id, name, email, status, platforms, tier, assigned_count, completed_count, notes, role, created_at')
|
|
4447
5256
|
.eq('project_id', currentProjectId)
|
|
4448
5257
|
.order('name', { ascending: true });
|
|
4449
5258
|
if (args.status) {
|
|
4450
5259
|
query = query.eq('status', args.status);
|
|
4451
5260
|
}
|
|
5261
|
+
if (args.role) {
|
|
5262
|
+
query = query.eq('role', args.role);
|
|
5263
|
+
}
|
|
4452
5264
|
const { data, error } = await query;
|
|
4453
5265
|
if (error) {
|
|
4454
5266
|
return { error: error.message };
|
|
@@ -4470,6 +5282,7 @@ async function listTesters(args) {
|
|
|
4470
5282
|
assignedCount: t.assigned_count,
|
|
4471
5283
|
completedCount: t.completed_count,
|
|
4472
5284
|
notes: t.notes,
|
|
5285
|
+
role: t.role,
|
|
4473
5286
|
})),
|
|
4474
5287
|
};
|
|
4475
5288
|
}
|
|
@@ -4671,6 +5484,22 @@ async function assignTests(args) {
|
|
|
4671
5484
|
test_run_id: args.test_run_id || null,
|
|
4672
5485
|
status: 'pending',
|
|
4673
5486
|
}));
|
|
5487
|
+
// Helper: after assignments change, sync the test run's total_tests counter
|
|
5488
|
+
async function syncRunCounter() {
|
|
5489
|
+
if (!args.test_run_id)
|
|
5490
|
+
return;
|
|
5491
|
+
const { count } = await supabase
|
|
5492
|
+
.from('test_assignments')
|
|
5493
|
+
.select('id', { count: 'exact', head: true })
|
|
5494
|
+
.eq('test_run_id', args.test_run_id)
|
|
5495
|
+
.eq('project_id', currentProjectId);
|
|
5496
|
+
if (count !== null) {
|
|
5497
|
+
await supabase
|
|
5498
|
+
.from('test_runs')
|
|
5499
|
+
.update({ total_tests: count })
|
|
5500
|
+
.eq('id', args.test_run_id);
|
|
5501
|
+
}
|
|
5502
|
+
}
|
|
4674
5503
|
// Insert — use upsert-like approach: insert and handle conflicts
|
|
4675
5504
|
const { data: inserted, error: insertErr } = await supabase
|
|
4676
5505
|
.from('test_assignments')
|
|
@@ -4696,6 +5525,7 @@ async function assignTests(args) {
|
|
|
4696
5525
|
created.push(single);
|
|
4697
5526
|
}
|
|
4698
5527
|
}
|
|
5528
|
+
await syncRunCounter();
|
|
4699
5529
|
return {
|
|
4700
5530
|
success: true,
|
|
4701
5531
|
created: created.length,
|
|
@@ -4707,6 +5537,7 @@ async function assignTests(args) {
|
|
|
4707
5537
|
}
|
|
4708
5538
|
return { error: insertErr.message };
|
|
4709
5539
|
}
|
|
5540
|
+
await syncRunCounter();
|
|
4710
5541
|
return {
|
|
4711
5542
|
success: true,
|
|
4712
5543
|
created: (inserted || []).length,
|
|
@@ -4716,6 +5547,71 @@ async function assignTests(args) {
|
|
|
4716
5547
|
message: `Assigned ${(inserted || []).length} test(s) to ${tester.name}.`,
|
|
4717
5548
|
};
|
|
4718
5549
|
}
|
|
5550
|
+
/**
 * Remove a batch of test assignments from the current project and keep the
 * affected test runs' total_tests counters in sync.
 *
 * @param {object} args
 * @param {string[]} args.assignment_ids - 1..50 assignment UUIDs to delete.
 * @returns {Promise<object>} Summary of deleted assignments, plus any IDs
 *   that did not match an assignment in this project.
 */
async function unassignTests(args) {
    if (!args.assignment_ids || args.assignment_ids.length === 0) {
        return { error: 'At least one assignment_id is required' };
    }
    if (args.assignment_ids.length > 50) {
        return { error: 'Maximum 50 assignments per unassign batch' };
    }
    const invalidIds = args.assignment_ids.filter(id => !isValidUUID(id));
    if (invalidIds.length > 0) {
        return { error: `Invalid UUID(s): ${invalidIds.join(', ')}` };
    }
    // Verify assignments exist and belong to this project
    const { data: existing, error: lookupErr } = await supabase
        .from('test_assignments')
        .select('id, test_run_id, test_case:test_cases(test_key, title), tester:testers(name)')
        .eq('project_id', currentProjectId)
        .in('id', args.assignment_ids);
    if (lookupErr)
        return { error: lookupErr.message };
    if (!existing || existing.length === 0) {
        return { error: 'No matching assignments found in this project' };
    }
    const foundIds = new Set(existing.map((a) => a.id));
    const notFound = args.assignment_ids.filter(id => !foundIds.has(id));
    // Delete the assignments
    const { error: deleteErr } = await supabase
        .from('test_assignments')
        .delete()
        .eq('project_id', currentProjectId)
        .in('id', args.assignment_ids);
    if (deleteErr)
        return { error: deleteErr.message };
    // Sync run counters for any affected test runs. The per-run recounts are
    // independent, so they run in parallel rather than awaiting sequentially.
    const affectedRunIds = [...new Set(existing.filter((a) => a.test_run_id).map((a) => a.test_run_id))];
    await Promise.all(affectedRunIds.map(async (runId) => {
        const { count } = await supabase
            .from('test_assignments')
            .select('id', { count: 'exact', head: true })
            .eq('test_run_id', runId)
            .eq('project_id', currentProjectId);
        if (count !== null) {
            await supabase.from('test_runs').update({ total_tests: count }).eq('id', runId);
        }
    }));
    const deleted = existing.map((a) => {
        const tc = a.test_case;
        const tester = a.tester;
        return {
            id: a.id,
            testKey: tc?.test_key || null,
            testTitle: tc?.title || null,
            testerName: tester?.name || null,
        };
    });
    const firstKey = deleted[0]?.testKey;
    return {
        success: true,
        deletedCount: existing.length,
        deleted,
        notFound: notFound.length > 0 ? notFound : undefined,
        message: existing.length === 1
            ? `Removed 1 assignment${firstKey ? ` (${firstKey})` : ''}`
            : `Removed ${existing.length} assignment(s)`,
    };
}
|
|
4719
5615
|
async function getTesterWorkload(args) {
|
|
4720
5616
|
if (!isValidUUID(args.tester_id)) {
|
|
4721
5617
|
return { error: 'Invalid tester_id format' };
|
|
@@ -4815,8 +5711,9 @@ async function createTester(args) {
|
|
|
4815
5711
|
tier: args.tier ?? 1,
|
|
4816
5712
|
notes: args.notes?.trim() || null,
|
|
4817
5713
|
status: 'active',
|
|
5714
|
+
role: args.role || 'tester',
|
|
4818
5715
|
})
|
|
4819
|
-
.select('id, name, email, status, platforms, tier, notes, created_at')
|
|
5716
|
+
.select('id, name, email, status, platforms, tier, notes, role, created_at')
|
|
4820
5717
|
.single();
|
|
4821
5718
|
if (error) {
|
|
4822
5719
|
if (error.message.includes('duplicate') || error.message.includes('unique')) {
|
|
@@ -4834,6 +5731,7 @@ async function createTester(args) {
|
|
|
4834
5731
|
platforms: data.platforms,
|
|
4835
5732
|
tier: data.tier,
|
|
4836
5733
|
notes: data.notes,
|
|
5734
|
+
role: data.role,
|
|
4837
5735
|
createdAt: data.created_at,
|
|
4838
5736
|
},
|
|
4839
5737
|
message: `Tester "${data.name}" added to the project. Use assign_tests to give them test cases.`,
|
|
@@ -5124,27 +6022,38 @@ async function exportTestResults(args) {
|
|
|
5124
6022
|
const all = assignments || [];
|
|
5125
6023
|
const passCount = all.filter(a => a.status === 'passed').length;
|
|
5126
6024
|
const failCount = all.filter(a => a.status === 'failed').length;
|
|
6025
|
+
const testRunInfo = {
|
|
6026
|
+
id: run.id,
|
|
6027
|
+
name: run.name,
|
|
6028
|
+
description: run.description,
|
|
6029
|
+
status: run.status,
|
|
6030
|
+
startedAt: run.started_at,
|
|
6031
|
+
completedAt: run.completed_at,
|
|
6032
|
+
createdAt: run.created_at,
|
|
6033
|
+
};
|
|
6034
|
+
const summaryInfo = {
|
|
6035
|
+
totalAssignments: all.length,
|
|
6036
|
+
passed: passCount,
|
|
6037
|
+
failed: failCount,
|
|
6038
|
+
blocked: all.filter(a => a.status === 'blocked').length,
|
|
6039
|
+
skipped: all.filter(a => a.status === 'skipped').length,
|
|
6040
|
+
pending: all.filter(a => a.status === 'pending').length,
|
|
6041
|
+
inProgress: all.filter(a => a.status === 'in_progress').length,
|
|
6042
|
+
passRate: all.length > 0 ? Math.round((passCount / all.length) * 100) : 0,
|
|
6043
|
+
};
|
|
6044
|
+
// Compact: return test run info + summary only, no assignments array
|
|
6045
|
+
if (args.compact === true) {
|
|
6046
|
+
return { testRun: testRunInfo, summary: summaryInfo };
|
|
6047
|
+
}
|
|
6048
|
+
// Apply limit (default: 100, max: 500)
|
|
6049
|
+
const assignmentLimit = Math.min(Math.max(args.limit ?? 100, 1), 500);
|
|
6050
|
+
const limitedAssignments = all.slice(0, assignmentLimit);
|
|
5127
6051
|
return {
|
|
5128
|
-
testRun:
|
|
5129
|
-
|
|
5130
|
-
|
|
5131
|
-
|
|
5132
|
-
|
|
5133
|
-
startedAt: run.started_at,
|
|
5134
|
-
completedAt: run.completed_at,
|
|
5135
|
-
createdAt: run.created_at,
|
|
5136
|
-
},
|
|
5137
|
-
summary: {
|
|
5138
|
-
totalAssignments: all.length,
|
|
5139
|
-
passed: passCount,
|
|
5140
|
-
failed: failCount,
|
|
5141
|
-
blocked: all.filter(a => a.status === 'blocked').length,
|
|
5142
|
-
skipped: all.filter(a => a.status === 'skipped').length,
|
|
5143
|
-
pending: all.filter(a => a.status === 'pending').length,
|
|
5144
|
-
inProgress: all.filter(a => a.status === 'in_progress').length,
|
|
5145
|
-
passRate: all.length > 0 ? Math.round((passCount / all.length) * 100) : 0,
|
|
5146
|
-
},
|
|
5147
|
-
assignments: all.map((a) => ({
|
|
6052
|
+
testRun: testRunInfo,
|
|
6053
|
+
summary: summaryInfo,
|
|
6054
|
+
assignmentsReturned: limitedAssignments.length,
|
|
6055
|
+
assignmentsTotal: all.length,
|
|
6056
|
+
assignments: limitedAssignments.map((a) => ({
|
|
5148
6057
|
id: a.id,
|
|
5149
6058
|
status: a.status,
|
|
5150
6059
|
assignedAt: a.assigned_at,
|
|
@@ -5258,6 +6167,15 @@ async function main() {
|
|
|
5258
6167
|
case 'get_report_context':
|
|
5259
6168
|
result = await getReportContext(args);
|
|
5260
6169
|
break;
|
|
6170
|
+
case 'add_report_comment':
|
|
6171
|
+
result = await addReportComment(args);
|
|
6172
|
+
break;
|
|
6173
|
+
case 'get_report_comments':
|
|
6174
|
+
result = await getReportComments(args);
|
|
6175
|
+
break;
|
|
6176
|
+
case 'resolve_conversations':
|
|
6177
|
+
result = await resolveConversations(args);
|
|
6178
|
+
break;
|
|
5261
6179
|
case 'get_project_info':
|
|
5262
6180
|
result = await getProjectInfo();
|
|
5263
6181
|
break;
|
|
@@ -5371,6 +6289,9 @@ async function main() {
|
|
|
5371
6289
|
case 'assign_tests':
|
|
5372
6290
|
result = await assignTests(args);
|
|
5373
6291
|
break;
|
|
6292
|
+
case 'unassign_tests':
|
|
6293
|
+
result = await unassignTests(args);
|
|
6294
|
+
break;
|
|
5374
6295
|
case 'get_tester_workload':
|
|
5375
6296
|
result = await getTesterWorkload(args);
|
|
5376
6297
|
break;
|
|
@@ -5406,6 +6327,32 @@ async function main() {
|
|
|
5406
6327
|
case 'get_current_project':
|
|
5407
6328
|
result = getCurrentProject();
|
|
5408
6329
|
break;
|
|
6330
|
+
// === TEST EXECUTION INTELLIGENCE ===
|
|
6331
|
+
case 'get_test_impact':
|
|
6332
|
+
result = await getTestImpact(args);
|
|
6333
|
+
break;
|
|
6334
|
+
case 'get_flaky_tests':
|
|
6335
|
+
result = await getFlakyTests(args);
|
|
6336
|
+
break;
|
|
6337
|
+
case 'assess_test_quality':
|
|
6338
|
+
result = await assessTestQuality(args);
|
|
6339
|
+
break;
|
|
6340
|
+
case 'get_test_execution_summary':
|
|
6341
|
+
result = await getTestExecutionSummary(args);
|
|
6342
|
+
break;
|
|
6343
|
+
case 'check_test_freshness':
|
|
6344
|
+
result = await checkTestFreshness(args);
|
|
6345
|
+
break;
|
|
6346
|
+
case 'get_untested_changes':
|
|
6347
|
+
result = await getUntestedChanges(args);
|
|
6348
|
+
break;
|
|
6349
|
+
// === AUTO-MONITORING TOOLS ===
|
|
6350
|
+
case 'get_auto_detected_issues':
|
|
6351
|
+
result = await getAutoDetectedIssues(args);
|
|
6352
|
+
break;
|
|
6353
|
+
case 'generate_tests_from_errors':
|
|
6354
|
+
result = await generateTestsFromErrors(args);
|
|
6355
|
+
break;
|
|
5409
6356
|
default:
|
|
5410
6357
|
return {
|
|
5411
6358
|
content: [{ type: 'text', text: `Unknown tool: ${name}` }],
|