@bbearai/mcp-server 0.3.0

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
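For context, the file below implements an MCP stdio server, so an MCP client would typically register it with a launch command plus the environment variables the code reads (SUPABASE_ANON_KEY, BUGBEAR_PROJECT_ID, and optionally SUPABASE_URL). The exact client config file, server name, and the npx invocation are assumptions not shown in this diff; a typical entry might look like:

{
  "mcpServers": {
    "bugbear": {
      "command": "npx",
      "args": ["-y", "@bbearai/mcp-server"],
      "env": {
        "SUPABASE_ANON_KEY": "<anon key>",
        "BUGBEAR_PROJECT_ID": "<project uuid>"
      }
    }
  }
}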
package/dist/index.js ADDED
@@ -0,0 +1,3577 @@
1
+ #!/usr/bin/env node
2
+ "use strict";
3
+ /**
4
+ * BugBear MCP Server
5
+ * Allows Claude Code to query and manage bug reports via Model Context Protocol
6
+ */
7
+ Object.defineProperty(exports, "__esModule", { value: true });
8
+ const index_js_1 = require("@modelcontextprotocol/sdk/server/index.js");
9
+ const stdio_js_1 = require("@modelcontextprotocol/sdk/server/stdio.js");
10
+ const types_js_1 = require("@modelcontextprotocol/sdk/types.js");
11
+ const supabase_js_1 = require("@supabase/supabase-js");
12
+ // Configuration from environment
13
+ const SUPABASE_URL = process.env.SUPABASE_URL || 'https://kyxgzjnqgvapvlnvqawz.supabase.co';
14
+ const SUPABASE_ANON_KEY = process.env.SUPABASE_ANON_KEY || '';
15
+ const PROJECT_ID = process.env.BUGBEAR_PROJECT_ID || '';
16
+ // Initialize Supabase client
17
+ let supabase;
18
+ function validateConfig() {
19
+ const errors = [];
20
+ if (!SUPABASE_ANON_KEY) {
21
+ errors.push('SUPABASE_ANON_KEY environment variable is required');
22
+ }
23
+ if (!PROJECT_ID) {
24
+ errors.push('BUGBEAR_PROJECT_ID environment variable is required');
25
+ }
26
+ // Basic UUID format validation for project ID
27
+ if (PROJECT_ID && !/^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i.test(PROJECT_ID)) {
28
+ errors.push('BUGBEAR_PROJECT_ID must be a valid UUID');
29
+ }
30
+ if (errors.length > 0) {
31
+ console.error('BugBear MCP Server configuration errors:');
32
+ errors.forEach(e => console.error(` - ${e}`));
33
+ process.exit(1);
34
+ }
35
+ }
36
+ function initSupabase() {
37
+ validateConfig();
38
+ supabase = (0, supabase_js_1.createClient)(SUPABASE_URL, SUPABASE_ANON_KEY);
39
+ }
40
+ // Security helpers
41
+ const UUID_REGEX = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i;
42
+ function isValidUUID(str) {
43
+ return typeof str === 'string' && UUID_REGEX.test(str);
44
+ }
45
+ function sanitizeSearchQuery(query) {
46
+ if (!query)
47
+ return undefined;
48
+ // Remove characters that could cause issues in LIKE queries
49
+ // Supabase escapes properly but we add an extra layer
50
+ return query
51
+ .replace(/[%_\\]/g, '') // Remove LIKE wildcards and escape char
52
+ .slice(0, 500); // Limit length
53
+ }
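// Illustrative note (not part of the package source): the sanitizer above strips SQL LIKE
// wildcards and the escape character before the value is interpolated into an ilike filter
// in searchReports below, and caps the input at 500 characters, e.g.:
//   sanitizeSearchQuery('50%_off\\sale')  -> '50offsale'
//   sanitizeSearchQuery('')               -> undefined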
54
+ // Tool definitions
55
+ const tools = [
56
+ {
57
+ name: 'list_reports',
58
+ description: 'List recent bug reports for the project. Returns the most recent reports with their details.',
59
+ inputSchema: {
60
+ type: 'object',
61
+ properties: {
62
+ limit: {
63
+ type: 'number',
64
+ description: 'Maximum number of reports to return (default: 10, max: 50)',
65
+ },
66
+ status: {
67
+ type: 'string',
68
+ enum: ['new', 'triaging', 'confirmed', 'in_progress', 'fixed', 'resolved', 'verified', 'wont_fix', 'duplicate'],
69
+ description: 'Filter by report status',
70
+ },
71
+ severity: {
72
+ type: 'string',
73
+ enum: ['critical', 'high', 'medium', 'low'],
74
+ description: 'Filter by severity',
75
+ },
76
+ type: {
77
+ type: 'string',
78
+ enum: ['bug', 'test_fail', 'feedback', 'suggestion'],
79
+ description: 'Filter by report type',
80
+ },
81
+ },
82
+ },
83
+ },
84
+ {
85
+ name: 'get_report',
86
+ description: 'Get detailed information about a specific bug report by ID',
87
+ inputSchema: {
88
+ type: 'object',
89
+ properties: {
90
+ report_id: {
91
+ type: 'string',
92
+ description: 'The UUID of the report to retrieve',
93
+ },
94
+ },
95
+ required: ['report_id'],
96
+ },
97
+ },
98
+ {
99
+ name: 'search_reports',
100
+ description: 'Search bug reports by route, description, or other criteria',
101
+ inputSchema: {
102
+ type: 'object',
103
+ properties: {
104
+ query: {
105
+ type: 'string',
106
+ description: 'Search query to match against report descriptions',
107
+ },
108
+ route: {
109
+ type: 'string',
110
+ description: 'Filter by specific route/path where the bug occurred',
111
+ },
112
+ },
113
+ },
114
+ },
115
+ {
116
+ name: 'update_report_status',
117
+ description: 'Update the status of a bug report',
118
+ inputSchema: {
119
+ type: 'object',
120
+ properties: {
121
+ report_id: {
122
+ type: 'string',
123
+ description: 'The UUID of the report to update',
124
+ },
125
+ status: {
126
+ type: 'string',
127
+ enum: ['new', 'triaging', 'confirmed', 'in_progress', 'fixed', 'resolved', 'verified', 'wont_fix', 'duplicate'],
128
+ description: 'The new status for the report',
129
+ },
130
+ resolution: {
131
+ type: 'string',
132
+ description: 'Optional resolution notes when marking as resolved',
133
+ },
134
+ },
135
+ required: ['report_id', 'status'],
136
+ },
137
+ },
138
+ {
139
+ name: 'get_report_context',
140
+ description: 'Get the full debugging context for a report including console logs, network requests, and navigation history',
141
+ inputSchema: {
142
+ type: 'object',
143
+ properties: {
144
+ report_id: {
145
+ type: 'string',
146
+ description: 'The UUID of the report',
147
+ },
148
+ },
149
+ required: ['report_id'],
150
+ },
151
+ },
152
+ {
153
+ name: 'get_project_info',
154
+ description: 'Get project information including QA tracks, test case counts, and common bug patterns',
155
+ inputSchema: {
156
+ type: 'object',
157
+ properties: {},
158
+ },
159
+ },
160
+ {
161
+ name: 'get_qa_tracks',
162
+ description: 'Get all QA tracks for the project with their test templates and requirements',
163
+ inputSchema: {
164
+ type: 'object',
165
+ properties: {},
166
+ },
167
+ },
168
+ {
169
+ name: 'create_test_case',
170
+ description: 'Create a new test case in BugBear. Use this after generating test case suggestions.',
171
+ inputSchema: {
172
+ type: 'object',
173
+ properties: {
174
+ test_key: {
175
+ type: 'string',
176
+ description: 'Unique test case identifier (e.g., TC-001)',
177
+ },
178
+ title: {
179
+ type: 'string',
180
+ description: 'Brief title describing what is being tested',
181
+ },
182
+ description: {
183
+ type: 'string',
184
+ description: 'Detailed description of the test case',
185
+ },
186
+ track: {
187
+ type: 'string',
188
+ enum: ['functional', 'design', 'accessibility', 'performance', 'content', 'ux'],
189
+ description: 'QA track this test belongs to',
190
+ },
191
+ priority: {
192
+ type: 'string',
193
+ enum: ['P0', 'P1', 'P2', 'P3'],
194
+ description: 'Priority level (P0=critical, P3=low)',
195
+ },
196
+ steps: {
197
+ type: 'array',
198
+ description: 'Array of test steps',
199
+ items: {
200
+ type: 'object',
201
+ properties: {
202
+ stepNumber: { type: 'number' },
203
+ action: { type: 'string' },
204
+ expectedResult: { type: 'string' },
205
+ },
206
+ },
207
+ },
208
+ expected_result: {
209
+ type: 'string',
210
+ description: 'Overall expected outcome of the test',
211
+ },
212
+ preconditions: {
213
+ type: 'string',
214
+ description: 'Any setup required before running the test',
215
+ },
216
+ target_route: {
217
+ type: 'string',
218
+ description: 'Route/path to navigate to when starting this test (e.g., /settings/profile). This enables deep linking so testers can jump directly to the screen being tested.',
219
+ },
220
+ },
221
+ required: ['test_key', 'title', 'steps', 'expected_result'],
222
+ },
223
+ },
224
+ {
225
+ name: 'update_test_case',
226
+ description: 'Update an existing test case in BugBear. Use this to add target_route for deep linking or modify other fields.',
227
+ inputSchema: {
228
+ type: 'object',
229
+ properties: {
230
+ test_case_id: {
231
+ type: 'string',
232
+ description: 'The UUID of the test case to update',
233
+ },
234
+ test_key: {
235
+ type: 'string',
236
+ description: 'Look up test case by test_key instead of ID (e.g., TC-001)',
237
+ },
238
+ title: {
239
+ type: 'string',
240
+ description: 'New title for the test case',
241
+ },
242
+ description: {
243
+ type: 'string',
244
+ description: 'New description for the test case',
245
+ },
246
+ priority: {
247
+ type: 'string',
248
+ enum: ['P0', 'P1', 'P2', 'P3'],
249
+ description: 'New priority level',
250
+ },
251
+ steps: {
252
+ type: 'array',
253
+ description: 'New array of test steps (replaces existing steps)',
254
+ items: {
255
+ type: 'object',
256
+ properties: {
257
+ stepNumber: { type: 'number' },
258
+ action: { type: 'string' },
259
+ expectedResult: { type: 'string' },
260
+ },
261
+ },
262
+ },
263
+ expected_result: {
264
+ type: 'string',
265
+ description: 'New expected outcome',
266
+ },
267
+ preconditions: {
268
+ type: 'string',
269
+ description: 'New preconditions',
270
+ },
271
+ target_route: {
272
+ type: 'string',
273
+ description: 'Route/path for deep linking (e.g., /settings/profile)',
274
+ },
275
+ },
276
+ },
277
+ },
278
+ {
279
+ name: 'list_test_cases',
280
+ description: 'List all test cases in the project. Returns test_key, title, target_route, and other metadata. Use this to see existing tests before updating them.',
281
+ inputSchema: {
282
+ type: 'object',
283
+ properties: {
284
+ track: {
285
+ type: 'string',
286
+ description: 'Optional: filter by QA track name',
287
+ },
288
+ priority: {
289
+ type: 'string',
290
+ enum: ['P0', 'P1', 'P2', 'P3'],
291
+ description: 'Optional: filter by priority',
292
+ },
293
+ missing_target_route: {
294
+ type: 'boolean',
295
+ description: 'If true, only return test cases that do NOT have a target_route set (useful for backfilling)',
296
+ },
297
+ limit: {
298
+ type: 'number',
299
+ description: 'Max number of test cases to return (default 100)',
300
+ },
301
+ offset: {
302
+ type: 'number',
303
+ description: 'Offset for pagination (default 0)',
304
+ },
305
+ },
306
+ },
307
+ },
308
+ {
309
+ name: 'get_bug_patterns',
310
+ description: 'Get common bug patterns and hotspots for the project to help prioritize testing',
311
+ inputSchema: {
312
+ type: 'object',
313
+ properties: {
314
+ route: {
315
+ type: 'string',
316
+ description: 'Optional: filter by specific route',
317
+ },
318
+ },
319
+ },
320
+ },
321
+ // === WRITE-BACK TOOLS FOR VIBE CODERS ===
322
+ {
323
+ name: 'create_bug_report',
324
+ description: 'Create a bug report directly from Claude Code. Use this when you notice issues while coding - broken functionality, potential bugs, or problems you discover during development.',
325
+ inputSchema: {
326
+ type: 'object',
327
+ properties: {
328
+ title: {
329
+ type: 'string',
330
+ description: 'Brief title describing the bug',
331
+ },
332
+ description: {
333
+ type: 'string',
334
+ description: 'Detailed description of the bug, including what you observed',
335
+ },
336
+ severity: {
337
+ type: 'string',
338
+ enum: ['critical', 'high', 'medium', 'low'],
339
+ description: 'Bug severity level',
340
+ },
341
+ file_path: {
342
+ type: 'string',
343
+ description: 'File path where the bug was found (e.g., src/components/Auth.tsx)',
344
+ },
345
+ line_number: {
346
+ type: 'number',
347
+ description: 'Line number in the file where the issue occurs',
348
+ },
349
+ code_snippet: {
350
+ type: 'string',
351
+ description: 'Relevant code snippet showing the problematic code',
352
+ },
353
+ suggested_fix: {
354
+ type: 'string',
355
+ description: 'Your suggested fix or approach to resolve this bug',
356
+ },
357
+ related_files: {
358
+ type: 'array',
359
+ items: { type: 'string' },
360
+ description: 'Other files that may be affected or related to this bug',
361
+ },
362
+ },
363
+ required: ['title', 'description', 'severity'],
364
+ },
365
+ },
366
+ {
367
+ name: 'get_bugs_for_file',
368
+ description: 'Get all known bugs related to a specific file. Use this BEFORE editing a file to check for existing issues you should be aware of or might want to fix.',
369
+ inputSchema: {
370
+ type: 'object',
371
+ properties: {
372
+ file_path: {
373
+ type: 'string',
374
+ description: 'File path to check for bugs (e.g., src/components/Auth.tsx)',
375
+ },
376
+ include_resolved: {
377
+ type: 'boolean',
378
+ description: 'Include resolved bugs for context (default: false)',
379
+ },
380
+ },
381
+ required: ['file_path'],
382
+ },
383
+ },
384
+ {
385
+ name: 'mark_fixed_with_commit',
386
+ description: 'Mark a bug as fixed and link it to the commit that resolved it. Use this after you fix a bug to close the loop.',
387
+ inputSchema: {
388
+ type: 'object',
389
+ properties: {
390
+ report_id: {
391
+ type: 'string',
392
+ description: 'The UUID of the bug report to mark as fixed',
393
+ },
394
+ commit_sha: {
395
+ type: 'string',
396
+ description: 'Git commit SHA that contains the fix',
397
+ },
398
+ commit_message: {
399
+ type: 'string',
400
+ description: 'The commit message',
401
+ },
402
+ resolution_notes: {
403
+ type: 'string',
404
+ description: 'Notes explaining how the bug was fixed',
405
+ },
406
+ files_changed: {
407
+ type: 'array',
408
+ items: { type: 'string' },
409
+ description: 'List of files that were modified to fix this bug',
410
+ },
411
+ },
412
+ required: ['report_id', 'commit_sha'],
413
+ },
414
+ },
415
+ {
416
+ name: 'get_bugs_affecting_code',
417
+ description: 'Get bugs that might be affected by changes to specific files. Use this before pushing to understand the impact of your changes.',
418
+ inputSchema: {
419
+ type: 'object',
420
+ properties: {
421
+ file_paths: {
422
+ type: 'array',
423
+ items: { type: 'string' },
424
+ description: 'List of file paths that were changed',
425
+ },
426
+ include_related: {
427
+ type: 'boolean',
428
+ description: 'Include bugs in related/imported files (default: true)',
429
+ },
430
+ },
431
+ required: ['file_paths'],
432
+ },
433
+ },
434
+ {
435
+ name: 'link_bug_to_code',
436
+ description: 'Add code location information to an existing bug report. Use this to help track exactly where bugs occur.',
437
+ inputSchema: {
438
+ type: 'object',
439
+ properties: {
440
+ report_id: {
441
+ type: 'string',
442
+ description: 'The UUID of the bug report',
443
+ },
444
+ file_path: {
445
+ type: 'string',
446
+ description: 'File path where the bug occurs',
447
+ },
448
+ line_number: {
449
+ type: 'number',
450
+ description: 'Line number in the file',
451
+ },
452
+ code_snippet: {
453
+ type: 'string',
454
+ description: 'Relevant code snippet',
455
+ },
456
+ function_name: {
457
+ type: 'string',
458
+ description: 'Function or component name where the bug occurs',
459
+ },
460
+ },
461
+ required: ['report_id', 'file_path'],
462
+ },
463
+ },
464
+ {
465
+ name: 'create_regression_test',
466
+ description: 'Generate a regression test case from a fixed bug to prevent it from recurring. Use this after fixing a bug.',
467
+ inputSchema: {
468
+ type: 'object',
469
+ properties: {
470
+ report_id: {
471
+ type: 'string',
472
+ description: 'The UUID of the fixed bug report to create a test from',
473
+ },
474
+ test_type: {
475
+ type: 'string',
476
+ enum: ['unit', 'integration', 'e2e'],
477
+ description: 'Type of test to generate',
478
+ },
479
+ },
480
+ required: ['report_id'],
481
+ },
482
+ },
483
+ // === FIX QUEUE TOOLS ===
484
+ {
485
+ name: 'get_pending_fixes',
486
+ description: 'Get fix requests queued from the BugBear dashboard. These are bugs that users have requested to be fixed. Check this periodically to see if there are fixes waiting for you.',
487
+ inputSchema: {
488
+ type: 'object',
489
+ properties: {
490
+ limit: {
491
+ type: 'number',
492
+ description: 'Maximum number of fix requests to return (default: 10)',
493
+ },
494
+ include_claimed: {
495
+ type: 'boolean',
496
+ description: 'Include already-claimed fix requests (default: false)',
497
+ },
498
+ },
499
+ },
500
+ },
501
+ {
502
+ name: 'claim_fix_request',
503
+ description: 'Claim a fix request from the queue. This marks it as being worked on so other Claude Code instances know not to pick it up.',
504
+ inputSchema: {
505
+ type: 'object',
506
+ properties: {
507
+ fix_request_id: {
508
+ type: 'string',
509
+ description: 'The UUID of the fix request to claim',
510
+ },
511
+ claimed_by: {
512
+ type: 'string',
513
+ description: 'Identifier for this Claude Code instance (e.g., hostname or session ID)',
514
+ },
515
+ },
516
+ required: ['fix_request_id'],
517
+ },
518
+ },
519
+ {
520
+ name: 'complete_fix_request',
521
+ description: 'Mark a fix request as completed after you have fixed the bug.',
522
+ inputSchema: {
523
+ type: 'object',
524
+ properties: {
525
+ fix_request_id: {
526
+ type: 'string',
527
+ description: 'The UUID of the fix request to complete',
528
+ },
529
+ completion_notes: {
530
+ type: 'string',
531
+ description: 'Notes about how the fix was implemented (e.g., commit SHA, files changed)',
532
+ },
533
+ success: {
534
+ type: 'boolean',
535
+ description: 'Whether the fix was successful (default: true). Set to false if you could not fix the issue.',
536
+ },
537
+ },
538
+ required: ['fix_request_id'],
539
+ },
540
+ },
541
+ {
542
+ name: 'analyze_changes_for_tests',
543
+ description: 'Analyze code changes and intelligently suggest QA tests that should be created. Call this after implementing features, fixing bugs, or making significant changes. Returns prioritized test suggestions with rationale.',
544
+ inputSchema: {
545
+ type: 'object',
546
+ properties: {
547
+ changed_files: {
548
+ type: 'array',
549
+ items: { type: 'string' },
550
+ description: 'List of files that were changed (paths)',
551
+ },
552
+ change_type: {
553
+ type: 'string',
554
+ enum: ['feature', 'bugfix', 'refactor', 'ui_change', 'api_change', 'config'],
555
+ description: 'Type of change made',
556
+ },
557
+ change_summary: {
558
+ type: 'string',
559
+ description: 'Brief description of what was changed (1-2 sentences)',
560
+ },
561
+ affected_routes: {
562
+ type: 'array',
563
+ items: { type: 'string' },
564
+ description: 'Routes/screens affected by this change (e.g., /settings, /profile)',
565
+ },
566
+ },
567
+ required: ['changed_files', 'change_type', 'change_summary'],
568
+ },
569
+ },
570
+ {
571
+ name: 'suggest_test_cases',
572
+ description: 'Get AI-generated test case suggestions based on bug history and routes. Returns formatted suggestions that can be reviewed and created.',
573
+ inputSchema: {
574
+ type: 'object',
575
+ properties: {
576
+ route: {
577
+ type: 'string',
578
+ description: 'Route/feature to generate tests for',
579
+ },
580
+ track: {
581
+ type: 'string',
582
+ enum: ['functional', 'design', 'accessibility', 'performance'],
583
+ description: 'QA track to focus on',
584
+ },
585
+ count: {
586
+ type: 'number',
587
+ description: 'Number of test cases to suggest (default: 5)',
588
+ },
589
+ },
590
+ },
591
+ },
592
+ // === QA INTELLIGENCE TOOLS ===
593
+ {
594
+ name: 'get_test_priorities',
595
+ description: 'Get routes ranked by test priority score. Returns the most urgent routes that need testing based on bug frequency, critical issues, staleness, coverage gaps, and regression risk. Use this to know WHERE to focus testing efforts.',
596
+ inputSchema: {
597
+ type: 'object',
598
+ properties: {
599
+ limit: {
600
+ type: 'number',
601
+ description: 'Maximum number of routes to return (default: 10)',
602
+ },
603
+ min_score: {
604
+ type: 'number',
605
+ description: 'Minimum priority score threshold (0-100, default: 0)',
606
+ },
607
+ include_factors: {
608
+ type: 'boolean',
609
+ description: 'Include detailed breakdown of priority factors (default: true)',
610
+ },
611
+ },
612
+ },
613
+ },
614
+ {
615
+ name: 'get_coverage_gaps',
616
+ description: 'Identify coverage gaps in QA testing. Finds untested routes, routes missing track coverage, and stale test coverage. Use this to understand what areas lack adequate testing.',
617
+ inputSchema: {
618
+ type: 'object',
619
+ properties: {
620
+ gap_type: {
621
+ type: 'string',
622
+ enum: ['untested_routes', 'missing_tracks', 'stale_coverage', 'all'],
623
+ description: 'Type of gap to look for (default: all)',
624
+ },
625
+ stale_days: {
626
+ type: 'number',
627
+ description: 'Days threshold for stale coverage (default: 14)',
628
+ },
629
+ },
630
+ },
631
+ },
632
+ {
633
+ name: 'get_regressions',
634
+ description: 'Detect potential regressions - bugs that reappear after being resolved. Identifies routes with recurring issues and patterns. Use this to find areas prone to regression.',
635
+ inputSchema: {
636
+ type: 'object',
637
+ properties: {
638
+ days: {
639
+ type: 'number',
640
+ description: 'Look back period in days (default: 30)',
641
+ },
642
+ include_history: {
643
+ type: 'boolean',
644
+ description: 'Include full regression history (default: false)',
645
+ },
646
+ },
647
+ },
648
+ },
649
+ {
650
+ name: 'get_coverage_matrix',
651
+ description: 'Get a comprehensive Route × Track coverage matrix showing test counts, pass rates, and execution data. Use this for a complete view of test coverage.',
652
+ inputSchema: {
653
+ type: 'object',
654
+ properties: {
655
+ include_execution_data: {
656
+ type: 'boolean',
657
+ description: 'Include pass/fail rates and last execution times (default: true)',
658
+ },
659
+ include_bug_counts: {
660
+ type: 'boolean',
661
+ description: 'Include open/critical bug counts per route (default: true)',
662
+ },
663
+ },
664
+ },
665
+ },
666
+ {
667
+ name: 'get_stale_coverage',
668
+ description: 'Get routes that have not been tested within a threshold, ordered by risk (open bugs, critical bugs). Use this to find areas that need fresh testing.',
669
+ inputSchema: {
670
+ type: 'object',
671
+ properties: {
672
+ days_threshold: {
673
+ type: 'number',
674
+ description: 'Days without testing to consider stale (default: 14)',
675
+ },
676
+ limit: {
677
+ type: 'number',
678
+ description: 'Maximum routes to return (default: 20)',
679
+ },
680
+ },
681
+ },
682
+ },
683
+ {
684
+ name: 'generate_deploy_checklist',
685
+ description: 'Generate a pre-deployment testing checklist based on changed routes. Returns prioritized tests to run before deploying, including critical tests, recommended tests, and coverage gaps.',
686
+ inputSchema: {
687
+ type: 'object',
688
+ properties: {
689
+ routes: {
690
+ type: 'array',
691
+ items: { type: 'string' },
692
+ description: 'Routes that will be affected by the deployment',
693
+ },
694
+ changed_files: {
695
+ type: 'array',
696
+ items: { type: 'string' },
697
+ description: 'Optional: files changed to infer affected routes',
698
+ },
699
+ deployment_type: {
700
+ type: 'string',
701
+ enum: ['hotfix', 'feature', 'release'],
702
+ description: 'Type of deployment (affects checklist thoroughness, default: feature)',
703
+ },
704
+ },
705
+ required: ['routes'],
706
+ },
707
+ },
708
+ {
709
+ name: 'get_qa_health',
710
+ description: 'Get comprehensive QA health metrics including testing velocity, bug discovery rate, resolution time, coverage, and tester activity. Returns a health score (0-100) with grade.',
711
+ inputSchema: {
712
+ type: 'object',
713
+ properties: {
714
+ period_days: {
715
+ type: 'number',
716
+ description: 'Analysis period in days (default: 30)',
717
+ },
718
+ compare_previous: {
719
+ type: 'boolean',
720
+ description: 'Include comparison with previous period (default: true)',
721
+ },
722
+ },
723
+ },
724
+ },
725
+ ];
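// Illustrative note (not part of the package source): each definition above is advertised via
// MCP tools/list, and a client invokes one with a standard tools/call request along the lines of:
//   { "method": "tools/call", "params": { "name": "list_reports", "arguments": { "limit": 5, "severity": "high" } } }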
726
+ // Tool handlers
727
+ async function listReports(args) {
728
+ let query = supabase
729
+ .from('reports')
730
+ .select('id, report_type, severity, status, description, app_context, created_at, tester:testers(name, email)')
731
+ .eq('project_id', PROJECT_ID)
732
+ .order('created_at', { ascending: false })
733
+ .limit(Math.min(args.limit || 10, 50));
734
+ if (args.status)
735
+ query = query.eq('status', args.status);
736
+ if (args.severity)
737
+ query = query.eq('severity', args.severity);
738
+ if (args.type)
739
+ query = query.eq('report_type', args.type);
740
+ const { data, error } = await query;
741
+ if (error) {
742
+ return { error: error.message };
743
+ }
744
+ return {
745
+ reports: data?.map(r => ({
746
+ id: r.id,
747
+ type: r.report_type,
748
+ severity: r.severity,
749
+ status: r.status,
750
+ description: r.description,
751
+ route: r.app_context?.currentRoute,
752
+ reporter: r.tester?.name || 'Anonymous',
753
+ created_at: r.created_at,
754
+ })),
755
+ total: data?.length || 0,
756
+ };
757
+ }
758
+ async function getReport(args) {
759
+ // Validate UUID format to prevent injection
760
+ if (!args.report_id || !/^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i.test(args.report_id)) {
761
+ return { error: 'Invalid report_id format' };
762
+ }
763
+ const { data, error } = await supabase
764
+ .from('reports')
765
+ .select('*, tester:testers(*), track:qa_tracks(*)')
766
+ .eq('id', args.report_id)
767
+ .eq('project_id', PROJECT_ID) // Security: ensure report belongs to this project
768
+ .single();
769
+ if (error) {
770
+ return { error: error.message };
771
+ }
772
+ return {
773
+ report: {
774
+ id: data.id,
775
+ type: data.report_type,
776
+ severity: data.severity,
777
+ status: data.status,
778
+ description: data.description,
779
+ app_context: data.app_context,
780
+ device_info: data.device_info,
781
+ navigation_history: data.navigation_history,
782
+ screenshots: data.screenshots,
783
+ created_at: data.created_at,
784
+ reporter: data.tester ? {
785
+ name: data.tester.name,
786
+ email: data.tester.email,
787
+ } : null,
788
+ track: data.track ? {
789
+ name: data.track.name,
790
+ icon: data.track.icon,
791
+ } : null,
792
+ },
793
+ };
794
+ }
795
+ async function searchReports(args) {
796
+ const sanitizedQuery = sanitizeSearchQuery(args.query);
797
+ const sanitizedRoute = sanitizeSearchQuery(args.route);
798
+ let query = supabase
799
+ .from('reports')
800
+ .select('id, report_type, severity, status, description, app_context, created_at')
801
+ .eq('project_id', PROJECT_ID)
802
+ .order('created_at', { ascending: false })
803
+ .limit(20);
804
+ if (sanitizedQuery) {
805
+ query = query.ilike('description', `%${sanitizedQuery}%`);
806
+ }
807
+ const { data, error } = await query;
808
+ if (error) {
809
+ return { error: error.message };
810
+ }
811
+ // Filter by route if provided
812
+ let results = data || [];
813
+ if (sanitizedRoute) {
814
+ results = results.filter(r => {
815
+ const route = r.app_context?.currentRoute;
816
+ return route && route.includes(sanitizedRoute);
817
+ });
818
+ }
819
+ return {
820
+ reports: results.map(r => ({
821
+ id: r.id,
822
+ type: r.report_type,
823
+ severity: r.severity,
824
+ status: r.status,
825
+ description: r.description,
826
+ route: r.app_context?.currentRoute,
827
+ created_at: r.created_at,
828
+ })),
829
+ total: results.length,
830
+ };
831
+ }
832
+ async function updateReportStatus(args) {
833
+ if (!isValidUUID(args.report_id)) {
834
+ return { error: 'Invalid report_id format' };
835
+ }
836
+ const updates = { status: args.status };
837
+ if (args.resolution) {
838
+ updates.resolution = args.resolution;
839
+ }
840
+ const { error } = await supabase
841
+ .from('reports')
842
+ .update(updates)
843
+ .eq('id', args.report_id)
844
+ .eq('project_id', PROJECT_ID); // Security: ensure report belongs to this project
845
+ if (error) {
846
+ return { error: error.message };
847
+ }
848
+ return { success: true, message: `Report status updated to ${args.status}` };
849
+ }
850
+ async function getReportContext(args) {
851
+ if (!isValidUUID(args.report_id)) {
852
+ return { error: 'Invalid report_id format' };
853
+ }
854
+ const { data, error } = await supabase
855
+ .from('reports')
856
+ .select('app_context, device_info, navigation_history, enhanced_context')
857
+ .eq('id', args.report_id)
858
+ .eq('project_id', PROJECT_ID) // Security: ensure report belongs to this project
859
+ .single();
860
+ if (error) {
861
+ return { error: error.message };
862
+ }
863
+ return {
864
+ context: {
865
+ app_context: data.app_context,
866
+ device_info: data.device_info,
867
+ navigation_history: data.navigation_history,
868
+ enhanced_context: data.enhanced_context || {},
869
+ },
870
+ };
871
+ }
872
+ async function getProjectInfo() {
873
+ // Get project details
874
+ const { data: project, error: projectError } = await supabase
875
+ .from('projects')
876
+ .select('id, name, slug, is_qa_enabled')
877
+ .eq('id', PROJECT_ID)
878
+ .single();
879
+ if (projectError) {
880
+ return { error: projectError.message };
881
+ }
882
+ // Get track counts
883
+ const { data: tracks } = await supabase
884
+ .from('qa_tracks')
885
+ .select('id, name, icon, test_template')
886
+ .eq('project_id', PROJECT_ID);
887
+ // Get test case count
888
+ const { count: testCaseCount } = await supabase
889
+ .from('test_cases')
890
+ .select('id', { count: 'exact', head: true })
891
+ .eq('project_id', PROJECT_ID);
892
+ // Get open bug count
893
+ const { count: openBugCount } = await supabase
894
+ .from('reports')
895
+ .select('id', { count: 'exact', head: true })
896
+ .eq('project_id', PROJECT_ID)
897
+ .eq('report_type', 'bug')
898
+ .in('status', ['new', 'confirmed', 'in_progress']);
899
+ return {
900
+ project: {
901
+ id: project.id,
902
+ name: project.name,
903
+ slug: project.slug,
904
+ qaEnabled: project.is_qa_enabled,
905
+ },
906
+ stats: {
907
+ tracks: tracks?.length || 0,
908
+ testCases: testCaseCount || 0,
909
+ openBugs: openBugCount || 0,
910
+ },
911
+ tracks: tracks?.map(t => ({
912
+ id: t.id,
913
+ name: t.name,
914
+ icon: t.icon,
915
+ template: t.test_template,
916
+ })) || [],
917
+ };
918
+ }
919
+ async function getQaTracks() {
920
+ const { data, error } = await supabase
921
+ .from('qa_tracks')
922
+ .select('*')
923
+ .eq('project_id', PROJECT_ID)
924
+ .order('sort_order');
925
+ if (error) {
926
+ return { error: error.message };
927
+ }
928
+ return {
929
+ tracks: data?.map(t => ({
930
+ id: t.id,
931
+ name: t.name,
932
+ slug: t.slug,
933
+ icon: t.icon,
934
+ color: t.color,
935
+ testTemplate: t.test_template,
936
+ description: t.description,
937
+ requiresCertification: t.requires_certification,
938
+ evaluationCriteria: t.evaluation_criteria,
939
+ })) || [],
940
+ };
941
+ }
942
+ async function createTestCase(args) {
943
+ // Find track ID if track name provided
944
+ let trackId = null;
945
+ if (args.track) {
946
+ const { data: trackData } = await supabase
947
+ .from('qa_tracks')
948
+ .select('id')
949
+ .eq('project_id', PROJECT_ID)
950
+ .ilike('name', `%${args.track}%`)
951
+ .single();
952
+ trackId = trackData?.id || null;
953
+ }
954
+ const testCase = {
955
+ project_id: PROJECT_ID,
956
+ test_key: args.test_key,
957
+ title: args.title,
958
+ description: args.description || '',
959
+ track_id: trackId,
960
+ priority: args.priority || 'P2',
961
+ steps: args.steps,
962
+ expected_result: args.expected_result,
963
+ preconditions: args.preconditions || '',
964
+ target_route: args.target_route || null,
965
+ };
966
+ const { data, error } = await supabase
967
+ .from('test_cases')
968
+ .insert(testCase)
969
+ .select('id, test_key, title')
970
+ .single();
971
+ if (error) {
972
+ return { error: error.message };
973
+ }
974
+ return {
975
+ success: true,
976
+ testCase: {
977
+ id: data.id,
978
+ testKey: data.test_key,
979
+ title: data.title,
980
+ },
981
+ message: `Test case ${data.test_key} created successfully`,
982
+ };
983
+ }
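// Illustrative arguments for create_test_case, following the inputSchema defined earlier
// (example values only, not part of the package source):
//   {
//     test_key: 'TC-042',
//     title: 'Profile form validates email address',
//     track: 'functional',
//     priority: 'P1',
//     target_route: '/settings/profile',
//     steps: [
//       { stepNumber: 1, action: 'Open /settings/profile', expectedResult: 'Form loads' },
//       { stepNumber: 2, action: 'Enter an invalid email and save', expectedResult: 'Validation error shown' },
//     ],
//     expected_result: 'Invalid emails are rejected with a clear error message',
//   }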
984
+ async function updateTestCase(args) {
985
+ // Need either test_case_id or test_key
986
+ if (!args.test_case_id && !args.test_key) {
987
+ return { error: 'Must provide either test_case_id or test_key to identify the test case' };
988
+ }
989
+ // Find the test case
990
+ let testCaseId = args.test_case_id;
991
+ if (!testCaseId && args.test_key) {
992
+ const { data: existing } = await supabase
993
+ .from('test_cases')
994
+ .select('id')
995
+ .eq('project_id', PROJECT_ID)
996
+ .eq('test_key', args.test_key)
997
+ .single();
998
+ if (!existing) {
999
+ return { error: `Test case with key ${args.test_key} not found` };
1000
+ }
1001
+ testCaseId = existing.id;
1002
+ }
1003
+ // Build update object with only provided fields
1004
+ const updates = {};
1005
+ if (args.title !== undefined)
1006
+ updates.title = args.title;
1007
+ if (args.description !== undefined)
1008
+ updates.description = args.description;
1009
+ if (args.priority !== undefined)
1010
+ updates.priority = args.priority;
1011
+ if (args.steps !== undefined)
1012
+ updates.steps = args.steps;
1013
+ if (args.expected_result !== undefined)
1014
+ updates.expected_result = args.expected_result;
1015
+ if (args.preconditions !== undefined)
1016
+ updates.preconditions = args.preconditions;
1017
+ if (args.target_route !== undefined)
1018
+ updates.target_route = args.target_route;
1019
+ if (Object.keys(updates).length === 0) {
1020
+ return { error: 'No fields to update' };
1021
+ }
1022
+ const { data, error } = await supabase
1023
+ .from('test_cases')
1024
+ .update(updates)
1025
+ .eq('id', testCaseId)
1026
+ .eq('project_id', PROJECT_ID)
1027
+ .select('id, test_key, title, target_route')
1028
+ .single();
1029
+ if (error) {
1030
+ return { error: error.message };
1031
+ }
1032
+ return {
1033
+ success: true,
1034
+ testCase: {
1035
+ id: data.id,
1036
+ testKey: data.test_key,
1037
+ title: data.title,
1038
+ targetRoute: data.target_route,
1039
+ },
1040
+ message: `Test case ${data.test_key} updated successfully`,
1041
+ updatedFields: Object.keys(updates),
1042
+ };
1043
+ }
1044
+ async function listTestCases(args) {
1045
+ let query = supabase
1046
+ .from('test_cases')
1047
+ .select(`
1048
+ id,
1049
+ test_key,
1050
+ title,
1051
+ description,
1052
+ priority,
1053
+ target_route,
1054
+ preconditions,
1055
+ expected_result,
1056
+ steps,
1057
+ track:qa_tracks(id, name, icon, color)
1058
+ `)
1059
+ .eq('project_id', PROJECT_ID)
1060
+ .order('test_key', { ascending: true });
1061
+ // Apply filters
1062
+ if (args.priority) {
1063
+ query = query.eq('priority', args.priority);
1064
+ }
1065
+ if (args.missing_target_route) {
1066
+ query = query.is('target_route', null);
1067
+ }
1068
+ // Pagination
1069
+ const limit = args.limit || 100;
1070
+ const offset = args.offset || 0;
1071
+ query = query.range(offset, offset + limit - 1);
1072
+ const { data, error } = await query;
1073
+ if (error) {
1074
+ return { error: error.message };
1075
+ }
1076
+ // Filter by track name if provided (post-query since it's a joined field)
1077
+ let testCases = data || [];
1078
+ if (args.track) {
1079
+ testCases = testCases.filter((tc) => tc.track?.name?.toLowerCase().includes(args.track.toLowerCase()));
1080
+ }
1081
+ return {
1082
+ count: testCases.length,
1083
+ testCases: testCases.map((tc) => ({
1084
+ id: tc.id,
1085
+ testKey: tc.test_key,
1086
+ title: tc.title,
1087
+ description: tc.description,
1088
+ priority: tc.priority,
1089
+ targetRoute: tc.target_route,
1090
+ hasTargetRoute: !!tc.target_route,
1091
+ track: tc.track?.name || null,
1092
+ stepsCount: tc.steps?.length || 0,
1093
+ })),
1094
+ pagination: {
1095
+ limit,
1096
+ offset,
1097
+ hasMore: testCases.length === limit,
1098
+ },
1099
+ };
1100
+ }
1101
+ async function getBugPatterns(args) {
1102
+ // Get bugs grouped by route
1103
+ let query = supabase
1104
+ .from('reports')
1105
+ .select('app_context, severity, status, created_at')
1106
+ .eq('project_id', PROJECT_ID)
1107
+ .eq('report_type', 'bug')
1108
+ .order('created_at', { ascending: false })
1109
+ .limit(100);
1110
+ const { data: bugs, error } = await query;
1111
+ if (error) {
1112
+ return { error: error.message };
1113
+ }
1114
+ // Analyze patterns
1115
+ const routePatterns = {};
1116
+ for (const bug of bugs || []) {
1117
+ const route = bug.app_context?.currentRoute || 'unknown';
1118
+ // Filter by route if specified
1119
+ if (args.route && !route.includes(args.route))
1120
+ continue;
1121
+ if (!routePatterns[route]) {
1122
+ routePatterns[route] = { total: 0, critical: 0, open: 0, resolved: 0 };
1123
+ }
1124
+ routePatterns[route].total++;
1125
+ if (bug.severity === 'critical' || bug.severity === 'high') {
1126
+ routePatterns[route].critical++;
1127
+ }
1128
+ if (['new', 'confirmed', 'in_progress'].includes(bug.status)) {
1129
+ routePatterns[route].open++;
1130
+ }
1131
+ else {
1132
+ routePatterns[route].resolved++;
1133
+ }
1134
+ }
1135
+ // Sort by total bugs descending
1136
+ const hotspots = Object.entries(routePatterns)
1137
+ .map(([route, stats]) => ({ route, ...stats }))
1138
+ .sort((a, b) => b.total - a.total)
1139
+ .slice(0, 10);
1140
+ return {
1141
+ hotspots,
1142
+ summary: {
1143
+ totalRoutes: Object.keys(routePatterns).length,
1144
+ totalBugs: bugs?.length || 0,
1145
+ criticalRoutes: hotspots.filter(h => h.critical > 0).length,
1146
+ },
1147
+ recommendations: hotspots
1148
+ .filter(h => h.open > 0)
1149
+ .map(h => `Route "${h.route}" has ${h.open} open bugs - prioritize testing here`),
1150
+ };
1151
+ }
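// Illustrative example of the aggregation above (values are hypothetical): a hotspot row of
//   { route: '/checkout', total: 6, critical: 2, open: 3, resolved: 3 }
// yields the recommendation 'Route "/checkout" has 3 open bugs - prioritize testing here'.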
1152
+ async function suggestTestCases(args) {
1153
+ const count = args.count || 5;
1154
+ // Get existing test cases to avoid duplicates
1155
+ const { data: existingTests } = await supabase
1156
+ .from('test_cases')
1157
+ .select('test_key, title')
1158
+ .eq('project_id', PROJECT_ID)
1159
+ .order('test_key', { ascending: false })
1160
+ .limit(1);
1161
+ // Calculate next test key number
1162
+ const lastKey = existingTests?.[0]?.test_key || 'TC-000';
1163
+ const lastNum = parseInt(lastKey.replace('TC-', '')) || 0;
1164
+ // Get bug patterns for context
1165
+ const patterns = await getBugPatterns({ route: args.route });
1166
+ // Generate suggestions based on track
1167
+ const suggestions = [];
1168
+ const track = args.track || 'functional';
1169
+ const route = args.route || '/';
1170
+ // Template suggestions based on track type
1171
+ const templates = getTrackTemplates(track);
1172
+ for (let i = 0; i < count; i++) {
1173
+ const keyNum = lastNum + i + 1;
1174
+ const template = templates[i % templates.length];
1175
+ suggestions.push({
1176
+ test_key: `TC-${String(keyNum).padStart(3, '0')}`,
1177
+ title: template.title.replace('{route}', route),
1178
+ description: template.description.replace('{route}', route),
1179
+ track,
1180
+ priority: i === 0 ? 'P1' : 'P2',
1181
+ steps: template.steps.map((s, idx) => ({
1182
+ stepNumber: idx + 1,
1183
+ action: s.action.replace('{route}', route),
1184
+ expectedResult: s.expectedResult.replace('{route}', route),
1185
+ })),
1186
+ expected_result: template.expected_result.replace('{route}', route),
1187
+ });
1188
+ }
1189
+ // Get related bugs for historical context
1190
+ const { data: relatedBugs } = await supabase
1191
+ .from('reports')
1192
+ .select('id, description, severity')
1193
+ .eq('project_id', PROJECT_ID)
1194
+ .eq('report_type', 'bug')
1195
+ .limit(10);
1196
+ const routeBugs = (relatedBugs || []).filter(bug => {
1197
+ // In a real impl, would filter by route match
1198
+ return true;
1199
+ }).slice(0, 5);
1200
+ return {
1201
+ suggestions,
1202
+ context: {
1203
+ route: args.route || 'all',
1204
+ track,
1205
+ bugHotspots: patterns.hotspots?.slice(0, 3) || [],
1206
+ },
1207
+ historicalContext: {
1208
+ relatedBugs: routeBugs.map(b => ({
1209
+ id: b.id,
1210
+ description: b.description.slice(0, 100),
1211
+ severity: b.severity,
1212
+ })),
1213
+ recommendation: routeBugs.length > 0
1214
+ ? `These test suggestions are informed by ${routeBugs.length} historical bug(s) in this area.`
1215
+ : 'No historical bugs found for this route.',
1216
+ },
1217
+ instructions: `Review these suggestions and use create_test_case to add them to BugBear.
1218
+ You can modify the suggestions before creating them.`,
1219
+ };
1220
+ }
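// Key numbering example (illustrative): if the highest existing key is 'TC-014', lastNum is 14,
// so five suggestions are keyed TC-015 through TC-019 via `TC-${String(keyNum).padStart(3, '0')}`.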
1221
+ // === QA INTELLIGENCE TOOL HANDLERS ===
1222
+ async function getTestPriorities(args) {
1223
+ const limit = args.limit || 10;
1224
+ const minScore = args.min_score || 0;
1225
+ const includeFactors = args.include_factors !== false;
1226
+ // First, refresh the route stats
1227
+ await supabase.rpc('refresh_route_test_stats', { p_project_id: PROJECT_ID });
1228
+ // Get prioritized routes
1229
+ const { data: routes, error } = await supabase
1230
+ .from('route_test_stats')
1231
+ .select('*')
1232
+ .eq('project_id', PROJECT_ID)
1233
+ .gte('priority_score', minScore)
1234
+ .order('priority_score', { ascending: false })
1235
+ .limit(limit);
1236
+ if (error) {
1237
+ return { error: error.message };
1238
+ }
1239
+ if (!routes || routes.length === 0) {
1240
+ return {
1241
+ priorities: [],
1242
+ summary: { totalRoutes: 0, criticalCount: 0, highCount: 0, mediumCount: 0 },
1243
+ guidance: 'No routes found with test data. Start by creating test cases and running tests.',
1244
+ };
1245
+ }
1246
+ // Calculate urgency levels
1247
+ const getUrgency = (score) => {
1248
+ if (score >= 70)
1249
+ return 'critical';
1250
+ if (score >= 50)
1251
+ return 'high';
1252
+ if (score >= 30)
1253
+ return 'medium';
1254
+ return 'low';
1255
+ };
1256
+ const getRecommendation = (route) => {
1257
+ if (route.critical_bugs > 0) {
1258
+ return `Critical bugs exist - prioritize immediate testing and bug fixes`;
1259
+ }
1260
+ if (route.open_bugs >= 3) {
1261
+ return `Multiple open bugs (${route.open_bugs}) - comprehensive testing needed`;
1262
+ }
1263
+ if (route.last_tested_at === null) {
1264
+ return `Never tested - establish baseline coverage`;
1265
+ }
1266
+ const daysSinceTest = route.last_tested_at
1267
+ ? Math.floor((Date.now() - new Date(route.last_tested_at).getTime()) / (1000 * 60 * 60 * 24))
1268
+ : null;
1269
+ if (daysSinceTest && daysSinceTest > 14) {
1270
+ return `Stale coverage (${daysSinceTest} days) - refresh testing`;
1271
+ }
1272
+ if (route.regression_count > 0) {
1273
+ return `Regression risk (${route.regression_count} past regressions) - add regression tests`;
1274
+ }
1275
+ if (route.test_case_count < 3) {
1276
+ return `Low test coverage (${route.test_case_count} tests) - add more test cases`;
1277
+ }
1278
+ return 'Maintain current testing cadence';
1279
+ };
1280
+ const priorities = routes.map((route, idx) => {
1281
+ const daysSinceTest = route.last_tested_at
1282
+ ? Math.floor((Date.now() - new Date(route.last_tested_at).getTime()) / (1000 * 60 * 60 * 24))
1283
+ : null;
1284
+ const priority = {
1285
+ rank: idx + 1,
1286
+ route: route.route,
1287
+ priorityScore: route.priority_score,
1288
+ urgency: getUrgency(route.priority_score),
1289
+ stats: {
1290
+ openBugs: route.open_bugs,
1291
+ criticalBugs: route.critical_bugs,
1292
+ highBugs: route.high_bugs,
1293
+ testCases: route.test_case_count,
1294
+ daysSinceTest,
1295
+ regressions: route.regression_count,
1296
+ recentBugs: route.bugs_last_7_days,
1297
+ },
1298
+ recommendation: getRecommendation(route),
1299
+ };
1300
+ if (includeFactors) {
1301
+ // Calculate factor breakdown
1302
+ const bugFreqScore = Math.min(route.open_bugs * 5, 30);
1303
+ const criticalScore = Math.min(route.critical_bugs * 25 + route.high_bugs * 10, 25);
1304
+ const stalenessScore = daysSinceTest === null ? 20 : Math.min(daysSinceTest, 20);
1305
+ const coverageScore = Math.max(15 - route.test_case_count * 5, 0);
1306
+ const regressionScore = Math.min(route.regression_count * 10, 10);
1307
+ priority.factors = {
1308
+ bugFrequency: { score: bugFreqScore, openBugs: route.open_bugs, bugs7d: route.bugs_last_7_days },
1309
+ criticalSeverity: { score: criticalScore, critical: route.critical_bugs, high: route.high_bugs },
1310
+ staleness: { score: stalenessScore, daysSinceTest },
1311
+ coverageGap: { score: coverageScore, testCount: route.test_case_count },
1312
+ regressionRisk: { score: regressionScore, regressionCount: route.regression_count },
1313
+ };
1314
+ }
1315
+ return priority;
1316
+ });
1317
+ const criticalCount = priorities.filter(p => p.urgency === 'critical').length;
1318
+ const highCount = priorities.filter(p => p.urgency === 'high').length;
1319
+ const mediumCount = priorities.filter(p => p.urgency === 'medium').length;
1320
+ let guidance = '';
1321
+ if (criticalCount > 0) {
1322
+ guidance = `URGENT: ${criticalCount} route(s) need immediate attention. Focus on critical bugs and untested areas first.`;
1323
+ }
1324
+ else if (highCount > 0) {
1325
+ guidance = `${highCount} route(s) have high priority. Schedule testing sessions to address coverage gaps.`;
1326
+ }
1327
+ else {
1328
+ guidance = 'QA coverage is in good shape. Maintain regular testing cadence.';
1329
+ }
1330
+ return {
1331
+ priorities,
1332
+ summary: {
1333
+ totalRoutes: routes.length,
1334
+ criticalCount,
1335
+ highCount,
1336
+ mediumCount,
1337
+ },
1338
+ guidance,
1339
+ };
1340
+ }
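// The factor caps above sum to 100 (30 + 25 + 20 + 15 + 10). Illustrative breakdown, assuming the
// stored priority_score follows the same weighting: a route with 3 open bugs (15), one critical
// bug (25), no recorded test run (20), a single test case (10) and one past regression (10)
// scores 80, which lands in the 'critical' urgency band (score >= 70).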
1341
+ async function getCoverageGaps(args) {
1342
+ const gapType = args.gap_type || 'all';
1343
+ const staleDays = args.stale_days || 14;
1344
+ const gaps = { untested: [], missingTracks: [], stale: [] };
1345
+ // Get all routes from reports
1346
+ const { data: routesFromReports } = await supabase
1347
+ .from('reports')
1348
+ .select('app_context')
1349
+ .eq('project_id', PROJECT_ID)
1350
+ .not('app_context->currentRoute', 'is', null);
1351
+ const allRoutes = new Set();
1352
+ (routesFromReports || []).forEach(r => {
1353
+ const route = r.app_context?.currentRoute;
1354
+ if (route)
1355
+ allRoutes.add(route);
1356
+ });
1357
+ // Get test coverage data
1358
+ const { data: testCases } = await supabase
1359
+ .from('test_cases')
1360
+ .select('target_route, category, track_id')
1361
+ .eq('project_id', PROJECT_ID);
1362
+ const coveredRoutes = new Set();
1363
+ const routeTrackCoverage = {};
1364
+ (testCases || []).forEach(tc => {
1365
+ const route = tc.target_route || tc.category;
1366
+ if (route) {
1367
+ coveredRoutes.add(route);
1368
+ if (!routeTrackCoverage[route])
1369
+ routeTrackCoverage[route] = new Set();
1370
+ if (tc.track_id)
1371
+ routeTrackCoverage[route].add(tc.track_id);
1372
+ }
1373
+ });
1374
+ // Get all tracks
1375
+ const { data: tracks } = await supabase
1376
+ .from('qa_tracks')
1377
+ .select('id, name')
1378
+ .eq('project_id', PROJECT_ID);
1379
+ const trackMap = new Map((tracks || []).map(t => [t.id, t.name]));
1380
+ // Get route stats for staleness
1381
+ const { data: routeStats } = await supabase
1382
+ .from('route_test_stats')
1383
+ .select('route, last_tested_at, open_bugs, critical_bugs')
1384
+ .eq('project_id', PROJECT_ID);
1385
+ const routeStatsMap = new Map((routeStats || []).map(r => [r.route, r]));
1386
+ // Find untested routes
1387
+ if (gapType === 'all' || gapType === 'untested_routes') {
1388
+ allRoutes.forEach(route => {
1389
+ if (!coveredRoutes.has(route)) {
1390
+ const stats = routeStatsMap.get(route);
1391
+ const severity = (stats?.critical_bugs || 0) > 0 ? 'critical' :
1392
+ (stats?.open_bugs || 0) > 2 ? 'high' : 'medium';
1393
+ gaps.untested.push({
1394
+ route,
1395
+ severity,
1396
+ type: 'untested',
1397
+ details: {
1398
+ openBugs: stats?.open_bugs || 0,
1399
+ criticalBugs: stats?.critical_bugs || 0,
1400
+ },
1401
+ recommendation: `Create test cases for ${route} - this route has bugs but no test coverage.`,
1402
+ });
1403
+ }
1404
+ });
1405
+ gaps.untested.sort((a, b) => {
1406
+ const severityOrder = { critical: 0, high: 1, medium: 2 };
1407
+ return severityOrder[a.severity] - severityOrder[b.severity];
1408
+ });
1409
+ }
1410
+ // Find missing track coverage
1411
+ if (gapType === 'all' || gapType === 'missing_tracks') {
1412
+ coveredRoutes.forEach(route => {
1413
+ const coveredTracks = routeTrackCoverage[route] || new Set();
1414
+ const missingTracks = [];
1415
+ trackMap.forEach((name, id) => {
1416
+ if (!coveredTracks.has(id)) {
1417
+ missingTracks.push(name);
1418
+ }
1419
+ });
1420
+ if (missingTracks.length > 0 && missingTracks.length < trackMap.size) {
1421
+ const stats = routeStatsMap.get(route);
1422
+ gaps.missingTracks.push({
1423
+ route,
1424
+ severity: missingTracks.length > trackMap.size / 2 ? 'high' : 'medium',
1425
+ type: 'missing_tracks',
1426
+ details: { missingTracks },
1427
+ recommendation: `Add tests for tracks: ${missingTracks.join(', ')}`,
1428
+ });
1429
+ }
1430
+ });
1431
+ }
1432
+ // Find stale coverage
1433
+ if (gapType === 'all' || gapType === 'stale_coverage') {
1434
+ (routeStats || []).forEach(stat => {
1435
+ if (stat.last_tested_at) {
1436
+ const daysSince = Math.floor((Date.now() - new Date(stat.last_tested_at).getTime()) / (1000 * 60 * 60 * 24));
1437
+ if (daysSince >= staleDays) {
1438
+ gaps.stale.push({
1439
+ route: stat.route,
1440
+ severity: daysSince >= staleDays * 2 ? 'high' : 'medium',
1441
+ type: 'stale',
1442
+ details: {
1443
+ daysSinceTest: daysSince,
1444
+ openBugs: stat.open_bugs,
1445
+ criticalBugs: stat.critical_bugs,
1446
+ },
1447
+ recommendation: `Re-run tests for ${stat.route} - last tested ${daysSince} days ago`,
1448
+ });
1449
+ }
1450
+ }
1451
+ });
1452
+ gaps.stale.sort((a, b) => (b.details.daysSinceTest || 0) - (a.details.daysSinceTest || 0));
1453
+ }
1454
+ const recommendations = [];
1455
+ if (gaps.untested.length > 0) {
1456
+ recommendations.push(`${gaps.untested.length} route(s) have bugs but no test coverage - create tests immediately`);
1457
+ }
1458
+ if (gaps.missingTracks.length > 0) {
1459
+ recommendations.push(`${gaps.missingTracks.length} route(s) are missing track-specific tests`);
1460
+ }
1461
+ if (gaps.stale.length > 0) {
1462
+ recommendations.push(`${gaps.stale.length} route(s) have stale coverage (>${staleDays} days) - refresh testing`);
1463
+ }
1464
+ return {
1465
+ gaps,
1466
+ summary: {
1467
+ untestedRoutes: gaps.untested.length,
1468
+ routesMissingTracks: gaps.missingTracks.length,
1469
+ staleRoutes: gaps.stale.length,
1470
+ },
1471
+ recommendations,
1472
+ };
1473
+ }
1474
+ async function getRegressions(args) {
1475
+ const days = args.days || 30;
1476
+ const includeHistory = args.include_history || false;
1477
+ // Find potential regressions: routes with resolved bugs that have new bugs
1478
+ const { data: resolvedBugs } = await supabase
1479
+ .from('reports')
1480
+ .select('id, description, severity, app_context, resolved_at')
1481
+ .eq('project_id', PROJECT_ID)
1482
+ .eq('report_type', 'bug')
1483
+ .in('status', ['resolved', 'fixed', 'verified', 'closed'])
1484
+ .gte('resolved_at', new Date(Date.now() - days * 24 * 60 * 60 * 1000).toISOString());
1485
+ const { data: newBugs } = await supabase
1486
+ .from('reports')
1487
+ .select('id, description, severity, app_context, created_at')
1488
+ .eq('project_id', PROJECT_ID)
1489
+ .eq('report_type', 'bug')
1490
+ .in('status', ['new', 'triaging', 'confirmed', 'in_progress', 'reviewed'])
1491
+ .gte('created_at', new Date(Date.now() - days * 24 * 60 * 60 * 1000).toISOString());
1492
+ // Group by route
1493
+ const routeData = {};
1494
+ (resolvedBugs || []).forEach(bug => {
1495
+ const route = bug.app_context?.currentRoute;
1496
+ if (route) {
1497
+ if (!routeData[route])
1498
+ routeData[route] = { resolved: [], new: [] };
1499
+ routeData[route].resolved.push(bug);
1500
+ }
1501
+ });
1502
+ (newBugs || []).forEach(bug => {
1503
+ const route = bug.app_context?.currentRoute;
1504
+ if (route) {
1505
+ if (!routeData[route])
1506
+ routeData[route] = { resolved: [], new: [] };
1507
+ routeData[route].new.push(bug);
1508
+ }
1509
+ });
1510
+ // Find routes with both resolved and new bugs (potential regressions)
1511
+ const regressions = [];
1512
+ let criticalCount = 0;
1513
+ const recurringPatterns = [];
1514
+ Object.entries(routeData).forEach(([route, data]) => {
1515
+ if (data.resolved.length > 0 && data.new.length > 0) {
1516
+ // This is a potential regression
1517
+ const latestResolved = data.resolved.sort((a, b) => new Date(b.resolved_at).getTime() - new Date(a.resolved_at).getTime())[0];
1518
+ const daysSinceResolution = latestResolved.resolved_at
1519
+ ? Math.floor((Date.now() - new Date(latestResolved.resolved_at).getTime()) / (1000 * 60 * 60 * 24))
1520
+ : null;
1521
+ const severity = data.new.some(b => b.severity === 'critical') ? 'critical' :
1522
+ data.new.some(b => b.severity === 'high') ? 'high' : 'medium';
1523
+ if (severity === 'critical')
1524
+ criticalCount++;
1525
+ if (data.resolved.length >= 2 && data.new.length >= 2) {
1526
+ recurringPatterns.push(route);
1527
+ }
1528
+ const regression = {
1529
+ route,
1530
+ severity,
1531
+ originalBug: {
1532
+ id: latestResolved.id,
1533
+ description: latestResolved.description?.slice(0, 100) || '',
1534
+ resolvedAt: latestResolved.resolved_at,
1535
+ },
1536
+ newBugs: data.new.map(b => ({
1537
+ id: b.id,
1538
+ description: b.description?.slice(0, 100) || '',
1539
+ severity: b.severity,
1540
+ createdAt: b.created_at,
1541
+ })),
1542
+ daysSinceResolution,
1543
+ regressionCount: data.resolved.length,
1544
+ };
1545
+ if (includeHistory) {
1546
+ regression.history = {
1547
+ totalResolved: data.resolved.length,
1548
+ totalNew: data.new.length,
1549
+ resolvedBugs: data.resolved.map(b => ({
1550
+ id: b.id,
1551
+ severity: b.severity,
1552
+ resolvedAt: b.resolved_at,
1553
+ })),
1554
+ };
1555
+ }
1556
+ regressions.push(regression);
1557
+ }
1558
+ });
1559
+ // Sort by severity and count
1560
+ regressions.sort((a, b) => {
1561
+ const severityOrder = { critical: 0, high: 1, medium: 2 };
1562
+ return (severityOrder[a.severity] - severityOrder[b.severity]) ||
1563
+ (b.newBugs.length - a.newBugs.length);
1564
+ });
1565
+ const recommendations = [];
1566
+ if (criticalCount > 0) {
1567
+ recommendations.push(`URGENT: ${criticalCount} regression(s) involve critical bugs - investigate immediately`);
1568
+ }
1569
+ if (recurringPatterns.length > 0) {
1570
+ recommendations.push(`Recurring patterns in ${recurringPatterns.length} route(s): ${recurringPatterns.slice(0, 3).join(', ')} - consider architectural review`);
1571
+ }
1572
+ if (regressions.length > 0) {
1573
+ recommendations.push('Add regression tests to prevent recurrence - use create_regression_test');
1574
+ }
1575
+ return {
1576
+ regressions,
1577
+ summary: {
1578
+ totalRegressions: regressions.length,
1579
+ criticalRegressions: criticalCount,
1580
+ recurringPatterns: recurringPatterns.length,
1581
+ },
1582
+ recommendations,
1583
+ };
1584
+ }
1585
+ async function getCoverageMatrix(args) {
1586
+ const includeExecution = args.include_execution_data !== false;
1587
+ const includeBugs = args.include_bug_counts !== false;
1588
+ // Get tracks
1589
+ const { data: tracks } = await supabase
1590
+ .from('qa_tracks')
1591
+ .select('id, name, icon, color')
1592
+ .eq('project_id', PROJECT_ID)
1593
+ .order('sort_order');
1594
+ // Get test cases with track info
1595
+ const { data: testCases } = await supabase
1596
+ .from('test_cases')
1597
+ .select('id, target_route, category, track_id')
1598
+ .eq('project_id', PROJECT_ID);
1599
+ // Get test assignments for execution data
1600
+ let assignments = [];
1601
+ if (includeExecution) {
1602
+ const { data } = await supabase
1603
+ .from('test_assignments')
1604
+ .select('test_case_id, status, completed_at')
1605
+ .eq('project_id', PROJECT_ID)
1606
+ .in('status', ['passed', 'failed']);
1607
+ assignments = data || [];
1608
+ }
1609
+ // Get route stats for bug counts
1610
+ let routeStats = [];
1611
+ if (includeBugs) {
1612
+ const { data } = await supabase
1613
+ .from('route_test_stats')
1614
+ .select('route, open_bugs, critical_bugs')
1615
+ .eq('project_id', PROJECT_ID);
1616
+ routeStats = data || [];
1617
+ }
1618
+ const routeStatsMap = new Map(routeStats.map(r => [r.route, r]));
1619
+ // Build assignment map
1620
+ const assignmentsByTestCase = {};
1621
+ assignments.forEach(a => {
1622
+ if (!assignmentsByTestCase[a.test_case_id]) {
1623
+ assignmentsByTestCase[a.test_case_id] = { passed: 0, failed: 0, lastTested: null };
1624
+ }
1625
+ if (a.status === 'passed')
1626
+ assignmentsByTestCase[a.test_case_id].passed++;
1627
+ if (a.status === 'failed')
1628
+ assignmentsByTestCase[a.test_case_id].failed++;
1629
+ if (a.completed_at) {
1630
+ const current = assignmentsByTestCase[a.test_case_id].lastTested;
1631
+ if (!current || new Date(a.completed_at) > new Date(current)) {
1632
+ assignmentsByTestCase[a.test_case_id].lastTested = a.completed_at;
1633
+ }
1634
+ }
1635
+ });
1636
+ // Group test cases by route
1637
+ const routeMap = {};
1638
+ (testCases || []).forEach(tc => {
1639
+ const route = tc.target_route || tc.category || 'Uncategorized';
1640
+ if (!routeMap[route]) {
1641
+ routeMap[route] = { testCases: [], trackCoverage: {} };
1642
+ }
1643
+ routeMap[route].testCases.push(tc);
1644
+ const trackId = tc.track_id || 'none';
1645
+ if (!routeMap[route].trackCoverage[trackId]) {
1646
+ routeMap[route].trackCoverage[trackId] = [];
1647
+ }
1648
+ routeMap[route].trackCoverage[trackId].push(tc);
1649
+ });
1650
+ // Build matrix
1651
+ const matrix = [];
1652
+ const now = Date.now();
1653
+ Object.entries(routeMap).forEach(([route, data]) => {
1654
+ const row = {
1655
+ route,
1656
+ totalTests: data.testCases.length,
1657
+ tracks: {},
1658
+ };
1659
+ if (includeBugs) {
1660
+ const stats = routeStatsMap.get(route);
1661
+ row.openBugs = stats?.open_bugs || 0;
1662
+ row.criticalBugs = stats?.critical_bugs || 0;
1663
+ }
1664
+ // Calculate overall last tested
1665
+ let latestTest = null;
1666
+ data.testCases.forEach(tc => {
1667
+ const execData = assignmentsByTestCase[tc.id];
1668
+ if (execData?.lastTested) {
1669
+ if (!latestTest || new Date(execData.lastTested) > new Date(latestTest)) {
1670
+ latestTest = execData.lastTested;
1671
+ }
1672
+ }
1673
+ });
1674
+ row.lastTestedAt = latestTest;
1675
+ // Build track cells
1676
+ (tracks || []).forEach(track => {
1677
+ const trackTests = data.trackCoverage[track.id] || [];
1678
+ const cell = {
1679
+ testCount: trackTests.length,
1680
+ };
1681
+ if (includeExecution && trackTests.length > 0) {
1682
+ let passCount = 0;
1683
+ let failCount = 0;
1684
+ let trackLastTested = null;
1685
+ trackTests.forEach(tc => {
1686
+ const execData = assignmentsByTestCase[tc.id];
1687
+ if (execData) {
1688
+ passCount += execData.passed;
1689
+ failCount += execData.failed;
1690
+ if (execData.lastTested) {
1691
+ if (!trackLastTested || new Date(execData.lastTested) > new Date(trackLastTested)) {
1692
+ trackLastTested = execData.lastTested;
1693
+ }
1694
+ }
1695
+ }
1696
+ });
1697
+ cell.passCount = passCount;
1698
+ cell.failCount = failCount;
1699
+ cell.passRate = passCount + failCount > 0
1700
+ ? Math.round((passCount / (passCount + failCount)) * 100)
1701
+ : null;
1702
+ cell.lastTestedAt = trackLastTested;
1703
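+ // Whole days since this track's most recent completed run; null when the track has never been executed for the route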
+ cell.staleDays = trackLastTested
1704
+ ? Math.floor((now - new Date(trackLastTested).getTime()) / (1000 * 60 * 60 * 24))
1705
+ : null;
1706
+ }
1707
+ row.tracks[track.id] = cell;
1708
+ });
1709
+ // Add "none" track for unassigned tests
1710
+ const unassignedTests = data.trackCoverage['none'] || [];
1711
+ if (unassignedTests.length > 0) {
1712
+ const cell = { testCount: unassignedTests.length };
1713
+ if (includeExecution) {
1714
+ let passCount = 0;
1715
+ let failCount = 0;
1716
+ unassignedTests.forEach(tc => {
1717
+ const execData = assignmentsByTestCase[tc.id];
1718
+ if (execData) {
1719
+ passCount += execData.passed;
1720
+ failCount += execData.failed;
1721
+ }
1722
+ });
1723
+ cell.passCount = passCount;
1724
+ cell.failCount = failCount;
1725
+ cell.passRate = passCount + failCount > 0
1726
+ ? Math.round((passCount / (passCount + failCount)) * 100)
1727
+ : null;
1728
+ }
1729
+ row.tracks['none'] = cell;
1730
+ }
1731
+ matrix.push(row);
1732
+ });
1733
+ // Sort by route name
1734
+ matrix.sort((a, b) => a.route.localeCompare(b.route));
1735
+ return {
1736
+ matrix,
1737
+ tracks: (tracks || []).map(t => ({
1738
+ id: t.id,
1739
+ name: t.name,
1740
+ icon: t.icon,
1741
+ color: t.color,
1742
+ })),
1743
+ summary: {
1744
+ totalRoutes: matrix.length,
1745
+ totalTests: matrix.reduce((sum, r) => sum + r.totalTests, 0),
1746
+ routesWithCriticalBugs: includeBugs ? matrix.filter(r => r.criticalBugs > 0).length : undefined,
1747
+ },
1748
+ };
1749
+ }
1750
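+ // Lists routes whose last test run is older than the threshold (or that were never tested), ranked by risk and staleness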
+ async function getStaleCoverage(args) {
1751
+ const daysThreshold = args.days_threshold || 14;
1752
+ const limit = args.limit || 20;
1753
+ // Refresh stats first
1754
+ await supabase.rpc('refresh_route_test_stats', { p_project_id: PROJECT_ID });
1755
+ // Get routes ordered by staleness and risk
1756
+ const { data: routes, error } = await supabase
1757
+ .from('route_test_stats')
1758
+ .select('route, last_tested_at, open_bugs, critical_bugs, test_case_count, priority_score')
1759
+ .eq('project_id', PROJECT_ID)
1760
+ .order('last_tested_at', { ascending: true, nullsFirst: true })
1761
+ .limit(limit * 2); // Get extra to filter
1762
+ if (error) {
1763
+ return { error: error.message };
1764
+ }
1765
+ const now = Date.now();
1766
+ const staleRoutes = [];
1767
+ (routes || []).forEach(route => {
1768
+ let daysSinceTest = null;
1769
+ if (route.last_tested_at) {
1770
+ daysSinceTest = Math.floor((now - new Date(route.last_tested_at).getTime()) / (1000 * 60 * 60 * 24));
1771
+ if (daysSinceTest < daysThreshold)
1772
+ return; // Not stale
1773
+ }
1774
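+ // Risk heuristic: any critical bug => critical; more than 2 open bugs or never tested => high; otherwise medium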
+ const riskLevel = route.critical_bugs > 0 ? 'critical' :
1775
+ route.open_bugs > 2 ? 'high' :
1776
+ daysSinceTest === null ? 'high' : 'medium';
1777
+ staleRoutes.push({
1778
+ route: route.route,
1779
+ daysSinceTest,
1780
+ neverTested: route.last_tested_at === null,
1781
+ lastTestedAt: route.last_tested_at,
1782
+ openBugs: route.open_bugs,
1783
+ criticalBugs: route.critical_bugs,
1784
+ testCaseCount: route.test_case_count,
1785
+ riskLevel,
1786
+ priorityScore: route.priority_score,
1787
+ recommendation: route.last_tested_at === null
1788
+ ? 'Never tested - establish baseline coverage immediately'
1789
+ : `Last tested ${daysSinceTest} days ago - refresh testing`,
1790
+ });
1791
+ });
1792
+ // Sort by risk then staleness
1793
+ staleRoutes.sort((a, b) => {
1794
+ const riskOrder = { critical: 0, high: 1, medium: 2 };
1795
+ const riskDiff = riskOrder[a.riskLevel] - riskOrder[b.riskLevel];
1796
+ if (riskDiff !== 0)
1797
+ return riskDiff;
1798
+ if (a.neverTested && !b.neverTested)
1799
+ return -1;
1800
+ if (!a.neverTested && b.neverTested)
1801
+ return 1;
1802
+ return (b.daysSinceTest || 999) - (a.daysSinceTest || 999);
1803
+ });
1804
+ return {
1805
+ staleRoutes: staleRoutes.slice(0, limit),
1806
+ summary: {
1807
+ totalStale: staleRoutes.length,
1808
+ neverTested: staleRoutes.filter(r => r.neverTested).length,
1809
+ withCriticalBugs: staleRoutes.filter(r => r.criticalBugs > 0).length,
1810
+ threshold: daysThreshold,
1811
+ },
1812
+ guidance: staleRoutes.length > 0
1813
+ ? `${staleRoutes.length} route(s) have stale or missing test coverage. Prioritize routes with critical bugs first.`
1814
+ : 'All routes have been tested within the threshold period.',
1815
+ };
1816
+ }
1817
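+ // Builds a pre-deploy test checklist for the given routes, inferring additional routes from changed file paths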
+ async function generateDeployChecklist(args) {
1818
+ const routes = args.routes;
1819
+ const deploymentType = args.deployment_type || 'feature';
1820
+ // Infer additional routes from changed files
1821
+ const allRoutes = new Set(routes);
1822
+ if (args.changed_files) {
1823
+ args.changed_files.forEach(file => {
1824
+ // Extract route from common patterns
1825
+ const matches = [
1826
+ /\/app\/(.+?)\/page\./,
1827
+ /\/pages\/(.+?)\./,
1828
+ /\/routes\/(.+?)\./,
1829
+ /\/screens\/(.+?)\./,
1830
+ ];
1831
+ for (const pattern of matches) {
1832
+ const match = file.match(pattern);
1833
+ if (match) {
1834
+ allRoutes.add('/' + match[1].replace(/\[.*?\]/g, ':id')); // normalize dynamic segments like "[id]" to ":id"
1835
+ }
1836
+ }
1837
+ });
1838
+ }
1839
+ // Get test cases for these routes
1840
+ const { data: testCases } = await supabase
1841
+ .from('test_cases')
1842
+ .select('id, test_key, title, target_route, category, priority, track:qa_tracks(name)')
1843
+ .eq('project_id', PROJECT_ID)
1844
+ .or(routes.map(r => `target_route.eq.${r}`).join(',') + ',' + routes.map(r => `category.eq.${r}`).join(','));
1845
+ // Get route stats for risk assessment
1846
+ const { data: routeStats } = await supabase
1847
+ .from('route_test_stats')
1848
+ .select('*')
1849
+ .eq('project_id', PROJECT_ID)
1850
+ .in('route', Array.from(allRoutes));
1851
+ const routeStatsMap = new Map((routeStats || []).map(r => [r.route, r]));
1852
+ // Categorize tests
1853
+ const checklist = { critical: [], recommended: [], optional: [], gaps: [] };
1854
+ // Track covered routes
1855
+ const coveredRoutes = new Set();
1856
+ (testCases || []).forEach(tc => {
1857
+ const route = tc.target_route || tc.category || '';
1858
+ coveredRoutes.add(route);
1859
+ const stats = routeStatsMap.get(route);
1860
+ const item = {
1861
+ testCaseId: tc.id,
1862
+ testKey: tc.test_key,
1863
+ title: tc.title,
1864
+ route,
1865
+ track: tc.track?.name,
1866
+ priority: tc.priority,
1867
+ hasCriticalBugs: (stats?.critical_bugs || 0) > 0,
1868
+ lastTested: stats?.last_tested_at,
1869
+ reason: '',
1870
+ };
1871
+ // Categorize based on priority and context
1872
+ if (tc.priority === 'P0' || (stats?.critical_bugs || 0) > 0) {
1873
+ item.reason = tc.priority === 'P0'
1874
+ ? 'P0 priority test case'
1875
+ : `Route has ${stats?.critical_bugs} critical bug(s)`;
1876
+ checklist.critical.push(item);
1877
+ }
1878
+ else if (tc.priority === 'P1' || deploymentType === 'hotfix') {
1879
+ item.reason = deploymentType === 'hotfix'
1880
+ ? 'Hotfix deployment - verify fix'
1881
+ : 'P1 priority test case';
1882
+ checklist.recommended.push(item);
1883
+ }
1884
+ else if (deploymentType === 'release') {
1885
+ item.reason = 'Release deployment - full verification';
1886
+ checklist.recommended.push(item);
1887
+ }
1888
+ else {
1889
+ item.reason = 'Standard test coverage';
1890
+ checklist.optional.push(item);
1891
+ }
1892
+ });
1893
+ // Find coverage gaps
1894
+ allRoutes.forEach(route => {
1895
+ if (!coveredRoutes.has(route)) {
1896
+ const stats = routeStatsMap.get(route);
1897
+ checklist.gaps.push({
1898
+ route,
1899
+ title: `No test coverage for ${route}`,
1900
+ reason: 'Route is being deployed but has no test cases',
1901
+ hasCriticalBugs: (stats?.critical_bugs || 0) > 0,
1902
+ openBugs: stats?.open_bugs || 0,
1903
+ recommendation: `Create test cases for ${route} before deploying`,
1904
+ });
1905
+ }
1906
+ });
1907
+ // Calculate thoroughness
1908
+ const totalTests = checklist.critical.length + checklist.recommended.length + checklist.optional.length;
1909
+ const thoroughness = allRoutes.size > 0
1910
+ ? Math.round((coveredRoutes.size / allRoutes.size) * 100)
1911
+ : 100;
1912
+ let guidance = '';
1913
+ if (checklist.critical.length > 0) {
1914
+ guidance = `MUST RUN: ${checklist.critical.length} critical test(s) before deploying. `;
1915
+ }
1916
+ if (checklist.gaps.length > 0) {
1917
+ guidance += `WARNING: ${checklist.gaps.length} route(s) have no test coverage. `;
1918
+ }
1919
+ if (deploymentType === 'hotfix') {
1920
+ guidance += 'Hotfix mode: Focus on critical and recommended tests.';
1921
+ }
1922
+ else if (deploymentType === 'release') {
1923
+ guidance += 'Release mode: Run all tests for comprehensive verification.';
1924
+ }
1925
+ return {
1926
+ checklist,
1927
+ summary: {
1928
+ criticalTests: checklist.critical.length,
1929
+ recommendedTests: checklist.recommended.length,
1930
+ optionalTests: checklist.optional.length,
1931
+ coverageGaps: checklist.gaps.length,
1932
+ thoroughness,
1933
+ deploymentType,
1934
+ },
1935
+ guidance: guidance || 'Ready to deploy with standard test coverage.',
1936
+ };
1937
+ }
1938
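+ // Summarizes QA activity for the period (optionally vs. the previous period) and derives a weighted health score and grade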
+ async function getQAHealth(args) {
1939
+ const periodDays = args.period_days || 30;
1940
+ const comparePrevious = args.compare_previous !== false;
1941
+ const now = new Date();
1942
+ const periodStart = new Date(now.getTime() - periodDays * 24 * 60 * 60 * 1000);
1943
+ const previousStart = new Date(periodStart.getTime() - periodDays * 24 * 60 * 60 * 1000);
1944
+ // Get current period data
1945
+ const { data: currentTests } = await supabase
1946
+ .from('test_assignments')
1947
+ .select('id, status, completed_at')
1948
+ .eq('project_id', PROJECT_ID)
1949
+ .gte('completed_at', periodStart.toISOString())
1950
+ .in('status', ['passed', 'failed']);
1951
+ const { data: currentBugs } = await supabase
1952
+ .from('reports')
1953
+ .select('id, severity, status, created_at')
1954
+ .eq('project_id', PROJECT_ID)
1955
+ .eq('report_type', 'bug')
1956
+ .gte('created_at', periodStart.toISOString());
1957
+ const { data: resolvedBugs } = await supabase
1958
+ .from('reports')
1959
+ .select('id, created_at, resolved_at')
1960
+ .eq('project_id', PROJECT_ID)
1961
+ .eq('report_type', 'bug')
1962
+ .in('status', ['resolved', 'fixed', 'verified', 'closed'])
1963
+ .gte('resolved_at', periodStart.toISOString());
1964
+ const { data: testers } = await supabase
1965
+ .from('testers')
1966
+ .select('id, status')
1967
+ .eq('project_id', PROJECT_ID);
1968
+ const { data: routeStats } = await supabase
1969
+ .from('route_test_stats')
1970
+ .select('route, test_case_count')
1971
+ .eq('project_id', PROJECT_ID);
1972
+ // Get previous period data for comparison
1973
+ let previousTests = [];
1974
+ let previousBugs = [];
1975
+ let previousResolved = [];
1976
+ if (comparePrevious) {
1977
+ const { data: pt } = await supabase
1978
+ .from('test_assignments')
1979
+ .select('id, status')
1980
+ .eq('project_id', PROJECT_ID)
1981
+ .gte('completed_at', previousStart.toISOString())
1982
+ .lt('completed_at', periodStart.toISOString())
1983
+ .in('status', ['passed', 'failed']);
1984
+ previousTests = pt || [];
1985
+ const { data: pb } = await supabase
1986
+ .from('reports')
1987
+ .select('id, severity')
1988
+ .eq('project_id', PROJECT_ID)
1989
+ .eq('report_type', 'bug')
1990
+ .gte('created_at', previousStart.toISOString())
1991
+ .lt('created_at', periodStart.toISOString());
1992
+ previousBugs = pb || [];
1993
+ const { data: pr } = await supabase
1994
+ .from('reports')
1995
+ .select('id')
1996
+ .eq('project_id', PROJECT_ID)
1997
+ .in('status', ['resolved', 'fixed', 'verified', 'closed'])
1998
+ .gte('resolved_at', previousStart.toISOString())
1999
+ .lt('resolved_at', periodStart.toISOString());
2000
+ previousResolved = pr || [];
2001
+ }
2002
+ // Calculate metrics
2003
+ const testsCompleted = (currentTests || []).length;
2004
+ const testsPerWeek = Math.round(testsCompleted / (periodDays / 7));
2005
+ const prevTestsPerWeek = comparePrevious ? Math.round(previousTests.length / (periodDays / 7)) : 0;
2006
+ const bugsFound = (currentBugs || []).length;
2007
+ const criticalBugs = (currentBugs || []).filter(b => b.severity === 'critical').length;
2008
+ const bugsPerTest = testsCompleted > 0 ? Math.round((bugsFound / testsCompleted) * 100) / 100 : 0;
2009
+ const prevBugsFound = previousBugs.length;
2010
+ const bugsResolvedCount = (resolvedBugs || []).length;
2011
+ const prevResolvedCount = previousResolved.length;
2012
+ // Calculate average resolution time (approximate)
2013
+ let avgResolutionDays = 0;
2014
+ if (resolvedBugs && resolvedBugs.length > 0) {
2015
+ const totalDays = resolvedBugs.reduce((sum, bug) => {
2016
+ if (bug.created_at && bug.resolved_at) {
2017
+ const days = (new Date(bug.resolved_at).getTime() - new Date(bug.created_at).getTime()) / (1000 * 60 * 60 * 24);
2018
+ return sum + days;
2019
+ }
2020
+ return sum;
2021
+ }, 0);
2022
+ avgResolutionDays = Math.round(totalDays / resolvedBugs.length);
2023
+ }
2024
+ // Coverage metrics
2025
+ const totalRoutes = (routeStats || []).length;
2026
+ const routesWithTests = (routeStats || []).filter(r => r.test_case_count > 0).length;
2027
+ const routeCoverage = totalRoutes > 0 ? Math.round((routesWithTests / totalRoutes) * 100) : 0;
2028
+ // Tester health
2029
+ const totalTesters = (testers || []).length;
2030
+ const activeTesters = (testers || []).filter(t => t.status === 'active').length;
2031
+ const utilizationPercent = totalTesters > 0 ? Math.round((activeTesters / totalTesters) * 100) : 0;
2032
+ // Calculate trends
2033
+ const getTrend = (current, previous) => {
2034
+ if (!comparePrevious || previous === 0)
2035
+ return 'stable';
2036
+ const change = ((current - previous) / previous) * 100;
2037
+ if (change > 10)
2038
+ return 'up';
2039
+ if (change < -10)
2040
+ return 'down';
2041
+ return 'stable';
2042
+ };
2043
+ const getChangePercent = (current, previous) => {
2044
+ if (!comparePrevious || previous === 0)
2045
+ return undefined;
2046
+ return Math.round(((current - previous) / previous) * 100);
2047
+ };
2048
+ const metrics = {
2049
+ velocity: {
2050
+ testsPerWeek,
2051
+ testsCompleted,
2052
+ trend: getTrend(testsPerWeek, prevTestsPerWeek),
2053
+ changePercent: getChangePercent(testsPerWeek, prevTestsPerWeek),
2054
+ },
2055
+ bugDiscovery: {
2056
+ bugsFound,
2057
+ bugsPerTest,
2058
+ criticalBugs,
2059
+ trend: getTrend(bugsFound, prevBugsFound),
2060
+ changePercent: getChangePercent(bugsFound, prevBugsFound),
2061
+ },
2062
+ resolution: {
2063
+ bugsResolved: bugsResolvedCount,
2064
+ avgResolutionDays,
2065
+ trend: getTrend(bugsResolvedCount, prevResolvedCount),
2066
+ changePercent: getChangePercent(bugsResolvedCount, prevResolvedCount),
2067
+ },
2068
+ coverage: {
2069
+ routeCoverage,
2070
+ routesWithTests,
2071
+ totalRoutes,
2072
+ },
2073
+ testerHealth: {
2074
+ activeTesters,
2075
+ totalTesters,
2076
+ utilizationPercent,
2077
+ },
2078
+ };
2079
+ // Calculate health score (0-100) as a weighted blend: coverage 30%, velocity 25%, resolution 25%, stability 20%
2080
+ const coverageScore = routeCoverage; // 0-100
2081
+ const velocityScore = Math.min(testsPerWeek * 10, 100); // 10+ tests/week = 100
2082
+ const resolutionScore = avgResolutionDays <= 3 ? 100 : avgResolutionDays <= 7 ? 75 : avgResolutionDays <= 14 ? 50 : 25;
2083
+ const stabilityScore = criticalBugs === 0 ? 100 : criticalBugs === 1 ? 75 : criticalBugs <= 3 ? 50 : 25;
2084
+ const overallScore = Math.round((coverageScore * 0.3) +
2085
+ (velocityScore * 0.25) +
2086
+ (resolutionScore * 0.25) +
2087
+ (stabilityScore * 0.2));
2088
+ const getGrade = (score) => {
2089
+ if (score >= 90)
2090
+ return 'A';
2091
+ if (score >= 80)
2092
+ return 'B';
2093
+ if (score >= 70)
2094
+ return 'C';
2095
+ if (score >= 60)
2096
+ return 'D';
2097
+ return 'F';
2098
+ };
2099
+ const healthScore = {
2100
+ score: overallScore,
2101
+ grade: getGrade(overallScore),
2102
+ breakdown: {
2103
+ coverage: coverageScore,
2104
+ velocity: velocityScore,
2105
+ resolution: resolutionScore,
2106
+ stability: stabilityScore,
2107
+ },
2108
+ };
2109
+ // Generate recommendations
2110
+ const recommendations = [];
2111
+ if (coverageScore < 70) {
2112
+ recommendations.push(`Increase test coverage (currently ${routeCoverage}%) - add tests for ${totalRoutes - routesWithTests} uncovered routes`);
2113
+ }
2114
+ if (velocityScore < 50) {
2115
+ recommendations.push(`Increase testing velocity (${testsPerWeek}/week) - aim for 10+ tests per week`);
2116
+ }
2117
+ if (avgResolutionDays > 7) {
2118
+ recommendations.push(`Improve bug resolution time (currently ${avgResolutionDays} days) - target <7 days`);
2119
+ }
2120
+ if (criticalBugs > 0) {
2121
+ recommendations.push(`Address ${criticalBugs} critical bug(s) immediately`);
2122
+ }
2123
+ if (utilizationPercent < 50) {
2124
+ recommendations.push(`Improve tester utilization (${utilizationPercent}%) - ${totalTesters - activeTesters} testers are inactive`);
2125
+ }
2126
+ return {
2127
+ metrics,
2128
+ healthScore,
2129
+ recommendations,
2130
+ period: {
2131
+ days: periodDays,
2132
+ start: periodStart.toISOString(),
2133
+ end: now.toISOString(),
2134
+ },
2135
+ };
2136
+ }
2137
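+ // Suggests new test cases based on the change type, changed files, affected routes, and existing coverage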
+ async function analyzeChangesForTests(args) {
2138
+ // Get existing tests to check coverage
2139
+ const { data: existingTests } = await supabase
2140
+ .from('test_cases')
2141
+ .select('test_key, title, target_route, description')
2142
+ .eq('project_id', PROJECT_ID);
2143
+ // Get next test key
2144
+ const { data: lastTest } = await supabase
2145
+ .from('test_cases')
2146
+ .select('test_key')
2147
+ .eq('project_id', PROJECT_ID)
2148
+ .order('test_key', { ascending: false })
2149
+ .limit(1);
2150
+ const lastKey = lastTest?.[0]?.test_key || 'TC-000';
2151
+ const lastNum = parseInt(lastKey.replace('TC-', '')) || 0;
2152
+ // Get bug history for these routes to understand risk areas
2153
+ const routes = args.affected_routes || [];
2154
+ let relatedBugs = [];
2155
+ if (routes.length > 0) {
2156
+ const { data: bugs } = await supabase
2157
+ .from('reports')
2158
+ .select('id, description, severity, app_context')
2159
+ .eq('project_id', PROJECT_ID)
2160
+ .eq('report_type', 'bug')
2161
+ .limit(50);
2162
+ relatedBugs = (bugs || []).filter(bug => {
2163
+ const bugRoute = bug.app_context?.currentRoute;
2164
+ return bugRoute && routes.some(r => bugRoute.includes(r) || r.includes(bugRoute));
2165
+ });
2166
+ }
2167
+ // Analyze file types to determine what kind of tests are needed
2168
+ const fileAnalysis = analyzeFileTypes(args.changed_files);
2169
+ // Check existing test coverage for affected routes
2170
+ const existingCoverage = (existingTests || []).filter(test => routes.some(r => test.target_route?.includes(r) || test.title.toLowerCase().includes(r.toLowerCase())));
2171
+ // Generate intelligent suggestions based on change type and file analysis
2172
+ const suggestions = [];
2173
+ let testNum = lastNum;
2174
+ // Change-type specific test suggestions
2175
+ if (args.change_type === 'feature') {
2176
+ // New features need comprehensive testing
2177
+ for (const route of routes.slice(0, 2)) {
2178
+ testNum++;
2179
+ suggestions.push({
2180
+ test_key: `TC-${String(testNum).padStart(3, '0')}`,
2181
+ title: `Verify new feature: ${args.change_summary.slice(0, 40)}`,
2182
+ description: `Test the new functionality added: ${args.change_summary}`,
2183
+ track: 'functional',
2184
+ priority: 'P1',
2185
+ target_route: route,
2186
+ rationale: 'New features require verification that they work as intended',
2187
+ steps: [
2188
+ { stepNumber: 1, action: `Navigate to ${route}`, expectedResult: 'Page loads successfully' },
2189
+ { stepNumber: 2, action: 'Locate the new feature/element', expectedResult: 'Feature is visible and accessible' },
2190
+ { stepNumber: 3, action: 'Interact with the new feature', expectedResult: 'Feature responds as expected' },
2191
+ { stepNumber: 4, action: 'Verify the expected outcome', expectedResult: 'Correct result is produced' },
2192
+ ],
2193
+ expected_result: 'New feature functions correctly without errors',
2194
+ });
2195
+ }
2196
+ // Add edge case test for features
2197
+ if (routes.length > 0) {
2198
+ testNum++;
2199
+ suggestions.push({
2200
+ test_key: `TC-${String(testNum).padStart(3, '0')}`,
2201
+ title: `Edge cases: ${args.change_summary.slice(0, 35)}`,
2202
+ description: `Test edge cases and error handling for the new feature`,
2203
+ track: 'functional',
2204
+ priority: 'P2',
2205
+ target_route: routes[0],
2206
+ rationale: 'Edge cases often reveal bugs that happy-path testing misses',
2207
+ steps: [
2208
+ { stepNumber: 1, action: `Navigate to ${routes[0]}`, expectedResult: 'Page loads' },
2209
+ { stepNumber: 2, action: 'Test with empty/null input', expectedResult: 'Graceful handling, no crash' },
2210
+ { stepNumber: 3, action: 'Test with invalid input', expectedResult: 'Appropriate error message' },
2211
+ { stepNumber: 4, action: 'Test boundary conditions', expectedResult: 'Correct behavior at limits' },
2212
+ ],
2213
+ expected_result: 'Feature handles edge cases gracefully without errors',
2214
+ });
2215
+ }
2216
+ }
2217
+ if (args.change_type === 'bugfix') {
2218
+ // Bug fixes need regression tests
2219
+ for (const route of routes.slice(0, 1)) {
2220
+ testNum++;
2221
+ suggestions.push({
2222
+ test_key: `TC-${String(testNum).padStart(3, '0')}`,
2223
+ title: `Regression: ${args.change_summary.slice(0, 40)}`,
2224
+ description: `Verify the bug fix works and hasn't regressed: ${args.change_summary}`,
2225
+ track: 'functional',
2226
+ priority: 'P1',
2227
+ target_route: route,
2228
+ rationale: 'Bug fixes should have regression tests to prevent recurrence',
2229
+ steps: [
2230
+ { stepNumber: 1, action: `Navigate to ${route}`, expectedResult: 'Page loads' },
2231
+ { stepNumber: 2, action: 'Reproduce the original bug scenario', expectedResult: 'Bug no longer occurs' },
2232
+ { stepNumber: 3, action: 'Test related functionality', expectedResult: 'No side effects from fix' },
2233
+ ],
2234
+ expected_result: 'Bug is fixed and related functionality still works',
2235
+ });
2236
+ }
2237
+ }
2238
+ if (args.change_type === 'ui_change') {
2239
+ // UI changes need visual and interaction testing
2240
+ for (const route of routes.slice(0, 1)) {
2241
+ testNum++;
2242
+ suggestions.push({
2243
+ test_key: `TC-${String(testNum).padStart(3, '0')}`,
2244
+ title: `UI verification: ${args.change_summary.slice(0, 35)}`,
2245
+ description: `Verify UI changes display correctly across devices`,
2246
+ track: 'design',
2247
+ priority: 'P2',
2248
+ target_route: route,
2249
+ rationale: 'UI changes should be verified visually and for responsiveness',
2250
+ steps: [
2251
+ { stepNumber: 1, action: `Navigate to ${route}`, expectedResult: 'Page loads' },
2252
+ { stepNumber: 2, action: 'Verify visual appearance matches design', expectedResult: 'UI looks correct' },
2253
+ { stepNumber: 3, action: 'Test on mobile viewport (375px)', expectedResult: 'Responsive layout works' },
2254
+ { stepNumber: 4, action: 'Test interactive elements', expectedResult: 'Buttons, links work correctly' },
2255
+ ],
2256
+ expected_result: 'UI changes look correct and are responsive',
2257
+ });
2258
+ }
2259
+ }
2260
+ if (args.change_type === 'api_change') {
2261
+ // API changes need integration testing
2262
+ for (const route of routes.slice(0, 1)) {
2263
+ testNum++;
2264
+ suggestions.push({
2265
+ test_key: `TC-${String(testNum).padStart(3, '0')}`,
2266
+ title: `API integration: ${args.change_summary.slice(0, 35)}`,
2267
+ description: `Verify API changes work correctly end-to-end`,
2268
+ track: 'functional',
2269
+ priority: 'P1',
2270
+ target_route: route,
2271
+ rationale: 'API changes can break frontend functionality',
2272
+ steps: [
2273
+ { stepNumber: 1, action: `Navigate to ${route}`, expectedResult: 'Page loads' },
2274
+ { stepNumber: 2, action: 'Trigger the API call', expectedResult: 'Request is sent' },
2275
+ { stepNumber: 3, action: 'Verify response handling', expectedResult: 'Data displays correctly' },
2276
+ { stepNumber: 4, action: 'Test error scenarios', expectedResult: 'Errors handled gracefully' },
2277
+ ],
2278
+ expected_result: 'API integration works correctly',
2279
+ });
2280
+ }
2281
+ }
2282
+ // Add accessibility test if UI components were changed
2283
+ if (fileAnalysis.hasUIComponents && (args.change_type === 'feature' || args.change_type === 'ui_change')) {
2284
+ testNum++;
2285
+ suggestions.push({
2286
+ test_key: `TC-${String(testNum).padStart(3, '0')}`,
2287
+ title: `Accessibility: ${args.change_summary.slice(0, 35)}`,
2288
+ description: `Verify changes meet accessibility standards`,
2289
+ track: 'accessibility',
2290
+ priority: 'P2',
2291
+ target_route: routes[0] || null,
2292
+ rationale: 'UI changes should maintain accessibility compliance',
2293
+ steps: [
2294
+ { stepNumber: 1, action: 'Navigate using keyboard only (Tab)', expectedResult: 'All interactive elements reachable' },
2295
+ { stepNumber: 2, action: 'Check focus indicators', expectedResult: 'Focus is visible on all elements' },
2296
+ { stepNumber: 3, action: 'Verify with screen reader', expectedResult: 'Content is announced correctly' },
2297
+ ],
2298
+ expected_result: 'Changes are accessible to all users',
2299
+ });
2300
+ }
2301
+ // Determine if tests are recommended
2302
+ const shouldCreateTests = suggestions.length > 0 &&
2303
+ (args.change_type !== 'config' && args.change_type !== 'refactor');
2304
+ // Check if routes already have good coverage
2305
+ const coverageRatio = existingCoverage.length / Math.max(routes.length, 1);
2306
+ return {
2307
+ analysis: {
2308
+ change_type: args.change_type,
2309
+ files_changed: args.changed_files.length,
2310
+ file_types: fileAnalysis,
2311
+ affected_routes: routes,
2312
+ existing_coverage: existingCoverage.length,
2313
+ related_bugs: relatedBugs.length,
2314
+ coverage_ratio: coverageRatio,
2315
+ },
2316
+ recommendation: {
2317
+ should_create_tests: shouldCreateTests,
2318
+ urgency: args.change_type === 'feature' || args.change_type === 'bugfix' ? 'high' :
2319
+ args.change_type === 'api_change' ? 'medium' : 'low',
2320
+ reason: shouldCreateTests
2321
+ ? `${args.change_type === 'feature' ? 'New features' : args.change_type === 'bugfix' ? 'Bug fixes' : 'Changes'} should have QA coverage to catch issues early.`
2322
+ : args.change_type === 'config' ? 'Config changes typically don\'t need manual QA tests.'
2323
+ : 'Refactoring with good existing coverage may not need new tests.',
2324
+ },
2325
+ suggestions: suggestions.map(s => ({
2326
+ ...s,
2327
+ create_command: `create_test_case with test_key="${s.test_key}", title="${s.title}", target_route="${s.target_route}"`,
2328
+ })),
2329
+ existing_tests: existingCoverage.slice(0, 5).map(t => ({
2330
+ test_key: t.test_key,
2331
+ title: t.title,
2332
+ target_route: t.target_route,
2333
+ })),
2334
+ next_steps: shouldCreateTests
2335
+ ? [
2336
+ 'Review the suggested tests above',
2337
+ 'Modify titles/steps as needed for your specific implementation',
2338
+ 'Use create_test_case to add the ones that make sense',
2339
+ 'Skip any that duplicate existing coverage',
2340
+ ]
2341
+ : ['No new tests recommended for this change type'],
2342
+ };
2343
+ }
2344
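+ // Heuristically classifies changed files (UI components, API routes, tests, styles, config) from their paths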
+ function analyzeFileTypes(files) {
2345
+ const analysis = {
2346
+ hasUIComponents: false,
2347
+ hasAPIRoutes: false,
2348
+ hasTests: false,
2349
+ hasStyles: false,
2350
+ hasConfig: false,
2351
+ primaryType: 'unknown',
2352
+ };
2353
+ for (const file of files) {
2354
+ const lower = file.toLowerCase();
2355
+ if (lower.includes('component') || lower.endsWith('.tsx') || lower.endsWith('.jsx') ||
2356
+ lower.includes('/pages/') || lower.includes('/app/') || lower.includes('/screens/')) {
2357
+ analysis.hasUIComponents = true;
2358
+ }
2359
+ if (lower.includes('/api/') || lower.includes('route.ts') || lower.includes('handler') ||
2360
+ lower.includes('service') || lower.includes('controller')) {
2361
+ analysis.hasAPIRoutes = true;
2362
+ }
2363
+ if (lower.includes('.test.') || lower.includes('.spec.') || lower.includes('__tests__')) {
2364
+ analysis.hasTests = true;
2365
+ }
2366
+ if (lower.endsWith('.css') || lower.endsWith('.scss') || lower.includes('style') ||
2367
+ lower.includes('tailwind')) {
2368
+ analysis.hasStyles = true;
2369
+ }
2370
+ if (lower.includes('config') || lower.endsWith('.json') || lower.endsWith('.env') ||
2371
+ lower.includes('settings')) {
2372
+ analysis.hasConfig = true;
2373
+ }
2374
+ }
2375
+ // Determine primary type
2376
+ if (analysis.hasUIComponents)
2377
+ analysis.primaryType = 'ui';
2378
+ else if (analysis.hasAPIRoutes)
2379
+ analysis.primaryType = 'api';
2380
+ else if (analysis.hasConfig)
2381
+ analysis.primaryType = 'config';
2382
+ else if (analysis.hasStyles)
2383
+ analysis.primaryType = 'styles';
2384
+ return analysis;
2385
+ }
2386
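+ // Manual-test step templates keyed by QA track ("{route}" is a literal placeholder in the step text); unknown tracks fall back to the functional set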
+ function getTrackTemplates(track) {
2387
+ const templates = {
2388
+ functional: [
2389
+ {
2390
+ title: 'Happy path navigation to {route}',
2391
+ description: 'Verify basic navigation and page load for {route}',
2392
+ steps: [
2393
+ { action: 'Navigate to {route}', expectedResult: 'Page loads without errors' },
2394
+ { action: 'Verify all main elements are visible', expectedResult: 'Headers, content, and navigation present' },
2395
+ { action: 'Check for console errors', expectedResult: 'No JavaScript errors in console' },
2396
+ ],
2397
+ expected_result: 'Page functions correctly with all elements visible',
2398
+ },
2399
+ {
2400
+ title: 'Error handling at {route}',
2401
+ description: 'Test error states and edge cases at {route}',
2402
+ steps: [
2403
+ { action: 'Navigate to {route}', expectedResult: 'Page loads' },
2404
+ { action: 'Trigger an error condition (e.g., invalid input)', expectedResult: 'Error message displayed' },
2405
+ { action: 'Verify error is user-friendly', expectedResult: 'Clear explanation of what went wrong' },
2406
+ ],
2407
+ expected_result: 'Errors are handled gracefully with clear messaging',
2408
+ },
2409
+ {
2410
+ title: 'Form submission at {route}',
2411
+ description: 'Test form validation and submission at {route}',
2412
+ steps: [
2413
+ { action: 'Navigate to {route}', expectedResult: 'Form is visible' },
2414
+ { action: 'Submit form with empty fields', expectedResult: 'Validation errors shown' },
2415
+ { action: 'Fill in valid data and submit', expectedResult: 'Success message or redirect' },
2416
+ ],
2417
+ expected_result: 'Form validates input and submits successfully',
2418
+ },
2419
+ ],
2420
+ design: [
2421
+ {
2422
+ title: 'Visual consistency at {route}',
2423
+ description: 'Check design system compliance at {route}',
2424
+ steps: [
2425
+ { action: 'Navigate to {route}', expectedResult: 'Page loads' },
2426
+ { action: 'Check typography matches design system', expectedResult: 'Fonts, sizes, weights are correct' },
2427
+ { action: 'Verify color usage', expectedResult: 'Colors match brand guidelines' },
2428
+ { action: 'Check spacing and alignment', expectedResult: 'Consistent margins and padding' },
2429
+ ],
2430
+ expected_result: 'Page matches design specifications',
2431
+ },
2432
+ {
2433
+ title: 'Responsive behavior at {route}',
2434
+ description: 'Test responsive design at {route}',
2435
+ steps: [
2436
+ { action: 'View {route} on desktop (1920px)', expectedResult: 'Full layout visible' },
2437
+ { action: 'Resize to tablet (768px)', expectedResult: 'Layout adapts appropriately' },
2438
+ { action: 'Resize to mobile (375px)', expectedResult: 'Mobile-friendly layout' },
2439
+ ],
2440
+ expected_result: 'Page is fully responsive across breakpoints',
2441
+ },
2442
+ ],
2443
+ accessibility: [
2444
+ {
2445
+ title: 'Keyboard navigation at {route}',
2446
+ description: 'Test keyboard accessibility at {route}',
2447
+ steps: [
2448
+ { action: 'Navigate to {route}', expectedResult: 'Page loads' },
2449
+ { action: 'Press Tab to navigate through elements', expectedResult: 'Focus moves in logical order' },
2450
+ { action: 'Verify focus indicators are visible', expectedResult: 'Clear focus ring on interactive elements' },
2451
+ { action: 'Test Enter/Space on buttons', expectedResult: 'Buttons activate correctly' },
2452
+ ],
2453
+ expected_result: 'Page is fully navigable by keyboard',
2454
+ },
2455
+ {
2456
+ title: 'Screen reader compatibility at {route}',
2457
+ description: 'Test screen reader accessibility at {route}',
2458
+ steps: [
2459
+ { action: 'Enable screen reader', expectedResult: 'Screen reader activates' },
2460
+ { action: 'Navigate to {route}', expectedResult: 'Page title is announced' },
2461
+ { action: 'Tab through content', expectedResult: 'All content is announced meaningfully' },
2462
+ { action: 'Check form labels', expectedResult: 'Inputs have associated labels' },
2463
+ ],
2464
+ expected_result: 'Page is fully accessible via screen reader',
2465
+ },
2466
+ ],
2467
+ performance: [
2468
+ {
2469
+ title: 'Page load performance at {route}',
2470
+ description: 'Measure load times at {route}',
2471
+ steps: [
2472
+ { action: 'Clear browser cache', expectedResult: 'Cache cleared' },
2473
+ { action: 'Navigate to {route}', expectedResult: 'Page loads' },
2474
+ { action: 'Check Network tab for load time', expectedResult: 'Initial load under 3 seconds' },
2475
+ { action: 'Note Largest Contentful Paint', expectedResult: 'LCP under 2.5 seconds' },
2476
+ ],
2477
+ expected_result: 'Page loads within performance budget',
2478
+ },
2479
+ ],
2480
+ };
2481
+ return templates[track] || templates.functional;
2482
+ }
2483
+ // === WRITE-BACK TOOL HANDLERS ===
2484
+ async function createBugReport(args) {
2485
+ // Build code context for the report
2486
+ const codeContext = {};
2487
+ if (args.file_path) {
2488
+ codeContext.file_path = args.file_path;
2489
+ codeContext.line_number = args.line_number;
2490
+ codeContext.code_snippet = args.code_snippet;
2491
+ codeContext.function_name = extractFunctionName(args.code_snippet);
2492
+ }
2493
+ if (args.related_files) {
2494
+ codeContext.related_files = args.related_files;
2495
+ }
2496
+ if (args.suggested_fix) {
2497
+ codeContext.suggested_fix = args.suggested_fix;
2498
+ }
2499
+ const report = {
2500
+ project_id: PROJECT_ID,
2501
+ report_type: 'bug',
2502
+ title: args.title,
2503
+ description: args.description,
2504
+ severity: args.severity,
2505
+ status: 'new',
2506
+ app_context: {
2507
+ currentRoute: args.file_path || 'code',
2508
+ source: 'claude_code',
2509
+ timestamp: new Date().toISOString(),
2510
+ },
2511
+ device_info: {
2512
+ platform: 'claude_code',
2513
+ environment: 'development',
2514
+ },
2515
+ code_context: codeContext,
2516
+ };
2517
+ const { data, error } = await supabase
2518
+ .from('reports')
2519
+ .insert(report)
2520
+ .select('id')
2521
+ .single();
2522
+ if (error) {
2523
+ return { error: error.message };
2524
+ }
2525
+ return {
2526
+ success: true,
2527
+ report_id: data.id,
2528
+ message: `Bug report created: ${args.title}`,
2529
+ details: {
2530
+ id: data.id,
2531
+ severity: args.severity,
2532
+ file: args.file_path,
2533
+ line: args.line_number,
2534
+ },
2535
+ };
2536
+ }
2537
+ function extractFunctionName(codeSnippet) {
2538
+ if (!codeSnippet)
2539
+ return undefined;
2540
+ // Try to extract function/component name from code
2541
+ const patterns = [
2542
+ /function\s+(\w+)/,
2543
+ /const\s+(\w+)\s*=\s*(?:async\s*)?\(/,
2544
+ /export\s+(?:default\s+)?(?:async\s+)?function\s+(\w+)/,
2545
+ /class\s+(\w+)/,
2546
+ /(\w+)\s*:\s*(?:React\.)?FC/,
2547
+ ];
2548
+ for (const pattern of patterns) {
2549
+ const match = codeSnippet.match(pattern);
2550
+ if (match)
2551
+ return match[1];
2552
+ }
2553
+ return undefined;
2554
+ }
2555
+ async function getBugsForFile(args) {
2556
+ // Normalize the file path for matching
2557
+ const normalizedPath = args.file_path.replace(/^\.\//, '').replace(/^\//, '');
2558
+ let query = supabase
2559
+ .from('reports')
2560
+ .select('id, title, description, severity, status, created_at, code_context')
2561
+ .eq('project_id', PROJECT_ID)
2562
+ .eq('report_type', 'bug');
2563
+ if (!args.include_resolved) {
2564
+ query = query.in('status', ['new', 'confirmed', 'in_progress', 'reviewed']);
2565
+ }
2566
+ const { data, error } = await query.order('created_at', { ascending: false });
2567
+ if (error) {
2568
+ return { error: error.message };
2569
+ }
2570
+ // Filter bugs that match the file path
2571
+ const matchingBugs = (data || []).filter(bug => {
2572
+ const codeContext = bug.code_context;
2573
+ if (!codeContext)
2574
+ return false;
2575
+ const bugFilePath = codeContext.file_path;
2576
+ if (!bugFilePath)
2577
+ return false;
2578
+ const normalizedBugPath = bugFilePath.replace(/^\.\//, '').replace(/^\//, '');
2579
+ return normalizedBugPath.includes(normalizedPath) || normalizedPath.includes(normalizedBugPath);
2580
+ });
2581
+ // Also check related_files
2582
+ const relatedBugs = (data || []).filter(bug => {
2583
+ if (matchingBugs.includes(bug))
2584
+ return false;
2585
+ const codeContext = bug.code_context;
2586
+ const relatedFiles = codeContext?.related_files;
2587
+ if (!relatedFiles)
2588
+ return false;
2589
+ return relatedFiles.some(f => f.includes(normalizedPath) || normalizedPath.includes(f));
2590
+ });
2591
+ return {
2592
+ file: args.file_path,
2593
+ direct_bugs: matchingBugs.map(b => ({
2594
+ id: b.id,
2595
+ title: b.title,
2596
+ severity: b.severity,
2597
+ status: b.status,
2598
+ line: b.code_context?.line_number,
2599
+ description: b.description.slice(0, 200),
2600
+ })),
2601
+ related_bugs: relatedBugs.map(b => ({
2602
+ id: b.id,
2603
+ title: b.title,
2604
+ severity: b.severity,
2605
+ status: b.status,
2606
+ source_file: b.code_context?.file_path,
2607
+ })),
2608
+ summary: {
2609
+ total: matchingBugs.length + relatedBugs.length,
2610
+ critical: matchingBugs.filter(b => b.severity === 'critical').length,
2611
+ open: matchingBugs.filter(b => ['new', 'confirmed', 'in_progress'].includes(b.status)).length,
2612
+ },
2613
+ recommendation: matchingBugs.length > 0
2614
+ ? `Found ${matchingBugs.length} bug(s) in this file. Consider fixing them while you're here.`
2615
+ : 'No known bugs in this file.',
2616
+ };
2617
+ }
2618
+ async function markFixedWithCommit(args) {
2619
+ if (!isValidUUID(args.report_id)) {
2620
+ return { error: 'Invalid report_id format' };
2621
+ }
2622
+ // Get current report to preserve existing data
2623
+ const { data: existing, error: fetchError } = await supabase
2624
+ .from('reports')
2625
+ .select('code_context')
2626
+ .eq('id', args.report_id)
2627
+ .eq('project_id', PROJECT_ID) // Security: ensure report belongs to this project
2628
+ .single();
2629
+ if (fetchError) {
2630
+ return { error: fetchError.message };
2631
+ }
2632
+ const existingContext = existing?.code_context || {};
2633
+ const updates = {
2634
+ status: 'resolved',
2635
+ resolved_at: new Date().toISOString(),
2636
+ resolution: args.resolution_notes || `Fixed in commit ${args.commit_sha.slice(0, 7)}`,
2637
+ code_context: {
2638
+ ...existingContext,
2639
+ fix: {
2640
+ commit_sha: args.commit_sha,
2641
+ commit_message: args.commit_message,
2642
+ files_changed: args.files_changed,
2643
+ fixed_at: new Date().toISOString(),
2644
+ fixed_by: 'claude_code',
2645
+ },
2646
+ },
2647
+ };
2648
+ const { error } = await supabase
2649
+ .from('reports')
2650
+ .update(updates)
2651
+ .eq('id', args.report_id)
2652
+ .eq('project_id', PROJECT_ID); // Security: ensure report belongs to this project
2653
+ if (error) {
2654
+ return { error: error.message };
2655
+ }
2656
+ return {
2657
+ success: true,
2658
+ message: `Bug marked as fixed in commit ${args.commit_sha.slice(0, 7)}`,
2659
+ report_id: args.report_id,
2660
+ commit: args.commit_sha,
2661
+ next_steps: [
2662
+ 'Consider running create_regression_test to prevent this bug from recurring',
2663
+ 'Push your changes to trigger CI/CD',
2664
+ ],
2665
+ };
2666
+ }
2667
+ async function getBugsAffectingCode(args) {
2668
+ const includeRelated = args.include_related !== false;
2669
+ const { data, error } = await supabase
2670
+ .from('reports')
2671
+ .select('id, title, description, severity, status, code_context, app_context')
2672
+ .eq('project_id', PROJECT_ID)
2673
+ .eq('report_type', 'bug')
2674
+ .in('status', ['new', 'confirmed', 'in_progress', 'reviewed'])
2675
+ .order('severity', { ascending: true });
2676
+ if (error) {
2677
+ return { error: error.message };
2678
+ }
2679
+ const normalizedPaths = args.file_paths.map(p => p.replace(/^\.\//, '').replace(/^\//, ''));
2680
+ const affectedBugs = [];
2681
+ for (const bug of data || []) {
2682
+ const codeContext = bug.code_context;
2683
+ const appContext = bug.app_context;
2684
+ // Check direct file match
2685
+ const bugFile = codeContext?.file_path;
2686
+ if (bugFile) {
2687
+ const normalizedBugFile = bugFile.replace(/^\.\//, '').replace(/^\//, '');
2688
+ for (const path of normalizedPaths) {
2689
+ if (normalizedBugFile.includes(path) || path.includes(normalizedBugFile)) {
2690
+ affectedBugs.push({
2691
+ id: bug.id,
2692
+ title: bug.title,
2693
+ severity: bug.severity,
2694
+ status: bug.status,
2695
+ matched_file: path,
2696
+ match_type: 'direct',
2697
+ });
2698
+ break;
2699
+ }
2700
+ }
2701
+ }
2702
+ // Check related files
2703
+ if (includeRelated && codeContext?.related_files) {
2704
+ const relatedFiles = codeContext.related_files;
2705
+ for (const relatedFile of relatedFiles) {
2706
+ for (const path of normalizedPaths) {
2707
+ if (relatedFile.includes(path) || path.includes(relatedFile)) {
2708
+ if (!affectedBugs.find(b => b.id === bug.id)) {
2709
+ affectedBugs.push({
2710
+ id: bug.id,
2711
+ title: bug.title,
2712
+ severity: bug.severity,
2713
+ status: bug.status,
2714
+ matched_file: path,
2715
+ match_type: 'related',
2716
+ });
2717
+ }
2718
+ break;
2719
+ }
2720
+ }
2721
+ }
2722
+ }
2723
+ // Check route-based matches (for component files)
2724
+ const route = appContext?.currentRoute;
2725
+ if (route && route !== 'code') {
2726
+ for (const path of normalizedPaths) {
2727
+ // Match route to common file patterns
2728
+ if (path.includes('page') || path.includes('route') || path.includes('component')) {
2729
+ const pathParts = path.split('/');
2730
+ const fileName = pathParts[pathParts.length - 1].replace(/\.(tsx?|jsx?)$/, '');
2731
+ if (route.toLowerCase().includes(fileName.toLowerCase())) {
2732
+ if (!affectedBugs.find(b => b.id === bug.id)) {
2733
+ affectedBugs.push({
2734
+ id: bug.id,
2735
+ title: bug.title,
2736
+ severity: bug.severity,
2737
+ status: bug.status,
2738
+ matched_file: path,
2739
+ match_type: 'route',
2740
+ });
2741
+ }
2742
+ }
2743
+ }
2744
+ }
2745
+ }
2746
+ }
2747
+ // Group by severity
2748
+ const critical = affectedBugs.filter(b => b.severity === 'critical');
2749
+ const high = affectedBugs.filter(b => b.severity === 'high');
2750
+ const other = affectedBugs.filter(b => !['critical', 'high'].includes(b.severity));
2751
+ return {
2752
+ files_checked: args.file_paths,
2753
+ affected_bugs: affectedBugs,
2754
+ summary: {
2755
+ total: affectedBugs.length,
2756
+ critical: critical.length,
2757
+ high: high.length,
2758
+ direct_matches: affectedBugs.filter(b => b.match_type === 'direct').length,
2759
+ },
2760
+ warnings: critical.length > 0
2761
+ ? [`⚠️ ${critical.length} CRITICAL bug(s) may be affected by your changes!`]
2762
+ : [],
2763
+ recommendation: affectedBugs.length > 0
2764
+ ? `Review ${affectedBugs.length} potentially affected bug(s) before pushing.`
2765
+ : 'No known bugs affected by these changes.',
2766
+ };
2767
+ }
2768
+ async function linkBugToCode(args) {
2769
+ if (!isValidUUID(args.report_id)) {
2770
+ return { error: 'Invalid report_id format' };
2771
+ }
2772
+ // Get current report
2773
+ const { data: existing, error: fetchError } = await supabase
2774
+ .from('reports')
2775
+ .select('code_context')
2776
+ .eq('id', args.report_id)
2777
+ .eq('project_id', PROJECT_ID) // Security: ensure report belongs to this project
2778
+ .single();
2779
+ if (fetchError) {
2780
+ return { error: fetchError.message };
2781
+ }
2782
+ const existingContext = existing?.code_context || {};
2783
+ const updates = {
2784
+ code_context: {
2785
+ ...existingContext,
2786
+ file_path: args.file_path,
2787
+ line_number: args.line_number,
2788
+ code_snippet: args.code_snippet,
2789
+ function_name: args.function_name || extractFunctionName(args.code_snippet),
2790
+ linked_at: new Date().toISOString(),
2791
+ linked_by: 'claude_code',
2792
+ },
2793
+ };
2794
+ const { error } = await supabase
2795
+ .from('reports')
2796
+ .update(updates)
2797
+ .eq('id', args.report_id)
2798
+ .eq('project_id', PROJECT_ID); // Security: ensure report belongs to this project
2799
+ if (error) {
2800
+ return { error: error.message };
2801
+ }
2802
+ return {
2803
+ success: true,
2804
+ message: `Bug linked to ${args.file_path}${args.line_number ? `:${args.line_number}` : ''}`,
2805
+ report_id: args.report_id,
2806
+ };
2807
+ }
2808
+ async function createRegressionTest(args) {
2809
+ if (!isValidUUID(args.report_id)) {
2810
+ return { error: 'Invalid report_id format' };
2811
+ }
2812
+ // Get the bug report details
2813
+ const { data: report, error: fetchError } = await supabase
2814
+ .from('reports')
2815
+ .select('*')
2816
+ .eq('id', args.report_id)
2817
+ .eq('project_id', PROJECT_ID) // Security: ensure report belongs to this project
2818
+ .single();
2819
+ if (fetchError) {
2820
+ return { error: fetchError.message };
2821
+ }
2822
+ if (report.status !== 'resolved') {
2823
+ return {
2824
+ error: 'Bug must be resolved before creating a regression test',
2825
+ current_status: report.status,
2826
+ };
2827
+ }
2828
+ const codeContext = report.code_context;
2829
+ const testType = args.test_type || 'integration';
2830
+ // Get next test key
2831
+ const { data: existingTests } = await supabase
2832
+ .from('test_cases')
2833
+ .select('test_key')
2834
+ .eq('project_id', PROJECT_ID)
2835
+ .order('test_key', { ascending: false })
2836
+ .limit(1);
2837
+ const lastKey = existingTests?.[0]?.test_key || 'TC-000';
2838
+ const lastNum = parseInt(lastKey.replace('TC-', '')) || 0;
2839
+ const newKey = `TC-${String(lastNum + 1).padStart(3, '0')}`;
2840
+ // Get the route from the bug's app_context for deep linking
2841
+ const appContext = report.app_context;
2842
+ const targetRoute = appContext?.currentRoute;
2843
+ // Generate test case from bug
2844
+ const testCase = {
2845
+ project_id: PROJECT_ID,
2846
+ test_key: newKey,
2847
+ title: `Regression: ${report.title}`,
2848
+ description: `Regression test to prevent recurrence of bug #${args.report_id.slice(0, 8)}\n\nOriginal bug: ${report.description}`,
2849
+ priority: report.severity === 'critical' ? 'P0' : report.severity === 'high' ? 'P1' : 'P2',
2850
+ steps: [
2851
+ {
2852
+ stepNumber: 1,
2853
+ action: codeContext?.file_path
2854
+ ? `Navigate to the code/feature in ${codeContext.file_path}`
2855
+ : 'Navigate to the affected feature',
2856
+ expectedResult: 'Feature loads correctly',
2857
+ },
2858
+ {
2859
+ stepNumber: 2,
2860
+ action: 'Reproduce the original bug scenario',
2861
+ expectedResult: 'The bug should NOT occur',
2862
+ },
2863
+ {
2864
+ stepNumber: 3,
2865
+ action: 'Verify the fix is working',
2866
+ expectedResult: report.resolution || 'Feature works as expected',
2867
+ },
2868
+ ],
2869
+ expected_result: `The bug "${report.title}" should not recur`,
2870
+ preconditions: codeContext?.fix
2871
+ ? `Requires commit ${codeContext.fix.commit_sha?.slice(0, 7) || 'unknown'} or later`
2872
+ : '',
2873
+ target_route: targetRoute,
2874
+ metadata: {
2875
+ source: 'regression_from_bug',
2876
+ original_bug_id: args.report_id,
2877
+ test_type: testType,
2878
+ created_by: 'claude_code',
2879
+ },
2880
+ };
2881
+ const { data, error } = await supabase
2882
+ .from('test_cases')
2883
+ .insert(testCase)
2884
+ .select('id, test_key, title')
2885
+ .single();
2886
+ if (error) {
2887
+ return { error: error.message };
2888
+ }
2889
+ return {
2890
+ success: true,
2891
+ test_case: {
2892
+ id: data.id,
2893
+ test_key: data.test_key,
2894
+ title: data.title,
2895
+ type: testType,
2896
+ },
2897
+ message: `Regression test ${data.test_key} created from bug report`,
2898
+ original_bug: {
2899
+ id: args.report_id,
2900
+ title: report.title,
2901
+ },
2902
+ };
2903
+ }
2904
+ // === FIX QUEUE TOOL HANDLERS ===
2905
+ async function getPendingFixes(args) {
2906
+ const limit = args.limit || 10;
2907
+ let query = supabase
2908
+ .from('fix_requests')
2909
+ .select(`
2910
+ id,
2911
+ title,
2912
+ description,
2913
+ prompt,
2914
+ file_path,
2915
+ status,
2916
+ claimed_at,
2917
+ claimed_by,
2918
+ created_at,
2919
+ report:reports(id, title, severity, description)
2920
+ `)
2921
+ .eq('project_id', PROJECT_ID)
2922
+ .order('created_at', { ascending: true })
2923
+ .limit(limit);
2924
+ if (!args.include_claimed) {
2925
+ query = query.eq('status', 'pending');
2926
+ }
2927
+ else {
2928
+ query = query.in('status', ['pending', 'claimed']);
2929
+ }
2930
+ const { data, error } = await query;
2931
+ if (error) {
2932
+ return { error: error.message };
2933
+ }
2934
+ if (!data || data.length === 0) {
2935
+ return {
2936
+ fix_requests: [],
2937
+ count: 0,
2938
+ message: 'No pending fix requests in the queue. Great job keeping up!',
2939
+ };
2940
+ }
2941
+ return {
2942
+ fix_requests: data.map((fr) => ({
2943
+ id: fr.id,
2944
+ title: fr.title,
2945
+ description: fr.description,
2946
+ prompt: fr.prompt,
2947
+ file_path: fr.file_path,
2948
+ status: fr.status,
2949
+ claimed_by: fr.claimed_by,
2950
+ claimed_at: fr.claimed_at,
2951
+ created_at: fr.created_at,
2952
+ related_report: fr.report ? {
2953
+ id: fr.report.id,
2954
+ title: fr.report.title,
2955
+ severity: fr.report.severity,
2956
+ } : null,
2957
+ })),
2958
+ count: data.length,
2959
+ message: `Found ${data.length} fix request(s) waiting. Use claim_fix_request to start working on one.`,
2960
+ };
2961
+ }
2962
+ async function claimFixRequest(args) {
2963
+ if (!isValidUUID(args.fix_request_id)) {
2964
+ return { error: 'Invalid fix_request_id format' };
2965
+ }
2966
+ // First check if it's still available
2967
+ const { data: existing, error: checkError } = await supabase
2968
+ .from('fix_requests')
2969
+ .select('id, status, claimed_by, prompt, title')
2970
+ .eq('id', args.fix_request_id)
2971
+ .eq('project_id', PROJECT_ID) // Security: ensure fix request belongs to this project
2972
+ .single();
2973
+ if (checkError) {
2974
+ return { error: checkError.message };
2975
+ }
2976
+ if (existing.status === 'claimed') {
2977
+ return {
2978
+ error: `This fix request is already claimed by ${existing.claimed_by || 'another instance'}`,
2979
+ status: existing.status,
2980
+ };
2981
+ }
2982
+ if (existing.status === 'completed') {
2983
+ return {
2984
+ error: 'This fix request has already been completed',
2985
+ status: existing.status,
2986
+ };
2987
+ }
2988
+ // Claim it
2989
+ const claimedBy = args.claimed_by || `claude-code-${Date.now()}`;
2990
+ const { error: updateError } = await supabase
2991
+ .from('fix_requests')
2992
+ .update({
2993
+ status: 'claimed',
2994
+ claimed_at: new Date().toISOString(),
2995
+ claimed_by: claimedBy,
2996
+ })
2997
+ .eq('id', args.fix_request_id)
2998
+ .eq('project_id', PROJECT_ID) // Security: ensure fix request belongs to this project
2999
+ .eq('status', 'pending'); // Only claim if still pending (race condition protection)
3000
+ if (updateError) {
3001
+ return { error: updateError.message };
3002
+ }
3003
+ return {
3004
+ success: true,
3005
+ message: `Fix request claimed successfully. Here's your task:`,
3006
+ fix_request: {
3007
+ id: args.fix_request_id,
3008
+ title: existing.title,
3009
+ prompt: existing.prompt,
3010
+ },
3011
+ next_steps: [
3012
+ '1. Read and understand the prompt below',
3013
+ '2. Implement the fix',
3014
+ '3. Test your changes',
3015
+ '4. Use complete_fix_request when done',
3016
+ ],
3017
+ };
3018
+ }
3019
+ async function completeFixRequest(args) {
3020
+ if (!isValidUUID(args.fix_request_id)) {
3021
+ return { error: 'Invalid fix_request_id format' };
3022
+ }
3023
+ const isSuccess = args.success !== false;
3024
+ const updates = {
3025
+ status: isSuccess ? 'completed' : 'cancelled',
3026
+ completed_at: new Date().toISOString(),
3027
+ completion_notes: args.completion_notes || (isSuccess ? 'Fix completed' : 'Could not complete fix'),
3028
+ };
3029
+ const { error } = await supabase
3030
+ .from('fix_requests')
3031
+ .update(updates)
3032
+ .eq('id', args.fix_request_id)
3033
+ .eq('project_id', PROJECT_ID); // Security: ensure fix request belongs to this project
3034
+ if (error) {
3035
+ return { error: error.message };
3036
+ }
3037
+ return {
3038
+ success: true,
3039
+ message: isSuccess
3040
+ ? 'Fix request marked as completed!'
3041
+ : 'Fix request marked as cancelled.',
3042
+ fix_request_id: args.fix_request_id,
3043
+ status: updates.status,
3044
+ };
3045
+ }
3046
+ // === MCP PROMPTS FOR GUIDED WORKFLOWS ===
3047
+ const prompts = [
3048
+ {
3049
+ name: 'fix_bugs',
3050
+ description: 'Review and fix open bugs in the project. Shows critical bugs first and guides you through fixing them one by one.',
3051
+ arguments: [
3052
+ {
3053
+ name: 'severity',
3054
+ description: 'Filter by severity: critical, high, medium, low, or all (default: all)',
3055
+ required: false,
3056
+ },
3057
+ {
3058
+ name: 'file',
3059
+ description: 'Filter to bugs in a specific file path',
3060
+ required: false,
3061
+ },
3062
+ ],
3063
+ },
3064
+ {
3065
+ name: 'qa_check',
3066
+ description: 'Run a QA check on your recent changes. Analyzes changed files for potential issues and related bugs.',
3067
+ arguments: [
3068
+ {
3069
+ name: 'files',
3070
+ description: 'Comma-separated list of changed file paths (or leave empty to auto-detect from git)',
3071
+ required: false,
3072
+ },
3073
+ ],
3074
+ },
3075
+ {
3076
+ name: 'regression_tests',
3077
+ description: 'Generate regression tests for recently fixed bugs to prevent them from recurring.',
3078
+ arguments: [
3079
+ {
3080
+ name: 'limit',
3081
+ description: 'Number of recent fixes to generate tests for (default: 5)',
3082
+ required: false,
3083
+ },
3084
+ ],
3085
+ },
3086
+ {
3087
+ name: 'bug_hunt',
3088
+ description: 'Proactive bug hunting mode. Analyzes the codebase for potential issues based on common patterns and past bugs.',
3089
+ arguments: [
3090
+ {
3091
+ name: 'focus',
3092
+ description: 'Area to focus on: error_handling, null_checks, async_issues, type_safety, security',
3093
+ required: false,
3094
+ },
3095
+ ],
3096
+ },
3097
+ ];
3098
+ async function generatePromptContent(name, args) {
3099
+ switch (name) {
3100
+ case 'fix_bugs': {
3101
+ const severity = args.severity || 'all';
3102
+ const fileFilter = args.file;
3103
+ // First, check the fix queue for requested fixes from the dashboard
3104
+ const { data: fixRequests } = await supabase
3105
+ .from('fix_requests')
3106
+ .select(`
3107
+ id,
3108
+ title,
3109
+ description,
3110
+ prompt,
3111
+ file_path,
3112
+ status,
3113
+ created_at,
3114
+ report:reports(id, title, severity)
3115
+ `)
3116
+ .eq('project_id', PROJECT_ID)
3117
+ .eq('status', 'pending')
3118
+ .order('created_at', { ascending: true })
3119
+ .limit(5);
3120
+ // Fetch open bugs
3121
+ let query = supabase
3122
+ .from('reports')
3123
+ .select('id, title, description, severity, status, code_context, created_at')
3124
+ .eq('project_id', PROJECT_ID)
3125
+ .eq('report_type', 'bug')
3126
+ .in('status', ['new', 'confirmed', 'in_progress']);
3127
+ if (severity !== 'all') {
3128
+ query = query.eq('severity', severity);
3129
+ }
3130
+ const { data: bugs } = await query.order('severity', { ascending: true }).limit(20); // ascending is alphabetical if severity is stored as text (critical, high, low, medium); true rank order only if it is a Postgres enum
3131
+ // Filter by file if specified
3132
+ let filteredBugs = bugs || [];
3133
+ if (fileFilter) {
3134
+ filteredBugs = filteredBugs.filter(b => {
3135
+ const ctx = b.code_context;
3136
+ const filePath = ctx?.file_path;
3137
+ return filePath && filePath.includes(fileFilter);
3138
+ });
3139
+ }
3140
+ // Build fix queue section if there are pending requests
3141
+ let fixQueueSection = '';
3142
+ if (fixRequests && fixRequests.length > 0) {
3143
+ const queueList = fixRequests.map((fr, i) => {
3144
+ return `
3145
+ ### ${i + 1}. ${fr.title}
3146
+ - **Fix Request ID:** \`${fr.id}\`
3147
+ - **File:** ${fr.file_path || 'Not specified'}
3148
+ - **Related Bug:** ${fr.report ? `[${fr.report.severity?.toUpperCase()}] ${fr.report.title}` : 'None'}
3149
+ - **Created:** ${new Date(fr.created_at).toLocaleDateString()}
3150
+
3151
+ <details>
3152
+ <summary>View Fix Prompt</summary>
3153
+
3154
+ \`\`\`
3155
+ ${fr.prompt}
3156
+ \`\`\`
3157
+ </details>
3158
+ `;
3159
+ }).join('\n');
3160
+ fixQueueSection = `
3161
+ # 🔧 Fix Queue (Priority)
3162
+
3163
+ **${fixRequests.length} fix request(s)** have been queued from the BugBear dashboard:
3164
+
3165
+ ${queueList}
3166
+
3167
+ ---
3168
+
3169
+ **To start on a fix request:**
3170
+ 1. Use \`claim_fix_request\` with the fix request ID
3171
+ 2. Follow the prompt instructions
3172
+ 3. Use \`complete_fix_request\` when done
3173
+
3174
+ ---
3175
+
3176
+ `;
3177
+ }
3178
+ if (filteredBugs.length === 0 && (!fixRequests || fixRequests.length === 0)) {
3179
+ return `# No Open Bugs Found
3180
+
3181
+ Great news! There are no open bugs or fix requests matching your criteria.
3182
+
3183
+ ${severity !== 'all' ? `Filtered by severity: ${severity}` : ''}
3184
+ ${fileFilter ? `Filtered by file: ${fileFilter}` : ''}
3185
+
3186
+ Use \`list_reports\` to see all reports including resolved ones.`;
3187
+ }
3188
+ const bugList = filteredBugs.map((b, i) => {
3189
+ const ctx = b.code_context;
3190
+ return `
3191
+ ## ${i + 1}. [${b.severity?.toUpperCase()}] ${b.title}
3192
+ - **ID:** \`${b.id}\`
3193
+ - **Status:** ${b.status}
3194
+ - **File:** ${ctx?.file_path || 'Unknown'}${ctx?.line_number ? `:${ctx.line_number}` : ''}
3195
+ - **Description:** ${b.description.slice(0, 200)}${b.description.length > 200 ? '...' : ''}
3196
+ ${ctx?.suggested_fix ? `- **Suggested Fix:** ${ctx.suggested_fix}` : ''}
3197
+ `;
3198
+ }).join('\n');
3199
+ return `${fixQueueSection}# Bug Fixing Session
3200
+
3201
+ Found **${filteredBugs.length}** open bug(s) to fix.
3202
+
3203
+ ${bugList}
3204
+
3205
+ ---
3206
+
3207
+ ## Workflow
3208
+
3209
+ 1. **Read the bug details** - Use \`get_report\` with the bug ID for full context
3210
+ 2. **Check the code** - Use \`get_bugs_for_file\` to see related issues
3211
+ 3. **Fix the bug** - Make your code changes
3212
+ 4. **Mark as fixed** - Use \`mark_fixed_with_commit\` after committing
3213
+ 5. **Create regression test** - Use \`create_regression_test\` to prevent recurrence
3214
+
3215
+ ${fixRequests && fixRequests.length > 0 ? '**Start with the Fix Queue items above** - these have been specifically requested by users!' : "Let's start with the most critical bug. Which one would you like to tackle first?"}`;
3216
+ }
3217
+ case 'qa_check': {
3218
+ const filesArg = args.files;
3219
+ let filePaths = [];
3220
+ if (filesArg) {
3221
+ filePaths = filesArg.split(',').map(f => f.trim());
3222
+ }
3223
+ // If no files specified, we'll provide instructions
3224
+ if (filePaths.length === 0) {
3225
+ return `# QA Check Mode
3226
+
3227
+ I'll analyze your changes for potential issues and related bugs.
3228
+
3229
+ **To get started, tell me which files you've changed:**
3230
+
3231
+ Option 1: Run \`git diff --name-only\` and provide the file list
3232
+ Option 2: Tell me which files you're working on
3233
+
3234
+ Once I know the files, I'll:
3235
+ 1. Check for known bugs in those files
3236
+ 2. Look for related issues that might be affected
3237
+ 3. Suggest test cases to verify your changes
3238
+ 4. Identify potential problem patterns
3239
+
3240
+ What files have you changed?`;
3241
+ }
3242
+ // Analyze the files
3243
+ const analysis = await getBugsAffectingCode({ file_paths: filePaths });
3244
+ return `# QA Check Results
3245
+
3246
+ Analyzed **${filePaths.length}** file(s):
3247
+ ${filePaths.map(f => `- ${f}`).join('\n')}
3248
+
3249
+ ## Bug Analysis
3250
+
3251
+ ${analysis.affected_bugs?.length > 0
3252
+ ? `Found **${analysis.affected_bugs.length}** potentially affected bug(s):
3253
+
3254
+ ${analysis.affected_bugs.map((b) => `- [${b.severity.toUpperCase()}] ${b.title} (${b.match_type} match in ${b.matched_file})`).join('\n')}
3255
+
3256
+ ${analysis.warnings?.join('\n') || ''}
3257
+ `
3258
+ : '✅ No known bugs affected by these changes.'}
3259
+
3260
+ ## Recommended Actions
3261
+
3262
+ 1. ${analysis.affected_bugs?.length > 0 ? 'Review the affected bugs above before pushing' : 'Proceed with your changes'}
3263
+ 2. Use \`suggest_test_cases\` to generate tests for your changed files
3264
+ 3. Consider running the app and manually testing the affected areas
3265
+
3266
+ Would you like me to generate test cases for these files?`;
3267
+ }
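The qa_check prompt takes its `files` argument as a comma-separated list, and the text above suggests `git diff --name-only` as one source for it. A small sketch of how a caller might build that argument, assuming a git checkout and Node's `child_process`; the helper name is illustrative.

```
// Sketch: building the comma-separated `files` argument for qa_check from git.
const { execSync } = require('child_process');

function changedFilesArg() {
  const out = execSync('git diff --name-only', { encoding: 'utf8' });
  return out
    .split('\n')
    .map((f) => f.trim())
    .filter(Boolean)
    .join(','); // matches the comma-separated format qa_check expects
}
```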
3268
+ case 'regression_tests': {
3269
+ const limit = parseInt(args.limit) || 5;
3270
+ // Get recently resolved bugs
3271
+ const { data: resolvedBugs } = await supabase
3272
+ .from('reports')
3273
+ .select('id, title, description, severity, resolved_at, code_context')
3274
+ .eq('project_id', PROJECT_ID)
3275
+ .eq('report_type', 'bug')
3276
+ .eq('status', 'resolved')
3277
+ .order('resolved_at', { ascending: false })
3278
+ .limit(limit);
3279
+ if (!resolvedBugs || resolvedBugs.length === 0) {
3280
+ return `# Regression Test Generation
3281
+
3282
+ No recently resolved bugs found.
3283
+
3284
+ To create regression tests:
3285
+ 1. First fix some bugs and mark them as resolved using \`mark_fixed_with_commit\`
3286
+ 2. Then come back here to generate regression tests
3287
+
3288
+ Alternatively, use \`suggest_test_cases\` to generate general test cases.`;
3289
+ }
3290
+ // Check which ones already have regression tests
3291
+ const bugList = resolvedBugs.map((b, i) => {
3292
+ const ctx = b.code_context;
3293
+ const hasCommit = ctx?.fix && ctx.fix.commit_sha;
3294
+ return `
3295
+ ## ${i + 1}. ${b.title}
3296
+ - **Bug ID:** \`${b.id}\`
3297
+ - **Severity:** ${b.severity}
3298
+ - **Resolved:** ${b.resolved_at ? new Date(b.resolved_at).toLocaleDateString() : 'Unknown'}
3299
+ - **Commit:** ${hasCommit ? ctx.fix.commit_sha.slice(0, 7) : 'Not linked'}
3300
+ `;
3301
+ }).join('\n');
3302
+ return `# Regression Test Generation
3303
+
3304
+ Found **${resolvedBugs.length}** recently resolved bug(s) that need regression tests:
3305
+
3306
+ ${bugList}
3307
+
3308
+ ---
3309
+
3310
+ ## Generate Tests
3311
+
3312
+ For each bug, I can create a regression test to prevent it from recurring.
3313
+
3314
+ Use \`create_regression_test\` with the bug ID to generate a test case.
3315
+
3316
+ Example:
3317
+ \`\`\`
3318
+ create_regression_test(report_id: "${resolvedBugs[0]?.id}", test_type: "integration")
3319
+ \`\`\`
3320
+
3321
+ Would you like me to generate regression tests for all of these bugs?`;
3322
+ }
3323
+ case 'bug_hunt': {
3324
+ const focus = args.focus || 'general';
3325
+ const focusDescriptions = {
3326
+ error_handling: 'missing try/catch blocks, unhandled promise rejections, missing error boundaries',
3327
+ null_checks: 'potential null/undefined access, optional chaining opportunities, nullish coalescing',
3328
+ async_issues: 'race conditions, missing await, unhandled async errors, memory leaks',
3329
+ type_safety: 'type assertions, any types, missing type guards, unsafe casts',
3330
+ security: 'XSS vulnerabilities, injection risks, exposed secrets, insecure data handling',
3331
+ general: 'common patterns that have caused bugs in this project',
3332
+ };
3333
+ // Get bug patterns for context
3334
+ const patterns = await getBugPatterns({});
3335
+ return `# Bug Hunt Mode: ${focus.charAt(0).toUpperCase() + focus.slice(1)}
3336
+
3337
+ I'm looking for: **${focusDescriptions[focus] || focusDescriptions.general}**
3338
+
3339
+ ## Historical Bug Hotspots
3340
+
3341
+ ${patterns.hotspots?.slice(0, 5).map((h) => `- **${h.route}**: ${h.total} bugs (${h.open} open, ${h.critical} critical)`).join('\n') || 'No bug patterns found yet.'}
3342
+
3343
+ ## Bug Hunt Checklist
3344
+
3345
+ Based on past bugs in this project, here's what to look for:
3346
+
3347
+ ${focus === 'error_handling' || focus === 'general' ? `
3348
+ ### Error Handling
3349
+ - [ ] All async functions have try/catch or .catch()
3350
+ - [ ] API calls handle network errors
3351
+ - [ ] User-facing error messages are helpful
3352
+ - [ ] Errors are logged appropriately
3353
+ ` : ''}
3354
+
3355
+ ${focus === 'null_checks' || focus === 'general' ? `
3356
+ ### Null Safety
3357
+ - [ ] Optional chaining (?.) used for potentially undefined values
3358
+ - [ ] Nullish coalescing (??) for default values
3359
+ - [ ] Array/object access is guarded
3360
+ - [ ] Props have appropriate defaults
3361
+ ` : ''}
3362
+
3363
+ ${focus === 'async_issues' || focus === 'general' ? `
3364
+ ### Async Issues
3365
+ - [ ] All Promises are awaited or have .then()
3366
+ - [ ] Cleanup functions in useEffect
3367
+ - [ ] Race conditions prevented with AbortController
3368
+ - [ ] Loading states are handled
3369
+ ` : ''}
3370
+
3371
+ ---
3372
+
3373
+ **To report bugs you find:** Use \`create_bug_report\` with the file, line number, and code snippet.
3374
+
3375
+ Which files or areas would you like me to analyze?`;
3376
+ }
3377
+ default:
3378
+ return 'Unknown prompt';
3379
+ }
3380
+ }
3381
+ // Main server setup
3382
+ async function main() {
3383
+ initSupabase();
3384
+ const server = new index_js_1.Server({
3385
+ name: 'bugbear-mcp',
3386
+ version: '0.1.0',
3387
+ }, {
3388
+ capabilities: {
3389
+ tools: {},
3390
+ resources: {},
3391
+ prompts: {},
3392
+ },
3393
+ });
3394
+ // Handle tool listing
3395
+ server.setRequestHandler(types_js_1.ListToolsRequestSchema, async () => ({
3396
+ tools,
3397
+ }));
3398
+ // Handle tool execution
3399
+ server.setRequestHandler(types_js_1.CallToolRequestSchema, async (request) => {
3400
+ const { name, arguments: args } = request.params;
3401
+ try {
3402
+ let result;
3403
+ switch (name) {
3404
+ case 'list_reports':
3405
+ result = await listReports(args);
3406
+ break;
3407
+ case 'get_report':
3408
+ result = await getReport(args);
3409
+ break;
3410
+ case 'search_reports':
3411
+ result = await searchReports(args);
3412
+ break;
3413
+ case 'update_report_status':
3414
+ result = await updateReportStatus(args);
3415
+ break;
3416
+ case 'get_report_context':
3417
+ result = await getReportContext(args);
3418
+ break;
3419
+ case 'get_project_info':
3420
+ result = await getProjectInfo();
3421
+ break;
3422
+ case 'get_qa_tracks':
3423
+ result = await getQaTracks();
3424
+ break;
3425
+ case 'create_test_case':
3426
+ result = await createTestCase(args);
3427
+ break;
3428
+ case 'update_test_case':
3429
+ result = await updateTestCase(args);
3430
+ break;
3431
+ case 'list_test_cases':
3432
+ result = await listTestCases(args);
3433
+ break;
3434
+ case 'get_bug_patterns':
3435
+ result = await getBugPatterns(args);
3436
+ break;
3437
+ case 'analyze_changes_for_tests':
3438
+ result = await analyzeChangesForTests(args);
3439
+ break;
3440
+ case 'suggest_test_cases':
3441
+ result = await suggestTestCases(args);
3442
+ break;
3443
+ // === QA INTELLIGENCE TOOLS ===
3444
+ case 'get_test_priorities':
3445
+ result = await getTestPriorities(args);
3446
+ break;
3447
+ case 'get_coverage_gaps':
3448
+ result = await getCoverageGaps(args);
3449
+ break;
3450
+ case 'get_regressions':
3451
+ result = await getRegressions(args);
3452
+ break;
3453
+ case 'get_coverage_matrix':
3454
+ result = await getCoverageMatrix(args);
3455
+ break;
3456
+ case 'get_stale_coverage':
3457
+ result = await getStaleCoverage(args);
3458
+ break;
3459
+ case 'generate_deploy_checklist':
3460
+ result = await generateDeployChecklist(args);
3461
+ break;
3462
+ case 'get_qa_health':
3463
+ result = await getQAHealth(args);
3464
+ break;
3465
+ // === WRITE-BACK TOOLS ===
3466
+ case 'create_bug_report':
3467
+ result = await createBugReport(args);
3468
+ break;
3469
+ case 'get_bugs_for_file':
3470
+ result = await getBugsForFile(args);
3471
+ break;
3472
+ case 'mark_fixed_with_commit':
3473
+ result = await markFixedWithCommit(args);
3474
+ break;
3475
+ case 'get_bugs_affecting_code':
3476
+ result = await getBugsAffectingCode(args);
3477
+ break;
3478
+ case 'link_bug_to_code':
3479
+ result = await linkBugToCode(args);
3480
+ break;
3481
+ case 'create_regression_test':
3482
+ result = await createRegressionTest(args);
3483
+ break;
3484
+ // === FIX QUEUE TOOLS ===
3485
+ case 'get_pending_fixes':
3486
+ result = await getPendingFixes(args);
3487
+ break;
3488
+ case 'claim_fix_request':
3489
+ result = await claimFixRequest(args);
3490
+ break;
3491
+ case 'complete_fix_request':
3492
+ result = await completeFixRequest(args);
3493
+ break;
3494
+ default:
3495
+ return {
3496
+ content: [{ type: 'text', text: `Unknown tool: ${name}` }],
3497
+ isError: true,
3498
+ };
3499
+ }
3500
+ return {
3501
+ content: [{ type: 'text', text: JSON.stringify(result, null, 2) }],
3502
+ };
3503
+ }
3504
+ catch (err) {
3505
+ return {
3506
+ content: [{ type: 'text', text: `Error: ${err instanceof Error ? err.message : 'Unknown error'}` }],
3507
+ isError: true,
3508
+ };
3509
+ }
3510
+ });
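Every tool in the switch above is reached through the same `tools/call` method. A sketch of one such request on the wire, using `get_pending_fixes` as the example; the SDK validates it against `CallToolRequestSchema` before this handler runs.

```
// Sketch: a tools/call request routed to the handler above.
const callToolRequest = {
  jsonrpc: '2.0',
  id: 2,
  method: 'tools/call',
  params: {
    name: 'get_pending_fixes',
    arguments: {}, // tool-specific arguments become `args` in the handler
  },
};
// The handler replies with { content: [{ type: 'text', text: '<JSON result>' }] }.
```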
3511
+ // Handle resource listing (reports as resources)
3512
+ server.setRequestHandler(types_js_1.ListResourcesRequestSchema, async () => {
3513
+ const { data } = await supabase
3514
+ .from('reports')
3515
+ .select('id, description, report_type, severity')
3516
+ .eq('project_id', PROJECT_ID)
3517
+ .eq('status', 'new')
3518
+ .order('created_at', { ascending: false })
3519
+ .limit(10);
3520
+ return {
3521
+ resources: (data || []).map(r => ({
3522
+ uri: `bugbear://reports/${r.id}`,
3523
+ name: `[${r.severity?.toUpperCase() || 'N/A'}] ${r.description.slice(0, 50)}...`,
3524
+ description: `${r.report_type} report`,
3525
+ mimeType: 'application/json',
3526
+ })),
3527
+ };
3528
+ });
3529
+ // Handle prompt listing
3530
+ server.setRequestHandler(types_js_1.ListPromptsRequestSchema, async () => ({
3531
+ prompts,
3532
+ }));
3533
+ // Handle prompt execution
3534
+ server.setRequestHandler(types_js_1.GetPromptRequestSchema, async (request) => {
3535
+ const { name, arguments: args } = request.params;
3536
+ const content = await generatePromptContent(name, args || {});
3537
+ return {
3538
+ description: prompts.find(p => p.name === name)?.description || '',
3539
+ messages: [
3540
+ {
3541
+ role: 'user',
3542
+ content: {
3543
+ type: 'text',
3544
+ text: content,
3545
+ },
3546
+ },
3547
+ ],
3548
+ };
3549
+ });
3550
+ // Handle resource reading
3551
+ server.setRequestHandler(types_js_1.ReadResourceRequestSchema, async (request) => {
3552
+ const uri = request.params.uri;
3553
+ const match = uri.match(/^bugbear:\/\/reports\/(.+)$/);
3554
+ if (!match) {
3555
+ throw new Error(`Invalid resource URI: ${uri}`);
3556
+ }
3557
+ const reportId = match[1];
3558
+ const result = await getReport({ report_id: reportId });
3559
+ return {
3560
+ contents: [
3561
+ {
3562
+ uri,
3563
+ mimeType: 'application/json',
3564
+ text: JSON.stringify(result, null, 2),
3565
+ },
3566
+ ],
3567
+ };
3568
+ });
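Resources are addressed with the `bugbear://reports/<id>` scheme produced by the listing handler and parsed by the regex above. A sketch of a read request follows; the UUID is a placeholder, and real IDs come from the `resources/list` response.

```
// Sketch: reading one report resource by URI.
const readResourceRequest = {
  jsonrpc: '2.0',
  id: 3,
  method: 'resources/read',
  params: {
    uri: 'bugbear://reports/00000000-0000-0000-0000-000000000000',
  },
};
// The reply's contents[0].text holds the report JSON produced by getReport.
```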
3569
+ // Start the server
3570
+ const transport = new stdio_js_1.StdioServerTransport();
3571
+ await server.connect(transport);
3572
+ console.error('BugBear MCP server started'); // logged to stderr: stdout is reserved for the MCP stdio protocol
3573
+ }
3574
+ main().catch((err) => {
3575
+ console.error('Failed to start server:', err);
3576
+ process.exit(1);
3577
+ });
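Because the server speaks MCP over stdio, any client that can spawn the process can drive it. A minimal sketch using the client half of the same SDK; the entry-point path, client name, and env passthrough are assumptions for illustration, not the package's documented setup.

```
// Sketch: connecting to this server from an MCP client over stdio.
const { Client } = require('@modelcontextprotocol/sdk/client/index.js');
const { StdioClientTransport } = require('@modelcontextprotocol/sdk/client/stdio.js');

async function connect() {
  const transport = new StdioClientTransport({
    command: 'node',
    args: ['dist/index.js'], // illustrative path to the built server
    env: process.env,        // the server reads its config from the environment
  });
  const client = new Client({ name: 'example-client', version: '0.0.1' }, { capabilities: {} });
  await client.connect(transport);
  return client.listTools(); // should include the fix-queue and QA tools registered above
}
```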