@bugzy-ai/bugzy 1.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. package/LICENSE +21 -0
  2. package/README.md +248 -0
  3. package/dist/cli/index.cjs +7547 -0
  4. package/dist/cli/index.cjs.map +1 -0
  5. package/dist/cli/index.d.cts +1 -0
  6. package/dist/cli/index.d.ts +1 -0
  7. package/dist/cli/index.js +7539 -0
  8. package/dist/cli/index.js.map +1 -0
  9. package/dist/index.cjs +6439 -0
  10. package/dist/index.cjs.map +1 -0
  11. package/dist/index.d.cts +54 -0
  12. package/dist/index.d.ts +54 -0
  13. package/dist/index.js +6383 -0
  14. package/dist/index.js.map +1 -0
  15. package/dist/subagents/index.cjs +2703 -0
  16. package/dist/subagents/index.cjs.map +1 -0
  17. package/dist/subagents/index.d.cts +34 -0
  18. package/dist/subagents/index.d.ts +34 -0
  19. package/dist/subagents/index.js +2662 -0
  20. package/dist/subagents/index.js.map +1 -0
  21. package/dist/subagents/metadata.cjs +207 -0
  22. package/dist/subagents/metadata.cjs.map +1 -0
  23. package/dist/subagents/metadata.d.cts +31 -0
  24. package/dist/subagents/metadata.d.ts +31 -0
  25. package/dist/subagents/metadata.js +174 -0
  26. package/dist/subagents/metadata.js.map +1 -0
  27. package/dist/tasks/index.cjs +3464 -0
  28. package/dist/tasks/index.cjs.map +1 -0
  29. package/dist/tasks/index.d.cts +44 -0
  30. package/dist/tasks/index.d.ts +44 -0
  31. package/dist/tasks/index.js +3431 -0
  32. package/dist/tasks/index.js.map +1 -0
  33. package/dist/templates/init/.bugzy/runtime/project-context.md +35 -0
  34. package/dist/templates/init/.bugzy/runtime/templates/test-plan-template.md +25 -0
  35. package/dist/templates/init/.bugzy/runtime/testing-best-practices.md +278 -0
  36. package/dist/templates/init/.gitignore-template +4 -0
  37. package/package.json +95 -0
  38. package/templates/init/.bugzy/runtime/knowledge-base.md +61 -0
  39. package/templates/init/.bugzy/runtime/knowledge-maintenance-guide.md +97 -0
  40. package/templates/init/.bugzy/runtime/project-context.md +35 -0
  41. package/templates/init/.bugzy/runtime/subagent-memory-guide.md +87 -0
  42. package/templates/init/.bugzy/runtime/templates/test-plan-template.md +25 -0
  43. package/templates/init/.bugzy/runtime/templates/test-result-schema.md +498 -0
  44. package/templates/init/.bugzy/runtime/test-execution-strategy.md +535 -0
  45. package/templates/init/.bugzy/runtime/testing-best-practices.md +632 -0
  46. package/templates/init/.gitignore-template +25 -0
  47. package/templates/init/CLAUDE.md +157 -0
  48. package/templates/init/test-runs/README.md +45 -0
  49. package/templates/playwright/BasePage.template.ts +190 -0
  50. package/templates/playwright/auth.setup.template.ts +89 -0
  51. package/templates/playwright/dataGenerators.helper.template.ts +148 -0
  52. package/templates/playwright/dateUtils.helper.template.ts +96 -0
  53. package/templates/playwright/pages.fixture.template.ts +50 -0
  54. package/templates/playwright/playwright.config.template.ts +97 -0
  55. package/templates/playwright/reporters/bugzy-reporter.ts +454 -0
@@ -0,0 +1,3431 @@
1
// src/tasks/constants.ts
// Stable string identifiers for every built-in Bugzy task. Keys are the
// programmatic constant names; values are the kebab-case task slugs used
// to register and look up task definitions.
var TASK_SLUGS = Object.fromEntries([
  ["EXPLORE_APPLICATION", "explore-application"],
  ["GENERATE_TEST_CASES", "generate-test-cases"],
  ["GENERATE_TEST_PLAN", "generate-test-plan"],
  ["HANDLE_MESSAGE", "handle-message"],
  ["PROCESS_EVENT", "process-event"],
  ["RUN_TESTS", "run-tests"],
  ["VERIFY_CHANGES", "verify-changes"]
]);
11
+
12
// src/tasks/templates/exploration-instructions.ts
/**
 * Markdown prompt template describing the adaptive exploratory-testing
 * protocol (Quick / Moderate / Deep exploration, plus a decision tree).
 * The {{STEP_NUMBER}} tokens are placeholders — presumably replaced with a
 * concrete step number by the task that embeds this template (TODO: confirm
 * against the consuming tasks, e.g. exploreApplicationTask.baseContent).
 * The \uXXXX sequences are bundler-escaped emoji/arrow characters and are
 * part of the runtime string; do not "fix" them.
 */
var EXPLORATION_INSTRUCTIONS = `
## Exploratory Testing Protocol

Before creating or running formal tests, perform exploratory testing to validate requirements and understand actual system behavior. The depth of exploration should adapt to the clarity of requirements.

### Step {{STEP_NUMBER}}.1: Assess Requirement Clarity

Determine exploration depth based on requirement quality:

| Clarity | Indicators | Exploration Depth | Goal |
|---------|-----------|-------------------|------|
| **Clear** | Detailed acceptance criteria, screenshots/mockups, specific field names/URLs/roles, unambiguous behavior, consistent patterns | Quick (1-2 min) | Confirm feature exists, capture evidence |
| **Vague** | General direction clear but specifics missing, incomplete examples, assumed details, relative terms ("fix", "better") | Moderate (3-5 min) | Document current behavior, identify ambiguities, generate clarification questions |
| **Unclear** | Contradictory info, multiple interpretations, no examples/criteria, ambiguous scope ("the page"), critical details missing | Deep (5-10 min) | Systematically test scenarios, document patterns, identify all ambiguities, formulate comprehensive questions |

**Examples:**
- **Clear:** "Change 'Submit' button from blue (#007BFF) to green (#28A745) on /auth/login. Verify hover effect."
- **Vague:** "Fix the sorting in todo list page. The items are mixed up for premium users."
- **Unclear:** "Improve the dashboard performance. Users say it's slow."

### Step {{STEP_NUMBER}}.2: Quick Exploration (1-2 min)

**When:** Requirements CLEAR

**Steps:**
1. Navigate to feature (use provided URL), verify loads without errors
2. Verify key elements exist (buttons, fields, sections mentioned)
3. Capture screenshot of initial state
4. Document:
\`\`\`markdown
**Quick Exploration (1 min)**
Feature: [Name] | URL: [Path]
Status: \u2705 Accessible / \u274C Not found / \u26A0\uFE0F Different
Screenshot: [filename]
Notes: [Immediate observations]
\`\`\`
5. **Decision:** \u2705 Matches \u2192 Test creation | \u274C/\u26A0\uFE0F Doesn't match \u2192 Moderate Exploration

**Time Limit:** 1-2 minutes

### Step {{STEP_NUMBER}}.3: Moderate Exploration (3-5 min)

**When:** Requirements VAGUE or Quick Exploration revealed discrepancies

**Steps:**
1. Navigate using appropriate role(s), set up preconditions, ensure clean state
2. Test primary user flow, document steps and behavior, note unexpected behavior
3. Capture before/after screenshots, document field values/ordering/visibility
4. Compare to requirement: What matches? What differs? What's absent?
5. Identify specific ambiguities:
\`\`\`markdown
**Moderate Exploration (4 min)**

**Explored:** Role: [Admin], Path: [Steps], Behavior: [What happened]

**Current State:** [Specific observations with examples]
- Example: "Admin view shows 8 sort options: By Title, By Due Date, By Priority..."

**Requirement Says:** [What requirement expected]

**Discrepancies:** [Specific differences]
- Example: "Premium users see 5 fewer sorting options than admins"

**Ambiguities:**
1. [First ambiguity with concrete example]
2. [Second if applicable]

**Clarification Needed:** [Specific questions]
\`\`\`
6. Assess severity using Clarification Protocol
7. **Decision:** \u{1F7E2} Minor \u2192 Proceed with assumptions | \u{1F7E1} Medium \u2192 Async clarification, proceed | \u{1F534} Critical \u2192 Stop, escalate

**Time Limit:** 3-5 minutes

### Step {{STEP_NUMBER}}.4: Deep Exploration (5-10 min)

**When:** Requirements UNCLEAR or critical ambiguities found

**Steps:**
1. **Define Exploration Matrix:** Identify dimensions (user roles, feature states, input variations, browsers)

2. **Systematic Testing:** Test each matrix cell methodically
\`\`\`
Example for "Todo List Sorting":
Matrix: User Roles \xD7 Feature Observations

Test 1: Admin Role \u2192 Navigate, document sort options (count, names, order), screenshot
Test 2: Basic User Role \u2192 Same todo list, document options, screenshot
Test 3: Compare \u2192 Side-by-side table, identify missing/reordered options
\`\`\`

3. **Document Patterns:** Consistent behavior? Role-based differences? What varies vs constant?

4. **Comprehensive Report:**
\`\`\`markdown
**Deep Exploration (8 min)**

**Matrix:** [Dimensions] | **Tests:** [X combinations]

**Findings:**

### Test 1: Admin
- Setup: [Preconditions] | Steps: [Actions]
- Observations: Sort options=8, Options=[list], Ordering=[sequence]
- Screenshot: [filename-admin.png]

### Test 2: Basic User
- Setup: [Preconditions] | Steps: [Actions]
- Observations: Sort options=3, Missing vs Admin=[5 options], Ordering=[sequence]
- Screenshot: [filename-user.png]

**Comparison Table:**
| Sort Option | Admin Pos | User Pos | Notes |
|-------------|-----------|----------|-------|
| By Title | 1 | 1 | Match |
| By Priority | 3 | Not visible | Missing |

**Patterns:**
- Role-based feature visibility
- Consistent relative ordering for visible fields

**Critical Ambiguities:**
1. Option Visibility: Intentional basic users see 5 fewer sort options?
2. Sort Definition: (A) All roles see all options in same order, OR (B) Roles see permitted options in same relative order?

**Clarification Questions:** [Specific, concrete based on findings]
\`\`\`

5. **Next Action:** Critical ambiguities \u2192 STOP, clarify | Patterns suggest answer \u2192 Validate assumption | Behavior clear \u2192 Test creation

**Time Limit:** 5-10 minutes

### Step {{STEP_NUMBER}}.5: Link Exploration to Clarification

**Flow:** Requirement Analysis \u2192 Exploration \u2192 Clarification

1. Requirement analysis detects vague language \u2192 Triggers exploration
2. Exploration documents current behavior \u2192 Identifies discrepancies
3. Clarification uses findings \u2192 Asks specific questions referencing observations

**Example:**
\`\`\`
"Fix the sorting in todo list"
\u2193 Ambiguity: "sorting" = by date, priority, or completion status?
\u2193 Moderate Exploration: Admin=8 sort options, User=3 sort options
\u2193 Question: "Should basic users see all 8 sort options (bug) or only 3 with consistent sequence (correct)?"
\`\`\`

### Step {{STEP_NUMBER}}.6: Document Exploration Results

**Template:**
\`\`\`markdown
## Exploration Summary

**Date:** [YYYY-MM-DD] | **Explorer:** [Agent/User] | **Depth:** [Quick/Moderate/Deep] | **Duration:** [X min]

### Feature: [Name and description]

### Observations: [Key findings]

### Current Behavior: [What feature does today]

### Discrepancies: [Requirement vs observation differences]

### Assumptions Made: [If proceeding with assumptions]

### Artifacts: Screenshots: [list], Video: [if captured], Notes: [detailed]
\`\`\`

**Memory Storage:** Feature behavior patterns, common ambiguity types, resolution approaches

### Step {{STEP_NUMBER}}.7: Integration with Test Creation

**Quick Exploration \u2192 Direct Test:**
- Feature verified \u2192 Create test matching requirement \u2192 Reference screenshot

**Moderate Exploration \u2192 Assumption-Based Test:**
- Document behavior \u2192 Create test on best interpretation \u2192 Mark assumptions \u2192 Plan updates after clarification

**Deep Exploration \u2192 Clarification-First:**
- Block test creation until clarification \u2192 Use exploration as basis for questions \u2192 Create test after answer \u2192 Reference both exploration and clarification

---

## Adaptive Exploration Decision Tree

\`\`\`
Start: Requirement Received
\u2193
Are requirements clear with specifics?
\u251C\u2500 YES \u2192 Quick Exploration (1-2 min)
\u2502 \u2193
\u2502 Does feature match description?
\u2502 \u251C\u2500 YES \u2192 Proceed to Test Creation
\u2502 \u2514\u2500 NO \u2192 Escalate to Moderate Exploration
\u2502
\u2514\u2500 NO \u2192 Is general direction clear but details missing?
\u251C\u2500 YES \u2192 Moderate Exploration (3-5 min)
\u2502 \u2193
\u2502 Are ambiguities MEDIUM severity or lower?
\u2502 \u251C\u2500 YES \u2192 Document assumptions, proceed with test creation
\u2502 \u2514\u2500 NO \u2192 Escalate to Deep Exploration or Clarification
\u2502
\u2514\u2500 NO \u2192 Deep Exploration (5-10 min)
\u2193
Document comprehensive findings
\u2193
Assess ambiguity severity
\u2193
Seek clarification for CRITICAL/HIGH
\`\`\`

---

## Remember:

\u{1F50D} **Explore before assuming** | \u{1F4CA} **Concrete observations > abstract interpretation** | \u23F1\uFE0F **Adaptive depth: time \u221D uncertainty** | \u{1F3AF} **Exploration findings \u2192 specific clarifications** | \u{1F4DD} **Always document** | \u{1F517} **Link exploration \u2192 ambiguity \u2192 clarification**
`;
231
+
232
// src/tasks/templates/knowledge-base.ts
/**
 * Markdown prompt fragment instructing an agent to read the project
 * knowledge base at .bugzy/runtime/knowledge-base.md before starting work.
 * Interpolated into task prompts (e.g. via ${KNOWLEDGE_BASE_READ_INSTRUCTIONS}
 * in exploreApplicationTask.baseContent). The escaped backticks (\`) are
 * literal backticks in the rendered markdown.
 */
var KNOWLEDGE_BASE_READ_INSTRUCTIONS = `
## Knowledge Base Context

Before proceeding, read the curated knowledge base to inform your work:

**Location:** \`.bugzy/runtime/knowledge-base.md\`

**Purpose:** The knowledge base is a living collection of factual knowledge - what we currently know and believe to be true about this project, its patterns, and its context. This is NOT a historical log, but a curated snapshot that evolves as understanding improves.

**How to Use:**
1. Read the knowledge base to understand:
- Project-specific patterns and conventions
- Known behaviors and system characteristics
- Relevant context from past work
- Documented decisions and approaches

2. Apply this knowledge to:
- Make informed decisions aligned with project patterns
- Avoid repeating past mistakes
- Build on existing understanding
- Maintain consistency with established practices

**Note:** The knowledge base may not exist yet or may be empty. If it doesn't exist or is empty, proceed without this context and help build it as you work.
`;
257
/**
 * Markdown prompt fragment instructing an agent to update the knowledge base
 * after finishing its work, following the maintenance guide at
 * .bugzy/runtime/knowledge-maintenance-guide.md. Companion to
 * KNOWLEDGE_BASE_READ_INSTRUCTIONS; interpolated into task prompts.
 */
var KNOWLEDGE_BASE_UPDATE_INSTRUCTIONS = `
## Knowledge Base Maintenance

After completing your work, update the knowledge base with new insights.

**Location:** \`.bugzy/runtime/knowledge-base.md\`

**Process:**

1. **Read the maintenance guide** at \`.bugzy/runtime/knowledge-maintenance-guide.md\` to understand when to ADD, UPDATE, or REMOVE entries and how to maintain a curated knowledge base (not an append-only log)

2. **Review the current knowledge base** to check for overlaps, contradictions, or opportunities to consolidate existing knowledge

3. **Update the knowledge base** following the maintenance guide principles: favor consolidation over addition, update rather than append, resolve contradictions immediately, and focus on quality over completeness

**Remember:** Every entry should answer "Will this help someone working on this project in 6 months?"
`;
274
+
275
// src/tasks/library/explore-application.ts
/**
 * Task definition for the "explore-application" command.
 *
 * The bulk of the definition is `baseContent`: a markdown prompt that drives
 * an agent through systematic application exploration. It interpolates the
 * sibling constants KNOWLEDGE_BASE_READ_INSTRUCTIONS, EXPLORATION_INSTRUCTIONS
 * and KNOWLEDGE_BASE_UPDATE_INSTRUCTIONS at evaluation time, so this object's
 * final text depends on those templates. Escaped backticks (\`) inside the
 * template are literal backticks in the rendered markdown; `$ARGUMENTS` is a
 * runtime placeholder in the prompt text, not a JS interpolation.
 */
var exploreApplicationTask = {
  // Stable identifier; shared with the task registry via TASK_SLUGS.
  slug: TASK_SLUGS.EXPLORE_APPLICATION,
  name: "Explore Application",
  description: "Systematically explore application to discover UI elements, workflows, and behaviors",
  // Frontmatter emitted for the generated command file (description + CLI hint).
  frontmatter: {
    description: "Systematically explore application to discover UI elements, workflows, and behaviors",
    "argument-hint": "--focus [area] --depth [shallow|deep] --system [system-name]"
  },
  baseContent: `# Explore Application Command

## SECURITY NOTICE
**CRITICAL**: Never read the \`.env\` file. It contains ONLY secrets (passwords, API keys).
- **Read \`.env.testdata\`** for non-secret environment variables (TEST_BASE_URL, TEST_OWNER_EMAIL, etc.)
- \`.env.testdata\` contains actual values for test data, URLs, and non-sensitive configuration
- For secrets: Reference variable names only (TEST_OWNER_PASSWORD) - values are injected at runtime
- The \`.env\` file access is blocked by settings.json

Systematically explore the application using the test-runner agent to discover actual UI elements, workflows, and behaviors. Updates test plan and project documentation with findings.

## Arguments
Arguments: $ARGUMENTS

## Parse Arguments
Extract the following from arguments:
- **focus**: Specific area to explore (authentication, navigation, search, content, admin)
- **depth**: Exploration depth - shallow (quick discovery) or deep (comprehensive) - defaults to deep
- **system**: Which system to explore (optional for multi-system setups)

${KNOWLEDGE_BASE_READ_INSTRUCTIONS}

## Process

### Step 0: Understand Exploration Protocol

This task implements the exploration protocol defined in the exploration-instructions template.

**Purpose**: This task provides the infrastructure for systematic application exploration that is referenced by other tasks (generate-test-plan, generate-test-cases, verify-changes) when they need to explore features before proceeding.

**Depth Alignment**: The depth levels in this task align with the exploration template:
- **Shallow exploration (15-20 min)** implements the quick/moderate exploration from the template
- **Deep exploration (45-60 min)** implements comprehensive deep exploration from the template

The depth levels are extended for full application exploration compared to the focused feature exploration used in other tasks.

**Full Exploration Protocol Reference**:

${EXPLORATION_INSTRUCTIONS}

**Note**: This task extends the protocol for comprehensive application-wide exploration, while other tasks use abbreviated versions for targeted feature exploration.

### Step 1: Load Environment and Context

#### 1.1 Check Environment Variables
Read \`.env.testdata\` file to understand what variables are required:
- TEST_BASE_URL or TEST_MOBILE_BASE_URL (base URL variable names)
- [SYSTEM_NAME]_URL (if multi-system setup)
- Authentication credential variable names for the selected system
- Any test data variable names

Note: The actual values will be read from the user's \`.env\` file at test execution time.
Verify \`.env.testdata\` exists to understand variable structure. If it doesn't exist, notify user to create it based on test plan.

#### 1.2 Read Current Test Plan
Read \`test-plan.md\` to:
- Identify sections marked with [TO BE EXPLORED]
- Find features requiring discovery
- Understand testing scope and priorities

#### 1.3 Read Project Context
Read \`.bugzy/runtime/project-context.md\` for:
- System architecture understanding
- Testing environment details
- QA workflow requirements

### Step 2: Prepare Exploration Strategy

Based on the arguments and context, prepare exploration instructions.

#### 2.1 Focus Area Strategies

**If focus is "authentication":**
\`\`\`
1. Navigate to the application homepage
2. Locate and document all authentication entry points:
- Login button/link location and selector
- Registration option and flow
- Social login options (Facebook, Google, etc.)
3. Test login flow:
- Document form fields and validation
- Test error states with invalid credentials
- Verify successful login indicators
4. Test logout functionality:
- Find logout option
- Verify session termination
- Check redirect behavior
5. Explore password recovery:
- Locate forgot password link
- Document recovery flow
- Note email/SMS options
6. Check role-based access:
- Identify user role indicators
- Document permission differences
- Test admin/moderator access if available
7. Test session persistence:
- Check remember me functionality
- Test timeout behavior
- Verify multi-tab session handling
\`\`\`

**If focus is "navigation":**
\`\`\`
1. Document main navigation structure:
- Primary menu items and hierarchy
- Mobile menu behavior
- Footer navigation links
2. Map URL patterns:
- Category URL structure
- Parameter patterns
- Deep linking support
3. Test breadcrumb navigation:
- Availability on different pages
- Clickability and accuracy
- Mobile display
4. Explore category system:
- Main categories and subcategories
- Navigation between levels
- Content organization
5. Document special sections:
- User profiles
- Admin areas
- Help/Support sections
6. Test browser navigation:
- Back/forward button behavior
- History management
- State preservation
\`\`\`

**If focus is "search":**
\`\`\`
1. Locate search interfaces:
- Main search bar
- Advanced search options
- Category-specific search
2. Document search features:
- Autocomplete/suggestions
- Search filters
- Sort options
3. Test search functionality:
- Special character handling
- Empty/invalid queries
4. Analyze search results:
- Result format and layout
- Pagination
- No results handling
5. Check search performance:
- Response times
- Result relevance
- Load more/infinite scroll
\`\`\`

**If no focus specified:**
Use comprehensive exploration covering all major areas.

#### 2.2 Depth Configuration

**Implementation Note**: These depths implement the exploration protocol defined in exploration-instructions.ts, extended for full application exploration.

**Shallow exploration (--depth shallow):**
- Quick discovery pass (15-20 minutes)
- Focus on main features only
- Basic screenshot capture
- High-level findings
- *Aligns with Quick/Moderate exploration from template*

**Deep exploration (--depth deep or default):**
- Comprehensive exploration (45-60 minutes)
- Test edge cases and variations
- Extensive screenshot documentation
- Detailed technical findings
- Performance observations
- Accessibility notes
- *Aligns with Deep exploration from template*

### Step 3: Execute Exploration

#### 3.1 Create Exploration Test Case
Generate a temporary exploration test case file at \`./test-cases/EXPLORATION-TEMP.md\`:

\`\`\`markdown
---
id: EXPLORATION-TEMP
title: Application Exploration - [Focus Area or Comprehensive]
type: exploratory
priority: high
---

## Preconditions
- Browser with cleared cookies and cache
- Access to [system] environment
- Credentials configured per .env.testdata template

## Test Steps
[Generated exploration steps based on strategy]

## Expected Results
Document all findings including:
- UI element locations and selectors
- Navigation patterns and URLs
- Feature behaviors and workflows
- Performance observations
- Error states and edge cases
- Screenshots of all key areas
\`\`\`

#### 3.2 Launch Test Runner Agent
Invoke the test-runner agent with special exploration instructions:

\`\`\`
Execute the exploration test case at ./test-cases/EXPLORATION-TEMP.md with focus on discovery and documentation.

Special instructions for exploration mode:
1. Take screenshots of EVERY significant UI element and page
2. Document all clickable elements with their selectors
3. Note all URL patterns and parameters
4. Test variations and edge cases where possible
5. Document load times and performance observations
6. Create detailed findings report with structured data
7. Organize screenshots by functional area
8. Note any console errors or warnings
9. Document which features are accessible vs restricted

Generate a comprehensive exploration report that can be used to update project documentation.
\`\`\`

### Step 4: Process Exploration Results

#### 4.1 Read Test Runner Output
Read the generated test run files from \`./test-runs/[timestamp]/EXPLORATION-TEMP/\`:
- \`findings.md\` - Main findings document
- \`test-log.md\` - Detailed step execution
- \`screenshots/\` - Visual documentation
- \`summary.json\` - Execution summary

#### 4.2 Parse and Structure Findings
Extract and organize:
- Discovered features and capabilities
- UI element selectors and patterns
- Navigation structure and URLs
- Authentication flow details
- Performance metrics
- Technical observations
- Areas requiring further investigation

### Step 5: Update Project Artifacts

#### 5.1 Update Test Plan
Read and update \`test-plan.md\`:
- Replace [TO BE EXPLORED] markers with concrete findings
- Add newly discovered features to test items
- Update navigation patterns and URL structures
- Document actual authentication methods
- Update environment variables if new ones discovered
- Refine pass/fail criteria based on actual behavior

#### 5.2 Create Exploration Report
Create \`./exploration-reports/[timestamp]-[focus]-exploration.md\`

### Step 6: Cleanup

#### 6.1 Remove Temporary Files
Delete the temporary exploration test case:
\`\`\`bash
rm ./test-cases/EXPLORATION-TEMP.md
\`\`\`

### Step 7: Generate Summary Report
Create a concise summary for the user

## Error Handling

### Environment Issues
- If \`.env.testdata\` missing: Warn user and suggest creating it from test plan
- If credentials invalid (at runtime): Document in report and continue with public areas
- If system unreachable: Retry with exponential backoff, report if persistent

### Exploration Failures
- If test-runner fails: Capture partial results and report
- If specific area inaccessible: Note in findings and continue
- If browser crashes: Attempt recovery and resume
- If test-runner stops, but does not create files, inspect what it did and if it was not enough remove the test-run and start the test-runner agent again. If it has enough info, continue with what you have.

### Data Issues
- If dynamic content prevents exploration: Note and try alternative approaches
- If rate limited: Implement delays and retry

## Integration with Other Commands

### Feeds into /generate-test-cases
- Provides actual UI elements for test steps
- Documents real workflows for test scenarios
- Identifies edge cases to test

### Updates from /process-event
- New exploration findings can be processed as events
- Discovered bugs trigger issue creation
- Feature discoveries update test coverage

### Enhances /run-tests
- Tests use discovered selectors
- Validation based on actual behavior
- More reliable test execution

${KNOWLEDGE_BASE_UPDATE_INSTRUCTIONS}`,
  // No optional subagents; the test-runner subagent is mandatory for this task.
  optionalSubagents: [],
  requiredSubagents: ["test-runner"]
};
592
+
593
+ // src/tasks/templates/clarification-instructions.ts
594
+ var CLARIFICATION_INSTRUCTIONS = `
595
+ ## Clarification Protocol
596
+
597
+ Before proceeding with test creation or execution, ensure requirements are clear and testable. Use this protocol to detect ambiguity, assess its severity, and determine the appropriate action.
598
+
599
+ ### Step {{STEP_NUMBER}}.0: Check for Pending Clarification
600
+
601
+ Before starting, check if this task is resuming from a blocked clarification:
602
+
603
+ 1. **Check $ARGUMENTS for clarification data:**
604
+ - If \`$ARGUMENTS.clarification\` exists, this task is resuming with a clarification response
605
+ - Extract: \`clarification\` (the user's answer), \`originalArgs\` (original task parameters)
606
+
607
+ 2. **If clarification is present:**
608
+ - Read \`.bugzy/runtime/blocked-task-queue.md\`
609
+ - Find and remove your task's entry from the queue (update the file)
610
+ - Proceed using the clarification as if user just provided the answer
611
+ - Skip ambiguity detection for the clarified aspect
612
+
613
+ 3. **If no clarification in $ARGUMENTS:** Proceed normally with ambiguity detection below.
614
+
615
+ ### Step {{STEP_NUMBER}}.1: Detect Ambiguity
616
+
617
+ Scan for ambiguity signals:
618
+
619
+ **Language:** Vague terms ("fix", "improve", "better", "like", "mixed up"), relative terms without reference ("faster", "more"), undefined scope ("the ordering", "the fields", "the page"), modal ambiguity ("should", "could" vs "must", "will")
620
+
621
+ **Details:** Missing acceptance criteria (no clear PASS/FAIL), no examples/mockups, incomplete field/element lists, unclear role behavior differences, unspecified error scenarios
622
+
623
+ **Interpretation:** Multiple valid interpretations, contradictory information (description vs comments), implied vs explicit requirements
624
+
625
+ **Context:** No reference documentation, "RELEASE APPROVED" without criteria, quick ticket creation, assumes knowledge ("as you know...", "obviously...")
626
+
627
+ **Quick Check:**
628
+ - [ ] Success criteria explicitly defined? (PASS if X, FAIL if Y)
629
+ - [ ] All affected elements specifically listed? (field names, URLs, roles)
630
+ - [ ] Only ONE reasonable interpretation?
631
+ - [ ] Examples, screenshots, or mockups provided?
632
+ - [ ] Consistent with existing system patterns?
633
+ - [ ] Can write test assertions without assumptions?
634
+
635
+ ### Step {{STEP_NUMBER}}.2: Assess Severity
636
+
637
+ If ambiguity is detected, assess its severity:
638
+
639
+ | Severity | Characteristics | Examples | Action |
640
+ |----------|----------------|----------|--------|
641
+ | \u{1F534} **CRITICAL** | Expected behavior undefined/contradictory; test outcome unpredictable; core functionality unclear; success criteria missing; multiple interpretations = different strategies | "Fix the issue" (what issue?), "Improve performance" (which metrics?), "Fix sorting in todo list" (by date? priority? completion status?) | **STOP** - Seek clarification before proceeding |
642
+ | \u{1F7E0} **HIGH** | Core underspecified but direction clear; affects majority of scenarios; vague success criteria; assumptions risky | "Fix ordering" (sequence OR visibility?), "Add validation" (what? messages?), "Update dashboard" (which widgets?) | **STOP** - Seek clarification before proceeding |
643
+ | \u{1F7E1} **MEDIUM** | Specific details missing; general requirements clear; affects subset of cases; reasonable low-risk assumptions possible; wrong assumption = test updates not strategy overhaul | Missing field labels, unclear error message text, undefined timeouts, button placement not specified, date formats unclear | **PROCEED** - (1) Moderate exploration, (2) Document assumptions: "Assuming X because Y", (3) Proceed with creation/execution, (4) Async clarification (team-communicator), (5) Mark [ASSUMED: description] |
644
+ | \u{1F7E2} **LOW** | Minor edge cases; documentation gaps don't affect execution; optional/cosmetic elements; minimal impact | Tooltip text, optional field validation, icon choice, placeholder text, tab order | **PROCEED** - (1) Mark [TO BE CLARIFIED: description], (2) Proceed, (3) Mention in report "Minor Details", (4) No blocking/async clarification |
645
+
646
+ ### Step {{STEP_NUMBER}}.3: Check Memory for Similar Clarifications
647
+
648
+ Before asking, check if similar question was answered:
649
+
650
+ **Process:**
651
+ 1. **Query team-communicator memory** - Search by feature name, ambiguity pattern, ticket keywords
652
+ 2. **Review past Q&A** - Similar question asked? What was answer? Applicable now?
653
+ 3. **Assess reusability:**
654
+ - Directly applicable \u2192 Use answer, no re-ask
655
+ - Partially applicable \u2192 Adapt and reference ("Previously for X, clarified Y. Same here?")
656
+ - Not applicable \u2192 Ask as new
657
+ 4. **Update memory** - Store Q&A with task type, feature, pattern tags
658
+
659
+ **Example:** Query "todo sorting priority" \u2192 Found 2025-01-15: "Should completed todos appear in main list?" \u2192 Answer: "No, move to separate archive view" \u2192 Directly applicable \u2192 Use, no re-ask needed
660
+
661
+ ### Step {{STEP_NUMBER}}.4: Formulate Clarification Questions
662
+
663
+ If clarification needed (CRITICAL/HIGH severity), formulate specific, concrete questions:
664
+
665
+ **Good Questions:** Specific and concrete, provide context, offer options, reference examples, tie to test strategy
666
+
667
+ **Bad Questions:** Too vague/broad, assumptive, multiple questions in one, no context
668
+
669
+ **Template:**
670
+ \`\`\`
671
+ **Context:** [Current understanding]
672
+ **Ambiguity:** [Specific unclear aspect]
673
+ **Question:** [Specific question with options]
674
+ **Why Important:** [Testing strategy impact]
675
+
676
+ Example:
677
+ Context: TODO-456 "Fix the sorting in the todo list so items appear in the right order"
678
+ Ambiguity: "sorting" = (A) by creation date, (B) by due date, (C) by priority level, or (D) custom user-defined order
679
+ Question: Should todos be sorted by due date (soonest first) or priority (high to low)? Should completed items appear in the list or move to archive?
680
+ Why Important: Different sort criteria require different test assertions. Current app shows 15 active todos + 8 completed in mixed order.
681
+ \`\`\`
682
+
683
+ ### Step {{STEP_NUMBER}}.5: Communicate Clarification Request
684
+
685
+ **For Slack-Triggered Tasks:** Use team-communicator subagent:
686
+ \`\`\`
687
+ Ask clarification in Slack thread:
688
+ Context: [From ticket/description]
689
+ Ambiguity: [Describe ambiguity]
690
+ Severity: [CRITICAL/HIGH]
691
+ Questions:
692
+ 1. [First specific question]
693
+ 2. [Second if needed]
694
+
695
+ Clarification needed to proceed. I'll wait for response before testing.
696
+ \`\`\`
697
+
698
+ **For Manual/API Triggers:** Include in task output:
699
+ \`\`\`markdown
700
+ ## \u26A0\uFE0F Clarification Required Before Testing
701
+
702
+ **Ambiguity:** [Description]
703
+ **Severity:** [CRITICAL/HIGH]
704
+
705
+ ### Questions:
706
+ 1. **Question:** [First question]
707
+ - Context: [Provide context]
708
+ - Options: [If applicable]
709
+ - Impact: [Testing impact]
710
+
711
+ **Action Required:** Provide clarification. Testing cannot proceed.
712
+ **Current Observation:** [What exploration revealed - concrete examples]
713
+ \`\`\`
714
+
715
+ ### Step {{STEP_NUMBER}}.5.1: Register Blocked Task (CRITICAL/HIGH only)
716
+
717
+ When asking a CRITICAL or HIGH severity question that blocks progress, register the task in the blocked queue so it can be automatically re-triggered when clarification arrives.
718
+
719
+ **Update \`.bugzy/runtime/blocked-task-queue.md\`:**
720
+
721
+ 1. Read the current file (create if doesn't exist)
722
+ 2. Add a new row to the Queue table
723
+
724
+ \`\`\`markdown
725
+ # Blocked Task Queue
726
+
727
+ Tasks waiting for clarification responses.
728
+
729
+ | Task Slug | Question | Original Args |
730
+ |-----------|----------|---------------|
731
+ | generate-test-plan | Should todos be sorted by date or priority? | \`{"ticketId": "TODO-456"}\` |
732
+ \`\`\`
733
+
734
+ **Entry Fields:**
735
+ - **Task Slug**: The task slug (e.g., \`generate-test-plan\`) - used for re-triggering
736
+ - **Question**: The clarification question asked (so LLM can match responses)
737
+ - **Original Args**: JSON-serialized \`$ARGUMENTS\` wrapped in backticks
738
+
739
+ **Purpose**: The LLM processor reads this file and matches user responses to pending questions. When a match is found, it re-queues the task with the clarification.
740
+
741
+ ### Step {{STEP_NUMBER}}.6: Wait or Proceed Based on Severity
742
+
743
+ **CRITICAL/HIGH \u2192 STOP and Wait:**
744
+ - Do NOT create tests, run tests, or make assumptions
745
+ - Wait for clarification, resume after answer
746
+ - *Rationale: Wrong assumptions = incorrect tests, false results, wasted time*
747
+
748
+ **MEDIUM \u2192 Proceed with Documented Assumptions:**
749
+ - Perform moderate exploration, document assumptions, proceed with creation/execution
750
+ - Ask clarification async (team-communicator), mark results "based on assumptions"
751
+ - Update tests after clarification received
752
+ - *Rationale: Waiting blocks progress; documented assumptions allow forward movement with later corrections*
753
+
754
+ **LOW \u2192 Proceed and Mark:**
755
+ - Proceed with creation/execution, mark gaps [TO BE CLARIFIED] or [ASSUMED]
756
+ - Mention in report but don't prioritize, no blocking
757
+ - *Rationale: Details don't affect strategy/results significantly*
758
+
759
+ ### Step {{STEP_NUMBER}}.7: Document Clarification in Results
760
+
761
+ When reporting test results, always include an "Ambiguities" section if clarification occurred:
762
+
763
+ \`\`\`markdown
764
+ ## Ambiguities Encountered
765
+
766
+ ### Clarification: [Topic]
767
+ - **Severity:** [CRITICAL/HIGH/MEDIUM/LOW]
768
+ - **Question Asked:** [What was asked]
769
+ - **Response:** [Answer received, or "Awaiting response"]
770
+ - **Impact:** [How this affected testing]
771
+ - **Assumption Made:** [If proceeded with assumption]
772
+ - **Risk:** [What could be wrong if assumption is incorrect]
773
+
774
+ ### Resolution:
775
+ [How the clarification was resolved and incorporated into testing]
776
+ \`\`\`
777
+
778
+ ---
779
+
780
+ ## Remember:
781
+
782
+ \u{1F6D1} **Block for CRITICAL/HIGH** | \u2705 **Ask correctly > guess poorly** | \u{1F4DD} **Document MEDIUM assumptions** | \u{1F50D} **Check memory first** | \u{1F3AF} **Specific questions \u2192 specific answers**
783
+ `;
784
+
785
+ // src/tasks/library/generate-test-cases.ts
786
+ var generateTestCasesTask = {
787
+ slug: TASK_SLUGS.GENERATE_TEST_CASES,
788
+ name: "Generate Test Cases",
789
+ description: "Generate manual test case documentation AND automated Playwright test scripts from test plan",
790
+ frontmatter: {
791
+ description: "Generate manual test case documentation AND automated Playwright test scripts from test plan",
792
+ "argument-hint": "--type [exploratory|functional|regression|smoke] --focus [optional-feature]"
793
+ },
794
+ baseContent: `# Generate Test Cases Command
795
+
796
+ ## SECURITY NOTICE
797
+ **CRITICAL**: Never read the \`.env\` file. It contains ONLY secrets (passwords, API keys).
798
+ - **Read \`.env.testdata\`** for non-secret test data (TEST_BASE_URL, TEST_OWNER_EMAIL, etc.)
799
+ - \`.env.testdata\` contains actual values for test data, URLs, and non-sensitive configuration
800
+ - For secrets: Reference variable names only (TEST_OWNER_PASSWORD) - values are injected at runtime
801
+ - The \`.env\` file access is blocked by settings.json
802
+
803
+ Generate comprehensive test artifacts including BOTH manual test case documentation AND automated Playwright test scripts.
804
+
805
+ ## Overview
806
+
807
+ This command generates:
808
+ 1. **Manual Test Case Documentation** (in \`./test-cases/\`) - Human-readable test cases in markdown format
809
+ 2. **Automated Playwright Tests** (in \`./tests/specs/\`) - Executable TypeScript test scripts
810
+ 3. **Page Object Models** (in \`./tests/pages/\`) - Reusable page classes for automated tests
811
+ 4. **Supporting Files** (fixtures, helpers, components) - As needed for test automation
812
+
813
+ ## Arguments
814
+ Arguments: $ARGUMENTS
815
+
816
+ ## Parse Arguments
817
+ Extract the following from arguments:
818
+ - **type**: Test type (exploratory, functional, regression, smoke) - defaults to functional
819
+ - **focus**: Optional specific feature or section to focus on
820
+
821
+ ${KNOWLEDGE_BASE_READ_INSTRUCTIONS}
822
+
823
+ ## Process
824
+
825
+ ### Step 1: Gather Context
826
+
827
+ #### 1.1 Read Test Plan
828
+ Read the test plan from \`test-plan.md\` to understand:
829
+ - Test items and features
830
+ - Testing approach and automation strategy
831
+ - Test Automation Strategy section (automated vs exploratory)
832
+ - Pass/fail criteria
833
+ - Test environment and data requirements
834
+ - Automation decision criteria
835
+
836
+ #### 1.2 Check Existing Test Cases and Tests
837
+ - List all files in \`./test-cases/\` to understand existing manual test coverage
838
+ - List all files in \`./tests/specs/\` to understand existing automated tests
839
+ - Determine next test case ID (TC-XXX format)
840
+ - Identify existing Page Objects in \`./tests/pages/\`
841
+ - Avoid creating overlapping test cases or duplicate automation
842
+
843
+ {{DOCUMENTATION_RESEARCHER_INSTRUCTIONS}}
844
+
845
+ ### Step 1.4: Explore Features (If Needed)
846
+
847
+ If documentation is insufficient or ambiguous, perform adaptive exploration to understand actual feature behavior before creating test cases.
848
+
849
+ ${EXPLORATION_INSTRUCTIONS.replace(/{{STEP_NUMBER}}/g, "1.4")}
850
+
851
+ ### Step 1.5: Clarify Ambiguities
852
+
853
+ If exploration or documentation review reveals ambiguous requirements, use the clarification protocol to resolve them before generating test cases.
854
+
855
+ ${CLARIFICATION_INSTRUCTIONS.replace(/{{STEP_NUMBER}}/g, "1.5")}
856
+
857
+ **Important Notes:**
858
+ - **CRITICAL/HIGH ambiguities:** STOP test case generation and seek clarification
859
+ - **MEDIUM ambiguities:** Document assumptions explicitly in test case with [ASSUMED: reason]
860
+ - **LOW ambiguities:** Mark with [TO BE CLARIFIED: detail] in test case notes section
861
+
862
+ ### Step 1.6: Organize Test Scenarios by Area
863
+
864
+ Based on exploration and documentation, organize test scenarios by feature area/component:
865
+
866
+ **Group scenarios into areas** (e.g., Authentication, Dashboard, Checkout, Profile Management):
867
+ - Each area should be a logical feature grouping
868
+ - Areas should be relatively independent for parallel test execution
869
+ - Consider the application's navigation structure and user flows
870
+
871
+ **For each area, identify scenarios**:
872
+
873
+ 1. **Critical User Paths** (must automate as smoke tests):
874
+ - Login/authentication flows
875
+ - Core feature workflows
876
+ - Data creation/modification flows
877
+ - Critical business transactions
878
+
879
+ 2. **Happy Path Scenarios** (automate for regression):
880
+ - Standard user workflows
881
+ - Common use cases
882
+ - Typical data entry patterns
883
+
884
+ 3. **Error Handling Scenarios** (evaluate automation ROI):
885
+ - Validation error messages
886
+ - Network error handling
887
+ - Permission/authorization errors
888
+
889
+ 4. **Edge Cases** (consider manual testing):
890
+ - Rare scenarios (<1% occurrence)
891
+ - Complex exploratory scenarios
892
+ - Visual/UX validation requiring judgment
893
+ - Features in heavy flux
894
+
895
+ **Output**: Test scenarios organized by area with automation decisions for each
896
+
897
+ Example structure:
898
+ - **Authentication**: TC-001 Valid login (smoke, automate), TC-002 Invalid password (automate), TC-003 Password reset (automate)
899
+ - **Dashboard**: TC-004 View dashboard widgets (smoke, automate), TC-005 Filter data by date (automate), TC-006 Export data (manual - rare use)
900
+
901
+ ### Step 1.7: Generate All Manual Test Case Files
902
+
903
+ Generate ALL manual test case markdown files in the \`./test-cases/\` directory BEFORE invoking the test-code-generator agent.
904
+
905
+ **For each test scenario from Step 1.6:**
906
+
907
+ 1. **Create test case file** in \`./test-cases/\` with format \`TC-XXX-feature-description.md\`
908
+ 2. **Include frontmatter** with:
909
+ - \`id:\` TC-XXX (sequential ID)
910
+ - \`title:\` Clear, descriptive title
911
+ - \`automated:\` true/false (based on automation decision from Step 1.6)
912
+ - \`automated_test:\` (leave empty - will be filled by subagent when automated)
913
+ - \`type:\` exploratory/functional/regression/smoke
914
+ - \`area:\` Feature area/component
915
+ 3. **Write test case content**:
916
+ - **Objective**: Clear description of what is being tested
917
+ - **Preconditions**: Setup requirements, test data needed
918
+ - **Test Steps**: Numbered, human-readable steps
919
+ - **Expected Results**: What should happen at each step
920
+ - **Test Data**: Environment variables to use (e.g., \${TEST_BASE_URL}, \${TEST_OWNER_EMAIL})
921
+ - **Notes**: Any assumptions, clarifications needed, or special considerations
922
+
923
+ **Output**: All manual test case markdown files created in \`./test-cases/\` with automation flags set
924
+
925
+ ### Step 2: Automate Test Cases Area by Area
926
+
927
+ **IMPORTANT**: Process each feature area separately to enable incremental, focused test creation.
928
+
929
+ **For each area from Step 1.6**, invoke the test-code-generator agent:
930
+
931
+ #### Step 2.1: Prepare Area Context
932
+
933
+ Before invoking the agent, identify the test cases for the current area:
934
+ - Current area name
935
+ - Test case files for this area (e.g., TC-001-valid-login.md, TC-002-invalid-password.md)
936
+ - Which test cases are marked for automation (automated: true)
937
+ - Test type: {type}
938
+ - Test plan reference: test-plan.md
939
+ - Existing automated tests in ./tests/specs/
940
+ - Existing Page Objects in ./tests/pages/
941
+
942
+ #### Step 2.2: Invoke test-code-generator Agent
943
+
944
+ Use the test-code-generator agent for the current area with the following context:
945
+
946
+ **Agent Invocation:**
947
+ "Use the test-code-generator agent to automate test cases for the [AREA_NAME] area.
948
+
949
+ **Context:**
950
+ - Area: [AREA_NAME]
951
+ - Manual test case files to automate: [list TC-XXX files marked with automated: true]
952
+ - Test type: {type}
953
+ - Test plan: test-plan.md
954
+ - Manual test cases directory: ./test-cases/
955
+ - Existing automated tests: ./tests/specs/
956
+ - Existing Page Objects: ./tests/pages/
957
+
958
+ **The agent should:**
959
+ 1. Read the manual test case files for this area
960
+ 2. Check existing Page Object infrastructure for this area
961
+ 3. Explore the feature area to understand implementation (gather selectors, URLs, flows)
962
+ 4. Build missing Page Objects and supporting code
963
+ 5. For each test case marked \`automated: true\`:
964
+ - Create automated Playwright test in ./tests/specs/
965
+ - Update the manual test case file to reference the automated test path
966
+ 6. Run and iterate on each test until it passes or fails with a product bug
967
+ 7. Update .env.testdata with any new variables
968
+
969
+ **Focus only on the [AREA_NAME] area** - do not automate tests for other areas yet."
970
+
971
+ #### Step 2.3: Verify Area Completion
972
+
973
+ After the agent completes the area, verify:
974
+ - Manual test case files updated with automated_test references
975
+ - Automated tests created for all test cases marked automated: true
976
+ - Tests are passing (or failing with documented product bugs)
977
+ - Page Objects created/updated for the area
978
+
979
+ #### Step 2.4: Repeat for Next Area
980
+
981
+ Move to the next area and repeat Steps 2.1-2.3 until all areas are complete.
982
+
983
+ **Benefits of area-by-area approach**:
984
+ - Agent focuses on one feature at a time
985
+ - POMs built incrementally as needed
986
+ - Tests verified before moving to next area
987
+ - Easier to manage and track progress
988
+ - Can pause/resume between areas if needed
989
+
990
+ ### Step 2.5: Validate Generated Artifacts
991
+
992
+ After the test-code-generator completes, verify:
993
+
994
+ 1. **Manual Test Cases (in \`./test-cases/\`)**:
995
+ - Each has unique TC-XXX ID
996
+ - Frontmatter includes \`automated: true/false\` flag
997
+ - If automated, includes \`automated_test\` path reference
998
+ - Contains human-readable steps and expected results
999
+ - References environment variables for test data
1000
+
1001
+ 2. **Automated Tests (in \`./tests/specs/\`)**:
1002
+ - Organized by feature in subdirectories
1003
+ - Each test file references manual test case ID in comments
1004
+ - Uses Page Object Model pattern
1005
+ - Follows role-based selector priority
1006
+ - Uses environment variables for test data
1007
+ - Includes proper TypeScript typing
1008
+
1009
+ 3. **Page Objects (in \`./tests/pages/\`)**:
1010
+ - Extend BasePage class
1011
+ - Use semantic selectors (getByRole, getByLabel, getByText)
1012
+ - Contain only actions, no assertions
1013
+ - Properly typed with TypeScript
1014
+
1015
+ 4. **Supporting Files**:
1016
+ - Fixtures created for common setup (in \`./tests/fixtures/\`)
1017
+ - Helper functions for data generation (in \`./tests/helpers/\`)
1018
+ - Component objects for reusable UI elements (in \`./tests/components/\`)
1019
+ - Types defined as needed (in \`./tests/types/\`)
1020
+
1021
+ ### Step 3: Create Directories if Needed
1022
+
1023
+ Ensure required directories exist:
1024
+ \`\`\`bash
1025
+ mkdir -p ./test-cases
1026
+ mkdir -p ./tests/specs
1027
+ mkdir -p ./tests/pages
1028
+ mkdir -p ./tests/components
1029
+ mkdir -p ./tests/fixtures
1030
+ mkdir -p ./tests/helpers
1031
+ \`\`\`
1032
+
1033
+ ### Step 4: Update .env.testdata (if needed)
1034
+
1035
+ If new environment variables were introduced:
1036
+ - Read current \`.env.testdata\`
1037
+ - Add new TEST_* variables with empty values
1038
+ - Group variables logically with comments
1039
+ - Document what each variable is for
1040
+
1041
+ ${KNOWLEDGE_BASE_UPDATE_INSTRUCTIONS}
1042
+
1043
+ {{TEAM_COMMUNICATOR_INSTRUCTIONS}}
1044
+
1045
+ ### Step 5: Final Summary
1046
+
1047
+ Provide a comprehensive summary showing:
1048
+
1049
+ **Manual Test Cases:**
1050
+ - Number of manual test cases created
1051
+ - List of test case files with IDs and titles
1052
+ - Automation status for each (automated: yes/no)
1053
+
1054
+ **Automated Tests:**
1055
+ - Number of automated test scripts created
1056
+ - List of spec files with test counts
1057
+ - Page Objects created or updated
1058
+ - Fixtures and helpers added
1059
+
1060
+ **Test Coverage:**
1061
+ - Features covered by manual tests
1062
+ - Features covered by automated tests
1063
+ - Areas kept manual-only (and why)
1064
+
1065
+ **Next Steps:**
1066
+ - Command to run automated tests: \`npx playwright test\`
1067
+ - Instructions to run specific test file
1068
+ - Note about copying .env.testdata to .env
1069
+ - Mention any exploration needed for edge cases
1070
+
1071
+ ### Important Notes
1072
+
1073
+ - **Both Manual AND Automated**: Generate both artifacts - they serve different purposes
1074
+ - **Manual Test Cases**: Documentation, reference, can be executed manually when needed
1075
+ - **Automated Tests**: Fast, repeatable, for CI/CD and regression testing
1076
+ - **Automation Decision**: Not all test cases need automation - rare edge cases can stay manual
1077
+ - **Linking**: Manual test cases reference automated tests; automated tests reference manual test case IDs
1078
+ - **Two-Phase Workflow**: First generate all manual test cases (Step 1.7), then automate area-by-area (Step 2)
1079
+ - **Ambiguity Handling**: Use exploration (Step 1.4) and clarification (Step 1.5) protocols before generating
1080
+ - **Environment Variables**: Use \`process.env.VAR_NAME\` in tests, update .env.testdata as needed
1081
+ - **Test Independence**: Each test must be runnable in isolation and in parallel`,
1082
+ optionalSubagents: [
1083
+ {
1084
+ role: "documentation-researcher",
1085
+ contentBlock: `#### 1.3 Gather Product Documentation
1086
+
1087
+ Use the documentation-researcher agent to gather comprehensive product documentation:
1088
+
1089
+ \`\`\`
1090
+ Use the documentation-researcher agent to explore all available product documentation, specifically focusing on:
1091
+ - UI elements and workflows
1092
+ - User interactions and navigation paths
1093
+ - Form fields and validation rules
1094
+ - Error messages and edge cases
1095
+ - Authentication and authorization flows
1096
+ - Business rules and constraints
1097
+ - API endpoints for test data setup
1098
+ \`\`\``
1099
+ },
1100
+ {
1101
+ role: "team-communicator",
1102
+ contentBlock: `### Step 4.5: Team Communication
1103
+
1104
+ Use the team-communicator agent to notify the product team about the new test cases and automated tests:
1105
+
1106
+ \`\`\`
1107
+ Use the team-communicator agent to:
1108
+ 1. Post an update about test case and automation creation
1109
+ 2. Provide summary of coverage:
1110
+ - Number of manual test cases created
1111
+ - Number of automated tests created
1112
+ - Features covered by automation
1113
+ - Areas kept manual-only (and why)
1114
+ 3. Highlight key automated test scenarios
1115
+ 4. Share command to run automated tests: npx playwright test
1116
+ 5. Ask for team review and validation
1117
+ 6. Mention any areas needing exploration or clarification
1118
+ 7. Use appropriate channel and threading for the update
1119
+ \`\`\`
1120
+
1121
+ The team communication should include:
1122
+ - **Test artifacts created**: Manual test cases + automated tests count
1123
+ - **Automation coverage**: Which features are now automated
1124
+ - **Manual-only areas**: Why some tests are kept manual (rare scenarios, exploratory)
1125
+ - **Key automated scenarios**: Critical paths now covered by automation
1126
+ - **Running tests**: Command to execute automated tests
1127
+ - **Review request**: Ask team to validate scenarios and review test code
1128
+ - **Next steps**: Plans for CI/CD integration or additional test coverage
1129
+
1130
+ **Update team communicator memory:**
1131
+ - Record this communication
1132
+ - Note test case and automation creation
1133
+ - Track team feedback on automation approach
1134
+ - Document any clarifications requested`
1135
+ }
1136
+ ],
1137
+ requiredSubagents: ["test-runner", "test-code-generator"]
1138
+ };
1139
+
1140
+ // src/tasks/library/generate-test-plan.ts
1141
+ var generateTestPlanTask = {
1142
+ slug: TASK_SLUGS.GENERATE_TEST_PLAN,
1143
+ name: "Generate Test Plan",
1144
+ description: "Generate a comprehensive test plan from product description",
1145
+ frontmatter: {
1146
+ description: "Generate a comprehensive test plan from product description",
1147
+ "argument-hint": "<product-description>"
1148
+ },
1149
+ baseContent: `# Generate Test Plan Command
1150
+
1151
+ ## SECURITY NOTICE
1152
+ **CRITICAL**: Never read the \`.env\` file. It contains ONLY secrets (passwords, API keys).
1153
+ - **Read \`.env.testdata\`** for non-secret test data (TEST_BASE_URL, TEST_OWNER_EMAIL, etc.)
1154
+ - \`.env.testdata\` contains actual values for test data, URLs, and non-sensitive configuration
1155
+ - For secrets: Reference variable names only (TEST_OWNER_PASSWORD) - values are injected at runtime
1156
+ - The \`.env\` file access is blocked by settings.json
1157
+
1158
+ Generate a comprehensive test plan from product description following the Brain Module specifications.
1159
+
1160
+ ## Arguments
1161
+ Product description: $ARGUMENTS
1162
+
1163
+ ${KNOWLEDGE_BASE_READ_INSTRUCTIONS}
1164
+
1165
+ ## Process
1166
+
1167
+ ### Step 1: Load project context
1168
+ Read \`.bugzy/runtime/project-context.md\` to understand:
1169
+ - Project overview and key platform features
1170
+ - SDLC methodology and sprint duration
1171
+ - Testing environment and goals
1172
+ - Technical stack and constraints
1173
+ - QA workflow and processes
1174
+
1175
+ ### Step 1.5: Process the product description
1176
+ Use the product description provided directly in the arguments, enriched with project context understanding.
1177
+
1178
+ ### Step 1.6: Initialize environment variables tracking
1179
+ Create a list to track all TEST_ prefixed environment variables discovered throughout the process.
1180
+
1181
+ {{DOCUMENTATION_RESEARCHER_INSTRUCTIONS}}
1182
+
1183
+ ### Step 1.7: Explore Product (If Needed)
1184
+
1185
+ If product description is vague or incomplete, perform adaptive exploration to understand actual product features and behavior.
1186
+
1187
+ ${EXPLORATION_INSTRUCTIONS.replace(/{{STEP_NUMBER}}/g, "1.7")}
1188
+
1189
+ ### Step 1.8: Clarify Ambiguities
1190
+
1191
+ If exploration or product description reveals ambiguous requirements, use the clarification protocol before generating the test plan.
1192
+
1193
+ ${CLARIFICATION_INSTRUCTIONS.replace(/{{STEP_NUMBER}}/g, "1.8")}
1194
+
1195
+ **Important Notes:**
1196
+ - **CRITICAL/HIGH ambiguities:** STOP test plan generation and seek clarification
1197
+ - Examples: Undefined core features, unclear product scope, contradictory requirements
1198
+ - **MEDIUM ambiguities:** Document assumptions in test plan with [ASSUMED: reason] and seek async clarification
1199
+ - Examples: Missing field lists, unclear validation rules, vague user roles
1200
+ - **LOW ambiguities:** Mark with [TO BE EXPLORED: detail] in test plan for future investigation
1201
+ - Examples: Optional features, cosmetic details, non-critical edge cases
1202
+
1203
+ ### Step 3: Prepare the test plan generation context
1204
+
1205
+ **After ensuring requirements are clear through exploration and clarification:**
1206
+
1207
+ Based on the gathered information:
1208
+ - **goal**: Extract the main purpose and objectives from all available documentation
1209
+ - **knowledge**: Combine product description with discovered documentation insights
1210
+ - **testPlan**: Use the standard test plan template structure, enriched with documentation findings
1211
+ - **gaps**: Identify areas lacking documentation that will need exploration
1212
+
1213
+ ### Step 4: Generate the test plan using the prompt template
1214
+
1215
+ You are an expert QA Test Plan Writer with expertise in both manual and automated testing strategies. Using the gathered information and context from the product description provided, you will now produce a comprehensive test plan in Markdown format that includes an automation strategy.
1216
+
1217
+ Writing Instructions:
1218
+ - **Use Product Terminology:** Incorporate exact terms and labels from the product description for features and UI elements (to ensure the test plan uses official naming).
1219
+ - **Testing Scope:** The plan covers both automated E2E testing via Playwright and exploratory manual testing. Focus on what a user can do and see in a browser.
1220
+ - **Test Data - IMPORTANT:**
1221
+ - DO NOT include test data values in the test plan body
1222
+ - Test data goes ONLY to the \`.env.testdata\` file
1223
+ - In the test plan, reference \`.env.testdata\` for test data requirements
1224
+ - Define test data as environment variables prefixed with TEST_ (e.g., TEST_BASE_URL, TEST_USER_EMAIL, TEST_USER_PASSWORD)
1225
+ - DO NOT GENERATE VALUES FOR THE ENV VARS, ONLY THE KEYS
1226
+ - Track all TEST_ variables for extraction to .env.testdata in Step 7
1227
+ - **DO NOT INCLUDE TEST SCENARIOS**
1228
+ - **Incorporate All Relevant Info:** If the product description mentions specific requirements, constraints, or acceptance criteria (such as field validations, role-based access rules, important parameters), make sure these are reflected in the test plan. Do not add anything not supported by the given information.
1229
+ - **Test Automation Strategy Section - REQUIRED:** Include a comprehensive "Test Automation Strategy" section with the following subsections:
1230
+
1231
+ **## Test Automation Strategy**
1232
+
1233
+ ### Automated Test Coverage
1234
+ - Identify critical user paths to automate (login, checkout, core features)
1235
+ - Define regression test scenarios for automation
1236
+ - Specify API endpoints that need automated testing
1237
+ - List smoke test scenarios for CI/CD pipeline
1238
+
1239
+ ### Exploratory Testing Areas
1240
+ - New features not yet automated
1241
+ - Complex edge cases requiring human judgment
1242
+ - Visual/UX validation requiring subjective assessment
1243
+ - Scenarios that are not cost-effective to automate
1244
+
1245
+ ### Test Data Management
1246
+ - Environment variables strategy (which vars go in .env.testdata vs .env)
1247
+ - Dynamic test data generation approach (use data generators)
1248
+ - API-based test data setup (10-20x faster than UI)
1249
+ - Test data isolation and cleanup strategy
1250
+
1251
+ ### Automation Approach
1252
+ - **Framework:** Playwright + TypeScript (already scaffolded)
1253
+ - **Pattern:** Page Object Model for all pages
1254
+ - **Selectors:** Prioritize role-based selectors (getByRole, getByLabel, getByText)
1255
+ - **Components:** Reusable component objects for common UI elements
1256
+ - **Fixtures:** Custom fixtures for authenticated sessions and common setup
1257
+ - **API for Speed:** Use Playwright's request context to create test data via API
1258
+ - **Best Practices:** Reference \`.bugzy/runtime/testing-best-practices.md\` for patterns
1259
+
1260
+ ### Test Organization
1261
+ - Automated tests location: \`./tests/specs/[feature]/\`
1262
+ - Page Objects location: \`./tests/pages/\`
1263
+ - Manual test cases location: \`./test-cases/\` (human-readable documentation)
1264
+ - Test case naming: TC-XXX-feature-description.md
1265
+ - Automated test naming: feature.spec.ts
1266
+
1267
+ ### Automation Decision Criteria
1268
+ Define which scenarios warrant automation:
1269
+ - \u2705 Automate: Frequent execution, critical paths, regression tests, CI/CD integration
1270
+ - \u274C Keep Manual: Rare edge cases, exploratory tests, visual validation, one-time checks
1271
+
1272
+ ### Step 5: Create the test plan file
1273
+
1274
+ Read the test plan template from \`.bugzy/runtime/templates/test-plan-template.md\` and use it as the base structure. Fill in the placeholders with information extracted from BOTH the product description AND documentation research:
1275
+
1276
+ 1. Read the template file from \`.bugzy/runtime/templates/test-plan-template.md\`
1277
+ 2. Replace placeholders like:
1278
+ - \`[ProjectName]\` with the actual project name from the product description
1279
+ - \`[Date]\` with the current date
1280
+ - Feature sections with actual features identified from all documentation sources
1281
+ - Test data requirements based on the product's needs and API documentation
1282
+ - Risks based on the complexity, known issues, and technical constraints
1283
+ 3. Add any product-specific sections that may be needed based on discovered documentation
1284
+ 4. **Mark ambiguities based on severity:**
1285
+ - CRITICAL/HIGH: Should be clarified before plan creation (see Step 1.8)
1286
+ - MEDIUM: Mark with [ASSUMED: reason] and note assumption
1287
+ - LOW: Mark with [TO BE EXPLORED: detail] for future investigation
1288
+ 5. Include references to source documentation for traceability
1289
+
1290
+ ### Step 6: Save the test plan
1291
+
1292
+ Save the generated test plan to a file named \`test-plan.md\` in the project root with appropriate frontmatter:
1293
+
1294
+ \`\`\`yaml
1295
+ ---
1296
+ version: 1.0.0
1297
+ lifecycle_phase: initial
1298
+ created_at: [current date]
1299
+ updated_at: [current date]
1300
+ last_exploration: null
1301
+ total_discoveries: 0
1302
+ status: draft
1303
+ author: claude
1304
+ tags: [functional, security, performance]
1305
+ ---
1306
+ \`\`\`
1307
+
1308
+ ### Step 7: Extract and save environment variables
1309
+
1310
+ **CRITICAL**: Test data values must ONLY go to .env.testdata, NOT in the test plan document.
1311
+
1312
+ After saving the test plan:
1313
+
1314
+ 1. **Parse the test plan** to find all TEST_ prefixed environment variables mentioned:
1315
+ - Look in the Testing Environment section
1316
+ - Search for any TEST_ variables referenced
1317
+ - Extract variables from configuration or setup sections
1318
+ - Common patterns include: TEST_BASE_URL, TEST_USER_*, TEST_API_*, TEST_ADMIN_*, etc.
1319
+
1320
+ 2. **Create .env.testdata file** with all discovered variables:
1321
+ \`\`\`bash
1322
+ # Application Configuration
1323
+ TEST_BASE_URL=
1324
+
1325
+ # Test User Credentials
1326
+ TEST_USER_EMAIL=
1327
+ TEST_USER_PASSWORD=
1328
+ TEST_ADMIN_EMAIL=
1329
+ TEST_ADMIN_PASSWORD=
1330
+
1331
+ # API Configuration
1332
+ TEST_API_KEY=
1333
+ TEST_API_SECRET=
1334
+
1335
+ # Other Test Data
1336
+ TEST_DB_NAME=
1337
+ TEST_TIMEOUT=
1338
+ \`\`\`
1339
+
1340
+ 3. **Add helpful comments** for each variable group to guide users in filling values
1341
+
1342
+ 4. **Save the file** as \`.env.testdata\` in the project root
1343
+
1344
+ 5. **Verify test plan references .env.testdata**:
1345
+ - Ensure test plan DOES NOT contain test data values
1346
+ - Ensure test plan references \`.env.testdata\` for test data requirements
1347
+ - Add instruction: "Fill in actual values in .env.testdata before running tests"
1348
+
1349
+ ${KNOWLEDGE_BASE_UPDATE_INSTRUCTIONS}
1350
+
1351
+ {{TEAM_COMMUNICATOR_INSTRUCTIONS}}
1352
+
1353
+ ### Step 8: Final summary
1354
+
1355
+ Provide a summary of:
1356
+ - Test plan created successfully at \`test-plan.md\`
1357
+ - Environment variables extracted to \`.env.testdata\`
1358
+ - Number of TEST_ variables discovered
1359
+ - Instructions for the user to fill in actual values in .env.testdata before running tests`,
1360
+ optionalSubagents: [
1361
+ {
1362
+ role: "documentation-researcher",
1363
+ contentBlock: `### Step 2: Gather comprehensive project documentation
1364
+
1365
+ Use the documentation-researcher agent to explore and gather all available project information and other documentation sources. This ensures the test plan is based on complete and current information.
1366
+
1367
+ \`\`\`
1368
+ Use the documentation-researcher agent to explore all available project documentation related to: $ARGUMENTS
1369
+
1370
+ Specifically gather:
1371
+ - Product specifications and requirements
1372
+ - User stories and acceptance criteria
1373
+ - Technical architecture documentation
1374
+ - API documentation and endpoints
1375
+ - User roles and permissions
1376
+ - Business rules and validations
1377
+ - UI/UX specifications
1378
+ - Known limitations or constraints
1379
+ - Existing test documentation
1380
+ - Bug reports or known issues
1381
+ \`\`\`
1382
+
1383
+ The agent will:
1384
+ 1. Check its memory for previously discovered documentation
1385
+ 2. Explore workspace for relevant pages and databases
1386
+ 3. Build a comprehensive understanding of the product
1387
+ 4. Return synthesized information about all discovered documentation`
1388
+ },
1389
+ {
1390
+ role: "team-communicator",
1391
+ contentBlock: `### Step 7.5: Team Communication
1392
+
1393
+ Use the team-communicator agent to notify the product team about the new test plan:
1394
+
1395
+ \`\`\`
1396
+ Use the team-communicator agent to:
1397
+ 1. Post an update about the test plan creation
1398
+ 2. Provide a brief summary of coverage areas and key features
1399
+ 3. Mention any areas that need exploration or clarification
1400
+ 4. Ask for team review and feedback on the test plan
1401
+ 5. Include a link or reference to the test-plan.md file
1402
+ 6. Use appropriate channel and threading for the update
1403
+ \`\`\`
1404
+
1405
+ The team communication should include:
1406
+ - **Test plan scope**: Brief overview of what will be tested
1407
+ - **Coverage highlights**: Key features and user flows included
1408
+ - **Areas needing clarification**: Any uncertainties discovered during documentation research
1409
+ - **Review request**: Ask team to review and provide feedback
1410
+ - **Next steps**: Mention plan to generate test cases after review
1411
+
1412
+ **Update team communicator memory:**
1413
+ - Record this communication in the team-communicator memory
1414
+ - Note this as a test plan creation communication
1415
+ - Track team response to this type of update`
1416
+ }
1417
+ ],
1418
+ requiredSubagents: ["test-runner"]
1419
+ };
1420
+
1421
+ // src/tasks/library/handle-message.ts
1422
+ var handleMessageTask = {
1423
+ slug: TASK_SLUGS.HANDLE_MESSAGE,
1424
+ name: "Handle Message",
1425
+ description: "Handle team responses and Slack communications, maintaining context for ongoing conversations (LLM-routed)",
1426
+ frontmatter: {
1427
+ description: "Handle team responses and Slack communications, maintaining context for ongoing conversations",
1428
+ "argument-hint": "[slack thread context or team message]"
1429
+ },
1430
+ baseContent: `# Handle Message Command
1431
+
1432
+ ## SECURITY NOTICE
1433
+ **CRITICAL**: Never read the \`.env\` file. It contains ONLY secrets (passwords, API keys).
1434
+ - **Read \`.env.testdata\`** for non-secret environment variables (TEST_BASE_URL, TEST_OWNER_EMAIL, etc.)
1435
+ - \`.env.testdata\` contains actual values for test data, URLs, and non-sensitive configuration
1436
+ - For secrets: Reference variable names only (TEST_OWNER_PASSWORD) - values are injected at runtime
1437
+ - The \`.env\` file access is blocked by settings.json
1438
+
1439
+ Process team responses from Slack threads and handle multi-turn conversations with the product team about testing clarifications, ambiguities, and questions.
1440
+
1441
+ ## Arguments
1442
+ Team message/thread context: $ARGUMENTS
1443
+
1444
+ ${KNOWLEDGE_BASE_READ_INSTRUCTIONS}
1445
+
1446
+ ## Process
1447
+
1448
+ ### Step 0: Detect Message Intent and Load Handler
1449
+
1450
+ Before processing the message, identify the intent type to load the appropriate handler.
1451
+
1452
+ #### 0.1 Extract Intent from Event Payload
1453
+
1454
+ Check the event payload for the \`intent\` field provided by the LLM layer:
1455
+ - If \`intent\` is present, use it directly
1456
+ - Valid intent values: \`question\`, \`feedback\`, \`status\`
1457
+
1458
+ #### 0.2 Fallback Intent Detection (if no intent provided)
1459
+
1460
+ If intent is not in the payload, detect from message patterns:
1461
+
1462
+ | Condition | Intent |
1463
+ |-----------|--------|
1464
+ | Keywords: "status", "progress", "how did", "results", "how many passed" | \`status\` |
1465
+ | Keywords: "bug", "issue", "broken", "doesn't work", "failed", "error" | \`feedback\` |
1466
+ | Question words: "what", "which", "do we have", "is there" about tests/project | \`question\` |
1467
+ | Default (none of above) | \`feedback\` |
1468
+
1469
+ #### 0.3 Load Handler File
1470
+
1471
+ Based on detected intent, load the handler from:
1472
+ \`.bugzy/runtime/handlers/messages/{intent}.md\`
1473
+
1474
+ **Handler files:**
1475
+ - \`question.md\` - Questions about tests, coverage, project details
1476
+ - \`feedback.md\` - Bug reports, test observations, general information
1477
+ - \`status.md\` - Status checks on test runs, task progress
1478
+
1479
+ #### 0.4 Follow Handler Instructions
1480
+
1481
+ **IMPORTANT**: The handler file is authoritative for this intent type.
1482
+
1483
+ 1. Read the handler file completely
1484
+ 2. Follow its processing steps in order
1485
+ 3. Apply its context loading requirements
1486
+ 4. Use its response guidelines
1487
+ 5. Perform any memory updates it specifies
1488
+
1489
+ The handler file contains all necessary processing logic for the detected intent type. Each handler includes:
1490
+ - Specific processing steps for that intent
1491
+ - Context loading requirements
1492
+ - Response guidelines
1493
+ - Memory update instructions
1494
+
1495
+ ${KNOWLEDGE_BASE_UPDATE_INSTRUCTIONS}
1496
+
1497
+ ## Key Principles
1498
+
1499
+ ### Context Preservation
1500
+ - Always maintain full conversation context
1501
+ - Link responses back to original uncertainties
1502
+ - Preserve reasoning chain for future reference
1503
+
1504
+ ### Actionable Responses
1505
+ - Convert team input into concrete actions
1506
+ - Don't let clarifications sit without implementation
1507
+ - Follow through on commitments made to team
1508
+
1509
+ ### Learning Integration
1510
+ - Each interaction improves our understanding
1511
+ - Build knowledge base of team preferences
1512
+ - Refine communication approaches over time
1513
+
1514
+ ### Quality Communication
1515
+ - Acknowledge team input appropriately
1516
+ - Provide updates on actions taken
1517
+ - Ask good follow-up questions when needed
1518
+
1519
+ ## Important Considerations
1520
+
1521
+ ### Thread Organization
1522
+ - Keep related discussions in same thread
1523
+ - Start new threads for new topics
1524
+ - Maintain clear conversation boundaries
1525
+
1526
+ ### Response Timing
1527
+ - Acknowledge important messages promptly
1528
+ - Allow time for implementation before status updates
1529
+ - Don't spam team with excessive communications
1530
+
1531
+ ### Action Prioritization
1532
+ - Address urgent clarifications first
1533
+ - Batch related updates when possible
1534
+ - Focus on high-impact changes
1535
+
1536
+ ### Memory Maintenance
1537
+ - Keep active conversations visible and current
1538
+ - Archive resolved discussions appropriately
1539
+ - Maintain searchable history of resolutions`,
1540
+ optionalSubagents: [],
1541
+ requiredSubagents: ["team-communicator"]
1542
+ };
1543
+
1544
+ // src/tasks/library/process-event.ts
1545
+ var processEventTask = {
1546
+ slug: TASK_SLUGS.PROCESS_EVENT,
1547
+ name: "Process Event",
1548
+ description: "Process external system events (Jira, GitHub, Linear) using handler-defined rules to extract insights and track issues",
1549
+ frontmatter: {
1550
+ description: "Process external system events (Jira, GitHub, Linear) using handler-defined rules to extract insights and track issues",
1551
+ "argument-hint": "[event payload or description]"
1552
+ },
1553
+ baseContent: `# Process Event Command
1554
+
1555
+ ## SECURITY NOTICE
1556
+ **CRITICAL**: Never read the \`.env\` file. It contains ONLY secrets (passwords, API keys).
1557
+ - **Read \`.env.testdata\`** for non-secret environment variables (TEST_BASE_URL, TEST_OWNER_EMAIL, etc.)
1558
+ - \`.env.testdata\` contains actual values for test data, URLs, and non-sensitive configuration
1559
+ - For secrets: Reference variable names only (TEST_OWNER_PASSWORD) - values are injected at runtime
1560
+ - The \`.env\` file access is blocked by settings.json
1561
+
1562
+ Process various types of events using intelligent pattern matching and historical context to maintain and evolve the testing system.
1563
+
1564
+ ## Arguments
1565
+ Arguments: $ARGUMENTS
1566
+
1567
+ ${KNOWLEDGE_BASE_READ_INSTRUCTIONS}
1568
+
1569
+ ## Process
1570
+
1571
+ ### Step 1: Understand Event Context
1572
+
1573
+ Events come from integrated external systems via webhooks or manual input. Common sources include:
1574
+ - **Issue Trackers**: Jira, Linear, GitHub Issues
1575
+ - **Source Control**: GitHub, GitLab
1576
+ - **Communication Tools**: Slack
1577
+
1578
+ **Event structure and semantics vary by source.** Do not interpret events based on generic assumptions. Instead, load the appropriate handler file (Step 2.4) for system-specific processing rules.
1579
+
1580
+ #### Event Context to Extract:
1581
+ - **What happened**: The core event (test failed, PR merged, etc.)
1582
+ - **Where**: Component, service, or area affected
1583
+ - **Impact**: How this affects testing strategy
1584
+ - **Action Required**: What needs to be done in response
1585
+
1586
+ ### Step 1.5: Clarify Unclear Events
1587
+
1588
+ If the event information is incomplete or ambiguous, seek clarification before processing:
1589
+
1590
+ #### Detect Unclear Events
1591
+
1592
+ Events may be unclear in several ways:
1593
+ - **Vague description**: "Something broke", "issue with login" (what specifically?)
1594
+ - **Missing context**: Which component, which environment, which user?
1595
+ - **Contradictory information**: Event data conflicts with other sources
1596
+ - **Unknown references**: Mentions unfamiliar features, components, or systems
1597
+ - **Unclear severity**: Impact or priority is ambiguous
1598
+
1599
+ #### Assess Ambiguity Severity
1600
+
1601
+ Classify the ambiguity level to determine appropriate response:
1602
+
1603
+ **\u{1F534} CRITICAL - STOP and seek clarification:**
1604
+ - Cannot identify which component is affected
1605
+ - Event data is contradictory or nonsensical
1606
+ - Unknown system or feature mentioned
1607
+ - Cannot determine if this requires immediate action
1608
+ - Example: Event says "production is down" but unclear which service
1609
+
1610
+ **\u{1F7E0} HIGH - STOP and seek clarification:**
1611
+ - Vague problem description that could apply to multiple areas
1612
+ - Missing critical context needed for proper response
1613
+ - Unclear which team or system is responsible
1614
+ - Example: "Login issue reported" (login button? auth service? session? which page?)
1615
+
1616
+ **\u{1F7E1} MEDIUM - Proceed with documented assumptions:**
1617
+ - Some details missing but core event is clear
1618
+ - Can infer likely meaning from context
1619
+ - Can proceed but should clarify async
1620
+ - Example: "Test failed on staging" (can assume main staging, but clarify which one)
1621
+
1622
+ **\u{1F7E2} LOW - Mark and proceed:**
1623
+ - Minor details missing (optional context)
1624
+ - Cosmetic or non-critical information gaps
1625
+ - Can document gap and continue
1626
+ - Example: Missing timestamp or exact user who reported issue
1627
+
1628
+ #### Clarification Approach by Severity
1629
+
1630
+ **For CRITICAL/HIGH ambiguity:**
1631
+ 1. **Use team-communicator to ask specific questions**
1632
+ 2. **WAIT for response before proceeding**
1633
+ 3. **Document the clarification request in event history**
1634
+
1635
+ Example clarification messages:
1636
+ - "Event mentions 'login issue' - can you clarify if this is:
1637
+ \u2022 Login button not responding?
1638
+ \u2022 Authentication service failure?
1639
+ \u2022 Session management problem?
1640
+ \u2022 Specific page or global?"
1641
+
1642
+ - "Event references component 'XYZ' which is unknown. What system does this belong to?"
1643
+
1644
+ - "Event data shows contradictory information: status=success but error_count=15. Which is correct?"
1645
+
1646
+ **For MEDIUM ambiguity:**
1647
+ 1. **Document assumption** with reasoning
1648
+ 2. **Proceed with processing** based on assumption
1649
+ 3. **Ask for clarification async** (non-blocking)
1650
+ 4. **Mark in event history** for future reference
1651
+
1652
+ Example: [ASSUMED: "login issue" refers to login button based on recent similar events]
1653
+
1654
+ **For LOW ambiguity:**
1655
+ 1. **Mark with [TO BE CLARIFIED: detail]**
1656
+ 2. **Continue processing** normally
1657
+ 3. **Document gap** in event history
1658
+
1659
+ Example: [TO BE CLARIFIED: Exact timestamp of when issue was first observed]
1660
+
1661
+ #### Document Clarification Process
1662
+
1663
+ In event history, record:
1664
+ - **Ambiguity detected**: What was unclear
1665
+ - **Severity assessed**: CRITICAL/HIGH/MEDIUM/LOW
1666
+ - **Clarification requested**: Questions asked (if any)
1667
+ - **Response received**: Team's clarification
1668
+ - **Assumption made**: If proceeded with assumption
1669
+ - **Resolution**: How ambiguity was resolved
1670
+
1671
+ This ensures future similar events can reference past clarifications and avoid redundant questions.
1672
+
1673
+ ### Step 2: Load Context and Memory
1674
+
1675
+ #### 2.1 Check Event Processor Memory
1676
+ Read \`.bugzy/runtime/memory/event-processor.md\` to:
1677
+ - Find similar event patterns
1678
+ - Load example events with reasoning
1679
+ - Get system-specific rules
1680
+ - Retrieve task mapping patterns
1681
+
1682
+ #### 2.2 Check Event History
1683
+ Read \`.bugzy/runtime/memory/event-history.md\` to:
1684
+ - Ensure event hasn't been processed already (idempotency)
1685
+ - Find related recent events
1686
+ - Understand event patterns and trends
1687
+
1688
+ #### 2.3 Read Current State
1689
+ - Read \`test-plan.md\` for current coverage
1690
+ - List \`./test-cases/\` for existing tests
1691
+ - Check \`.bugzy/runtime/knowledge-base.md\` for past insights
1692
+
1693
+ #### 2.4 Load System-Specific Handler (REQUIRED)
1694
+
1695
+ Based on the event source, load the handler from \`.bugzy/runtime/handlers/\`:
1696
+
1697
+ **Step 1: Detect Event Source from Payload:**
1698
+ - \`com.jira-server.*\` event type prefix \u2192 \`.bugzy/runtime/handlers/jira.md\`
1699
+ - \`github.*\` or GitHub webhook structure \u2192 \`.bugzy/runtime/handlers/github.md\`
1700
+ - \`linear.*\` or Linear webhook \u2192 \`.bugzy/runtime/handlers/linear.md\`
1701
+ - Other sources \u2192 Check for matching handler file by source name
1702
+
1703
+ **Step 2: Load and Read the Handler File:**
1704
+ The handler file contains system-specific instructions for:
1705
+ - Event payload structure and field meanings
1706
+ - Which triggers (status changes, resolutions) require specific actions
1707
+ - How to interpret different event types
1708
+ - When to invoke \`/verify-changes\`
1709
+ - How to update the knowledge base
1710
+
1711
+ **Step 3: Follow Handler Instructions:**
1712
+ The handler file is authoritative for this event source. Follow its instructions for:
1713
+ - Interpreting the event payload
1714
+ - Determining what actions to take
1715
+ - Formatting responses and updates
1716
+
1717
+ **Step 4: If No Handler Exists:**
1718
+ Do NOT guess or apply generic logic. Instead:
1719
+ 1. Inform the user that no handler exists for this event source
1720
+ 2. Ask how this event type should be processed
1721
+ 3. Suggest creating a handler file at \`.bugzy/runtime/handlers/{source}.md\`
1722
+
1723
+ **Project-Specific Configuration:**
1724
+ Handlers reference \`.bugzy/runtime/project-context.md\` for project-specific rules like:
1725
+ - Which status transitions trigger verify-changes
1726
+ - Which resolutions should update the knowledge base
1727
+ - Which transitions to ignore
1728
+
1729
+ ### Step 3: Intelligent Event Analysis
1730
+
1731
+ #### 3.1 Contextual Pattern Analysis
1732
+ Don't just match patterns - analyze the event within the full context:
1733
+
1734
+ **Combine Multiple Signals**:
1735
+ - Event details + Historical patterns from memory
1736
+ - Current test plan state + Knowledge base
1737
+ - External system status + Team activity
1738
+ - Business priorities + Risk assessment
1739
+
1740
+ **Example Contextual Analysis**:
1741
+ \`\`\`
1742
+ Event: Jira issue PROJ-456 moved to "Ready for QA"
1743
+ + Handler: jira.md says "Ready for QA" triggers /verify-changes
1744
+ + History: This issue was previously in "In Progress" for 3 days
1745
+ + Knowledge: Related PR #123 merged yesterday
1746
+ = Decision: Invoke /verify-changes with issue context and PR reference
1747
+ \`\`\`
1748
+
1749
+ **Pattern Recognition with Context**:
1750
+ - An issue resolution depends on what the handler prescribes for that status
1751
+ - A duplicate event (same issue, same transition) should be skipped
1752
+ - Events from different sources about the same change should be correlated
1753
+ - Handler instructions take precedence over generic assumptions
1754
+
1755
+ #### 3.2 Generate Semantic Queries
1756
+ Based on event type and content, generate 3-5 specific search queries:
1757
+ - Search for similar past events
1758
+ - Look for related test cases
1759
+ - Find relevant documentation
1760
+ - Check for known issues
1761
+
1762
+ {{DOCUMENTATION_RESEARCHER_INSTRUCTIONS}}
1763
+
1764
+ ### Step 4: Task Planning with Reasoning
1765
+
1766
+ Generate tasks based on event analysis, using examples from memory as reference.
1767
+
1768
+ #### Task Generation Logic:
1769
+ Analyze the event in context of ALL available information to decide what actions to take:
1770
+
1771
+ **Consider the Full Context**:
1772
+ - What does the handler prescribe for this event type?
1773
+ - How does this relate to current knowledge?
1774
+ - What's the state of related issues in external systems?
1775
+ - Is this part of a larger pattern we've been seeing?
1776
+ - What's the business impact of this event?
1777
+
1778
+ **Contextual Decision Making**:
1779
+ The same event type can require different actions based on context:
1780
+ - If handler says this status triggers verification \u2192 Invoke /verify-changes
1781
+ - If this issue was already processed (check event history) \u2192 Skip to avoid duplicates
1782
+ - If related PR exists in knowledge base \u2192 Include PR context in actions
1783
+ - If this is a recurring pattern from the same source \u2192 Consider flagging for review
1784
+ - If handler has no rule for this event type \u2192 Ask user for guidance
1785
+
1786
+ **Dynamic Task Selection**:
1787
+ Based on the contextual analysis, decide which tasks make sense:
1788
+ - **extract_learning**: When the event reveals something new about the system
1789
+ - **update_test_plan**: When our understanding of what to test has changed
1790
+ - **update_test_cases**: When tests need to reflect new reality
1791
+ - **report_bug**: When we have a legitimate, impactful, reproducible issue
1792
+ - **skip_action**: When context shows no action needed (e.g., known issue, already fixed)
1793
+
1794
+ The key is to use ALL available context - not just react to the event type
1795
+
1796
+ #### Document Reasoning:
1797
+ For each task, document WHY it's being executed:
1798
+ \`\`\`markdown
1799
+ Task: extract_learning
1800
+ Reasoning: This event reveals a pattern of login failures on Chrome that wasn't previously documented
1801
+ Data: "Chrome-specific timeout issues with login button"
1802
+ \`\`\`
1803
+
1804
+ ### Step 5: Execute Tasks with Memory Updates
1805
+
1806
+ #### 5.1 Execute Each Task
1807
+
1808
+ {{ISSUE_TRACKER_INSTRUCTIONS}}
1809
+
1810
+ ##### For Other Tasks:
1811
+ Follow the standard execution logic with added context from memory.
1812
+
1813
+ #### 5.2 Update Event Processor Memory
1814
+ If new patterns discovered, append to \`.bugzy/runtime/memory/event-processor.md\`:
1815
+ \`\`\`markdown
1816
+ ### Pattern: [New Pattern Name]
1817
+ **First Seen**: [Date]
1818
+ **Indicators**: [What identifies this pattern]
1819
+ **Typical Tasks**: [Common task responses]
1820
+ **Example**: [This event]
1821
+ \`\`\`
1822
+
1823
+ #### 5.3 Update Event History
1824
+ Append to \`.bugzy/runtime/memory/event-history.md\`:
1825
+ \`\`\`markdown
1826
+ ## [Timestamp] - Event #[ID]
1827
+
1828
+ **Original Input**: [Raw arguments provided]
1829
+ **Parsed Event**:
1830
+ \`\`\`yaml
1831
+ type: [type]
1832
+ source: [source]
1833
+ [other fields]
1834
+ \`\`\`
1835
+
1836
+ **Pattern Matched**: [Pattern name or "New Pattern"]
1837
+ **Tasks Executed**:
1838
+ 1. [Task 1] - Reasoning: [Why]
1839
+ 2. [Task 2] - Reasoning: [Why]
1840
+
1841
+ **Files Modified**:
1842
+ - [List of files]
1843
+
1844
+ **Outcome**: [Success/Partial/Failed]
1845
+ **Notes**: [Any additional context]
1846
+ ---
1847
+ \`\`\`
1848
+
1849
+ ### Step 6: Learning from Events
1850
+
1851
+ After processing, check if this event teaches us something new:
1852
+ 1. Is this a new type of event we haven't seen?
1853
+ 2. Did our task planning work well?
1854
+ 3. Should we update our patterns?
1855
+ 4. Are there trends across recent events?
1856
+
1857
+ If yes, update the event processor memory with new patterns or refined rules.
1858
+
1859
+ ### Step 7: Create Necessary Files
1860
+
1861
+ Ensure all required files and directories exist:
1862
+ \`\`\`bash
1863
+ mkdir -p ./test-cases .claude/memory
1864
+ \`\`\`
1865
+
1866
+ Create files if they don't exist:
1867
+ - \`.bugzy/runtime/knowledge-base.md\`
1868
+ - \`.bugzy/runtime/memory/event-processor.md\`
1869
+ - \`.bugzy/runtime/memory/event-history.md\`
1870
+
1871
+ ## Important Considerations
1872
+
1873
+ ### Contextual Intelligence
1874
+ - Never process events in isolation - always consider full context
1875
+ - Use knowledge base, history, and external system state to inform decisions
1876
+ - What seems like a bug might be expected behavior given the context
1877
+ - A minor event might be critical when seen as part of a pattern
1878
+
1879
+ ### Adaptive Response
1880
+ - Same event type can require different actions based on context
1881
+ - Learn from each event to improve future decision-making
1882
+ - Build understanding of system behavior over time
1883
+ - Adjust responses based on business priorities and risk
1884
+
1885
+ ### Smart Task Generation
1886
+ - Only take actions prescribed by the handler or confirmed by the user
1887
+ - Document why each decision was made with full context
1888
+ - Skip redundant actions (e.g., duplicate events, already-processed issues)
1889
+ - Escalate appropriately based on pattern recognition
1890
+
1891
+ ### Continuous Learning
1892
+ - Each event adds to our understanding of the system
1893
+ - Update patterns when new correlations are discovered
1894
+ - Refine decision rules based on outcomes
1895
+ - Build institutional memory through event history
1896
+
1897
+ ${KNOWLEDGE_BASE_UPDATE_INSTRUCTIONS}`,
1898
+ optionalSubagents: [
1899
+ {
1900
+ role: "documentation-researcher",
1901
+ contentBlock: `#### 3.3 Use Documentation Researcher if Needed
1902
+ For events mentioning unknown features or components:
1903
+ \`\`\`
1904
+ Use documentation-researcher agent to find information about: [component/feature]
1905
+ \`\`\``
1906
+ },
1907
+ {
1908
+ role: "issue-tracker",
1909
+ contentBlock: `##### For Issue Tracking:
1910
+
1911
+ When an issue needs to be tracked (task type: report_bug or update_story):
1912
+ \`\`\`
1913
+ Use issue-tracker agent to:
1914
+ 1. Check for duplicate issues in the tracking system
1915
+ 2. For bugs: Create detailed bug report with:
1916
+ - Clear, descriptive title
1917
+ - Detailed description with context
1918
+ - Step-by-step reproduction instructions
1919
+ - Expected vs actual behavior
1920
+ - Environment and configuration details
1921
+ - Test case reference (if applicable)
1922
+ - Screenshots or error logs
1923
+ 3. For stories: Update status and add QA comments
1924
+ 4. Track issue lifecycle and maintain categorization
1925
+ \`\`\`
1926
+
1927
+ The issue-tracker agent will handle all aspects of issue tracking including duplicate detection, story management, QA workflow transitions, and integration with your project management system (Jira, Linear, Notion, etc.).`
1928
+ }
1929
+ ],
1930
+ requiredSubagents: [],
1931
+ dependentTasks: ["verify-changes"]
1932
+ };
1933
+
1934
+ // src/tasks/library/run-tests.ts
1935
+ var runTestsTask = {
1936
+ slug: TASK_SLUGS.RUN_TESTS,
1937
+ name: "Run Tests",
1938
+ description: "Execute automated Playwright tests, analyze failures, and fix test issues automatically",
1939
+ frontmatter: {
1940
+ description: "Execute automated Playwright tests, analyze failures, and fix test issues automatically",
1941
+ "argument-hint": '[file-pattern|tag|all] (e.g., "auth", "@smoke", "tests/specs/login.spec.ts")'
1942
+ },
1943
+ baseContent: `# Run Tests Command
1944
+
1945
+ ## SECURITY NOTICE
1946
+ **CRITICAL**: Never read the \`.env\` file. It contains ONLY secrets (passwords, API keys).
1947
+ - **Read \`.env.testdata\`** for non-secret environment variables (TEST_BASE_URL, TEST_OWNER_EMAIL, etc.)
1948
+ - \`.env.testdata\` contains actual values for test data, URLs, and non-sensitive configuration
1949
+ - For secrets: Reference variable names only (TEST_OWNER_PASSWORD) - values are injected at runtime
1950
+ - The \`.env\` file access is blocked by settings.json
1951
+
1952
+ Execute automated Playwright tests, analyze failures using JSON reports, automatically fix test issues, and log product bugs.
1953
+
1954
+ ## Arguments
1955
+ Arguments: $ARGUMENTS
1956
+
1957
+ ## Parse Arguments
1958
+ Extract the following from arguments:
1959
+ - **selector**: Test selection criteria
1960
+ - File pattern: "auth" \u2192 finds tests/specs/**/*auth*.spec.ts
1961
+ - Tag: "@smoke" \u2192 runs tests with @smoke annotation
1962
+ - Specific file: "tests/specs/login.spec.ts"
1963
+ - All tests: "all" or "" \u2192 runs entire test suite
1964
+
1965
+ ${KNOWLEDGE_BASE_READ_INSTRUCTIONS}
1966
+
1967
+ ## Test Execution Strategy
1968
+
1969
+ **IMPORTANT**: Before selecting tests, read \`.bugzy/runtime/test-execution-strategy.md\` to understand:
1970
+ - Available test tiers (Smoke, Component, Full Regression)
1971
+ - When to use each tier (commit, PR, release, debug)
1972
+ - Default behavior (default to @smoke unless user specifies otherwise)
1973
+ - How to interpret user intent from context keywords
1974
+ - Time/coverage trade-offs
1975
+ - Tag taxonomy
1976
+
1977
+ Apply the strategy guidance when determining which tests to run.
1978
+
1979
+ ## Process
1980
+
1981
+ **First**, consult \`.bugzy/runtime/test-execution-strategy.md\` decision tree to determine appropriate test tier based on user's selector and context.
1982
+
1983
+ ### Step 1: Identify Automated Tests to Run
1984
+
1985
+ #### 1.1 Understand Test Selection
1986
+ Parse the selector argument to determine which tests to run:
1987
+
1988
+ **File Pattern** (e.g., "auth", "login"):
1989
+ - Find matching test files: \`tests/specs/**/*[pattern]*.spec.ts\`
1990
+ - Example: "auth" \u2192 finds all test files with "auth" in the name
1991
+
1992
+ **Tag** (e.g., "@smoke", "@regression"):
1993
+ - Run tests with specific Playwright tag annotation
1994
+ - Use Playwright's \`--grep\` option
1995
+
1996
+ **Specific File** (e.g., "tests/specs/auth/login.spec.ts"):
1997
+ - Run that specific test file
1998
+
1999
+ **All Tests** ("all" or no selector):
2000
+ - Run entire test suite: \`tests/specs/**/*.spec.ts\`
2001
+
2002
+ #### 1.2 Find Matching Test Files
2003
+ Use glob patterns to find test files:
2004
+ \`\`\`bash
2005
+ # For file pattern
2006
+ ls tests/specs/**/*[pattern]*.spec.ts
2007
+
2008
+ # For specific file
2009
+ ls tests/specs/auth/login.spec.ts
2010
+
2011
+ # For all tests
2012
+ ls tests/specs/**/*.spec.ts
2013
+ \`\`\`
2014
+
2015
+ #### 1.3 Validate Test Files Exist
2016
+ Check that at least one test file was found:
2017
+ - If no tests found, inform user and suggest available tests
2018
+ - List available test files if selection was unclear
2019
+
2020
+ ### Step 2: Execute Automated Playwright Tests
2021
+
2022
+ #### 2.1 Build Playwright Command
2023
+ Construct the Playwright test command based on the selector:
2024
+
2025
+ **For file pattern or specific file**:
2026
+ \`\`\`bash
2027
+ npx playwright test [selector]
2028
+ \`\`\`
2029
+
2030
+ **For tag**:
2031
+ \`\`\`bash
2032
+ npx playwright test --grep "[tag]"
2033
+ \`\`\`
2034
+
2035
+ **For all tests**:
2036
+ \`\`\`bash
2037
+ npx playwright test
2038
+ \`\`\`
2039
+
2040
+ **Output**: Custom Bugzy reporter will create hierarchical test-runs/YYYYMMDD-HHMMSS/ structure with manifest.json
2041
+
2042
+ #### 2.2 Execute Tests via Bash
2043
+ Run the Playwright command:
2044
+ \`\`\`bash
2045
+ npx playwright test [selector]
2046
+ \`\`\`
2047
+
2048
+ Wait for execution to complete. This may take several minutes depending on test count.
2049
+
2050
+ **Note**: The custom Bugzy reporter will automatically:
2051
+ - Generate timestamp in YYYYMMDD-HHMMSS format
2052
+ - Create test-runs/{timestamp}/ directory structure
2053
+ - Record execution-id.txt with BUGZY_EXECUTION_ID
2054
+ - Save results per test case in TC-{id}/exec-1/ folders
2055
+ - Generate manifest.json with complete execution summary
2056
+
2057
+ #### 2.3 Locate and Read Test Results
2058
+ After execution completes, find and read the manifest:
2059
+
2060
+ 1. Find the test run directory (most recent):
2061
+ \`\`\`bash
2062
+ ls -t test-runs/ | head -1
2063
+ \`\`\`
2064
+
2065
+ 2. Read the manifest.json file:
2066
+ \`\`\`bash
2067
+ cat test-runs/[timestamp]/manifest.json
2068
+ \`\`\`
2069
+
2070
+ 3. Store the timestamp for use in test-debugger-fixer if needed
2071
+
2072
+ ### Step 3: Analyze Test Results from Manifest
2073
+
2074
+ #### 3.1 Parse Manifest
2075
+ The Bugzy custom reporter produces structured output in manifest.json:
2076
+ \`\`\`json
2077
+ {
2078
+ "bugzyExecutionId": "70a59676-cfd0-4ffd-b8ad-69ceff25c31d",
2079
+ "timestamp": "20251115-123456",
2080
+ "startTime": "2025-11-15T12:34:56.789Z",
2081
+ "endTime": "2025-11-15T12:45:23.456Z",
2082
+ "status": "completed",
2083
+ "stats": {
2084
+ "totalTests": 10,
2085
+ "passed": 8,
2086
+ "failed": 2,
2087
+ "totalExecutions": 10
2088
+ },
2089
+ "testCases": [
2090
+ {
2091
+ "id": "TC-001-login",
2092
+ "name": "Login functionality",
2093
+ "totalExecutions": 1,
2094
+ "finalStatus": "passed",
2095
+ "executions": [
2096
+ {
2097
+ "number": 1,
2098
+ "status": "passed",
2099
+ "duration": 1234,
2100
+ "videoFile": "video.webm",
2101
+ "hasTrace": false,
2102
+ "hasScreenshots": false,
2103
+ "error": null
2104
+ }
2105
+ ]
2106
+ },
2107
+ {
2108
+ "id": "TC-002-invalid-credentials",
2109
+ "name": "Invalid credentials error",
2110
+ "totalExecutions": 1,
2111
+ "finalStatus": "failed",
2112
+ "executions": [
2113
+ {
2114
+ "number": 1,
2115
+ "status": "failed",
2116
+ "duration": 2345,
2117
+ "videoFile": "video.webm",
2118
+ "hasTrace": true,
2119
+ "hasScreenshots": true,
2120
+ "error": "expect(locator).toBeVisible()..."
2121
+ }
2122
+ ]
2123
+ }
2124
+ ]
2125
+ }
2126
+ \`\`\`
2127
+
2128
+ #### 3.2 Extract Test Results
2129
+ From the manifest, extract:
2130
+ - **Total tests**: stats.totalTests
2131
+ - **Passed tests**: stats.passed
2132
+ - **Failed tests**: stats.failed
2133
+ - **Total executions**: stats.totalExecutions (includes re-runs)
2134
+ - **Duration**: Calculate from startTime and endTime
2135
+
2136
+ For each failed test, collect from testCases array:
2137
+ - Test ID (id field)
2138
+ - Test name (name field)
2139
+ - Final status (finalStatus field)
2140
+ - Latest execution details:
2141
+ - Error message (executions[last].error)
2142
+ - Duration (executions[last].duration)
2143
+ - Video file location (test-runs/{timestamp}/{id}/exec-{num}/{videoFile})
2144
+ - Trace availability (executions[last].hasTrace)
2145
+ - Screenshots availability (executions[last].hasScreenshots)
2146
+
2147
+ #### 3.3 Generate Summary Statistics
2148
+ \`\`\`markdown
2149
+ ## Test Execution Summary
2150
+ - Total Tests: [count]
2151
+ - Passed: [count] ([percentage]%)
2152
+ - Failed: [count] ([percentage]%)
2153
+ - Skipped: [count] ([percentage]%)
2154
+ - Total Duration: [time]
2155
+ \`\`\`
2156
+
2157
+ ### Step 5: Triage Failed Tests
2158
+
2159
+ After analyzing test results, triage each failure to determine if it's a product bug or test issue:
2160
+
2161
+ #### 5.1 Triage Failed Tests FIRST
2162
+
2163
+ **\u26A0\uFE0F IMPORTANT: Do NOT report bugs without triaging first.**
2164
+
2165
+ For each failed test:
2166
+
2167
+ 1. **Read failure details** from manifest.json (error message, stack trace)
2168
+ 2. **Classify the failure:**
2169
+ - **Product bug**: Application behaves incorrectly
2170
+ - **Test issue**: Test code needs fixing (selector, timing, assertion)
2171
+ 3. **Document classification** for next steps
2172
+
2173
+ **Classification Guidelines:**
2174
+ - **Product Bug**: Correct test code, unexpected application behavior
2175
+ - **Test Issue**: Selector not found, timeout, race condition, wrong assertion
2176
+
2177
+ #### 5.2 Fix Test Issues Automatically
2178
+
2179
+ For each test classified as **[TEST ISSUE]**, use the test-debugger-fixer agent to automatically fix the test:
2180
+
2181
+ \`\`\`
2182
+ Use the test-debugger-fixer agent to fix test issues:
2183
+
2184
+ For each failed test classified as a test issue (not a product bug), provide:
2185
+ - Test run timestamp: [from manifest.timestamp]
2186
+ - Test case ID: [from testCases[].id in manifest]
2187
+ - Test name/title: [from testCases[].name in manifest]
2188
+ - Error message: [from testCases[].executions[last].error]
2189
+ - Execution details path: test-runs/{timestamp}/{testCaseId}/exec-1/
2190
+
2191
+ The agent will:
2192
+ 1. Read the execution details from result.json
2193
+ 2. Analyze the failure (error message, trace if available)
2194
+ 3. Identify the root cause (brittle selector, missing wait, race condition, etc.)
2195
+ 4. Apply appropriate fix to the test code
2196
+ 5. Rerun the test
2197
+ 6. The custom reporter will automatically create the next exec-N/ folder
2198
+ 7. Repeat up to 3 times if needed (exec-1, exec-2, exec-3)
2199
+ 8. Report success or escalate as likely product bug
2200
+
2201
+ After test-debugger-fixer completes:
2202
+ - If fix succeeded: Mark test as fixed, add to "Tests Fixed" list
2203
+ - If still failing after 3 attempts: Reclassify as potential product bug for Step 5.3
2204
+ \`\`\`
2205
+
2206
+ **Track Fixed Tests:**
2207
+ - Maintain list of tests fixed automatically
2208
+ - Include fix description (e.g., "Updated selector from CSS to role-based")
2209
+ - Note verification status (test now passes)
2210
+
2211
+ {{ISSUE_TRACKER_INSTRUCTIONS}}
2212
+
2213
+ ${KNOWLEDGE_BASE_UPDATE_INSTRUCTIONS}
2214
+
2215
+ {{TEAM_COMMUNICATOR_INSTRUCTIONS}}
2216
+
2217
+ ### Step 6: Handle Special Cases
2218
+
2219
+ #### If No Test Cases Found
2220
+ If no test cases match the selection criteria:
2221
+ 1. Inform user that no matching test cases were found
2222
+ 2. List available test cases or suggest running \`/generate-test-cases\` first
2223
+ 3. Provide examples of valid selection criteria
2224
+
2225
+ #### If Test Runner Agent Fails
2226
+ If the test-runner agent encounters issues:
2227
+ 1. Report the specific error
2228
+ 2. Suggest troubleshooting steps
2229
+ 3. Offer to run tests individually if batch execution failed
2230
+
2231
+ #### If Test Cases Are Invalid
2232
+ If selected test cases have formatting issues:
2233
+ 1. Report which test cases are invalid
2234
+ 2. Specify what's missing or incorrect
2235
+ 3. Offer to fix the issues or skip invalid tests
2236
+
2237
+ ### Important Notes
2238
+
2239
+ **Test Selection Strategy**:
2240
+ - **Always read** \`.bugzy/runtime/test-execution-strategy.md\` before selecting tests
2241
+ - Default to \`@smoke\` tests for fast validation unless user explicitly requests otherwise
2242
+ - Smoke tests provide 100% manual test case coverage with zero redundancy (~2-5 min)
2243
+ - Full regression includes intentional redundancy for diagnostic value (~10-15 min)
2244
+ - Use context keywords from user request to choose appropriate tier
2245
+
2246
+ **Test Execution**:
2247
+ - Automated Playwright tests are executed via bash command, not through agents
2248
+ - Test execution time varies by tier (see strategy document for details)
2249
+ - JSON reports provide structured test results for analysis
2250
+ - Playwright automatically captures traces, screenshots, and videos on failures
2251
+ - Test artifacts are stored in the test-runs/{timestamp}/ directory created by the custom reporter
2252
+
2253
+ **Failure Handling**:
2254
+ - Test failures are automatically triaged (product bugs vs test issues)
2255
+ - Test issues are automatically fixed by the test-debugger-fixer subagent
2256
+ - Product bugs are logged via issue tracker after triage
2257
+ - All results are analyzed for learning opportunities and team communication
2258
+ - Critical failures trigger immediate team notification
2259
+
2260
+ **Related Documentation**:
2261
+ - \`.bugzy/runtime/test-execution-strategy.md\` - When and why to run specific tests
2262
+ - \`.bugzy/runtime/testing-best-practices.md\` - How to write tests (patterns and anti-patterns)
2263
+
2264
+ `,
2265
+ optionalSubagents: [
2266
+ {
2267
+ role: "issue-tracker",
2268
+ contentBlock: `
2269
+
2270
+ #### 5.3 Log Product Bugs via Issue Tracker
2271
+
2272
+ After triage in Step 5.1, for tests classified as **[PRODUCT BUG]**, use the issue-tracker agent to log bugs:
2273
+
2274
+ For each bug to report, use the issue-tracker agent:
2275
+
2276
+ \`\`\`
2277
+ Use issue-tracker agent to:
2278
+ 1. Check for duplicate bugs in the tracking system
2279
+ - The agent will automatically search for similar existing issues
2280
+ - It maintains memory of recently reported issues
2281
+ - Duplicate detection happens automatically - don't create manual checks
2282
+
2283
+ 2. For each new bug (non-duplicate):
2284
+ Create detailed bug report with:
2285
+ - **Title**: Clear, descriptive summary (e.g., "Login button fails with timeout on checkout page")
2286
+ - **Description**:
2287
+ - What happened vs. what was expected
2288
+ - Impact on users
2289
+ - Test reference: [file path] \u203A [test title]
2290
+ - **Reproduction Steps**:
2291
+ - List steps from the failing test
2292
+ - Include specific test data used
2293
+ - Note any setup requirements from test file
2294
+ - **Test Execution Details**:
2295
+ - Test file: [file path from JSON report]
2296
+ - Test name: [test title from JSON report]
2297
+ - Error message: [from JSON report]
2298
+ - Stack trace: [from JSON report]
2299
+ - Trace file: [path if available]
2300
+ - Screenshots: [paths if available]
2301
+ - **Environment Details**:
2302
+ - Browser and version (from Playwright config)
2303
+ - Test environment URL (from .env.testdata BASE_URL)
2304
+ - Timestamp of failure
2305
+ - **Severity/Priority**: Based on:
2306
+ - Test type (smoke tests = high priority)
2307
+ - User impact
2308
+ - Frequency (always fails vs flaky)
2309
+ - **Additional Context**:
2310
+ - Error messages or stack traces from JSON report
2311
+ - Related test files (if part of test suite)
2312
+ - Relevant knowledge from knowledge-base.md
2313
+
2314
+ 3. Track created issues:
2315
+ - Note the issue ID/number returned
2316
+ - Update issue tracker memory with new bugs
2317
+ - Prepare issue references for team communication
2318
+ \`\`\`
2319
+
2320
+ #### 5.4 Summary of Bug Reporting
2321
+
2322
+ After issue tracker agent completes, create a summary:
2323
+ \`\`\`markdown
2324
+ ### Bug Reporting Summary
2325
+ - Total bugs found: [count of FAIL tests]
2326
+ - New bugs reported: [count of newly created issues]
2327
+ - Duplicate bugs found: [count of duplicates detected]
2328
+ - Issues not reported: [count of skipped/known issues]
2329
+
2330
+ **New Bug Reports**:
2331
+ - [Issue ID]: [Bug title] (Test: TC-XXX, Priority: [priority])
2332
+ - [Issue ID]: [Bug title] (Test: TC-YYY, Priority: [priority])
2333
+
2334
+ **Duplicate Bugs** (already tracked):
2335
+ - [Existing Issue ID]: [Bug title] (Matches test: TC-XXX)
2336
+
2337
+ **Not Reported** (skipped or known):
2338
+ - TC-XXX: Skipped due to blocker failure
2339
+ - TC-YYY: Known issue documented in knowledge base
2340
+ \`\`\`
2341
+
2342
+ **Note**: The issue tracker agent handles all duplicate detection and system integration automatically. Simply provide the bug details and let it manage the rest.`
2343
+ },
2344
+ {
2345
+ role: "team-communicator",
2346
+ contentBlock: `### Step 6: Team Communication
2347
+
2348
+ Use the team-communicator agent to notify the product team about test execution:
2349
+
2350
+ \`\`\`
2351
+ Use the team-communicator agent to:
2352
+ 1. Post test execution summary with key statistics
2353
+ 2. Highlight critical failures that need immediate attention
2354
+ 3. Share important learnings about product behavior
2355
+ 4. Report any potential bugs discovered during testing
2356
+ 5. Ask for clarification on unexpected behaviors
2357
+ 6. Provide recommendations for areas needing investigation
2358
+ 7. Use appropriate urgency level based on failure severity
2359
+ \`\`\`
2360
+
2361
+ The team communication should include:
2362
+ - **Execution summary**: Overall pass/fail statistics and timing
2363
+ - **Critical issues**: High-priority failures that need immediate attention
2364
+ - **Key learnings**: Important discoveries about product behavior
2365
+ - **Potential bugs**: Issues that may require bug reports
2366
+ - **Clarifications needed**: Unexpected behaviors requiring team input
2367
+ - **Recommendations**: Suggested follow-up actions
2368
+
2369
+ **Communication strategy based on results**:
2370
+ - **All tests passed**: Brief positive update, highlight learnings
2371
+ - **Minor failures**: Standard update with failure details and plans
2372
+ - **Critical failures**: Urgent notification with detailed analysis
2373
+ - **New discoveries**: Separate message highlighting interesting findings
2374
+
2375
+ **Update team communicator memory**:
2376
+ - Record test execution communication
2377
+ - Track team response patterns to test results
2378
+ - Document any clarifications provided by the team
2379
+ - Note team priorities based on their responses`
2380
+ }
2381
+ ],
2382
+ requiredSubagents: ["test-runner", "test-debugger-fixer"]
2383
+ };
2384
+
2385
+ // src/tasks/library/verify-changes.ts
2386
+ var verifyChangesTask = {
2387
+ slug: TASK_SLUGS.VERIFY_CHANGES,
2388
+ name: "Verify Changes",
2389
+ description: "Unified verification command for all trigger sources with automated tests and manual checklists",
2390
+ frontmatter: {
2391
+ description: "Verify code changes with automated tests and manual verification checklists",
2392
+ "argument-hint": "[trigger-auto-detected]"
2393
+ },
2394
+ baseContent: `# Verify Changes - Unified Multi-Trigger Workflow
2395
+
2396
+ ## SECURITY NOTICE
2397
+ **CRITICAL**: Never read the \`.env\` file. It contains ONLY secrets (passwords, API keys).
2398
+ - **Read \`.env.testdata\`** for non-secret environment variables (TEST_BASE_URL, TEST_OWNER_EMAIL, etc.)
2399
+ - \`.env.testdata\` contains actual values for test data, URLs, and non-sensitive configuration
2400
+ - For secrets: Reference variable names only (TEST_OWNER_PASSWORD) - values are injected at runtime
2401
+ - The \`.env\` file access is blocked by settings.json
2402
+
2403
+ ## Overview
2404
+
2405
+ This task performs comprehensive change verification with:
2406
+ - **Automated testing**: Execute Playwright tests with automatic triage and fixing
2407
+ - **Manual verification checklists**: Generate role-specific checklists for non-automatable scenarios
2408
+ - **Multi-trigger support**: Works from manual CLI, Slack messages, GitHub PRs, and CI/CD
2409
+ - **Smart output routing**: Results formatted and delivered to the appropriate channel
2410
+
2411
+ ## Arguments
2412
+
2413
+ **Input**: $ARGUMENTS
2414
+
2415
+ The input format determines the trigger source and context extraction strategy.
2416
+
2417
+ ${KNOWLEDGE_BASE_READ_INSTRUCTIONS}
2418
+
2419
+ ## Step 1: Detect Trigger Source
2420
+
2421
+ Analyze the input format to determine how this task was invoked:
2422
+
2423
+ ### 1.1 Identify Trigger Type
2424
+
2425
+ **GitHub PR Webhook:**
2426
+ - Input contains \`pull_request\` object with structure:
2427
+ \`\`\`json
2428
+ {
2429
+ "pull_request": {
2430
+ "number": 123,
2431
+ "title": "...",
2432
+ "body": "...",
2433
+ "changed_files": [...],
2434
+ "base": { "ref": "main" },
2435
+ "head": { "ref": "feature-branch" },
2436
+ "user": { "login": "..." }
2437
+ }
2438
+ }
2439
+ \`\`\`
2440
+ \u2192 **Trigger detected: GITHUB_PR**
2441
+
2442
+ **Slack Event:**
2443
+ - Input contains \`event\` object with structure:
2444
+ \`\`\`json
2445
+ {
2446
+ "eventType": "com.slack.message" or "com.slack.app_mention",
2447
+ "event": {
2448
+ "type": "message",
2449
+ "channel": "C123456",
2450
+ "user": "U123456",
2451
+ "text": "message content",
2452
+ "ts": "1234567890.123456",
2453
+ "thread_ts": "..." (optional)
2454
+ }
2455
+ }
2456
+ \`\`\`
2457
+ \u2192 **Trigger detected: SLACK_MESSAGE**
2458
+
2459
+ **CI/CD Environment:**
2460
+ - Environment variables present:
2461
+ - \`CI=true\`
2462
+ - \`GITHUB_REF\` (e.g., "refs/heads/feature-branch")
2463
+ - \`GITHUB_SHA\` (commit hash)
2464
+ - \`GITHUB_BASE_REF\` (base branch)
2465
+ - \`GITHUB_HEAD_REF\` (head branch)
2466
+ - Git context available via bash commands
2467
+ \u2192 **Trigger detected: CI_CD**
2468
+
2469
+ **Manual Invocation:**
2470
+ - Input is natural language, URL, or issue identifier
2471
+ - Patterns: "PR #123", GitHub URL, "PROJ-456", feature description
2472
+ \u2192 **Trigger detected: MANUAL**
2473
+
2474
+ ### 1.2 Store Trigger Context
2475
+
2476
+ Store the detected trigger for use in Step 6 (output routing):
2477
+ - Set variable: \`TRIGGER_SOURCE\` = [GITHUB_PR | SLACK_MESSAGE | CI_CD | MANUAL]
2478
+ - This determines output formatting and delivery channel
2479
+
2480
+ ## Step 2: Extract Context Based on Trigger
2481
+
2482
+ Based on the detected trigger source, extract relevant context:
2483
+
2484
+ ### 2.1 GitHub PR Trigger - Extract PR Details
2485
+
2486
+ If trigger is GITHUB_PR:
2487
+ - **PR number**: \`pull_request.number\`
2488
+ - **Title**: \`pull_request.title\`
2489
+ - **Description**: \`pull_request.body\`
2490
+ - **Changed files**: \`pull_request.changed_files\` (array of file paths)
2491
+ - **Author**: \`pull_request.user.login\`
2492
+ - **Base branch**: \`pull_request.base.ref\`
2493
+ - **Head branch**: \`pull_request.head.ref\`
2494
+
2495
+ Optional: Fetch additional details via GitHub API if needed (PR comments, reviews)
2496
+
2497
+ ### 2.2 Slack Message Trigger - Parse Natural Language
2498
+
2499
+ If trigger is SLACK_MESSAGE:
2500
+ - **Message text**: \`event.text\`
2501
+ - **Channel**: \`event.channel\` (for posting results)
2502
+ - **User**: \`event.user\` (requester)
2503
+ - **Thread**: \`event.thread_ts\` or \`event.ts\` (for threading replies)
2504
+
2505
+ **Extract references from text:**
2506
+ - PR numbers: "#123", "PR 123", "pull request 123"
2507
+ - Issue IDs: "PROJ-456", "BUG-123"
2508
+ - URLs: GitHub PR links, deployment URLs
2509
+ - Feature names: Quoted terms, capitalized phrases
2510
+ - Environments: "staging", "production", "preview"
2511
+
2512
+ ### 2.3 CI/CD Trigger - Read CI Environment
2513
+
2514
+ If trigger is CI_CD:
2515
+ - **CI platform**: Read \`CI\` env var
2516
+ - **Branch**: \`GITHUB_REF\` \u2192 extract branch name
2517
+ - **Commit**: \`GITHUB_SHA\`
2518
+ - **Base branch**: \`GITHUB_BASE_REF\` (for PRs)
2519
+ - **Changed files**: Run \`git diff --name-only $BASE_SHA...$HEAD_SHA\`
2520
+
2521
+ If in PR context, can also fetch PR number from CI env vars (e.g., \`GITHUB_EVENT_PATH\`)
2522
+
2523
+ ### 2.4 Manual Trigger - Parse User Input
2524
+
2525
+ If trigger is MANUAL:
2526
+ - **GitHub PR URL**: Parse to extract PR number, then fetch details via API
2527
+ - Pattern: \`https://github.com/owner/repo/pull/123\`
2528
+ - Extract: owner, repo, PR number
2529
+ - Fetch: PR details, diff, comments
2530
+ - **Issue identifier**: Extract issue ID
2531
+ - Patterns: "PROJ-123", "#456", "BUG-789"
2532
+ - **Feature description**: Use text as-is for verification context
2533
+ - **Deployment URL**: Extract for testing environment
2534
+
2535
+ ### 2.5 Unified Context Structure
2536
+
2537
+ After extraction, create unified context structure:
2538
+ \`\`\`
2539
+ CHANGE_CONTEXT = {
2540
+ trigger: [GITHUB_PR | SLACK_MESSAGE | CI_CD | MANUAL],
2541
+ title: "...",
2542
+ description: "...",
2543
+ changedFiles: ["src/pages/Login.tsx", ...],
2544
+ author: "...",
2545
+ environment: "staging" | "production" | URL,
2546
+ prNumber: 123 (if available),
2547
+ issueId: "PROJ-456" (if available),
2548
+
2549
+ // For output routing:
2550
+ slackChannel: "C123456" (if Slack trigger),
2551
+ slackThread: "1234567890.123456" (if Slack trigger),
2552
+ githubRepo: "owner/repo" (if GitHub trigger)
2553
+ }
2554
+ \`\`\`
2555
+
2556
+ ## Step 3: Determine Test Scope (Smart Selection)
2557
+
2558
+ **IMPORTANT**: You do NOT have access to code files. Infer test scope from change **descriptions** only.
2559
+
2560
+ Based on PR title, description, and commit messages, intelligently select which tests to run:
2561
+
2562
+ ### 3.1 Infer Test Scope from Change Descriptions
2563
+
2564
+ Analyze the change description to identify affected feature areas:
2565
+
2566
+ **Example mappings from descriptions to test suites:**
2567
+
2568
+ | Description Keywords | Inferred Test Scope | Example |
2569
+ |---------------------|-------------------|---------|
2570
+ | "login", "authentication", "sign in/up" | \`tests/specs/auth/\` | "Fix login page validation" \u2192 Auth tests |
2571
+ | "checkout", "payment", "purchase" | \`tests/specs/checkout/\` | "Optimize checkout flow" \u2192 Checkout tests |
2572
+ | "cart", "shopping cart", "add to cart" | \`tests/specs/cart/\` | "Update cart calculations" \u2192 Cart tests |
2573
+ | "API", "endpoint", "backend" | API test suites | "Add new user API endpoint" \u2192 User API tests |
2574
+ | "profile", "account", "settings" | \`tests/specs/profile/\` or \`tests/specs/settings/\` | "Profile page redesign" \u2192 Profile tests |
2575
+
2576
+ **Inference strategy:**
2577
+ 1. **Extract feature keywords** from PR title and description
2578
+ - PR title: "feat(checkout): Add PayPal payment option"
2579
+ - Keywords: ["checkout", "payment"]
2580
+ - Inferred scope: Checkout tests
2581
+
2582
+ 2. **Analyze commit messages** for conventional commit scopes
2583
+ - \`feat(auth): Add password reset flow\` \u2192 Auth tests
2584
+ - \`fix(cart): Resolve quantity update bug\` \u2192 Cart tests
2585
+
2586
+ 3. **Map keywords to test organization**
2587
+ - Reference: Tests are organized by feature under \`tests/specs/\` (see \`.bugzy/runtime/testing-best-practices.md\`)
2588
+ - Feature areas typically include: auth/, checkout/, cart/, profile/, api/, etc.
2589
+
2590
+ 4. **Identify test scope breadth from description tone**
2591
+ - "Fix typo in button label" \u2192 Narrow scope (smoke tests)
2592
+ - "Refactor shared utility functions" \u2192 Wide scope (full suite)
2593
+ - "Update single component styling" \u2192 Narrow scope (component tests)
2594
+
2595
+ ### 3.2 Fallback Strategies Based on Description Analysis
2596
+
2597
+ **Description patterns that indicate full suite:**
2598
+ - "Refactor shared/common utilities" (wide impact)
2599
+ - "Update dependencies" or "Upgrade framework" (safety validation)
2600
+ - "Merge main into feature" or "Sync with main" (comprehensive validation)
2601
+ - "Breaking changes" or "Major version update" (thorough testing)
2602
+ - "Database migration" or "Schema changes" (data integrity)
2603
+
2604
+ **Description patterns that indicate smoke tests only:**
2605
+ - "Fix typo" or "Update copy/text" (cosmetic change)
2606
+ - "Update README" or "Documentation only" (no functional change)
2607
+ - "Fix formatting" or "Linting fixes" (no logic change)
2608
+
2609
+ **When description is vague or ambiguous:**
2610
+ - Examples: "Updated several components", "Various bug fixes", "Improvements"
2611
+ - **ACTION REQUIRED**: Use AskUserQuestion tool to clarify test scope
2612
+ - Provide options based on available test suites:
2613
+ \`\`\`typescript
2614
+ AskUserQuestion({
2615
+ questions: [{
2616
+ question: "The change description is broad. Which test suites should run?",
2617
+ header: "Test Scope",
2618
+ multiSelect: true,
2619
+ options: [
2620
+ { label: "Auth tests", description: "Login, signup, password reset" },
2621
+ { label: "Checkout tests", description: "Purchase flow, payment processing" },
2622
+ { label: "Full test suite", description: "Run all tests for comprehensive validation" },
2623
+ { label: "Smoke tests only", description: "Quick validation of critical paths" }
2624
+ ]
2625
+ }]
2626
+ })
2627
+ \`\`\`
2628
+
2629
+ **If specific test scope requested:**
2630
+ - User can override with: "only smoke tests", "full suite", specific test suite names
2631
+ - Honor user's explicit scope over smart selection
2632
+
2633
+ ### 3.3 Test Selection Summary
2634
+
2635
+ Generate summary of test selection based on description analysis:
2636
+ \`\`\`markdown
2637
+ ### Test Scope Determined
2638
+ - **Change description**: [PR title or summary]
2639
+ - **Identified keywords**: [list extracted keywords: "auth", "checkout", etc.]
2640
+ - **Affected test suites**: [list inferred test suite paths or names]
2641
+ - **Scope reasoning**: [explain why this scope was selected]
2642
+ - **Execution strategy**: [smart selection | full suite | smoke tests | user-specified]
2643
+ \`\`\`
2644
+
2645
+ **Example summary:**
2646
+ \`\`\`markdown
2647
+ ### Test Scope Determined
2648
+ - **Change description**: "feat(checkout): Add PayPal payment option"
2649
+ - **Identified keywords**: checkout, payment, PayPal
2650
+ - **Affected test suites**: tests/specs/checkout/payment.spec.ts, tests/specs/checkout/purchase-flow.spec.ts
2651
+ - **Scope reasoning**: Change affects checkout payment processing; running all checkout tests to validate payment integration
2652
+ - **Execution strategy**: Smart selection (checkout suite)
2653
+ \`\`\`
2654
+
2655
+ ## Step 4: Run Verification Workflow
2656
+
2657
+ Execute comprehensive verification combining automated tests and manual checklists:
2658
+
2659
+ ### 4A: Automated Testing (Integrated from /run-tests)
2660
+
2661
+ Execute automated Playwright tests with full triage and fixing:
2662
+
2663
+ #### 4A.1 Execute Tests
2664
+
2665
+ Run the selected tests via Playwright:
2666
+ \`\`\`bash
2667
+ npx playwright test [scope] --reporter=json --output=test-results/
2668
+ \`\`\`
2669
+
2670
+ Wait for execution to complete. Capture JSON report from \`test-results/.last-run.json\`.
2671
+
2672
+ #### 4A.2 Parse Test Results
2673
+
2674
+ Read and analyze the JSON report:
2675
+ - Extract: Total, passed, failed, skipped counts
2676
+ - For each failed test: file path, test name, error message, stack trace, trace file
2677
+ - Calculate: Pass rate, total duration
2678
+
2679
+ #### 4A.3 Triage Failures (Classification)
2680
+
2681
+ #### Automatic Test Issue Fixing
2682
+
2683
+ For each test classified as **[TEST ISSUE]**, use the test-debugger-fixer agent to automatically fix the test:
2684
+
2685
+ \`\`\`
2686
+ Use the test-debugger-fixer agent to fix test issues:
2687
+
2688
+ For each failed test classified as a test issue (not a product bug), provide:
2689
+ - Test file path: [from JSON report]
2690
+ - Test name/title: [from JSON report]
2691
+ - Error message: [from JSON report]
2692
+ - Stack trace: [from JSON report]
2693
+ - Trace file path: [if available]
2694
+
2695
+ The agent will:
2696
+ 1. Read the failing test file
2697
+ 2. Analyze the failure details
2698
+ 3. Open browser via Playwright MCP to debug if needed
2699
+ 4. Identify the root cause (brittle selector, missing wait, race condition, etc.)
2700
+ 5. Apply appropriate fix to the test code
2701
+ 6. Rerun the test to verify the fix
2702
+ 7. Repeat up to 3 times if needed
2703
+ 8. Report success or escalate as likely product bug
2704
+
2705
+ After test-debugger-fixer completes:
2706
+ - If fix succeeded: Mark test as fixed, add to "Tests Fixed" list
2707
+ - If still failing after 3 attempts: Reclassify as potential product bug
2708
+ \`\`\`
2709
+
2710
+ **Track Fixed Tests:**
2711
+ - Maintain list of tests fixed automatically
2712
+ - Include fix description (e.g., "Updated selector from CSS to role-based")
2713
+ - Note verification status (test now passes)
2714
+ - Reference .bugzy/runtime/testing-best-practices.md for best practices
2715
+
2716
+ For each failed test, classify as:
2717
+ - **[PRODUCT BUG]**: Correct test code, but application behaves incorrectly
2718
+ - **[TEST ISSUE]**: Test code needs fixing (selector, timing, assertion)
2719
+
2720
+ Classification guidelines:
2721
+ - Product Bug: Expected behavior not met, functional issue
2722
+ - Test Issue: Selector not found, timeout, race condition, brittle locator
2723
+
2724
+ #### 4A.4 Fix Test Issues Automatically
2725
+
2726
+ For tests classified as [TEST ISSUE]:
2727
+ - Use test-debugger-fixer agent to analyze and fix
2728
+ - Agent debugs with browser if needed
2729
+ - Applies fix (selector update, wait condition, assertion correction)
2730
+ - Reruns test to verify fix (10x for flaky tests)
2731
+ - Max 3 fix attempts, then reclassify as product bug
2732
+
2733
+ Track fixed tests with:
2734
+ - Test file path
2735
+ - Fix description
2736
+ - Verification status (now passes)
2737
+
2738
+ #### 4A.5 Log Product Bugs
2739
+
2740
+ {{ISSUE_TRACKER_INSTRUCTIONS}}
2741
+
2742
+ For tests classified as [PRODUCT BUG]:
2743
+ - Use issue-tracker agent to create bug reports
2744
+ - Agent checks for duplicates automatically
2745
+ - Creates detailed report with:
2746
+ - Title, description, reproduction steps
2747
+ - Test reference, error details, stack trace
2748
+ - Screenshots, traces, environment details
2749
+ - Severity based on test type and impact
2750
+ - Returns issue ID for tracking
2751
+
2752
+ ### 4B: Manual Verification Checklist (NEW)
2753
+
2754
+ Generate human-readable checklist for non-automatable scenarios:
2755
+
2756
+ #### Generate Manual Verification Checklist
2757
+
2758
+ Analyze the code changes and generate a manual verification checklist for scenarios that cannot be automated.
2759
+
2760
+ #### Analyze Change Context
2761
+
2762
+ Review the provided context to understand what changed:
2763
+ - Read PR title, description, and commit messages
2764
+ - Identify change types from descriptions: visual, UX, forms, mobile, accessibility, edge cases
2765
+ - Understand the scope and impact of changes from the change descriptions
2766
+
2767
+ #### Identify Non-Automatable Scenarios
2768
+
2769
+ Based on the change analysis, identify scenarios that require human verification:
2770
+
2771
+ **1. Visual Design Changes** (CSS, styling, design files, graphics)
2772
+ - Color schemes, gradients, shadows
2773
+ - Typography, font sizes, line heights
2774
+ - Spacing, margins, padding, alignment
2775
+ - Visual consistency across components
2776
+ - Brand guideline compliance
2777
+ \u2192 Add **Design Validation** checklist items
2778
+
2779
+ **2. UX Interaction Changes** (animations, transitions, gestures, micro-interactions)
2780
+ - Animation smoothness (60fps expectation)
2781
+ - Transition timing and easing
2782
+ - Interaction responsiveness and feel
2783
+ - Loading states and skeleton screens
2784
+ - Hover effects, focus states
2785
+ \u2192 Add **UX Feel** checklist items
2786
+
2787
+ **3. Form and Input Changes** (new form fields, input validation, user input)
2788
+ - Screen reader compatibility
2789
+ - Keyboard navigation (Tab order, Enter to submit)
2790
+ - Error message clarity and placement
2791
+ - Color contrast (WCAG 2.1 AA: 4.5:1 ratio for text)
2792
+ - Focus indicators visibility
2793
+ \u2192 Add **Accessibility** checklist items
2794
+
2795
+ **4. Mobile and Responsive Changes** (media queries, touch interactions, viewport)
2796
+ - Touch target sizes (\u226544px iOS, \u226548dp Android)
2797
+ - Responsive layout breakpoints
2798
+ - Mobile keyboard behavior (doesn't obscure inputs)
2799
+ - Swipe gestures and touch interactions
2800
+ - Pinch-to-zoom functionality
2801
+ \u2192 Add **Mobile Experience** checklist items
2802
+
2803
+ **5. Low ROI or Rare Scenarios** (edge cases, one-time migrations, rare user paths)
2804
+ - Scenarios used by < 1% of users
2805
+ - Complex multi-system integrations
2806
+ - One-time data migrations
2807
+ - Leap year, DST, timezone edge cases
2808
+ \u2192 Add **Exploratory Testing** notes
2809
+
2810
+ **6. Cross-Browser Visual Consistency** (layout rendering differences)
2811
+ - Layout consistency across Chrome, Firefox, Safari
2812
+ - CSS feature support differences
2813
+ - Font rendering variations
2814
+ \u2192 Add **Cross-Browser** checklist items (if significant visual changes)
2815
+
2816
+ #### Generate Role-Specific Checklist Items
2817
+
2818
+ For each identified scenario, create clear, actionable checklist items:
2819
+
2820
+ **Format for each item:**
2821
+ - Clear, specific task description
2822
+ - Assigned role (@design-team, @qa-team, @a11y-team, @mobile-team)
2823
+ - Acceptance criteria (what constitutes pass/fail)
2824
+ - Reference to standards when applicable (WCAG, iOS HIG, Material Design)
2825
+ - Priority indicator (\u{1F534} critical, \u{1F7E1} important, \u{1F7E2} nice-to-have)
2826
+
2827
+ **Example checklist items:**
2828
+
2829
+ **Design Validation (@design-team)**
2830
+ - [ ] \u{1F534} Login button color matches brand guidelines (#FF6B35)
2831
+ - [ ] \u{1F7E1} Loading spinner animation smooth (60fps, no jank)
2832
+ - [ ] \u{1F7E1} Card shadows match design system (elevation-2: 0 2px 4px rgba(0,0,0,0.1))
2833
+ - [ ] \u{1F7E2} Hover states provide appropriate visual feedback
2834
+
2835
+ **Accessibility (@a11y-team)**
2836
+ - [ ] \u{1F534} Screen reader announces form errors clearly (tested with VoiceOver/NVDA)
2837
+ - [ ] \u{1F534} Keyboard navigation: Tab through all interactive elements in logical order
2838
+ - [ ] \u{1F534} Color contrast meets WCAG 2.1 AA (4.5:1 for body text, 3:1 for large text)
2839
+ - [ ] \u{1F7E1} Focus indicators visible on all interactive elements
2840
+
2841
+ **Mobile Experience (@qa-team, @mobile-team)**
2842
+ - [ ] \u{1F534} Touch targets \u226544px (iOS Human Interface Guidelines)
2843
+ - [ ] \u{1F534} Mobile keyboard doesn't obscure input fields on iOS/Android
2844
+ - [ ] \u{1F7E1} Swipe gestures work naturally without conflicts
2845
+ - [ ] \u{1F7E1} Responsive layout adapts properly on iPhone SE (smallest screen)
2846
+
2847
+ **UX Feel (@design-team, @qa-team)**
2848
+ - [ ] \u{1F7E1} Page transitions smooth and not jarring
2849
+ - [ ] \u{1F7E1} Button click feedback immediate (< 100ms perceived response)
2850
+ - [ ] \u{1F7E2} Loading states prevent confusion during data fetch
2851
+
2852
+ **Exploratory Testing (@qa-team)**
2853
+ - [ ] \u{1F7E2} Test edge case: User submits form during network timeout
2854
+ - [ ] \u{1F7E2} Test edge case: User navigates back during submission
2855
+
2856
+ #### Format for Output Channel
2857
+
2858
+ Adapt the checklist format based on the output channel (determined by trigger source):
2859
+
2860
+ **Terminal (Manual Trigger):**
2861
+ \`\`\`markdown
2862
+ MANUAL VERIFICATION CHECKLIST:
2863
+ Please verify the following before merging:
2864
+
2865
+ Design Validation (@design-team):
2866
+ [ ] \u{1F534} Checkout button colors match brand guidelines (#FF6B35)
2867
+ [ ] \u{1F7E1} Loading spinner animation smooth (60fps)
2868
+
2869
+ Accessibility (@a11y-team):
2870
+ [ ] \u{1F534} Screen reader announces error messages
2871
+ [ ] \u{1F534} Keyboard navigation works (Tab order logical)
2872
+ [ ] \u{1F534} Color contrast meets WCAG 2.1 AA (4.5:1 ratio)
2873
+
2874
+ Mobile Experience (@qa-team):
2875
+ [ ] \u{1F534} Touch targets \u226544px (iOS HIG)
2876
+ [ ] \u{1F7E1} Responsive layout works on iPhone SE
2877
+ \`\`\`
2878
+
2879
+ **Slack (Slack Trigger):**
2880
+ \`\`\`markdown
2881
+ *Manual Verification Needed:*
2882
+ \u25A1 Visual: Button colors, animations (60fps)
2883
+ \u25A1 Mobile: Touch targets \u226544px
2884
+ \u25A1 A11y: Screen reader, keyboard nav, contrast
2885
+
2886
+ cc @design-team @qa-team @a11y-team
2887
+ \`\`\`
2888
+
2889
+ **GitHub PR Comment (GitHub Trigger):**
2890
+ \`\`\`markdown
2891
+ ### Manual Verification Required
2892
+
2893
+ The following scenarios require human verification before release:
2894
+
2895
+ #### Design Validation (@design-team)
2896
+ - [ ] \u{1F534} Checkout button colors match brand guidelines (#FF6B35)
2897
+ - [ ] \u{1F7E1} Loading spinner animation smooth (60fps)
2898
+ - [ ] \u{1F7E1} Card shadows match design system
2899
+
2900
+ #### Accessibility (@a11y-team)
2901
+ - [ ] \u{1F534} Screen reader announces error messages (VoiceOver/NVDA)
2902
+ - [ ] \u{1F534} Keyboard navigation through all form fields (Tab order)
2903
+ - [ ] \u{1F534} Color contrast meets WCAG 2.1 AA (4.5:1 for body text)
2904
+
2905
+ #### Mobile Experience (@qa-team)
2906
+ - [ ] \u{1F534} Touch targets \u226544px (iOS Human Interface Guidelines)
2907
+ - [ ] \u{1F534} Mobile keyboard doesn't obscure input fields
2908
+ - [ ] \u{1F7E1} Responsive layout works on iPhone SE (375x667)
2909
+
2910
+ ---
2911
+ *Legend: \u{1F534} Critical \u2022 \u{1F7E1} Important \u2022 \u{1F7E2} Nice-to-have*
2912
+ \`\`\`
2913
+
2914
+ #### Guidelines for Quality Checklists
2915
+
2916
+ **DO:**
2917
+ - Make each item verifiable (clear pass/fail criteria)
2918
+ - Include context (why this needs manual verification)
2919
+ - Reference standards (WCAG, iOS HIG, Material Design)
2920
+ - Assign to specific roles
2921
+ - Prioritize items (critical, important, nice-to-have)
2922
+ - Be specific (not "check colors" but "Login button color matches #FF6B35")
2923
+
2924
+ **DON'T:**
2925
+ - Create vague items ("test thoroughly")
2926
+ - List items that can be automated
2927
+ - Skip role assignments
2928
+ - Forget acceptance criteria
2929
+ - Omit priority indicators
2930
+
2931
+ #### When NO Manual Verification Needed
2932
+
2933
+ If the changes are purely:
2934
+ - Backend logic (no UI changes)
2935
+ - Code refactoring (no behavior changes)
2936
+ - Configuration changes (no user-facing impact)
2937
+ - Fully covered by automated tests
2938
+
2939
+ Output:
2940
+ \`\`\`markdown
2941
+ **Manual Verification:** Not required for this change.
2942
+ All user-facing changes are fully covered by automated tests.
2943
+ \`\`\`
2944
+
2945
+ #### Summary
2946
+
2947
+ After generating the checklist:
2948
+ - Count total items by priority (\u{1F534} critical, \u{1F7E1} important, \u{1F7E2} nice-to-have)
2949
+ - Estimate time needed (e.g., "~30 minutes for design QA, ~45 minutes for accessibility testing")
2950
+ - Suggest who should perform each category of checks
2951
+
2952
+ ### 4C: Aggregate Results
2953
+
2954
+ Combine automated and manual verification results:
2955
+
2956
+ \`\`\`markdown
2957
+ ## Verification Results Summary
2958
+
2959
+ ### Automated Tests
2960
+ - Total tests: [count]
2961
+ - Passed: [count] ([percentage]%)
2962
+ - Failed: [count] ([percentage]%)
2963
+ - Test issues fixed: [count]
2964
+ - Product bugs logged: [count]
2965
+ - Duration: [time]
2966
+
2967
+ ### Manual Verification Required
2968
+ [Checklist generated in 4B, or "Not required"]
2969
+
2970
+ ### Overall Recommendation
2971
+ [\u2705 Safe to merge | \u26A0\uFE0F Review bugs before merging | \u274C Do not merge]
2972
+ \`\`\`
2973
+
2974
+ ## Step 5: Understanding the Change (Documentation Research)
2975
+
2976
+ {{DOCUMENTATION_RESEARCHER_INSTRUCTIONS}}
2977
+
2978
+ Before proceeding with test creation or execution, ensure requirements are clear through ambiguity detection and adaptive exploration.
2979
+
2980
+ **Note**: For detailed exploration and clarification protocols, refer to the complete instructions below. Adapt the depth of exploration based on requirement clarity and use the clarification protocol to detect ambiguity, assess severity, and seek clarification when needed.
2981
+
2982
+ After clarification and exploration, analyze the change to determine the verification approach:
2983
+
2984
+ ### 5.1 Identify Test Scope
2985
+ Based on the change description, exploration findings, and clarified requirements:
2986
+ - **Direct impact**: Which features/functionality are directly modified
2987
+ - **Indirect impact**: What else might be affected (dependencies, integrations)
2988
+ - **Regression risk**: Existing functionality that should be retested
2989
+ - **New functionality**: Features that need new test coverage
2990
+
2991
+ ### 5.2 Determine Verification Strategy
2992
+ Plan your testing approach based on validated requirements:
2993
+ - **Priority areas**: Critical paths that must work
2994
+ - **Test types needed**: Functional, regression, integration, UI/UX
2995
+ - **Test data requirements**: What test accounts, data, or scenarios needed
2996
+ - **Success criteria**: What determines the change is working correctly (now clearly defined)
2997
+
2998
+ ## Step 6: Report Results (Multi-Channel Output)
2999
+
3000
+ Route output based on trigger source (from Step 1):
3001
+
3002
+ ### 6.1 MANUAL Trigger \u2192 Terminal Output
3003
+
3004
+ Format as comprehensive markdown report for terminal display:
3005
+
3006
+ \`\`\`markdown
3007
+ # Test Verification Report
3008
+
3009
+ ## Change Summary
3010
+ - **What Changed**: [Brief description]
3011
+ - **Scope**: [Affected features/areas]
3012
+ - **Changed Files**: [count] files
3013
+
3014
+ ## Automated Test Results
3015
+ ### Statistics
3016
+ - Total Tests: [count]
3017
+ - Passed: [count] ([percentage]%)
3018
+ - Failed: [count]
3019
+ - Test Issues Fixed: [count]
3020
+ - Product Bugs Logged: [count]
3021
+ - Duration: [time]
3022
+
3023
+ ### Tests Fixed Automatically
3024
+ [For each fixed test:
3025
+ - **Test**: [file path] \u203A [test name]
3026
+ - **Issue**: [problem found]
3027
+ - **Fix**: [what was changed]
3028
+ - **Status**: \u2705 Now passing
3029
+ ]
3030
+
3031
+ ### Product Bugs Logged
3032
+ [For each bug:
3033
+ - **Issue**: [ISSUE-123] [Bug title]
3034
+ - **Test**: [test file] \u203A [test name]
3035
+ - **Severity**: [priority]
3036
+ - **Link**: [issue tracker URL]
3037
+ ]
3038
+
3039
+ ## Manual Verification Checklist
3040
+
3041
+ [Insert checklist from Step 4B]
3042
+
3043
+ ## Recommendation
3044
+ [\u2705 Safe to merge - all automated tests pass, complete manual checks before release]
3045
+ [\u26A0\uFE0F Review bugs before merging - [X] bugs need attention]
3046
+ [\u274C Do not merge - critical failures]
3047
+
3048
+ ## Test Artifacts
3049
+ - JSON Report: test-results/.last-run.json
3050
+ - HTML Report: playwright-report/index.html
3051
+ - Traces: test-results/[test-id]/trace.zip
3052
+ - Screenshots: test-results/[test-id]/screenshots/
3053
+ \`\`\`
3054
+
3055
+ ### 6.2 SLACK_MESSAGE Trigger \u2192 Thread Reply
3056
+
3057
+ {{TEAM_COMMUNICATOR_INSTRUCTIONS}}
3058
+
3059
+ Use team-communicator agent to post concise results to Slack thread:
3060
+
3061
+ \`\`\`
3062
+ Use the team-communicator agent to post verification results.
3063
+
3064
+ **Channel**: [from CHANGE_CONTEXT.slackChannel]
3065
+ **Thread**: [from CHANGE_CONTEXT.slackThread]
3066
+
3067
+ **Message**:
3068
+ \u{1F9EA} *Verification Results for [change title]*
3069
+
3070
+ *Automated:* \u2705 [passed]/[total] tests passed ([duration])
3071
+ [If test issues fixed:] \u{1F527} [count] test issues auto-fixed
3072
+ [If bugs logged:] \u{1F41B} [count] bugs logged ([list issue IDs])
3073
+
3074
+ *Manual Verification Needed:*
3075
+ [Concise checklist summary - collapsed/expandable]
3076
+ \u25A1 Visual: [key items]
3077
+ \u25A1 Mobile: [key items]
3078
+ \u25A1 A11y: [key items]
3079
+
3080
+ *Recommendation:* [\u2705 Safe to merge | \u26A0\uFE0F Review bugs | \u274C Blocked]
3081
+
3082
+ [If bugs logged:] cc @[relevant-team-members]
3083
+ [Link to full test report if available]
3084
+ \`\`\`
3085
+
3086
+ ### 6.3 GITHUB_PR Trigger \u2192 PR Comment
3087
+
3088
+ Use GitHub API to post comprehensive comment on PR:
3089
+
3090
+ **Format as GitHub-flavored markdown:**
3091
+ \`\`\`markdown
3092
+ ## \u{1F9EA} Test Verification Results
3093
+
3094
+ **Status:** [\u2705 All tests passed | \u26A0\uFE0F Issues found | \u274C Critical failures]
3095
+
3096
+ ### Automated Tests
3097
+ | Metric | Value |
3098
+ |--------|-------|
3099
+ | Total Tests | [count] |
3100
+ | Passed | \u2705 [count] ([percentage]%) |
3101
+ | Failed | \u274C [count] |
3102
+ | Test Issues Fixed | \u{1F527} [count] |
3103
+ | Product Bugs Logged | \u{1F41B} [count] |
3104
+ | Duration | \u23F1\uFE0F [time] |
3105
+
3106
+ ### Failed Tests (Triaged)
3107
+
3108
+ [For each failure:]
3109
+
3110
+ #### \u274C **[Test Name]**
3111
+ - **File:** \`[test-file-path]\`
3112
+ - **Cause:** [Product bug | Test issue]
3113
+ - **Action:** [Bug logged: [ISSUE-123](url) | Fixed: [commit-hash](url)]
3114
+ - **Details:**
3115
+ \`\`\`
3116
+ [Error message]
3117
+ \`\`\`
3118
+
3119
+ ### Tests Fixed Automatically
3120
+
3121
+ [For each fixed test:]
3122
+ - \u2705 **[Test Name]** (\`[file-path]\`)
3123
+ - **Issue:** [brittle selector | missing wait | race condition]
3124
+ - **Fix:** [description of fix applied]
3125
+ - **Verified:** Passes 10/10 runs
3126
+
3127
+ ### Product Bugs Logged
3128
+
3129
+ [For each bug:]
3130
+ - \u{1F41B} **[[ISSUE-123](url)]** [Bug title]
3131
+ - **Test:** \`[test-file]\` \u203A [test name]
3132
+ - **Severity:** [\u{1F534} Critical | \u{1F7E1} Important | \u{1F7E2} Minor]
3133
+ - **Assignee:** @[backend-team | frontend-team]
3134
+
3135
+ ### Manual Verification Required
3136
+
3137
+ The following scenarios require human verification before release:
3138
+
3139
+ #### Design Validation (@design-team)
3140
+ - [ ] \u{1F534} [Critical design check]
3141
+ - [ ] \u{1F7E1} [Important design check]
3142
+
3143
+ #### Accessibility (@a11y-team)
3144
+ - [ ] \u{1F534} [Critical a11y check]
3145
+ - [ ] \u{1F7E1} [Important a11y check]
3146
+
3147
+ #### Mobile Experience (@qa-team)
3148
+ - [ ] \u{1F534} [Critical mobile check]
3149
+ - [ ] \u{1F7E1} [Important mobile check]
3150
+
3151
+ ---
3152
+ *Legend: \u{1F534} Critical \u2022 \u{1F7E1} Important \u2022 \u{1F7E2} Nice-to-have*
3153
+
3154
+ ### Test Artifacts
3155
+ - [Full HTML Report](playwright-report/index.html)
3156
+ - [Test Traces](test-results/)
3157
+
3158
+ ### Recommendation
3159
+ [\u2705 **Safe to merge** - All automated tests pass, complete manual checks before release]
3160
+ [\u26A0\uFE0F **Review required** - [X] bugs need attention, complete manual checks]
3161
+ [\u274C **Do not merge** - Critical failures must be resolved first]
3162
+
3163
+ ---
3164
+ *\u{1F916} Automated by Bugzy \u2022 [View Test Code](tests/specs/) \u2022 [Manual Test Cases](test-cases/)*
3165
+ \`\`\`
3166
+
3167
+ **Post comment via GitHub API:**
3168
+ - Endpoint: \`POST /repos/{owner}/{repo}/issues/{pr_number}/comments\`
3169
+ - Use GitHub MCP or bash with \`gh\` CLI
3170
+ - Requires GITHUB_TOKEN from environment
3171
+
3172
+ ### 6.4 CI_CD Trigger \u2192 Build Log + PR Comment
3173
+
3174
+ **Output to CI build log:**
3175
+ - Print detailed results to stdout (captured by CI)
3176
+ - Use ANSI colors if supported by CI platform
3177
+ - Same format as MANUAL terminal output
3178
+
3179
+ **Exit with appropriate code:**
3180
+ - Exit 0: All tests passed (safe to merge)
3181
+ - Exit 1: Tests failed or critical bugs found (block merge)
3182
+
3183
+ **Post PR comment if GitHub context available:**
3184
+ - Check for PR number in CI environment
3185
+ - If available: Post comment using 6.3 format
3186
+ - Also notify team via Slack if critical failures
3187
+
3188
+ ## Additional Steps
3189
+
3190
+ ### Handle Special Cases
3191
+
3192
+ **If no tests found for changed files:**
3193
+ - Inform user: "No automated tests found for changed files"
3194
+ - Recommend: "Run smoke test suite for basic validation"
3195
+ - Still generate manual verification checklist
3196
+
3197
+ **If all tests skipped:**
3198
+ - Explain why (dependencies, environment issues)
3199
+ - Recommend: Check test configuration and prerequisites
3200
+
3201
+ **If test execution fails:**
3202
+ - Report specific error (Playwright not installed, env vars missing)
3203
+ - Suggest troubleshooting steps
3204
+ - Don't proceed with triage if tests didn't run
3205
+
3206
+ ${KNOWLEDGE_BASE_UPDATE_INSTRUCTIONS}
3207
+
3208
+ ## Important Notes
3209
+
3210
+ - This task handles **all trigger sources** with a single unified workflow
3211
+ - Trigger detection is automatic based on input format
3212
+ - Output is automatically routed to the appropriate channel
3213
+ - Automated tests are executed with **full triage and automatic fixing**
3214
+ - Manual verification checklists are generated for **non-automatable scenarios**
3215
+ - Product bugs are logged with **automatic duplicate detection**
3216
+ - Test issues are fixed automatically with **verification**
3217
+ - Results include both automated and manual verification items
3218
+ - For best results, ensure:
3219
+ - Playwright is installed (\`npx playwright install\`)
3220
+ - Environment variables configured (copy \`.env.testdata\` to \`.env\`)
3221
+ - GitHub token available for PR comments (if GitHub trigger)
3222
+ - Slack integration configured (if Slack trigger)
3223
+ - Issue tracker configured (Linear, Jira, etc.)
3224
+
3225
+ ## Success Criteria
3226
+
3227
+ A successful verification includes:
3228
+ 1. \u2705 Trigger source correctly detected
3229
+ 2. \u2705 Context extracted completely
3230
+ 3. \u2705 Tests executed (or skipped with explanation)
3231
+ 4. \u2705 All failures triaged (product bug vs test issue)
3232
+ 5. \u2705 Test issues fixed automatically (when possible)
3233
+ 6. \u2705 Product bugs logged to issue tracker
3234
+ 7. \u2705 Manual verification checklist generated
3235
+ 8. \u2705 Results formatted for output channel
3236
+ 9. \u2705 Results delivered to appropriate destination
3237
+ 10. \u2705 Clear recommendation provided (merge / review / block)`,
3238
+ optionalSubagents: [
3239
+ {
3240
+ role: "documentation-researcher",
3241
+ contentBlock: `#### Research Project Documentation
3242
+
3243
+ Use the documentation-researcher agent to gather comprehensive context about the changed features:
3244
+
3245
+ \`\`\`
3246
+ Use the documentation-researcher agent to explore project documentation related to the changes.
3247
+
3248
+ Specifically gather:
3249
+ - Product specifications for affected features
3250
+ - User stories and acceptance criteria
3251
+ - Technical architecture documentation
3252
+ - API endpoints and contracts
3253
+ - User roles and permissions relevant to the change
3254
+ - Business rules and validations
3255
+ - UI/UX specifications
3256
+ - Known limitations or constraints
3257
+ - Related bug reports or known issues
3258
+ - Existing test documentation for this area
3259
+ \`\`\`
3260
+
3261
+ The agent will:
3262
+ 1. Check its memory for previously discovered documentation
3263
+ 2. Explore workspace for relevant pages and databases
3264
+ 3. Build comprehensive understanding of the affected features
3265
+ 4. Return synthesized information to inform testing strategy
3266
+
3267
+ Use this information to:
3268
+ - Better understand the change context
3269
+ - Identify comprehensive test scenarios
3270
+ - Recognize integration points and dependencies
3271
+ - Spot potential edge cases or risk areas
3272
+ - Enhance manual verification checklist generation`
3273
+ },
3274
+ {
3275
+ role: "issue-tracker",
3276
+ contentBlock: `#### Log Product Bugs
3277
+
3278
+ For tests classified as **[PRODUCT BUG]**, use the issue-tracker agent to log bugs:
3279
+
3280
+ \`\`\`
3281
+ Use issue-tracker agent to:
3282
+ 1. Check for duplicate bugs in the tracking system
3283
+ - The agent will automatically search for similar existing issues
3284
+ - It maintains memory of recently reported issues
3285
+ - Duplicate detection happens automatically - don't create manual checks
3286
+
3287
+ 2. For each new bug (non-duplicate):
3288
+ Create detailed bug report with:
3289
+ - **Title**: Clear, descriptive summary (e.g., "Login button fails with timeout on checkout page")
3290
+ - **Description**:
3291
+ - What happened vs. what was expected
3292
+ - Impact on users
3293
+ - Test reference: [file path] \u203A [test title]
3294
+ - **Reproduction Steps**:
3295
+ - List steps from the failing test
3296
+ - Include specific test data used
3297
+ - Note any setup requirements from test file
3298
+ - **Test Execution Details**:
3299
+ - Test file: [file path from JSON report]
3300
+ - Test name: [test title from JSON report]
3301
+ - Error message: [from JSON report]
3302
+ - Stack trace: [from JSON report]
3303
+ - Trace file: [path if available]
3304
+ - Screenshots: [paths if available]
3305
+ - **Environment Details**:
3306
+ - Browser and version (from Playwright config)
3307
+ - Test environment URL (from .env.testdata BASE_URL)
3308
+ - Timestamp of failure
3309
+ - **Severity/Priority**: Based on:
3310
+ - Test type (smoke tests = high priority)
3311
+ - User impact
3312
+ - Frequency (always fails vs flaky)
3313
+ - **Additional Context**:
3314
+ - Error messages or stack traces from JSON report
3315
+ - Related test files (if part of test suite)
3316
+ - Relevant knowledge from knowledge-base.md
3317
+
3318
+ 3. Track created issues:
3319
+ - Note the issue ID/number returned
3320
+ - Update issue tracker memory with new bugs
3321
+ - Prepare issue references for team communication
3322
+ \`\`\`
3323
+
3324
+ **Note**: The issue tracker agent handles all duplicate detection and system integration automatically. Simply provide the bug details and let it manage the rest.`
3325
+ },
3326
+ {
3327
+ role: "team-communicator",
3328
+ contentBlock: `#### Team Communication
3329
+
3330
+ Use the team-communicator agent to share verification results (primarily for Slack trigger, but can be used for other triggers):
3331
+
3332
+ \`\`\`
3333
+ Use the team-communicator agent to:
3334
+ 1. Post verification results summary
3335
+ 2. Highlight critical failures that need immediate attention
3336
+ 3. Share bugs logged with issue tracker links
3337
+ 4. Provide manual verification checklist summary
3338
+ 5. Recommend next steps based on results
3339
+ 6. Tag relevant team members for critical issues
3340
+ 7. Use appropriate urgency level based on failure severity
3341
+ \`\`\`
3342
+
3343
+ The team communication should include:
3344
+ - **Execution summary**: Overall pass/fail statistics and timing
3345
+ - **Tests fixed**: Count of test issues fixed automatically
3346
+ - **Bugs logged**: Product bugs reported to issue tracker
3347
+ - **Manual checklist**: Summary of manual verification items
3348
+ - **Recommendation**: Safe to merge / Review required / Do not merge
3349
+ - **Test artifacts**: Links to reports, traces, screenshots
3350
+
3351
+ **Communication strategy based on trigger**:
3352
+ - **Slack**: Post concise message with expandable details in thread
3353
+ - **Manual**: Full detailed report in terminal
3354
+ - **GitHub PR**: Comprehensive PR comment with tables and checklists
3355
+ - **CI/CD**: Build log output + optional Slack notification for critical failures
3356
+
3357
+ **Update team communicator memory**:
3358
+ - Record verification communication
3359
+ - Track response patterns by trigger type
3360
+ - Document team preferences for detail level
3361
+ - Note which team members respond to which types of issues`
3362
+ }
3363
+ ],
3364
+ requiredSubagents: ["test-runner", "test-debugger-fixer"]
3365
+ };
3366
+
3367
// src/tasks/index.ts

// Registry of all built-in task templates, keyed by canonical task slug.
// Keys are computed from TASK_SLUGS so the registry cannot drift from the
// slug constants; values are the task template objects defined earlier in
// this bundle.
var TASK_TEMPLATES = {
  [TASK_SLUGS.EXPLORE_APPLICATION]: exploreApplicationTask,
  [TASK_SLUGS.GENERATE_TEST_CASES]: generateTestCasesTask,
  [TASK_SLUGS.GENERATE_TEST_PLAN]: generateTestPlanTask,
  [TASK_SLUGS.HANDLE_MESSAGE]: handleMessageTask,
  [TASK_SLUGS.PROCESS_EVENT]: processEventTask,
  [TASK_SLUGS.RUN_TESTS]: runTestsTask,
  [TASK_SLUGS.VERIFY_CHANGES]: verifyChangesTask
};
3377
/**
 * Look up a task template by its slug.
 * @param {string} slug - Canonical task slug (one of the TASK_SLUGS values).
 * @returns {object|undefined} The registered task template, or undefined
 *   when no template exists for the given slug.
 */
function getTaskTemplate(slug) {
  const template = TASK_TEMPLATES[slug];
  return template;
}
3380
/**
 * List every slug that has a registered task template.
 * @returns {string[]} Slugs of all registered task templates.
 */
function getAllTaskSlugs() {
  const slugs = Object.keys(TASK_TEMPLATES);
  return slugs;
}
3383
/**
 * Check whether a task template is registered for the given slug.
 *
 * Uses an own-property check so inherited Object.prototype members
 * (e.g. "toString", "constructor") are not falsely reported as
 * registered tasks by the plain property lookup.
 * @param {string} slug - Candidate task slug.
 * @returns {boolean} True when a template is registered for the slug.
 */
function isTaskRegistered(slug) {
  return Object.prototype.hasOwnProperty.call(TASK_TEMPLATES, slug) && TASK_TEMPLATES[slug] !== void 0;
}
3386
/**
 * Build slash-command configuration entries for the given task slugs.
 *
 * Unknown slugs are skipped with a warning rather than aborting the whole
 * build; each known slug logs a confirmation as it is added.
 * @param {string[]} slugs - Task slugs to turn into slash commands.
 * @returns {Object<string, {frontmatter: *, content: string}>} Map of slug
 *   to its slash-command config (frontmatter + command content).
 */
function buildSlashCommandsConfig(slugs) {
  const configs = {};
  for (const slug of slugs) {
    // Own-property check prevents inherited Object.prototype members
    // (e.g. "constructor") from slipping past the falsiness guard and
    // producing a bogus config entry.
    const task = Object.prototype.hasOwnProperty.call(TASK_TEMPLATES, slug) ? TASK_TEMPLATES[slug] : void 0;
    if (!task) {
      console.warn(`Unknown task slug: ${slug}, skipping`);
      continue;
    }
    configs[slug] = {
      frontmatter: task.frontmatter,
      content: task.baseContent
    };
    console.log(`\u2713 Added slash command: /${slug}`);
  }
  return configs;
}
3402
/**
 * Derive the set of MCP servers needed by the required subagents of the
 * given tasks.
 * @param {string[]} slugs - Task slugs to inspect; unknown slugs are ignored.
 * @returns {string[]} Unique MCP identifiers (e.g. "playwright", "slack").
 */
function getRequiredMCPsFromTasks(slugs) {
  // Loop-invariant subagent-to-MCP mapping, hoisted out of the loops so it
  // is not rebuilt on every inner iteration. Subagents with no MCP
  // dependency (e.g. "test-debugger-fixer") are intentionally absent.
  const mcpMap = {
    "test-runner": "playwright",
    "team-communicator": "slack",
    "documentation-researcher": "notion",
    "issue-tracker": "linear"
  };
  const mcps = /* @__PURE__ */ new Set();
  for (const slug of slugs) {
    const task = TASK_TEMPLATES[slug];
    if (!task) continue;
    for (const subagent of task.requiredSubagents) {
      const mcp = mcpMap[subagent];
      if (mcp) {
        mcps.add(mcp);
      }
    }
  }
  return Array.from(mcps);
}
3422
+ export {
3423
+ TASK_SLUGS,
3424
+ TASK_TEMPLATES,
3425
+ buildSlashCommandsConfig,
3426
+ getAllTaskSlugs,
3427
+ getRequiredMCPsFromTasks,
3428
+ getTaskTemplate,
3429
+ isTaskRegistered
3430
+ };
3431
+ //# sourceMappingURL=index.js.map