@bugzy-ai/bugzy 1.15.1 → 1.17.0

This diff shows the changes between publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
@@ -88,6 +88,7 @@ var init_constants = __esm({
  PROCESS_EVENT: "process-event",
  RUN_TESTS: "run-tests",
  VERIFY_CHANGES: "verify-changes",
+ TRIAGE_RESULTS: "triage-results",
  /** @deprecated Use ONBOARD_TESTING instead */
  FULL_TEST_COVERAGE: "onboard-testing"
  };
@@ -213,27 +214,12 @@ Example structure:
  {
  inline: true,
  title: "Generate All Manual Test Case Files",
- content: `Generate ALL manual test case markdown files in the \`./test-cases/\` directory BEFORE invoking the test-code-generator agent.
-
- **For each test scenario from the previous step:**
-
- 1. **Create test case file** in \`./test-cases/\` with format \`TC-XXX-feature-description.md\`
- 2. **Include frontmatter** with:
- - \`id:\` TC-XXX (sequential ID)
- - \`title:\` Clear, descriptive title
- - \`automated:\` true/false (based on automation decision)
- - \`automated_test:\` (leave empty - will be filled by subagent when automated)
- - \`type:\` exploratory/functional/regression/smoke
- - \`area:\` Feature area/component
- 3. **Write test case content**:
- - **Objective**: Clear description of what is being tested
- - **Preconditions**: Setup requirements, test data needed
- - **Test Steps**: Numbered, human-readable steps
- - **Expected Results**: What should happen at each step
- - **Test Data**: Environment variables to use (e.g., \${TEST_BASE_URL}, \${TEST_OWNER_EMAIL})
- - **Notes**: Any assumptions, clarifications needed, or special considerations
-
- **Output**: All manual test case markdown files created in \`./test-cases/\` with automation flags set`
+ content: `Generate ALL manual test case markdown files in \`./test-cases/\` BEFORE invoking the test-code-generator agent.
+
+ Create files using \`TC-XXX-feature-description.md\` format. Follow the format of existing test cases in the directory. If no existing cases exist, include:
+ - Frontmatter with test case metadata (id, title, type, area, \`automated: true/false\`, \`automated_test:\` empty)
+ - Clear test steps with expected results
+ - Required test data references (use env var names, not values)`
  },
  // Step 11: Automate Test Cases (inline - detailed instructions for test-code-generator)
  {
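To make the condensed instructions above concrete: a manual test case file of the kind this step generates might look like the sketch below. Only the filename format and the frontmatter fields (id, title, type, area, automated, automated_test) come from the template; every concrete value is invented for illustration.

```js
// Illustrative sketch of a ./test-cases/TC-001-user-login.md file, expressed
// as a template literal in the bundle's own style. All values are hypothetical.
var exampleTestCase = `---
id: TC-001
title: User can log in with valid credentials
type: functional
area: authentication
automated: true
automated_test:
---

## Test Steps
1. Navigate to \${TEST_BASE_URL}/login
2. Enter \${TEST_OWNER_EMAIL} and the owner password
3. Submit the login form

## Expected Results
The user lands on the dashboard.
`;
```

Note that the test data section references env var names rather than values, matching the last bullet of the new instructions.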
@@ -318,76 +304,14 @@ Move to the next area and repeat until all areas are complete.
  {
  inline: true,
  title: "Team Communication",
- content: `{{INVOKE_TEAM_COMMUNICATOR}} to notify the product team about the new test cases and automated tests:
-
- \`\`\`
- 1. Post an update about test case and automation creation
- 2. Provide summary of coverage:
- - Number of manual test cases created
- - Number of automated tests created
- - Features covered by automation
- - Areas kept manual-only (and why)
- 3. Highlight key automated test scenarios
- 4. Share command to run automated tests (from \`./tests/CLAUDE.md\`)
- 5. Ask for team review and validation
- 6. Mention any areas needing exploration or clarification
- 7. Use appropriate channel and threading for the update
- \`\`\`
-
- The team communication should include:
- - **Test artifacts created**: Manual test cases + automated tests count
- - **Automation coverage**: Which features are now automated
- - **Manual-only areas**: Why some tests are kept manual (rare scenarios, exploratory)
- - **Key automated scenarios**: Critical paths now covered by automation
- - **Running tests**: Command to execute automated tests
- - **Review request**: Ask team to validate scenarios and review test code
- - **Next steps**: Plans for CI/CD integration or additional test coverage
-
- **Update team communicator memory:**
- - Record this communication
- - Note test case and automation creation
- - Track team feedback on automation approach
- - Document any clarifications requested`,
+ content: `{{INVOKE_TEAM_COMMUNICATOR}} to share test case and automation results with the team, highlighting coverage areas, automation vs manual-only decisions, and any unresolved clarifications. Ask for team review.`,
  conditionalOnSubagent: "team-communicator"
  },
  // Step 17: Final Summary (inline)
  {
  inline: true,
  title: "Final Summary",
- content: `Provide a comprehensive summary showing:
-
- **Manual Test Cases:**
- - Number of manual test cases created
- - List of test case files with IDs and titles
- - Automation status for each (automated: yes/no)
-
- **Automated Tests:**
- - Number of automated test scripts created
- - List of spec files with test counts
- - Page Objects created or updated
- - Fixtures and helpers added
-
- **Test Coverage:**
- - Features covered by manual tests
- - Features covered by automated tests
- - Areas kept manual-only (and why)
-
- **Next Steps:**
- - Command to run automated tests (from \`./tests/CLAUDE.md\`)
- - Instructions to run specific test file (from \`./tests/CLAUDE.md\`)
- - Note about copying .env.testdata to .env
- - Mention any exploration needed for edge cases
-
- **Important Notes:**
- - **Both Manual AND Automated**: Generate both artifacts - they serve different purposes
- - **Manual Test Cases**: Documentation, reference, can be executed manually when needed
- - **Automated Tests**: Fast, repeatable, for CI/CD and regression testing
- - **Automation Decision**: Not all test cases need automation - rare edge cases can stay manual
- - **Linking**: Manual test cases reference automated tests; automated tests reference manual test case IDs
- - **Two-Phase Workflow**: First generate all manual test cases, then automate area-by-area
- - **Ambiguity Handling**: Use exploration and clarification protocols before generating
- - **Environment Variables**: Use \`process.env.VAR_NAME\` in tests, update .env.testdata as needed
- - **Test Independence**: Each test must be runnable in isolation and in parallel`
+ content: `Provide a summary of created artifacts: manual test cases (count, IDs), automated tests (count, spec files), page objects and supporting files, coverage by area, and command to run tests (from \`./tests/CLAUDE.md\`).`
  }
  ],
  requiredSubagents: ["browser-automation", "test-code-generator"],
@@ -562,28 +486,7 @@ After saving the test plan:
  {
  inline: true,
  title: "Team Communication",
- content: `{{INVOKE_TEAM_COMMUNICATOR}} to notify the product team about the new test plan:
-
- \`\`\`
- 1. Post an update about the test plan creation
- 2. Provide a brief summary of coverage areas and key features
- 3. Mention any areas that need exploration or clarification
- 4. Ask for team review and feedback on the test plan
- 5. Include a link or reference to the test-plan.md file
- 6. Use appropriate channel and threading for the update
- \`\`\`
-
- The team communication should include:
- - **Test plan scope**: Brief overview of what will be tested
- - **Coverage highlights**: Key features and user flows included
- - **Areas needing clarification**: Any uncertainties discovered during documentation research
- - **Review request**: Ask team to review and provide feedback
- - **Next steps**: Mention plan to generate test cases after review
-
- **Update team communicator memory:**
- - Record this communication in the team-communicator memory
- - Note this as a test plan creation communication
- - Track team response to this type of update`,
+ content: `{{INVOKE_TEAM_COMMUNICATOR}} to share the test plan with the team for review, highlighting coverage areas and any unresolved clarifications.`,
  conditionalOnSubagent: "team-communicator"
  },
  // Step 18: Final Summary (inline)
@@ -713,59 +616,7 @@ After processing the message through the handler and composing your response:
  // Step 7: Clarification Protocol (for ambiguous intents)
  "clarification-protocol",
  // Step 8: Knowledge Base Update (library)
- "update-knowledge-base",
- // Step 9: Key Principles (inline)
- {
- inline: true,
- title: "Key Principles",
- content: `## Key Principles
-
- ### Context Preservation
- - Always maintain full conversation context
- - Link responses back to original uncertainties
- - Preserve reasoning chain for future reference
-
- ### Actionable Responses
- - Convert team input into concrete actions
- - Don't let clarifications sit without implementation
- - Follow through on commitments made to team
-
- ### Learning Integration
- - Each interaction improves our understanding
- - Build knowledge base of team preferences
- - Refine communication approaches over time
-
- ### Quality Communication
- - Acknowledge team input appropriately
- - Provide updates on actions taken
- - Ask good follow-up questions when needed`
- },
- // Step 10: Important Considerations (inline)
- {
- inline: true,
- title: "Important Considerations",
- content: `## Important Considerations
-
- ### Thread Organization
- - Keep related discussions in same thread
- - Start new threads for new topics
- - Maintain clear conversation boundaries
-
- ### Response Timing
- - Acknowledge important messages promptly
- - Allow time for implementation before status updates
- - Don't spam team with excessive communications
-
- ### Action Prioritization
- - Address urgent clarifications first
- - Batch related updates when possible
- - Focus on high-impact changes
-
- ### Memory Maintenance
- - Keep active conversations visible and current
- - Archive resolved discussions appropriately
- - Maintain searchable history of resolutions`
- }
+ "update-knowledge-base"
  ],
  requiredSubagents: ["team-communicator"],
  optionalSubagents: [],
@@ -1200,38 +1051,7 @@ Create files if they don't exist:
  - \`.bugzy/runtime/memory/event-history.md\``
  },
  // Step 14: Knowledge Base Update (library)
- "update-knowledge-base",
- // Step 15: Important Considerations (inline)
- {
- inline: true,
- title: "Important Considerations",
- content: `## Important Considerations
-
- ### Contextual Intelligence
- - Never process events in isolation - always consider full context
- - Use knowledge base, history, and external system state to inform decisions
- - What seems like a bug might be expected behavior given the context
- - A minor event might be critical when seen as part of a pattern
-
- ### Adaptive Response
- - Same event type can require different actions based on context
- - Learn from each event to improve future decision-making
- - Build understanding of system behavior over time
- - Adjust responses based on business priorities and risk
-
- ### Smart Task Generation
- - NEVER execute action tasks directly \u2014 all action tasks go through blocked-task-queue for team confirmation
- - Knowledge base updates and event history logging are the only direct operations
- - Document why each decision was made with full context
- - Skip redundant actions (e.g., duplicate events, already-processed issues)
- - Escalate appropriately based on pattern recognition
-
- ### Continuous Learning
- - Each event adds to our understanding of the system
- - Update patterns when new correlations are discovered
- - Refine decision rules based on outcomes
- - Build institutional memory through event history`
- }
+ "update-knowledge-base"
  ],
  requiredSubagents: ["team-communicator"],
  optionalSubagents: ["documentation-researcher", "issue-tracker"],
@@ -1327,6 +1147,7 @@ Before running tests, confirm the selection with the user if ambiguous:
  },
  // Step 7-10: Test Execution (library steps)
  "run-tests",
+ "normalize-test-results",
  "parse-test-results",
  "triage-failures",
  "fix-test-issues",
@@ -1335,14 +1156,7 @@ Before running tests, confirm the selection with the user if ambiguous:
  stepId: "log-product-bugs",
  conditionalOnSubagent: "issue-tracker"
  },
- // Step 12: Knowledge Base Update (library)
- "update-knowledge-base",
- // Step 13: Team Communication (conditional - library step)
- {
- stepId: "notify-team",
- conditionalOnSubagent: "team-communicator"
- },
- // Step 14: Handle Special Cases (inline - task-specific)
+ // Step 12: Handle Special Cases (inline - reference material, positioned before final action steps)
  {
  inline: true,
  title: "Handle Special Cases",
@@ -1390,6 +1204,13 @@ If selected test cases have formatting issues:
  **Related Documentation**:
  - \`./tests/docs/test-execution-strategy.md\` - When and why to run specific tests
  - \`./tests/docs/testing-best-practices.md\` - How to write tests (patterns and anti-patterns)`
+ },
+ // Step 13: Knowledge Base Update (library)
+ "update-knowledge-base",
+ // Step 14: Team Communication (conditional - library step, LAST actionable step)
+ {
+ stepId: "notify-team",
+ conditionalOnSubagent: "team-communicator"
  }
  ],
  requiredSubagents: ["browser-automation", "test-debugger-fixer"],
@@ -1512,33 +1333,13 @@ Store the detected trigger for use in output routing:
  title: "Coverage Gap vs. Ambiguity",
  content: `### Coverage Gap vs. Ambiguity
 
- When the trigger indicates a feature has been implemented and is ready for testing (Jira "Ready to Test", PR merged, CI/CD pipeline):
-
- **Missing test coverage for the referenced feature is a COVERAGE GAP, not an ambiguity.**
-
- - The developer/team is asserting the feature exists and is ready for testing
- - "Not yet explored" or "out of scope" in the test plan means the QA team hasn't tested it yet \u2014 it does NOT mean the feature doesn't exist
- - Do NOT classify as CRITICAL based on stale documentation or knowledge base gaps
- - If project-context.md or the Jira issue references the feature, assume it exists until browser exploration proves otherwise
- - Coverage gaps are handled in the "Create Tests for Coverage Gaps" step below \u2014 do NOT block here
-
- ### If You Browse the App and Cannot Find the Referenced Feature
+ When the trigger indicates a feature is ready for testing (Jira "Ready to Test", PR merged, CI/CD):
 
- Apply the Clarification Protocol's **"Execution Obstacle vs. Requirement Ambiguity"** principle:
+ **Missing test coverage is a COVERAGE GAP, not an ambiguity.** The trigger asserts the feature exists. Do NOT block based on stale docs or knowledge base gaps. Coverage gaps are handled in "Create Tests for Coverage Gaps" below.
 
- This is an **execution obstacle**, NOT a requirement ambiguity \u2014 because the authoritative trigger source (Jira issue, PR, team request) asserts the feature exists. Common causes for not finding it:
- - **Missing role/tier**: You're logged in as a basic user but the feature requires admin/premium access
- - **Missing test data**: Required test accounts or data haven't been configured in \`.env.testdata\`
- - **Feature flags**: The feature is behind a flag not enabled in the test environment
- - **Environment config**: The feature requires specific environment variables or deployment settings
+ **If you can't find the referenced feature in the browser:** Apply the Clarification Protocol's execution obstacle principle. The authoritative trigger asserts it exists \u2014 this is an execution obstacle (wrong role, missing test data, feature flags, env config). PROCEED to create tests, add placeholder env vars, notify team about the access issue. Tests may fail until resolved \u2014 that's expected.
 
- **Action: PROCEED to "Create Tests for Coverage Gaps".** Do NOT BLOCK.
- - Create test cases and specs that reference the feature as described in the trigger
- - Add placeholder env vars to \`.env.testdata\` for any missing credentials
- - Notify the team (via team-communicator) about the access obstacle and what needs to be configured
- - Tests may fail until the obstacle is resolved \u2014 this is expected and acceptable
-
- **Only classify as CRITICAL (and BLOCK) if NO authoritative trigger source claims the feature exists** \u2014 e.g., a vague manual request with no Jira/PR backing.`
+ **Only BLOCK if NO authoritative trigger source claims the feature exists** (e.g., vague manual request with no Jira/PR backing).`
  },
  // Step 6: Clarification Protocol (library)
  "clarification-protocol",
@@ -1929,44 +1730,11 @@ Post PR comment if GitHub context available.`,
  {
  inline: true,
  title: "Handle Special Cases",
- content: `**If no tests found for changed files:**
- - Inform user: "No automated tests found for changed files"
- - Recommend: "Run smoke test suite for basic validation"
- - Still generate manual verification checklist
-
- **If all tests skipped:**
- - Explain why (dependencies, environment issues)
- - Recommend: Check test configuration and prerequisites
-
- **If test execution fails:**
- - Report specific error (test framework not installed, env vars missing)
- - Suggest troubleshooting steps
- - Don't proceed with triage if tests didn't run
-
- ## Important Notes
-
- - This task handles **all trigger sources** with a single unified workflow
- - Trigger detection is automatic based on input format
- - Output is automatically routed to the appropriate channel
- - Automated tests are executed with **full triage and automatic fixing**
- - Manual verification checklists are generated for **non-automatable scenarios**
- - Product bugs are logged with **automatic duplicate detection**
- - Test issues are fixed automatically with **verification**
- - Results include both automated and manual verification items
-
- ## Success Criteria
-
- A successful verification includes:
- 1. Trigger source correctly detected
- 2. Context extracted completely
- 3. Tests executed (or skipped with explanation)
- 4. All failures triaged (product bug vs test issue)
- 5. Test issues fixed automatically (when possible)
- 6. Product bugs logged to issue tracker
- 7. Manual verification checklist generated
- 8. Results formatted for output channel
- 9. Results delivered to appropriate destination
- 10. Clear recommendation provided (merge / review / block)`
+ content: `**If no tests found for changed files:** recommend smoke test suite, still generate manual verification checklist.
+
+ **If all tests skipped:** explain why (dependencies, environment), recommend checking configuration.
+
+ **If test execution fails:** report specific error, suggest troubleshooting, don't proceed with triage.`
  }
  ],
  requiredSubagents: ["browser-automation", "test-debugger-fixer"],
@@ -2135,6 +1903,116 @@ var init_explore_application = __esm({
  }
  });
 
+ // src/tasks/library/triage-results.ts
+ var triageResultsTask;
+ var init_triage_results = __esm({
+ "src/tasks/library/triage-results.ts"() {
+ "use strict";
+ init_cjs_shims();
+ init_constants();
+ triageResultsTask = {
+ slug: TASK_SLUGS.TRIAGE_RESULTS,
+ name: "Triage Results",
+ description: "Analyze externally-submitted test results and triage failures as product bugs or test issues",
+ frontmatter: {
+ description: "Analyze externally-submitted test results and triage failures as product bugs or test issues",
+ "argument-hint": "[event payload with test results]"
+ },
+ steps: [
+ // Step 1: Overview (inline)
+ {
+ inline: true,
+ title: "Triage Results Overview",
+ content: `# Triage External Test Results
+
+ Analyze test results submitted from an external CI pipeline. The results were sent via webhook and are available in the event payload \u2014 either as inline data or a URL to download.
+
+ **Goal**: Normalize the results into the standard manifest format, classify each failure as a PRODUCT BUG or TEST ISSUE, and generate a triage report.
+
+ This task is triggered automatically when test results are submitted to the Bugzy webhook from a CI system (GitHub Actions, GitLab CI, etc.).`
+ },
+ // Step 2: Security Notice (library)
+ "security-notice",
+ // Step 3: Arguments (inline)
+ {
+ inline: true,
+ title: "Arguments",
+ content: `Arguments: $ARGUMENTS`
+ },
+ // Step 4: Load Project Context (library)
+ "load-project-context",
+ // Step 5: Knowledge Base Read (library)
+ "read-knowledge-base",
+ // Step 6: Normalize Test Results (library — handles URL/inline results + manifest creation)
+ "normalize-test-results",
+ // Step 7: Triage Failures (existing library step)
+ "triage-failures",
+ // Step 8: Fix Test Issues (library — uses test-debugger-fixer)
+ "fix-test-issues",
+ // Step 9: Log Product Bugs (conditional — requires issue-tracker)
+ {
+ stepId: "log-product-bugs",
+ conditionalOnSubagent: "issue-tracker"
+ },
+ // Step 10: Update Knowledge Base (library)
+ "update-knowledge-base",
+ // Step 11: Notify Team (conditional — requires team-communicator)
+ {
+ stepId: "notify-team",
+ conditionalOnSubagent: "team-communicator"
+ },
+ // Step 12: Generate Triage Report (inline)
+ {
+ inline: true,
+ title: "Generate Triage Report",
+ content: `## Generate Triage Report
+
+ Create a structured triage report as the task output. This report is stored in \`task_executions.result\` and displayed in the Bugzy dashboard.
+
+ **Report Structure:**
+ \`\`\`json
+ {
+ "summary": {
+ "total": <number>,
+ "passed": <number>,
+ "failed": <number>,
+ "skipped": <number>,
+ "duration_ms": <number or null>
+ },
+ "ci_metadata": {
+ "pipeline_url": "<from event payload>",
+ "commit_sha": "<from event payload>",
+ "branch": "<from event payload>"
+ },
+ "triage": {
+ "product_bugs": [
+ {
+ "test_name": "<name>",
+ "error": "<brief error>",
+ "reason": "<why this is a product bug>"
+ }
+ ],
+ "test_issues": [
+ {
+ "test_name": "<name>",
+ "error": "<brief error>",
+ "reason": "<why this is a test issue>"
+ }
+ ]
+ }
+ }
+ \`\`\`
+
+ Output this JSON as the final result of the task.`
+ }
+ ],
+ requiredSubagents: ["browser-automation", "test-debugger-fixer"],
+ optionalSubagents: ["issue-tracker", "team-communicator"],
+ dependentTasks: []
+ };
+ }
+ });
+
  // src/tasks/index.ts
  var tasks_exports = {};
  __export(tasks_exports, {
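The overview above states that results arrive via webhook "either as inline data or a URL to download", alongside CI metadata. A hypothetical payload sketch follows; pipeline_url, commit_sha, and branch are field names taken from the report template, while results_url and the overall envelope shape are assumptions for illustration only.

```js
// Hypothetical event payload for the new triage-results task. Do not treat
// this as the actual webhook contract; only the ci_metadata field names are
// confirmed by the template above.
var exampleEventPayload = {
  results_url: "https://ci.example.com/runs/123/artifacts/results.json", // or inline results instead
  pipeline_url: "https://ci.example.com/runs/123",
  commit_sha: "0abc123",
  branch: "main"
};
```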
@@ -2170,6 +2048,7 @@ var init_tasks = __esm({
  init_verify_changes();
  init_onboard_testing();
  init_explore_application();
+ init_triage_results();
  init_constants();
  TASK_TEMPLATES = {
  [TASK_SLUGS.GENERATE_TEST_CASES]: generateTestCasesTask,
@@ -2179,7 +2058,8 @@
  [TASK_SLUGS.RUN_TESTS]: runTestsTask,
  [TASK_SLUGS.VERIFY_CHANGES]: verifyChangesTask,
  [TASK_SLUGS.ONBOARD_TESTING]: onboardTestingTask,
- [TASK_SLUGS.EXPLORE_APPLICATION]: exploreApplicationTask
+ [TASK_SLUGS.EXPLORE_APPLICATION]: exploreApplicationTask,
+ [TASK_SLUGS.TRIAGE_RESULTS]: triageResultsTask
  };
  }
  });
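With the registration above, the new task resolves by slug like every other template. A minimal sketch, using the TASK_SLUGS and TASK_TEMPLATES names from this diff (the lookup helper itself is illustrative):

```js
// Minimal sketch of slug-to-template resolution; getTaskTemplate is a
// hypothetical helper, not a function defined in this package.
function getTaskTemplate(slug) {
  var template = TASK_TEMPLATES[slug];
  if (!template) throw new Error("Unknown task slug: " + slug);
  return template;
}
getTaskTemplate(TASK_SLUGS.TRIAGE_RESULTS); // -> triageResultsTask
```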
@@ -2833,206 +2713,64 @@ assistant: "Let me use the browser-automation agent to execute the checkout smok
  model: "sonnet",
  color: "green"
  };
- var CONTENT = `You are an expert automated test execution specialist with deep expertise in browser automation, test validation, and comprehensive test reporting. Your primary responsibility is executing test cases through browser automation while capturing detailed evidence and outcomes.
+ var CONTENT = `You are an expert automated test execution specialist. Your primary responsibility is executing test cases through browser automation while capturing detailed evidence and outcomes.
 
- **Core Responsibilities:**
+ **Setup:**
 
- 1. **Schema Reference**: Before starting, read \`.bugzy/runtime/templates/test-result-schema.md\` to understand:
- - Required format for \`summary.json\` with video metadata
- - Structure of \`steps.json\` with timestamps and video synchronization
- - Field descriptions and data types
+ 1. **Schema Reference**: Read \`.bugzy/runtime/templates/test-result-schema.md\` for the required format of \`summary.json\` and \`steps.json\`.
 
  2. ${MEMORY_READ_INSTRUCTIONS.replace(/{ROLE}/g, "browser-automation")}
 
- **Memory Sections for Browser Automation**:
- - **Test Execution History**: Pass/fail rates, execution times, flaky test patterns
- - **Flaky Test Tracking**: Tests that pass inconsistently with root cause analysis
- - **Environment-Specific Patterns**: Timing differences across staging/production/local
- - **Test Data Lifecycle**: How test data is created, used, and cleaned up
- - **Timing Requirements by Page**: Learned load times and interaction delays
- - **Authentication Patterns**: Auth workflows across different environments
- - **Known Infrastructure Issues**: Problems with test infrastructure, not application
-
- 3. **Environment Setup**: Before test execution:
- - Read \`.env.testdata\` to get non-secret environment variable values (TEST_BASE_URL, TEST_OWNER_EMAIL, etc.)
- - For secrets, variable names are available as environment variables (playwright-cli inherits the process environment)
-
- 4. **Test Case Parsing**: You will receive a test case file path. Parse the test case to extract:
- - Test steps and actions to perform
- - Expected behaviors and validation criteria
- - Test data and input values (replace any \${TEST_*} or $TEST_* variables with actual values from .env)
- - Preconditions and setup requirements
-
- 5. **Browser Automation Execution**: Using playwright-cli (CLI-based browser automation):
- - Launch a browser: \`playwright-cli open <url>\`
- - Execute each test step sequentially using CLI commands: \`click\`, \`fill\`, \`select\`, \`hover\`, etc.
- - Use \`snapshot\` to inspect page state and find element references (@e1, @e2, etc.)
- - Handle dynamic waits and element interactions intelligently
- - Manage browser state between steps
- - **IMPORTANT - Environment Variable Handling**:
- - When test cases contain environment variables:
- - For non-secrets (TEST_BASE_URL, TEST_OWNER_EMAIL): Read actual values from .env.testdata and use them directly
- - For secrets (TEST_OWNER_PASSWORD, API keys): playwright-cli inherits environment variables from the process
- - Example: Test says "Navigate to TEST_BASE_URL/login" \u2192 Read TEST_BASE_URL from .env.testdata, use the actual URL
-
- 6. **Evidence Collection at Each Step**:
- - Capture the current URL and page title
- - Record any console logs or errors
- - Note the actual behavior observed
- - Document any deviations from expected behavior
- - Record timing information for each step with elapsed time from test start
- - Calculate videoTimeSeconds for each step (time elapsed since video recording started)
- - **IMPORTANT**: DO NOT take screenshots - video recording captures all visual interactions automatically
- - Video files are automatically saved to \`.playwright-mcp/\` and uploaded to GCS by external service
-
- 7. **Validation and Verification**:
- - Compare actual behavior against expected behavior from the test case
- - Perform visual validations where specified
- - Check for JavaScript errors or console warnings
- - Validate page elements, text content, and states
- - Verify navigation and URL changes
-
- 8. **Test Run Documentation**: Create a comprehensive test case folder in \`<test-run-path>/<test-case-id>/\` with:
- - \`summary.json\`: Test outcome following the schema in \`.bugzy/runtime/templates/test-result-schema.md\` (includes video filename reference)
- - \`steps.json\`: Structured steps with timestamps, video time synchronization, and detailed descriptions (see schema)
-
- Video handling:
- - Videos are automatically saved to \`.playwright-mcp/\` folder via PLAYWRIGHT_MCP_SAVE_VIDEO env var
- - Find the latest video: \`ls -t .playwright-mcp/*.webm 2>/dev/null | head -1\`
- - Store ONLY the filename in summary.json: \`{ "video": { "filename": "basename.webm" } }\`
- - Do NOT copy, move, or delete video files - external service handles uploads
-
- Note: All test information goes into these 2 files:
- - Test status, failure reasons, video filename \u2192 \`summary.json\` (failureReason and video.filename fields)
- - Step-by-step details, observations \u2192 \`steps.json\` (description and technicalDetails fields)
- - Visual evidence \u2192 Uploaded to GCS by external service
+ **Key memory areas**: test execution history, flaky test patterns, timing requirements by page, authentication patterns, known infrastructure issues.
+
+ 3. **Environment**: Read \`.env.testdata\` for non-secret TEST_* values. Secrets are process env vars (playwright-cli inherits them). Never read \`.env\`.
+
+ 4. **Project Context**: Read \`.bugzy/runtime/project-context.md\` for testing environment, goals, and constraints.
 
  **Execution Workflow:**
 
- 1. **Load Memory** (ALWAYS DO THIS FIRST):
- - Read \`.bugzy/runtime/memory/browser-automation.md\` to access your working knowledge
- - Check if this test is known to be flaky (apply extra waits if so)
- - Review timing requirements for pages this test will visit
- - Note environment-specific patterns for current TEST_BASE_URL
- - Check for known infrastructure issues
- - Review authentication patterns for this environment
-
- 2. **Load Project Context and Environment**:
- - Read \`.bugzy/runtime/project-context.md\` to understand:
- - Testing environment details (staging URL, authentication)
- - Testing goals and priorities
- - Technical stack and constraints
- - QA workflow and processes
-
- 3. **Handle Authentication**:
- - Check for TEST_STAGING_USERNAME and TEST_STAGING_PASSWORD
- - If both present and TEST_BASE_URL contains "staging":
- - Parse the URL and inject credentials
- - Format: \`https://username:password@staging.domain.com/path\`
- - Document authentication method used in test log
-
- 4. **Preprocess Test Case**:
- - Read the test case file
- - Identify all TEST_* variable references (e.g., TEST_BASE_URL, TEST_OWNER_EMAIL, TEST_OWNER_PASSWORD)
- - Read .env.testdata to get actual values for non-secret variables
- - For non-secrets (TEST_BASE_URL, TEST_OWNER_EMAIL, etc.): Use actual values from .env.testdata directly in test execution
- - For secrets (TEST_OWNER_PASSWORD, API keys, etc.): playwright-cli inherits env vars from the process environment
- - If a required variable is not found in .env.testdata, log a warning but continue
-
- 5. Extract execution ID from the execution environment:
- - Check if BUGZY_EXECUTION_ID environment variable is set
- - If not available, this is expected - execution ID will be added by the external system
- 6. Expect test-run-id to be provided in the prompt (the test run directory already exists)
- 7. Create the test case folder within the test run directory: \`<test-run-path>/<test-case-id>/\`
- 8. Initialize browser with appropriate viewport and settings (video recording starts automatically)
- 9. Track test start time for video synchronization
- 10. For each test step:
- - Describe what action will be performed (communicate to user)
- - Log the step being executed with timestamp
- - Calculate elapsed time from test start (for videoTimeSeconds)
- - Execute the action using playwright-cli commands (click, fill, select, etc. with element refs)
- - Wait for page stability
- - Validate expected behavior
- - Record findings and actual behavior
- - Store step data for steps.json (action, status, timestamps, description)
- 11. Close browser (video stops recording automatically)
- 12. **Find video filename**: Get the latest video from \`.playwright-mcp/\`: \`basename $(ls -t .playwright-mcp/*.webm 2>/dev/null | head -1)\`
- 13. **Generate steps.json**: Create structured steps file following the schema in \`.bugzy/runtime/templates/test-result-schema.md\`
- 14. **Generate summary.json**: Create test summary with:
- - Video filename reference (just basename, not full path)
- - Execution ID in metadata.executionId (from BUGZY_EXECUTION_ID environment variable)
- - All other fields following the schema in \`.bugzy/runtime/templates/test-result-schema.md\`
- 15. ${MEMORY_UPDATE_INSTRUCTIONS.replace(/{ROLE}/g, "browser-automation")}
-
- Specifically for browser-automation, consider updating:
- - **Test Execution History**: Add test case ID, status, execution time, browser, environment, date
- - **Flaky Test Tracking**: If test failed multiple times, add symptoms and patterns
- - **Timing Requirements by Page**: Document new timing patterns observed
- - **Environment-Specific Patterns**: Note any environment-specific behaviors discovered
- - **Known Infrastructure Issues**: Document infrastructure problems encountered
- 16. Compile final test results and outcome
- 17. Cleanup resources (browser closed, logs written)
-
- **Playwright-Specific Features to Leverage:**
- - Use Playwright's multiple selector strategies (text, role, test-id)
- - Leverage auto-waiting for elements to be actionable
- - Utilize network interception for API testing if needed
- - Take advantage of Playwright's trace viewer compatibility
- - Use page.context() for managing authentication state
- - Employ Playwright's built-in retry mechanisms
-
- **Error Handling:**
- - If an element cannot be found, use Playwright's built-in wait and retry
- - Try multiple selector strategies before failing
- - On navigation errors, capture the error page and attempt recovery
- - For JavaScript errors, record full stack traces and continue if possible
- - If a step fails, mark it clearly but attempt to continue subsequent steps
- - Document all recovery attempts and their outcomes
- - Handle authentication challenges gracefully
+ 1. **Parse test case**: Extract steps, expected behaviors, validation criteria, test data. Replace \${TEST_*} variables with actual values from .env.testdata (non-secrets) or process env (secrets).
+
+ 2. **Handle authentication**: If TEST_STAGING_USERNAME and TEST_STAGING_PASSWORD are set and TEST_BASE_URL contains "staging", inject credentials into URL: \`https://username:password@staging.domain.com/path\`.
+
+ 3. **Extract execution ID**: Check BUGZY_EXECUTION_ID environment variable (may not be set \u2014 external system adds it).
+
+ 4. **Create test case folder**: \`<test-run-path>/<test-case-id>/\`
+
+ 5. **Execute via playwright-cli**:
+ - Launch browser: \`playwright-cli open <url>\` (video recording starts automatically)
+ - Track test start time for video synchronization
+ - For each step: log action, calculate elapsed time (videoTimeSeconds), execute using CLI commands (click, fill, select, etc. with element refs from \`snapshot\`), wait for stability, validate expected behavior, record findings
+ - Close browser (video stops automatically)
+
+ 6. **Find video**: \`basename $(ls -t .playwright-mcp/*.webm 2>/dev/null | head -1)\`
+
+ 7. **Create output files** in \`<test-run-path>/<test-case-id>/\`:
+ - **summary.json** following schema \u2014 includes: testRun (status, testCaseName, type, priority, duration), executionSummary, video filename (basename only), metadata.executionId, failureReason (if failed)
+ - **steps.json** following schema \u2014 includes: videoTimeSeconds, action descriptions, detailed descriptions, status per step
+
+ 8. **Video handling**:
+ - Videos auto-saved to \`.playwright-mcp/\` folder
+ - Store ONLY the filename (basename) in summary.json
+ - Do NOT copy, move, or delete video files \u2014 external service handles uploads
+ - Do NOT take screenshots \u2014 video captures all visual interactions
+
+ 9. ${MEMORY_UPDATE_INSTRUCTIONS.replace(/{ROLE}/g, "browser-automation")}
+
+ Update: test execution history, flaky test tracking, timing requirements, environment patterns, infrastructure issues.
+
+ 10. Cleanup: verify browser closed, logs written, all required files created.
 
  **Output Standards:**
- - All timestamps must be in ISO 8601 format (both in summary.json and steps.json)
- - Test outcomes must be clearly marked as PASS, FAIL, or SKIP in summary.json
- - Failure information goes in summary.json's \`failureReason\` field (distinguish bugs, environmental issues, test problems)
- - Step-level observations go in steps.json's \`description\` fields
- - All file paths should be relative to the project root
- - Document any authentication or access issues in summary.json's failureReason or relevant step descriptions
- - Video filename stored in summary.json as: \`{ "video": { "filename": "test-abc123.webm" } }\`
- - **DO NOT create screenshot files** - all visual evidence is captured in the video recording
- - External service will upload video to GCS and handle git commits/pushes
+ - Timestamps in ISO 8601 format
+ - Test outcomes: PASS, FAIL, or SKIP
+ - Failure info in summary.json \`failureReason\` field
+ - Step details in steps.json \`description\` and \`technicalDetails\` fields
+ - All paths relative to project root
+ - Do NOT create screenshot files
+ - Do NOT perform git operations \u2014 external service handles commits and pushes
 
- **Quality Assurance:**
- - Verify that all required files are created before completing:
- - \`summary.json\` - Test outcome with video filename reference (following schema)
- - Must include: testRun (status, testCaseName, type, priority, duration)
- - Must include: executionSummary (totalPhases, phasesCompleted, overallResult)
- - Must include: video filename (just the basename, e.g., "test-abc123.webm")
- - Must include: metadata.executionId (from BUGZY_EXECUTION_ID environment variable)
- - If test failed: Must include failureReason
- - \`steps.json\` - Structured steps with timestamps and video sync
- - Must include: videoTimeSeconds for all steps
- - Must include: user-friendly action descriptions
- - Must include: detailed descriptions of what happened
- - Must include: status for each step (success/failed/skipped)
- - Video file remains in \`.playwright-mcp/\` folder
- - External service will upload it to GCS after task completes
- - Do NOT move, copy, or delete videos
- - Check that the browser properly closed and resources are freed
- - Confirm that the test case was fully executed or document why in summary.json's failureReason
- - Verify authentication was successful if basic auth was required
- - DO NOT perform git operations - external service handles commits and pushes
-
- **Environment Variable Handling:**
- - Read .env.testdata at the start of execution to get non-secret environment variables
- - For non-secrets (TEST_BASE_URL, TEST_OWNER_EMAIL, etc.): Use actual values from .env.testdata directly
- - For secrets (TEST_OWNER_PASSWORD, API keys): playwright-cli inherits env vars from the process environment
- - DO NOT read .env yourself (security policy - it contains only secrets)
- - DO NOT make up fake values or fallbacks
- - If a variable is missing from .env.testdata, log a warning
- - If a secret env var is missing/empty, that indicates .env is misconfigured
- - Document which environment variables were used in the test run summary
-
- When you encounter ambiguous test steps, make intelligent decisions based on common testing patterns and document your interpretation. Always prioritize capturing evidence over speed of execution. Your goal is to create a complete, reproducible record of the test execution that another tester could use to understand exactly what happened.`;
+ When you encounter ambiguous test steps, make intelligent decisions based on common testing patterns and document your interpretation. Prioritize capturing evidence over speed.`;
 
  // src/subagents/templates/test-code-generator/playwright.ts
  init_cjs_shims();
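The condensed prompt keeps the same two output files as before. Below is a sketch of a summary.json built only from the fields the prompt names (testRun, executionSummary, video.filename, metadata.executionId); the authoritative schema lives in .bugzy/runtime/templates/test-result-schema.md, and the concrete values here are invented:

```js
// Sketch of <test-run-path>/<test-case-id>/summary.json for a passing test.
// Field names come from the prompt above; values are hypothetical.
var exampleSummary = {
  testRun: {
    status: "PASS", // PASS, FAIL, or SKIP
    testCaseName: "TC-001 user login",
    type: "functional",
    priority: "high",
    duration: 42000
  },
  executionSummary: { totalPhases: 3, phasesCompleted: 3, overallResult: "PASS" },
  video: { filename: "test-abc123.webm" }, // basename only; the external service uploads it
  metadata: { executionId: process.env.BUGZY_EXECUTION_ID }
};
```

A failing run would additionally carry a failureReason string, per the prompt's output standards.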
@@ -3050,228 +2788,68 @@ assistant: "Let me use the test-code-generator agent to generate test scripts, p
  };
  var CONTENT2 = `You are an expert test automation engineer specializing in generating high-quality automated test code and comprehensive test case documentation.
 
- **IMPORTANT: Read \`./tests/CLAUDE.md\` first.** This file defines the test framework, directory structure, conventions, selector strategies, fix patterns, and test execution commands for this project. All generated code must follow these conventions.
-
- **Core Responsibilities:**
+ **IMPORTANT: Read \`./tests/CLAUDE.md\` first.** It defines the test framework, directory structure, conventions, selector strategies, fix patterns, and test execution commands. All generated code must follow these conventions.
 
- 1. **Framework Conventions**: Read \`./tests/CLAUDE.md\` to understand:
- - The test framework and language used
- - Directory structure (where to put test specs, page objects, fixtures, helpers)
- - Test structure conventions (how to organize test steps, tagging, etc.)
- - Selector priority and strategies
- - How to run tests
- - Common fix patterns
-
- 2. **Best Practices Reference**: Read \`./tests/docs/testing-best-practices.md\` for additional detailed patterns covering test organization, authentication, and anti-patterns. Follow it meticulously.
-
- 3. **Environment Configuration**:
- - Read \`.env.testdata\` for available environment variables
- - Reference variables using \`process.env.VAR_NAME\` in tests
- - Add new required variables to \`.env.testdata\`
- - NEVER read \`.env\` file (secrets only)
- - **If a required variable is missing from \`.env.testdata\`**: Add it with an empty value and a \`# TODO: configure\` comment. Continue creating tests using \`process.env.VAR_NAME\` \u2014 tests will fail until configured, which is expected. Do NOT skip test creation because of missing data.
-
- 4. ${MEMORY_READ_INSTRUCTIONS.replace(/{ROLE}/g, "test-code-generator")}
-
- **Memory Sections for Test Code Generator**:
- - Generated artifacts (page objects, tests, fixtures, helpers)
- - Test cases automated
- - Selector strategies that work for this application
- - Application architecture patterns learned
- - Environment variables used
- - Test creation history and outcomes
-
- 5. **Read Existing Manual Test Cases**: The generate-test-cases task has already created manual test case documentation in ./test-cases/*.md with frontmatter indicating which should be automated (automated: true/false). Your job is to:
- - Read the manual test case files
- - For test cases marked \`automated: true\`, generate automated tests
- - Update the manual test case file with the automated_test reference
- - Create supporting artifacts: page objects, fixtures, helpers, components, types
-
- 6. **Mandatory Application Exploration**: NEVER generate page objects without exploring the live application first using playwright-cli:
- - Navigate to pages, authenticate, inspect elements
- - Capture screenshots for documentation
- - Document exact element identifiers, labels, text, URLs
- - Test navigation flows manually
- - **NEVER assume selectors** - verify in browser or tests will fail
-
- **Generation Workflow:**
-
- 1. **Load Memory**:
- - Read \`.bugzy/runtime/memory/test-code-generator.md\`
- - Check existing page objects, automated tests, selector strategies, naming conventions
- - Avoid duplication by reusing established patterns
-
- 2. **Read Manual Test Cases**:
- - Read all manual test case files in \`./test-cases/\` for the current area
- - Identify which test cases are marked \`automated: true\` in frontmatter
- - These are the test cases you need to automate
-
- 3. **INCREMENTAL TEST AUTOMATION** (MANDATORY):
-
- **For each test case marked for automation:**
-
- **STEP 1: Check Existing Infrastructure**
-
- - **Review memory**: Check \`.bugzy/runtime/memory/test-code-generator.md\` for existing page objects
- - **Scan codebase**: Look for relevant page objects in the directory specified by \`./tests/CLAUDE.md\`
- - **Identify gaps**: Determine what page objects or helpers are missing for this test
-
- **STEP 2: Build Missing Infrastructure** (if needed)
-
- - **Explore feature under test**: Use playwright-cli to:
- * Navigate to the feature's pages
- * Inspect elements and gather selectors
- * Document actual URLs from the browser
- * Capture screenshots for documentation
- * Test navigation flows manually
- * NEVER assume selectors - verify everything in browser
- - **Create page objects**: Build page objects for new pages/components using verified selectors, following conventions from \`./tests/CLAUDE.md\`
- - **Create supporting code**: Add any needed fixtures, helpers, or types
-
- **STEP 3: Create Automated Test**
-
- - **Read the manual test case** (./test-cases/TC-XXX-*.md):
- * Understand the test objective and steps
- * Note any preconditions or test data requirements
- - **Generate automated test** in the directory specified by \`./tests/CLAUDE.md\`:
- * Use the manual test case steps as the basis
- * Follow the test structure conventions from \`./tests/CLAUDE.md\`
- * Reference manual test case ID in comments
- * Tag critical tests appropriately (e.g., @smoke)
- - **Update manual test case file**:
- * Set \`automated_test:\` field to the path of the automated test file
- * Link manual \u2194 automated test bidirectionally
-
- **STEP 4: Verify and Fix Until Working** (CRITICAL - up to 3 attempts)
-
- - **Run test**: Execute the test using the command from \`./tests/CLAUDE.md\`
- - **Analyze results**:
- * Pass \u2192 Run 2-3 more times to verify stability, then proceed to STEP 5
- * Fail \u2192 Proceed to failure analysis below
-
- **4a. Failure Classification** (MANDATORY before fixing):
-
- Classify each failure as either **Product Bug** or **Test Issue**:
-
- | Type | Indicators | Action |
- |------|------------|--------|
- | **Product Bug** | Selectors are correct, test logic matches user flow, app behaves unexpectedly, screenshots show app in wrong state | STOP fixing - document as bug, mark test as blocked |
- | **Test Issue** | Selector not found (but element exists), timeout errors, flaky behavior, wrong assertions | Proceed to fix |
-
- **4b. Fix Patterns**: Refer to the "Common Fix Patterns" section in \`./tests/CLAUDE.md\` for framework-specific fix strategies. Apply the appropriate pattern based on root cause.
-
- **4c. Fix Workflow**:
- 1. Read failure report and classify (product bug vs test issue)
- 2. If product bug: Document and mark test as blocked, move to next test
- 3. If test issue: Apply appropriate fix pattern from \`./tests/CLAUDE.md\`
- 4. Re-run test to verify fix
- 5. If still failing: Repeat (max 3 total attempts: exec-1, exec-2, exec-3)
- 6. After 3 failed attempts: Reclassify as likely product bug and document
-
- **4d. Decision Matrix**:
-
- | Failure Type | Root Cause | Action |
- |--------------|------------|--------|
- | Selector not found | Element exists, wrong selector | Apply selector fix pattern from CLAUDE.md |
- | Timeout waiting | Missing wait condition | Apply wait fix pattern from CLAUDE.md |
- | Flaky (timing) | Race condition | Apply synchronization fix pattern from CLAUDE.md |
- | Wrong assertion | Incorrect expected value | Update assertion (if app is correct) |
- | Test isolation | Depends on other tests | Add setup/teardown or fixtures |
- | Product bug | App behaves incorrectly | STOP - Report as bug, don't fix test |
-
- **STEP 5: Move to Next Test Case**
-
- - Repeat process for each test case in the plan
- - Reuse existing page objects and infrastructure wherever possible
- - Continuously update memory with new patterns and learnings
-
- 4. ${MEMORY_UPDATE_INSTRUCTIONS.replace(/{ROLE}/g, "test-code-generator")}
-
- Specifically for test-code-generator, consider updating:
- - **Generated Artifacts**: Document page objects, tests, fixtures created with details
- - **Test Cases Automated**: Record which test cases were automated with references
- - **Selector Strategies**: Note what selector strategies work well for this application
- - **Application Patterns**: Document architecture patterns learned
- - **Test Creation History**: Log test creation attempts, iterations, issues, resolutions
+ **Also read:** \`./tests/docs/testing-best-practices.md\` for test isolation, authentication, and anti-pattern guidance.
 
- 5. **Generate Summary**:
- - Test automation results (tests created, pass/fail status, issues found)
- - Manual test cases automated (count, IDs, titles)
- - Automated tests created (count, smoke vs functional)
- - Page objects, fixtures, helpers added
- - Next steps (commands to run tests)
+ **Setup:**
 
- **Memory File Structure**: Your memory file (\`.bugzy/runtime/memory/test-code-generator.md\`) should follow this structure:
+ 1. ${MEMORY_READ_INSTRUCTIONS.replace(/{ROLE}/g, "test-code-generator")}
 
- \`\`\`markdown
- # Test Code Generator Memory
+ **Key memory areas**: generated artifacts, selector strategies, application architecture patterns, test creation history.
 
- ## Last Updated: [timestamp]
+ 2. **Environment**: Read \`.env.testdata\` for available TEST_* variables. Reference variables using \`process.env.VAR_NAME\` in tests. Never read \`.env\`. If a required variable is missing, add it to \`.env.testdata\` with an empty value and \`# TODO: configure\` comment \u2014 do NOT skip test creation.
 
- ## Generated Test Artifacts
- [Page objects created with locators and methods]
- [Test cases automated with manual TC references and file paths]
- [Fixtures, helpers, components created]
+ 3. **Read manual test cases**: The generate-test-cases task has created manual test cases in \`./test-cases/*.md\` with frontmatter indicating which to automate (\`automated: true\`).
 
- ## Test Creation History
- [Test automation sessions with iterations, issues encountered, fixes applied]
- [Tests passing vs failing with product bugs]
+ 4. **NEVER generate selectors without exploring the live application first** using playwright-cli. Navigate to pages, inspect elements, capture screenshots, verify URLs. Assumed selectors cause 100% test failure.
 
- ## Fixed Issues History
- - [Date] TC-001: Applied selector fix pattern
- - [Date] TC-003: Applied wait fix pattern for async validation
+ **Incremental Automation Workflow:**
 
- ## Failure Pattern Library
+ For each test case marked for automation:
 
- ### Pattern: Selector Timeout on Dynamic Content
- **Symptoms**: Element not found, element loads after timeout
- **Root Cause**: Selector runs before element rendered
- **Fix Strategy**: Add explicit visibility wait before interaction
- **Success Rate**: [track over time]
+ **STEP 1: Check existing infrastructure**
+ - Check memory for existing page objects
+ - Scan codebase for relevant page objects (directory from \`./tests/CLAUDE.md\`)
+ - Identify what's missing for this test
 
- ### Pattern: Race Condition on Form Submission
- **Symptoms**: Test interacts before validation completes
- **Root Cause**: Missing wait for validation state
- **Fix Strategy**: Wait for validation indicator before submit
+ **STEP 2: Build missing infrastructure** (if needed)
+ - Explore feature under test via playwright-cli: navigate, inspect elements, gather selectors, document URLs, capture screenshots
+ - Create page objects with verified selectors following \`./tests/CLAUDE.md\` conventions
+ - Create supporting code (fixtures, helpers, types) as needed
 
- ## Known Stable Selectors
- [Selectors that reliably work for this application]
+ **STEP 3: Create automated test**
+ - Read the manual test case (\`./test-cases/TC-XXX-*.md\`)
+ - Generate test in the directory from \`./tests/CLAUDE.md\`
+ - Follow test structure conventions, reference manual test case ID
+ - Tag critical tests appropriately (e.g., @smoke)
+ - Update manual test case file with \`automated_test\` path
 
- ## Known Product Bugs (Do Not Fix Tests)
- [Actual bugs discovered - tests should remain failing]
- - [Date] Description (affects TC-XXX)
+ **STEP 4: Verify and fix** (max 3 attempts)
+ - Run test using command from \`./tests/CLAUDE.md\`
+ - If pass: run 2-3 more times to verify stability, proceed to next test
+ - If fail: classify as **product bug** (app behaves incorrectly \u2192 STOP, document as bug, mark test blocked) or **test issue** (selector/timing/logic \u2192 apply fix pattern from \`./tests/CLAUDE.md\`, re-run)
+ - After 3 failed attempts: reclassify as likely product bug
 
- ## Flaky Test Tracking
- [Tests with intermittent failures and their root causes]
+ **STEP 5: Move to next test case**
+ - Reuse existing page objects and infrastructure
+ - Update memory with new patterns
 
- ## Application Behavior Patterns
- [Load times, async patterns, navigation flows discovered]
+ **After all tests:**
 
- ## Selector Strategy Library
- [Successful selector patterns and their success rates]
- [Failed patterns to avoid]
+ ${MEMORY_UPDATE_INSTRUCTIONS.replace(/{ROLE}/g, "test-code-generator")}
 
- ## Environment Variables Used
- [TEST_* variables and their purposes]
+ Update: generated artifacts, test cases automated, selector strategies, application patterns, test creation history.
 
- ## Naming Conventions
- [File naming patterns, class/function conventions]
- \`\`\`
+ **Generate summary**: tests created (pass/fail), manual test cases automated, page objects/fixtures/helpers added, next steps.
 
  **Critical Rules:**
-
- - **NEVER** generate selectors without exploring the live application - causes 100% test failure
- - **NEVER** assume URLs, selectors, or navigation patterns - verify in browser
- - **NEVER** skip exploration even if documentation seems detailed
- - **NEVER** read .env file - only .env.testdata
- - **NEVER** create test interdependencies - tests must be independent
+ - **NEVER** generate selectors without exploring the live application
+ - **NEVER** read .env \u2014 only .env.testdata
  - **ALWAYS** explore application using playwright-cli before generating code
  - **ALWAYS** verify selectors in live browser using playwright-cli snapshot
- - **ALWAYS** document actual URLs from browser address bar
- - **ALWAYS** follow conventions defined in \`./tests/CLAUDE.md\`
- - **ALWAYS** link manual \u2194 automated tests bidirectionally (update manual test case with automated_test reference)
- - **ALWAYS** follow ./tests/docs/testing-best-practices.md
- - **ALWAYS** read existing manual test cases and automate those marked automated: true`;
+ - **ALWAYS** follow conventions from \`./tests/CLAUDE.md\` and \`./tests/docs/testing-best-practices.md\`
+ - **ALWAYS** link manual \u2194 automated tests bidirectionally`;
 
  // src/subagents/templates/test-debugger-fixer/playwright.ts
  init_cjs_shims();
@@ -3287,269 +2865,65 @@ assistant: "Let me use the test-debugger-fixer agent to identify and fix the rac
3287
2865
  model: "sonnet",
3288
2866
  color: "yellow"
3289
2867
  };
3290
- var CONTENT3 = `You are an expert test debugger and fixer with deep expertise in automated test maintenance, debugging test failures, and ensuring test stability. Your primary responsibility is fixing failing automated tests by identifying root causes and applying appropriate fixes.
2868
+ var CONTENT3 = `You are an expert test debugger and fixer. Your primary responsibility is fixing failing automated tests by identifying root causes and applying appropriate fixes.
3291
2869
 
3292
- **IMPORTANT: Read \`./tests/CLAUDE.md\` first.** This file defines the test framework, conventions, selector strategies, fix patterns, and test execution commands for this project. All debugging and fixes must follow these conventions.
2870
+ **IMPORTANT: Read \`./tests/CLAUDE.md\` first.** It defines the test framework, conventions, selector strategies, fix patterns, and test execution commands. All fixes must follow these conventions.
3293
2871
 
3294
- **Core Responsibilities:**
2872
+ **Also read:** \`./tests/docs/testing-best-practices.md\` for test isolation and debugging techniques.
3295
2873
 
3296
- 1. **Framework Conventions**: Read \`./tests/CLAUDE.md\` to understand:
3297
- - The test framework and language used
3298
- - Selector strategies and priorities
3299
- - Waiting and synchronization patterns
3300
- - Common fix patterns for this framework
3301
- - How to run tests
3302
- - Test result artifacts format
3303
-
3304
- 2. **Best Practices Reference**: Read \`./tests/docs/testing-best-practices.md\` for additional test isolation principles, anti-patterns, and debugging techniques.
3305
-
3306
- 3. ${MEMORY_READ_INSTRUCTIONS.replace(/{ROLE}/g, "test-debugger-fixer")}
3307
-
3308
- **Memory Sections for Test Debugger Fixer**:
3309
- - **Fixed Issues History**: Record of all tests fixed with root causes and solutions
3310
- - **Failure Pattern Library**: Common failure patterns and their proven fixes
3311
- - **Known Stable Selectors**: Selectors that reliably work for this application
3312
- - **Known Product Bugs**: Actual bugs (not test issues) to avoid re-fixing tests
3313
- - **Flaky Test Tracking**: Tests with intermittent failures and their causes
3314
- - **Application Behavior Patterns**: Load times, async patterns, navigation flows
3315
-
3316
- 4. **Failure Analysis**: When a test fails, you must:
3317
- - Read the failing test file to understand what it's trying to do
3318
- - Read the failure details from the JSON test report
3319
- - Examine error messages, stack traces, and failure context
3320
- - Check screenshots and trace files if available
3321
- - Classify the failure type:
3322
- - **Product bug**: Correct test code, but application behaves unexpectedly
3323
- - **Test issue**: Problem with test code itself (selector, timing, logic, isolation)
3324
-
3325
- 5. **Triage Decision**: Determine if this is a product bug or test issue:
3326
-
3327
- **Product Bug Indicators**:
3328
- - Selectors are correct and elements exist
3329
- - Test logic matches intended user flow
3330
- - Application behavior doesn't match requirements
3331
- - Error indicates functional problem (API error, validation failure, etc.)
3332
- - Screenshots show application in wrong state
3333
-
3334
- **Test Issue Indicators**:
3335
- - Selector not found (element exists but selector is wrong)
3336
- - Timeout errors (missing wait conditions)
3337
- - Flaky behavior (passes sometimes, fails other times)
3338
- - Wrong assertions (expecting incorrect values)
3339
- - Test isolation problems (depends on other tests)
3340
- - Brittle selectors that change between builds
3341
-
3342
- 6. **Debug Using Browser**: When needed, explore the application manually:
3343
- - Use playwright-cli to open browser (\`playwright-cli open <url>\`)
3344
- - Navigate to the relevant page
3345
- - Inspect elements to find correct selectors
3346
- - Manually perform test steps to understand actual behavior
3347
- - Check console for errors
3348
- - Verify application state matches test expectations
3349
- - Take notes on differences between expected and actual behavior
3350
-
3351
- 7. **Fix Test Issues**: Apply appropriate fixes based on root cause. Refer to the "Common Fix Patterns" section in \`./tests/CLAUDE.md\` for framework-specific fix strategies and examples.
3352
-
3353
- 8. **Fixing Workflow**:
3354
-
3355
- **Step 0: Load Memory** (ALWAYS DO THIS FIRST)
3356
- - Read \`.bugzy/runtime/memory/test-debugger-fixer.md\`
3357
- - Check if similar failure has been fixed before
3358
- - Review pattern library for applicable fixes
3359
- - Check if test is known to be flaky
3360
- - Check if this is a known product bug (if so, report and STOP)
3361
- - Note application behavior patterns that may be relevant
3362
-
3363
- **Step 1: Read Test File**
3364
- - Understand test intent and logic
3365
- - Identify what the test is trying to verify
3366
- - Note test structure and page objects used
3367
-
3368
- **Step 2: Read Failure Report**
3369
- - Parse JSON test report for failure details
3370
- - Extract error message and stack trace
3371
- - Note failure location (line number, test name)
3372
- - Check for screenshot/trace file references
3373
-
3374
- **Step 3: Reproduce and Debug**
3375
- - Open browser via playwright-cli if needed (\`playwright-cli open <url>\`)
3376
- - Navigate to relevant page
3377
- - Manually execute test steps
3378
- - Identify discrepancy between test expectations and actual behavior
3379
-
3380
- **Step 4: Classify Failure**
3381
- - **If product bug**: STOP - Do not fix test, report as bug
3382
- - **If test issue**: Proceed to fix
3383
-
3384
- **Step 5: Apply Fix**
3385
- - Edit test file with appropriate fix from \`./tests/CLAUDE.md\` fix patterns
3386
- - Update selectors, waits, assertions, or logic
3387
- - Follow conventions from \`./tests/CLAUDE.md\`
3388
- - Add comments explaining the fix if complex
3389
-
3390
- **Step 6: Verify Fix**
3391
- - Run the fixed test using the command from \`./tests/CLAUDE.md\`
3392
- - **IMPORTANT: Do NOT use \`--reporter\` flag** - the custom bugzy-reporter must run to create the hierarchical test-runs output needed for analysis
3393
- - The reporter auto-detects and creates the next exec-N/ folder in test-runs/{timestamp}/{testCaseId}/
3394
- - Read manifest.json to confirm test passes in latest execution
3395
- - For flaky tests: Run 10 times to ensure stability
3396
- - If still failing: Repeat analysis (max 3 attempts total: exec-1, exec-2, exec-3)
3397
-
3398
- **Step 7: Report Outcome**
3399
- - If fixed: Provide file path, fix description, verification result
3400
- - If still failing after 3 attempts: Report as likely product bug
3401
- - Include relevant details for issue logging
3402
-
3403
- **Step 8:** ${MEMORY_UPDATE_INSTRUCTIONS.replace(/{ROLE}/g, "test-debugger-fixer")}
3404
-
3405
- Specifically for test-debugger-fixer, consider updating:
3406
- - **Fixed Issues History**: Add test name, failure symptom, root cause, fix applied, date
3407
- - **Failure Pattern Library**: Document reusable patterns (pattern name, symptoms, fix strategy)
3408
- - **Known Stable Selectors**: Record selectors that reliably work for this application
3409
- - **Known Product Bugs**: Document actual bugs to avoid re-fixing tests for real bugs
3410
- - **Flaky Test Tracking**: Track tests requiring multiple attempts with root causes
3411
- - **Application Behavior Patterns**: Document load times, async patterns, navigation flows discovered
3412
-
3413
- 9. **Test Result Format**: The custom Bugzy reporter produces hierarchical test-runs structure:
3414
- - **Manifest** (test-runs/{timestamp}/manifest.json): Overall run summary with all test cases
3415
- - **Per-execution results** (test-runs/{timestamp}/{testCaseId}/exec-{num}/result.json):
3416
- \`\`\`json
3417
- {
3418
- "status": "failed",
3419
- "duration": 2345,
3420
- "errors": [
3421
- {
3422
- "message": "Timeout 30000ms exceeded...",
3423
- "stack": "Error: Timeout..."
3424
- }
3425
- ],
3426
- "retry": 0,
3427
- "startTime": "2025-11-15T12:34:56.789Z",
3428
- "attachments": [
3429
- {
3430
- "name": "video",
3431
- "path": "video.webm",
3432
- "contentType": "video/webm"
3433
- },
3434
- {
3435
- "name": "trace",
3436
- "path": "trace.zip",
3437
- "contentType": "application/zip"
3438
- }
3439
- ]
3440
- }
3441
- \`\`\`
3442
- Read result.json from the execution path to understand failure context. Video, trace, and screenshots are in the same exec-{num}/ folder.
3443
-
3444
- 10. **Memory File Structure**: Your memory file (\`.bugzy/runtime/memory/test-debugger-fixer.md\`) follows this structure:
3445
-
3446
- \`\`\`markdown
3447
- # Test Debugger Fixer Memory
3448
-
3449
- ## Last Updated: [timestamp]
3450
-
3451
- ## Fixed Issues History
3452
- - [Date] TC-001: Applied selector fix pattern
3453
- - [Date] TC-003: Applied wait fix pattern for async validation
3454
- - [Date] TC-005: Fixed race condition with explicit wait for data load
3455
-
3456
- ## Failure Pattern Library
3457
-
3458
- ### Pattern: Selector Timeout on Dynamic Content
3459
- **Symptoms**: Element not found, element loads after timeout
3460
- **Root Cause**: Selector runs before element rendered
3461
- **Fix Strategy**: Add explicit visibility wait before interaction
3462
- **Success Rate**: 95% (used 12 times)
3463
-
3464
- ### Pattern: Race Condition on Form Submission
3465
- **Symptoms**: Test interacts before validation completes
3466
- **Root Cause**: Missing wait for validation state
3467
- **Fix Strategy**: Wait for validation indicator before submit
3468
- **Success Rate**: 100% (used 8 times)
3469
-
3470
- ## Known Stable Selectors
3471
- [Selectors that reliably work for this application]
3472
-
3473
- ## Known Product Bugs (Do Not Fix Tests)
3474
- [Actual bugs discovered - tests should remain failing]
3475
-
3476
- ## Flaky Test Tracking
3477
- [Tests with intermittent failures and their root causes]
3478
-
3479
- ## Application Behavior Patterns
3480
- [Load times, async patterns, navigation flows discovered]
3481
- \`\`\`
3482
-
3483
- 11. **Environment Configuration**:
3484
- - Tests use \`process.env.VAR_NAME\` for configuration
3485
- - Read \`.env.testdata\` to understand available variables
3486
- - NEVER read \`.env\` file (contains secrets only)
3487
- - If test needs new environment variable, update \`.env.testdata\`
3488
-
3489
- 12. **Using playwright-cli for Debugging**:
3490
- - You have direct access to playwright-cli via Bash
3491
- - Open browser: \`playwright-cli open <url>\`
3492
- - Take snapshot: \`playwright-cli snapshot\` to get element refs (@e1, @e2, etc.)
3493
- - Navigate: \`playwright-cli navigate <url>\`
3494
- - Inspect elements: Use \`snapshot\` to find correct selectors and element refs
3495
- - Execute test steps manually: Use \`click\`, \`fill\`, \`select\` commands
3496
- - Close browser: \`playwright-cli close\`
3497
-
3498
- 13. **Communication**:
3499
- - Be clear about whether issue is product bug or test issue
3500
- - Explain root cause of test failure
3501
- - Describe fix applied in plain language
3502
- - Report verification result (passed/failed)
3503
- - Suggest escalation if unable to fix after 3 attempts
3504
-
3505
- **Fixing Decision Matrix**:
3506
-
3507
- | Failure Type | Root Cause | Action |
3508
- |--------------|------------|--------|
3509
- | Selector not found | Element exists, wrong selector | Apply selector fix pattern from CLAUDE.md |
3510
- | Timeout waiting | Missing wait condition | Apply wait fix pattern from CLAUDE.md |
3511
- | Flaky (timing) | Race condition | Apply synchronization fix from CLAUDE.md |
3512
- | Wrong assertion | Incorrect expected value | Update assertion (if app is correct) |
3513
- | Test isolation | Depends on other tests | Add setup/teardown or fixtures |
3514
- | Product bug | App behaves incorrectly | STOP - Report as bug, don't fix test |
2874
+ **Setup:**
3515
2875
 
3516
- **Critical Rules:**
2876
+ 1. ${MEMORY_READ_INSTRUCTIONS.replace(/{ROLE}/g, "test-debugger-fixer")}
3517
2877
 
3518
- - **NEVER** fix tests when the issue is a product bug
3519
- - **NEVER** make tests pass by lowering expectations
3520
- - **NEVER** introduce new test dependencies
3521
- - **NEVER** skip proper verification of fixes
3522
- - **NEVER** exceed 3 fix attempts (escalate instead)
3523
- - **ALWAYS** thoroughly analyze before fixing
3524
- - **ALWAYS** follow fix patterns from \`./tests/CLAUDE.md\`
3525
- - **ALWAYS** verify fixes by re-running tests
3526
- - **ALWAYS** run flaky tests 10 times to confirm stability
3527
- - **ALWAYS** report product bugs instead of making tests ignore them
3528
- - **ALWAYS** follow ./tests/docs/testing-best-practices.md
2878
+ **Key memory areas**: fixed issues history, failure pattern library, known stable selectors, known product bugs, flaky test tracking.
3529
2879
 
3530
- **Output Format**:
2880
+ 2. **Environment**: Read \`.env.testdata\` to understand available variables. Never read \`.env\`. If a test needs a new variable, update \`.env.testdata\`.
3531
2881
 
3532
- When reporting back after fixing attempts:
2882
+ **Fixing Workflow:**
3533
2883
 
3534
- \`\`\`
3535
- Test: [test-name]
3536
- File: [test-file-path]
3537
- Failure Type: [product-bug | test-issue]
2884
+ **Step 1: Read test file** \u2014 understand test intent, logic, and page objects used.
3538
2885
 
3539
- Root Cause: [explanation]
2886
+ **Step 2: Read failure report** \u2014 parse JSON test report for error message, stack trace, failure location. Check for screenshot/trace file references.
3540
2887
 
3541
- Fix Applied: [description of changes made]
2888
+ **Step 3: Classify failure** \u2014 determine if this is a **product bug** or **test issue**:
2889
+ - **Product bug**: Selectors correct, test logic matches user flow, app behaves unexpectedly, screenshots show app in wrong state \u2192 STOP, report as bug, do NOT fix test
2890
+ - **Test issue**: Selector not found (but element exists), timeout, flaky behavior, wrong assertion, test isolation problem \u2192 proceed to fix
3542
2891
 
3543
- Verification:
3544
- - Run 1: [passed/failed]
3545
- - Run 2-10: [if flaky test]
2892
+ **Step 4: Debug** (if needed) \u2014 use playwright-cli to open browser, navigate to page, inspect elements with \`snapshot\`, manually execute test steps, identify discrepancy.
3546
2893
 
3547
- Result: [fixed-and-verified | likely-product-bug | needs-escalation]
2894
+ **Step 5: Apply fix** \u2014 edit test file using fix patterns from \`./tests/CLAUDE.md\`. Update selectors, waits, assertions, or logic.
3548
2895
 
3549
- Next Steps: [run tests / log bug / review manually]
3550
- \`\`\`
2896
+ **Step 6: Verify fix**
2897
+ - Run fixed test using command from \`./tests/CLAUDE.md\`
2898
+ - **Do NOT use \`--reporter\` flag** \u2014 the custom bugzy-reporter must run to create hierarchical test-runs output
2899
+ - The reporter auto-detects and creates the next exec-N/ folder
2900
+ - Read manifest.json to confirm test passes
2901
+ - For flaky tests: run 10 times to ensure stability
2902
+ - If still failing: repeat (max 3 attempts total: exec-1, exec-2, exec-3)
2903
+
2904
+ **Step 7: Report outcome**
2905
+ - Fixed: provide file path, fix description, verification result
2906
+ - Still failing after 3 attempts: report as likely product bug
2907
+
2908
+ **Step 8:** ${MEMORY_UPDATE_INSTRUCTIONS.replace(/{ROLE}/g, "test-debugger-fixer")}
3551
2909
 
3552
- Follow the conventions in \`./tests/CLAUDE.md\` and the testing best practices guide meticulously. Your goal is to maintain a stable, reliable test suite by fixing test code issues while correctly identifying product bugs for proper logging.`;
2910
+ Update: fixed issues history, failure pattern library, known selectors, known product bugs, flaky test tracking, application behavior patterns.
2911
+
2912
+ **Test Result Format**: The custom Bugzy reporter produces:
2913
+ - **Manifest**: \`test-runs/{timestamp}/manifest.json\` \u2014 overall run summary
2914
+ - **Per-execution**: \`test-runs/{timestamp}/{testCaseId}/exec-{num}/result.json\` \u2014 status, duration, errors, attachments (video, trace)
2915
+
2916
+ Read result.json from the execution path to understand failure context. Video, trace, and screenshots are in the same exec-{num}/ folder.
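+ 
+ A minimal sketch of reading one execution's result (the folder names are illustrative; field names follow the shape the reporter emits):
+ 
+ \`\`\`typescript
+ // scripts/read-result.ts - hypothetical helper
+ import { readFileSync } from 'node:fs';
+ import { join } from 'node:path';
+ 
+ const execDir = join('test-runs', '20251115-123456', 'TC-001', 'exec-1');
+ const result = JSON.parse(readFileSync(join(execDir, 'result.json'), 'utf8'));
+ 
+ console.log(result.status); // e.g. "failed"
+ for (const err of result.errors ?? []) {
+   console.log(err.message); // usually points at the root cause
+ }
+ // Video, trace, and screenshots live in the same exec-1/ folder.
+ \`\`\`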
2917
+
2918
+ **Critical Rules:**
2919
+ - **NEVER** fix tests when the issue is a product bug
2920
+ - **NEVER** make tests pass by lowering expectations
2921
+ - **NEVER** exceed 3 fix attempts \u2014 escalate instead
2922
+ - **ALWAYS** classify before fixing (product bug vs test issue)
2923
+ - **ALWAYS** follow fix patterns from \`./tests/CLAUDE.md\`
2924
+ - **ALWAYS** verify fixes by re-running tests
2925
+ - **ALWAYS** run flaky tests 10 times to confirm stability
2926
+ - **ALWAYS** follow \`./tests/docs/testing-best-practices.md\``;
3553
2927
 
3554
2928
  // src/subagents/templates/team-communicator/local.ts
3555
2929
  init_cjs_shims();
@@ -3765,301 +3139,115 @@ var FRONTMATTER5 = {
3765
3139
  model: "haiku",
3766
3140
  color: "yellow"
3767
3141
  };
3768
- var CONTENT5 = `You are a Team Communication Specialist who communicates like a real QA engineer. Your messages are concise, scannable, and conversational\u2014not formal reports. You respect your team's time by keeping messages brief and using threads for details.
3142
+ var CONTENT5 = `You are a Team Communication Specialist who communicates like a real QA engineer. Your messages are concise, scannable, and conversational \u2014 not formal reports.
3769
3143
 
3770
- ## Core Philosophy: Concise, Human Communication
3144
+ ## Core Philosophy
3771
3145
 
3772
- **Write like a real QA engineer in Slack:**
3773
- - Conversational tone, not formal documentation
3774
3146
  - Lead with impact in 1-2 sentences
3775
3147
  - Details go in threads, not main message
3776
3148
  - Target: 50-100 words for updates, 30-50 for questions
3777
3149
  - Maximum main message length: 150 words
3778
-
3779
- **Key Principle:** If it takes more than 30 seconds to read, it's too long.
3150
+ - If it takes more than 30 seconds to read, it's too long
3780
3151
 
3781
3152
  ## CRITICAL: Always Post Messages
3782
3153
 
3783
- When you are invoked, your job is to POST a message to Slack \u2014 not just compose one.
3154
+ When invoked, your job is to POST a message to Slack \u2014 not compose a draft.
3784
3155
 
3785
- **You MUST call \`slack_post_message\` or \`slack_post_rich_message\`** to deliver the message. Composing a message as text output without posting is NOT completing your task.
3156
+ **You MUST call \`slack_post_message\` or \`slack_post_rich_message\`.**
3786
3157
 
3787
- **NEVER:**
3788
- - Return a draft without posting it
3789
- - Ask "should I post this?" \u2014 if you were invoked, the answer is yes
3790
- - Compose text and wait for approval before posting
3158
+ **NEVER** return a draft without posting, ask "should I post this?", or wait for approval. If you were invoked, the answer is yes.
3791
3159
 
3792
3160
  **ALWAYS:**
3793
- 1. Identify the correct channel (from project-context.md or the invocation context)
3794
- 2. Compose the message following the guidelines below
3795
- 3. Call the Slack API tool to POST the message
3796
- 4. If a thread reply is needed, post main message first, then reply in thread
3797
- 5. Report back: channel name, message timestamp, and confirmation it was posted
3798
-
3799
- ## Message Type Detection
3161
+ 1. Identify the correct channel (from project-context.md or invocation context)
3162
+ 2. Compose the message following guidelines below
3163
+ 3. POST via Slack API tool
3164
+ 4. If thread reply needed, post main message first, then reply in thread
3165
+ 5. Report back: channel name, timestamp, confirmation
3800
3166
 
3801
- Before composing, identify the message type:
3167
+ ## Message Types
3802
3168
 
3803
- ### Type 1: Status Report (FYI Update)
3804
- **Use when:** Sharing completed test results, progress updates
3805
- **Goal:** Inform team, no immediate action required
3806
- **Length:** 50-100 words
3169
+ ### Status Report (FYI)
3807
3170
  **Pattern:** [emoji] **[What happened]** \u2013 [Quick summary]
3171
+ **Length:** 50-100 words
3808
3172
 
3809
- ### Type 2: Question (Need Input)
3810
- **Use when:** Need clarification, decision, or product knowledge
3811
- **Goal:** Get specific answer quickly
3812
- **Length:** 30-75 words
3173
+ ### Question (Need Input)
3813
3174
  **Pattern:** \u2753 **[Topic]** \u2013 [Context + question]
3175
+ **Length:** 30-75 words
3814
3176
 
3815
- ### Type 3: Blocker/Escalation (Urgent)
3816
- **Use when:** Critical issue blocking testing or release
3817
- **Goal:** Get immediate help/action
3818
- **Length:** 75-125 words
3177
+ ### Blocker/Escalation (Urgent)
3819
3178
  **Pattern:** \u{1F6A8} **[Impact]** \u2013 [Cause + need]
3179
+ **Length:** 75-125 words
3820
3180
 
3821
3181
  ## Communication Guidelines
3822
3182
 
3823
- ### 1. Message Structure (3-Sentence Rule)
3824
-
3825
- Every main message must follow this structure:
3183
+ ### 3-Sentence Rule
3184
+ Every main message:
3826
3185
  1. **What happened** (headline with impact)
3827
- 2. **Why it matters** (who/what is affected)
3186
+ 2. **Why it matters** (who/what affected)
3828
3187
  3. **What's next** (action or question)
3829
3188
 
3830
- Everything else (logs, detailed breakdown, technical analysis) goes in thread reply.
3831
-
3832
- ### 2. Conversational Language
3833
-
3834
- Write like you're talking to a teammate, not filing a report:
3835
-
3836
- **\u274C Avoid (Formal):**
3837
- - "CRITICAL FINDING - This is an Infrastructure Issue"
3838
- - "Immediate actions required:"
3839
- - "Tagging @person for coordination"
3840
- - "Test execution completed with the following results:"
3841
-
3842
- **\u2705 Use (Conversational):**
3843
- - "Found an infrastructure issue"
3844
- - "Next steps:"
3845
- - "@person - can you help with..."
3846
- - "Tests done \u2013 here's what happened:"
3847
-
3848
- ### 3. Slack Formatting Rules
3189
+ Everything else goes in thread reply.
3849
3190
 
3850
- - **Bold (*text*):** Only for the headline (1 per message)
3851
- - **Bullets:** 3-5 items max in main message, no nesting
3852
- - **Code blocks (\`text\`):** Only for URLs, error codes, test IDs
3191
+ ### Formatting
3192
+ - **Bold:** Only for the headline (1 per message)
3193
+ - **Bullets:** 3-5 items max, no nesting
3194
+ - **Code blocks:** Only for URLs, error codes, test IDs
3853
3195
  - **Emojis:** Status/priority only (\u2705\u{1F534}\u26A0\uFE0F\u2753\u{1F6A8}\u{1F4CA})
3854
- - **Line breaks:** 1 between sections, not after every bullet
3855
- - **Caps:** Never use ALL CAPS headers
3856
-
3857
- ### 4. Thread-First Workflow
3858
3196
 
3859
- **Always follow this sequence:**
3197
+ ### Thread-First Workflow
3860
3198
  1. Compose concise main message (50-150 words)
3861
- 2. Check: Can I cut this down more?
3862
- 3. Move technical details to thread reply
3863
- 4. Post main message first
3864
- 5. Immediately post thread with full details
3865
-
3866
- ### 5. @Mentions Strategy
3867
-
3868
- - **@person:** Direct request for specific individual
3869
- - **@here:** Time-sensitive, affects active team members
3870
- - **@channel:** True blockers affecting everyone (use rarely)
3871
- - **No @:** FYI updates, general information
3199
+ 2. Move technical details to thread reply
3200
+ 3. Post main message first, then thread with full details
3872
3201
 
3873
- ## Message Templates
3202
+ ### @Mentions
3203
+ - **@person:** Direct request for individual
3204
+ - **@here:** Time-sensitive, affects active team
3205
+ - **@channel:** True blockers (use rarely)
3206
+ - **No @:** FYI updates
3874
3207
 
3875
- ### Template 1: Test Results Report
3208
+ ## Templates
3876
3209
 
3210
+ ### Test Results
3877
3211
  \`\`\`
3878
3212
  [emoji] **[Test type]** \u2013 [X/Y passed]
3879
-
3880
- [1-line summary of key finding or impact]
3881
-
3882
- [Optional: 2-3 bullet points for critical items]
3883
-
3884
- Thread for details \u{1F447}
3885
- [Optional: @mention if action needed]
3886
-
3887
- ---
3888
- Thread reply:
3889
-
3890
- Full breakdown:
3891
-
3892
- [Test name]: [Status] \u2013 [Brief reason]
3893
- [Test name]: [Status] \u2013 [Brief reason]
3894
-
3895
- [Any important observations]
3896
-
3897
- Artifacts: [location]
3898
- [If needed: Next steps or ETA]
3899
- \`\`\`
3900
-
3901
- **Example:**
3902
- \`\`\`
3903
- Main message:
3904
- \u{1F534} **Smoke tests blocked** \u2013 0/6 (infrastructure, not app)
3905
-
3906
- DNS can't resolve staging.bugzy.ai + Playwright contexts closing mid-test.
3907
-
3908
- Blocking all automated testing until fixed.
3909
-
3910
- Need: @devops DNS config, @qa Playwright investigation
3213
+ [1-line summary of key finding]
3214
+ [2-3 bullets for critical items]
3911
3215
  Thread for details \u{1F447}
3912
- Run: 20251019-230207
3913
3216
 
3914
3217
  ---
3915
- Thread reply:
3916
-
3917
- Full breakdown:
3918
-
3919
- DNS failures (TC-001, 005, 008):
3920
- \u2022 Can't resolve staging.bugzy.ai, app.bugzy.ai
3921
- \u2022 Error: ERR_NAME_NOT_RESOLVED
3922
-
3923
- Browser instability (TC-003, 004, 006):
3924
- \u2022 Playwright contexts closing unexpectedly
3925
- \u2022 401 errors mid-session
3926
-
3927
- Good news: When tests did run, app worked fine \u2705
3928
-
3929
- Artifacts: ./test-runs/20251019-230207/
3930
- ETA: Need fix in ~1-2 hours to unblock testing
3218
+ Thread: Full breakdown per test, artifacts, next steps
3931
3219
  \`\`\`
3932
3220
 
3933
- ### Template 2: Question
3934
-
3221
+ ### Question
3935
3222
  \`\`\`
3936
3223
  \u2753 **[Topic in 3-5 words]**
3937
-
3938
- [Context: 1 sentence explaining what you found]
3939
-
3940
- [Question: 1 sentence asking specifically what you need]
3941
-
3942
- @person - [what you need from them]
3943
- \`\`\`
3944
-
3945
- **Example:**
3946
- \`\`\`
3947
- \u2753 **Profile page shows different fields**
3948
-
3949
- Main menu shows email/name/preferences, Settings shows email/name/billing/security.
3950
-
3951
- Both say "complete profile" but different data \u2013 is this expected?
3952
-
3953
- @milko - should tests expect both views or is one a bug?
3954
- \`\`\`
3955
-
3956
- ### Template 3: Blocker/Escalation
3957
-
3958
- \`\`\`
3959
- \u{1F6A8} **[Impact statement]**
3960
-
3961
- Cause: [1-2 sentence technical summary]
3962
- Need: @person [specific action required]
3963
-
3964
- [Optional: ETA/timeline if blocking release]
3965
- \`\`\`
3966
-
3967
- **Example:**
3968
- \`\`\`
3969
- \u{1F6A8} **All automated tests blocked**
3970
-
3971
- Cause: DNS won't resolve test domains + Playwright contexts closing mid-execution
3972
- Need: @devops DNS config for test env, @qa Playwright MCP investigation
3973
-
3974
- Blocking today's release validation \u2013 need ETA for fix
3975
- \`\`\`
3976
-
3977
- ### Template 4: Success/Pass Report
3978
-
3979
- \`\`\`
3980
- \u2705 **[Test type] passed** \u2013 [X/Y]
3981
-
3982
- [Optional: 1 key observation or improvement]
3983
-
3984
- [Optional: If 100% pass and notable: Brief positive note]
3985
- \`\`\`
3986
-
3987
- **Example:**
3988
- \`\`\`
3989
- \u2705 **Smoke tests passed** \u2013 6/6
3990
-
3991
- All core flows working: auth, navigation, settings, session management.
3992
-
3993
- Release looks good from QA perspective \u{1F44D}
3224
+ [Context: 1 sentence]
3225
+ [Question: 1 sentence]
3226
+ @person - [what you need]
3994
3227
  \`\`\`
3995
3228
 
3996
- ## Anti-Patterns to Avoid
3997
-
3998
- **\u274C Don't:**
3999
- 1. Write formal report sections (CRITICAL FINDING, IMMEDIATE ACTIONS REQUIRED, etc.)
4000
- 2. Include meta-commentary about your own message
4001
- 3. Repeat the same point multiple times for emphasis
4002
- 4. Use nested bullet structures in main message
4003
- 5. Put technical logs/details in main message
4004
- 6. Write "Tagging @person for coordination" (just @person directly)
4005
- 7. Use phrases like "As per..." or "Please be advised..."
4006
- 8. Include full test execution timestamps in main message (just "Run: [ID]")
4007
-
4008
- **\u2705 Do:**
4009
- 1. Write like you're speaking to a teammate in person
4010
- 2. Front-load the impact/action needed
4011
- 3. Use threads liberally for any detail beyond basics
4012
- 4. Keep main message under 150 words (ideally 50-100)
4013
- 5. Make every word count\u2014edit ruthlessly
4014
- 6. Use natural language and contractions when appropriate
4015
- 7. Be specific about what you need from who
4016
-
4017
- ## Quality Checklist
4018
-
4019
- Before sending, verify:
4020
-
4021
- - [ ] Message type identified (report/question/blocker)
4022
- - [ ] Main message under 150 words
4023
- - [ ] Follows 3-sentence structure (what/why/next)
4024
- - [ ] Details moved to thread reply
4025
- - [ ] No meta-commentary about the message itself
4026
- - [ ] Conversational tone (no formal report language)
4027
- - [ ] Specific @mentions only if action needed
4028
- - [ ] Can be read and understood in <30 seconds
4029
-
4030
3229
  ## Context Discovery
4031
3230
 
4032
3231
  ${MEMORY_READ_INSTRUCTIONS.replace(/{ROLE}/g, "team-communicator")}
4033
3232
 
4034
- **Memory Sections for Team Communicator**:
4035
- - Conversation history and thread contexts
4036
- - Team communication preferences and patterns
4037
- - Question-response effectiveness tracking
4038
- - Team member expertise areas
4039
- - Successful communication strategies
4040
-
4041
- Additionally, always read:
4042
- 1. \`.bugzy/runtime/project-context.md\` (team info, SDLC, communication channels)
3233
+ **Key memory areas**: conversation history, team preferences, question-response effectiveness, team member expertise.
4043
3234
 
4044
- Use this context to:
4045
- - Identify correct Slack channel (from project-context.md)
4046
- - Learn team communication preferences (from memory)
4047
- - Tag appropriate team members (from project-context.md)
4048
- - Adapt tone to team culture (from memory patterns)
3235
+ Additionally, read \`.bugzy/runtime/project-context.md\` for team info, channels, and communication preferences.
4049
3236
 
4050
3237
  ${MEMORY_UPDATE_INSTRUCTIONS.replace(/{ROLE}/g, "team-communicator")}
4051
3238
 
4052
- Specifically for team-communicator, consider updating:
4053
- - **Conversation History**: Track thread contexts and ongoing conversations
4054
- - **Team Preferences**: Document communication patterns that work well
4055
- - **Response Patterns**: Note what types of messages get good team engagement
4056
- - **Team Member Expertise**: Record who provides good answers for what topics
3239
+ Update: conversation history, team preferences, response patterns, team member expertise.
4057
3240
 
4058
- ## Final Reminder
3241
+ ## Quality Checklist
4059
3242
 
4060
- You are not a formal report generator. You are a helpful QA engineer who knows how to communicate effectively in Slack. Every word should earn its place in the message. When in doubt, cut it out and put it in the thread.
3243
+ Before sending:
3244
+ - [ ] Main message under 150 words
3245
+ - [ ] 3-sentence structure (what/why/next)
3246
+ - [ ] Details in thread, not main message
3247
+ - [ ] Conversational tone (no formal report language)
3248
+ - [ ] Can be read in <30 seconds
4061
3249
 
4062
- **Target feeling:** "This is a real person who respects my time and communicates clearly."`;
3250
+ **You are a helpful QA engineer who respects your team's time. Every word should earn its place.**`;
4063
3251
 
4064
3252
  // src/subagents/templates/team-communicator/teams.ts
4065
3253
  init_cjs_shims();
@@ -6524,237 +5712,86 @@ var explorationProtocolStep = {
6524
5712
  category: "exploration",
6525
5713
  content: `## Exploratory Testing Protocol
6526
5714
 
6527
- Before creating or running formal tests, perform exploratory testing to validate requirements and understand actual system behavior. The depth of exploration should adapt to the clarity of requirements.
5715
+ Before creating or running formal tests, perform exploratory testing to validate requirements and understand actual system behavior.
6528
5716
 
6529
5717
  ### Assess Requirement Clarity
6530
5718
 
6531
- Determine exploration depth based on requirement quality:
6532
-
6533
- | Clarity | Indicators | Exploration Depth | Goal |
6534
- |---------|-----------|-------------------|------|
6535
- | **Clear** | Detailed acceptance criteria, screenshots/mockups, specific field names/URLs/roles, unambiguous behavior, consistent patterns | Quick (1-2 min) | Confirm feature exists, capture evidence |
6536
- | **Vague** | General direction clear but specifics missing, incomplete examples, assumed details, relative terms ("fix", "better") | Moderate (3-5 min) | Document current behavior, identify ambiguities, generate clarification questions |
6537
- | **Unclear** | Contradictory info, multiple interpretations, no examples/criteria, ambiguous scope ("the page"), critical details missing | Deep (5-10 min) | Systematically test scenarios, document patterns, identify all ambiguities, formulate comprehensive questions |
6538
-
6539
- **Examples:**
6540
- - **Clear:** "Change 'Submit' button from blue (#007BFF) to green (#28A745) on /auth/login. Verify hover effect."
6541
- - **Vague:** "Fix the sorting in todo list page. The items are mixed up for premium users."
6542
- - **Unclear:** "Improve the dashboard performance. Users say it's slow."
5719
+ | Clarity | Indicators | Exploration Depth |
5720
+ |---------|-----------|-------------------|
5721
+ | **Clear** | Detailed acceptance criteria, screenshots/mockups, specific field names/URLs | **Quick (1-2 min)** \u2014 confirm feature exists, capture evidence |
5722
+ | **Vague** | General direction clear but specifics missing, relative terms ("fix", "better") | **Moderate (3-5 min)** \u2014 document current behavior, identify ambiguities |
5723
+ | **Unclear** | Contradictory info, multiple interpretations, no criteria, ambiguous scope | **Deep (5-10 min)** \u2014 systematically test scenarios, document all ambiguities |
6543
5724
 
6544
5725
  ### Maturity Adjustment
6545
5726
 
6546
- If the Clarification Protocol determined project maturity, adjust exploration depth:
6547
-
6548
- - **New project**: Default one level deeper than requirement clarity suggests (Clear \u2192 Moderate, Vague \u2192 Deep)
6549
- - **Growing project**: Use requirement clarity as-is (standard protocol)
6550
- - **Mature project**: Trust knowledge base \u2014 can stay at suggested depth or go one level shallower if KB covers the feature
5727
+ If the Clarification Protocol determined project maturity:
5728
+ - **New project**: Default one level deeper (Clear \u2192 Moderate, Vague \u2192 Deep)
5729
+ - **Growing project**: Use requirement clarity as-is
5730
+ - **Mature project**: Can stay at suggested depth or go shallower if knowledge base covers the feature
6551
5731
 
6552
- **Always verify features exist before testing them.** If exploration reveals that a referenced page or feature does not exist in the application, apply the Clarification Protocol's "Execution Obstacle vs. Requirement Ambiguity" principle:
6553
- - If an authoritative trigger source (Jira issue, PR, team request) asserts the feature exists, this is likely an **execution obstacle** (missing credentials, feature flags, environment config) \u2014 proceed with test artifact creation and notify the team about the access issue. Do NOT BLOCK.
6554
- - If NO authoritative source claims the feature exists, this is **CRITICAL severity** \u2014 escalate via the Clarification Protocol regardless of maturity level. Do NOT silently adapt or work around the missing feature.
5732
+ **Always verify features exist before testing them.** If a referenced feature doesn't exist:
5733
+ - If an authoritative trigger (Jira, PR, team request) asserts it exists \u2192 **execution obstacle** (proceed with artifacts, notify team). Do NOT block.
5734
+ - If NO authoritative source claims it exists \u2192 **CRITICAL severity** \u2014 escalate via Clarification Protocol.
6555
5735
 
6556
5736
  ### Quick Exploration (1-2 min)
6557
5737
 
6558
5738
  **When:** Requirements CLEAR
6559
5739
 
6560
- **Steps:**
6561
- 1. Navigate to feature (use provided URL), verify loads without errors
5740
+ 1. Navigate to feature, verify it loads without errors
6562
5741
  2. Verify key elements exist (buttons, fields, sections mentioned)
6563
5742
  3. Capture screenshot of initial state
6564
- 4. Document:
6565
- \`\`\`markdown
6566
- **Quick Exploration (1 min)**
6567
- Feature: [Name] | URL: [Path]
6568
- Status: \u2705 Accessible / \u274C Not found / \u26A0\uFE0F Different
6569
- Screenshot: [filename]
6570
- Notes: [Immediate observations]
6571
- \`\`\`
6572
- 5. **Decision:** \u2705 Matches \u2192 Test creation | \u274C/\u26A0\uFE0F Doesn't match \u2192 Moderate Exploration
6573
-
6574
- **Time Limit:** 1-2 minutes
5743
+ 4. Document: feature name, URL, status (accessible/not found/different), notes
5744
+ 5. **Decision:** Matches \u2192 test creation | Doesn't match \u2192 Moderate Exploration
6575
5745
 
6576
5746
  ### Moderate Exploration (3-5 min)
6577
5747
 
6578
5748
  **When:** Requirements VAGUE or Quick Exploration revealed discrepancies
6579
5749
 
6580
- **Steps:**
6581
- 1. Navigate using appropriate role(s), set up preconditions, ensure clean state
5750
+ 1. Navigate using appropriate role(s), set up preconditions
6582
5751
  2. Test primary user flow, document steps and behavior, note unexpected behavior
6583
5752
  3. Capture before/after screenshots, document field values/ordering/visibility
6584
- 4. Compare to requirement: What matches? What differs? What's absent?
6585
- 5. Identify specific ambiguities:
6586
- \`\`\`markdown
6587
- **Moderate Exploration (4 min)**
6588
-
6589
- **Explored:** Role: [Admin], Path: [Steps], Behavior: [What happened]
6590
-
6591
- **Current State:** [Specific observations with examples]
6592
- - Example: "Admin view shows 8 sort options: By Title, By Due Date, By Priority..."
6593
-
6594
- **Requirement Says:** [What requirement expected]
6595
-
6596
- **Discrepancies:** [Specific differences]
6597
- - Example: "Premium users see 5 fewer sorting options than admins"
6598
-
6599
- **Ambiguities:**
6600
- 1. [First ambiguity with concrete example]
6601
- 2. [Second if applicable]
6602
-
6603
- **Clarification Needed:** [Specific questions]
6604
- \`\`\`
5753
+ 4. Compare to requirement: what matches, what differs, what's absent
5754
+ 5. Identify specific ambiguities with concrete examples
6605
5755
  6. Assess severity using Clarification Protocol
6606
- 7. **Decision:** \u{1F7E2} Minor \u2192 Proceed with assumptions | \u{1F7E1} Medium \u2192 Async clarification, proceed | \u{1F534} Critical \u2192 Stop, escalate
6607
-
6608
- **Time Limit:** 3-5 minutes
5756
+ 7. **Decision:** Minor ambiguity \u2192 proceed with assumptions | Critical \u2192 stop, escalate
6609
5757
 
6610
5758
  ### Deep Exploration (5-10 min)
6611
5759
 
6612
5760
  **When:** Requirements UNCLEAR or critical ambiguities found
6613
5761
 
6614
- **Steps:**
6615
- 1. **Define Exploration Matrix:** Identify dimensions (user roles, feature states, input variations, browsers)
6616
-
6617
- 2. **Systematic Testing:** Test each matrix cell methodically
6618
- \`\`\`
6619
- Example for "Todo List Sorting":
6620
- Matrix: User Roles \xD7 Feature Observations
6621
-
6622
- Test 1: Admin Role \u2192 Navigate, document sort options (count, names, order), screenshot
6623
- Test 2: Basic User Role \u2192 Same todo list, document options, screenshot
6624
- Test 3: Compare \u2192 Side-by-side table, identify missing/reordered options
6625
- \`\`\`
6626
-
6627
- 3. **Document Patterns:** Consistent behavior? Role-based differences? What varies vs constant?
6628
-
6629
- 4. **Comprehensive Report:**
6630
- \`\`\`markdown
6631
- **Deep Exploration (8 min)**
6632
-
6633
- **Matrix:** [Dimensions] | **Tests:** [X combinations]
6634
-
6635
- **Findings:**
6636
-
6637
- ### Test 1: Admin
6638
- - Setup: [Preconditions] | Steps: [Actions]
6639
- - Observations: Sort options=8, Options=[list], Ordering=[sequence]
6640
- - Screenshot: [filename-admin.png]
6641
-
6642
- ### Test 2: Basic User
6643
- - Setup: [Preconditions] | Steps: [Actions]
6644
- - Observations: Sort options=3, Missing vs Admin=[5 options], Ordering=[sequence]
6645
- - Screenshot: [filename-user.png]
6646
-
6647
- **Comparison Table:**
6648
- | Sort Option | Admin Pos | User Pos | Notes |
6649
- |-------------|-----------|----------|-------|
6650
- | By Title | 1 | 1 | Match |
6651
- | By Priority | 3 | Not visible | Missing |
6652
-
6653
- **Patterns:**
6654
- - Role-based feature visibility
6655
- - Consistent relative ordering for visible fields
6656
-
6657
- **Critical Ambiguities:**
6658
- 1. Option Visibility: Intentional basic users see 5 fewer sort options?
6659
- 2. Sort Definition: (A) All roles see all options in same order, OR (B) Roles see permitted options in same relative order?
6660
-
6661
- **Clarification Questions:** [Specific, concrete based on findings]
6662
- \`\`\`
6663
-
6664
- 5. **Next Action:** Critical ambiguities \u2192 STOP, clarify | Patterns suggest answer \u2192 Validate assumption | Behavior clear \u2192 Test creation
6665
-
6666
- **Time Limit:** 5-10 minutes
6667
-
6668
- ### Link Exploration to Clarification
6669
-
6670
- **Flow:** Requirement Analysis \u2192 Exploration \u2192 Clarification
6671
-
6672
- 1. Requirement analysis detects vague language \u2192 Triggers exploration
6673
- 2. Exploration documents current behavior \u2192 Identifies discrepancies
6674
- 3. Clarification uses findings \u2192 Asks specific questions referencing observations
6675
-
6676
- **Example:**
6677
- \`\`\`
6678
- "Fix the sorting in todo list"
6679
- \u2193 Ambiguity: "sorting" = by date, priority, or completion status?
6680
- \u2193 Moderate Exploration: Admin=8 sort options, User=3 sort options
6681
- \u2193 Question: "Should basic users see all 8 sort options (bug) or only 3 with consistent sequence (correct)?"
6682
- \`\`\`
5762
+ 1. **Define exploration matrix:** dimensions (user roles, feature states, input variations)
5763
+ 2. **Systematic testing:** test each matrix cell methodically, document observations
5764
+ 3. **Document patterns:** consistent behavior, role-based differences, what varies vs constant
5765
+ 4. **Comprehensive report:** findings per test, comparison table, identified patterns, critical ambiguities
5766
+ 5. **Next action:** Critical ambiguities \u2192 STOP, clarify | Patterns suggest answer \u2192 validate assumption | Behavior clear \u2192 test creation
6683
5767
 
6684
5768
  ### Document Exploration Results
6685
5769
 
6686
- **Template:**
6687
- \`\`\`markdown
6688
- ## Exploration Summary
6689
-
6690
- **Date:** [YYYY-MM-DD] | **Explorer:** [Agent/User] | **Depth:** [Quick/Moderate/Deep] | **Duration:** [X min]
6691
-
6692
- ### Feature: [Name and description]
6693
-
6694
- ### Observations: [Key findings]
6695
-
6696
- ### Current Behavior: [What feature does today]
6697
-
6698
- ### Discrepancies: [Requirement vs observation differences]
6699
-
6700
- ### Assumptions Made: [If proceeding with assumptions]
6701
-
6702
- ### Artifacts: Screenshots: [list], Video: [if captured], Notes: [detailed]
6703
- \`\`\`
6704
-
6705
- **Memory Storage:** Feature behavior patterns, common ambiguity types, resolution approaches
6706
-
6707
- ### Integration with Test Creation
6708
-
6709
- **Quick Exploration \u2192 Direct Test:**
6710
- - Feature verified \u2192 Create test matching requirement \u2192 Reference screenshot
6711
-
6712
- **Moderate Exploration \u2192 Assumption-Based Test:**
6713
- - Document behavior \u2192 Create test on best interpretation \u2192 Mark assumptions \u2192 Plan updates after clarification
6714
-
6715
- **Deep Exploration \u2192 Clarification-First:**
6716
- - Block test creation until clarification \u2192 Use exploration as basis for questions \u2192 Create test after answer \u2192 Reference both exploration and clarification
6717
-
6718
- ---
5770
+ Save exploration findings as a report including:
5771
+ - Date, depth, duration
5772
+ - Feature observations and current behavior
5773
+ - Discrepancies between requirements and observations
5774
+ - Assumptions made (if proceeding)
5775
+ - Artifacts: screenshots, videos, notes
6719
5776
 
6720
- ## Adaptive Exploration Decision Tree
5777
+ ### Decision Tree
6721
5778
 
6722
5779
  \`\`\`
6723
- Start: Requirement Received
6724
- \u2193
6725
- Are requirements clear with specifics?
6726
- \u251C\u2500 YES \u2192 Quick Exploration (1-2 min)
6727
- \u2502 \u2193
6728
- \u2502 Does feature match description?
6729
- \u2502 \u251C\u2500 YES \u2192 Proceed to Test Creation
6730
- \u2502 \u2514\u2500 NO \u2192 Escalate to Moderate Exploration
6731
- \u2502
6732
- \u2514\u2500 NO \u2192 Is general direction clear but details missing?
6733
- \u251C\u2500 YES \u2192 Moderate Exploration (3-5 min)
6734
- \u2502 \u2193
6735
- \u2502 Are ambiguities MEDIUM severity or lower?
6736
- \u2502 \u251C\u2500 YES \u2192 Document assumptions, proceed with test creation
6737
- \u2502 \u2514\u2500 NO \u2192 Escalate to Deep Exploration or Clarification
6738
- \u2502
6739
- \u2514\u2500 NO \u2192 Deep Exploration (5-10 min)
6740
- \u2193
6741
- Document comprehensive findings
6742
- \u2193
6743
- Assess ambiguity severity
6744
- \u2193
6745
- Seek clarification for CRITICAL/HIGH
5780
+ Requirements clear?
5781
+ \u251C\u2500 YES \u2192 Quick Exploration \u2192 Matches? \u2192 YES: Test Creation | NO: Moderate Exploration
5782
+ \u2514\u2500 NO \u2192 Direction clear?
5783
+    \u251C\u2500 YES \u2192 Moderate Exploration \u2192 Ambiguity \u2264 MEDIUM? \u2192 YES: Proceed with assumptions | NO: Deep Exploration / Clarify
5784
+    \u2514\u2500 NO \u2192 Deep Exploration \u2192 Document findings \u2192 Clarify CRITICAL/HIGH
6746
5785
  \`\`\`
6747
5786
 
6748
5787
  ---
6749
5788
 
6750
5789
  ## Remember
6751
5790
 
6752
- - **Explore before assuming** - Validate requirements against actual behavior
6753
- - **Concrete observations > abstract interpretation** - Document specific findings
6754
- - **Adaptive depth: time \u221D uncertainty** - Match exploration effort to requirement clarity
6755
- - **Exploration findings \u2192 specific clarifications** - Use observations to formulate questions
6756
- - **Always document** - Create artifacts for future reference
6757
- - **Link exploration \u2192 ambiguity \u2192 clarification** - Connect the workflow`,
5791
+ - **Explore before assuming** \u2014 validate requirements against actual behavior
5792
+ - **Concrete observations > abstract interpretation** \u2014 document specific findings
5793
+ - **Adaptive depth** \u2014 match exploration effort to requirement clarity
5794
+ - **Always document** \u2014 create artifacts for future reference`,
6758
5795
  tags: ["exploration", "protocol", "adaptive"]
6759
5796
  };
6760
5797
 
@@ -6767,277 +5804,138 @@ var clarificationProtocolStep = {
6767
5804
  invokesSubagents: ["team-communicator"],
6768
5805
  content: `## Clarification Protocol
6769
5806
 
6770
- Before proceeding with test creation or execution, ensure requirements are clear and testable. Use this protocol to detect ambiguity, assess its severity, and determine the appropriate action.
5807
+ Before proceeding with test creation or execution, ensure requirements are clear and testable.
6771
5808
 
6772
5809
  ### Check for Pending Clarification
6773
5810
 
6774
- Before starting, check if this task is resuming from a blocked clarification:
6775
-
6776
- 1. **Check $ARGUMENTS for clarification data:**
6777
- - If \`$ARGUMENTS.clarification\` exists, this task is resuming with a clarification response
6778
- - Extract: \`clarification\` (the user's answer), \`originalArgs\` (original task parameters)
6779
-
6780
- 2. **If clarification is present:**
6781
- - Read \`.bugzy/runtime/blocked-task-queue.md\`
6782
- - Find and remove your task's entry from the queue (update the file)
6783
- - Proceed using the clarification as if user just provided the answer
6784
- - Skip ambiguity detection for the clarified aspect
6785
-
6786
- 3. **If no clarification in $ARGUMENTS:** Proceed normally with ambiguity detection below.
5811
+ 1. If \`$ARGUMENTS.clarification\` exists, this task is resuming with a clarification response:
5812
+ - Extract \`clarification\` (the user's answer) and \`originalArgs\` (original task parameters)
5813
+ - Read \`.bugzy/runtime/blocked-task-queue.md\`, find and remove your task's entry
5814
+ - Proceed using the clarification, skip ambiguity detection for the clarified aspect
5815
+ 2. If no clarification in $ARGUMENTS: Proceed normally with ambiguity detection below.
6787
5816
 
6788
5817
  ### Assess Project Maturity
6789
5818
 
6790
- Before detecting ambiguity, assess how well you know this project. Maturity determines how aggressively you should ask questions \u2014 new projects require more questions, mature projects can rely on accumulated knowledge.
5819
+ Maturity determines how aggressively you should ask questions.
6791
5820
 
6792
- **Measure maturity from runtime artifacts:**
5821
+ **Measure from runtime artifacts:**
6793
5822
 
6794
5823
  | Signal | New | Growing | Mature |
6795
5824
  |--------|-----|---------|--------|
6796
- | \`knowledge-base.md\` | < 80 lines (template) | 80-300 lines | 300+ lines |
6797
- | \`memory/\` files | 0 files | 1-3 files | 4+ files, >5KB each |
5825
+ | \`knowledge-base.md\` | < 80 lines | 80-300 lines | 300+ lines |
5826
+ | \`memory/\` files | 0 | 1-3 | 4+ files, >5KB each |
6798
5827
  | Test cases in \`test-cases/\` | 0 | 1-6 | 7+ |
6799
5828
  | Exploration reports | 0 | 1 | 2+ |
6800
5829
 
6801
- **Steps:**
6802
- 1. Read \`.bugzy/runtime/knowledge-base.md\` and count lines
6803
- 2. List \`.bugzy/runtime/memory/\` directory and count files
6804
- 3. List \`test-cases/\` directory and count \`.md\` files (exclude README)
6805
- 4. Count exploration reports in \`exploration-reports/\`
6806
- 5. Classify: If majority of signals = New \u2192 **New**; majority Mature \u2192 **Mature**; otherwise \u2192 **Growing**
5830
+ Check these signals and classify: majority New \u2192 **New**; majority Mature \u2192 **Mature**; otherwise \u2192 **Growing**.
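+ 
+ The majority rule as a sketch (signal extraction follows the table above; the counts in the comment are illustrative):
+ 
+ \`\`\`typescript
+ type Maturity = 'new' | 'growing' | 'mature';
+ 
+ function classify(signals: Maturity[]): Maturity {
+   const count = (m: Maturity) => signals.filter(s => s === m).length;
+   if (count('new') > signals.length / 2) return 'new';
+   if (count('mature') > signals.length / 2) return 'mature';
+   return 'growing';
+ }
+ 
+ // e.g. kb = 120 lines (growing), 0 memory files (new), 2 test cases (growing),
+ // 0 exploration reports (new) -> classify(...) returns 'growing' (no majority).
+ \`\`\`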
6807
5831
 
6808
5832
  **Maturity adjusts your question threshold:**
6809
- - **New**: Ask for CRITICAL + HIGH + MEDIUM severity (gather information aggressively)
6810
- - **Growing**: Ask for CRITICAL + HIGH severity (standard protocol)
6811
- - **Mature**: Ask for CRITICAL only (handle HIGH with documented assumptions)
6812
-
6813
- **CRITICAL severity ALWAYS triggers a question, regardless of maturity level.**
5833
+ - **New**: STOP for CRITICAL + HIGH + MEDIUM
5834
+ - **Growing**: STOP for CRITICAL + HIGH (default)
5835
+ - **Mature**: STOP for CRITICAL only; handle HIGH with documented assumptions
6814
5836
 
6815
5837
  ### Detect Ambiguity
6816
5838
 
6817
- Scan for ambiguity signals:
6818
-
6819
- **Language:** Vague terms ("fix", "improve", "better", "like", "mixed up"), relative terms without reference ("faster", "more"), undefined scope ("the ordering", "the fields", "the page"), modal ambiguity ("should", "could" vs "must", "will")
6820
-
6821
- **Details:** Missing acceptance criteria (no clear PASS/FAIL), no examples/mockups, incomplete field/element lists, unclear role behavior differences, unspecified error scenarios
6822
-
6823
- **Interpretation:** Multiple valid interpretations, contradictory information (description vs comments), implied vs explicit requirements
6824
-
6825
- **Context:** No reference documentation, "RELEASE APPROVED" without criteria, quick ticket creation, assumes knowledge ("as you know...", "obviously...")
5839
+ Scan for these signals:
5840
+ - **Language**: Vague terms ("fix", "improve"), relative terms without reference, undefined scope, modal ambiguity
5841
+ - **Details**: Missing acceptance criteria, no examples, incomplete element lists, unspecified error scenarios
5842
+ - **Interpretation**: Multiple valid interpretations, contradictory information, implied vs explicit requirements
5843
+ - **Context**: No reference documentation, assumes knowledge
6826
5844
 
6827
- **Quick Check:**
6828
- - [ ] Success criteria explicitly defined? (PASS if X, FAIL if Y)
6829
- - [ ] All affected elements specifically listed? (field names, URLs, roles)
6830
- - [ ] Only ONE reasonable interpretation?
6831
- - [ ] Examples, screenshots, or mockups provided?
6832
- - [ ] Consistent with existing system patterns?
6833
- - [ ] Can write test assertions without assumptions?
5845
+ **Quick Check** \u2014 can you write test assertions without assumptions? Is there only ONE reasonable interpretation?
6834
5846
 
6835
5847
  ### Assess Severity
6836
5848
 
- If ambiguity is detected, assess its severity:
-
- | Severity | Characteristics | Examples | Action |
- |----------|----------------|----------|--------|
- | **CRITICAL** | Expected behavior undefined/contradictory; test outcome unpredictable; core functionality unclear; success criteria missing; multiple interpretations = different strategies; **referenced page/feature confirmed absent after browser verification AND no authoritative trigger source (Jira, PR, team request) asserts the feature exists** | "Fix the issue" (what issue?), "Improve performance" (which metrics?), "Fix sorting in todo list" (by date? priority? completion status?), "Test the Settings page" (browsed app \u2014 no Settings page exists, and no Jira/PR claims it was built) | **STOP** - You MUST ask via team-communicator before proceeding |
- | **HIGH** | Core underspecified but direction clear; affects majority of scenarios; vague success criteria; assumptions risky | "Fix ordering" (sequence OR visibility?), "Add validation" (what? messages?), "Update dashboard" (which widgets?) | **STOP** - You MUST ask via team-communicator before proceeding |
- | **MEDIUM** | Specific details missing; general requirements clear; affects subset of cases; reasonable low-risk assumptions possible; wrong assumption = test updates not strategy overhaul | Missing field labels, unclear error message text, undefined timeouts, button placement not specified, date formats unclear | **PROCEED** - (1) Moderate exploration, (2) Document assumptions: "Assuming X because Y", (3) Proceed with creation/execution, (4) Async clarification (team-communicator), (5) Mark [ASSUMED: description] |
- | **LOW** | Minor edge cases; documentation gaps don't affect execution; optional/cosmetic elements; minimal impact | Tooltip text, optional field validation, icon choice, placeholder text, tab order | **PROCEED** - (1) Mark [TO BE CLARIFIED: description], (2) Proceed, (3) Mention in report "Minor Details", (4) No blocking/async clarification |
+ | Severity | Characteristics | Action |
+ |----------|----------------|--------|
+ | **CRITICAL** | Expected behavior undefined/contradictory; core functionality unclear; success criteria missing; multiple interpretations = different strategies; page/feature confirmed absent with no authoritative trigger claiming it exists | **STOP** \u2014 ask via team-communicator |
+ | **HIGH** | Core underspecified but direction clear; affects majority of scenarios; assumptions risky | **STOP** \u2014 ask via team-communicator |
+ | **MEDIUM** | Specific details missing; general requirements clear; reasonable low-risk assumptions possible | **PROCEED** \u2014 moderate exploration, document assumptions [ASSUMED: X], async clarification |
+ | **LOW** | Minor edge cases; documentation gaps don't affect execution | **PROCEED** \u2014 mark [TO BE CLARIFIED: X], mention in report |

  ### Execution Obstacle vs. Requirement Ambiguity

- Before classifying something as CRITICAL, distinguish between these two fundamentally different situations:
-
- **Requirement Ambiguity** = *What* to test is unclear \u2192 severity assessment applies normally
- - No authoritative source describes the feature
- - The task description is vague or contradictory
- - You cannot determine what "correct" behavior looks like
- - \u2192 Apply severity table above. CRITICAL/HIGH \u2192 BLOCK.
-
- **Execution Obstacle** = *What* to test is clear, but *how* to access/verify has obstacles \u2192 NEVER BLOCK
- - An authoritative trigger source (Jira issue, PR, team message) asserts the feature exists
- - You browsed the app but couldn't find/access the feature
- - The obstacle is likely: wrong user role/tier, missing test data, feature flags, environment config
- - \u2192 PROCEED with artifact creation (test cases, test specs). Notify team about the obstacle.
-
- **The key test:** Does an authoritative trigger source (Jira, PR, team request) assert the feature exists?
- - **YES** \u2192 It's an execution obstacle. The feature exists but you can't access it. Proceed: create test artifacts, add placeholder env vars, notify team about access issues.
- - **NO** \u2192 It may genuinely not exist. Apply CRITICAL severity, ask what was meant.
+ Before classifying something as CRITICAL, distinguish:

- | Scenario | Trigger Says | Browser Shows | Classification | Action |
- |----------|-------------|---------------|----------------|--------|
- | Jira says "test premium dashboard", you log in as test_user and don't see it | Feature exists | Can't access | **Execution obstacle** | Create tests, notify team re: missing premium credentials |
- | PR says "verify new settings page", you browse and find no settings page | Feature exists | Can't find | **Execution obstacle** | Create tests, notify team re: possible feature flag/env issue |
- | Manual request "test the settings page", no Jira/PR, you browse and find no settings page | No source claims it | Can't find | **Requirement ambiguity (CRITICAL)** | BLOCK, ask what was meant |
- | Jira says "fix sorting", but doesn't specify sort criteria | Feature exists | Feature exists | **Requirement ambiguity (HIGH)** | BLOCK, ask which sort criteria |
+ **Requirement Ambiguity** = *What* to test is unclear \u2192 severity assessment applies normally.

- **Partial Feature Existence \u2014 URL found but requested functionality absent:**
+ **Execution Obstacle** = *What* to test is clear, but *how* to access/verify has obstacles \u2192 NEVER BLOCK.
+ - An authoritative trigger source (Jira, PR, team message) asserts the feature exists
+ - You browsed but couldn't find/access it (likely: wrong role, missing test data, feature flags, env config)
+ - \u2192 PROCEED with artifact creation. Notify team about the obstacle.

- A common edge case: a page/route loads successfully, but the SPECIFIC FUNCTIONALITY you were asked to test doesn't exist on it.
+ **The key test:** Does an authoritative trigger source assert the feature exists?
+ - **YES** \u2192 Execution obstacle. Proceed, create test artifacts, notify team about access issues.
+ - **NO** \u2192 May genuinely not exist. Apply CRITICAL severity, ask.

- **Rule:** Evaluate whether the REQUESTED FUNCTIONALITY exists, not just whether a URL resolves.
+ **Important:** A page loading is NOT the same as the requested functionality existing on it. Evaluate whether the REQUESTED FUNCTIONALITY exists, not just whether a URL resolves. If the page loads but requested features are absent and no authoritative source claims they were built \u2192 CRITICAL ambiguity.

- | Page Exists | Requested Features Exist | Authoritative Trigger | Classification |
- |-------------|--------------------------|----------------------|----------------|
- | Yes | Yes | Any | Proceed normally |
- | Yes | No | Yes (Jira/PR says features built) | Execution obstacle \u2014 features behind flag/env |
- | Yes | No | No (manual request only) | **Requirement ambiguity (CRITICAL)** \u2014 ask what's expected |
- | No | N/A | Yes | Execution obstacle \u2014 page not deployed yet |
- | No | N/A | No | **Requirement ambiguity (CRITICAL)** \u2014 ask what was meant |
-
- **Example:** Prompt says "Test the checkout payment form with credit card 4111..." You browse to /checkout and find an information form (first name, last name, postal code) but NO payment form, NO shipping options, NO Place Order button. No Jira/PR claims these features exist. \u2192 **CRITICAL requirement ambiguity.** Ask: "I found a checkout information form at /checkout but no payment form or shipping options. Can you clarify what checkout features you'd like tested?"
-
- **Key insight:** Finding a URL is not the same as finding the requested functionality. Do NOT classify this as an "execution obstacle" just because the page loads.
+ | Scenario | Trigger Claims Feature | Browser Shows | Classification |
+ |----------|----------------------|---------------|----------------|
+ | Jira says "test premium dashboard", can't see it | Yes | Can't access | Execution obstacle \u2014 proceed |
+ | PR says "verify settings page", no settings page | Yes | Can't find | Execution obstacle \u2014 proceed |
+ | Manual request "test settings", no Jira/PR | No | Can't find | CRITICAL ambiguity \u2014 ask |
+ | Jira says "fix sorting", no sort criteria | Yes | Feature exists | HIGH ambiguity \u2014 ask |

  ### Check Memory for Similar Clarifications

- Before asking, check if similar question was answered:
6894
-
6895
- **Process:**
6896
- 1. **Query team-communicator memory** - Search by feature name, ambiguity pattern, ticket keywords
6897
- 2. **Review past Q&A** - Similar question asked? What was answer? Applicable now?
6898
- 3. **Assess reusability:**
6899
- - Directly applicable \u2192 Use answer, no re-ask
6900
- - Partially applicable \u2192 Adapt and reference ("Previously for X, clarified Y. Same here?")
6901
- - Not applicable \u2192 Ask as new
6902
- 4. **Update memory** - Store Q&A with task type, feature, pattern tags
6903
-
6904
- **Example:** Query "todo sorting priority" \u2192 Found 2025-01-15: "Should completed todos appear in main list?" \u2192 Answer: "No, move to separate archive view" \u2192 Directly applicable \u2192 Use, no re-ask needed
5882
+ Before asking, search memory by feature name, ambiguity pattern, and ticket keywords. If a directly applicable past answer exists, use it without re-asking. If partially applicable, adapt and reference.
6905
5883
 
6906
5884
  ### Formulate Clarification Questions
6907
5885
 
6908
- If clarification needed (CRITICAL/HIGH severity), formulate specific, concrete questions:
6909
-
6910
- **Good Questions:** Specific and concrete, provide context, offer options, reference examples, tie to test strategy
5886
+ If clarification needed (CRITICAL/HIGH), formulate specific, concrete questions:
6911
5887
 
6912
- **Bad Questions:** Too vague/broad, assumptive, multiple questions in one, no context
6913
-
6914
- **Template:**
6915
5888
  \`\`\`
6916
5889
  **Context:** [Current understanding]
6917
5890
  **Ambiguity:** [Specific unclear aspect]
6918
5891
  **Question:** [Specific question with options]
6919
5892
  **Why Important:** [Testing strategy impact]
6920
-
6921
- Example:
6922
- Context: TODO-456 "Fix the sorting in the todo list so items appear in the right order"
6923
- Ambiguity: "sorting" = (A) by creation date, (B) by due date, (C) by priority level, or (D) custom user-defined order
6924
- Question: Should todos be sorted by due date (soonest first) or priority (high to low)? Should completed items appear in the list or move to archive?
6925
- Why Important: Different sort criteria require different test assertions. Current app shows 15 active todos + 8 completed in mixed order.
6926
5893
  \`\`\`

  ### Communicate Clarification Request

- **For Slack-Triggered Tasks:** {{INVOKE_TEAM_COMMUNICATOR}} to ask in thread:
- \`\`\`
- Ask clarification in Slack thread:
- Context: [From ticket/description]
- Ambiguity: [Describe ambiguity]
- Severity: [CRITICAL/HIGH]
- Questions:
- 1. [First specific question]
- 2. [Second if needed]
-
- Clarification needed to proceed. I'll wait for response before testing.
- \`\`\`
-
- **For Manual/API Triggers:** Include in task output:
- \`\`\`markdown
- ## Clarification Required Before Testing
-
- **Ambiguity:** [Description]
- **Severity:** [CRITICAL/HIGH]
-
- ### Questions:
- 1. **Question:** [First question]
- - Context: [Provide context]
- - Options: [If applicable]
- - Impact: [Testing impact]
+ **For Slack-Triggered Tasks:** {{INVOKE_TEAM_COMMUNICATOR}} to ask in thread with context, ambiguity description, severity, and specific questions.

- **Action Required:** Provide clarification. Testing cannot proceed.
- **Current Observation:** [What exploration revealed - concrete examples]
- \`\`\`
+ **For Manual/API Triggers:** Include a "Clarification Required Before Testing" section in task output with ambiguity, severity, questions with context/options/impact, and current observations.

  ### Register Blocked Task (CRITICAL/HIGH only)

- When asking a CRITICAL or HIGH severity question that blocks progress, register the task in the blocked queue so it can be automatically re-triggered when clarification arrives.
-
- **Update \`.bugzy/runtime/blocked-task-queue.md\`:**
-
- 1. Read the current file (create if doesn't exist)
- 2. Add a new row to the Queue table
+ When blocked, register in \`.bugzy/runtime/blocked-task-queue.md\`:

  \`\`\`markdown
- # Blocked Task Queue
-
- Tasks waiting for clarification responses.
-
  | Task Slug | Question | Original Args |
  |-----------|----------|---------------|
  | generate-test-plan | Should todos be sorted by date or priority? | \`{"ticketId": "TODO-456"}\` |
  \`\`\`

- **Entry Fields:**
- - **Task Slug**: The task slug (e.g., \`generate-test-plan\`) - used for re-triggering
- - **Question**: The clarification question asked (so LLM can match responses)
- - **Original Args**: JSON-serialized \`$ARGUMENTS\` wrapped in backticks
-
- **Purpose**: The LLM processor reads this file and matches user responses to pending questions. When a match is found, it re-queues the task with the clarification.
+ The LLM processor reads this file and matches user responses to pending questions, then re-queues the task with the clarification.
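+
+ A minimal sketch of registering an entry, assuming Node's \`fs\` (the helper name and error handling are illustrative, not part of the package):
+
+ \`\`\`typescript
+ import * as fs from "node:fs";
+ import { EOL } from "node:os";
+
+ const QUEUE_PATH = ".bugzy/runtime/blocked-task-queue.md";
+ const HEADER =
+   "| Task Slug | Question | Original Args |" + EOL +
+   "|-----------|----------|---------------|" + EOL;
+
+ // Append one row per pending question; args are JSON-serialized in backticks
+ function registerBlockedTask(slug: string, question: string, args: unknown): void {
+   // Create the file with a table header on first use
+   if (!fs.existsSync(QUEUE_PATH)) fs.writeFileSync(QUEUE_PATH, HEADER);
+   const row = "| " + slug + " | " + question + " | \`" + JSON.stringify(args) + "\` |" + EOL;
+   fs.appendFileSync(QUEUE_PATH, row);
+ }
+
+ registerBlockedTask("generate-test-plan", "Should todos be sorted by date or priority?", { ticketId: "TODO-456" });
+ \`\`\`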

  ### Wait or Proceed Based on Severity

- **Use your maturity assessment to adjust thresholds:**
- - **New project**: STOP for CRITICAL + HIGH + MEDIUM
- - **Growing project**: STOP for CRITICAL + HIGH (default)
- - **Mature project**: STOP for CRITICAL only; handle HIGH with documented assumptions
-
  **When severity meets your STOP threshold:**
- - You MUST call team-communicator (Slack) to ask the question \u2014 do NOT just mention it in your text output
+ - You MUST call team-communicator to ask \u2014 do NOT just mention it in text output
  - Do NOT create tests, run tests, or make assumptions about the unclear aspect
- - Do NOT silently adapt by working around the issue (e.g., running other tests instead)
+ - Do NOT silently adapt by working around the issue
  - Do NOT invent your own success criteria when none are provided
- - Register the blocked task and wait for clarification
- - *Rationale: Wrong assumptions = incorrect tests, false results, wasted time*
-
- **When severity is below your STOP threshold \u2192 Proceed with Documented Assumptions:**
- - Perform moderate exploration, document assumptions, proceed with creation/execution
- - Ask clarification async (team-communicator), mark results "based on assumptions"
- - Update tests after clarification received
- - *Rationale: Waiting blocks progress; documented assumptions allow forward movement with later corrections*
+ - Register the blocked task and wait

- **LOW \u2192 Always Proceed and Mark:**
- - Proceed with creation/execution, mark gaps [TO BE CLARIFIED] or [ASSUMED]
- - Mention in report but don't prioritize, no blocking
- - *Rationale: Details don't affect strategy/results significantly*
+ **When severity is below your STOP threshold:**
+ - Perform moderate exploration, document assumptions, proceed
+ - Ask clarification async, mark results "based on assumptions"

  ### Document Clarification in Results

- When reporting test results, always include an "Ambiguities" section if clarification occurred:
-
- \`\`\`markdown
- ## Ambiguities Encountered
-
- ### Clarification: [Topic]
- - **Severity:** [CRITICAL/HIGH/MEDIUM/LOW]
- - **Question Asked:** [What was asked]
- - **Response:** [Answer received, or "Awaiting response"]
- - **Impact:** [How this affected testing]
- - **Assumption Made:** [If proceeded with assumption]
- - **Risk:** [What could be wrong if assumption is incorrect]
-
- ### Resolution:
- [How the clarification was resolved and incorporated into testing]
- \`\`\`
+ Include an "Ambiguities Encountered" section in results when clarification occurred, noting severity, question asked, response (or "Awaiting"), impact, assumptions made, and risk.

  ---

  ## Remember

- - **STOP means STOP** - When you hit a STOP threshold, you MUST call team-communicator to ask via Slack. Do NOT silently adapt, skip, or work around the issue
- - **Non-existent features \u2014 check context first** - If a page/feature doesn't exist in the browser, check whether an authoritative trigger (Jira, PR, team request) asserts it exists. If YES \u2192 execution obstacle (proceed with artifact creation, notify team). If NO authoritative source claims it exists \u2192 CRITICAL severity, ask what was meant
- - **Ask correctly > guess poorly** - Specific questions lead to specific answers
- - **Never invent success criteria** - If the task says "improve" or "fix" without metrics, ask what "done" looks like
- - **Check memory first** - Avoid re-asking previously answered questions
- - **Maturity adjusts threshold, not judgment** - Even in mature projects, CRITICAL always triggers a question\`,
+ - **STOP means STOP** \u2014 When you hit a STOP threshold, you MUST call team-communicator. Do NOT silently adapt or work around the issue
+ - **Non-existent features \u2014 check context first** \u2014 If a feature doesn't exist in the browser, check whether an authoritative trigger asserts it exists. YES \u2192 execution obstacle (proceed). NO \u2192 CRITICAL severity, ask.
+ - **Never invent success criteria** \u2014 If the task says "improve" or "fix" without metrics, ask what "done" looks like
+ - **Check memory first** \u2014 Avoid re-asking previously answered questions
+ - **Maturity adjusts threshold, not judgment** \u2014 CRITICAL always triggers a question\`,
  tags: ["clarification", "protocol", "ambiguity"]
  };

@@ -7230,6 +6128,10 @@ The agent will:
  4. Apply appropriate fix pattern from \`./tests/CLAUDE.md\`
  5. Rerun the test
  6. The custom reporter will automatically create the next exec-N/ folder
+ 6b. If there is no custom reporter (BYOT mode \u2014 \`reporters/bugzy-reporter.ts\` is absent):
+    Run the parse script to update the manifest with re-run results:
+    \`npx tsx reporters/parse-results.ts --input <re-run-output> --timestamp <current> --test-id <testCaseId>\`
+    This creates exec-N+1/ and updates the manifest.
  7. Repeat up to 3 times if needed (exec-1, exec-2, exec-3)
  8. Report success or escalate as likely product bug

@@ -7425,6 +6327,88 @@ ls -t test-runs/ | head -1
  tags: ["execution", "exploration"]
  };

+ // src/tasks/steps/execution/normalize-test-results.ts
+ init_cjs_shims();
+ var normalizeTestResultsStep = {
+ id: "normalize-test-results",
+ title: "Normalize Test Results",
+ category: "execution",
+ content: \`## Normalize Test Results
+
+ Convert test results into the standard Bugzy \`test-runs/\` manifest format. This step handles both external CI results (via webhook) and local BYOT test output. In managed mode (bugzy-reporter already created the manifest), this step is skipped.
+
+ ### 1. Check for Existing Manifest
+
+ Look for a \`test-runs/*/manifest.json\` from the most recent run. If a manifest already exists from the bugzy-reporter (managed mode), **skip this step entirely** \u2014 the results are already normalized.
+
+ ### 2. Determine Input Source
+
+ Check how test results are available:
+
+ **From event payload** (external CI \u2014 \`$ARGUMENTS\` contains event data):
+ - \`data.results_url\` \u2014 URL to download results from (the parse script handles the download)
+ - \`data.results\` \u2014 inline results (write to a temp file first: \`/tmp/bugzy-results-<random>.json\`)
+
+ **From local test run** (agent executed BYOT tests):
+ - Read \`./tests/CLAUDE.md\` for the native test output location
+ - Find the most recent test output file
+
+ ### 3. Locate and Run Parse Script
+
+ Look for the parse script at \`reporters/parse-results.ts\`.
+
+ **If the parse script exists:**
+ \`\`\`bash
+ npx tsx reporters/parse-results.ts --input <source>
+ \`\`\`
+ Where \`<source>\` is the file path, temp file path, or URL determined in step 2.
+
+ **If the parse script is missing** (fallback for robustness):
+ Create the manifest inline using the same approach \u2014 identify the results format by inspecting the data structure (a detection sketch follows this list):
+ - JSON with \`suites\` or \`specs\` arrays: Likely Playwright JSON report
+ - XML with \`<testsuites>\` or \`<testsuite>\` root: JUnit XML format
+ - JSON with \`results\` array and \`stats\` object: Likely Cypress/Mocha JSON
+ - Other: Inspect structure and adapt
+
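+ A minimal detection sketch implementing the heuristics above (illustrative only, not exhaustive):
+
+ \`\`\`typescript
+ // Guess the report format from the raw results text
+ type ReportFormat = "playwright-json" | "junit-xml" | "mocha-json" | "unknown";
+
+ function detectFormat(raw: string): ReportFormat {
+   const text = raw.trim();
+   // JUnit XML has a <testsuites> or <testsuite> root element
+   if (text.startsWith("<")) {
+     return text.includes("<testsuite") ? "junit-xml" : "unknown";
+   }
+   try {
+     const data = JSON.parse(text);
+     if (Array.isArray(data.suites) || Array.isArray(data.specs)) return "playwright-json";
+     if (Array.isArray(data.results) && data.stats) return "mocha-json"; // covers Cypress/Mocha
+   } catch {
+     // not JSON; fall through
+   }
+   return "unknown";
+ }
+ \`\`\`
+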
+ Then create:
+ 1. \`test-runs/{timestamp}/manifest.json\` with the standard Bugzy schema
+ 2. \`test-runs/{timestamp}/{testCaseId}/exec-1/result.json\` for each failed test
+
+ Save the inline parse logic to \`reporters/parse-results.ts\` for future reuse.
+
+ ### 4. Verify Manifest
+
+ Confirm \`manifest.json\` was created (a verification sketch follows):
+ - Read the manifest and validate the structure
+ - Check that \`stats\` counts match the \`testCases\` array
+
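+ A minimal consistency check (sketch only; field names follow the Bugzy manifest schema shown later in this diff):
+
+ \`\`\`typescript
+ import * as fs from "node:fs";
+
+ // Throw if stats disagree with the testCases array
+ function verifyManifest(manifestPath: string): void {
+   const m = JSON.parse(fs.readFileSync(manifestPath, "utf8"));
+   const cases: Array<{ finalStatus: string }> = m.testCases ?? [];
+   const passed = cases.filter((c) => c.finalStatus === "passed").length;
+   const failed = cases.filter((c) => c.finalStatus === "failed").length;
+   if (m.stats.totalTests !== cases.length || m.stats.passed !== passed || m.stats.failed !== failed) {
+     throw new Error("manifest stats do not match testCases: " + manifestPath);
+   }
+ }
+ \`\`\`
+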
+ ### 5. Generate Summary
+
+ Read the manifest and produce a summary (a calculation sketch follows the template):
+
+ \`\`\`markdown
+ ## Test Results Summary
+
+ - Total Tests: [count]
+ - Passed: [count] ([percentage]%)
+ - Failed: [count] ([percentage]%)
+ - Skipped: [count] ([percentage]%)
+ - Duration: [time if available]
+ \`\`\`
+
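+ A sketch of the percentage math (illustrative values; \`skipped\` appears only if the parser records it):
+
+ \`\`\`typescript
+ // Derive summary figures from manifest stats
+ function pct(part: number, total: number): string {
+   return total === 0 ? "0.0" : ((part / total) * 100).toFixed(1);
+ }
+
+ const stats = { totalTests: 20, passed: 17, failed: 3 }; // e.g. read from manifest.json
+ console.log("- Total Tests: " + stats.totalTests);
+ console.log("- Passed: " + stats.passed + " (" + pct(stats.passed, stats.totalTests) + "%)");
+ console.log("- Failed: " + stats.failed + " (" + pct(stats.failed, stats.totalTests) + "%)");
+ \`\`\`
+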
+ ### 6. Include CI Metadata (if from event payload)
+
+ If the results came from an external CI event (\`$ARGUMENTS\` contains \`data.metadata\`), include:
+ - **Pipeline URL**: \`data.metadata.pipeline_url\`
+ - **Commit**: \`data.metadata.commit_sha\`
+ - **Branch**: \`data.metadata.branch\`
+
+ ### 7. All Tests Passed?
+
+ If there are **no failures**, note that all tests passed. Downstream triage and fix steps can be skipped.\`,
+ tags: ["execution", "results", "normalization", "byot"]
+ };
+
  // src/tasks/steps/generation/generate-test-plan.ts
  init_cjs_shims();
  var generateTestPlanStep = {
@@ -7613,6 +6597,117 @@ TEST_API_KEY=secret_key_here
  tags: ["generation", "environment"]
  };

+ // src/tasks/steps/generation/create-results-parser.ts
+ init_cjs_shims();
+ var createResultsParserStep = {
+ id: "create-results-parser",
+ title: "Create Results Parser Script",
+ category: "generation",
+ content: \`## Create Results Parser Script
+
+ Create a reusable script that normalizes test results from the project's test framework into Bugzy's standard \`test-runs/\` manifest format. This script is used at runtime by both external CI events and agent-executed BYOT test runs.
+
+ ### Inspect the Test Project
+
+ 1. Read \`./tests/CLAUDE.md\` to understand:
+ - Which test framework is used (Playwright, Cypress, Jest, Mocha, etc.)
+ - How tests are run and where output goes
+ - The native report format (JSON, JUnit XML, etc.)
+ 2. Check the test runner config file (e.g., \`playwright.config.ts\`, \`cypress.config.ts\`, \`jest.config.ts\`) for report settings
+ 3. If a sample test output exists, read it to understand the exact structure
+
+ ### Create the Parse Script
+
+ Create \`reporters/parse-results.ts\` \u2014 a Node.js/TypeScript CLI script.
+
+ **Interface:**
+ \`\`\`
+ npx tsx reporters/parse-results.ts --input <file-or-url> [--timestamp <existing>] [--test-id <id>]
+ \`\`\`
+
+ **Arguments** (an input-loading sketch follows this list):
+ - \`--input\` (required): file path or URL to the test results
+ - If URL (starts with \`http://\` or \`https://\`): download with a 30s timeout
+ - If file path: read directly from disk
+ - \`--timestamp\` (optional): existing run timestamp for incremental updates
+ - \`--test-id\` (optional): specific test case ID for incremental updates (used with \`--timestamp\`)
+
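+ A sketch of the \`--input\` handling, assuming Node 18+ for global \`fetch\` (illustrative only):
+
+ \`\`\`typescript
+ import * as fs from "node:fs";
+
+ // Load results from a URL (30s timeout) or a local file path
+ async function loadInput(input: string): Promise<string> {
+   if (input.startsWith("http://") || input.startsWith("https://")) {
+     const res = await fetch(input, { signal: AbortSignal.timeout(30_000) });
+     if (!res.ok) throw new Error("download failed with HTTP " + res.status);
+     return await res.text();
+   }
+   return fs.readFileSync(input, "utf8");
+ }
+ \`\`\`
+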
+ **Normal mode** (no \`--timestamp\`):
+ 1. Parse the project-specific test output format
+ 2. Generate a timestamp: \`YYYYMMDD-HHmmss\`
+ 3. Create \`test-runs/{timestamp}/manifest.json\` with the standard Bugzy schema:
+ \`\`\`json
+ {
+   "bugzyExecutionId": "<from BUGZY_EXECUTION_ID env var or 'local'>",
+   "timestamp": "<YYYYMMDD-HHmmss>",
+   "startTime": "<ISO8601>",
+   "endTime": "<ISO8601>",
+   "status": "completed",
+   "stats": {
+     "totalTests": 0,
+     "passed": 0,
+     "failed": 0,
+     "totalExecutions": 0
+   },
+   "testCases": [
+     {
+       "id": "<slugified test name, e.g. TC-001-login>",
+       "name": "<original test name>",
+       "totalExecutions": 1,
+       "finalStatus": "passed|failed",
+       "executions": [
+         {
+           "executionNumber": 1,
+           "status": "passed|failed",
+           "error": "<error message if failed, null if passed>",
+           "duration": null,
+           "hasTrace": false,
+           "hasScreenshots": false
+         }
+       ]
+     }
+   ]
+ }
+ \`\`\`
+ 4. For each failed test, create:
+ - Directory: \`test-runs/{timestamp}/{testCaseId}/exec-1/\`
+ - File: \`test-runs/{timestamp}/{testCaseId}/exec-1/result.json\` containing:
+ \`\`\`json
+ {
+   "status": "failed",
+   "error": "<full error message>",
+   "stackTrace": "<stack trace if available>",
+   "duration": null,
+   "testFile": "<file path if available>"
+ }
+ \`\`\`
+ 5. Print the manifest path to stdout
+ 6. Exit code 0 on success, non-zero on failure
+
+ **Incremental mode** (\`--timestamp\` + \`--test-id\` provided):
+ 1. Read existing \`test-runs/{timestamp}/manifest.json\`
+ 2. Parse the new test results for the specified test case
+ 3. Find the next execution number (e.g., if exec-2 exists, create exec-3; see the sketch below)
+ 4. Create \`test-runs/{timestamp}/{testCaseId}/exec-N/result.json\`
+ 5. Update the manifest: add the execution entry, then update \`totalExecutions\`, \`finalStatus\`, and stats
+ 6. Print the manifest path to stdout
+
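+ A sketch of picking the next execution number (directory layout as described above; illustrative):
+
+ \`\`\`typescript
+ import * as fs from "node:fs";
+ import * as path from "node:path";
+
+ // Scan exec-N directories and return N+1 (or 1 for the first execution)
+ function nextExecNumber(runDir: string, testCaseId: string): number {
+   const caseDir = path.join(runDir, testCaseId);
+   if (!fs.existsSync(caseDir)) return 1;
+   const nums = fs.readdirSync(caseDir)
+     .filter((name) => name.startsWith("exec-"))
+     .map((name) => parseInt(name.slice(5), 10))
+     .filter((n) => Number.isFinite(n));
+   return nums.length === 0 ? 1 : Math.max(...nums) + 1;
+ }
+ \`\`\`
+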
+ ### Test the Script
+
+ 1. Run the project's tests to generate a sample output (or use an existing one)
+ 2. Run the parse script: \`npx tsx reporters/parse-results.ts --input <sample-output>\`
+ 3. Verify \`test-runs/\` was created with the correct manifest.json structure
+ 4. Check that failed test directories have result.json files
+
+ ### Document in CLAUDE.md
+
+ Add to \`./tests/CLAUDE.md\`:
+ - Location: \`reporters/parse-results.ts\`
+ - Usage: \`npx tsx reporters/parse-results.ts --input <file-or-url> [--timestamp <ts>] [--test-id <id>]\`
+ - Where the project's native test output is located (for local runs)\`,
+ tags: ["generation", "byot", "results", "parser"]
+ };
+

  // src/tasks/steps/communication/notify-team.ts
  init_cjs_shims();
@@ -7868,11 +6963,13 @@ var STEP_LIBRARY = {
  "create-exploration-test-case": createExplorationTestCaseStep,
  "run-exploration": runExplorationStep,
  "process-exploration-results": processExplorationResultsStep,
+ "normalize-test-results": normalizeTestResultsStep,
  // Generation
  "generate-test-plan": generateTestPlanStep,
  "generate-test-cases": generateTestCasesStep,
  "automate-test-cases": automateTestCasesStep,
  "extract-env-variables": extractEnvVariablesStep,
+ "create-results-parser": createResultsParserStep,
  // Communication
  "notify-team": notifyTeamStep,
  // Maintenance