@bugzy-ai/bugzy 1.5.0 → 1.6.0

This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
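A diff like this can be reproduced locally with npm's built-in `diff` command (a minimal sketch, assuming npm 7 or later is installed; the package name and versions are taken from the header above):

```bash
# Compare the two published versions of the package
npm diff --diff=@bugzy-ai/bugzy@1.5.0 --diff=@bugzy-ai/bugzy@1.6.0

# Optionally restrict the output to a single file, e.g. the manifest
npm diff --diff=@bugzy-ai/bugzy@1.5.0 --diff=@bugzy-ai/bugzy@1.6.0 package.json
```

Trailing path arguments filter the diff to matching files, which is useful here since most of the churn is in generated `dist/` bundles and sourcemaps.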
Files changed (36)
  1. package/README.md +10 -7
  2. package/dist/cli/index.cjs +6168 -5848
  3. package/dist/cli/index.cjs.map +1 -1
  4. package/dist/cli/index.js +6168 -5848
  5. package/dist/cli/index.js.map +1 -1
  6. package/dist/index.cjs +5563 -5302
  7. package/dist/index.cjs.map +1 -1
  8. package/dist/index.d.cts +5 -4
  9. package/dist/index.d.ts +5 -4
  10. package/dist/index.js +5560 -5300
  11. package/dist/index.js.map +1 -1
  12. package/dist/subagents/index.cjs +368 -51
  13. package/dist/subagents/index.cjs.map +1 -1
  14. package/dist/subagents/index.js +368 -51
  15. package/dist/subagents/index.js.map +1 -1
  16. package/dist/subagents/metadata.cjs +10 -2
  17. package/dist/subagents/metadata.cjs.map +1 -1
  18. package/dist/subagents/metadata.js +10 -2
  19. package/dist/subagents/metadata.js.map +1 -1
  20. package/dist/tasks/index.cjs +864 -2391
  21. package/dist/tasks/index.cjs.map +1 -1
  22. package/dist/tasks/index.d.cts +48 -5
  23. package/dist/tasks/index.d.ts +48 -5
  24. package/dist/tasks/index.js +862 -2389
  25. package/dist/tasks/index.js.map +1 -1
  26. package/dist/templates/init/.bugzy/runtime/knowledge-base.md +61 -0
  27. package/dist/templates/init/.bugzy/runtime/knowledge-maintenance-guide.md +97 -0
  28. package/dist/templates/init/.bugzy/runtime/subagent-memory-guide.md +87 -0
  29. package/dist/templates/init/.bugzy/runtime/templates/test-plan-template.md +41 -16
  30. package/dist/templates/init/.bugzy/runtime/templates/test-result-schema.md +498 -0
  31. package/dist/templates/init/.bugzy/runtime/test-execution-strategy.md +535 -0
  32. package/dist/templates/init/.bugzy/runtime/testing-best-practices.md +368 -14
  33. package/dist/templates/init/.gitignore-template +23 -2
  34. package/package.json +1 -1
  35. package/templates/init/.bugzy/runtime/templates/test-plan-template.md +41 -16
  36. package/templates/init/.env.testdata +18 -0
@@ -1 +1 @@
- {"version":3,"sources":["../../src/tasks/constants.ts","../../src/tasks/templates/exploration-instructions.ts","../../src/tasks/templates/knowledge-base.ts","../../src/tasks/library/explore-application.ts","../../src/tasks/templates/clarification-instructions.ts","../../src/tasks/library/generate-test-cases.ts","../../src/tasks/library/generate-test-plan.ts","../../src/tasks/library/handle-message.ts","../../src/tasks/library/process-event.ts","../../src/tasks/library/run-tests.ts","../../src/tasks/library/verify-changes.ts","../../src/tasks/index.ts"],"sourcesContent":["/**\n * Task Slug Constants\n * Single source of truth for all task identifiers\n *\n * These constants should be used throughout the codebase instead of hardcoded strings\n * to ensure type safety and prevent typos.\n */\nexport const TASK_SLUGS = {\n EXPLORE_APPLICATION: 'explore-application',\n GENERATE_TEST_CASES: 'generate-test-cases',\n GENERATE_TEST_PLAN: 'generate-test-plan',\n HANDLE_MESSAGE: 'handle-message',\n PROCESS_EVENT: 'process-event',\n RUN_TESTS: 'run-tests',\n VERIFY_CHANGES: 'verify-changes',\n} as const;\n\n/**\n * Type for task slugs\n * Ensures only valid task slugs can be used\n */\nexport type TaskSlug = typeof TASK_SLUGS[keyof typeof TASK_SLUGS];\n","/**\n * Exploration Protocol - Shared Template\n * Provides adaptive exploratory testing instructions based on requirement clarity\n * Used to validate requirements and discover actual behavior before formal testing\n */\n\nexport const EXPLORATION_INSTRUCTIONS = `\n## Exploratory Testing Protocol\n\nBefore creating or running formal tests, perform exploratory testing to validate requirements and understand actual system behavior. The depth of exploration should adapt to the clarity of requirements.\n\n### Step {{STEP_NUMBER}}.1: Assess Requirement Clarity\n\nDetermine exploration depth based on requirement quality:\n\n| Clarity | Indicators | Exploration Depth | Goal |\n|---------|-----------|-------------------|------|\n| **Clear** | Detailed acceptance criteria, screenshots/mockups, specific field names/URLs/roles, unambiguous behavior, consistent patterns | Quick (1-2 min) | Confirm feature exists, capture evidence |\n| **Vague** | General direction clear but specifics missing, incomplete examples, assumed details, relative terms (\"fix\", \"better\") | Moderate (3-5 min) | Document current behavior, identify ambiguities, generate clarification questions |\n| **Unclear** | Contradictory info, multiple interpretations, no examples/criteria, ambiguous scope (\"the page\"), critical details missing | Deep (5-10 min) | Systematically test scenarios, document patterns, identify all ambiguities, formulate comprehensive questions |\n\n**Examples:**\n- **Clear:** \"Change 'Submit' button from blue (#007BFF) to green (#28A745) on /auth/login. Verify hover effect.\"\n- **Vague:** \"Fix the sorting in todo list page. The items are mixed up for premium users.\"\n- **Unclear:** \"Improve the dashboard performance. Users say it's slow.\"\n\n### Step {{STEP_NUMBER}}.2: Quick Exploration (1-2 min)\n\n**When:** Requirements CLEAR\n\n**Steps:**\n1. Navigate to feature (use provided URL), verify loads without errors\n2. Verify key elements exist (buttons, fields, sections mentioned)\n3. Capture screenshot of initial state\n4. Document:\n \\`\\`\\`markdown\n **Quick Exploration (1 min)**\n Feature: [Name] | URL: [Path]\n Status: ✅ Accessible / ❌ Not found / ⚠️ Different\n Screenshot: [filename]\n Notes: [Immediate observations]\n \\`\\`\\`\n5. 
**Decision:** ✅ Matches → Test creation | ❌/⚠️ Doesn't match → Moderate Exploration\n\n**Time Limit:** 1-2 minutes\n\n### Step {{STEP_NUMBER}}.3: Moderate Exploration (3-5 min)\n\n**When:** Requirements VAGUE or Quick Exploration revealed discrepancies\n\n**Steps:**\n1. Navigate using appropriate role(s), set up preconditions, ensure clean state\n2. Test primary user flow, document steps and behavior, note unexpected behavior\n3. Capture before/after screenshots, document field values/ordering/visibility\n4. Compare to requirement: What matches? What differs? What's absent?\n5. Identify specific ambiguities:\n \\`\\`\\`markdown\n **Moderate Exploration (4 min)**\n\n **Explored:** Role: [Admin], Path: [Steps], Behavior: [What happened]\n\n **Current State:** [Specific observations with examples]\n - Example: \"Admin view shows 8 sort options: By Title, By Due Date, By Priority...\"\n\n **Requirement Says:** [What requirement expected]\n\n **Discrepancies:** [Specific differences]\n - Example: \"Premium users see 5 fewer sorting options than admins\"\n\n **Ambiguities:**\n 1. [First ambiguity with concrete example]\n 2. [Second if applicable]\n\n **Clarification Needed:** [Specific questions]\n \\`\\`\\`\n6. Assess severity using Clarification Protocol\n7. **Decision:** 🟢 Minor → Proceed with assumptions | 🟡 Medium → Async clarification, proceed | 🔴 Critical → Stop, escalate\n\n**Time Limit:** 3-5 minutes\n\n### Step {{STEP_NUMBER}}.4: Deep Exploration (5-10 min)\n\n**When:** Requirements UNCLEAR or critical ambiguities found\n\n**Steps:**\n1. **Define Exploration Matrix:** Identify dimensions (user roles, feature states, input variations, browsers)\n\n2. **Systematic Testing:** Test each matrix cell methodically\n \\`\\`\\`\n Example for \"Todo List Sorting\":\n Matrix: User Roles × Feature Observations\n\n Test 1: Admin Role → Navigate, document sort options (count, names, order), screenshot\n Test 2: Basic User Role → Same todo list, document options, screenshot\n Test 3: Compare → Side-by-side table, identify missing/reordered options\n \\`\\`\\`\n\n3. **Document Patterns:** Consistent behavior? Role-based differences? What varies vs constant?\n\n4. **Comprehensive Report:**\n \\`\\`\\`markdown\n **Deep Exploration (8 min)**\n\n **Matrix:** [Dimensions] | **Tests:** [X combinations]\n\n **Findings:**\n\n ### Test 1: Admin\n - Setup: [Preconditions] | Steps: [Actions]\n - Observations: Sort options=8, Options=[list], Ordering=[sequence]\n - Screenshot: [filename-admin.png]\n\n ### Test 2: Basic User\n - Setup: [Preconditions] | Steps: [Actions]\n - Observations: Sort options=3, Missing vs Admin=[5 options], Ordering=[sequence]\n - Screenshot: [filename-user.png]\n\n **Comparison Table:**\n | Sort Option | Admin Pos | User Pos | Notes |\n |-------------|-----------|----------|-------|\n | By Title | 1 | 1 | Match |\n | By Priority | 3 | Not visible | Missing |\n\n **Patterns:**\n - Role-based feature visibility\n - Consistent relative ordering for visible fields\n\n **Critical Ambiguities:**\n 1. Option Visibility: Intentional basic users see 5 fewer sort options?\n 2. Sort Definition: (A) All roles see all options in same order, OR (B) Roles see permitted options in same relative order?\n\n **Clarification Questions:** [Specific, concrete based on findings]\n \\`\\`\\`\n\n5. 
**Next Action:** Critical ambiguities → STOP, clarify | Patterns suggest answer → Validate assumption | Behavior clear → Test creation\n\n**Time Limit:** 5-10 minutes\n\n### Step {{STEP_NUMBER}}.5: Link Exploration to Clarification\n\n**Flow:** Requirement Analysis → Exploration → Clarification\n\n1. Requirement analysis detects vague language → Triggers exploration\n2. Exploration documents current behavior → Identifies discrepancies\n3. Clarification uses findings → Asks specific questions referencing observations\n\n**Example:**\n\\`\\`\\`\n\"Fix the sorting in todo list\"\n ↓ Ambiguity: \"sorting\" = by date, priority, or completion status?\n ↓ Moderate Exploration: Admin=8 sort options, User=3 sort options\n ↓ Question: \"Should basic users see all 8 sort options (bug) or only 3 with consistent sequence (correct)?\"\n\\`\\`\\`\n\n### Step {{STEP_NUMBER}}.6: Document Exploration Results\n\n**Template:**\n\\`\\`\\`markdown\n## Exploration Summary\n\n**Date:** [YYYY-MM-DD] | **Explorer:** [Agent/User] | **Depth:** [Quick/Moderate/Deep] | **Duration:** [X min]\n\n### Feature: [Name and description]\n\n### Observations: [Key findings]\n\n### Current Behavior: [What feature does today]\n\n### Discrepancies: [Requirement vs observation differences]\n\n### Assumptions Made: [If proceeding with assumptions]\n\n### Artifacts: Screenshots: [list], Video: [if captured], Notes: [detailed]\n\\`\\`\\`\n\n**Memory Storage:** Feature behavior patterns, common ambiguity types, resolution approaches\n\n### Step {{STEP_NUMBER}}.7: Integration with Test Creation\n\n**Quick Exploration → Direct Test:**\n- Feature verified → Create test matching requirement → Reference screenshot\n\n**Moderate Exploration → Assumption-Based Test:**\n- Document behavior → Create test on best interpretation → Mark assumptions → Plan updates after clarification\n\n**Deep Exploration → Clarification-First:**\n- Block test creation until clarification → Use exploration as basis for questions → Create test after answer → Reference both exploration and clarification\n\n---\n\n## Adaptive Exploration Decision Tree\n\n\\`\\`\\`\nStart: Requirement Received\n ↓\nAre requirements clear with specifics?\n ├─ YES → Quick Exploration (1-2 min)\n │ ↓\n │ Does feature match description?\n │ ├─ YES → Proceed to Test Creation\n │ └─ NO → Escalate to Moderate Exploration\n │\n └─ NO → Is general direction clear but details missing?\n ├─ YES → Moderate Exploration (3-5 min)\n │ ↓\n │ Are ambiguities MEDIUM severity or lower?\n │ ├─ YES → Document assumptions, proceed with test creation\n │ └─ NO → Escalate to Deep Exploration or Clarification\n │\n └─ NO → Deep Exploration (5-10 min)\n ↓\n Document comprehensive findings\n ↓\n Assess ambiguity severity\n ↓\n Seek clarification for CRITICAL/HIGH\n\\`\\`\\`\n\n---\n\n## Remember:\n\n🔍 **Explore before assuming** | 📊 **Concrete observations > abstract interpretation** | ⏱️ **Adaptive depth: time ∝ uncertainty** | 🎯 **Exploration findings → specific clarifications** | 📝 **Always document** | 🔗 **Link exploration → ambiguity → clarification**\n`;\n","/**\n * Knowledge Base Template\n * Provides instructions for reading and maintaining the curated knowledge base\n * Used across all tasks to maintain a living reference of factual knowledge\n */\n\nexport const KNOWLEDGE_BASE_READ_INSTRUCTIONS = `\n## Knowledge Base Context\n\nBefore proceeding, read the curated knowledge base to inform your work:\n\n**Location:** \\`.bugzy/runtime/knowledge-base.md\\`\n\n**Purpose:** The knowledge base is a living 
collection of factual knowledge - what we currently know and believe to be true about this project, its patterns, and its context. This is NOT a historical log, but a curated snapshot that evolves as understanding improves.\n\n**How to Use:**\n1. Read the knowledge base to understand:\n - Project-specific patterns and conventions\n - Known behaviors and system characteristics\n - Relevant context from past work\n - Documented decisions and approaches\n\n2. Apply this knowledge to:\n - Make informed decisions aligned with project patterns\n - Avoid repeating past mistakes\n - Build on existing understanding\n - Maintain consistency with established practices\n\n**Note:** The knowledge base may not exist yet or may be empty. If it doesn't exist or is empty, proceed without this context and help build it as you work.\n`;\n\nexport const KNOWLEDGE_BASE_UPDATE_INSTRUCTIONS = `\n## Knowledge Base Maintenance\n\nAfter completing your work, update the knowledge base with new insights.\n\n**Location:** \\`.bugzy/runtime/knowledge-base.md\\`\n\n**Process:**\n\n1. **Read the maintenance guide** at \\`.bugzy/runtime/knowledge-maintenance-guide.md\\` to understand when to ADD, UPDATE, or REMOVE entries and how to maintain a curated knowledge base (not an append-only log)\n\n2. **Review the current knowledge base** to check for overlaps, contradictions, or opportunities to consolidate existing knowledge\n\n3. **Update the knowledge base** following the maintenance guide principles: favor consolidation over addition, update rather than append, resolve contradictions immediately, and focus on quality over completeness\n\n**Remember:** Every entry should answer \"Will this help someone working on this project in 6 months?\"\n`;\n","/**\n * Explore Application Task\n * Systematically explore application to discover UI elements, workflows, and behaviors\n */\n\nimport { TaskTemplate } from '../types';\nimport { TASK_SLUGS } from '../constants';\nimport { EXPLORATION_INSTRUCTIONS } from '../templates/exploration-instructions';\nimport { KNOWLEDGE_BASE_READ_INSTRUCTIONS, KNOWLEDGE_BASE_UPDATE_INSTRUCTIONS } from '../templates/knowledge-base.js';\n\nexport const exploreApplicationTask: TaskTemplate = {\n slug: TASK_SLUGS.EXPLORE_APPLICATION,\n name: 'Explore Application',\n description: 'Systematically explore application to discover UI elements, workflows, and behaviors',\n\n frontmatter: {\n description: 'Systematically explore application to discover UI elements, workflows, and behaviors',\n 'argument-hint': '--focus [area] --depth [shallow|deep] --system [system-name]',\n },\n\n baseContent: `# Explore Application Command\n\n## SECURITY NOTICE\n**CRITICAL**: Never read the \\`.env\\` file. It contains ONLY secrets (passwords, API keys).\n- **Read \\`.env.testdata\\`** for non-secret environment variables (TEST_BASE_URL, TEST_OWNER_EMAIL, etc.)\n- \\`.env.testdata\\` contains actual values for test data, URLs, and non-sensitive configuration\n- For secrets: Reference variable names only (TEST_OWNER_PASSWORD) - values are injected at runtime\n- The \\`.env\\` file access is blocked by settings.json\n\nSystematically explore the application using the test-runner agent to discover actual UI elements, workflows, and behaviors. 
Updates test plan and project documentation with findings.\n\n## Arguments\nArguments: $ARGUMENTS\n\n## Parse Arguments\nExtract the following from arguments:\n- **focus**: Specific area to explore (authentication, navigation, search, content, admin)\n- **depth**: Exploration depth - shallow (quick discovery) or deep (comprehensive) - defaults to deep\n- **system**: Which system to explore (optional for multi-system setups)\n\n${KNOWLEDGE_BASE_READ_INSTRUCTIONS}\n\n## Process\n\n### Step 0: Understand Exploration Protocol\n\nThis task implements the exploration protocol defined in the exploration-instructions template.\n\n**Purpose**: This task provides the infrastructure for systematic application exploration that is referenced by other tasks (generate-test-plan, generate-test-cases, verify-changes) when they need to explore features before proceeding.\n\n**Depth Alignment**: The depth levels in this task align with the exploration template:\n- **Shallow exploration (15-20 min)** implements the quick/moderate exploration from the template\n- **Deep exploration (45-60 min)** implements comprehensive deep exploration from the template\n\nThe depth levels are extended for full application exploration compared to the focused feature exploration used in other tasks.\n\n**Full Exploration Protocol Reference**:\n\n${EXPLORATION_INSTRUCTIONS}\n\n**Note**: This task extends the protocol for comprehensive application-wide exploration, while other tasks use abbreviated versions for targeted feature exploration.\n\n### Step 1: Load Environment and Context\n\n#### 1.1 Check Environment Variables\nRead \\`.env.testdata\\` file to understand what variables are required:\n- TEST_BASE_URL or TEST_MOBILE_BASE_URL (base URL variable names)\n- [SYSTEM_NAME]_URL (if multi-system setup)\n- Authentication credential variable names for the selected system\n- Any test data variable names\n\nNote: The actual values will be read from the user's \\`.env\\` file at test execution time.\nVerify \\`.env.testdata\\` exists to understand variable structure. If it doesn't exist, notify user to create it based on test plan.\n\n#### 1.2 Read Current Test Plan\nRead \\`test-plan.md\\` to:\n- Identify sections marked with [TO BE EXPLORED]\n- Find features requiring discovery\n- Understand testing scope and priorities\n\n#### 1.3 Read Project Context\nRead \\`.bugzy/runtime/project-context.md\\` for:\n- System architecture understanding\n- Testing environment details\n- QA workflow requirements\n\n### Step 2: Prepare Exploration Strategy\n\nBased on the arguments and context, prepare exploration instructions.\n\n#### 2.1 Focus Area Strategies\n\n**If focus is \"authentication\":**\n\\`\\`\\`\n1. Navigate to the application homepage\n2. Locate and document all authentication entry points:\n - Login button/link location and selector\n - Registration option and flow\n - Social login options (Facebook, Google, etc.)\n3. Test login flow:\n - Document form fields and validation\n - Test error states with invalid credentials\n - Verify successful login indicators\n4. Test logout functionality:\n - Find logout option\n - Verify session termination\n - Check redirect behavior\n5. Explore password recovery:\n - Locate forgot password link\n - Document recovery flow\n - Note email/SMS options\n6. Check role-based access:\n - Identify user role indicators\n - Document permission differences\n - Test admin/moderator access if available\n7. 
Test session persistence:\n - Check remember me functionality\n - Test timeout behavior\n - Verify multi-tab session handling\n\\`\\`\\`\n\n**If focus is \"navigation\":**\n\\`\\`\\`\n1. Document main navigation structure:\n - Primary menu items and hierarchy\n - Mobile menu behavior\n - Footer navigation links\n2. Map URL patterns:\n - Category URL structure\n - Parameter patterns\n - Deep linking support\n3. Test breadcrumb navigation:\n - Availability on different pages\n - Clickability and accuracy\n - Mobile display\n4. Explore category system:\n - Main categories and subcategories\n - Navigation between levels\n - Content organization\n5. Document special sections:\n - User profiles\n - Admin areas\n - Help/Support sections\n6. Test browser navigation:\n - Back/forward button behavior\n - History management\n - State preservation\n\\`\\`\\`\n\n**If focus is \"search\":**\n\\`\\`\\`\n1. Locate search interfaces:\n - Main search bar\n - Advanced search options\n - Category-specific search\n2. Document search features:\n - Autocomplete/suggestions\n - Search filters\n - Sort options\n3. Test search functionality:\n - Special character handling\n - Empty/invalid queries\n4. Analyze search results:\n - Result format and layout\n - Pagination\n - No results handling\n5. Check search performance:\n - Response times\n - Result relevance\n - Load more/infinite scroll\n\\`\\`\\`\n\n**If no focus specified:**\nUse comprehensive exploration covering all major areas.\n\n#### 2.2 Depth Configuration\n\n**Implementation Note**: These depths implement the exploration protocol defined in exploration-instructions.ts, extended for full application exploration.\n\n**Shallow exploration (--depth shallow):**\n- Quick discovery pass (15-20 minutes)\n- Focus on main features only\n- Basic screenshot capture\n- High-level findings\n- *Aligns with Quick/Moderate exploration from template*\n\n**Deep exploration (--depth deep or default):**\n- Comprehensive exploration (45-60 minutes)\n- Test edge cases and variations\n- Extensive screenshot documentation\n- Detailed technical findings\n- Performance observations\n- Accessibility notes\n- *Aligns with Deep exploration from template*\n\n### Step 3: Execute Exploration\n\n#### 3.1 Create Exploration Test Case\nGenerate a temporary exploration test case file at \\`./test-cases/EXPLORATION-TEMP.md\\`:\n\n\\`\\`\\`markdown\n---\nid: EXPLORATION-TEMP\ntitle: Application Exploration - [Focus Area or Comprehensive]\ntype: exploratory\npriority: high\n---\n\n## Preconditions\n- Browser with cleared cookies and cache\n- Access to [system] environment\n- Credentials configured per .env.testdata template\n\n## Test Steps\n[Generated exploration steps based on strategy]\n\n## Expected Results\nDocument all findings including:\n- UI element locations and selectors\n- Navigation patterns and URLs\n- Feature behaviors and workflows\n- Performance observations\n- Error states and edge cases\n- Screenshots of all key areas\n\\`\\`\\`\n\n#### 3.2 Launch Test Runner Agent\n{{INVOKE_TEST_RUNNER}}\n\nExecute the exploration test case with special exploration instructions:\n\n\\`\\`\\`\nExecute the exploration test case at ./test-cases/EXPLORATION-TEMP.md with focus on discovery and documentation.\n\nSpecial instructions for exploration mode:\n1. Take screenshots of EVERY significant UI element and page\n2. Document all clickable elements with their selectors\n3. Note all URL patterns and parameters\n4. Test variations and edge cases where possible\n5. 
Document load times and performance observations\n6. Create detailed findings report with structured data\n7. Organize screenshots by functional area\n8. Note any console errors or warnings\n9. Document which features are accessible vs restricted\n\nGenerate a comprehensive exploration report that can be used to update project documentation.\n\\`\\`\\`\n\n### Step 4: Process Exploration Results\n\n#### 4.1 Read Test Runner Output\nRead the generated test run files from \\`./test-runs/[timestamp]/EXPLORATION-TEMP/\\`:\n- \\`findings.md\\` - Main findings document\n- \\`test-log.md\\` - Detailed step execution\n- \\`screenshots/\\` - Visual documentation\n- \\`summary.json\\` - Execution summary\n\n#### 4.2 Parse and Structure Findings\nExtract and organize:\n- Discovered features and capabilities\n- UI element selectors and patterns\n- Navigation structure and URLs\n- Authentication flow details\n- Performance metrics\n- Technical observations\n- Areas requiring further investigation\n\n### Step 5: Update Project Artifacts\n\n#### 5.1 Update Test Plan\nRead and update \\`test-plan.md\\`:\n- Replace [TO BE EXPLORED] markers with concrete findings\n- Add newly discovered features to test items\n- Update navigation patterns and URL structures\n- Document actual authentication methods\n- Update environment variables if new ones discovered\n- Refine pass/fail criteria based on actual behavior\n\n#### 5.2 Create Exploration Report\nCreate \\`./exploration-reports/[timestamp]-[focus]-exploration.md\\`\n\n### Step 6: Cleanup\n\n#### 6.1 Remove Temporary Files\nDelete the temporary exploration test case:\n\\`\\`\\`bash\nrm ./test-cases/EXPLORATION-TEMP.md\n\\`\\`\\`\n\n### Step 7: Generate Summary Report\nCreate a concise summary for the user\n\n## Error Handling\n\n### Environment Issues\n- If \\`.env.testdata\\` missing: Warn user and suggest creating it from test plan\n- If credentials invalid (at runtime): Document in report and continue with public areas\n- If system unreachable: Retry with exponential backoff, report if persistent\n\n### Exploration Failures\n- If test-runner fails: Capture partial results and report\n- If specific area inaccessible: Note in findings and continue\n- If browser crashes: Attempt recovery and resume\n- If test-runner stops, but does not create files, inspect what it did and if it was not enough remove the test-run and start the test-runner agent again. 
If it has enough info, continue with what you have.\n\n### Data Issues\n- If dynamic content prevents exploration: Note and try alternative approaches\n- If rate limited: Implement delays and retry\n\n## Integration with Other Commands\n\n### Feeds into /generate-test-cases\n- Provides actual UI elements for test steps\n- Documents real workflows for test scenarios\n- Identifies edge cases to test\n\n### Updates from /process-event\n- New exploration findings can be processed as events\n- Discovered bugs trigger issue creation\n- Feature discoveries update test coverage\n\n### Enhances /run-tests\n- Tests use discovered selectors\n- Validation based on actual behavior\n- More reliable test execution\n\n${KNOWLEDGE_BASE_UPDATE_INSTRUCTIONS}`,\n\n optionalSubagents: [],\n requiredSubagents: ['test-runner']\n};\n","/**\n * Clarification Protocol - Shared Template\n * Provides standardized instructions for detecting ambiguity, assessing severity, and seeking clarification\n * Used across all agent library tasks for consistent clarification handling\n */\n\nexport const CLARIFICATION_INSTRUCTIONS = `\n## Clarification Protocol\n\nBefore proceeding with test creation or execution, ensure requirements are clear and testable. Use this protocol to detect ambiguity, assess its severity, and determine the appropriate action.\n\n### Step {{STEP_NUMBER}}.0: Check for Pending Clarification\n\nBefore starting, check if this task is resuming from a blocked clarification:\n\n1. **Check $ARGUMENTS for clarification data:**\n - If \\`$ARGUMENTS.clarification\\` exists, this task is resuming with a clarification response\n - Extract: \\`clarification\\` (the user's answer), \\`originalArgs\\` (original task parameters)\n\n2. **If clarification is present:**\n - Read \\`.bugzy/runtime/blocked-task-queue.md\\`\n - Find and remove your task's entry from the queue (update the file)\n - Proceed using the clarification as if user just provided the answer\n - Skip ambiguity detection for the clarified aspect\n\n3. **If no clarification in $ARGUMENTS:** Proceed normally with ambiguity detection below.\n\n### Step {{STEP_NUMBER}}.1: Detect Ambiguity\n\nScan for ambiguity signals:\n\n**Language:** Vague terms (\"fix\", \"improve\", \"better\", \"like\", \"mixed up\"), relative terms without reference (\"faster\", \"more\"), undefined scope (\"the ordering\", \"the fields\", \"the page\"), modal ambiguity (\"should\", \"could\" vs \"must\", \"will\")\n\n**Details:** Missing acceptance criteria (no clear PASS/FAIL), no examples/mockups, incomplete field/element lists, unclear role behavior differences, unspecified error scenarios\n\n**Interpretation:** Multiple valid interpretations, contradictory information (description vs comments), implied vs explicit requirements\n\n**Context:** No reference documentation, \"RELEASE APPROVED\" without criteria, quick ticket creation, assumes knowledge (\"as you know...\", \"obviously...\")\n\n**Quick Check:**\n- [ ] Success criteria explicitly defined? (PASS if X, FAIL if Y)\n- [ ] All affected elements specifically listed? 
(field names, URLs, roles)\n- [ ] Only ONE reasonable interpretation?\n- [ ] Examples, screenshots, or mockups provided?\n- [ ] Consistent with existing system patterns?\n- [ ] Can write test assertions without assumptions?\n\n### Step {{STEP_NUMBER}}.2: Assess Severity\n\nIf ambiguity is detected, assess its severity:\n\n| Severity | Characteristics | Examples | Action |\n|----------|----------------|----------|--------|\n| 🔴 **CRITICAL** | Expected behavior undefined/contradictory; test outcome unpredictable; core functionality unclear; success criteria missing; multiple interpretations = different strategies | \"Fix the issue\" (what issue?), \"Improve performance\" (which metrics?), \"Fix sorting in todo list\" (by date? priority? completion status?) | **STOP** - Seek clarification before proceeding |\n| 🟠 **HIGH** | Core underspecified but direction clear; affects majority of scenarios; vague success criteria; assumptions risky | \"Fix ordering\" (sequence OR visibility?), \"Add validation\" (what? messages?), \"Update dashboard\" (which widgets?) | **STOP** - Seek clarification before proceeding |\n| 🟡 **MEDIUM** | Specific details missing; general requirements clear; affects subset of cases; reasonable low-risk assumptions possible; wrong assumption = test updates not strategy overhaul | Missing field labels, unclear error message text, undefined timeouts, button placement not specified, date formats unclear | **PROCEED** - (1) Moderate exploration, (2) Document assumptions: \"Assuming X because Y\", (3) Proceed with creation/execution, (4) Async clarification (team-communicator), (5) Mark [ASSUMED: description] |\n| 🟢 **LOW** | Minor edge cases; documentation gaps don't affect execution; optional/cosmetic elements; minimal impact | Tooltip text, optional field validation, icon choice, placeholder text, tab order | **PROCEED** - (1) Mark [TO BE CLARIFIED: description], (2) Proceed, (3) Mention in report \"Minor Details\", (4) No blocking/async clarification |\n\n### Step {{STEP_NUMBER}}.3: Check Memory for Similar Clarifications\n\nBefore asking, check if similar question was answered:\n\n**Process:**\n1. **Query team-communicator memory** - Search by feature name, ambiguity pattern, ticket keywords\n2. **Review past Q&A** - Similar question asked? What was answer? Applicable now?\n3. **Assess reusability:**\n - Directly applicable → Use answer, no re-ask\n - Partially applicable → Adapt and reference (\"Previously for X, clarified Y. Same here?\")\n - Not applicable → Ask as new\n4. 
**Update memory** - Store Q&A with task type, feature, pattern tags\n\n**Example:** Query \"todo sorting priority\" → Found 2025-01-15: \"Should completed todos appear in main list?\" → Answer: \"No, move to separate archive view\" → Directly applicable → Use, no re-ask needed\n\n### Step {{STEP_NUMBER}}.4: Formulate Clarification Questions\n\nIf clarification needed (CRITICAL/HIGH severity), formulate specific, concrete questions:\n\n**Good Questions:** Specific and concrete, provide context, offer options, reference examples, tie to test strategy\n\n**Bad Questions:** Too vague/broad, assumptive, multiple questions in one, no context\n\n**Template:**\n\\`\\`\\`\n**Context:** [Current understanding]\n**Ambiguity:** [Specific unclear aspect]\n**Question:** [Specific question with options]\n**Why Important:** [Testing strategy impact]\n\nExample:\nContext: TODO-456 \"Fix the sorting in the todo list so items appear in the right order\"\nAmbiguity: \"sorting\" = (A) by creation date, (B) by due date, (C) by priority level, or (D) custom user-defined order\nQuestion: Should todos be sorted by due date (soonest first) or priority (high to low)? Should completed items appear in the list or move to archive?\nWhy Important: Different sort criteria require different test assertions. Current app shows 15 active todos + 8 completed in mixed order.\n\\`\\`\\`\n\n### Step {{STEP_NUMBER}}.5: Communicate Clarification Request\n\n**For Slack-Triggered Tasks:** Use team-communicator subagent:\n\\`\\`\\`\nAsk clarification in Slack thread:\nContext: [From ticket/description]\nAmbiguity: [Describe ambiguity]\nSeverity: [CRITICAL/HIGH]\nQuestions:\n1. [First specific question]\n2. [Second if needed]\n\nClarification needed to proceed. I'll wait for response before testing.\n\\`\\`\\`\n\n**For Manual/API Triggers:** Include in task output:\n\\`\\`\\`markdown\n## ⚠️ Clarification Required Before Testing\n\n**Ambiguity:** [Description]\n**Severity:** [CRITICAL/HIGH]\n\n### Questions:\n1. **Question:** [First question]\n - Context: [Provide context]\n - Options: [If applicable]\n - Impact: [Testing impact]\n\n**Action Required:** Provide clarification. Testing cannot proceed.\n**Current Observation:** [What exploration revealed - concrete examples]\n\\`\\`\\`\n\n### Step {{STEP_NUMBER}}.5.1: Register Blocked Task (CRITICAL/HIGH only)\n\nWhen asking a CRITICAL or HIGH severity question that blocks progress, register the task in the blocked queue so it can be automatically re-triggered when clarification arrives.\n\n**Update \\`.bugzy/runtime/blocked-task-queue.md\\`:**\n\n1. Read the current file (create if doesn't exist)\n2. Add a new row to the Queue table\n\n\\`\\`\\`markdown\n# Blocked Task Queue\n\nTasks waiting for clarification responses.\n\n| Task Slug | Question | Original Args |\n|-----------|----------|---------------|\n| generate-test-plan | Should todos be sorted by date or priority? | \\`{\"ticketId\": \"TODO-456\"}\\` |\n\\`\\`\\`\n\n**Entry Fields:**\n- **Task Slug**: The task slug (e.g., \\`generate-test-plan\\`) - used for re-triggering\n- **Question**: The clarification question asked (so LLM can match responses)\n- **Original Args**: JSON-serialized \\`$ARGUMENTS\\` wrapped in backticks\n\n**Purpose**: The LLM processor reads this file and matches user responses to pending questions. 
When a match is found, it re-queues the task with the clarification.\n\n### Step {{STEP_NUMBER}}.6: Wait or Proceed Based on Severity\n\n**CRITICAL/HIGH → STOP and Wait:**\n- Do NOT create tests, run tests, or make assumptions\n- Wait for clarification, resume after answer\n- *Rationale: Wrong assumptions = incorrect tests, false results, wasted time*\n\n**MEDIUM → Proceed with Documented Assumptions:**\n- Perform moderate exploration, document assumptions, proceed with creation/execution\n- Ask clarification async (team-communicator), mark results \"based on assumptions\"\n- Update tests after clarification received\n- *Rationale: Waiting blocks progress; documented assumptions allow forward movement with later corrections*\n\n**LOW → Proceed and Mark:**\n- Proceed with creation/execution, mark gaps [TO BE CLARIFIED] or [ASSUMED]\n- Mention in report but don't prioritize, no blocking\n- *Rationale: Details don't affect strategy/results significantly*\n\n### Step {{STEP_NUMBER}}.7: Document Clarification in Results\n\nWhen reporting test results, always include an \"Ambiguities\" section if clarification occurred:\n\n\\`\\`\\`markdown\n## Ambiguities Encountered\n\n### Clarification: [Topic]\n- **Severity:** [CRITICAL/HIGH/MEDIUM/LOW]\n- **Question Asked:** [What was asked]\n- **Response:** [Answer received, or \"Awaiting response\"]\n- **Impact:** [How this affected testing]\n- **Assumption Made:** [If proceeded with assumption]\n- **Risk:** [What could be wrong if assumption is incorrect]\n\n### Resolution:\n[How the clarification was resolved and incorporated into testing]\n\\`\\`\\`\n\n---\n\n## Remember:\n\n🛑 **Block for CRITICAL/HIGH** | ✅ **Ask correctly > guess poorly** | 📝 **Document MEDIUM assumptions** | 🔍 **Check memory first** | 🎯 **Specific questions → specific answers**\n`;\n","/**\n * Generate Test Cases Task\n * Generate both manual test case documentation AND automated Playwright test scripts\n */\n\nimport { TaskTemplate } from '../types';\nimport { TASK_SLUGS } from '../constants';\nimport { EXPLORATION_INSTRUCTIONS } from '../templates/exploration-instructions';\nimport { CLARIFICATION_INSTRUCTIONS } from '../templates/clarification-instructions';\nimport { KNOWLEDGE_BASE_READ_INSTRUCTIONS, KNOWLEDGE_BASE_UPDATE_INSTRUCTIONS } from '../templates/knowledge-base.js';\n\nexport const generateTestCasesTask: TaskTemplate = {\n slug: TASK_SLUGS.GENERATE_TEST_CASES,\n name: 'Generate Test Cases',\n description: 'Generate manual test case documentation AND automated Playwright test scripts from test plan',\n\n frontmatter: {\n description: 'Generate manual test case documentation AND automated Playwright test scripts from test plan',\n 'argument-hint': '--type [exploratory|functional|regression|smoke] --focus [optional-feature]',\n },\n\n baseContent: `# Generate Test Cases Command\n\n## SECURITY NOTICE\n**CRITICAL**: Never read the \\`.env\\` file. It contains ONLY secrets (passwords, API keys).\n- **Read \\`.env.testdata\\`** for non-secret test data (TEST_BASE_URL, TEST_OWNER_EMAIL, etc.)\n- \\`.env.testdata\\` contains actual values for test data, URLs, and non-sensitive configuration\n- For secrets: Reference variable names only (TEST_OWNER_PASSWORD) - values are injected at runtime\n- The \\`.env\\` file access is blocked by settings.json\n\nGenerate comprehensive test artifacts including BOTH manual test case documentation AND automated Playwright test scripts.\n\n## Overview\n\nThis command generates:\n1. 
**Manual Test Case Documentation** (in \\`./test-cases/\\`) - Human-readable test cases in markdown format\n2. **Automated Playwright Tests** (in \\`./tests/specs/\\`) - Executable TypeScript test scripts\n3. **Page Object Models** (in \\`./tests/pages/\\`) - Reusable page classes for automated tests\n4. **Supporting Files** (fixtures, helpers, components) - As needed for test automation\n\n## Arguments\nArguments: \\$ARGUMENTS\n\n## Parse Arguments\nExtract the following from arguments:\n- **type**: Test type (exploratory, functional, regression, smoke) - defaults to functional\n- **focus**: Optional specific feature or section to focus on\n\n${KNOWLEDGE_BASE_READ_INSTRUCTIONS}\n\n## Process\n\n### Step 1: Gather Context\n\n#### 1.1 Read Test Plan\nRead the test plan from \\`test-plan.md\\` to understand:\n- Test items and features\n- Testing approach and automation strategy\n- Test Automation Strategy section (automated vs exploratory)\n- Pass/fail criteria\n- Test environment and data requirements\n- Automation decision criteria\n\n#### 1.2 Check Existing Test Cases and Tests\n- List all files in \\`./test-cases/\\` to understand existing manual test coverage\n- List all files in \\`./tests/specs/\\` to understand existing automated tests\n- Determine next test case ID (TC-XXX format)\n- Identify existing Page Objects in \\`./tests/pages/\\`\n- Avoid creating overlapping test cases or duplicate automation\n\n{{DOCUMENTATION_RESEARCHER_INSTRUCTIONS}}\n\n### Step 1.4: Explore Features (If Needed)\n\nIf documentation is insufficient or ambiguous, perform adaptive exploration to understand actual feature behavior before creating test cases.\n\n${EXPLORATION_INSTRUCTIONS.replace(/{{STEP_NUMBER}}/g, '1.4')}\n\n### Step 1.5: Clarify Ambiguities\n\nIf exploration or documentation review reveals ambiguous requirements, use the clarification protocol to resolve them before generating test cases.\n\n${CLARIFICATION_INSTRUCTIONS.replace(/{{STEP_NUMBER}}/g, '1.5')}\n\n**Important Notes:**\n- **CRITICAL/HIGH ambiguities:** STOP test case generation and seek clarification\n- **MEDIUM ambiguities:** Document assumptions explicitly in test case with [ASSUMED: reason]\n- **LOW ambiguities:** Mark with [TO BE CLARIFIED: detail] in test case notes section\n\n### Step 1.6: Organize Test Scenarios by Area\n\nBased on exploration and documentation, organize test scenarios by feature area/component:\n\n**Group scenarios into areas** (e.g., Authentication, Dashboard, Checkout, Profile Management):\n- Each area should be a logical feature grouping\n- Areas should be relatively independent for parallel test execution\n- Consider the application's navigation structure and user flows\n\n**For each area, identify scenarios**:\n\n1. **Critical User Paths** (must automate as smoke tests):\n - Login/authentication flows\n - Core feature workflows\n - Data creation/modification flows\n - Critical business transactions\n\n2. **Happy Path Scenarios** (automate for regression):\n - Standard user workflows\n - Common use cases\n - Typical data entry patterns\n\n3. **Error Handling Scenarios** (evaluate automation ROI):\n - Validation error messages\n - Network error handling\n - Permission/authorization errors\n\n4. 
**Edge Cases** (consider manual testing):\n - Rare scenarios (<1% occurrence)\n - Complex exploratory scenarios\n - Visual/UX validation requiring judgment\n - Features in heavy flux\n\n**Output**: Test scenarios organized by area with automation decisions for each\n\nExample structure:\n- **Authentication**: TC-001 Valid login (smoke, automate), TC-002 Invalid password (automate), TC-003 Password reset (automate)\n- **Dashboard**: TC-004 View dashboard widgets (smoke, automate), TC-005 Filter data by date (automate), TC-006 Export data (manual - rare use)\n\n### Step 1.7: Generate All Manual Test Case Files\n\nGenerate ALL manual test case markdown files in the \\`./test-cases/\\` directory BEFORE invoking the test-code-generator agent.\n\n**For each test scenario from Step 1.6:**\n\n1. **Create test case file** in \\`./test-cases/\\` with format \\`TC-XXX-feature-description.md\\`\n2. **Include frontmatter** with:\n - \\`id:\\` TC-XXX (sequential ID)\n - \\`title:\\` Clear, descriptive title\n - \\`automated:\\` true/false (based on automation decision from Step 1.6)\n - \\`automated_test:\\` (leave empty - will be filled by subagent when automated)\n - \\`type:\\` exploratory/functional/regression/smoke\n - \\`area:\\` Feature area/component\n3. **Write test case content**:\n - **Objective**: Clear description of what is being tested\n - **Preconditions**: Setup requirements, test data needed\n - **Test Steps**: Numbered, human-readable steps\n - **Expected Results**: What should happen at each step\n - **Test Data**: Environment variables to use (e.g., \\${TEST_BASE_URL}, \\${TEST_OWNER_EMAIL})\n - **Notes**: Any assumptions, clarifications needed, or special considerations\n\n**Output**: All manual test case markdown files created in \\`./test-cases/\\` with automation flags set\n\n### Step 2: Automate Test Cases Area by Area\n\n**IMPORTANT**: Process each feature area separately to enable incremental, focused test creation.\n\n**For each area from Step 1.6**, invoke the test-code-generator agent:\n\n#### Step 2.1: Prepare Area Context\n\nBefore invoking the agent, identify the test cases for the current area:\n- Current area name\n- Test case files for this area (e.g., TC-001-valid-login.md, TC-002-invalid-password.md)\n- Which test cases are marked for automation (automated: true)\n- Test type: {type}\n- Test plan reference: test-plan.md\n- Existing automated tests in ./tests/specs/\n- Existing Page Objects in ./tests/pages/\n\n#### Step 2.2: Invoke test-code-generator Agent\n\n{{INVOKE_TEST_CODE_GENERATOR}} for the current area with the following context:\n\n**Agent Invocation:**\n\"Automate test cases for the [AREA_NAME] area.\n\n**Context:**\n- Area: [AREA_NAME]\n- Manual test case files to automate: [list TC-XXX files marked with automated: true]\n- Test type: {type}\n- Test plan: test-plan.md\n- Manual test cases directory: ./test-cases/\n- Existing automated tests: ./tests/specs/\n- Existing Page Objects: ./tests/pages/\n\n**The agent should:**\n1. Read the manual test case files for this area\n2. Check existing Page Object infrastructure for this area\n3. Explore the feature area to understand implementation (gather selectors, URLs, flows)\n4. Build missing Page Objects and supporting code\n5. For each test case marked \\`automated: true\\`:\n - Create automated Playwright test in ./tests/specs/\n - Update the manual test case file to reference the automated test path\n6. Run and iterate on each test until it passes or fails with a product bug\n8. 
Update .env.testdata with any new variables\n\n**Focus only on the [AREA_NAME] area** - do not automate tests for other areas yet.\"\n\n#### Step 2.3: Verify Area Completion\n\nAfter the agent completes the area, verify:\n- Manual test case files updated with automated_test references\n- Automated tests created for all test cases marked automated: true\n- Tests are passing (or failing with documented product bugs)\n- Page Objects created/updated for the area\n\n#### Step 2.4: Repeat for Next Area\n\nMove to the next area and repeat Steps 2.1-2.3 until all areas are complete.\n\n**Benefits of area-by-area approach**:\n- Agent focuses on one feature at a time\n- POMs built incrementally as needed\n- Tests verified before moving to next area\n- Easier to manage and track progress\n- Can pause/resume between areas if needed\n\n### Step 2.5: Validate Generated Artifacts\n\nAfter the test-code-generator completes, verify:\n\n1. **Manual Test Cases (in \\`./test-cases/\\`)**:\n - Each has unique TC-XXX ID\n - Frontmatter includes \\`automated: true/false\\` flag\n - If automated, includes \\`automated_test\\` path reference\n - Contains human-readable steps and expected results\n - References environment variables for test data\n\n2. **Automated Tests (in \\`./tests/specs/\\`)**:\n - Organized by feature in subdirectories\n - Each test file references manual test case ID in comments\n - Uses Page Object Model pattern\n - Follows role-based selector priority\n - Uses environment variables for test data\n - Includes proper TypeScript typing\n\n3. **Page Objects (in \\`./tests/pages/\\`)**:\n - Extend BasePage class\n - Use semantic selectors (getByRole, getByLabel, getByText)\n - Contain only actions, no assertions\n - Properly typed with TypeScript\n\n4. **Supporting Files**:\n - Fixtures created for common setup (in \\`./tests/fixtures/\\`)\n - Helper functions for data generation (in \\`./tests/helpers/\\`)\n - Component objects for reusable UI elements (in \\`./tests/components/\\`)\n - Types defined as needed (in \\`./tests/types/\\`)\n\n### Step 3: Create Directories if Needed\n\nEnsure required directories exist:\n\\`\\`\\`bash\nmkdir -p ./test-cases\nmkdir -p ./tests/specs\nmkdir -p ./tests/pages\nmkdir -p ./tests/components\nmkdir -p ./tests/fixtures\nmkdir -p ./tests/helpers\n\\`\\`\\`\n\n### Step 4: Update .env.testdata (if needed)\n\nIf new environment variables were introduced:\n- Read current \\`.env.testdata\\`\n- Add new TEST_* variables with empty values\n- Group variables logically with comments\n- Document what each variable is for\n\n${KNOWLEDGE_BASE_UPDATE_INSTRUCTIONS}\n\n{{TEAM_COMMUNICATOR_INSTRUCTIONS}}\n\n### Step 5: Final Summary\n\nProvide a comprehensive summary showing:\n\n**Manual Test Cases:**\n- Number of manual test cases created\n- List of test case files with IDs and titles\n- Automation status for each (automated: yes/no)\n\n**Automated Tests:**\n- Number of automated test scripts created\n- List of spec files with test counts\n- Page Objects created or updated\n- Fixtures and helpers added\n\n**Test Coverage:**\n- Features covered by manual tests\n- Features covered by automated tests\n- Areas kept manual-only (and why)\n\n**Next Steps:**\n- Command to run automated tests: \\`npx playwright test\\`\n- Instructions to run specific test file\n- Note about copying .env.testdata to .env\n- Mention any exploration needed for edge cases\n\n### Important Notes\n\n- **Both Manual AND Automated**: Generate both artifacts - they serve different purposes\n- **Manual Test 
Cases**: Documentation, reference, can be executed manually when needed\n- **Automated Tests**: Fast, repeatable, for CI/CD and regression testing\n- **Automation Decision**: Not all test cases need automation - rare edge cases can stay manual\n- **Linking**: Manual test cases reference automated tests; automated tests reference manual test case IDs\n- **Two-Phase Workflow**: First generate all manual test cases (Step 1.7), then automate area-by-area (Step 2)\n- **Ambiguity Handling**: Use exploration (Step 1.4) and clarification (Step 1.5) protocols before generating\n- **Environment Variables**: Use \\`process.env.VAR_NAME\\` in tests, update .env.testdata as needed\n- **Test Independence**: Each test must be runnable in isolation and in parallel`,\n\n optionalSubagents: [\n {\n role: 'documentation-researcher',\n contentBlock: `#### 1.4 Gather Product Documentation\n\n{{INVOKE_DOCUMENTATION_RESEARCHER}} to gather comprehensive product documentation:\n\n\\`\\`\\`\nExplore all available product documentation, specifically focusing on:\n- UI elements and workflows\n- User interactions and navigation paths\n- Form fields and validation rules\n- Error messages and edge cases\n- Authentication and authorization flows\n- Business rules and constraints\n- API endpoints for test data setup\n\\`\\`\\``\n },\n {\n role: 'team-communicator',\n contentBlock: `### Step 4.5: Team Communication\n\n{{INVOKE_TEAM_COMMUNICATOR}} to notify the product team about the new test cases and automated tests:\n\n\\`\\`\\`\n1. Post an update about test case and automation creation\n2. Provide summary of coverage:\n - Number of manual test cases created\n - Number of automated tests created\n - Features covered by automation\n - Areas kept manual-only (and why)\n3. Highlight key automated test scenarios\n4. Share command to run automated tests: npx playwright test\n5. Ask for team review and validation\n6. Mention any areas needing exploration or clarification\n7. 
Use appropriate channel and threading for the update\n\\`\\`\\`\n\nThe team communication should include:\n- **Test artifacts created**: Manual test cases + automated tests count\n- **Automation coverage**: Which features are now automated\n- **Manual-only areas**: Why some tests are kept manual (rare scenarios, exploratory)\n- **Key automated scenarios**: Critical paths now covered by automation\n- **Running tests**: Command to execute automated tests\n- **Review request**: Ask team to validate scenarios and review test code\n- **Next steps**: Plans for CI/CD integration or additional test coverage\n\n**Update team communicator memory:**\n- Record this communication\n- Note test case and automation creation\n- Track team feedback on automation approach\n- Document any clarifications requested`\n }\n ],\n requiredSubagents: ['test-runner', 'test-code-generator']\n};\n","/**\n * Generate Test Plan Task\n * Generate a comprehensive test plan from product description\n */\n\nimport { TaskTemplate } from '../types';\nimport { TASK_SLUGS } from '../constants';\nimport { EXPLORATION_INSTRUCTIONS } from '../templates/exploration-instructions';\nimport { CLARIFICATION_INSTRUCTIONS } from '../templates/clarification-instructions';\nimport { KNOWLEDGE_BASE_READ_INSTRUCTIONS, KNOWLEDGE_BASE_UPDATE_INSTRUCTIONS } from '../templates/knowledge-base.js';\n\nexport const generateTestPlanTask: TaskTemplate = {\n slug: TASK_SLUGS.GENERATE_TEST_PLAN,\n name: 'Generate Test Plan',\n description: 'Generate a comprehensive test plan from product description',\n\n frontmatter: {\n description: 'Generate a comprehensive test plan from product description',\n 'argument-hint': '<product-description>',\n },\n\n baseContent: `# Generate Test Plan Command\n\n## SECURITY NOTICE\n**CRITICAL**: Never read the \\`.env\\` file. 
It contains ONLY secrets (passwords, API keys).\n- **Read \\`.env.testdata\\`** for non-secret test data (TEST_BASE_URL, TEST_OWNER_EMAIL, etc.)\n- \\`.env.testdata\\` contains actual values for test data, URLs, and non-sensitive configuration\n- For secrets: Reference variable names only (TEST_OWNER_PASSWORD) - values are injected at runtime\n- The \\`.env\\` file access is blocked by settings.json\n\nGenerate a comprehensive test plan from product description following the Brain Module specifications.\n\n## Arguments\nProduct description: \\$ARGUMENTS\n\n${KNOWLEDGE_BASE_READ_INSTRUCTIONS}\n\n## Process\n\n### Step 1: Load project context\nRead \\`.bugzy/runtime/project-context.md\\` to understand:\n- Project overview and key platform features\n- SDLC methodology and sprint duration\n- Testing environment and goals\n- Technical stack and constraints\n- QA workflow and processes\n\n### Step 1.5: Process the product description\nUse the product description provided directly in the arguments, enriched with project context understanding.\n\n### Step 1.6: Initialize environment variables tracking\nCreate a list to track all TEST_ prefixed environment variables discovered throughout the process.\n\n{{DOCUMENTATION_RESEARCHER_INSTRUCTIONS}}\n\n### Step 1.7: Explore Product (If Needed)\n\nIf product description is vague or incomplete, perform adaptive exploration to understand actual product features and behavior.\n\n${EXPLORATION_INSTRUCTIONS.replace(/{{STEP_NUMBER}}/g, '1.7')}\n\n### Step 1.8: Clarify Ambiguities\n\nIf exploration or product description reveals ambiguous requirements, use the clarification protocol before generating the test plan.\n\n${CLARIFICATION_INSTRUCTIONS.replace(/{{STEP_NUMBER}}/g, '1.8')}\n\n**Important Notes:**\n- **CRITICAL/HIGH ambiguities:** STOP test plan generation and seek clarification\n - Examples: Undefined core features, unclear product scope, contradictory requirements\n- **MEDIUM ambiguities:** Document assumptions in test plan with [ASSUMED: reason] and seek async clarification\n - Examples: Missing field lists, unclear validation rules, vague user roles\n- **LOW ambiguities:** Mark with [TO BE EXPLORED: detail] in test plan for future investigation\n - Examples: Optional features, cosmetic details, non-critical edge cases\n\n### Step 3: Prepare the test plan generation context\n\n**After ensuring requirements are clear through exploration and clarification:**\n\nBased on the gathered information:\n- **goal**: Extract the main purpose and objectives from all available documentation\n- **knowledge**: Combine product description with discovered documentation insights\n- **testPlan**: Use the standard test plan template structure, enriched with documentation findings\n- **gaps**: Identify areas lacking documentation that will need exploration\n\n### Step 4: Generate the test plan using the prompt template\n\nYou are an expert QA Test Plan Writer with expertise in both manual and automated testing strategies. Using the gathered information and context from the product description provided, you will now produce a comprehensive test plan in Markdown format that includes an automation strategy.\n\nWriting Instructions:\n- **Use Product Terminology:** Incorporate exact terms and labels from the product description for features and UI elements (to ensure the test plan uses official naming).\n- **Testing Scope:** The plan covers both automated E2E testing via Playwright and exploratory manual testing. 
Focus on what a user can do and see in a browser.\n- **Test Data - IMPORTANT:**\n - DO NOT include test data values in the test plan body\n - Test data goes ONLY to the \\`.env.testdata\\` file\n - In the test plan, reference \\`.env.testdata\\` for test data requirements\n - Define test data as environment variables prefixed with TEST_ (e.g., TEST_BASE_URL, TEST_USER_EMAIL, TEST_USER_PASSWORD)\n - DO NOT GENERATE VALUES FOR THE ENV VARS, ONLY THE KEYS\n - Track all TEST_ variables for extraction to .env.testdata in Step 7\n- **DO NOT INCLUDE TEST SCENARIOS**\n- **Incorporate All Relevant Info:** If the product description mentions specific requirements, constraints, or acceptance criteria (such as field validations, role-based access rules, important parameters), make sure these are reflected in the test plan. Do not add anything not supported by the given information.\n- **Test Automation Strategy Section - REQUIRED:** Include a comprehensive \"Test Automation Strategy\" section with the following subsections:\n\n **## Test Automation Strategy**\n\n ### Automated Test Coverage\n - Identify critical user paths to automate (login, checkout, core features)\n - Define regression test scenarios for automation\n - Specify API endpoints that need automated testing\n - List smoke test scenarios for CI/CD pipeline\n\n ### Exploratory Testing Areas\n - New features not yet automated\n - Complex edge cases requiring human judgment\n - Visual/UX validation requiring subjective assessment\n - Scenarios that are not cost-effective to automate\n\n ### Test Data Management\n - Environment variables strategy (which vars go in .env.example vs .env)\n - Dynamic test data generation approach (use data generators)\n - API-based test data setup (10-20x faster than UI)\n - Test data isolation and cleanup strategy\n\n ### Automation Approach\n - **Framework:** Playwright + TypeScript (already scaffolded)\n - **Pattern:** Page Object Model for all pages\n - **Selectors:** Prioritize role-based selectors (getByRole, getByLabel, getByText)\n - **Components:** Reusable component objects for common UI elements\n - **Fixtures:** Custom fixtures for authenticated sessions and common setup\n - **API for Speed:** Use Playwright's request context to create test data via API\n - **Best Practices:** Reference \\`.bugzy/runtime/testing-best-practices.md\\` for patterns\n\n ### Test Organization\n - Automated tests location: \\`./tests/specs/[feature]/\\`\n - Page Objects location: \\`./tests/pages/\\`\n - Manual test cases location: \\`./test-cases/\\` (human-readable documentation)\n - Test case naming: TC-XXX-feature-description.md\n - Automated test naming: feature.spec.ts\n\n ### Automation Decision Criteria\n Define which scenarios warrant automation:\n - ✅ Automate: Frequent execution, critical paths, regression tests, CI/CD integration\n - ❌ Keep Manual: Rare edge cases, exploratory tests, visual validation, one-time checks\n\n### Step 5: Create the test plan file\n\nRead the test plan template from \\`.bugzy/runtime/templates/test-plan-template.md\\` and use it as the base structure. Fill in the placeholders with information extracted from BOTH the product description AND documentation research:\n\n1. Read the template file from \\`.bugzy/runtime/templates/test-plan-template.md\\`\n2. 
Replace placeholders like:\n - \\`[ProjectName]\\` with the actual project name from the product description\n - \\`[Date]\\` with the current date\n - Feature sections with actual features identified from all documentation sources\n - Test data requirements based on the product's needs and API documentation\n - Risks based on the complexity, known issues, and technical constraints\n3. Add any product-specific sections that may be needed based on discovered documentation\n4. **Mark ambiguities based on severity:**\n - CRITICAL/HIGH: Should be clarified before plan creation (see Step 1.8)\n - MEDIUM: Mark with [ASSUMED: reason] and note assumption\n - LOW: Mark with [TO BE EXPLORED: detail] for future investigation\n5. Include references to source documentation for traceability\n\n### Step 6: Save the test plan\n\nSave the generated test plan to a file named \\`test-plan.md\\` in the project root with appropriate frontmatter:\n\n\\`\\`\\`yaml\n---\nversion: 1.0.0\nlifecycle_phase: initial\ncreated_at: [current date]\nupdated_at: [current date]\nlast_exploration: null\ntotal_discoveries: 0\nstatus: draft\nauthor: claude\ntags: [functional, security, performance]\n---\n\\`\\`\\`\n\n### Step 7: Extract and save environment variables\n\n**CRITICAL**: Test data values must ONLY go to .env.testdata, NOT in the test plan document.\n\nAfter saving the test plan:\n\n1. **Parse the test plan** to find all TEST_ prefixed environment variables mentioned:\n - Look in the Testing Environment section\n - Search for any TEST_ variables referenced\n - Extract variables from configuration or setup sections\n - Common patterns include: TEST_BASE_URL, TEST_USER_*, TEST_API_*, TEST_ADMIN_*, etc.\n\n2. **Create .env.testdata file** with all discovered variables:\n \\`\\`\\`bash\n # Application Configuration\n TEST_BASE_URL=\n\n # Test User Credentials\n TEST_USER_EMAIL=\n TEST_USER_PASSWORD=\n TEST_ADMIN_EMAIL=\n TEST_ADMIN_PASSWORD=\n\n # API Configuration\n TEST_API_KEY=\n TEST_API_SECRET=\n\n # Other Test Data\n TEST_DB_NAME=\n TEST_TIMEOUT=\n \\`\\`\\`\n\n3. **Add helpful comments** for each variable group to guide users in filling values\n\n4. **Save the file** as \\`.env.testdata\\` in the project root\n\n5. **Verify test plan references .env.testdata**:\n - Ensure test plan DOES NOT contain test data values\n - Ensure test plan references \\`.env.testdata\\` for test data requirements\n - Add instruction: \"Fill in actual values in .env.testdata before running tests\"\n\n${KNOWLEDGE_BASE_UPDATE_INSTRUCTIONS}\n\n{{TEAM_COMMUNICATOR_INSTRUCTIONS}}\n\n### Step 8: Final summary\n\nProvide a summary of:\n- Test plan created successfully at \\`test-plan.md\\`\n- Environment variables extracted to \\`.env.testdata\\`\n- Number of TEST_ variables discovered\n- Instructions for the user to fill in actual values in .env.testdata before running tests`,\n\n optionalSubagents: [\n {\n role: 'documentation-researcher',\n contentBlock: `### Step 2: Gather comprehensive project documentation\n\n{{INVOKE_DOCUMENTATION_RESEARCHER}} to explore and gather all available project information and other documentation sources. 
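\n\nThe {{INVOKE_DOCUMENTATION_RESEARCHER}} token above, like {{DOCUMENTATION_RESEARCHER_INSTRUCTIONS}} in the base content, is a placeholder resolved when the command file is assembled. A minimal sketch of that substitution, assuming a plain string-splice assembler (renderTask and SubagentBlock are illustrative names, not the package's actual API):\n\n\\`\\`\\`typescript\n// Hypothetical assembler: splice each enabled subagent's contentBlock into\n// baseContent where its {{ROLE_INSTRUCTIONS}} placeholder appears.\ninterface SubagentBlock { role: string; contentBlock: string; }\n\nfunction renderTask(baseContent: string, enabled: SubagentBlock[]): string {\n let out = baseContent;\n for (const { role, contentBlock } of enabled) {\n const token = '{{' + role.toUpperCase().replace(/-/g, '_') + '_INSTRUCTIONS}}';\n out = out.split(token).join(contentBlock);\n }\n return out;\n}\n\\`\\`\\`\n\nOnce spliced in, the block directs the researcher to gather product specifications, user stories, and API documentation before the plan is written. 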
This ensures the test plan is based on complete and current information.\n\n\\`\\`\\`\nExplore all available project documentation related to: \\$ARGUMENTS\n\nSpecifically gather:\n- Product specifications and requirements\n- User stories and acceptance criteria\n- Technical architecture documentation\n- API documentation and endpoints\n- User roles and permissions\n- Business rules and validations\n- UI/UX specifications\n- Known limitations or constraints\n- Existing test documentation\n- Bug reports or known issues\n\\`\\`\\`\n\nThe agent will:\n1. Check its memory for previously discovered documentation\n2. Explore workspace for relevant pages and databases\n3. Build a comprehensive understanding of the product\n4. Return synthesized information about all discovered documentation`\n },\n {\n role: 'team-communicator',\n contentBlock: `### Step 7.5: Team Communication\n\n{{INVOKE_TEAM_COMMUNICATOR}} to notify the product team about the new test plan:\n\n\\`\\`\\`\n1. Post an update about the test plan creation\n2. Provide a brief summary of coverage areas and key features\n3. Mention any areas that need exploration or clarification\n4. Ask for team review and feedback on the test plan\n5. Include a link or reference to the test-plan.md file\n6. Use appropriate channel and threading for the update\n\\`\\`\\`\n\nThe team communication should include:\n- **Test plan scope**: Brief overview of what will be tested\n- **Coverage highlights**: Key features and user flows included\n- **Areas needing clarification**: Any uncertainties discovered during documentation research\n- **Review request**: Ask team to review and provide feedback\n- **Next steps**: Mention plan to generate test cases after review\n\n**Update team communicator memory:**\n- Record this communication in the team-communicator memory\n- Note this as a test plan creation communication\n- Track team response to this type of update`\n }\n ],\n requiredSubagents: ['test-runner']\n};\n","/**\n * Handle Message Task\n * Handle team responses and Slack communications, maintaining context for ongoing conversations\n *\n * Slack messages are processed by the LLM layer (lib/slack/llm-processor.ts)\n * which routes feedback/general chat to this task via the 'collect_feedback' action.\n * This task must be in SLACK_ALLOWED_TASKS to be Slack-callable.\n */\n\nimport { TaskTemplate } from '../types';\nimport { TASK_SLUGS } from '../constants';\nimport { KNOWLEDGE_BASE_READ_INSTRUCTIONS, KNOWLEDGE_BASE_UPDATE_INSTRUCTIONS } from '../templates/knowledge-base.js';\n\nexport const handleMessageTask: TaskTemplate = {\n slug: TASK_SLUGS.HANDLE_MESSAGE,\n name: 'Handle Message',\n description: 'Handle team responses and Slack communications, maintaining context for ongoing conversations (LLM-routed)',\n\n frontmatter: {\n description: 'Handle team responses and Slack communications, maintaining context for ongoing conversations',\n 'argument-hint': '[slack thread context or team message]',\n },\n\n baseContent: `# Handle Message Command\n\n## SECURITY NOTICE\n**CRITICAL**: Never read the \\`.env\\` file. 
It contains ONLY secrets (passwords, API keys).\n- **Read \\`.env.testdata\\`** for non-secret environment variables (TEST_BASE_URL, TEST_OWNER_EMAIL, etc.)\n- \\`.env.testdata\\` contains actual values for test data, URLs, and non-sensitive configuration\n- For secrets: Reference variable names only (TEST_OWNER_PASSWORD) - values are injected at runtime\n- The \\`.env\\` file access is blocked by settings.json\n\nProcess team responses from Slack threads and handle multi-turn conversations with the product team about testing clarifications, ambiguities, and questions.\n\n## Arguments\nTeam message/thread context: \\$ARGUMENTS\n\n${KNOWLEDGE_BASE_READ_INSTRUCTIONS}\n\n## Process\n\n### Step 0: Detect Message Intent and Load Handler\n\nBefore processing the message, identify the intent type to load the appropriate handler.\n\n#### 0.1 Extract Intent from Event Payload\n\nCheck the event payload for the \\`intent\\` field provided by the LLM layer:\n- If \\`intent\\` is present, use it directly\n- Valid intent values: \\`question\\`, \\`feedback\\`, \\`status\\`\n\n#### 0.2 Fallback Intent Detection (if no intent provided)\n\nIf intent is not in the payload, detect from message patterns:\n\n| Condition | Intent |\n|-----------|--------|\n| Keywords: \"status\", \"progress\", \"how did\", \"results\", \"how many passed\" | \\`status\\` |\n| Keywords: \"bug\", \"issue\", \"broken\", \"doesn't work\", \"failed\", \"error\" | \\`feedback\\` |\n| Question words: \"what\", \"which\", \"do we have\", \"is there\" about tests/project | \\`question\\` |\n| Default (none of above) | \\`feedback\\` |\n\n#### 0.3 Load Handler File\n\nBased on detected intent, load the handler from:\n\\`.bugzy/runtime/handlers/messages/{intent}.md\\`\n\n**Handler files:**\n- \\`question.md\\` - Questions about tests, coverage, project details\n- \\`feedback.md\\` - Bug reports, test observations, general information\n- \\`status.md\\` - Status checks on test runs, task progress\n\n#### 0.4 Follow Handler Instructions\n\n**IMPORTANT**: The handler file is authoritative for this intent type.\n\n1. Read the handler file completely\n2. Follow its processing steps in order\n3. Apply its context loading requirements\n4. Use its response guidelines\n5. Perform any memory updates it specifies\n\nThe handler file contains all necessary processing logic for the detected intent type. 
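\n\nAs a rough sketch of the Step 0.2 fallback in code (illustrative only; the detection actually happens in prompt space, and detectIntent is a hypothetical name):\n\n\\`\\`\\`typescript\n// Hypothetical sketch of the Step 0.2 keyword fallback. Checks run in\n// priority order: status signals, then bug/feedback signals, then question\n// phrasing, defaulting to 'feedback'.\ntype Intent = 'question' | 'feedback' | 'status';\n\nfunction detectIntent(text: string): Intent {\n const t = text.toLowerCase();\n const has = (words: string[]) => words.some((w) => t.includes(w));\n if (has(['status', 'progress', 'how did', 'results', 'how many passed'])) return 'status';\n if (has(['bug', 'issue', 'broken', \"doesn't work\", 'failed', 'error'])) return 'feedback';\n if (has(['what', 'which', 'do we have', 'is there'])) return 'question';\n return 'feedback';\n}\n\n// The matching handler file is then loaded from .bugzy/runtime/handlers/messages/\nconst handlerPath = (intent: Intent) => '.bugzy/runtime/handlers/messages/' + intent + '.md';\n\\`\\`\\`\n\n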
Each handler includes:\n- Specific processing steps for that intent\n- Context loading requirements\n- Response guidelines\n- Memory update instructions\n\n${KNOWLEDGE_BASE_UPDATE_INSTRUCTIONS}\n\n## Key Principles\n\n### Context Preservation\n- Always maintain full conversation context\n- Link responses back to original uncertainties\n- Preserve reasoning chain for future reference\n\n### Actionable Responses\n- Convert team input into concrete actions\n- Don't let clarifications sit without implementation\n- Follow through on commitments made to team\n\n### Learning Integration\n- Each interaction improves our understanding\n- Build knowledge base of team preferences\n- Refine communication approaches over time\n\n### Quality Communication\n- Acknowledge team input appropriately\n- Provide updates on actions taken\n- Ask good follow-up questions when needed\n\n## Important Considerations\n\n### Thread Organization\n- Keep related discussions in same thread\n- Start new threads for new topics\n- Maintain clear conversation boundaries\n\n### Response Timing\n- Acknowledge important messages promptly\n- Allow time for implementation before status updates\n- Don't spam team with excessive communications\n\n### Action Prioritization\n- Address urgent clarifications first\n- Batch related updates when possible\n- Focus on high-impact changes\n\n### Memory Maintenance\n- Keep active conversations visible and current\n- Archive resolved discussions appropriately\n- Maintain searchable history of resolutions`,\n\n optionalSubagents: [],\n requiredSubagents: ['team-communicator']\n};\n","/**\n * Process Event Task\n * Process external system events (Jira, GitHub, Linear) using handler-defined rules to extract insights and track issues\n */\n\nimport { TaskTemplate } from '../types';\nimport { TASK_SLUGS } from '../constants';\nimport { KNOWLEDGE_BASE_READ_INSTRUCTIONS, KNOWLEDGE_BASE_UPDATE_INSTRUCTIONS } from '../templates/knowledge-base.js';\n\nexport const processEventTask: TaskTemplate = {\n slug: TASK_SLUGS.PROCESS_EVENT,\n name: 'Process Event',\n description: 'Process external system events (Jira, GitHub, Linear) using handler-defined rules to extract insights and track issues',\n\n frontmatter: {\n description: 'Process external system events (Jira, GitHub, Linear) using handler-defined rules to extract insights and track issues',\n 'argument-hint': '[event payload or description]',\n },\n\n baseContent: `# Process Event Command\n\n## SECURITY NOTICE\n**CRITICAL**: Never read the \\`.env\\` file. It contains ONLY secrets (passwords, API keys).\n- **Read \\`.env.testdata\\`** for non-secret environment variables (TEST_BASE_URL, TEST_OWNER_EMAIL, etc.)\n- \\`.env.testdata\\` contains actual values for test data, URLs, and non-sensitive configuration\n- For secrets: Reference variable names only (TEST_OWNER_PASSWORD) - values are injected at runtime\n- The \\`.env\\` file access is blocked by settings.json\n\nProcess various types of events using intelligent pattern matching and historical context to maintain and evolve the testing system.\n\n## Arguments\nArguments: \\$ARGUMENTS\n\n${KNOWLEDGE_BASE_READ_INSTRUCTIONS}\n\n## Process\n\n### Step 1: Understand Event Context\n\nEvents come from integrated external systems via webhooks or manual input. 
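\n\nA handler lookup of the kind described in Step 2.4 below could look like this minimal sketch (resolveHandler is a hypothetical name; the prefixes and paths are the ones documented in Step 2.4):\n\n\\`\\`\\`typescript\n// Hypothetical sketch: map an event-type prefix to its handler file under\n// .bugzy/runtime/handlers/. A null result means no handler exists, in which\n// case the task must ask the user rather than guess (see Step 2.4).\nfunction resolveHandler(eventType: string): string | null {\n if (eventType.startsWith('com.jira-server.')) return '.bugzy/runtime/handlers/jira.md';\n if (eventType.startsWith('github.')) return '.bugzy/runtime/handlers/github.md';\n if (eventType.startsWith('linear.')) return '.bugzy/runtime/handlers/linear.md';\n return null;\n}\n\\`\\`\\`\n\n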
Common sources include:\n- **Issue Trackers**: Jira, Linear, GitHub Issues\n- **Source Control**: GitHub, GitLab\n- **Communication Tools**: Slack\n\n**Event structure and semantics vary by source.** Do not interpret events based on generic assumptions. Instead, load the appropriate handler file (Step 2.4) for system-specific processing rules.\n\n#### Event Context to Extract:\n- **What happened**: The core event (test failed, PR merged, etc.)\n- **Where**: Component, service, or area affected\n- **Impact**: How this affects testing strategy\n- **Action Required**: What needs to be done in response\n\n### Step 1.5: Clarify Unclear Events\n\nIf the event information is incomplete or ambiguous, seek clarification before processing:\n\n#### Detect Unclear Events\n\nEvents may be unclear in several ways:\n- **Vague description**: \"Something broke\", \"issue with login\" (what specifically?)\n- **Missing context**: Which component, which environment, which user?\n- **Contradictory information**: Event data conflicts with other sources\n- **Unknown references**: Mentions unfamiliar features, components, or systems\n- **Unclear severity**: Impact or priority is ambiguous\n\n#### Assess Ambiguity Severity\n\nClassify the ambiguity level to determine appropriate response:\n\n**🔴 CRITICAL - STOP and seek clarification:**\n- Cannot identify which component is affected\n- Event data is contradictory or nonsensical\n- Unknown system or feature mentioned\n- Cannot determine if this requires immediate action\n- Example: Event says \"production is down\" but unclear which service\n\n**🟠 HIGH - STOP and seek clarification:**\n- Vague problem description that could apply to multiple areas\n- Missing critical context needed for proper response\n- Unclear which team or system is responsible\n- Example: \"Login issue reported\" (login button? auth service? session? which page?)\n\n**🟡 MEDIUM - Proceed with documented assumptions:**\n- Some details missing but core event is clear\n- Can infer likely meaning from context\n- Can proceed but should clarify async\n- Example: \"Test failed on staging\" (can assume main staging, but clarify which one)\n\n**🟢 LOW - Mark and proceed:**\n- Minor details missing (optional context)\n- Cosmetic or non-critical information gaps\n- Can document gap and continue\n- Example: Missing timestamp or exact user who reported issue\n\n#### Clarification Approach by Severity\n\n**For CRITICAL/HIGH ambiguity:**\n1. **{{INVOKE_TEAM_COMMUNICATOR}} to ask specific questions**\n2. **WAIT for response before proceeding**\n3. **Document the clarification request in event history**\n\nExample clarification messages:\n- \"Event mentions 'login issue' - can you clarify if this is:\n • Login button not responding?\n • Authentication service failure?\n • Session management problem?\n • Specific page or global?\"\n\n- \"Event references component 'XYZ' which is unknown. What system does this belong to?\"\n\n- \"Event data shows contradictory information: status=success but error_count=15. Which is correct?\"\n\n**For MEDIUM ambiguity:**\n1. **Document assumption** with reasoning\n2. **Proceed with processing** based on assumption\n3. **Ask for clarification async** (non-blocking)\n4. **Mark in event history** for future reference\n\nExample: [ASSUMED: \"login issue\" refers to login button based on recent similar events]\n\n**For LOW ambiguity:**\n1. **Mark with [TO BE CLARIFIED: detail]**\n2. **Continue processing** normally\n3. 
**Document gap** in event history\n\nExample: [TO BE CLARIFIED: Exact timestamp of when issue was first observed]\n\n#### Document Clarification Process\n\nIn event history, record:\n- **Ambiguity detected**: What was unclear\n- **Severity assessed**: CRITICAL/HIGH/MEDIUM/LOW\n- **Clarification requested**: Questions asked (if any)\n- **Response received**: Team's clarification\n- **Assumption made**: If proceeded with assumption\n- **Resolution**: How ambiguity was resolved\n\nThis ensures future similar events can reference past clarifications and avoid redundant questions.\n\n### Step 2: Load Context and Memory\n\n#### 2.1 Check Event Processor Memory\nRead \\`.bugzy/runtime/memory/event-processor.md\\` to:\n- Find similar event patterns\n- Load example events with reasoning\n- Get system-specific rules\n- Retrieve task mapping patterns\n\n#### 2.2 Check Event History\nRead \\`.bugzy/runtime/memory/event-history.md\\` to:\n- Ensure event hasn't been processed already (idempotency)\n- Find related recent events\n- Understand event patterns and trends\n\n#### 2.3 Read Current State\n- Read \\`test-plan.md\\` for current coverage\n- List \\`./test-cases/\\` for existing tests\n- Check \\`.bugzy/runtime/knowledge-base.md\\` for past insights\n\n#### 2.4 Load System-Specific Handler (REQUIRED)\n\nBased on the event source, load the handler from \\`.bugzy/runtime/handlers/\\`:\n\n**Step 1: Detect Event Source from Payload:**\n- \\`com.jira-server.*\\` event type prefix → \\`.bugzy/runtime/handlers/jira.md\\`\n- \\`github.*\\` or GitHub webhook structure → \\`.bugzy/runtime/handlers/github.md\\`\n- \\`linear.*\\` or Linear webhook → \\`.bugzy/runtime/handlers/linear.md\\`\n- Other sources → Check for matching handler file by source name\n\n**Step 2: Load and Read the Handler File:**\nThe handler file contains system-specific instructions for:\n- Event payload structure and field meanings\n- Which triggers (status changes, resolutions) require specific actions\n- How to interpret different event types\n- When to invoke \\`/verify-changes\\`\n- How to update the knowledge base\n\n**Step 3: Follow Handler Instructions:**\nThe handler file is authoritative for this event source. Follow its instructions for:\n- Interpreting the event payload\n- Determining what actions to take\n- Formatting responses and updates\n\n**Step 4: If No Handler Exists:**\nDo NOT guess or apply generic logic. Instead:\n1. Inform the user that no handler exists for this event source\n2. Ask how this event type should be processed\n3. 
Suggest creating a handler file at \\`.bugzy/runtime/handlers/{source}.md\\`\n\n**Project-Specific Configuration:**\nHandlers reference \\`.bugzy/runtime/project-context.md\\` for project-specific rules like:\n- Which status transitions trigger verify-changes\n- Which resolutions should update the knowledge base\n- Which transitions to ignore\n\n### Step 3: Intelligent Event Analysis\n\n#### 3.1 Contextual Pattern Analysis\nDon't just match patterns - analyze the event within the full context:\n\n**Combine Multiple Signals**:\n- Event details + Historical patterns from memory\n- Current test plan state + Knowledge base\n- External system status + Team activity\n- Business priorities + Risk assessment\n\n**Example Contextual Analysis**:\n\\`\\`\\`\nEvent: Jira issue PROJ-456 moved to \"Ready for QA\"\n+ Handler: jira.md says \"Ready for QA\" triggers /verify-changes\n+ History: This issue was previously in \"In Progress\" for 3 days\n+ Knowledge: Related PR #123 merged yesterday\n= Decision: Invoke /verify-changes with issue context and PR reference\n\\`\\`\\`\n\n**Pattern Recognition with Context**:\n- An issue resolution depends on what the handler prescribes for that status\n- A duplicate event (same issue, same transition) should be skipped\n- Events from different sources about the same change should be correlated\n- Handler instructions take precedence over generic assumptions\n\n#### 3.2 Generate Semantic Queries\nBased on event type and content, generate 3-5 specific search queries:\n- Search for similar past events\n- Look for related test cases\n- Find relevant documentation\n- Check for known issues\n\n{{DOCUMENTATION_RESEARCHER_INSTRUCTIONS}}\n\n### Step 4: Task Planning with Reasoning\n\nGenerate tasks based on event analysis, using examples from memory as reference.\n\n#### Task Generation Logic:\nAnalyze the event in context of ALL available information to decide what actions to take:\n\n**Consider the Full Context**:\n- What does the handler prescribe for this event type?\n- How does this relate to current knowledge?\n- What's the state of related issues in external systems?\n- Is this part of a larger pattern we've been seeing?\n- What's the business impact of this event?\n\n**Contextual Decision Making**:\nThe same event type can require different actions based on context:\n- If handler says this status triggers verification → Invoke /verify-changes\n- If this issue was already processed (check event history) → Skip to avoid duplicates\n- If related PR exists in knowledge base → Include PR context in actions\n- If this is a recurring pattern from the same source → Consider flagging for review\n- If handler has no rule for this event type → Ask user for guidance\n\n**Dynamic Task Selection**:\nBased on the contextual analysis, decide which tasks make sense:\n- **extract_learning**: When the event reveals something new about the system\n- **update_test_plan**: When our understanding of what to test has changed\n- **update_test_cases**: When tests need to reflect new reality\n- **report_bug**: When we have a legitimate, impactful, reproducible issue\n- **skip_action**: When context shows no action needed (e.g., known issue, already fixed)\n\nThe key is to use ALL available context - not just react to the event type\n\n#### Document Reasoning:\nFor each task, document WHY it's being executed:\n\\`\\`\\`markdown\nTask: extract_learning\nReasoning: This event reveals a pattern of login failures on Chrome that wasn't previously documented\nData: \"Chrome-specific timeout issues 
with login button\"\n\\`\\`\\`\n\n### Step 5: Execute Tasks with Memory Updates\n\n#### 5.1 Execute Each Task\n\n{{ISSUE_TRACKER_INSTRUCTIONS}}\n\n##### For Other Tasks:\nFollow the standard execution logic with added context from memory.\n\n#### 5.2 Update Event Processor Memory\nIf new patterns are discovered, append to \\`.bugzy/runtime/memory/event-processor.md\\`:\n\\`\\`\\`markdown\n### Pattern: [New Pattern Name]\n**First Seen**: [Date]\n**Indicators**: [What identifies this pattern]\n**Typical Tasks**: [Common task responses]\n**Example**: [This event]\n\\`\\`\\`\n\n#### 5.3 Update Event History\nAppend to \\`.bugzy/runtime/memory/event-history.md\\`:\n\\`\\`\\`markdown\n## [Timestamp] - Event #[ID]\n\n**Original Input**: [Raw arguments provided]\n**Parsed Event**:\n\\`\\`\\`yaml\ntype: [type]\nsource: [source]\n[other fields]\n\\`\\`\\`\n\n**Pattern Matched**: [Pattern name or \"New Pattern\"]\n**Tasks Executed**:\n1. [Task 1] - Reasoning: [Why]\n2. [Task 2] - Reasoning: [Why]\n\n**Files Modified**:\n- [List of files]\n\n**Outcome**: [Success/Partial/Failed]\n**Notes**: [Any additional context]\n---\n\\`\\`\\`\n\n### Step 6: Learning from Events\n\nAfter processing, check if this event teaches us something new:\n1. Is this a new type of event we haven't seen?\n2. Did our task planning work well?\n3. Should we update our patterns?\n4. Are there trends across recent events?\n\nIf yes, update the event processor memory with new patterns or refined rules.\n\n### Step 7: Create Necessary Files\n\nEnsure all required files and directories exist:\n\\`\\`\\`bash\nmkdir -p ./test-cases .bugzy/runtime/memory\n\\`\\`\\`\n\nCreate files if they don't exist:\n- \\`.bugzy/runtime/knowledge-base.md\\`\n- \\`.bugzy/runtime/memory/event-processor.md\\`\n- \\`.bugzy/runtime/memory/event-history.md\\`\n\n## Important Considerations\n\n### Contextual Intelligence\n- Never process events in isolation - always consider full context\n- Use knowledge base, history, and external system state to inform decisions\n- What seems like a bug might be expected behavior given the context\n- A minor event might be critical when seen as part of a pattern\n\n### Adaptive Response\n- Same event type can require different actions based on context\n- Learn from each event to improve future decision-making\n- Build understanding of system behavior over time\n- Adjust responses based on business priorities and risk\n\n### Smart Task Generation\n- Only take actions prescribed by the handler or confirmed by the user\n- Document why each decision was made with full context\n- Skip redundant actions (e.g., duplicate events, already-processed issues)\n- Escalate appropriately based on pattern recognition\n\n### Continuous Learning\n- Each event adds to our understanding of the system\n- Update patterns when new correlations are discovered\n- Refine decision rules based on outcomes\n- Build institutional memory through event history\n\n${KNOWLEDGE_BASE_UPDATE_INSTRUCTIONS}`,\n\n optionalSubagents: [\n {\n role: 'documentation-researcher',\n contentBlock: `#### 3.3 Use Documentation Researcher if Needed\nFor events mentioning unknown features or components:\n\\`\\`\\`\n{{INVOKE_DOCUMENTATION_RESEARCHER}} to find information about: [component/feature]\n\\`\\`\\``\n },\n {\n role: 'issue-tracker',\n contentBlock: `##### For Issue Tracking:\n\nWhen an issue needs to be tracked (task type: report_bug or update_story):\n\\`\\`\\`\n{{INVOKE_ISSUE_TRACKER}}\n1. Check for duplicate issues in the tracking system\n2. 
For bugs: Create detailed bug report with:\n - Clear, descriptive title\n - Detailed description with context\n - Step-by-step reproduction instructions\n - Expected vs actual behavior\n - Environment and configuration details\n - Test case reference (if applicable)\n - Screenshots or error logs\n3. For stories: Update status and add QA comments\n4. Track issue lifecycle and maintain categorization\n\\`\\`\\`\n\nThe issue-tracker agent will handle all aspects of issue tracking including duplicate detection, story management, QA workflow transitions, and integration with your project management system (Jira, Linear, Notion, etc.).`\n }\n ],\n requiredSubagents: [],\n dependentTasks: ['verify-changes']\n};\n","/**\n * Run Tests Task\n * Select and run test cases using the test-runner agent\n */\n\nimport { TaskTemplate } from '../types';\nimport { TASK_SLUGS } from '../constants';\nimport { KNOWLEDGE_BASE_READ_INSTRUCTIONS, KNOWLEDGE_BASE_UPDATE_INSTRUCTIONS } from '../templates/knowledge-base.js';\n\nexport const runTestsTask: TaskTemplate = {\n slug: TASK_SLUGS.RUN_TESTS,\n name: 'Run Tests',\n description: 'Execute automated Playwright tests, analyze failures, and fix test issues automatically',\n\n frontmatter: {\n description: 'Execute automated Playwright tests, analyze failures, and fix test issues automatically',\n 'argument-hint': '[file-pattern|tag|all] (e.g., \"auth\", \"@smoke\", \"tests/specs/login.spec.ts\")',\n },\n\n baseContent: `# Run Tests Command\n\n## SECURITY NOTICE\n**CRITICAL**: Never read the \\`.env\\` file. It contains ONLY secrets (passwords, API keys).\n- **Read \\`.env.testdata\\`** for non-secret environment variables (TEST_BASE_URL, TEST_OWNER_EMAIL, etc.)\n- \\`.env.testdata\\` contains actual values for test data, URLs, and non-sensitive configuration\n- For secrets: Reference variable names only (TEST_OWNER_PASSWORD) - values are injected at runtime\n- The \\`.env\\` file access is blocked by settings.json\n\nExecute automated Playwright tests, analyze failures using JSON reports, automatically fix test issues, and log product bugs.\n\n## Arguments\nArguments: \\$ARGUMENTS\n\n## Parse Arguments\nExtract the following from arguments:\n- **selector**: Test selection criteria\n - File pattern: \"auth\" → finds tests/specs/**/*auth*.spec.ts\n - Tag: \"@smoke\" → runs tests with @smoke annotation\n - Specific file: \"tests/specs/login.spec.ts\"\n - All tests: \"all\" or \"\" → runs entire test suite\n\n${KNOWLEDGE_BASE_READ_INSTRUCTIONS}\n\n## Test Execution Strategy\n\n**IMPORTANT**: Before selecting tests, read \\`.bugzy/runtime/test-execution-strategy.md\\` to understand:\n- Available test tiers (Smoke, Component, Full Regression)\n- When to use each tier (commit, PR, release, debug)\n- Default behavior (default to @smoke unless user specifies otherwise)\n- How to interpret user intent from context keywords\n- Time/coverage trade-offs\n- Tag taxonomy\n\nApply the strategy guidance when determining which tests to run.\n\n## Process\n\n**First**, consult \\`.bugzy/runtime/test-execution-strategy.md\\` decision tree to determine appropriate test tier based on user's selector and context.\n\n### Step 1: Identify Automated Tests to Run\n\n#### 1.1 Understand Test Selection\nParse the selector argument to determine which tests to run:\n\n**File Pattern** (e.g., \"auth\", \"login\"):\n- Find matching test files: \\`tests/specs/**/*[pattern]*.spec.ts\\`\n- Example: \"auth\" → finds all test files with \"auth\" in the name\n\n**Tag** (e.g., \"@smoke\", 
\"@regression\"):\n- Run tests with specific Playwright tag annotation\n- Use Playwright's \\`--grep\\` option\n\n**Specific File** (e.g., \"tests/specs/auth/login.spec.ts\"):\n- Run that specific test file\n\n**All Tests** (\"all\" or no selector):\n- Run entire test suite: \\`tests/specs/**/*.spec.ts\\`\n\n#### 1.2 Find Matching Test Files\nUse glob patterns to find test files:\n\\`\\`\\`bash\n# For file pattern\nls tests/specs/**/*[pattern]*.spec.ts\n\n# For specific file\nls tests/specs/auth/login.spec.ts\n\n# For all tests\nls tests/specs/**/*.spec.ts\n\\`\\`\\`\n\n#### 1.3 Validate Test Files Exist\nCheck that at least one test file was found:\n- If no tests found, inform user and suggest available tests\n- List available test files if selection was unclear\n\n### Step 2: Execute Automated Playwright Tests\n\n#### 2.1 Build Playwright Command\nConstruct the Playwright test command based on the selector:\n\n**For file pattern or specific file**:\n\\`\\`\\`bash\nnpx playwright test [selector]\n\\`\\`\\`\n\n**For tag**:\n\\`\\`\\`bash\nnpx playwright test --grep \"[tag]\"\n\\`\\`\\`\n\n**For all tests**:\n\\`\\`\\`bash\nnpx playwright test\n\\`\\`\\`\n\n**Output**: Custom Bugzy reporter will create hierarchical test-runs/YYYYMMDD-HHMMSS/ structure with manifest.json\n\n#### 2.2 Execute Tests via Bash\nRun the Playwright command:\n\\`\\`\\`bash\nnpx playwright test [selector]\n\\`\\`\\`\n\nWait for execution to complete. This may take several minutes depending on test count.\n\n**Note**: The custom Bugzy reporter will automatically:\n- Generate timestamp in YYYYMMDD-HHMMSS format\n- Create test-runs/{timestamp}/ directory structure\n- Record execution-id.txt with BUGZY_EXECUTION_ID\n- Save results per test case in TC-{id}/exec-1/ folders\n- Generate manifest.json with complete execution summary\n\n#### 2.3 Locate and Read Test Results\nAfter execution completes, find and read the manifest:\n\n1. Find the test run directory (most recent):\n \\`\\`\\`bash\n ls -t test-runs/ | head -1\n \\`\\`\\`\n\n2. Read the manifest.json file:\n \\`\\`\\`bash\n cat test-runs/[timestamp]/manifest.json\n \\`\\`\\`\n\n3. 
Store the timestamp for use in test-debugger-fixer if needed\n\n### Step 3: Analyze Test Results from Manifest\n\n#### 3.1 Parse Manifest\nThe Bugzy custom reporter produces structured output in manifest.json:\n\\`\\`\\`json\n{\n \"bugzyExecutionId\": \"70a59676-cfd0-4ffd-b8ad-69ceff25c31d\",\n \"timestamp\": \"20251115-123456\",\n \"startTime\": \"2025-11-15T12:34:56.789Z\",\n \"endTime\": \"2025-11-15T12:45:23.456Z\",\n \"status\": \"completed\",\n \"stats\": {\n \"totalTests\": 10,\n \"passed\": 8,\n \"failed\": 2,\n \"totalExecutions\": 10\n },\n \"testCases\": [\n {\n \"id\": \"TC-001-login\",\n \"name\": \"Login functionality\",\n \"totalExecutions\": 1,\n \"finalStatus\": \"passed\",\n \"executions\": [\n {\n \"number\": 1,\n \"status\": \"passed\",\n \"duration\": 1234,\n \"videoFile\": \"video.webm\",\n \"hasTrace\": false,\n \"hasScreenshots\": false,\n \"error\": null\n }\n ]\n },\n {\n \"id\": \"TC-002-invalid-credentials\",\n \"name\": \"Invalid credentials error\",\n \"totalExecutions\": 1,\n \"finalStatus\": \"failed\",\n \"executions\": [\n {\n \"number\": 1,\n \"status\": \"failed\",\n \"duration\": 2345,\n \"videoFile\": \"video.webm\",\n \"hasTrace\": true,\n \"hasScreenshots\": true,\n \"error\": \"expect(locator).toBeVisible()...\"\n }\n ]\n }\n ]\n}\n\\`\\`\\`\n\n#### 3.2 Extract Test Results\nFrom the manifest, extract:\n- **Total tests**: stats.totalTests\n- **Passed tests**: stats.passed\n- **Failed tests**: stats.failed\n- **Total executions**: stats.totalExecutions (includes re-runs)\n- **Duration**: Calculate from startTime and endTime\n\nFor each failed test, collect from testCases array:\n- Test ID (id field)\n- Test name (name field)\n- Final status (finalStatus field)\n- Latest execution details:\n - Error message (executions[last].error)\n - Duration (executions[last].duration)\n - Video file location (test-runs/{timestamp}/{id}/exec-{num}/{videoFile})\n - Trace availability (executions[last].hasTrace)\n - Screenshots availability (executions[last].hasScreenshots)\n\n#### 3.3 Generate Summary Statistics\n\\`\\`\\`markdown\n## Test Execution Summary\n- Total Tests: [count]\n- Passed: [count] ([percentage]%)\n- Failed: [count] ([percentage]%)\n- Skipped: [count] ([percentage]%)\n- Total Duration: [time]\n\\`\\`\\`\n\n### Step 5: Triage Failed Tests\n\nAfter analyzing test results, triage each failure to determine if it's a product bug or test issue:\n\n#### 5.1 Triage Failed Tests FIRST\n\n**⚠️ IMPORTANT: Do NOT report bugs without triaging first.**\n\nFor each failed test:\n\n1. **Read failure details** from JSON report (error message, stack trace)\n2. **Classify the failure:**\n - **Product bug**: Application behaves incorrectly\n - **Test issue**: Test code needs fixing (selector, timing, assertion)\n3. 
**Document classification** for next steps\n\n**Classification Guidelines:**\n- **Product Bug**: Correct test code, unexpected application behavior\n- **Test Issue**: Selector not found, timeout, race condition, wrong assertion\n\n#### 5.2 Fix Test Issues Automatically\n\nFor each test classified as **[TEST ISSUE]**, use the test-debugger-fixer agent to automatically fix the test:\n\n\\`\\`\\`\n{{INVOKE_TEST_DEBUGGER_FIXER}}\n\nFor each failed test classified as a test issue (not a product bug), provide:\n- Test run timestamp: [from manifest.timestamp]\n- Test case ID: [from testCases[].id in manifest]\n- Test name/title: [from testCases[].name in manifest]\n- Error message: [from testCases[].executions[last].error]\n- Execution details path: test-runs/{timestamp}/{testCaseId}/exec-1/\n\nThe agent will:\n1. Read the execution details from result.json\n2. Analyze the failure (error message, trace if available)\n3. Identify the root cause (brittle selector, missing wait, race condition, etc.)\n4. Apply appropriate fix to the test code\n5. Rerun the test\n6. The custom reporter will automatically create the next exec-N/ folder\n7. Repeat up to 3 times if needed (exec-1, exec-2, exec-3)\n8. Report success or escalate as likely product bug\n\nAfter test-debugger-fixer completes:\n- If fix succeeded: Mark test as fixed, add to \"Tests Fixed\" list\n- If still failing after 3 attempts: Reclassify as potential product bug for Step 5.3\n\\`\\`\\`\n\n**Track Fixed Tests:**\n- Maintain list of tests fixed automatically\n- Include fix description (e.g., \"Updated selector from CSS to role-based\")\n- Note verification status (test now passes)\n\n{{ISSUE_TRACKER_INSTRUCTIONS}}\n\n${KNOWLEDGE_BASE_UPDATE_INSTRUCTIONS}\n\n{{TEAM_COMMUNICATOR_INSTRUCTIONS}}\n\n### Step 6: Handle Special Cases\n\n#### If No Test Cases Found\nIf no test cases match the selection criteria:\n1. Inform user that no matching test cases were found\n2. List available test cases or suggest running \\`/generate-test-cases\\` first\n3. Provide examples of valid selection criteria\n\n#### If Test Runner Agent Fails\nIf the test-runner agent encounters issues:\n1. Report the specific error\n2. Suggest troubleshooting steps\n3. Offer to run tests individually if batch execution failed\n\n#### If Test Cases Are Invalid\nIf selected test cases have formatting issues:\n1. Report which test cases are invalid\n2. Specify what's missing or incorrect\n3. 
Offer to fix the issues or skip invalid tests\n\n### Important Notes\n\n**Test Selection Strategy**:\n- **Always read** \\`.bugzy/runtime/test-execution-strategy.md\\` before selecting tests\n- Default to \\`@smoke\\` tests for fast validation unless user explicitly requests otherwise\n- Smoke tests provide 100% manual test case coverage with zero redundancy (~2-5 min)\n- Full regression includes intentional redundancy for diagnostic value (~10-15 min)\n- Use context keywords from user request to choose appropriate tier\n\n**Test Execution**:\n- Automated Playwright tests are executed via bash command, not through agents\n- Test execution time varies by tier (see strategy document for details)\n- JSON reports provide structured test results for analysis\n- Playwright automatically captures traces, screenshots, and videos on failures\n- Test artifacts are stored in test-results/ directory\n\n**Failure Handling**:\n- Test failures are automatically triaged (product bugs vs test issues)\n- Test issues are automatically fixed by the test-debugger-fixer subagent\n- Product bugs are logged via issue tracker after triage\n- All results are analyzed for learning opportunities and team communication\n- Critical failures trigger immediate team notification\n\n**Related Documentation**:\n- \\`.bugzy/runtime/test-execution-strategy.md\\` - When and why to run specific tests\n- \\`.bugzy/runtime/testing-best-practices.md\\` - How to write tests (patterns and anti-patterns)\n\n`,\n\n optionalSubagents: [\n {\n role: 'issue-tracker',\n contentBlock: `\n\n#### 5.3 Log Product Bugs via Issue Tracker\n\nAfter triage in Step 5.1, for tests classified as **[PRODUCT BUG]**, use the issue-tracker agent to log bugs:\n\nFor each bug to report, use the issue-tracker agent:\n\n\\`\\`\\`\n{{INVOKE_ISSUE_TRACKER}}\n1. Check for duplicate bugs in the tracking system\n - The agent will automatically search for similar existing issues\n - It maintains memory of recently reported issues\n - Duplicate detection happens automatically - don't create manual checks\n\n2. For each new bug (non-duplicate):\n Create detailed bug report with:\n - **Title**: Clear, descriptive summary (e.g., \"Login button fails with timeout on checkout page\")\n - **Description**:\n - What happened vs. what was expected\n - Impact on users\n - Test reference: [file path] › [test title]\n - **Reproduction Steps**:\n - List steps from the failing test\n - Include specific test data used\n - Note any setup requirements from test file\n - **Test Execution Details**:\n - Test file: [file path from JSON report]\n - Test name: [test title from JSON report]\n - Error message: [from JSON report]\n - Stack trace: [from JSON report]\n - Trace file: [path if available]\n - Screenshots: [paths if available]\n - **Environment Details**:\n - Browser and version (from Playwright config)\n - Test environment URL (from .env.testdata BASE_URL)\n - Timestamp of failure\n - **Severity/Priority**: Based on:\n - Test type (smoke tests = high priority)\n - User impact\n - Frequency (always fails vs flaky)\n - **Additional Context**:\n - Error messages or stack traces from JSON report\n - Related test files (if part of test suite)\n - Relevant knowledge from knowledge-base.md\n\n3. 
Track created issues:\n - Note the issue ID/number returned\n - Update issue tracker memory with new bugs\n - Prepare issue references for team communication\n\\`\\`\\`\n\n#### 5.4 Summary of Bug Reporting\n\nAfter issue tracker agent completes, create a summary:\n\\`\\`\\`markdown\n### Bug Reporting Summary\n- Total bugs found: [count of FAIL tests]\n- New bugs reported: [count of newly created issues]\n- Duplicate bugs found: [count of duplicates detected]\n- Issues not reported: [count of skipped/known issues]\n\n**New Bug Reports**:\n- [Issue ID]: [Bug title] (Test: TC-XXX, Priority: [priority])\n- [Issue ID]: [Bug title] (Test: TC-YYY, Priority: [priority])\n\n**Duplicate Bugs** (already tracked):\n- [Existing Issue ID]: [Bug title] (Matches test: TC-XXX)\n\n**Not Reported** (skipped or known):\n- TC-XXX: Skipped due to blocker failure\n- TC-YYY: Known issue documented in knowledge base\n\\`\\`\\`\n\n**Note**: The issue tracker agent handles all duplicate detection and system integration automatically. Simply provide the bug details and let it manage the rest.`\n },\n {\n role: 'team-communicator',\n contentBlock: `### Step 5.5: Team Communication\n\n{{INVOKE_TEAM_COMMUNICATOR}}\n\nNotify the product team about test execution:\n\n\\`\\`\\`\n1. Post test execution summary with key statistics\n2. Highlight critical failures that need immediate attention\n3. Share important learnings about product behavior\n4. Report any potential bugs discovered during testing\n5. Ask for clarification on unexpected behaviors\n6. Provide recommendations for areas needing investigation\n7. Use appropriate urgency level based on failure severity\n\\`\\`\\`\n\nThe team communication should include:\n- **Execution summary**: Overall pass/fail statistics and timing\n- **Critical issues**: High-priority failures that need immediate attention\n- **Key learnings**: Important discoveries about product behavior\n- **Potential bugs**: Issues that may require bug reports\n- **Clarifications needed**: Unexpected behaviors requiring team input\n- **Recommendations**: Suggested follow-up actions\n\n**Communication strategy based on results**:\n- **All tests passed**: Brief positive update, highlight learnings\n- **Minor failures**: Standard update with failure details and plans\n- **Critical failures**: Urgent notification with detailed analysis\n- **New discoveries**: Separate message highlighting interesting findings\n\n**Update team communicator memory**:\n- Record test execution communication\n- Track team response patterns to test results\n- Document any clarifications provided by the team\n- Note team priorities based on their responses`\n }\n ],\n requiredSubagents: ['test-runner', 'test-debugger-fixer']\n};\n","/**\n * Verify Changes - Unified Multi-Trigger Task\n * Single dynamic task that handles all trigger sources: manual, Slack, GitHub PR, CI/CD\n *\n * This task replaces verify-changes-manual and verify-changes-slack with intelligent\n * trigger detection and multi-channel output routing.\n */\n\nimport { TaskTemplate } from '../types';\nimport { TASK_SLUGS } from '../constants';\nimport { KNOWLEDGE_BASE_READ_INSTRUCTIONS, KNOWLEDGE_BASE_UPDATE_INSTRUCTIONS } from '../templates/knowledge-base.js';\n\nexport const verifyChangesTask: TaskTemplate = {\n slug: TASK_SLUGS.VERIFY_CHANGES,\n name: 'Verify Changes',\n description: 'Unified verification command for all trigger sources with automated tests and manual checklists',\n\n frontmatter: {\n description: 'Verify code changes with automated tests and manual 
verification checklists',\n 'argument-hint': '[trigger-auto-detected]',\n },\n\n baseContent: `# Verify Changes - Unified Multi-Trigger Workflow\n\n## SECURITY NOTICE\n**CRITICAL**: Never read the \\`.env\\` file. It contains ONLY secrets (passwords, API keys).\n- **Read \\`.env.testdata\\`** for non-secret environment variables (TEST_BASE_URL, TEST_OWNER_EMAIL, etc.)\n- \\`.env.testdata\\` contains actual values for test data, URLs, and non-sensitive configuration\n- For secrets: Reference variable names only (TEST_OWNER_PASSWORD) - values are injected at runtime\n- The \\`.env\\` file access is blocked by settings.json\n\n## Overview\n\nThis task performs comprehensive change verification with:\n- **Automated testing**: Execute Playwright tests with automatic triage and fixing\n- **Manual verification checklists**: Generate role-specific checklists for non-automatable scenarios\n- **Multi-trigger support**: Works from manual CLI, Slack messages, GitHub PRs, and CI/CD\n- **Smart output routing**: Results formatted and delivered to the appropriate channel\n\n## Arguments\n\n**Input**: \\$ARGUMENTS\n\nThe input format determines the trigger source and context extraction strategy.\n\n${KNOWLEDGE_BASE_READ_INSTRUCTIONS}\n\n## Step 1: Detect Trigger Source\n\nAnalyze the input format to determine how this task was invoked:\n\n### 1.1 Identify Trigger Type\n\n**GitHub PR Webhook:**\n- Input contains \\`pull_request\\` object with structure:\n \\`\\`\\`json\n {\n \"pull_request\": {\n \"number\": 123,\n \"title\": \"...\",\n \"body\": \"...\",\n \"changed_files\": [...],\n \"base\": { \"ref\": \"main\" },\n \"head\": { \"ref\": \"feature-branch\" },\n \"user\": { \"login\": \"...\" }\n }\n }\n \\`\\`\\`\n→ **Trigger detected: GITHUB_PR**\n\n**Slack Event:**\n- Input contains \\`event\\` object with structure:\n \\`\\`\\`json\n {\n \"eventType\": \"com.slack.message\" or \"com.slack.app_mention\",\n \"event\": {\n \"type\": \"message\",\n \"channel\": \"C123456\",\n \"user\": \"U123456\",\n \"text\": \"message content\",\n \"ts\": \"1234567890.123456\",\n \"thread_ts\": \"...\" (optional)\n }\n }\n \\`\\`\\`\n→ **Trigger detected: SLACK_MESSAGE**\n\n**CI/CD Environment:**\n- Environment variables present:\n - \\`CI=true\\`\n - \\`GITHUB_REF\\` (e.g., \"refs/heads/feature-branch\")\n - \\`GITHUB_SHA\\` (commit hash)\n - \\`GITHUB_BASE_REF\\` (base branch)\n - \\`GITHUB_HEAD_REF\\` (head branch)\n- Git context available via bash commands\n→ **Trigger detected: CI_CD**\n\n**Manual Invocation:**\n- Input is natural language, URL, or issue identifier\n- Patterns: \"PR #123\", GitHub URL, \"PROJ-456\", feature description\n→ **Trigger detected: MANUAL**\n\n### 1.2 Store Trigger Context\n\nStore the detected trigger for use in Step 6 (output routing):\n- Set variable: \\`TRIGGER_SOURCE\\` = [GITHUB_PR | SLACK_MESSAGE | CI_CD | MANUAL]\n- This determines output formatting and delivery channel\n\n## Step 2: Extract Context Based on Trigger\n\nBased on the detected trigger source, extract relevant context:\n\n### 2.1 GitHub PR Trigger - Extract PR Details\n\nIf trigger is GITHUB_PR:\n- **PR number**: \\`pull_request.number\\`\n- **Title**: \\`pull_request.title\\`\n- **Description**: \\`pull_request.body\\`\n- **Changed files**: \\`pull_request.changed_files\\` (array of file paths)\n- **Author**: \\`pull_request.user.login\\`\n- **Base branch**: \\`pull_request.base.ref\\`\n- **Head branch**: \\`pull_request.head.ref\\`\n\nOptional: Fetch additional details via GitHub API if needed (PR comments, 
reviews)\n\n### 2.2 Slack Message Trigger - Parse Natural Language\n\nIf trigger is SLACK_MESSAGE:\n- **Message text**: \\`event.text\\`\n- **Channel**: \\`event.channel\\` (for posting results)\n- **User**: \\`event.user\\` (requester)\n- **Thread**: \\`event.thread_ts\\` or \\`event.ts\\` (for threading replies)\n\n**Extract references from text:**\n- PR numbers: \"#123\", \"PR 123\", \"pull request 123\"\n- Issue IDs: \"PROJ-456\", \"BUG-123\"\n- URLs: GitHub PR links, deployment URLs\n- Feature names: Quoted terms, capitalized phrases\n- Environments: \"staging\", \"production\", \"preview\"\n\n### 2.3 CI/CD Trigger - Read CI Environment\n\nIf trigger is CI_CD:\n- **CI platform**: Read \\`CI\\` env var\n- **Branch**: \\`GITHUB_REF\\` → extract branch name\n- **Commit**: \\`GITHUB_SHA\\`\n- **Base branch**: \\`GITHUB_BASE_REF\\` (for PRs)\n- **Changed files**: Run \\`git diff --name-only $BASE_SHA...$HEAD_SHA\\`\n\nIf in PR context, can also fetch PR number from CI env vars (e.g., \\`GITHUB_EVENT_PATH\\`)\n\n### 2.4 Manual Trigger - Parse User Input\n\nIf trigger is MANUAL:\n- **GitHub PR URL**: Parse to extract PR number, then fetch details via API\n - Pattern: \\`https://github.com/owner/repo/pull/123\\`\n - Extract: owner, repo, PR number\n - Fetch: PR details, diff, comments\n- **Issue identifier**: Extract issue ID\n - Patterns: \"PROJ-123\", \"#456\", \"BUG-789\"\n- **Feature description**: Use text as-is for verification context\n- **Deployment URL**: Extract for testing environment\n\n### 2.5 Unified Context Structure\n\nAfter extraction, create unified context structure:\n\\`\\`\\`\nCHANGE_CONTEXT = {\n trigger: [GITHUB_PR | SLACK_MESSAGE | CI_CD | MANUAL],\n title: \"...\",\n description: \"...\",\n changedFiles: [\"src/pages/Login.tsx\", ...],\n author: \"...\",\n environment: \"staging\" | \"production\" | URL,\n prNumber: 123 (if available),\n issueId: \"PROJ-456\" (if available),\n\n // For output routing:\n slackChannel: \"C123456\" (if Slack trigger),\n slackThread: \"1234567890.123456\" (if Slack trigger),\n githubRepo: \"owner/repo\" (if GitHub trigger)\n}\n\\`\\`\\`\n\n## Step 3: Determine Test Scope (Smart Selection)\n\n**IMPORTANT**: You do NOT have access to code files. Infer test scope from change **descriptions** only.\n\nBased on PR title, description, and commit messages, intelligently select which tests to run:\n\n### 3.1 Infer Test Scope from Change Descriptions\n\nAnalyze the change description to identify affected feature areas:\n\n**Example mappings from descriptions to test suites:**\n\n| Description Keywords | Inferred Test Scope | Example |\n|---------------------|-------------------|---------|\n| \"login\", \"authentication\", \"sign in/up\" | \\`tests/specs/auth/\\` | \"Fix login page validation\" → Auth tests |\n| \"checkout\", \"payment\", \"purchase\" | \\`tests/specs/checkout/\\` | \"Optimize checkout flow\" → Checkout tests |\n| \"cart\", \"shopping cart\", \"add to cart\" | \\`tests/specs/cart/\\` | \"Update cart calculations\" → Cart tests |\n| \"API\", \"endpoint\", \"backend\" | API test suites | \"Add new user API endpoint\" → User API tests |\n| \"profile\", \"account\", \"settings\" | \\`tests/specs/profile/\\` or \\`tests/specs/settings/\\` | \"Profile page redesign\" → Profile tests |\n\n**Inference strategy:**\n1. **Extract feature keywords** from PR title and description\n - PR title: \"feat(checkout): Add PayPal payment option\"\n - Keywords: [\"checkout\", \"payment\"]\n - Inferred scope: Checkout tests\n\n2. 
**Analyze commit messages** for conventional commit scopes\n - \\`feat(auth): Add password reset flow\\` → Auth tests\n - \\`fix(cart): Resolve quantity update bug\\` → Cart tests\n\n3. **Map keywords to test organization**\n - Reference: Tests are organized by feature under \\`tests/specs/\\` (see \\`.bugzy/runtime/testing-best-practices.md\\`)\n - Feature areas typically include: auth/, checkout/, cart/, profile/, api/, etc.\n\n4. **Identify test scope breadth from description tone**\n - \"Fix typo in button label\" → Narrow scope (smoke tests)\n - \"Refactor shared utility functions\" → Wide scope (full suite)\n - \"Update single component styling\" → Narrow scope (component tests)\n\n### 3.2 Fallback Strategies Based on Description Analysis\n\n**Description patterns that indicate full suite:**\n- \"Refactor shared/common utilities\" (wide impact)\n- \"Update dependencies\" or \"Upgrade framework\" (safety validation)\n- \"Merge main into feature\" or \"Sync with main\" (comprehensive validation)\n- \"Breaking changes\" or \"Major version update\" (thorough testing)\n- \"Database migration\" or \"Schema changes\" (data integrity)\n\n**Description patterns that indicate smoke tests only:**\n- \"Fix typo\" or \"Update copy/text\" (cosmetic change)\n- \"Update README\" or \"Documentation only\" (no functional change)\n- \"Fix formatting\" or \"Linting fixes\" (no logic change)\n\n**When description is vague or ambiguous:**\n- Examples: \"Updated several components\", \"Various bug fixes\", \"Improvements\"\n- **ACTION REQUIRED**: Use AskUserQuestion tool to clarify test scope\n- Provide options based on available test suites:\n \\`\\`\\`typescript\n AskUserQuestion({\n questions: [{\n question: \"The change description is broad. Which test suites should run?\",\n header: \"Test Scope\",\n multiSelect: true,\n options: [\n { label: \"Auth tests\", description: \"Login, signup, password reset\" },\n { label: \"Checkout tests\", description: \"Purchase flow, payment processing\" },\n { label: \"Full test suite\", description: \"Run all tests for comprehensive validation\" },\n { label: \"Smoke tests only\", description: \"Quick validation of critical paths\" }\n ]\n }]\n })\n \\`\\`\\`\n\n**If specific test scope requested:**\n- User can override with: \"only smoke tests\", \"full suite\", specific test suite names\n- Honor user's explicit scope over smart selection\n\n### 3.3 Test Selection Summary\n\nGenerate summary of test selection based on description analysis:\n\\`\\`\\`markdown\n### Test Scope Determined\n- **Change description**: [PR title or summary]\n- **Identified keywords**: [list extracted keywords: \"auth\", \"checkout\", etc.]\n- **Affected test suites**: [list inferred test suite paths or names]\n- **Scope reasoning**: [explain why this scope was selected]\n- **Execution strategy**: [smart selection | full suite | smoke tests | user-specified]\n\\`\\`\\`\n\n**Example summary:**\n\\`\\`\\`markdown\n### Test Scope Determined\n- **Change description**: \"feat(checkout): Add PayPal payment option\"\n- **Identified keywords**: checkout, payment, PayPal\n- **Affected test suites**: tests/specs/checkout/payment.spec.ts, tests/specs/checkout/purchase-flow.spec.ts\n- **Scope reasoning**: Change affects checkout payment processing; running all checkout tests to validate payment integration\n- **Execution strategy**: Smart selection (checkout suite)\n\\`\\`\\`\n\n## Step 4: Run Verification Workflow\n\nExecute comprehensive verification combining automated tests and manual 
checklists:\n\n### 4A: Automated Testing (Integrated from /run-tests)\n\nExecute automated Playwright tests with full triage and fixing:\n\n#### 4A.1 Execute Tests\n\nRun the selected tests via Playwright:\n\\`\\`\\`bash\nnpx playwright test [scope] --reporter=json --output=test-results/\n\\`\\`\\`\n\nWait for execution to complete. Capture JSON report from \\`test-results/.last-run.json\\`.\n\n#### 4A.2 Parse Test Results\n\nRead and analyze the JSON report:\n- Extract: Total, passed, failed, skipped counts\n- For each failed test: file path, test name, error message, stack trace, trace file\n- Calculate: Pass rate, total duration\n\n#### 4A.3 Triage Failures (Classification)\n\nFor each failed test, classify as:\n- **[PRODUCT BUG]**: Correct test code, but application behaves incorrectly\n- **[TEST ISSUE]**: Test code needs fixing (selector, timing, assertion)\n\nClassification guidelines:\n- Product Bug: Expected behavior not met, functional issue\n- Test Issue: Selector not found, timeout, race condition, brittle locator\n\n#### 4A.4 Fix Test Issues Automatically\n\nFor each test classified as **[TEST ISSUE]**, use the test-debugger-fixer agent to automatically fix the test:\n\n\\`\\`\\`\n{{INVOKE_TEST_DEBUGGER_FIXER}}\n\nFor each failed test classified as a test issue (not a product bug), provide:\n- Test file path: [from JSON report]\n- Test name/title: [from JSON report]\n- Error message: [from JSON report]\n- Stack trace: [from JSON report]\n- Trace file path: [if available]\n\nThe agent will:\n1. Read the failing test file\n2. Analyze the failure details\n3. Open browser via Playwright MCP to debug if needed\n4. Identify the root cause (brittle selector, missing wait, race condition, etc.)\n5. Apply appropriate fix to the test code\n6. Rerun the test to verify the fix (re-run 10x for flaky tests)\n7. Repeat up to 3 times if needed\n8. Report success or escalate as likely product bug\n\nAfter test-debugger-fixer completes:\n- If fix succeeded: Mark test as fixed, add to \"Tests Fixed\" list\n- If still failing after 3 attempts: Reclassify as potential product bug\n\\`\\`\\`\n\n**Track Fixed Tests:**\n- Maintain list of tests fixed automatically\n- Include fix description (e.g., \"Updated selector from CSS to role-based\")\n- Note verification status (test now passes)\n- Reference .bugzy/runtime/testing-best-practices.md for best practices\n\n#### 4A.5 Log Product Bugs\n\n{{ISSUE_TRACKER_INSTRUCTIONS}}\n\nFor tests classified as [PRODUCT BUG]:\n- {{INVOKE_ISSUE_TRACKER}} to create bug reports\n- Agent checks for duplicates automatically\n- Creates detailed report with:\n - Title, description, reproduction steps\n - Test reference, error details, stack trace\n - Screenshots, traces, environment details\n - Severity based on test type and impact\n- Returns issue ID for tracking\n\n### 4B: Manual Verification Checklist (NEW)\n\nGenerate human-readable checklist for non-automatable scenarios:\n\n#### Generate Manual Verification Checklist\n\nAnalyze the code changes and generate a manual verification checklist for scenarios that cannot be automated.\n\n#### Analyze Change 
### 4B: Manual Verification Checklist (NEW)

Generate human-readable checklist for non-automatable scenarios:

#### Generate Manual Verification Checklist

Analyze the code changes and generate a manual verification checklist for scenarios that cannot be automated.

#### Analyze Change Context

Review the provided context to understand what changed:
- Read PR title, description, and commit messages
- Identify change types from descriptions: visual, UX, forms, mobile, accessibility, edge cases
- Understand the scope and impact of changes from the change descriptions

#### Identify Non-Automatable Scenarios

Based on the change analysis, identify scenarios that require human verification:

**1. Visual Design Changes** (CSS, styling, design files, graphics)
- Color schemes, gradients, shadows
- Typography, font sizes, line heights
- Spacing, margins, padding, alignment
- Visual consistency across components
- Brand guideline compliance
→ Add **Design Validation** checklist items

**2. UX Interaction Changes** (animations, transitions, gestures, micro-interactions)
- Animation smoothness (60fps expectation)
- Transition timing and easing
- Interaction responsiveness and feel
- Loading states and skeleton screens
- Hover effects, focus states
→ Add **UX Feel** checklist items

**3. Form and Input Changes** (new form fields, input validation, user input)
- Screen reader compatibility
- Keyboard navigation (Tab order, Enter to submit)
- Error message clarity and placement
- Color contrast (WCAG 2.1 AA: 4.5:1 ratio for text)
- Focus indicators visibility
→ Add **Accessibility** checklist items

**4. Mobile and Responsive Changes** (media queries, touch interactions, viewport)
- Touch target sizes (≥44px iOS, ≥48dp Android)
- Responsive layout breakpoints
- Mobile keyboard behavior (doesn't obscure inputs)
- Swipe gestures and touch interactions
- Pinch-to-zoom functionality
→ Add **Mobile Experience** checklist items

**5. Low ROI or Rare Scenarios** (edge cases, one-time migrations, rare user paths)
- Scenarios used by < 1% of users
- Complex multi-system integrations
- One-time data migrations
- Leap year, DST, timezone edge cases
→ Add **Exploratory Testing** notes

**6. 
Cross-Browser Visual Consistency** (layout rendering differences)\n- Layout consistency across Chrome, Firefox, Safari\n- CSS feature support differences\n- Font rendering variations\n→ Add **Cross-Browser** checklist items (if significant visual changes)\n\n#### Generate Role-Specific Checklist Items\n\nFor each identified scenario, create clear, actionable checklist items:\n\n**Format for each item:**\n- Clear, specific task description\n- Assigned role (@design-team, @qa-team, @a11y-team, @mobile-team)\n- Acceptance criteria (what constitutes pass/fail)\n- Reference to standards when applicable (WCAG, iOS HIG, Material Design)\n- Priority indicator (🔴 critical, 🟡 important, 🟢 nice-to-have)\n\n**Example checklist items:**\n\n**Design Validation (@design-team)**\n- [ ] 🔴 Login button color matches brand guidelines (#FF6B35)\n- [ ] 🟡 Loading spinner animation smooth (60fps, no jank)\n- [ ] 🟡 Card shadows match design system (elevation-2: 0 2px 4px rgba(0,0,0,0.1))\n- [ ] 🟢 Hover states provide appropriate visual feedback\n\n**Accessibility (@a11y-team)**\n- [ ] 🔴 Screen reader announces form errors clearly (tested with VoiceOver/NVDA)\n- [ ] 🔴 Keyboard navigation: Tab through all interactive elements in logical order\n- [ ] 🔴 Color contrast meets WCAG 2.1 AA (4.5:1 for body text, 3:1 for large text)\n- [ ] 🟡 Focus indicators visible on all interactive elements\n\n**Mobile Experience (@qa-team, @mobile-team)**\n- [ ] 🔴 Touch targets ≥44px (iOS Human Interface Guidelines)\n- [ ] 🔴 Mobile keyboard doesn't obscure input fields on iOS/Android\n- [ ] 🟡 Swipe gestures work naturally without conflicts\n- [ ] 🟡 Responsive layout adapts properly on iPhone SE (smallest screen)\n\n**UX Feel (@design-team, @qa-team)**\n- [ ] 🟡 Page transitions smooth and not jarring\n- [ ] 🟡 Button click feedback immediate (< 100ms perceived response)\n- [ ] 🟢 Loading states prevent confusion during data fetch\n\n**Exploratory Testing (@qa-team)**\n- [ ] 🟢 Test edge case: User submits form during network timeout\n- [ ] 🟢 Test edge case: User navigates back during submission\n\n#### Format for Output Channel\n\nAdapt the checklist format based on the output channel (determined by trigger source):\n\n**Terminal (Manual Trigger):**\n\\`\\`\\`markdown\nMANUAL VERIFICATION CHECKLIST:\nPlease verify the following before merging:\n\nDesign Validation (@design-team):\n [ ] 🔴 Checkout button colors match brand guidelines (#FF6B35)\n [ ] 🟡 Loading spinner animation smooth (60fps)\n\nAccessibility (@a11y-team):\n [ ] 🔴 Screen reader announces error messages\n [ ] 🔴 Keyboard navigation works (Tab order logical)\n [ ] 🔴 Color contrast meets WCAG 2.1 AA (4.5:1 ratio)\n\nMobile Experience (@qa-team):\n [ ] 🔴 Touch targets ≥44px (iOS HIG)\n [ ] 🟡 Responsive layout works on iPhone SE\n\\`\\`\\`\n\n**Slack (Slack Trigger):**\n\\`\\`\\`markdown\n*Manual Verification Needed:*\n□ Visual: Button colors, animations (60fps)\n□ Mobile: Touch targets ≥44px\n□ A11y: Screen reader, keyboard nav, contrast\n\ncc @design-team @qa-team @a11y-team\n\\`\\`\\`\n\n**GitHub PR Comment (GitHub Trigger):**\n\\`\\`\\`markdown\n### Manual Verification Required\n\nThe following scenarios require human verification before release:\n\n#### Design Validation (@design-team)\n- [ ] 🔴 Checkout button colors match brand guidelines (#FF6B35)\n- [ ] 🟡 Loading spinner animation smooth (60fps)\n- [ ] 🟡 Card shadows match design system\n\n#### Accessibility (@a11y-team)\n- [ ] 🔴 Screen reader announces error messages (VoiceOver/NVDA)\n- [ ] 🔴 Keyboard navigation through 
all form fields (Tab order)
- [ ] 🔴 Color contrast meets WCAG 2.1 AA (4.5:1 for body text)

#### Mobile Experience (@qa-team)
- [ ] 🔴 Touch targets ≥44px (iOS Human Interface Guidelines)
- [ ] 🔴 Mobile keyboard doesn't obscure input fields
- [ ] 🟡 Responsive layout works on iPhone SE (375x667)

---
*Legend: 🔴 Critical • 🟡 Important • 🟢 Nice-to-have*
\`\`\`

#### Guidelines for Quality Checklists

**DO:**
- Make each item verifiable (clear pass/fail criteria)
- Include context (why this needs manual verification)
- Reference standards (WCAG, iOS HIG, Material Design)
- Assign to specific roles
- Prioritize items (critical, important, nice-to-have)
- Be specific (not "check colors" but "Login button color matches #FF6B35")

**DON'T:**
- Create vague items ("test thoroughly")
- List items that can be automated
- Skip role assignments
- Forget acceptance criteria
- Omit priority indicators

#### When No Manual Verification Is Needed

If the changes are purely:
- Backend logic (no UI changes)
- Code refactoring (no behavior changes)
- Configuration changes (no user-facing impact)
- Fully covered by automated tests

Output:
\`\`\`markdown
**Manual Verification:** Not required for this change.
All user-facing changes are fully covered by automated tests.
\`\`\`

#### Summary

After generating the checklist:
- Count total items by priority (🔴 critical, 🟡 important, 🟢 nice-to-have)
- Estimate time needed (e.g., "~30 minutes for design QA, ~45 minutes for accessibility testing")
- Suggest who should perform each category of checks

### 4C: Aggregate Results

Combine automated and manual verification results:

\`\`\`markdown
## Verification Results Summary

### Automated Tests
- Total tests: [count]
- Passed: [count] ([percentage]%)
- Failed: [count] ([percentage]%)
- Test issues fixed: [count]
- Product bugs logged: [count]
- Duration: [time]

### Manual Verification Required
[Checklist generated in 4B, or "Not required"]

### Overall Recommendation
[✅ Safe to merge | ⚠️ Review bugs before merging | ❌ Do not merge]
\`\`\`
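A compact sketch of the "Overall Recommendation" mapping used in 4C. The thresholds are assumptions for illustration; tune them to the team's risk tolerance.

\`\`\`typescript
type Recommendation = '✅ Safe to merge' | '⚠️ Review bugs before merging' | '❌ Do not merge';

interface VerificationSummary {
  totalTests: number;
  unresolvedFailures: number; // failures remaining after automatic test fixing
  criticalBugs: number;       // product bugs logged at critical severity
  bugsLogged: number;         // all product bugs logged this run
}

function recommend(s: VerificationSummary): Recommendation {
  const failureRate = s.totalTests > 0 ? s.unresolvedFailures / s.totalTests : 0;
  // Assumed thresholds: any critical bug or >20% unresolved failures blocks the merge
  if (s.criticalBugs > 0 || failureRate > 0.2) return '❌ Do not merge';
  if (s.unresolvedFailures > 0 || s.bugsLogged > 0) return '⚠️ Review bugs before merging';
  return '✅ Safe to merge';
}
\`\`\`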
## Step 5: Understanding the Change (Documentation Research)

{{DOCUMENTATION_RESEARCHER_INSTRUCTIONS}}

Before proceeding with test creation or execution, ensure requirements are clear through ambiguity detection and adaptive exploration.

**Note**: For detailed exploration and clarification protocols, refer to the complete instructions below. Adapt the depth of exploration based on requirement clarity and use the clarification protocol to detect ambiguity, assess severity, and seek clarification when needed.

After clarification and exploration, analyze the change to determine the verification approach:

### 5.1 Identify Test Scope
Based on the change description, exploration findings, and clarified requirements:
- **Direct impact**: Which features/functionality are directly modified
- **Indirect impact**: What else might be affected (dependencies, integrations)
- **Regression risk**: Existing functionality that should be retested
- **New functionality**: Features that need new test coverage

### 5.2 Determine Verification Strategy
Plan your testing approach based on validated requirements:
- **Priority areas**: Critical paths that must work
- **Test types needed**: Functional, regression, integration, UI/UX
- **Test data requirements**: What test accounts, data, or scenarios needed
- **Success criteria**: What determines the change is working correctly (now clearly defined)

## Step 6: Report Results (Multi-Channel Output)

Route output based on trigger source (from Step 1):

### 6.1 MANUAL Trigger → Terminal Output

Format as comprehensive markdown report for terminal display:

\`\`\`markdown
# Test Verification Report

## Change Summary
- **What Changed**: [Brief description]
- **Scope**: [Affected features/areas]
- **Changed Files**: [count] files

## Automated Test Results
### Statistics
- Total Tests: [count]
- Passed: [count] ([percentage]%)
- Failed: [count]
- Test Issues Fixed: [count]
- Product Bugs Logged: [count]
- Duration: [time]

### Tests Fixed Automatically
[For each fixed test:
- **Test**: [file path] › [test name]
- **Issue**: [problem found]
- **Fix**: [what was changed]
- **Status**: ✅ Now passing
]

### Product Bugs Logged
[For each bug:
- **Issue**: [ISSUE-123] [Bug title]
- **Test**: [test file] › [test name]
- **Severity**: [priority]
- **Link**: [issue tracker URL]
]

## Manual Verification Checklist

[Insert checklist from Step 4B]

## Recommendation
[✅ Safe to merge - all automated tests pass, complete manual checks before release]
[⚠️ Review bugs before merging - [X] bugs need attention]
[❌ Do not merge - critical failures]

## Test Artifacts
- JSON Report: test-results/.last-run.json
- HTML Report: playwright-report/index.html
- Traces: test-results/[test-id]/trace.zip
- Screenshots: test-results/[test-id]/screenshots/
\`\`\`

### 6.2 SLACK_MESSAGE Trigger → Thread Reply

{{TEAM_COMMUNICATOR_INSTRUCTIONS}}

{{INVOKE_TEAM_COMMUNICATOR}} to post concise results to Slack thread:

\`\`\`
Post verification results.

**Channel**: [from CHANGE_CONTEXT.slackChannel]
**Thread**: [from CHANGE_CONTEXT.slackThread]

**Message**:
🧪 *Verification Results for [change title]*

*Automated:* ✅ [passed]/[total] tests passed ([duration])
[If test issues fixed:] 🔧 [count] test issues auto-fixed
[If bugs logged:] 🐛 [count] bugs logged ([list issue IDs])

*Manual Verification Needed:*
[Concise checklist summary - collapsed/expandable]
□ Visual: [key items]
□ Mobile: [key items]
□ A11y: [key items]

*Recommendation:* [✅ Safe to merge | ⚠️ Review bugs | ❌ Blocked]

[If bugs logged:] cc @[relevant-team-members]
[Link to full test report if available]
\`\`\`
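A compact sketch of the routing itself, covering all four channels described in this step (the GitHub and CI/CD formats follow in 6.3 and 6.4). The \`postSlackThreadReply\` and \`postPrComment\` functions are hypothetical placeholders for the team-communicator subagent and the GitHub API call.

\`\`\`typescript
type Trigger = 'MANUAL' | 'SLACK_MESSAGE' | 'GITHUB_PR' | 'CI_CD';

// Placeholders - in practice these are handled by the team-communicator
// subagent (6.2) and the GitHub API / gh CLI (6.3)
declare function postSlackThreadReply(summary: string): Promise<void>;
declare function postPrComment(markdown: string): Promise<void>;

async function routeResults(trigger: Trigger, report: string, hasCriticalFailures: boolean): Promise<void> {
  switch (trigger) {
    case 'MANUAL':
      console.log(report);                // 6.1: full markdown report in the terminal
      break;
    case 'SLACK_MESSAGE':
      await postSlackThreadReply(report); // 6.2: concise thread reply
      break;
    case 'GITHUB_PR':
      await postPrComment(report);        // 6.3: GitHub-flavored PR comment
      break;
    case 'CI_CD':
      console.log(report);                // 6.4: build log output
      process.exitCode = hasCriticalFailures ? 1 : 0; // block merge on failures
      break;
  }
}
\`\`\`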
### 6.3 GITHUB_PR Trigger → PR Comment

Use GitHub API to post comprehensive comment on PR:

**Format as GitHub-flavored markdown:**
\`\`\`markdown
## 🧪 Test Verification Results

**Status:** [✅ All tests passed | ⚠️ Issues found | ❌ Critical failures]

### Automated Tests
| Metric | Value |
|--------|-------|
| Total Tests | [count] |
| Passed | ✅ [count] ([percentage]%) |
| Failed | ❌ [count] |
| Test Issues Fixed | 🔧 [count] |
| Product Bugs Logged | 🐛 [count] |
| Duration | ⏱️ [time] |

### Failed Tests (Triaged)

[For each failure:]

#### ❌ **[Test Name]**
- **File:** \`[test-file-path]\`
- **Cause:** [Product bug | Test issue]
- **Action:** [Bug logged: [ISSUE-123](url) | Fixed: [commit-hash](url)]
- **Details:**
  \`\`\`
  [Error message]
  \`\`\`

### Tests Fixed Automatically

[For each fixed test:]
- ✅ **[Test Name]** (\`[file-path]\`)
  - **Issue:** [brittle selector | missing wait | race condition]
  - **Fix:** [description of fix applied]
  - **Verified:** Passes 10/10 runs

### Product Bugs Logged

[For each bug:]
- 🐛 **[[ISSUE-123](url)]** [Bug title]
  - **Test:** \`[test-file]\` › [test name]
  - **Severity:** [🔴 Critical | 🟡 Important | 🟢 Minor]
  - **Assignee:** @[backend-team | frontend-team]

### Manual Verification Required

The following scenarios require human verification before release:

#### Design Validation (@design-team)
- [ ] 🔴 [Critical design check]
- [ ] 🟡 [Important design check]

#### Accessibility (@a11y-team)
- [ ] 🔴 [Critical a11y check]
- [ ] 🟡 [Important a11y check]

#### Mobile Experience (@qa-team)
- [ ] 🔴 [Critical mobile check]
- [ ] 🟡 [Important mobile check]

---
*Legend: 🔴 Critical • 🟡 Important • 🟢 Nice-to-have*

### Test Artifacts
- [Full HTML Report](playwright-report/index.html)
- [Test Traces](test-results/)

### Recommendation
[✅ **Safe to merge** - All automated tests pass, complete manual checks before release]
[⚠️ **Review required** - [X] bugs need attention, complete manual checks]
[❌ **Do not merge** - Critical failures must be resolved first]

---
*🤖 Automated by Bugzy • [View Test Code](tests/specs/) • [Manual Test Cases](test-cases/)*
\`\`\`

**Post comment via GitHub API:**
- Endpoint: \`POST /repos/{owner}/{repo}/issues/{pr_number}/comments\`
- Use GitHub MCP or bash with \`gh\` CLI (a posting sketch appears after 6.4 below)
- Requires GITHUB_TOKEN from environment

### 6.4 CI_CD Trigger → Build Log + PR Comment

**Output to CI build log:**
- Print detailed results to stdout (captured by CI)
- Use ANSI colors if supported by CI platform
- Same format as MANUAL terminal output

**Exit with appropriate code:**
- Exit 0: All tests passed (safe to merge)
- Exit 1: Tests failed or critical bugs found (block merge)

**Post PR comment if GitHub context available:**
- Check for PR number in CI environment
- If available: Post comment using 6.3 format
- Also notify team via Slack if critical failures
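As referenced in 6.3, a minimal sketch of posting the comment with the \`gh\` CLI and of the CI exit behavior from 6.4. It assumes \`gh\` is authenticated (GITHUB_TOKEN) and the PR number is already known; the report filename is an arbitrary choice.

\`\`\`typescript
import { execFileSync } from 'node:child_process';
import { writeFileSync } from 'node:fs';

// Writing the body to a file avoids shell-escaping the markdown report
function postPrComment(prNumber: number, markdown: string): void {
  writeFileSync('verification-report.md', markdown);
  execFileSync('gh', ['pr', 'comment', String(prNumber), '--body-file', 'verification-report.md'], {
    stdio: 'inherit', // surface gh errors in the build log
  });
}

// CI_CD trigger: block the merge when verification failed (see 6.4)
function exitForCi(hasBlockingFailures: boolean): never {
  process.exit(hasBlockingFailures ? 1 : 0);
}
\`\`\`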
## Additional Steps

### Handle Special Cases

**If no tests found for changed files:**
- Inform user: "No automated tests found for changed files"
- Recommend: "Run smoke test suite for basic validation"
- Still generate manual verification checklist

**If all tests skipped:**
- Explain why (dependencies, environment issues)
- Recommend: Check test configuration and prerequisites

**If test execution fails:**
- Report specific error (Playwright not installed, env vars missing)
- Suggest troubleshooting steps
- Don't proceed with triage if tests didn't run

${KNOWLEDGE_BASE_UPDATE_INSTRUCTIONS}

## Important Notes

- This task handles **all trigger sources** with a single unified workflow
- Trigger detection is automatic based on input format
- Output is automatically routed to the appropriate channel
- Automated tests are executed with **full triage and automatic fixing**
- Manual verification checklists are generated for **non-automatable scenarios**
- Product bugs are logged with **automatic duplicate detection**
- Test issues are fixed automatically with **verification**
- Results include both automated and manual verification items
- For best results, ensure:
  - Playwright is installed (\`npx playwright install\`)
  - Environment variables configured (copy \`.env.testdata\` to \`.env\`)
  - GitHub token available for PR comments (if GitHub trigger)
  - Slack integration configured (if Slack trigger)
  - Issue tracker configured (Linear, Jira, etc.)

## Success Criteria

A successful verification includes:
1. ✅ Trigger source correctly detected
2. ✅ Context extracted completely
3. ✅ Tests executed (or skipped with explanation)
4. ✅ All failures triaged (product bug vs test issue)
5. ✅ Test issues fixed automatically (when possible)
6. ✅ Product bugs logged to issue tracker
7. ✅ Manual verification checklist generated
8. ✅ Results formatted for output channel
9. ✅ Results delivered to appropriate destination
10. ✅ Clear recommendation provided (merge / review / block)`,

  optionalSubagents: [
    {
      role: 'documentation-researcher',
      contentBlock: `#### Research Project Documentation

{{INVOKE_DOCUMENTATION_RESEARCHER}} to gather comprehensive context about the changed features:

\`\`\`
Explore project documentation related to the changes.

Specifically gather:
- Product specifications for affected features
- User stories and acceptance criteria
- Technical architecture documentation
- API endpoints and contracts
- User roles and permissions relevant to the change
- Business rules and validations
- UI/UX specifications
- Known limitations or constraints
- Related bug reports or known issues
- Existing test documentation for this area
\`\`\`

The agent will:
1. Check its memory for previously discovered documentation
2. Explore workspace for relevant pages and databases
3. Build comprehensive understanding of the affected features
4. Return synthesized information to inform testing strategy

Use this information to:
- Better understand the change context
- Identify comprehensive test scenarios
- Recognize integration points and dependencies
- Spot potential edge cases or risk areas
- Enhance manual verification checklist generation`
    },
    {
      role: 'issue-tracker',
      contentBlock: `#### Log Product Bugs

For tests classified as **[PRODUCT BUG]**, {{INVOKE_ISSUE_TRACKER}} to log bugs:

\`\`\`
1. Check for duplicate bugs in the tracking system
   - The agent will automatically search for similar existing issues
   - It maintains memory of recently reported issues
   - Duplicate detection happens automatically - don't create manual checks

2. For each new bug (non-duplicate):
   Create detailed bug report with:
   - **Title**: Clear, descriptive summary (e.g., "Login button fails with timeout on checkout page")
   - **Description**:
     - What happened vs. 
what was expected\n - Impact on users\n - Test reference: [file path] › [test title]\n - **Reproduction Steps**:\n - List steps from the failing test\n - Include specific test data used\n - Note any setup requirements from test file\n - **Test Execution Details**:\n - Test file: [file path from JSON report]\n - Test name: [test title from JSON report]\n - Error message: [from JSON report]\n - Stack trace: [from JSON report]\n - Trace file: [path if available]\n - Screenshots: [paths if available]\n - **Environment Details**:\n - Browser and version (from Playwright config)\n - Test environment URL (from .env.testdata BASE_URL)\n - Timestamp of failure\n - **Severity/Priority**: Based on:\n - Test type (smoke tests = high priority)\n - User impact\n - Frequency (always fails vs flaky)\n - **Additional Context**:\n - Error messages or stack traces from JSON report\n - Related test files (if part of test suite)\n - Relevant knowledge from knowledge-base.md\n\n3. Track created issues:\n - Note the issue ID/number returned\n - Update issue tracker memory with new bugs\n - Prepare issue references for team communication\n\\`\\`\\`\n\n**Note**: The issue tracker agent handles all duplicate detection and system integration automatically. Simply provide the bug details and let it manage the rest.`\n },\n {\n role: 'team-communicator',\n contentBlock: `#### Team Communication\n\n{{INVOKE_TEAM_COMMUNICATOR}} to share verification results (primarily for Slack trigger, but can be used for other triggers):\n\n\\`\\`\\`\n1. Post verification results summary\n2. Highlight critical failures that need immediate attention\n3. Share bugs logged with issue tracker links\n4. Provide manual verification checklist summary\n5. Recommend next steps based on results\n6. Tag relevant team members for critical issues\n7. 
Use appropriate urgency level based on failure severity\n\\`\\`\\`\n\nThe team communication should include:\n- **Execution summary**: Overall pass/fail statistics and timing\n- **Tests fixed**: Count of test issues fixed automatically\n- **Bugs logged**: Product bugs reported to issue tracker\n- **Manual checklist**: Summary of manual verification items\n- **Recommendation**: Safe to merge / Review required / Do not merge\n- **Test artifacts**: Links to reports, traces, screenshots\n\n**Communication strategy based on trigger**:\n- **Slack**: Post concise message with expandable details in thread\n- **Manual**: Full detailed report in terminal\n- **GitHub PR**: Comprehensive PR comment with tables and checklists\n- **CI/CD**: Build log output + optional Slack notification for critical failures\n\n**Update team communicator memory**:\n- Record verification communication\n- Track response patterns by trigger type\n- Document team preferences for detail level\n- Note which team members respond to which types of issues`\n }\n ],\n requiredSubagents: ['test-runner', 'test-debugger-fixer']\n};\n","/**\n * Tasks Module\n * Central registry and utilities for all task templates\n */\n\n// Export types and constants\nexport * from './types';\nexport * from './constants';\n\n// Import all task templates\nimport { exploreApplicationTask } from './library/explore-application';\nimport { generateTestCasesTask } from './library/generate-test-cases';\nimport { generateTestPlanTask } from './library/generate-test-plan';\nimport { handleMessageTask } from './library/handle-message';\nimport { processEventTask } from './library/process-event';\nimport { runTestsTask } from './library/run-tests';\nimport { verifyChangesTask } from './library/verify-changes';\n\nimport type { TaskTemplate } from './types';\nimport { TASK_SLUGS } from './constants';\n\n/**\n * Task Templates Registry\n * Single source of truth for all available tasks\n */\nexport const TASK_TEMPLATES: Record<string, TaskTemplate> = {\n [TASK_SLUGS.EXPLORE_APPLICATION]: exploreApplicationTask,\n [TASK_SLUGS.GENERATE_TEST_CASES]: generateTestCasesTask,\n [TASK_SLUGS.GENERATE_TEST_PLAN]: generateTestPlanTask,\n [TASK_SLUGS.HANDLE_MESSAGE]: handleMessageTask,\n [TASK_SLUGS.PROCESS_EVENT]: processEventTask,\n [TASK_SLUGS.RUN_TESTS]: runTestsTask,\n [TASK_SLUGS.VERIFY_CHANGES]: verifyChangesTask,\n};\n\n/**\n * Get task template by slug\n */\nexport function getTaskTemplate(slug: string): TaskTemplate | undefined {\n return TASK_TEMPLATES[slug];\n}\n\n/**\n * Get all registered task slugs\n */\nexport function getAllTaskSlugs(): string[] {\n return Object.keys(TASK_TEMPLATES);\n}\n\n/**\n * Check if a task slug is registered\n */\nexport function isTaskRegistered(slug: string): boolean {\n return TASK_TEMPLATES[slug] !== undefined;\n}\n\n/**\n * Slash Command Configuration for Cloud Run\n * Format expected by cloudrun-claude-code API\n */\nexport interface SlashCommandConfig {\n frontmatter: Record<string, any>;\n content: string;\n}\n\n/**\n * Build slash commands configuration for Cloud Run\n * Converts task templates to the format expected by cloudrun-claude-code API\n *\n * @param slugs - Array of task slugs to include\n * @returns Record of slash command configurations\n */\nexport function buildSlashCommandsConfig(slugs: string[]): Record<string, SlashCommandConfig> {\n const configs: Record<string, SlashCommandConfig> = {};\n\n for (const slug of slugs) {\n const task = TASK_TEMPLATES[slug];\n if (!task) {\n console.warn(`Unknown task slug: 
${slug}, skipping`);\n continue;\n }\n\n configs[slug] = {\n frontmatter: task.frontmatter,\n content: task.baseContent,\n };\n\n console.log(`✓ Added slash command: /${slug}`);\n }\n\n return configs;\n}\n\n/**\n * Get required MCP servers from task templates\n * Extracts MCP requirements from task slugs\n *\n * @param slugs - Array of task slugs\n * @returns Array of required MCP server names\n */\nexport function getRequiredMCPsFromTasks(slugs: string[]): string[] {\n const mcps = new Set<string>();\n\n for (const slug of slugs) {\n const task = TASK_TEMPLATES[slug];\n if (!task) continue;\n\n // Extract MCPs from required subagents\n for (const subagent of task.requiredSubagents) {\n // Map subagent roles to MCPs\n const mcpMap: Record<string, string> = {\n 'test-runner': 'playwright',\n 'team-communicator': 'slack',\n 'documentation-researcher': 'notion',\n 'issue-tracker': 'linear',\n };\n\n const mcp = mcpMap[subagent];\n if (mcp) {\n mcps.add(mcp);\n }\n }\n }\n\n return Array.from(mcps);\n}\n"],"mappings":";AAOO,IAAM,aAAa;AAAA,EACxB,qBAAqB;AAAA,EACrB,qBAAqB;AAAA,EACrB,oBAAoB;AAAA,EACpB,gBAAgB;AAAA,EAChB,eAAe;AAAA,EACf,WAAW;AAAA,EACX,gBAAgB;AAClB;;;ACTO,IAAM,2BAA2B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACAjC,IAAM,mCAAmC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAyBzC,IAAM,qCAAqC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACrB3C,IAAM,yBAAuC;AAAA,EACjD,MAAM,WAAW;AAAA,EACjB,MAAM;AAAA,EACN,aAAa;AAAA,EAEb,aAAa;AAAA,IACV,aAAa;AAAA,IACb,iBAAiB;AAAA,EACpB;AAAA,EAEA,aAAa;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAoBd,gCAAgC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAkBhC,wBAAwB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;A
AAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA4QxB,kCAAkC;AAAA,EAEjC,mBAAmB,CAAC;AAAA,EACpB,mBAAmB,CAAC,aAAa;AACpC;;;ACpUO,IAAM,6BAA6B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACKnC,IAAM,wBAAsC;AAAA,EAChD,MAAM,WAAW;AAAA,EACjB,MAAM;AAAA,EACN,aAAa;AAAA,EAEb,aAAa;AAAA,IACV,aAAa;AAAA,IACb,iBAAiB;AAAA,EACpB;AAAA,EAEA,aAAa;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA2Bd,gCAAgC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA4BhC,yBAAyB,QAAQ,oBAAoB,KAAK,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAM3D,2BAA2B,QAAQ,oBAAoB,KAAK,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA0L7D,kCAAkC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA0CjC,mBAAmB;AAAA,IAChB;AAAA,MACG,MAAM;AAAA,MACN,cAAc;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAcjB;AAAA,IACA;AAAA,MACG,MAAM;AAAA,MACN,cAAc;AAAA;A
AAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAgCjB;AAAA,EACH;AAAA,EACA,mBAAmB,CAAC,eAAe,qBAAqB;AAC3D;;;AClWO,IAAM,uBAAqC;AAAA,EAChD,MAAM,WAAW;AAAA,EACjB,MAAM;AAAA,EACN,aAAa;AAAA,EAEb,aAAa;AAAA,IACX,aAAa;AAAA,IACb,iBAAiB;AAAA,EACnB;AAAA,EAEA,aAAa;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAcb,gCAAgC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAwBhC,yBAAyB,QAAQ,oBAAoB,KAAK,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAM3D,2BAA2B,QAAQ,oBAAoB,KAAK,CAAC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA4J7D,kCAAkC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYlC,mBAAmB;AAAA,IACjB;AAAA,MACE,MAAM;AAAA,MACN,cAAc;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAyBhB;AAAA,IACA;AAAA,MACE,MAAM;AAAA,MACN,cAAc;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAwBhB;AAAA,EACF;AAAA,EACA,mBAAmB,CAAC,aAAa;AACnC;;;ACtRO,IAAM,oBAAkC;AAAA,EAC5C,MAAM,WAAW;AAAA,EACjB,MAAM;AAAA,EACN,aAAa;AAAA,EAEb,aAAa;AAAA,IACV,aAAa;AAAA,IACb,iBAAiB;AAAA,EACpB;AAAA,EAEA,aAAa;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAcd,gCAAgC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAmDhC,kCAAkC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA8CjC,mBAAmB,CAAC;AAAA,EACpB,mBAAmB,CAAC,mBAAmB;AAC1C;;;AC/HO,IAAM,mBAAiC;AAAA,EAC5C,MAAM,WAAW;AAAA,EACjB,MAAM;AAAA,EACN,aAAa;AAAA,EAEb,aAAa;AAAA,IACX,aAAa;AAAA,IACb,iBAAiB;AAAA,EACnB;AAAA,EAEA,aAAa;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAcb,gCAAgC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAA
A;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EA0UhC,kCAAkC;AAAA,EAElC,mBAAmB;AAAA,IACjB;AAAA,MACE,MAAM;AAAA,MACN,cAAc;AAAA;AAAA;AAAA;AAAA;AAAA,IAKhB;AAAA,IACA;AAAA,MACE,MAAM;AAAA,MACN,cAAc;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAmBhB;AAAA,EACF;AAAA,EACA,mBAAmB,CAAC;AAAA,EACpB,gBAAgB,CAAC,gBAAgB;AACnC;;;ACtYO,IAAM,eAA6B;AAAA,EACxC,MAAM,WAAW;AAAA,EACjB,MAAM;AAAA,EACN,aAAa;AAAA,EAEb,aAAa;AAAA,IACX,aAAa;AAAA,IACb,iBAAiB;AAAA,EACnB;AAAA,EAEA,aAAa;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAsBb,gCAAgC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAwPhC,kCAAkC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAqDlC,mBAAmB;AAAA,IACjB;AAAA,MACE,MAAM;AAAA,MACN,cAAc;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IA2EhB;AAAA,IACA;AAAA,MACE,M
AAM;AAAA,MACN,cAAc;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAmChB;AAAA,EACF;AAAA,EACA,mBAAmB,CAAC,eAAe,qBAAqB;AAC1D;;;ACjcO,IAAM,oBAAkC;AAAA,EAC7C,MAAM,WAAW;AAAA,EACjB,MAAM;AAAA,EACN,aAAa;AAAA,EAEb,aAAa;AAAA,IACX,aAAa;AAAA,IACb,iBAAiB;AAAA,EACnB;AAAA,EAEA,aAAa;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAuBb,gCAAgC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;A
AAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAqxBhC,kCAAkC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAiClC,mBAAmB;AAAA,IACjB;AAAA,MACE,MAAM;AAAA,MACN,cAAc;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAgChB;AAAA,IACA;AAAA,MACE,MAAM;AAAA,MACN,cAAc;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAgDhB;AAAA,IACA;AAAA,MACE,MAAM;AAAA,MACN,cAAc;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAiChB;AAAA,EACF;AAAA,EACA,mBAAmB,CAAC,eAAe,qBAAqB;AAC1D;;;ACv8BO,IAAM,iBAA+C;AAAA,EAC1D,CAAC,WAAW,mBAAmB,GAAG;AAAA,EAClC,CAAC,WAAW,mBAAmB,GAAG;AAAA,EAClC,CAAC,WAAW,kBAAkB,GAAG;AAAA,EACjC,CAAC,WAAW,cAAc,GAAG;AAAA,EAC7B,CAAC,WAAW,aAAa,GAAG;AAAA,EAC5B,CAAC,WAAW,SAAS,GAAG;AAAA,EACxB,CAAC,WAAW,cAAc,GAAG;AAC/B;AAKO,SAAS,gBAAgB,MAAwC;AACtE,SAAO,eAAe,IAAI;AAC5B;AAKO,SAAS,kBAA4B;AAC1C,SAAO,OAAO,KAAK,cAAc;AACnC;AAKO,SAAS,iBAAiB,MAAuB;AACtD,SAAO,eAAe,IAAI,MAAM;AAClC;AAkBO,SAAS,yBAAyB,OAAqD;AAC5F,QAAM,UAA8C,CAAC;AAErD,aAAW,QAAQ,OAAO;AACxB,UAAM,OAAO,eAAe,IAAI;AAChC,QAAI,CAAC,MAAM;AACT,cAAQ,KAAK,sBAAsB,IAAI,YAAY;AACnD;AAAA,IACF;AAEA,YAAQ,IAAI,IAAI;AAAA,MACd,aAAa,KAAK;AAAA,MAClB,SAAS,KAAK;AAAA,IAChB;AAEA,YAAQ,IAAI,gCAA2B,IAAI,EAAE;AAAA,EAC/C;AAEA,SAAO;AACT;AASO,SAAS,yBAAyB,OAA2B;AAClE,QAAM,OAAO,oBAAI,IAAY;AAE7B,aAAW,QAAQ,OAAO;AACxB,UAAM,OAAO,eAAe,IAAI;AAChC,QAAI,CAAC,KAAM;AAGX,eAAW,YAAY,KAAK,mBAAmB;AAE7C,YAAM,SAAiC;AAAA,QACrC,eAAe;AAAA,QACf,qBAAqB;AAAA,QACrB,4BAA4B;AAAA,QAC5B,iBAAiB;AAAA,MACnB;AAEA,YAAM,MAAM,OAAO,QAAQ;AAC3B,UAAI,KAAK;AACP,aAAK,IAAI,GAAG;AAAA,MACd;AAAA,IACF;AAAA,EACF;AAEA,SAAO,MAAM,KAAK,IAAI;AACxB;","names":[]}
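The registry helpers embedded above (\`buildSlashCommandsConfig\`, \`getRequiredMCPsFromTasks\`) can be exercised as follows — a minimal sketch, assuming the package exposes the tasks entry point as a subpath export (the import path is hypothetical):

\`\`\`typescript
// Hypothetical import path - adjust to however the package exposes dist/tasks
import { buildSlashCommandsConfig, getAllTaskSlugs, getRequiredMCPsFromTasks } from '@bugzy-ai/bugzy/tasks';

const slugs = getAllTaskSlugs();                  // ['explore-application', 'run-tests', ...]
const commands = buildSlashCommandsConfig(slugs); // slash-command configs for Cloud Run
const mcps = getRequiredMCPsFromTasks(slugs);     // e.g. ['playwright', 'slack', 'notion', 'linear']
console.log(Object.keys(commands), mcps);
\`\`\`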
1
+ {"version":3,"sources":["../../src/tasks/steps/types.ts","../../src/tasks/constants.ts","../../src/tasks/library/generate-test-cases.ts","../../src/tasks/library/generate-test-plan.ts","../../src/tasks/library/handle-message.ts","../../src/tasks/library/process-event.ts","../../src/tasks/library/run-tests.ts","../../src/tasks/library/verify-changes.ts","../../src/tasks/library/onboard-testing.ts","../../src/tasks/library/explore-application.ts","../../src/tasks/index.ts"],"sourcesContent":["/**\n * Step Module Types\n * Type definitions for atomic, composable task steps\n */\n\nimport type { TaskFrontmatter } from '../types';\n\n/**\n * Step category for organization and filtering\n */\nexport type StepCategory =\n | 'security' // Security notices and warnings\n | 'setup' // Loading context, reading artifacts\n | 'exploration' // Exploring the application\n | 'clarification' // Handling ambiguity and questions\n | 'execution' // Running tests, parsing results\n | 'generation' // Creating test plans, cases, code\n | 'communication' // Team notifications\n | 'maintenance'; // Knowledge base updates, cleanup\n\n/**\n * TaskStep - Atomic, reusable unit of work within a task\n *\n * Steps are the building blocks of composed tasks. Each step represents\n * a discrete piece of work with clear instructions.\n */\nexport interface TaskStep {\n /**\n * Unique identifier for the step (kebab-case)\n * Examples: 'read-knowledge-base', 'triage-failures', 'run-playwright-tests'\n */\n id: string;\n\n /**\n * Human-readable step title (used in generated markdown headers)\n * Examples: 'Read Knowledge Base', 'Triage Failed Tests'\n */\n title: string;\n\n /**\n * Step category for organization\n */\n category: StepCategory;\n\n /**\n * Step content - the actual instructions as markdown string.\n *\n * Supported placeholders:\n * - {{STEP_NUMBER}} - Auto-replaced with computed step number during assembly\n * - {{INVOKE_*}} - Subagent invocation placeholders (e.g., {{INVOKE_TEST_DEBUGGER_FIXER}})\n * - $ARGUMENTS - Task arguments from user input\n */\n content: string;\n\n /**\n * Optional subagent role this step requires to be included.\n * If specified, step is only included when this subagent is configured.\n *\n * Use for steps that make no sense without the subagent.\n * Example: 'log-product-bugs' step requires 'issue-tracker' subagent\n */\n requiresSubagent?: string;\n\n /**\n * Subagent roles that this step invokes (for MCP derivation).\n * Different from requiresSubagent - this lists subagents the step calls\n * via {{INVOKE_*}} placeholders, not what makes the step available.\n */\n invokesSubagents?: string[];\n\n /**\n * Tags for categorization, filtering, and step discovery.\n * Examples: ['setup', 'execution', 'optional', 'triage']\n */\n tags?: string[];\n}\n\n/**\n * StepReferenceObject - Reference to a step in STEP_LIBRARY with per-usage configuration\n */\nexport interface StepReferenceObject {\n /**\n * The step ID to include from STEP_LIBRARY\n */\n stepId: string;\n\n /**\n * Override the step title for this specific usage\n */\n title?: string;\n\n /**\n * Additional content to append after the step\n */\n appendContent?: string;\n\n /**\n * Make this step conditional on a subagent being configured.\n * Different from step's requiresSubagent - this is per-task configuration.\n */\n conditionalOnSubagent?: string;\n}\n\n/**\n * InlineStep - Step with body defined directly in the task definition\n * Use for task-specific content like headers, arguments, or unique steps\n */\nexport 
interface InlineStep {\n /**\n * Discriminator to identify inline steps\n */\n inline: true;\n\n /**\n * Step title (becomes ### Step N: {title})\n */\n title: string;\n\n /**\n * Step body content (markdown)\n */\n content: string;\n\n /**\n * Optional category for metadata/filtering\n */\n category?: StepCategory;\n\n /**\n * Make this step conditional on a subagent being configured\n */\n conditionalOnSubagent?: string;\n}\n\n/**\n * StepReference - How tasks reference steps in their composition\n *\n * Can be:\n * - Simple string (step ID from STEP_LIBRARY)\n * - StepReferenceObject (reference with overrides)\n * - InlineStep (step with body defined inline)\n */\nexport type StepReference = string | StepReferenceObject | InlineStep;\n\n\n/**\n * ComposedTaskTemplate - Task built from step composition\n *\n * This is the new task format that replaces monolithic baseContent strings\n * with an array of step references.\n */\nexport interface ComposedTaskTemplate {\n /**\n * Unique task identifier (kebab-case)\n */\n slug: string;\n\n /**\n * Human-readable task name\n */\n name: string;\n\n /**\n * Brief task description\n */\n description: string;\n\n /**\n * Frontmatter for slash command generation\n */\n frontmatter: TaskFrontmatter;\n\n /**\n * Ordered list of step references that compose this task.\n * Steps are assembled in order with auto-generated step numbers.\n */\n steps: StepReference[];\n\n /**\n * Required subagents - task fails to build without these.\n * Instructions for required subagents should be embedded in step content.\n */\n requiredSubagents: string[];\n\n /**\n * Optional subagents - enhance task when configured.\n * Steps using these are conditionally included.\n */\n optionalSubagents?: string[];\n\n /**\n * Task slugs that can be invoked during execution.\n */\n dependentTasks?: string[];\n}\n\n/**\n * Normalized step reference (internal use for library steps)\n */\nexport interface NormalizedStepReference {\n stepId: string;\n title?: string;\n appendContent?: string;\n conditionalOnSubagent?: string;\n}\n\n/**\n * Type guard to check if a StepReference is an InlineStep\n */\nexport function isInlineStep(ref: StepReference): ref is InlineStep {\n return typeof ref === 'object' && 'inline' in ref && ref.inline === true;\n}\n\n/**\n * Type guard to check if a StepReference is a StepReferenceObject\n */\nexport function isStepReferenceObject(ref: StepReference): ref is StepReferenceObject {\n return typeof ref === 'object' && 'stepId' in ref;\n}\n\n/**\n * Normalize a step reference to its full object form (for library steps only)\n * Returns null for inline steps - use isInlineStep to check first\n */\nexport function normalizeStepReference(ref: StepReference): NormalizedStepReference | null {\n if (isInlineStep(ref)) {\n return null; // Inline steps don't normalize to NormalizedStepReference\n }\n if (typeof ref === 'string') {\n return { stepId: ref };\n }\n return ref as StepReferenceObject;\n}\n","/**\n * Task Slug Constants\n * Single source of truth for all task identifiers\n *\n * These constants should be used throughout the codebase instead of hardcoded strings\n * to ensure type safety and prevent typos.\n */\nexport const TASK_SLUGS = {\n EXPLORE_APPLICATION: 'explore-application',\n ONBOARD_TESTING: 'onboard-testing',\n GENERATE_TEST_CASES: 'generate-test-cases',\n GENERATE_TEST_PLAN: 'generate-test-plan',\n HANDLE_MESSAGE: 'handle-message',\n PROCESS_EVENT: 'process-event',\n RUN_TESTS: 'run-tests',\n VERIFY_CHANGES: 'verify-changes',\n /** 
@deprecated Use ONBOARD_TESTING instead */\n FULL_TEST_COVERAGE: 'onboard-testing',\n} as const;\n\n/**\n * Type for task slugs\n * Ensures only valid task slugs can be used\n */\nexport type TaskSlug = typeof TASK_SLUGS[keyof typeof TASK_SLUGS];\n","/**\n * Generate Test Cases Task (Composed)\n * Generate both manual test case documentation AND automated Playwright test scripts\n */\n\nimport type { ComposedTaskTemplate } from '../steps/types';\nimport { TASK_SLUGS } from '../constants';\n\nexport const generateTestCasesTask: ComposedTaskTemplate = {\n slug: TASK_SLUGS.GENERATE_TEST_CASES,\n name: 'Generate Test Cases',\n description: 'Generate manual test case documentation AND automated Playwright test scripts from test plan',\n\n frontmatter: {\n description: 'Generate manual test case documentation AND automated Playwright test scripts from test plan',\n 'argument-hint': '--type [exploratory|functional|regression|smoke] --focus [optional-feature]',\n },\n\n steps: [\n // Step 1: Overview (inline)\n {\n inline: true,\n title: 'Generate Test Cases Overview',\n content: `Generate comprehensive test artifacts including BOTH manual test case documentation AND automated Playwright test scripts.\n\nThis command generates:\n1. **Manual Test Case Documentation** (in \\`./test-cases/\\`) - Human-readable test cases in markdown format\n2. **Automated Playwright Tests** (in \\`./tests/specs/\\`) - Executable TypeScript test scripts\n3. **Page Object Models** (in \\`./tests/pages/\\`) - Reusable page classes for automated tests\n4. **Supporting Files** (fixtures, helpers, components) - As needed for test automation`,\n },\n // Step 2: Security Notice (library)\n 'security-notice',\n // Step 3: Arguments (inline)\n {\n inline: true,\n title: 'Arguments',\n content: `Arguments: $ARGUMENTS\n\n**Parse Arguments:**\nExtract the following from arguments:\n- **type**: Test type (exploratory, functional, regression, smoke) - defaults to functional\n- **focus**: Optional specific feature or section to focus on`,\n },\n // Step 4: Knowledge Base Read (library)\n 'read-knowledge-base',\n // Step 5: Gather Context (inline)\n {\n inline: true,\n title: 'Gather Context',\n content: `**1.1 Read Test Plan**\nRead the test plan from \\`test-plan.md\\` to understand:\n- Test items and features\n- Testing approach and automation strategy\n- Test Automation Strategy section (automated vs exploratory)\n- Pass/fail criteria\n- Test environment and data requirements\n- Automation decision criteria\n\n**1.2 Check Existing Test Cases and Tests**\n- List all files in \\`./test-cases/\\` to understand existing manual test coverage\n- List all files in \\`./tests/specs/\\` to understand existing automated tests\n- Determine next test case ID (TC-XXX format)\n- Identify existing Page Objects in \\`./tests/pages/\\`\n- Avoid creating overlapping test cases or duplicate automation`,\n },\n // Step 6: Documentation Researcher (conditional inline)\n {\n inline: true,\n title: 'Gather Product Documentation',\n content: `{{INVOKE_DOCUMENTATION_RESEARCHER}} to gather comprehensive product documentation:\n\n\\`\\`\\`\nExplore all available product documentation, specifically focusing on:\n- UI elements and workflows\n- User interactions and navigation paths\n- Form fields and validation rules\n- Error messages and edge cases\n- Authentication and authorization flows\n- Business rules and constraints\n- API endpoints for test data setup\n\\`\\`\\``,\n conditionalOnSubagent: 'documentation-researcher',\n },\n // Step 7: Exploration 
Protocol (from library)\n 'exploration-protocol',\n // Step 8: Clarification Protocol (from library)\n 'clarification-protocol',\n // Step 9: Organize Test Scenarios (inline - task-specific)\n {\n inline: true,\n title: 'Organize Test Scenarios by Area',\n content: `Based on exploration and documentation, organize test scenarios by feature area/component:\n\n**Group scenarios into areas** (e.g., Authentication, Dashboard, Checkout, Profile Management):\n- Each area should be a logical feature grouping\n- Areas should be relatively independent for parallel test execution\n- Consider the application's navigation structure and user flows\n\n**For each area, identify scenarios**:\n\n1. **Critical User Paths** (must automate as smoke tests):\n - Login/authentication flows\n - Core feature workflows\n - Data creation/modification flows\n - Critical business transactions\n\n2. **Happy Path Scenarios** (automate for regression):\n - Standard user workflows\n - Common use cases\n - Typical data entry patterns\n\n3. **Error Handling Scenarios** (evaluate automation ROI):\n - Validation error messages\n - Network error handling\n - Permission/authorization errors\n\n4. **Edge Cases** (consider manual testing):\n - Rare scenarios (<1% occurrence)\n - Complex exploratory scenarios\n - Visual/UX validation requiring judgment\n - Features in heavy flux\n\n**Output**: Test scenarios organized by area with automation decisions for each\n\nExample structure:\n- **Authentication**: TC-001 Valid login (smoke, automate), TC-002 Invalid password (automate), TC-003 Password reset (automate)\n- **Dashboard**: TC-004 View dashboard widgets (smoke, automate), TC-005 Filter data by date (automate), TC-006 Export data (manual - rare use)`,\n },\n // Step 10: Generate Manual Test Cases (inline)\n {\n inline: true,\n title: 'Generate All Manual Test Case Files',\n content: `Generate ALL manual test case markdown files in the \\`./test-cases/\\` directory BEFORE invoking the test-code-generator agent.\n\n**For each test scenario from the previous step:**\n\n1. **Create test case file** in \\`./test-cases/\\` with format \\`TC-XXX-feature-description.md\\`\n2. **Include frontmatter** with:\n - \\`id:\\` TC-XXX (sequential ID)\n - \\`title:\\` Clear, descriptive title\n - \\`automated:\\` true/false (based on automation decision)\n - \\`automated_test:\\` (leave empty - will be filled by subagent when automated)\n - \\`type:\\` exploratory/functional/regression/smoke\n - \\`area:\\` Feature area/component\n3. 
**Write test case content**:\n - **Objective**: Clear description of what is being tested\n - **Preconditions**: Setup requirements, test data needed\n - **Test Steps**: Numbered, human-readable steps\n - **Expected Results**: What should happen at each step\n - **Test Data**: Environment variables to use (e.g., \\${TEST_BASE_URL}, \\${TEST_OWNER_EMAIL})\n - **Notes**: Any assumptions, clarifications needed, or special considerations\n\n**Output**: All manual test case markdown files created in \\`./test-cases/\\` with automation flags set`,\n },\n // Step 11: Automate Test Cases (inline - detailed instructions for test-code-generator)\n {\n inline: true,\n title: 'Automate Test Cases Area by Area',\n content: `**IMPORTANT**: Process each feature area separately to enable incremental, focused test creation.\n\n**For each area**, invoke the test-code-generator agent:\n\n**Prepare Area Context:**\nBefore invoking the agent, identify the test cases for the current area:\n- Current area name\n- Test case files for this area (e.g., TC-001-valid-login.md, TC-002-invalid-password.md)\n- Which test cases are marked for automation (automated: true)\n- Test type from arguments\n- Test plan reference: test-plan.md\n- Existing automated tests in ./tests/specs/\n- Existing Page Objects in ./tests/pages/\n\n**Invoke test-code-generator Agent:**\n\n{{INVOKE_TEST_CODE_GENERATOR}} for the current area with the following context:\n\n\"Automate test cases for the [AREA_NAME] area.\n\n**Context:**\n- Area: [AREA_NAME]\n- Manual test case files to automate: [list TC-XXX files marked with automated: true]\n- Test type: {type}\n- Test plan: test-plan.md\n- Manual test cases directory: ./test-cases/\n- Existing automated tests: ./tests/specs/\n- Existing Page Objects: ./tests/pages/\n\n**The agent should:**\n1. Read the manual test case files for this area\n2. Check existing Page Object infrastructure for this area\n3. Explore the feature area to understand implementation (gather selectors, URLs, flows)\n4. Build missing Page Objects and supporting code\n5. For each test case marked \\`automated: true\\`:\n - Create automated Playwright test in ./tests/specs/\n - Update the manual test case file to reference the automated test path\n6. Run and iterate on each test until it passes or fails with a product bug\n7. 
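A rough sketch of the per-area context bundle described above (illustrative only - this is not the actual agent invocation API; field names mirror the bullet list):

\`\`\`typescript
// Illustrative: the context prepared before each test-code-generator invocation.
interface AreaAutomationContext {
  area: string;            // e.g. "Authentication"
  testCaseFiles: string[]; // all TC-XXX files for this area
  toAutomate: string[];    // the subset marked automated: true
  testType: string;        // from the --type argument
  testPlan: string;        // 'test-plan.md'
  specsDir: string;        // './tests/specs/'
  pagesDir: string;        // './tests/pages/'
}
\`\`\`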
Update .env.testdata with any new variables\n\n**Focus only on the [AREA_NAME] area** - do not automate tests for other areas yet.\"\n\n**Verify Area Completion:**\nAfter the agent completes the area, verify:\n- Manual test case files updated with automated_test references\n- Automated tests created for all test cases marked automated: true\n- Tests are passing (or failing with documented product bugs)\n- Page Objects created/updated for the area\n\n**Repeat for Next Area:**\nMove to the next area and repeat until all areas are complete.\n\n**Benefits of area-by-area approach**:\n- Agent focuses on one feature at a time\n- POMs built incrementally as needed\n- Tests verified before moving to next area\n- Easier to manage and track progress\n- Can pause/resume between areas if needed`,\n },\n // Step 12: Validate Artifacts (library)\n 'validate-test-artifacts',\n // Step 13: Create Directories (inline)\n {\n inline: true,\n title: 'Create Directories if Needed',\n content: `Ensure required directories exist:\n\\`\\`\\`bash\nmkdir -p ./test-cases\nmkdir -p ./tests/specs\nmkdir -p ./tests/pages\nmkdir -p ./tests/components\nmkdir -p ./tests/fixtures\nmkdir -p ./tests/helpers\n\\`\\`\\``,\n },\n // Step 14: Extract Env Variables (library)\n 'extract-env-variables',\n // Step 15: Knowledge Base Update (library)\n 'update-knowledge-base',\n // Step 16: Team Communication (conditional inline)\n {\n inline: true,\n title: 'Team Communication',\n content: `{{INVOKE_TEAM_COMMUNICATOR}} to notify the product team about the new test cases and automated tests:\n\n\\`\\`\\`\n1. Post an update about test case and automation creation\n2. Provide summary of coverage:\n - Number of manual test cases created\n - Number of automated tests created\n - Features covered by automation\n - Areas kept manual-only (and why)\n3. Highlight key automated test scenarios\n4. Share command to run automated tests: npx playwright test\n5. Ask for team review and validation\n6. Mention any areas needing exploration or clarification\n7. 
Use appropriate channel and threading for the update\n\`\`\`\n\nThe team communication should include:\n- **Test artifacts created**: Manual test cases + automated tests count\n- **Automation coverage**: Which features are now automated\n- **Manual-only areas**: Why some tests are kept manual (rare scenarios, exploratory)\n- **Key automated scenarios**: Critical paths now covered by automation\n- **Running tests**: Command to execute automated tests\n- **Review request**: Ask team to validate scenarios and review test code\n- **Next steps**: Plans for CI/CD integration or additional test coverage\n\n**Update team communicator memory:**\n- Record this communication\n- Note test case and automation creation\n- Track team feedback on automation approach\n- Document any clarifications requested\`,\n conditionalOnSubagent: 'team-communicator',\n },\n // Step 17: Final Summary (inline)\n {\n inline: true,\n title: 'Final Summary',\n content: \`Provide a comprehensive summary showing:\n\n**Manual Test Cases:**\n- Number of manual test cases created\n- List of test case files with IDs and titles\n- Automation status for each (automated: yes/no)\n\n**Automated Tests:**\n- Number of automated test scripts created\n- List of spec files with test counts\n- Page Objects created or updated\n- Fixtures and helpers added\n\n**Test Coverage:**\n- Features covered by manual tests\n- Features covered by automated tests\n- Areas kept manual-only (and why)\n\n**Next Steps:**\n- Command to run automated tests: \`npx playwright test\`\n- Instructions to run specific test file\n- Note about copying .env.testdata to .env\n- Mention any exploration needed for edge cases\n\n**Important Notes:**\n- **Both Manual AND Automated**: Generate both artifacts - they serve different purposes\n- **Manual Test Cases**: Documentation, reference, can be executed manually when needed\n- **Automated Tests**: Fast, repeatable, for CI/CD and regression testing\n- **Automation Decision**: Not all test cases need automation - rare edge cases can stay manual\n- **Linking**: Manual test cases reference automated tests; automated tests reference manual test case IDs\n- **Two-Phase Workflow**: First generate all manual test cases, then automate area-by-area\n- **Ambiguity Handling**: Use exploration and clarification protocols before generating\n- **Environment Variables**: Use \`process.env.VAR_NAME\` in tests, update .env.testdata as needed\n- **Test Independence**: Each test must be runnable in isolation and in parallel\`,\n },\n ],\n\n requiredSubagents: ['test-runner', 'test-code-generator'],\n optionalSubagents: ['documentation-researcher', 'team-communicator'],\n dependentTasks: [],\n};\n","/**\n * Generate Test Plan Task (Composed)\n * Generate a concise feature checklist test plan from product description\n */\n\nimport type { ComposedTaskTemplate } from '../steps/types';\nimport { TASK_SLUGS } from '../constants';\n\nexport const generateTestPlanTask: ComposedTaskTemplate = {\n slug: TASK_SLUGS.GENERATE_TEST_PLAN,\n name: 'Generate Test Plan',\n description: 'Generate a concise feature checklist test plan from product description',\n\n frontmatter: {\n description: 'Generate a concise feature checklist test plan (~50-100 lines)',\n 'argument-hint': '<product-description>',\n },\n\n steps: [\n // Step 1: Overview (inline)\n {\n inline: true,\n title: 'Generate Test Plan Overview',\n content: \`Generate a concise feature checklist test plan (~50-100 lines) from the product description. The plan serves as the input for test case generation.\`,\n },\n // Step 2: Security Notice 
(library)\n 'security-notice',\n // Step 3: Arguments (inline)\n {\n inline: true,\n title: 'Arguments',\n content: `Product description: $ARGUMENTS`,\n },\n // Step 4: Knowledge Base Read (library)\n 'read-knowledge-base',\n // Step 5: Load Project Context (library)\n 'load-project-context',\n // Step 6: Process Description (inline)\n {\n inline: true,\n title: 'Process the Product Description',\n content: `Use the product description provided directly in the arguments, enriched with project context understanding.`,\n },\n // Step 7: Initialize Env Tracking (inline)\n {\n inline: true,\n title: 'Initialize Environment Variables Tracking',\n content: `Create a list to track all TEST_ prefixed environment variables discovered throughout the process.`,\n },\n // Step 8: Documentation Researcher (conditional inline)\n {\n inline: true,\n title: 'Gather Comprehensive Project Documentation',\n content: `{{INVOKE_DOCUMENTATION_RESEARCHER}} to explore and gather all available project information and other documentation sources. This ensures the test plan is based on complete and current information.\n\n\\`\\`\\`\nExplore all available project documentation related to: $ARGUMENTS\n\nSpecifically gather:\n- Product specifications and requirements\n- User stories and acceptance criteria\n- Technical architecture documentation\n- API documentation and endpoints\n- User roles and permissions\n- Business rules and validations\n- UI/UX specifications\n- Known limitations or constraints\n- Existing test documentation\n- Bug reports or known issues\n\\`\\`\\`\n\nThe agent will:\n1. Check its memory for previously discovered documentation\n2. Explore workspace for relevant pages and databases\n3. Build a comprehensive understanding of the product\n4. Return synthesized information about all discovered documentation`,\n conditionalOnSubagent: 'documentation-researcher',\n },\n // Step 9: Exploration Protocol (from library)\n 'exploration-protocol',\n // Step 10: Clarification Protocol (from library)\n 'clarification-protocol',\n // Step 11: Prepare Context (inline)\n {\n inline: true,\n title: 'Prepare Test Plan Generation Context',\n content: `**After ensuring requirements are clear through exploration and clarification:**\n\nBased on the gathered information:\n- **goal**: Extract the main purpose and objectives from all available documentation\n- **knowledge**: Combine product description with discovered documentation insights\n- **testPlan**: Use the standard test plan template structure, enriched with documentation findings\n- **gaps**: Identify areas lacking documentation that will need exploration`,\n },\n // Step 12: Generate Test Plan (inline - more detailed than library step)\n {\n inline: true,\n title: 'Generate Test Plan Using Simplified Format',\n content: `You are an expert QA Test Plan Writer. 
Generate a **concise** test plan (~50-100 lines) that serves as a feature checklist for test case generation.\n\n**CRITICAL - Keep it Simple:**\n- The test plan is a **feature checklist**, NOT a comprehensive document\n- Detailed UI elements and exploration findings go to \\`./exploration-reports/\\`\n- Technical patterns and architecture go to \\`.bugzy/runtime/knowledge-base.md\\`\n- Process documentation stays in \\`.bugzy/runtime/project-context.md\\`\n\n**Writing Instructions:**\n- **Use Product Terminology:** Use exact feature names from the product description\n- **Feature Checklist Format:** Each feature is a checkbox item with brief description\n- **Group by Feature Area:** Organize features into logical sections\n- **NO detailed UI elements** - those belong in exploration reports\n- **NO test scenarios** - those are generated in test cases\n- **NO process documentation** - keep only what's needed for test generation\n\n**Test Data Handling:**\n- Test data goes ONLY to \\`.env.testdata\\` file\n- In test plan, reference environment variable NAMES only (e.g., TEST_BASE_URL)\n- DO NOT generate values for env vars, only keys\n- Track all TEST_ variables for extraction to .env.testdata in the next step`,\n },\n // Step 13: Create Test Plan File (inline)\n {\n inline: true,\n title: 'Create Test Plan File',\n content: `Read the simplified template from \\`.bugzy/runtime/templates/test-plan-template.md\\` and fill it in:\n\n1. Read the template file\n2. Replace placeholders:\n - \\`[PROJECT_NAME]\\` with the actual project name\n - \\`[DATE]\\` with the current date\n - Feature sections with actual features grouped by area\n3. Each feature is a **checkbox item** with brief description\n4. **Mark ambiguities:**\n - MEDIUM: Mark with [ASSUMED: reason]\n - LOW: Mark with [TO BE EXPLORED: detail]\n5. Keep total document under 100 lines`,\n },\n // Step 14: Save Test Plan (inline)\n {\n inline: true,\n title: 'Save Test Plan',\n content: `Save to \\`test-plan.md\\` in project root. The template already includes frontmatter - just fill in the dates.`,\n },\n // Step 15: Extract Env Variables (inline - more detailed than library step)\n {\n inline: true,\n title: 'Extract and Save Environment Variables',\n content: `**CRITICAL**: Test data values must ONLY go to .env.testdata, NOT in the test plan document.\n\nAfter saving the test plan:\n\n1. **Parse the test plan** to find all TEST_ prefixed environment variables mentioned:\n - Look in the Testing Environment section\n - Search for any TEST_ variables referenced\n - Extract variables from configuration or setup sections\n - Common patterns include: TEST_BASE_URL, TEST_USER_*, TEST_API_*, TEST_ADMIN_*, etc.\n\n2. **Create .env.testdata file** with all discovered variables:\n \\`\\`\\`bash\n # Application Configuration\n TEST_BASE_URL=\n\n # Test User Credentials\n TEST_USER_EMAIL=\n TEST_USER_PASSWORD=\n TEST_ADMIN_EMAIL=\n TEST_ADMIN_PASSWORD=\n\n # API Configuration\n TEST_API_KEY=\n TEST_API_SECRET=\n\n # Other Test Data\n TEST_DB_NAME=\n TEST_TIMEOUT=\n \\`\\`\\`\n\n3. **Add helpful comments** for each variable group to guide users in filling values\n\n4. **Save the file** as \\`.env.testdata\\` in the project root\n\n5. 
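A minimal sketch of this extraction, assuming Node's built-in \`fs\` module (the regex and comment header are simplifications):

\`\`\`typescript
import { readFileSync, writeFileSync } from 'node:fs';

// Collect every TEST_-prefixed variable referenced in the test plan,
// then emit keys only (no values) to .env.testdata.
const plan = readFileSync('test-plan.md', 'utf8');
const vars = [...new Set(plan.match(/TEST_[A-Z0-9_]+/g) ?? [])].sort();
const body = '# Fill in actual values before running tests\n' +
  vars.map((v) => v + '=').join('\n') + '\n';
writeFileSync('.env.testdata', body);
\`\`\`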
**Verify test plan references .env.testdata**:\n - Ensure test plan DOES NOT contain test data values\n - Ensure test plan references \\`.env.testdata\\` for test data requirements\n - Add instruction: \"Fill in actual values in .env.testdata before running tests\"`,\n },\n // Step 16: Knowledge Base Update (library)\n 'update-knowledge-base',\n // Step 17: Team Communication (conditional inline)\n {\n inline: true,\n title: 'Team Communication',\n content: `{{INVOKE_TEAM_COMMUNICATOR}} to notify the product team about the new test plan:\n\n\\`\\`\\`\n1. Post an update about the test plan creation\n2. Provide a brief summary of coverage areas and key features\n3. Mention any areas that need exploration or clarification\n4. Ask for team review and feedback on the test plan\n5. Include a link or reference to the test-plan.md file\n6. Use appropriate channel and threading for the update\n\\`\\`\\`\n\nThe team communication should include:\n- **Test plan scope**: Brief overview of what will be tested\n- **Coverage highlights**: Key features and user flows included\n- **Areas needing clarification**: Any uncertainties discovered during documentation research\n- **Review request**: Ask team to review and provide feedback\n- **Next steps**: Mention plan to generate test cases after review\n\n**Update team communicator memory:**\n- Record this communication in the team-communicator memory\n- Note this as a test plan creation communication\n- Track team response to this type of update`,\n conditionalOnSubagent: 'team-communicator',\n },\n // Step 18: Final Summary (inline)\n {\n inline: true,\n title: 'Final Summary',\n content: `Provide a summary of:\n- Test plan created successfully at \\`test-plan.md\\`\n- Environment variables extracted to \\`.env.testdata\\`\n- Number of TEST_ variables discovered\n- Instructions for the user to fill in actual values in .env.testdata before running tests`,\n },\n ],\n\n requiredSubagents: ['test-runner'],\n optionalSubagents: ['documentation-researcher', 'team-communicator'],\n dependentTasks: [],\n};\n","/**\n * Handle Message Task (Composed)\n * Handle team responses and Slack communications, maintaining context for ongoing conversations\n *\n * Slack messages are processed by the LLM layer (lib/slack/llm-processor.ts)\n * which routes feedback/general chat to this task via the 'collect_feedback' action.\n * This task must be in SLACK_ALLOWED_TASKS to be Slack-callable.\n */\n\nimport type { ComposedTaskTemplate } from '../steps/types';\nimport { TASK_SLUGS } from '../constants';\n\nexport const handleMessageTask: ComposedTaskTemplate = {\n slug: TASK_SLUGS.HANDLE_MESSAGE,\n name: 'Handle Message',\n description: 'Handle team responses and Slack communications, maintaining context for ongoing conversations (LLM-routed)',\n\n frontmatter: {\n description: 'Handle team responses and Slack communications, maintaining context for ongoing conversations',\n 'argument-hint': '[slack thread context or team message]',\n },\n\n steps: [\n // Step 1: Overview (inline)\n {\n inline: true,\n title: 'Handle Message Overview',\n content: `# Handle Message Command\n\nProcess team responses from Slack threads and handle multi-turn conversations with the product team about testing clarifications, ambiguities, and questions.`,\n },\n // Step 2: Security Notice (library)\n 'security-notice',\n // Step 3: Arguments (inline)\n {\n inline: true,\n title: 'Arguments',\n content: `Team message/thread context: $ARGUMENTS`,\n },\n // Step 4: Knowledge Base Read (library)\n 
'read-knowledge-base',\n // Step 5: Detect Intent (inline - task-specific)\n {\n inline: true,\n title: 'Detect Message Intent and Load Handler',\n content: `Before processing the message, identify the intent type to load the appropriate handler.\n\n#### 0.1 Extract Intent from Event Payload\n\nCheck the event payload for the \\`intent\\` field provided by the LLM layer:\n- If \\`intent\\` is present, use it directly\n- Valid intent values: \\`question\\`, \\`feedback\\`, \\`status\\`\n\n#### 0.2 Fallback Intent Detection (if no intent provided)\n\nIf intent is not in the payload, detect from message patterns:\n\n| Condition | Intent |\n|-----------|--------|\n| Keywords: \"status\", \"progress\", \"how did\", \"results\", \"how many passed\" | \\`status\\` |\n| Keywords: \"bug\", \"issue\", \"broken\", \"doesn't work\", \"failed\", \"error\" | \\`feedback\\` |\n| Question words: \"what\", \"which\", \"do we have\", \"is there\" about tests/project | \\`question\\` |\n| Default (none of above) | \\`feedback\\` |\n\n#### 0.3 Load Handler File\n\nBased on detected intent, load the handler from:\n\\`.bugzy/runtime/handlers/messages/{intent}.md\\`\n\n**Handler files:**\n- \\`question.md\\` - Questions about tests, coverage, project details\n- \\`feedback.md\\` - Bug reports, test observations, general information\n- \\`status.md\\` - Status checks on test runs, task progress\n\n#### 0.4 Follow Handler Instructions\n\n**IMPORTANT**: The handler file is authoritative for this intent type.\n\n1. Read the handler file completely\n2. Follow its processing steps in order\n3. Apply its context loading requirements\n4. Use its response guidelines\n5. Perform any memory updates it specifies\n\nThe handler file contains all necessary processing logic for the detected intent type. 
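The fallback table above can be approximated with simple keyword matching; a hedged sketch (the real routing is LLM-driven, and the keyword lists are abbreviated):

\`\`\`typescript
type Intent = 'question' | 'feedback' | 'status';

// Approximates the fallback detection table above.
function detectIntent(text: string): Intent {
  const t = text.toLowerCase();
  if (/status|progress|how did|results|how many passed/.test(t)) return 'status';
  if (/bug|issue|broken|doesn't work|failed|error/.test(t)) return 'feedback';
  if (/\b(what|which|do we have|is there)\b/.test(t)) return 'question';
  return 'feedback'; // documented default
}

const handlerPath = (intent: Intent) =>
  '.bugzy/runtime/handlers/messages/' + intent + '.md';
\`\`\`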
Each handler includes:\n- Specific processing steps for that intent\n- Context loading requirements\n- Response guidelines\n- Memory update instructions\`,\n },\n // Step 6: Clarification Protocol (for ambiguous intents)\n 'clarification-protocol',\n // Step 7: Knowledge Base Update (library)\n 'update-knowledge-base',\n // Step 8: Key Principles (inline)\n {\n inline: true,\n title: 'Key Principles',\n content: \`## Key Principles\n\n### Context Preservation\n- Always maintain full conversation context\n- Link responses back to original uncertainties\n- Preserve reasoning chain for future reference\n\n### Actionable Responses\n- Convert team input into concrete actions\n- Don't let clarifications sit without implementation\n- Follow through on commitments made to team\n\n### Learning Integration\n- Each interaction improves our understanding\n- Build knowledge base of team preferences\n- Refine communication approaches over time\n\n### Quality Communication\n- Acknowledge team input appropriately\n- Provide updates on actions taken\n- Ask good follow-up questions when needed\`,\n },\n // Step 9: Important Considerations (inline)\n {\n inline: true,\n title: 'Important Considerations',\n content: \`## Important Considerations\n\n### Thread Organization\n- Keep related discussions in same thread\n- Start new threads for new topics\n- Maintain clear conversation boundaries\n\n### Response Timing\n- Acknowledge important messages promptly\n- Allow time for implementation before status updates\n- Don't spam team with excessive communications\n\n### Action Prioritization\n- Address urgent clarifications first\n- Batch related updates when possible\n- Focus on high-impact changes\n\n### Memory Maintenance\n- Keep active conversations visible and current\n- Archive resolved discussions appropriately\n- Maintain searchable history of resolutions\`,\n },\n ],\n\n requiredSubagents: ['team-communicator'],\n optionalSubagents: [],\n dependentTasks: [],\n};\n","/**\n * Process Event Task (Composed)\n * Process external system events (Jira, GitHub, Linear) using handler-defined rules to extract insights and track issues\n */\n\nimport type { ComposedTaskTemplate } from '../steps/types';\nimport { TASK_SLUGS } from '../constants';\n\nexport const processEventTask: ComposedTaskTemplate = {\n slug: TASK_SLUGS.PROCESS_EVENT,\n name: 'Process Event',\n description: 'Process external system events (Jira, GitHub, Linear) using handler-defined rules to extract insights and track issues',\n\n frontmatter: {\n description: 'Process external system events (Jira, GitHub, Linear) using handler-defined rules to extract insights and track issues',\n 'argument-hint': '[event payload or description]',\n },\n\n steps: [\n // Step 1: Overview (inline)\n {\n inline: true,\n title: 'Process Event Overview',\n content: \`# Process Event Command\n\nProcess various types of events using intelligent pattern matching and historical context to maintain and evolve the testing system.\`,\n },\n // Step 2: Security Notice (library)\n 'security-notice',\n // Step 3: Arguments (inline)\n {\n inline: true,\n title: 'Arguments',\n content: \`Arguments: $ARGUMENTS\`,\n },\n // Step 4: Knowledge Base Read (library)\n 'read-knowledge-base',\n // Step 5: Understand Event Context (inline)\n {\n inline: true,\n title: 'Understand Event Context',\n content: \`Events come from integrated external systems via webhooks or manual input. 
Common sources include:\n- **Issue Trackers**: Jira, Linear, GitHub Issues\n- **Source Control**: GitHub, GitLab\n- **Communication Tools**: Slack\n\n**Event structure and semantics vary by source.** Do not interpret events based on generic assumptions. Instead, load the appropriate handler file for system-specific processing rules.\n\n#### Event Context to Extract:\n- **What happened**: The core event (test failed, PR merged, etc.)\n- **Where**: Component, service, or area affected\n- **Impact**: How this affects testing strategy\n- **Action Required**: What needs to be done in response`,\n },\n // Step 6: Clarify Unclear Events (inline - task-specific)\n {\n inline: true,\n title: 'Clarify Unclear Events',\n content: `If the event information is incomplete or ambiguous, seek clarification before processing:\n\n#### Detect Unclear Events\n\nEvents may be unclear in several ways:\n- **Vague description**: \"Something broke\", \"issue with login\" (what specifically?)\n- **Missing context**: Which component, which environment, which user?\n- **Contradictory information**: Event data conflicts with other sources\n- **Unknown references**: Mentions unfamiliar features, components, or systems\n- **Unclear severity**: Impact or priority is ambiguous\n\n#### Assess Ambiguity Severity\n\nClassify the ambiguity level to determine appropriate response:\n\n**🔴 CRITICAL - STOP and seek clarification:**\n- Cannot identify which component is affected\n- Event data is contradictory or nonsensical\n- Unknown system or feature mentioned\n- Cannot determine if this requires immediate action\n- Example: Event says \"production is down\" but unclear which service\n\n**🟠 HIGH - STOP and seek clarification:**\n- Vague problem description that could apply to multiple areas\n- Missing critical context needed for proper response\n- Unclear which team or system is responsible\n- Example: \"Login issue reported\" (login button? auth service? session? which page?)\n\n**🟡 MEDIUM - Proceed with documented assumptions:**\n- Some details missing but core event is clear\n- Can infer likely meaning from context\n- Can proceed but should clarify async\n- Example: \"Test failed on staging\" (can assume main staging, but clarify which one)\n\n**🟢 LOW - Mark and proceed:**\n- Minor details missing (optional context)\n- Cosmetic or non-critical information gaps\n- Can document gap and continue\n- Example: Missing timestamp or exact user who reported issue\n\n#### Clarification Approach by Severity\n\n**For CRITICAL/HIGH ambiguity:**\n1. **{{INVOKE_TEAM_COMMUNICATOR}} to ask specific questions**\n2. **WAIT for response before proceeding**\n3. **Document the clarification request in event history**\n\nExample clarification messages:\n- \"Event mentions 'login issue' - can you clarify if this is:\n • Login button not responding?\n • Authentication service failure?\n • Session management problem?\n • Specific page or global?\"\n\n- \"Event references component 'XYZ' which is unknown. What system does this belong to?\"\n\n- \"Event data shows contradictory information: status=success but error_count=15. Which is correct?\"\n\n**For MEDIUM ambiguity:**\n1. **Document assumption** with reasoning\n2. **Proceed with processing** based on assumption\n3. **Ask for clarification async** (non-blocking)\n4. **Mark in event history** for future reference\n\nExample: [ASSUMED: \"login issue\" refers to login button based on recent similar events]\n\n**For LOW ambiguity:**\n1. **Mark with [TO BE CLARIFIED: detail]**\n2. 
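Expressed as data, the severity ladder above might be encoded like this (labels and actions are taken from the lists above; the structure itself is illustrative):

\`\`\`typescript
// Illustrative encoding of the ambiguity severity ladder.
type Severity = 'CRITICAL' | 'HIGH' | 'MEDIUM' | 'LOW';

const severityAction: Record<Severity, string> = {
  CRITICAL: 'stop; ask the team and WAIT for a response',
  HIGH: 'stop; ask the team and WAIT for a response',
  MEDIUM: 'proceed with a documented [ASSUMED: ...] note; clarify async',
  LOW: 'mark [TO BE CLARIFIED: ...] and continue',
};

const blocksProcessing = (s: Severity): boolean => s === 'CRITICAL' || s === 'HIGH';
\`\`\`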
**Continue processing** normally\n3. **Document gap** in event history\n\nExample: [TO BE CLARIFIED: Exact timestamp of when issue was first observed]\n\n#### Document Clarification Process\n\nIn event history, record:\n- **Ambiguity detected**: What was unclear\n- **Severity assessed**: CRITICAL/HIGH/MEDIUM/LOW\n- **Clarification requested**: Questions asked (if any)\n- **Response received**: Team's clarification\n- **Assumption made**: If proceeded with assumption\n- **Resolution**: How ambiguity was resolved\n\nThis ensures future similar events can reference past clarifications and avoid redundant questions.`,\n },\n // Step 7: Load Context and Memory (inline)\n {\n inline: true,\n title: 'Load Context and Memory',\n content: `### Step 2: Load Context and Memory\n\n#### 2.1 Check Event Processor Memory\nRead \\`.bugzy/runtime/memory/event-processor.md\\` to:\n- Find similar event patterns\n- Load example events with reasoning\n- Get system-specific rules\n- Retrieve task mapping patterns\n\n#### 2.2 Check Event History\nRead \\`.bugzy/runtime/memory/event-history.md\\` to:\n- Ensure event hasn't been processed already (idempotency)\n- Find related recent events\n- Understand event patterns and trends\n\n#### 2.3 Read Current State\n- Read \\`test-plan.md\\` for current coverage\n- List \\`./test-cases/\\` for existing tests\n- Check \\`.bugzy/runtime/knowledge-base.md\\` for past insights\n\n#### 2.4 Load System-Specific Handler (REQUIRED)\n\nBased on the event source, load the handler from \\`.bugzy/runtime/handlers/\\`:\n\n**Step 1: Detect Event Source from Payload:**\n- \\`com.jira-server.*\\` event type prefix -> \\`.bugzy/runtime/handlers/jira.md\\`\n- \\`github.*\\` or GitHub webhook structure -> \\`.bugzy/runtime/handlers/github.md\\`\n- \\`linear.*\\` or Linear webhook -> \\`.bugzy/runtime/handlers/linear.md\\`\n- Other sources -> Check for matching handler file by source name\n\n**Step 2: Load and Read the Handler File:**\nThe handler file contains system-specific instructions for:\n- Event payload structure and field meanings\n- Which triggers (status changes, resolutions) require specific actions\n- How to interpret different event types\n- When to invoke \\`/verify-changes\\`\n- How to update the knowledge base\n\n**Step 3: Follow Handler Instructions:**\nThe handler file is authoritative for this event source. Follow its instructions for:\n- Interpreting the event payload\n- Determining what actions to take\n- Formatting responses and updates\n\n**Step 4: If No Handler Exists:**\nDo NOT guess or apply generic logic. Instead:\n1. Inform the user that no handler exists for this event source\n2. Ask how this event type should be processed\n3. 
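The source-to-handler routing in Step 2.4 reduces to a small lookup; a sketch (prefixes from the detection list above; returning null mirrors the no-handler rule in Step 4):

\`\`\`typescript
// Maps an event type prefix to its handler file, per Step 2.4.
function resolveHandler(eventType: string): string | null {
  if (eventType.startsWith('com.jira-server.')) return '.bugzy/runtime/handlers/jira.md';
  if (eventType.startsWith('github.')) return '.bugzy/runtime/handlers/github.md';
  if (eventType.startsWith('linear.')) return '.bugzy/runtime/handlers/linear.md';
  return null; // no handler: ask the user instead of guessing
}
\`\`\`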
Suggest creating a handler file at \\`.bugzy/runtime/handlers/{source}.md\\`\n\n**Project-Specific Configuration:**\nHandlers reference \\`.bugzy/runtime/project-context.md\\` for project-specific rules like:\n- Which status transitions trigger verify-changes\n- Which resolutions should update the knowledge base\n- Which transitions to ignore`,\n },\n // Step 8: Intelligent Event Analysis (inline)\n {\n inline: true,\n title: 'Intelligent Event Analysis',\n content: `### Step 3: Intelligent Event Analysis\n\n#### 3.1 Contextual Pattern Analysis\nDon't just match patterns - analyze the event within the full context:\n\n**Combine Multiple Signals**:\n- Event details + Historical patterns from memory\n- Current test plan state + Knowledge base\n- External system status + Team activity\n- Business priorities + Risk assessment\n\n**Example Contextual Analysis**:\n\\`\\`\\`\nEvent: Jira issue PROJ-456 moved to \"Ready for QA\"\n+ Handler: jira.md says \"Ready for QA\" triggers /verify-changes\n+ History: This issue was previously in \"In Progress\" for 3 days\n+ Knowledge: Related PR #123 merged yesterday\n= Decision: Invoke /verify-changes with issue context and PR reference\n\\`\\`\\`\n\n**Pattern Recognition with Context**:\n- An issue resolution depends on what the handler prescribes for that status\n- A duplicate event (same issue, same transition) should be skipped\n- Events from different sources about the same change should be correlated\n- Handler instructions take precedence over generic assumptions\n\n#### 3.2 Generate Semantic Queries\nBased on event type and content, generate 3-5 specific search queries:\n- Search for similar past events\n- Look for related test cases\n- Find relevant documentation\n- Check for known issues`,\n },\n // Step 9: Documentation Research (conditional inline)\n {\n inline: true,\n title: 'Use Documentation Researcher',\n content: `#### 3.3 Use Documentation Researcher if Needed\n\n{{INVOKE_DOCUMENTATION_RESEARCHER}} to find information about unknown features or components:\n\nFor events mentioning unknown features or components, ask the agent to explore project documentation and return:\n- Feature specifications\n- Related test cases\n- Known issues or limitations\n- Component dependencies`,\n conditionalOnSubagent: 'documentation-researcher',\n },\n // Step 10: Task Planning (inline)\n {\n inline: true,\n title: 'Task Planning with Reasoning',\n content: `### Step 4: Task Planning with Reasoning\n\nGenerate tasks based on event analysis, using examples from memory as reference.\n\n#### Task Generation Logic:\nAnalyze the event in context of ALL available information to decide what actions to take:\n\n**Consider the Full Context**:\n- What does the handler prescribe for this event type?\n- How does this relate to current knowledge?\n- What's the state of related issues in external systems?\n- Is this part of a larger pattern we've been seeing?\n- What's the business impact of this event?\n\n**Contextual Decision Making**:\nThe same event type can require different actions based on context:\n- If handler says this status triggers verification -> Invoke /verify-changes\n- If this issue was already processed (check event history) -> Skip to avoid duplicates\n- If related PR exists in knowledge base -> Include PR context in actions\n- If this is a recurring pattern from the same source -> Consider flagging for review\n- If handler has no rule for this event type -> Ask user for guidance\n\n**Dynamic Task Selection**:\nBased on the contextual analysis, decide 
which tasks make sense:\n- **extract_learning**: When the event reveals something new about the system\n- **update_test_plan**: When our understanding of what to test has changed\n- **update_test_cases**: When tests need to reflect new reality\n- **report_bug**: When we have a legitimate, impactful, reproducible issue\n- **skip_action**: When context shows no action needed (e.g., known issue, already fixed)\n\nThe key is to use ALL available context - not just react to the event type\n\n#### Document Reasoning:\nFor each task, document WHY it's being executed:\n\\`\\`\\`markdown\nTask: extract_learning\nReasoning: This event reveals a pattern of login failures on Chrome that wasn't previously documented\nData: \"Chrome-specific timeout issues with login button\"\n\\`\\`\\``,\n },\n // Step 11: Issue Tracking (conditional inline)\n {\n inline: true,\n title: 'Issue Tracking',\n content: `##### For Issue Tracking:\n\nWhen an issue needs to be tracked (task type: report_bug or update_story):\n\n{{INVOKE_ISSUE_TRACKER}}\n\n1. Check for duplicate issues in the tracking system\n2. For bugs: Create detailed bug report with:\n - Clear, descriptive title\n - Detailed description with context\n - Step-by-step reproduction instructions\n - Expected vs actual behavior\n - Environment and configuration details\n - Test case reference (if applicable)\n - Screenshots or error logs\n3. For stories: Update status and add QA comments\n4. Track issue lifecycle and maintain categorization\n\nThe issue-tracker agent will handle all aspects of issue tracking including duplicate detection, story management, QA workflow transitions, and integration with your project management system (Jira, Linear, Notion, etc.).`,\n conditionalOnSubagent: 'issue-tracker',\n },\n // Step 12: Execute Tasks (inline)\n {\n inline: true,\n title: 'Execute Tasks with Memory Updates',\n content: `### Step 5: Execute Tasks with Memory Updates\n\n#### 5.1 Execute Each Task\nFollow the standard execution logic with added context from memory.\n\n#### 5.2 Update Event Processor Memory\nIf new patterns discovered, append to \\`.bugzy/runtime/memory/event-processor.md\\`:\n\\`\\`\\`markdown\n### Pattern: [New Pattern Name]\n**First Seen**: [Date]\n**Indicators**: [What identifies this pattern]\n**Typical Tasks**: [Common task responses]\n**Example**: [This event]\n\\`\\`\\`\n\n#### 5.3 Update Event History\nAppend to \\`.bugzy/runtime/memory/event-history.md\\`:\n\\`\\`\\`markdown\n## [Timestamp] - Event #[ID]\n\n**Original Input**: [Raw arguments provided]\n**Parsed Event**:\n\\`\\`\\`yaml\ntype: [type]\nsource: [source]\n[other fields]\n\\`\\`\\`\n\n**Pattern Matched**: [Pattern name or \"New Pattern\"]\n**Tasks Executed**:\n1. [Task 1] - Reasoning: [Why]\n2. [Task 2] - Reasoning: [Why]\n\n**Files Modified**:\n- [List of files]\n\n**Outcome**: [Success/Partial/Failed]\n**Notes**: [Any additional context]\n---\n\\`\\`\\``,\n },\n // Step 13: Learning and Maintenance (inline)\n {\n inline: true,\n title: 'Learning from Events',\n content: `### Step 6: Learning from Events\n\nAfter processing, check if this event teaches us something new:\n1. Is this a new type of event we haven't seen?\n2. Did our task planning work well?\n3. Should we update our patterns?\n4. 
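For the idempotency requirement (Steps 2.2 and 5.3), a rough sketch using Node's \`fs\` (the \`Event #\` marker follows the history entry template above; the helper itself is hypothetical):

\`\`\`typescript
import { appendFileSync, existsSync, readFileSync } from 'node:fs';

const HISTORY = '.bugzy/runtime/memory/event-history.md';

// Skip already-processed events, then append a new history entry.
function recordEvent(eventId: string, entryMarkdown: string): boolean {
  const seen = existsSync(HISTORY) &&
    readFileSync(HISTORY, 'utf8').includes('Event #' + eventId);
  if (seen) return false; // duplicate - skip per Step 2.2
  appendFileSync(HISTORY, entryMarkdown + '\n---\n');
  return true;
}
\`\`\`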
Are there trends across recent events?\n\nIf yes, update the event processor memory with new patterns or refined rules.\n\n### Step 7: Create Necessary Files\n\nEnsure all required files and directories exist:\n\`\`\`bash\nmkdir -p ./test-cases .bugzy/runtime/memory\n\`\`\`\n\nCreate files if they don't exist:\n- \`.bugzy/runtime/knowledge-base.md\`\n- \`.bugzy/runtime/memory/event-processor.md\`\n- \`.bugzy/runtime/memory/event-history.md\`\`,\n },\n // Step 14: Knowledge Base Update (library)\n 'update-knowledge-base',\n // Step 15: Important Considerations (inline)\n {\n inline: true,\n title: 'Important Considerations',\n content: \`## Important Considerations\n\n### Contextual Intelligence\n- Never process events in isolation - always consider full context\n- Use knowledge base, history, and external system state to inform decisions\n- What seems like a bug might be expected behavior given the context\n- A minor event might be critical when seen as part of a pattern\n\n### Adaptive Response\n- Same event type can require different actions based on context\n- Learn from each event to improve future decision-making\n- Build understanding of system behavior over time\n- Adjust responses based on business priorities and risk\n\n### Smart Task Generation\n- Only take actions prescribed by the handler or confirmed by the user\n- Document why each decision was made with full context\n- Skip redundant actions (e.g., duplicate events, already-processed issues)\n- Escalate appropriately based on pattern recognition\n\n### Continuous Learning\n- Each event adds to our understanding of the system\n- Update patterns when new correlations are discovered\n- Refine decision rules based on outcomes\n- Build institutional memory through event history\`,\n },\n ],\n\n requiredSubagents: ['team-communicator'],\n optionalSubagents: ['documentation-researcher', 'issue-tracker'],\n dependentTasks: ['verify-changes'],\n};\n","/**\n * Run Tests Task (Composed)\n * Execute automated Playwright tests, analyze failures, and fix test issues\n */\n\nimport type { ComposedTaskTemplate } from '../steps/types';\nimport { TASK_SLUGS } from '../constants';\n\nexport const runTestsTask: ComposedTaskTemplate = {\n slug: TASK_SLUGS.RUN_TESTS,\n name: 'Run Tests',\n description: 'Execute automated Playwright tests, analyze failures, and fix test issues automatically',\n\n frontmatter: {\n description: 'Execute automated Playwright tests, analyze failures, and fix test issues automatically',\n 'argument-hint': '[file-pattern|tag|all] (e.g., "auth", "@smoke", "tests/specs/login.spec.ts")',\n },\n\n steps: [\n // Step 1: Overview (inline)\n {\n inline: true,\n title: 'Run Tests Overview',\n content: \`# Run Tests Command\n\nExecute automated Playwright tests, analyze failures using JSON reports, automatically fix test issues, and log product bugs.\`,\n },\n // Step 2: Security Notice (library)\n 'security-notice',\n // Step 3: Arguments (inline)\n {\n inline: true,\n title: 'Arguments',\n content: \`Arguments: $ARGUMENTS\n\n**Parse Arguments:**\nExtract the following from arguments:\n- **selector**: Test selection criteria\n - File pattern: "auth" → finds tests/specs/**/*auth*.spec.ts\n - Tag: "@smoke" → runs tests with @smoke annotation\n - Specific file: "tests/specs/login.spec.ts"\n - All tests: "all" or "" → runs entire test suite\`,\n },\n // Step 4: Knowledge Base Read (library)\n 'read-knowledge-base',\n // Step 5: Test Execution Strategy (library)\n 'read-test-strategy',\n // Step 6: Identify Tests (inline - 
task-specific)\n {\n inline: true,\n title: 'Identify Automated Tests to Run',\n content: `#### Understand Test Selection\nParse the selector argument to determine which tests to run:\n\n**File Pattern** (e.g., \"auth\", \"login\"):\n- Find matching test files: \\`tests/specs/**/*[pattern]*.spec.ts\\`\n- Example: \"auth\" → finds all test files with \"auth\" in the name\n\n**Tag** (e.g., \"@smoke\", \"@regression\"):\n- Run tests with specific Playwright tag annotation\n- Use Playwright's \\`--grep\\` option\n\n**Specific File** (e.g., \"tests/specs/auth/login.spec.ts\"):\n- Run that specific test file\n\n**All Tests** (\"all\" or no selector):\n- Run entire test suite: \\`tests/specs/**/*.spec.ts\\`\n\n#### Find Matching Test Files\nUse glob patterns to find test files:\n\\`\\`\\`bash\n# For file pattern\nls tests/specs/**/*[pattern]*.spec.ts\n\n# For specific file\nls tests/specs/auth/login.spec.ts\n\n# For all tests\nls tests/specs/**/*.spec.ts\n\\`\\`\\`\n\n#### Validate Test Files Exist\nCheck that at least one test file was found:\n- If no tests found, inform user and suggest available tests\n- List available test files if selection was unclear\n\n#### Confirm Selection Before Execution\nBefore running tests, confirm the selection with the user if ambiguous:\n- **Clear selection** (specific file or tag): Proceed immediately\n- **Pattern match** (multiple files): List matching files and ask for confirmation if count > 5\n- **No selector** (all tests): Confirm running full suite before executing`,\n },\n // Step 7-10: Test Execution (library steps)\n 'run-playwright-tests',\n 'parse-test-results',\n 'triage-failures',\n 'fix-test-issues',\n // Step 11: Log Product Bugs (conditional - library step)\n {\n stepId: 'log-product-bugs',\n conditionalOnSubagent: 'issue-tracker',\n },\n // Step 12: Knowledge Base Update (library)\n 'update-knowledge-base',\n // Step 13: Team Communication (conditional - library step)\n {\n stepId: 'notify-team',\n conditionalOnSubagent: 'team-communicator',\n },\n // Step 14: Handle Special Cases (inline - task-specific)\n {\n inline: true,\n title: 'Handle Special Cases',\n content: `#### If No Test Cases Found\nIf no test cases match the selection criteria:\n1. Inform user that no matching test cases were found\n2. List available test cases or suggest running \\`/generate-test-cases\\` first\n3. Provide examples of valid selection criteria\n\n#### If Test Runner Agent Fails\nIf the test-runner agent encounters issues:\n1. Report the specific error\n2. Suggest troubleshooting steps\n3. Offer to run tests individually if batch execution failed\n\n#### If Test Cases Are Invalid\nIf selected test cases have formatting issues:\n1. Report which test cases are invalid\n2. Specify what's missing or incorrect\n3. 
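A hedged sketch of turning the parsed selector into a Playwright invocation (the CLI flags are standard Playwright; the mapping helper is illustrative):

\`\`\`typescript
// Maps the parsed selector to a Playwright CLI command.
function playwrightCommand(selector: string): string {
  if (!selector || selector === 'all') return 'npx playwright test';
  // Tags like @smoke live in test titles, so --grep matches them.
  if (selector.startsWith('@')) return 'npx playwright test --grep "' + selector + '"';
  // Specific file or bare pattern: Playwright filters files by the positional argument.
  return 'npx playwright test ' + selector;
}
\`\`\`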
Offer to fix the issues or skip invalid tests\n\n### Important Notes\n\n**Test Selection Strategy**:\n- **Always read** \\`.bugzy/runtime/test-execution-strategy.md\\` before selecting tests\n- Default to \\`@smoke\\` tests for fast validation unless user explicitly requests otherwise\n- Smoke tests provide 100% manual test case coverage with zero redundancy (~2-5 min)\n- Full regression includes intentional redundancy for diagnostic value (~10-15 min)\n- Use context keywords from user request to choose appropriate tier\n\n**Test Execution**:\n- Automated Playwright tests are executed via bash command, not through agents\n- Test execution time varies by tier (see strategy document for details)\n- JSON reports provide structured test results for analysis\n- Playwright automatically captures traces, screenshots, and videos on failures\n- Test artifacts are stored in test-results/ directory\n\n**Failure Handling**:\n- Test failures are automatically triaged (product bugs vs test issues)\n- Test issues are automatically fixed by the test-debugger-fixer subagent\n- Product bugs are logged via issue tracker after triage\n- All results are analyzed for learning opportunities and team communication\n- Critical failures trigger immediate team notification\n\n**Related Documentation**:\n- \\`.bugzy/runtime/test-execution-strategy.md\\` - When and why to run specific tests\n- \\`.bugzy/runtime/testing-best-practices.md\\` - How to write tests (patterns and anti-patterns)`,\n },\n ],\n\n requiredSubagents: ['test-runner', 'test-debugger-fixer'],\n optionalSubagents: ['issue-tracker', 'team-communicator'],\n dependentTasks: [],\n};\n","/**\n * Verify Changes - Unified Multi-Trigger Task (Composed)\n * Single dynamic task that handles all trigger sources: manual, Slack, GitHub PR, CI/CD\n *\n * This task replaces verify-changes-manual and verify-changes-slack with intelligent\n * trigger detection and multi-channel output routing.\n */\n\nimport type { ComposedTaskTemplate } from '../steps/types';\nimport { TASK_SLUGS } from '../constants';\n\nexport const verifyChangesTask: ComposedTaskTemplate = {\n slug: TASK_SLUGS.VERIFY_CHANGES,\n name: 'Verify Changes',\n description: 'Unified verification command for all trigger sources with automated tests and manual checklists',\n\n frontmatter: {\n description: 'Verify code changes with automated tests and manual verification checklists',\n 'argument-hint': '[trigger-auto-detected]',\n },\n\n steps: [\n // Step 1: Overview (inline)\n {\n inline: true,\n title: 'Verify Changes Overview',\n content: `# Verify Changes - Unified Multi-Trigger Workflow\n\n## Overview\n\nThis task performs comprehensive change verification with:\n- **Automated testing**: Execute Playwright tests with automatic triage and fixing\n- **Manual verification checklists**: Generate role-specific checklists for non-automatable scenarios\n- **Multi-trigger support**: Works from manual CLI, Slack messages, GitHub PRs, and CI/CD\n- **Smart output routing**: Results formatted and delivered to the appropriate channel`,\n },\n // Step 2: Security Notice (library)\n 'security-notice',\n // Step 3: Arguments (inline)\n {\n inline: true,\n title: 'Arguments',\n content: `**Input**: $ARGUMENTS\n\nThe input format determines the trigger source and context extraction strategy.`,\n },\n // Step 4: Knowledge Base Read (library)\n 'read-knowledge-base',\n // Step 5: Detect Trigger Source (inline)\n {\n inline: true,\n title: 'Detect Trigger Source',\n content: `Analyze the input format to determine how 
this task was invoked:\n\n### Identify Trigger Type\n\n**GitHub PR Webhook:**\n- Input contains \\`pull_request\\` object with structure:\n \\`\\`\\`json\n {\n \"pull_request\": {\n \"number\": 123,\n \"title\": \"...\",\n \"body\": \"...\",\n \"changed_files\": [...],\n \"base\": { \"ref\": \"main\" },\n \"head\": { \"ref\": \"feature-branch\" },\n \"user\": { \"login\": \"...\" }\n }\n }\n \\`\\`\\`\n-> **Trigger detected: GITHUB_PR**\n\n**Slack Event:**\n- Input contains \\`event\\` object with structure:\n \\`\\`\\`json\n {\n \"eventType\": \"com.slack.message\" or \"com.slack.app_mention\",\n \"event\": {\n \"type\": \"message\",\n \"channel\": \"C123456\",\n \"user\": \"U123456\",\n \"text\": \"message content\",\n \"ts\": \"1234567890.123456\",\n \"thread_ts\": \"...\" (optional)\n }\n }\n \\`\\`\\`\n-> **Trigger detected: SLACK_MESSAGE**\n\n**CI/CD Environment:**\n- Environment variables present:\n - \\`CI=true\\`\n - \\`GITHUB_REF\\` (e.g., \"refs/heads/feature-branch\")\n - \\`GITHUB_SHA\\` (commit hash)\n - \\`GITHUB_BASE_REF\\` (base branch)\n - \\`GITHUB_HEAD_REF\\` (head branch)\n- Git context available via bash commands\n-> **Trigger detected: CI_CD**\n\n**Manual Invocation:**\n- Input is natural language, URL, or issue identifier\n- Patterns: \"PR #123\", GitHub URL, \"PROJ-456\", feature description\n-> **Trigger detected: MANUAL**\n\n### Store Trigger Context\n\nStore the detected trigger for use in output routing:\n- Set variable: \\`TRIGGER_SOURCE\\` = [GITHUB_PR | SLACK_MESSAGE | CI_CD | MANUAL]\n- This determines output formatting and delivery channel`,\n },\n // Step 6: Extract Context (inline)\n {\n inline: true,\n title: 'Extract Context Based on Trigger',\n content: `Based on the detected trigger source, extract relevant context:\n\n### GitHub PR Trigger - Extract PR Details\n\nIf trigger is GITHUB_PR:\n- **PR number**: \\`pull_request.number\\`\n- **Title**: \\`pull_request.title\\`\n- **Description**: \\`pull_request.body\\`\n- **Changed files**: \\`pull_request.changed_files\\` (array of file paths)\n- **Author**: \\`pull_request.user.login\\`\n- **Base branch**: \\`pull_request.base.ref\\`\n- **Head branch**: \\`pull_request.head.ref\\`\n\n### Slack Message Trigger - Parse Natural Language\n\nIf trigger is SLACK_MESSAGE:\n- **Message text**: \\`event.text\\`\n- **Channel**: \\`event.channel\\` (for posting results)\n- **User**: \\`event.user\\` (requester)\n- **Thread**: \\`event.thread_ts\\` or \\`event.ts\\` (for threading replies)\n\n**Extract references from text:**\n- PR numbers: \"#123\", \"PR 123\", \"pull request 123\"\n- Issue IDs: \"PROJ-456\", \"BUG-123\"\n- URLs: GitHub PR links, deployment URLs\n- Feature names: Quoted terms, capitalized phrases\n- Environments: \"staging\", \"production\", \"preview\"\n\n### CI/CD Trigger - Read CI Environment\n\nIf trigger is CI_CD:\n- **CI platform**: Read \\`CI\\` env var\n- **Branch**: \\`GITHUB_REF\\` -> extract branch name\n- **Commit**: \\`GITHUB_SHA\\`\n- **Base branch**: \\`GITHUB_BASE_REF\\` (for PRs)\n- **Changed files**: Run \\`git diff --name-only $BASE_SHA...$HEAD_SHA\\`\n\n### Manual Trigger - Parse User Input\n\nIf trigger is MANUAL:\n- **GitHub PR URL**: Parse to extract PR number, then fetch details via API\n- **Issue identifier**: Extract issue ID (patterns: \"PROJ-123\", \"#456\", \"BUG-789\")\n- **Feature description**: Use text as-is for verification context\n- **Deployment URL**: Extract for testing environment\n\n### Unified Context Structure\n\nAfter extraction, create unified context 
structure:\n\\`\\`\\`\nCHANGE_CONTEXT = {\n trigger: [GITHUB_PR | SLACK_MESSAGE | CI_CD | MANUAL],\n title: \"...\",\n description: \"...\",\n changedFiles: [\"src/pages/Login.tsx\", ...],\n author: \"...\",\n environment: \"staging\" | \"production\" | URL,\n prNumber: 123 (if available),\n issueId: \"PROJ-456\" (if available),\n slackChannel: \"C123456\" (if Slack trigger),\n slackThread: \"1234567890.123456\" (if Slack trigger),\n githubRepo: \"owner/repo\" (if GitHub trigger)\n}\n\\`\\`\\``,\n },\n // Step 7: Determine Test Scope (inline)\n {\n inline: true,\n title: 'Determine Test Scope (Smart Selection)',\n content: `**IMPORTANT**: You do NOT have access to code files. Infer test scope from change **descriptions** only.\n\nBased on PR title, description, and commit messages, intelligently select which tests to run:\n\n### Infer Test Scope from Change Descriptions\n\nAnalyze the change description to identify affected feature areas:\n\n**Example mappings from descriptions to test suites:**\n\n| Description Keywords | Inferred Test Scope | Example |\n|---------------------|-------------------|---------|\n| \"login\", \"authentication\", \"sign in/up\" | \\`tests/specs/auth/\\` | \"Fix login page validation\" -> Auth tests |\n| \"checkout\", \"payment\", \"purchase\" | \\`tests/specs/checkout/\\` | \"Optimize checkout flow\" -> Checkout tests |\n| \"cart\", \"shopping cart\", \"add to cart\" | \\`tests/specs/cart/\\` | \"Update cart calculations\" -> Cart tests |\n| \"API\", \"endpoint\", \"backend\" | API test suites | \"Add new user API endpoint\" -> User API tests |\n| \"profile\", \"account\", \"settings\" | \\`tests/specs/profile/\\` or \\`tests/specs/settings/\\` | \"Profile page redesign\" -> Profile tests |\n\n**Inference strategy:**\n1. **Extract feature keywords** from PR title and description\n2. **Analyze commit messages** for conventional commit scopes\n3. **Map keywords to test organization**\n4. 
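The keyword-to-suite table above lends itself to a small lookup; a sketch (suite paths are the examples from the table, not a definitive layout - see the fallback strategies below):

\`\`\`typescript
// Illustrative mapping from description keywords to test suites (per the table above).
const scopeMap: Array<[RegExp, string]> = [
  [/login|authentication|sign.?(in|up)/i, 'tests/specs/auth/'],
  [/checkout|payment|purchase/i, 'tests/specs/checkout/'],
  [/cart/i, 'tests/specs/cart/'],
  [/profile|account|settings/i, 'tests/specs/profile/'],
];

function inferScope(description: string): string[] {
  return scopeMap.filter(([re]) => re.test(description)).map(([, suite]) => suite);
}
\`\`\`

An empty result means no keyword matched, which routes into the fallback strategies below.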
**Identify test scope breadth from description tone**\n\n### Fallback Strategies Based on Description Analysis\n\n**Description patterns that indicate full suite:**\n- \"Refactor shared/common utilities\" (wide impact)\n- \"Update dependencies\" or \"Upgrade framework\" (safety validation)\n- \"Merge main into feature\" or \"Sync with main\" (comprehensive validation)\n- \"Breaking changes\" or \"Major version update\" (thorough testing)\n- \"Database migration\" or \"Schema changes\" (data integrity)\n\n**Description patterns that indicate smoke tests only:**\n- \"Fix typo\" or \"Update copy/text\" (cosmetic change)\n- \"Update README\" or \"Documentation only\" (no functional change)\n- \"Fix formatting\" or \"Linting fixes\" (no logic change)\n\n**When description is vague or ambiguous:**\n- **ACTION REQUIRED**: Use AskUserQuestion tool to clarify test scope\n\n**If specific test scope requested:**\n- User can override with: \"only smoke tests\", \"full suite\", specific test suite names\n- Honor user's explicit scope over smart selection\n\n### Test Selection Summary\n\nGenerate summary of test selection based on description analysis:\n\\`\\`\\`markdown\n### Test Scope Determined\n- **Change description**: [PR title or summary]\n- **Identified keywords**: [list extracted keywords: \"auth\", \"checkout\", etc.]\n- **Affected test suites**: [list inferred test suite paths or names]\n- **Scope reasoning**: [explain why this scope was selected]\n- **Execution strategy**: [smart selection | full suite | smoke tests | user-specified]\n\\`\\`\\``,\n },\n // Step 8-11: Test Execution (library steps)\n 'run-playwright-tests',\n 'parse-test-results',\n 'triage-failures',\n 'fix-test-issues',\n // Step 12: Log Product Bugs (conditional library step)\n {\n stepId: 'log-product-bugs',\n conditionalOnSubagent: 'issue-tracker',\n },\n // Step 13: Generate Manual Verification Checklist (inline)\n {\n inline: true,\n title: 'Generate Manual Verification Checklist',\n content: `Generate human-readable checklist for non-automatable scenarios:\n\n### Analyze Change Context\n\nReview the provided context to understand what changed:\n- Read PR title, description, and commit messages\n- Identify change types from descriptions: visual, UX, forms, mobile, accessibility, edge cases\n- Understand the scope and impact of changes from the change descriptions\n\n### Identify Non-Automatable Scenarios\n\nBased on the change analysis, identify scenarios that require human verification:\n\n**1. Visual Design Changes** (CSS, styling, design files, graphics)\n-> Add **Design Validation** checklist items\n\n**2. UX Interaction Changes** (animations, transitions, gestures, micro-interactions)\n-> Add **UX Feel** checklist items\n\n**3. Form and Input Changes** (new form fields, input validation, user input)\n-> Add **Accessibility** checklist items\n\n**4. Mobile and Responsive Changes** (media queries, touch interactions, viewport)\n-> Add **Mobile Experience** checklist items\n\n**5. 
Low ROI or Rare Scenarios** (edge cases, one-time migrations, rare user paths)\n-> Add **Exploratory Testing** notes\n\n### Generate Role-Specific Checklist Items\n\nFor each identified scenario, create clear, actionable checklist items:\n\n**Format for each item:**\n- Clear, specific task description\n- Assigned role (@design-team, @qa-team, @a11y-team, @mobile-team)\n- Acceptance criteria (what constitutes pass/fail)\n- Reference to standards when applicable (WCAG, iOS HIG, Material Design)\n- Priority indicator (red circle critical, yellow circle important, green circle nice-to-have)\n\n**Example checklist items:**\n\n**Design Validation (@design-team)**\n- [ ] Login button color matches brand guidelines (#FF6B35)\n- [ ] Loading spinner animation smooth (60fps, no jank)\n\n**Accessibility (@a11y-team)**\n- [ ] Screen reader announces form errors clearly (tested with VoiceOver/NVDA)\n- [ ] Keyboard navigation: Tab through all interactive elements in logical order\n- [ ] Color contrast meets WCAG 2.1 AA (4.5:1 for body text, 3:1 for large text)\n\n**Mobile Experience (@qa-team, @mobile-team)**\n- [ ] Touch targets greater than or equal to 44px (iOS Human Interface Guidelines)\n- [ ] Mobile keyboard doesn't obscure input fields on iOS/Android\n\n### When NO Manual Verification Needed\n\nIf the changes are purely:\n- Backend logic (no UI changes)\n- Code refactoring (no behavior changes)\n- Configuration changes (no user-facing impact)\n- Fully covered by automated tests\n\nOutput:\n\\`\\`\\`markdown\n**Manual Verification:** Not required for this change.\nAll user-facing changes are fully covered by automated tests.\n\\`\\`\\``,\n },\n // Step 14: Aggregate Results (inline)\n {\n inline: true,\n title: 'Aggregate Verification Results',\n content: `Combine automated and manual verification results:\n\n\\`\\`\\`markdown\n## Verification Results Summary\n\n### Automated Tests\n- Total tests: [count]\n- Passed: [count] ([percentage]%)\n- Failed: [count] ([percentage]%)\n- Test issues fixed: [count]\n- Product bugs logged: [count]\n- Duration: [time]\n\n### Manual Verification Required\n[Checklist generated in previous step, or \"Not required\"]\n\n### Overall Recommendation\n[Safe to merge | Review bugs before merging | Do not merge]\n\\`\\`\\``,\n },\n // Step 15: Documentation Research (conditional inline)\n {\n inline: true,\n title: 'Understanding the Change (Documentation Research)',\n content: `{{INVOKE_DOCUMENTATION_RESEARCHER}} to gather comprehensive context about the changed features:\n\nExplore project documentation related to the changes.\n\nSpecifically gather:\n- Product specifications for affected features\n- User stories and acceptance criteria\n- Technical architecture documentation\n- API endpoints and contracts\n- User roles and permissions relevant to the change\n- Business rules and validations\n- UI/UX specifications\n- Known limitations or constraints\n- Related bug reports or known issues\n- Existing test documentation for this area\n\nThe agent will:\n1. Check its memory for previously discovered documentation\n2. Explore workspace for relevant pages and databases\n3. Build comprehensive understanding of the affected features\n4. 
Return synthesized information to inform testing strategy\n\nUse this information to:\n- Better understand the change context\n- Identify comprehensive test scenarios\n- Recognize integration points and dependencies\n- Spot potential edge cases or risk areas\n- Enhance manual verification checklist generation`,\n conditionalOnSubagent: 'documentation-researcher',\n },\n // Step 16: Report Results (inline)\n {\n inline: true,\n title: 'Report Results (Multi-Channel Output)',\n content: `Route output based on trigger source:\n\n### MANUAL Trigger -> Terminal Output\n\nFormat as comprehensive markdown report for terminal display with:\n- Change Summary (what changed, scope, affected files)\n- Automated Test Results (statistics, tests fixed, bugs logged)\n- Manual Verification Checklist\n- Recommendation (safe to merge / review / do not merge)\n- Test Artifacts (JSON report, HTML report, traces, screenshots)\n\n### SLACK_MESSAGE Trigger -> Thread Reply\n\n{{INVOKE_TEAM_COMMUNICATOR}} to post concise results to Slack thread with:\n- Verification results summary\n- Critical failures that need immediate attention\n- Bugs logged with issue tracker links\n- Manual verification checklist summary\n- Recommendation and next steps\n- Tag relevant team members for critical issues\n\n### GITHUB_PR Trigger -> PR Comment\n\nUse GitHub API to post comprehensive comment on PR with:\n- Status (All tests passed / Issues found / Critical failures)\n- Automated Tests table (Total, Passed, Failed, Fixed, Bugs, Duration)\n- Failed Tests (triaged and with actions taken)\n- Tests Fixed Automatically (issue, fix, verified)\n- Product Bugs Logged (issue ID, title, test, severity)\n- Manual Verification Required (checklist)\n- Test Artifacts links\n- Recommendation\n\n### CI_CD Trigger -> Build Log + PR Comment\n\nOutput to CI build log (print detailed results to stdout) and exit with appropriate code:\n- Exit 0: All tests passed (safe to merge)\n- Exit 1: Tests failed or critical bugs found (block merge)\n\nPost PR comment if GitHub context available.`,\n conditionalOnSubagent: 'team-communicator',\n },\n // Step 17: Knowledge Base Update (library)\n 'update-knowledge-base',\n // Step 18: Handle Special Cases (inline)\n {\n inline: true,\n title: 'Handle Special Cases',\n content: `**If no tests found for changed files:**\n- Inform user: \"No automated tests found for changed files\"\n- Recommend: \"Run smoke test suite for basic validation\"\n- Still generate manual verification checklist\n\n**If all tests skipped:**\n- Explain why (dependencies, environment issues)\n- Recommend: Check test configuration and prerequisites\n\n**If test execution fails:**\n- Report specific error (Playwright not installed, env vars missing)\n- Suggest troubleshooting steps\n- Don't proceed with triage if tests didn't run\n\n## Important Notes\n\n- This task handles **all trigger sources** with a single unified workflow\n- Trigger detection is automatic based on input format\n- Output is automatically routed to the appropriate channel\n- Automated tests are executed with **full triage and automatic fixing**\n- Manual verification checklists are generated for **non-automatable scenarios**\n- Product bugs are logged with **automatic duplicate detection**\n- Test issues are fixed automatically with **verification**\n- Results include both automated and manual verification items\n\n## Success Criteria\n\nA successful verification includes:\n1. Trigger source correctly detected\n2. Context extracted completely\n3. 
Tests executed (or skipped with explanation)\n4. All failures triaged (product bug vs test issue)\n5. Test issues fixed automatically (when possible)\n6. Product bugs logged to issue tracker\n7. Manual verification checklist generated\n8. Results formatted for output channel\n9. Results delivered to appropriate destination\n10. Clear recommendation provided (merge / review / block)`,\n },\n ],\n\n requiredSubagents: ['test-runner', 'test-debugger-fixer'],\n optionalSubagents: ['documentation-researcher', 'issue-tracker', 'team-communicator'],\n dependentTasks: [],\n};\n","/**\n * Onboard Testing Task (Composed)\n * End-to-end workflow: explore → plan → cases → test → fix → report\n * Renamed from full-test-coverage to better reflect its purpose as a setup/onboarding task\n */\n\nimport type { ComposedTaskTemplate } from '../steps/types';\nimport { TASK_SLUGS } from '../constants';\n\nexport const onboardTestingTask: ComposedTaskTemplate = {\n slug: TASK_SLUGS.ONBOARD_TESTING,\n name: 'Onboard Testing',\n description:\n 'Complete workflow: explore application, generate test plan, create test cases, run tests, fix issues, and report results',\n\n frontmatter: {\n description: 'Complete test coverage workflow - from exploration to passing tests',\n 'argument-hint': '<focus-area-or-feature-description>',\n },\n\n steps: [\n // Step 1: Overview (inline)\n {\n inline: true,\n title: 'Onboard Testing Overview',\n content: `## Overview\n\nThis command orchestrates the complete test coverage workflow in a single execution:\n1. **Phase 1**: Read project context and knowledge base\n2. **Phase 2**: Explore the application (adaptive exploration protocol)\n3. **Phase 3**: Generate lightweight test plan\n4. **Phase 4**: Generate and automate test cases (create + fix until passing)\n5. **Phase 5**: Run tests and parse results\n6. **Phase 6**: Triage failures, fix test issues, and log product bugs\n7. **Phase 7**: Report results and notify the team`,\n },
\n // Step 2: Security Notice (from library)\n 'security-notice',\n // Step 3: Arguments (inline)\n {\n inline: true,\n title: 'Arguments',\n content: `Focus area: $ARGUMENTS`,\n },\n // Phase 1: Setup\n 'read-knowledge-base',\n\n // Phase 2: Exploration Protocol\n 'exploration-protocol',\n\n // Execute exploration via test-runner\n 'create-exploration-test-case',\n 'run-exploration',\n 'process-exploration-results',\n\n // Phase 3: Test Plan Generation\n 'generate-test-plan',\n 'extract-env-variables',\n\n // Phase 4: Test Case Generation\n 'generate-test-cases',\n 'automate-test-cases',\n\n // Phase 5: Test Execution\n 'run-playwright-tests',\n 'parse-test-results',\n\n // Phase 6: Triage and Fix (NEW - was missing from full-test-coverage)\n 'triage-failures',\n 'fix-test-issues',\n {\n stepId: 'log-product-bugs',\n conditionalOnSubagent: 'issue-tracker',\n },\n\n // Phase 7: Reporting and Communication\n 'update-knowledge-base',\n {\n stepId: 'notify-team',\n conditionalOnSubagent: 'team-communicator',\n },\n 'generate-final-report',\n ],\n\n requiredSubagents: ['test-runner', 'test-code-generator', 'test-debugger-fixer'],\n optionalSubagents: ['documentation-researcher', 'team-communicator', 'issue-tracker'],\n dependentTasks: ['run-tests', 'generate-test-cases'],\n};\n","/**\n * Explore Application Task (Composed)\n * Systematically explore application to discover UI elements, workflows, and behaviors\n */\n\nimport type { ComposedTaskTemplate } from '../steps/types';\nimport { TASK_SLUGS } from '../constants';\n\nexport const exploreApplicationTask: ComposedTaskTemplate = {\n slug: TASK_SLUGS.EXPLORE_APPLICATION,\n name: 'Explore Application',\n description: 'Systematically explore application to discover UI elements, workflows, and behaviors',\n\n frontmatter: {\n description: 'Explore application to discover UI, workflows, and behaviors',\n 'argument-hint': '--focus [area] --depth [shallow|deep] --system [name]',\n },\n\n steps: [\n // Step 1: Overview (inline)\n {\n inline: true,\n title: 'Explore Application Overview',\n content: `Discover actual UI elements, workflows, and behaviors using the test-runner agent, then update the test plan and project documentation with findings.`,\n },\n // Step 2: Security Notice (from library)\n 'security-notice',\n // Step 3: Arguments (inline)\n {\n inline: true,\n title: 'Arguments',\n content: `**Arguments**: $ARGUMENTS\n\n**Parse:**\n- **focus**: auth, navigation, search, content, admin (default: comprehensive)\n- **depth**: shallow (15-20 min) or deep (45-60 min, default)\n- **system**: target system (optional for multi-system setups)`,\n },\n // Setup\n 'read-knowledge-base',\n 'load-project-context',\n\n // Exploration Protocol (adaptive depth)\n 'exploration-protocol',\n\n // Execute\n 'create-exploration-test-case',\n 'run-exploration',\n 'process-exploration-results',\n\n // Update\n 'update-exploration-artifacts',\n // Team Communication (conditional inline)\n {\n inline: true,\n title: 'Team Communication',\n content: `{{INVOKE_TEAM_COMMUNICATOR}} to notify the product team about exploration findings:\n\n\\`\\`\\`\n1. Post an update about exploration completion\n2. Summarize key discoveries:\n - UI elements and workflows identified\n - Behaviors documented\n - Areas needing further investigation\n3. Share exploration report location\n4. Ask for team feedback on findings\n5. 
Use appropriate channel and threading\n\\`\\`\\``,\n conditionalOnSubagent: 'team-communicator',\n },\n 'cleanup-temp-files',\n 'update-knowledge-base',\n ],\n\n requiredSubagents: ['test-runner'],\n optionalSubagents: ['team-communicator'],\n dependentTasks: [],\n};\n","/**\n * Tasks Module\n * Central registry and utilities for all task templates\n */\n\n// Export types and constants\nexport * from './types';\nexport * from './constants';\n\n// Import task templates\nimport { generateTestCasesTask } from './library/generate-test-cases';\nimport { generateTestPlanTask } from './library/generate-test-plan';\nimport { handleMessageTask } from './library/handle-message';\nimport { processEventTask } from './library/process-event';\nimport { runTestsTask } from './library/run-tests';\nimport { verifyChangesTask } from './library/verify-changes';\nimport { onboardTestingTask } from './library/onboard-testing';\nimport { exploreApplicationTask } from './library/explore-application';\n\nimport type { ComposedTaskTemplate } from './types';\nimport { TASK_SLUGS } from './constants';\n\n/**\n * Task Templates Registry\n * All tasks use the step-based composition format\n */\nexport const TASK_TEMPLATES: Record<string, ComposedTaskTemplate> = {\n [TASK_SLUGS.GENERATE_TEST_CASES]: generateTestCasesTask,\n [TASK_SLUGS.GENERATE_TEST_PLAN]: generateTestPlanTask,\n [TASK_SLUGS.HANDLE_MESSAGE]: handleMessageTask,\n [TASK_SLUGS.PROCESS_EVENT]: processEventTask,\n [TASK_SLUGS.RUN_TESTS]: runTestsTask,\n [TASK_SLUGS.VERIFY_CHANGES]: verifyChangesTask,\n [TASK_SLUGS.ONBOARD_TESTING]: onboardTestingTask,\n [TASK_SLUGS.EXPLORE_APPLICATION]: exploreApplicationTask,\n};\n\n/**\n * Get task template by slug\n */\nexport function getTaskTemplate(slug: string): ComposedTaskTemplate | undefined {\n return TASK_TEMPLATES[slug];\n}\n\n/**\n * Get all registered task slugs\n */\nexport function getAllTaskSlugs(): string[] {\n return Object.keys(TASK_TEMPLATES);\n}\n\n/**\n * Check if a task slug is registered\n */\nexport function isTaskRegistered(slug: string): boolean {\n return TASK_TEMPLATES[slug] !== undefined;\n}\n\n/**\n * Slash Command Configuration for Cloud Run\n * Format expected by cloudrun-claude-code API\n */\nexport interface SlashCommandConfig {\n frontmatter: Record<string, any>;\n content: 
string;\n}\n\n"],"mappings":";AAiNO,SAAS,aAAa,KAAuC;AAClE,SAAO,OAAO,QAAQ,YAAY,YAAY,OAAO,IAAI,WAAW;AACtE;AAKO,SAAS,sBAAsB,KAAgD;AACpF,SAAO,OAAO,QAAQ,YAAY,YAAY;AAChD;;;ACnNO,IAAM,aAAa;AAAA,EACxB,qBAAqB;AAAA,EACrB,iBAAiB;AAAA,EACjB,qBAAqB;AAAA,EACrB,oBAAoB;AAAA,EACpB,gBAAgB;AAAA,EAChB,eAAe;AAAA,EACf,WAAW;AAAA,EACX,gBAAgB;AAAA;AAAA,EAEhB,oBAAoB;AACtB;;;ACVO,IAAM,wBAA8C;AAAA,EACzD,MAAM,WAAW;AAAA,EACjB,MAAM;AAAA,EACN,aAAa;AAAA,EAEb,aAAa;AAAA,IACX,aAAa;AAAA,IACb,iBAAiB;AAAA,EACnB;AAAA,EAEA,OAAO;AAAA;AAAA,IAEL;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAOX;AAAA;AAAA,IAEA;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAMX;AAAA;AAAA,IAEA;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAeX;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAYT,uBAAuB;AAAA,IACzB;AAAA;AAAA,IAEA;AAAA;AAAA,IAEA;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAoCX;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAqBX;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IA0DX;AAAA;AAAA,IAEA;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IASX;AAAA;AAAA,IAEA;AAAA;AAAA,IAEA;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MA8BT,uBAAuB;AAAA,IACzB;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAkCX;AAAA,EACF;AAAA,EAEA,mBAAmB,CAAC,eAAe,qBAAqB;AAAA,EACxD,mBAAmB,CAAC,4BAA4B,mBAAmB;AAAA,EACnE,gBAAgB,CAAC;AACnB;;;ACtTO,IAAM,uBAA6C;AAAA,EACxD,MAAM,WAAW;AAAA,EACjB,MAAM;AAAA,EACN,aAAa;AAAA,EAEb,aAAa;AAAA,IACX,aAAa;AAAA,IACb,iBAAiB;AAAA,EACnB;AAAA,EAEA,OAAO;AAAA;AAAA,IAEL;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA,IACX;AAAA;AAAA,IAEA;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA,IACX;AAAA;AAAA,IAEA;AAAA;AAAA,IAEA;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA,IACX;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA,IACX;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAuBT,uBAAuB;AAAA,IACzB;AAAA;AAAA,IAEA;AAAA;AAAA,IAEA;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAOX;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAqBX;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;A
AAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAYX;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA,IACX;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAsCX;AAAA;AAAA,IAEA;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAsBT,uBAAuB;AAAA,IACzB;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA,IAKX;AAAA,EACF;AAAA,EAEA,mBAAmB,CAAC,aAAa;AAAA,EACjC,mBAAmB,CAAC,4BAA4B,mBAAmB;AAAA,EACnE,gBAAgB,CAAC;AACnB;;;AC3NO,IAAM,oBAA0C;AAAA,EACrD,MAAM,WAAW;AAAA,EACjB,MAAM;AAAA,EACN,aAAa;AAAA,EAEb,aAAa;AAAA,IACX,aAAa;AAAA,IACb,iBAAiB;AAAA,EACnB;AAAA,EAEA,OAAO;AAAA;AAAA,IAEL;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA,IAGX;AAAA;AAAA,IAEA;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA,IACX;AAAA;AAAA,IAEA;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IA4CX;AAAA;AAAA,IAEA;AAAA;AAAA,IAEA;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAqBX;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAqBX;AAAA,EACF;AAAA,EAEA,mBAAmB,CAAC,mBAAmB;AAAA,EACvC,mBAAmB,CAAC;AAAA,EACpB,gBAAgB,CAAC;AACnB;;;AC/IO,IAAM,mBAAyC;AAAA,EACpD,MAAM,WAAW;AAAA,EACjB,MAAM;AAAA,EACN,aAAa;AAAA,EAEb,aAAa;AAAA,IACX,aAAa;AAAA,IACb,iBAAiB;AAAA,EACnB;AAAA,EAEA,OAAO;AAAA;AAAA,IAEL;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA,IAGX;AAAA;AAAA,IAEA;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA,IACX;AAAA;AAAA,IAEA;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAYX;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAoFX;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAuDX;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAgCX;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAST,uBAAuB;AAAA,IACzB;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAA
O;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAuCX;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAmBT,uBAAuB;AAAA,IACzB;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAwCX;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAqBX;AAAA;AAAA,IAEA;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAyBX;AAAA,EACF;AAAA,EAEA,mBAAmB,CAAC,mBAAmB;AAAA,EACvC,mBAAmB,CAAC,4BAA4B,eAAe;AAAA,EAC/D,gBAAgB,CAAC,gBAAgB;AACnC;;;ACxaO,IAAM,eAAqC;AAAA,EAChD,MAAM,WAAW;AAAA,EACjB,MAAM;AAAA,EACN,aAAa;AAAA,EAEb,aAAa;AAAA,IACX,aAAa;AAAA,IACb,iBAAiB;AAAA,EACnB;AAAA,EAEA,OAAO;AAAA;AAAA,IAEL;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA,IAGX;AAAA;AAAA,IAEA;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IASX;AAAA;AAAA,IAEA;AAAA;AAAA,IAEA;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAwCX;AAAA;AAAA,IAEA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,uBAAuB;AAAA,IACzB;AAAA;AAAA,IAEA;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,uBAAuB;AAAA,IACzB;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IA4CX;AAAA,EACF;AAAA,EAEA,mBAAmB,CAAC,eAAe,qBAAqB;AAAA,EACxD,mBAAmB,CAAC,iBAAiB,mBAAmB;AAAA,EACxD,gBAAgB,CAAC;AACnB;;;ACxJO,IAAM,oBAA0C;AAAA,EACrD,MAAM,WAAW;AAAA,EACjB,MAAM;AAAA,EACN,aAAa;AAAA,EAEb,aAAa;AAAA,IACX,aAAa;AAAA,IACb,iBAAiB;AAAA,EACnB;AAAA,EAEA,OAAO;AAAA;AAAA,IAEL;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IASX;AAAA;AAAA,IAEA;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA,IAGX;AAAA;AAAA,IAEA;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IA0DX;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IA+DX;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;A
AAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAwDX;AAAA;AAAA,IAEA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,uBAAuB;AAAA,IACzB;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAmEX;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAmBX;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MA4BT,uBAAuB;AAAA,IACzB;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAwCT,uBAAuB;AAAA,IACzB;AAAA;AAAA,IAEA;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAsCX;AAAA,EACF;AAAA,EAEA,mBAAmB,CAAC,eAAe,qBAAqB;AAAA,EACxD,mBAAmB,CAAC,4BAA4B,iBAAiB,mBAAmB;AAAA,EACpF,gBAAgB,CAAC;AACnB;;;ACndO,IAAM,qBAA2C;AAAA,EACtD,MAAM,WAAW;AAAA,EACjB,MAAM;AAAA,EACN,aACE;AAAA,EAEF,aAAa;AAAA,IACX,aAAa;AAAA,IACb,iBAAiB;AAAA,EACnB;AAAA,EAEA,OAAO;AAAA;AAAA,IAEL;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IASX;AAAA;AAAA,IAEA;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA,IACX;AAAA;AAAA,IAEA;AAAA;AAAA,IAGA;AAAA;AAAA,IAGA;AAAA,IACA;AAAA,IACA;AAAA;AAAA,IAGA;AAAA,IACA;AAAA;AAAA,IAGA;AAAA,IACA;AAAA;AAAA,IAGA;AAAA,IACA;AAAA;AAAA,IAGA;AAAA,IACA;AAAA,IACA;AAAA,MACE,QAAQ;AAAA,MACR,uBAAuB;AAAA,IACzB;AAAA;AAAA,IAGA;AAAA,IACA;AAAA,MACE,QAAQ;AAAA,MACR,uBAAuB;AAAA,IACzB;AAAA,IACA;AAAA,EACF;AAAA,EAEA,mBAAmB,CAAC,eAAe,uBAAuB,qBAAqB;AAAA,EAC/E,mBAAmB,CAAC,4BAA4B,qBAAqB,eAAe;AAAA,EACpF,gBAAgB,CAAC,aAAa,qBAAqB;AACrD;;;AC9EO,IAAM,yBAA+C;AAAA,EAC1D,MAAM,WAAW;AAAA,EACjB,MAAM;AAAA,EACN,aAAa;AAAA,EAEb,aAAa;AAAA,IACX,aAAa;AAAA,IACb,iBAAiB;AAAA,EACnB;AAAA,EAEA,OAAO;AAAA;AAAA,IAEL;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA,IACX;AAAA;AAAA,IAEA;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,IAMX;AAAA;AAAA,IAEA;AAAA,IACA;AAAA;AAAA,IAGA;AAAA;AAAA,IAGA;AAAA,IACA;AAAA,IACA;AAAA;AAAA,IAGA;AAAA;AAAA,IAEA;AAAA,MACE,QAAQ;AAAA,MACR,OAAO;AAAA,MACP,SAAS;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,MAYT,uBAAuB;AAAA,IACzB;AAAA,IACA;AAAA,IACA;AAAA,EACF;AAAA,EAEA,mBAAmB,CAAC,aAAa;AAAA,EACjC,mBAAmB,CAAC,mBAAmB;AAAA,EACvC,gBAAgB,CAAC;AACnB;;;ACnDO,IAAM,iBAAuD;AAAA,EAClE,CAAC,WAAW,mBAAmB,GAAG;AAAA,EAClC,CAAC,WAAW,kBAAkB,GAAG;AAAA,EACjC,CAAC,WAAW,cAAc,GAAG;AAAA,EAC7B,CAAC,WAAW,aAAa,GAAG;AAAA,EAC5B,CAAC,WAAW,SAAS,GAAG;AAAA,EACxB,CAAC,WAAW,cAAc,GAAG;AAAA,EAC7B,CAAC,WAAW,eAAe,GAAG;AAAA,EAC9B,CAAC,WAAW,mBAAmB,GAAG;AACpC;AAKO,SAAS,
gBAAgB,MAAgD;AAC9E,SAAO,eAAe,IAAI;AAC5B;AAKO,SAAS,kBAA4B;AAC1C,SAAO,OAAO,KAAK,cAAc;AACnC;AAKO,SAAS,iBAAiB,MAAuB;AACtD,SAAO,eAAe,IAAI,MAAM;AAClC;","names":[]}
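The `verify-changes` template embedded in the map above selects test scope by matching keywords from the change description against known test suites. A minimal TypeScript sketch of that keyword-driven selection follows; the suite paths, keyword lists, and regex patterns are invented for illustration and do not ship with the package:

```ts
// Hypothetical sketch of the "smart selection" strategy described in the
// verify-changes template. All suite paths and keyword lists are made up.
const SUITE_KEYWORDS: Record<string, string[]> = {
  'tests/auth.spec.ts': ['auth', 'login', 'session'],
  'tests/checkout.spec.ts': ['checkout', 'payment', 'cart'],
  'tests/smoke.spec.ts': [], // fallback suite, matched by no keyword
};

function selectSuites(description: string): string[] {
  const text = description.toLowerCase();
  // Wide-impact phrases force the full suite ("patterns that indicate full suite").
  if (/refactor shared|update dependencies|merge main|breaking change|migration/.test(text)) {
    return Object.keys(SUITE_KEYWORDS);
  }
  // Cosmetic-only phrases fall back to smoke tests.
  if (/fix typo|update readme|documentation only|linting/.test(text)) {
    return ['tests/smoke.spec.ts'];
  }
  // Otherwise, match extracted keywords against the suite map.
  const matched = Object.entries(SUITE_KEYWORDS)
    .filter(([, keywords]) => keywords.some((k) => text.includes(k)))
    .map(([suite]) => suite);
  return matched.length > 0 ? matched : ['tests/smoke.spec.ts'];
}

console.log(selectSuites('Fix session timeout on the checkout page'));
// -> ['tests/auth.spec.ts', 'tests/checkout.spec.ts']
```

As in the template, an ambiguous description that matches nothing would drop to the smoke-test fallback, the point at which the template instead escalates to the AskUserQuestion tool.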
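The embedded `src/tasks/index.ts` source also defines the task registry and its helpers (`TASK_TEMPLATES`, `getTaskTemplate`, `getAllTaskSlugs`, `isTaskRegistered`). A sketch of how a consumer might use them; the exported names and the `requiredSubagents`/`frontmatter` fields come from that source, while the `@bugzy-ai/bugzy/tasks` deep-import path is an assumption based on the `dist/tasks/index.js` build output:

```ts
// Consumption sketch for the task registry in src/tasks/index.ts.
// The import path below is assumed, not confirmed by the package manifest.
import {
  TASK_SLUGS,
  getTaskTemplate,
  getAllTaskSlugs,
  isTaskRegistered,
} from '@bugzy-ai/bugzy/tasks';

// Enumerate registered tasks and the subagents each one requires.
for (const slug of getAllTaskSlugs()) {
  const task = getTaskTemplate(slug);
  if (task) {
    console.log(`${task.slug}: requires [${task.requiredSubagents.join(', ')}]`);
  }
}

// Validate a slug before dispatching it.
const requested: string = TASK_SLUGS.VERIFY_CHANGES;
if (isTaskRegistered(requested)) {
  console.log(getTaskTemplate(requested)?.frontmatter['argument-hint']);
}
```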