qaa-agent 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.claude/commands/create-test.md +40 -0
- package/.claude/commands/qa-analyze.md +60 -0
- package/.claude/commands/qa-audit.md +37 -0
- package/.claude/commands/qa-blueprint.md +54 -0
- package/.claude/commands/qa-fix.md +36 -0
- package/.claude/commands/qa-from-ticket.md +88 -0
- package/.claude/commands/qa-gap.md +54 -0
- package/.claude/commands/qa-pom.md +36 -0
- package/.claude/commands/qa-pyramid.md +37 -0
- package/.claude/commands/qa-report.md +38 -0
- package/.claude/commands/qa-start.md +33 -0
- package/.claude/commands/qa-testid.md +54 -0
- package/.claude/commands/qa-validate.md +54 -0
- package/.claude/commands/update-test.md +58 -0
- package/.claude/settings.json +19 -0
- package/.claude/skills/qa-bug-detective/SKILL.md +122 -0
- package/.claude/skills/qa-repo-analyzer/SKILL.md +88 -0
- package/.claude/skills/qa-self-validator/SKILL.md +109 -0
- package/.claude/skills/qa-template-engine/SKILL.md +113 -0
- package/.claude/skills/qa-testid-injector/SKILL.md +93 -0
- package/.claude/skills/qa-workflow-documenter/SKILL.md +87 -0
- package/CLAUDE.md +543 -0
- package/README.md +418 -0
- package/agents/qa-pipeline-orchestrator.md +1217 -0
- package/agents/qaa-analyzer.md +508 -0
- package/agents/qaa-bug-detective.md +444 -0
- package/agents/qaa-executor.md +618 -0
- package/agents/qaa-planner.md +374 -0
- package/agents/qaa-scanner.md +422 -0
- package/agents/qaa-testid-injector.md +583 -0
- package/agents/qaa-validator.md +450 -0
- package/bin/install.cjs +176 -0
- package/bin/lib/commands.cjs +709 -0
- package/bin/lib/config.cjs +307 -0
- package/bin/lib/core.cjs +497 -0
- package/bin/lib/frontmatter.cjs +299 -0
- package/bin/lib/init.cjs +989 -0
- package/bin/lib/milestone.cjs +241 -0
- package/bin/lib/model-profiles.cjs +60 -0
- package/bin/lib/phase.cjs +911 -0
- package/bin/lib/roadmap.cjs +306 -0
- package/bin/lib/state.cjs +748 -0
- package/bin/lib/template.cjs +222 -0
- package/bin/lib/verify.cjs +842 -0
- package/bin/qaa-tools.cjs +607 -0
- package/package.json +34 -0
- package/templates/failure-classification.md +391 -0
- package/templates/gap-analysis.md +409 -0
- package/templates/pr-template.md +48 -0
- package/templates/qa-analysis.md +381 -0
- package/templates/qa-audit-report.md +465 -0
- package/templates/qa-repo-blueprint.md +636 -0
- package/templates/scan-manifest.md +312 -0
- package/templates/test-inventory.md +582 -0
- package/templates/testid-audit-report.md +354 -0
- package/templates/validation-report.md +243 -0
|
@@ -0,0 +1,450 @@
|
|
|
1
|
+
<purpose>
|
|
2
|
+
Validate generated test code across 4 layers (Syntax, Structure, Dependencies, Logic) and auto-fix issues with a closed-loop fix protocol. Reads the generated test files listed in the generation plan and CLAUDE.md quality standards. Produces VALIDATION_REPORT.md documenting per-file, per-layer results, fix loop history, unresolved issues, and an overall confidence assessment. Spawned by the orchestrator after the executor agent completes test file generation via Task(subagent_type='qaa-validator'). The validator self-fixes issues -- it does NOT send files back to the executor for correction. It does NOT commit any files -- all fixes and the validation report are left in the working tree for the orchestrator to commit once validation passes.
|
|
3
|
+
</purpose>
|
|
4
|
+
|
|
5
|
+
<required_reading>
|
|
6
|
+
Read ALL of the following files BEFORE performing any validation. Do NOT skip.
|
|
7
|
+
|
|
8
|
+
- **Generation plan** (path provided by orchestrator in prompt) -- Contains the exact list of generated files to validate. CRITICAL: validate ONLY files listed in the generation plan. Do NOT validate pre-existing test files.
|
|
9
|
+
|
|
10
|
+
- **CLAUDE.md** -- QA automation standards. Read these sections:
|
|
11
|
+
- **Quality Gates** -- Assertion specificity rules: no outcome says "correct", "proper", "appropriate", or "works" without a concrete value. Framework matches project. Every test case has explicit expected outcome. No assertions inside page objects. No hardcoded credentials. File naming follows conventions. Test IDs unique.
|
|
12
|
+
- **Locator Strategy** -- 4-tier hierarchy for logic validation: Tier 1 (data-testid, ARIA roles), Tier 2 (labels, placeholders, text), Tier 3 (alt text, title), Tier 4 (CSS/XPath -- must have TODO comment). Reject Tier 4 without justification.
|
|
13
|
+
- **Test Spec Rules** -- Every test case mandatory fields: unique ID, exact target, concrete inputs, explicit expected outcome, priority.
|
|
14
|
+
- **Page Object Model Rules** -- No assertions in page objects. Locators as properties. Actions return void or next page. State queries return data.
|
|
15
|
+
- **Naming Conventions** -- File naming patterns per type (e2e, api, unit, POM, fixture).
|
|
16
|
+
- **Verification Commands** -- VALIDATION_REPORT.md verification: all 4 layers reported per file, each layer shows PASS/FAIL with details, confidence level assigned, fix loop log present, unresolved issues documented.
|
|
17
|
+
- **Module Boundaries** -- qa-validator reads generated test files and CLAUDE.md; produces VALIDATION_REPORT.md.
|
|
18
|
+
|
|
19
|
+
- **templates/validation-report.md** -- Output format contract. Defines the 5 required sections (Summary, File Details, Unresolved Issues, Fix Loop Log, Confidence Level), all field definitions, confidence criteria table (HIGH/MEDIUM/LOW), worked example, and quality gate checklist (7 items). Your VALIDATION_REPORT.md output MUST match this template exactly.
|
|
20
|
+
|
|
21
|
+
- **.claude/skills/qa-self-validator/SKILL.md** -- Defines the 4 validation layers (Syntax, Structure, Dependencies, Logic), pass criteria per layer, fix loop protocol (max 3 loops), and output format.
|
|
22
|
+
|
|
23
|
+
Note: Read these files in full. Extract the layer definitions, pass criteria, confidence calculation rules, and quality gate checklist. These define your validation contract and output requirements.
|
|
24
|
+
|
|
25
|
+
**Important:** The generation plan is the source of truth for which files to validate. If a file exists in the test directory but is NOT in the generation plan, it is a pre-existing file and MUST be excluded from validation scope. The only exception is Layer 4's cross-check for duplicate IDs, which reads (but does not validate or modify) existing test files.
|
|
26
|
+
</required_reading>
|
|
27
|
+
|
|
28
|
+
<process>
|
|
29
|
+
|
|
30
|
+
<step name="read_inputs" priority="first">
|
|
31
|
+
Read all required input files before performing any validation.
|
|
32
|
+
|
|
33
|
+
1. **Read CLAUDE.md** completely -- extract these sections for use during validation:
|
|
34
|
+
- Quality Gates checklist (assertion specificity, locator compliance, POM rules, credentials, naming, IDs)
|
|
35
|
+
- Locator Strategy (4-tier hierarchy with examples per framework)
|
|
36
|
+
- Test Spec Rules (mandatory fields per test case)
|
|
37
|
+
- Page Object Model Rules (no assertions, locators as properties, actions return void/next page)
|
|
38
|
+
- Naming Conventions (file naming patterns per test type)
|
|
39
|
+
- Module Boundaries (validator reads/produces)
|
|
40
|
+
- Verification Commands (VALIDATION_REPORT.md requirements)
|
|
41
|
+
|
|
42
|
+
2. **Read the generation plan** (path from orchestrator prompt) to get the exact list of generated files:
|
|
43
|
+
- Extract every file path listed in the generation plan
|
|
44
|
+
- Store this list as the validation scope
|
|
45
|
+
- CRITICAL: Validate ONLY files listed in the generation plan. Per CONTEXT.md locked decision: "Scope: generated files only (listed in generation plan), NOT pre-existing test files."
|
|
46
|
+
|
|
47
|
+
3. **Read all generated test files** from the file list:
|
|
48
|
+
- Read each file completely
|
|
49
|
+
- Note file type (test spec, page object, fixture, config)
|
|
50
|
+
- Note file location (directory path)
|
|
51
|
+
- Note test framework (Playwright, Cypress, Jest, Vitest, pytest, etc.)
|
|
52
|
+
|
|
53
|
+
4. **Read templates/validation-report.md** -- extract the 5 required sections, field definitions, and confidence criteria table for report generation.
|
|
54
|
+
|
|
55
|
+
5. **Read .claude/skills/qa-self-validator/SKILL.md** -- extract the 4 layer definitions and pass criteria.
|
|
56
|
+
</step>
|
|
57
|
+
|
|
58
|
+
<step name="validate_layer_1_syntax">
|
|
59
|
+
Run syntax validation on every generated file.
|
|
60
|
+
|
|
61
|
+
**Syntax checkers by language/framework:**
|
|
62
|
+
- TypeScript: `tsc --noEmit` (validates type checking and syntax)
|
|
63
|
+
- JavaScript: `node --check {file}` (validates syntax without executing)
|
|
64
|
+
- Python: `python -m py_compile {file}` (validates syntax)
|
|
65
|
+
- C#: `dotnet build --no-restore` (validates compilation)
|
|
66
|
+
|
|
67
|
+
**Also run the project linter if configured:**
|
|
68
|
+
- ESLint: `npx eslint {file}` (if `.eslintrc.*` or `eslint.config.*` exists)
|
|
69
|
+
- Flake8: `flake8 {file}` (if `setup.cfg` or `.flake8` exists)
|
|
70
|
+
- Other linters: detect from project config files
|
|
71
|
+
|
|
72
|
+
**For each file, record:**
|
|
73
|
+
- File path
|
|
74
|
+
- Syntax check exit code
|
|
75
|
+
- Any error messages with file:line references
|
|
76
|
+
- Linter warnings/errors with file:line references
|
|
77
|
+
|
|
78
|
+
**Pass criteria:** Zero syntax errors across all generated files. Linter warnings are noted but do not cause FAIL (only errors cause FAIL).
|
|
79
|
+
|
|
80
|
+
**If any syntax errors found:** Record them with exact file:line:column and error message. These will be addressed in the fix_loop step.
|
|
81
|
+
|
|
82
|
+
**Note on fail-fast behavior:** If Layer 1 fails, the fix_loop step will attempt to fix syntax errors BEFORE proceeding to Layer 2. Syntax errors can cascade -- a missing bracket may cause dozens of downstream errors. Fix the root cause first.
|
|
83
|
+
</step>
|
|
84
|
+
|
|
85
|
+
<step name="validate_layer_2_structure">
|
|
86
|
+
Check structural compliance for every generated file.
|
|
87
|
+
|
|
88
|
+
**For each file, verify ALL of the following:**
|
|
89
|
+
|
|
90
|
+
1. **Correct directory placement:**
|
|
91
|
+
- E2E tests in `tests/e2e/` (or `e2e/`)
|
|
92
|
+
- API tests in `tests/api/` (or `api/`)
|
|
93
|
+
- Unit tests in `tests/unit/` (or `unit/`)
|
|
94
|
+
- Page objects in `pages/` (or `page-objects/` or `support/page-objects/`)
|
|
95
|
+
- Fixtures in `fixtures/`
|
|
96
|
+
- Smoke tests in `tests/e2e/smoke/`
|
|
97
|
+
- Regression tests in `tests/e2e/regression/`
|
|
98
|
+
|
|
99
|
+
2. **Naming convention compliance per CLAUDE.md:**
|
|
100
|
+
- Page objects: `[PageName]Page.[ext]`
|
|
101
|
+
- Component POMs: `[ComponentName].[ext]`
|
|
102
|
+
- E2E tests: `[feature].e2e.[ext]` or `[feature].e2e.spec.[ext]`
|
|
103
|
+
- API tests: `[resource].api.[ext]` or `[resource].api.spec.[ext]`
|
|
104
|
+
- Unit tests: `[module].unit.[ext]` or `[module].unit.spec.[ext]`
|
|
105
|
+
- Fixtures: `[domain]-data.[ext]`
|
|
106
|
+
|
|
107
|
+
3. **Has actual test functions:**
|
|
108
|
+
- Test files contain `test()`, `it()`, `describe()`, `def test_`, or equivalent -- not empty describe blocks
|
|
109
|
+
- Each test block has at least one assertion
|
|
110
|
+
|
|
111
|
+
4. **Imports reference real modules:**
|
|
112
|
+
- Import paths point to files that exist in the project
|
|
113
|
+
- No imports reference non-existent files
|
|
114
|
+
|
|
115
|
+
5. **No hardcoded secrets/credentials/tokens:**
|
|
116
|
+
- Scan for patterns: API keys, passwords, tokens, secrets (string literals that look like credentials)
|
|
117
|
+
- Environment variables should be used instead: `process.env.*`, `os.environ.*`
|
|
118
|
+
|
|
119
|
+
6. **POM files have no assertions:**
|
|
120
|
+
- Grep page object files for `expect(`, `assert`, `.should(`, `toBe`, `toEqual`, `toHaveText`
|
|
121
|
+
- Page objects must return data, not assert on it
|
|
122
|
+
- Per CLAUDE.md: "No assertions in page objects -- assertions belong ONLY in test specs"
|
|
123
|
+
|
|
124
|
+
**Pass criteria:** All structural checks pass for all generated files.
|
|
125
|
+
|
|
126
|
+
**If any structural issues found:** Record with file path, check type, and specific violation description.
|
|
127
|
+
</step>
|
|
128
|
+
|
|
129
|
+
<step name="validate_layer_3_dependencies">
|
|
130
|
+
Verify all dependency references resolve correctly.
|
|
131
|
+
|
|
132
|
+
**For each file, verify:**
|
|
133
|
+
|
|
134
|
+
1. **All imports resolvable:**
|
|
135
|
+
- Every `import` or `require` statement references a module that exists at the specified path
|
|
136
|
+
- Relative imports (`./`, `../`) resolve to actual files
|
|
137
|
+
- Package imports reference packages listed in package.json (or requirements.txt, etc.)
|
|
138
|
+
|
|
139
|
+
2. **Packages listed in manifest:**
|
|
140
|
+
- Every npm package imported is in `dependencies` or `devDependencies` of package.json
|
|
141
|
+
- Every Python package imported is in requirements.txt or pyproject.toml
|
|
142
|
+
- Flag any import of a package not listed in the manifest
|
|
143
|
+
|
|
144
|
+
3. **No missing dependencies:**
|
|
145
|
+
- Cross-reference all unique package imports against the manifest
|
|
146
|
+
- List any packages imported but not installed
|
|
147
|
+
|
|
148
|
+
4. **No circular dependencies in test helpers:**
|
|
149
|
+
- Check if test utility files import each other in a cycle
|
|
150
|
+
- A imports B, B imports A = circular dependency
|
|
151
|
+
|
|
152
|
+
5. **Fixtures reference existing fixture files:**
|
|
153
|
+
- Any fixture file imports or data file references point to files that exist
|
|
154
|
+
- Fixture paths in test setup/beforeAll blocks reference real files
|
|
155
|
+
|
|
156
|
+
**Pass criteria:** All imports resolve, all packages available, no circular dependencies.
|
|
157
|
+
|
|
158
|
+
**If any dependency issues found:** Record with file path, import statement, and what is missing or broken.
|
|
159
|
+
</step>
|
|
160
|
+
|
|
161
|
+
<step name="validate_layer_4_logic">
|
|
162
|
+
Check test logic quality against CLAUDE.md standards. This layer includes cross-checking existing test files.
|
|
163
|
+
|
|
164
|
+
**For each test file, verify:**
|
|
165
|
+
|
|
166
|
+
1. **Happy-path tests have positive assertions:**
|
|
167
|
+
- Tests verifying normal/expected behavior use: `toBe`, `toEqual`, `toHaveText`, `toBeVisible`, `toHaveCount`, `toContain`, `toMatch`, `expect(...).resolves`
|
|
168
|
+
- Not: negated assertions on happy paths
|
|
169
|
+
|
|
170
|
+
2. **Error/negative tests have appropriate assertions:**
|
|
171
|
+
- Tests verifying error behavior use: `not.toBe`, `toThrow`, `rejects.toThrow`, status codes >= 400, error message matching
|
|
172
|
+
- Error tests actually test the error condition, not just that something exists
|
|
173
|
+
|
|
174
|
+
3. **Setup/teardown symmetry:**
|
|
175
|
+
- Resources created in `beforeAll`/`beforeEach` are cleaned up in `afterAll`/`afterEach`
|
|
176
|
+
- Database records created are cleaned up
|
|
177
|
+
- Browsers/pages opened are closed
|
|
178
|
+
|
|
179
|
+
4. **No duplicate test IDs across the suite:**
|
|
180
|
+
- Collect all test IDs from generated files (UT-*, INT-*, API-*, E2E-* patterns)
|
|
181
|
+
- Verify uniqueness within the generated file set
|
|
182
|
+
- **CROSS-CHECK (per CONTEXT.md locked decision):** Also scan existing test files (outside the generation plan scope) for duplicate IDs. If an existing test file uses `UT-AUTH-001` and a generated file also uses `UT-AUTH-001`, this is a collision that must be flagged. This cross-check prevents collisions with pre-existing tests -- it does NOT validate those pre-existing files.
|
|
183
|
+
|
|
184
|
+
5. **Assertions are concrete:**
|
|
185
|
+
- REJECT these vague assertion patterns per CLAUDE.md:
|
|
186
|
+
- `toBeTruthy()` (what truthy value?)
|
|
187
|
+
- `toBeDefined()` (what should it be defined as?)
|
|
188
|
+
- `.should('exist')` without a value check (what should it contain?)
|
|
189
|
+
- `expect(x).not.toBeNull()` without checking the actual value
|
|
190
|
+
- REQUIRE concrete assertions:
|
|
191
|
+
- `toBe(200)`, `toEqual({id: '123'})`, `toHaveText('Order confirmed')`, `toThrow(InvalidTransitionError)`
|
|
192
|
+
|
|
193
|
+
6. **Each test has at least one assertion:**
|
|
194
|
+
- Every `test()`, `it()`, or `def test_` block contains at least one `expect()`, `assert`, or `.should()` call
|
|
195
|
+
- Empty test bodies or tests with only setup/action but no assertion are flagged
|
|
196
|
+
|
|
197
|
+
**Cross-check for overlapping selectors:**
|
|
198
|
+
- If the generated tests use `getByTestId('login-submit-btn')` and an existing test also targets `login-submit-btn`, note the overlap. This is informational (not necessarily a collision), but helps identify potential test interference.
|
|
199
|
+
- If generated tests define custom selectors that conflict with existing test helper selectors, flag for review.
|
|
200
|
+
|
|
201
|
+
**Pass criteria:** All logic checks pass for all generated files.
|
|
202
|
+
|
|
203
|
+
**If any logic issues found:** Record with file path, line number, issue type, and specific violation.
|
|
204
|
+
</step>
|
|
205
|
+
|
|
206
|
+
<step name="fix_loop">
|
|
207
|
+
Attempt to fix issues found during validation layers. This step encodes ALL 8 locked decisions from CONTEXT.md.
|
|
208
|
+
|
|
209
|
+
**Locked Decision 1: Self-fixes** -- The validator fixes issues itself. It does NOT send files back to the executor agent.
|
|
210
|
+
|
|
211
|
+
**Locked Decision 2: Sequential, fail-fast** -- Layers run in order: Layer 1 (Syntax) -> Layer 2 (Structure) -> Layer 3 (Dependencies) -> Layer 4 (Logic). Fix Layer 1 issues before proceeding to check Layer 2. If Layer 1 fails, fix it and re-validate Layer 1 before moving to Layer 2.
|
|
212
|
+
|
|
213
|
+
**Locked Decision 3: Max 3 loops** -- The fix loop runs at most 3 times. After 3 loops with unresolved issues, STOP and escalate.
|
|
214
|
+
|
|
215
|
+
**Locked Decision 4: Generated files only** -- Only fix files listed in the generation plan. Never modify pre-existing test files.
|
|
216
|
+
|
|
217
|
+
**Locked Decision 5: Layer 4 cross-check** -- Layer 4 scans existing test files for duplicate IDs and overlapping selectors. If collisions found, fix the GENERATED file (rename its ID), not the pre-existing file.
|
|
218
|
+
|
|
219
|
+
**Locked Decision 6: Fix confidence classification:**
|
|
220
|
+
|
|
221
|
+
| Confidence | Action | Examples |
|
|
222
|
+
|-----------|--------|---------|
|
|
223
|
+
| HIGH | Auto-apply fix | Import path corrections, syntax errors (missing semicolons, brackets, parentheses), missing `await` keywords, obvious typos in file references |
|
|
224
|
+
| MEDIUM | Flag for review -- do NOT auto-apply | Assertion value updates, selector changes that may affect test intent |
|
|
225
|
+
| LOW | Flag for review -- do NOT auto-apply | Logic restructuring, test refactoring, changing test approach |
|
|
226
|
+
|
|
227
|
+
**Only HIGH-confidence fixes are applied automatically.** MEDIUM and LOW fixes are documented in the report as unresolved issues requiring human review.
|
|
228
|
+
|
|
229
|
+
**Locked Decision 7: Fix history in report** -- Every fix loop iteration is logged in the VALIDATION_REPORT.md Fix Loop Log section with: issues found, fixes attempted, verification result.
|
|
230
|
+
|
|
231
|
+
**Locked Decision 8: Does NOT commit** -- The validator does NOT commit any files. All fixed files and the VALIDATION_REPORT.md are left in the working tree. The orchestrator commits them after reviewing validation results.
|
|
232
|
+
|
|
233
|
+
**Fix loop execution:**
|
|
234
|
+
|
|
235
|
+
```
|
|
236
|
+
Loop iteration (max 3):
|
|
237
|
+
1. Run all 4 validation layers sequentially (fail-fast)
|
|
238
|
+
2. If all layers PASS: exit loop, proceed to produce_report
|
|
239
|
+
3. If any layer FAIL:
|
|
240
|
+
a. For each issue found:
|
|
241
|
+
- Classify fix confidence: HIGH, MEDIUM, or LOW
|
|
242
|
+
- If HIGH: apply the fix to the file in the working tree
|
|
243
|
+
- If MEDIUM or LOW: record as unresolved, do NOT apply
|
|
244
|
+
b. Log this loop iteration: issues found, fixes applied, verification
|
|
245
|
+
c. Re-validate from the FAILED layer (not from Layer 1 unless Layer 1 failed)
|
|
246
|
+
d. If this was loop 3: exit loop regardless of results
|
|
247
|
+
```
|
|
248
|
+
|
|
249
|
+
**After 3 loops with unresolved issues:**
|
|
250
|
+
|
|
251
|
+
STOP and return a checkpoint:
|
|
252
|
+
|
|
253
|
+
```
|
|
254
|
+
CHECKPOINT_RETURN:
|
|
255
|
+
completed: "Validated {N} files across 4 layers. Completed {loop_count} fix loops."
|
|
256
|
+
blocking: "Unresolved validation issues after maximum 3 fix loops"
|
|
257
|
+
details:
|
|
258
|
+
files_validated: {N}
|
|
259
|
+
loops_completed: 3
|
|
260
|
+
issues_found: {total_count}
|
|
261
|
+
issues_fixed: {fixed_count}
|
|
262
|
+
unresolved:
|
|
263
|
+
- file: "{file_path}"
|
|
264
|
+
layer: "{layer_name}"
|
|
265
|
+
issue: "{description}"
|
|
266
|
+
confidence: "{MEDIUM or LOW}"
|
|
267
|
+
why_not_fixed: "{reason auto-fix was not applied}"
|
|
268
|
+
awaiting: "User decides: fix remaining issues manually, accept with warnings, or abort validation"
|
|
269
|
+
```
|
|
270
|
+
</step>
|
|
271
|
+
|
|
272
|
+
<step name="produce_report">
|
|
273
|
+
Write VALIDATION_REPORT.md matching templates/validation-report.md exactly (5 required sections).
|
|
274
|
+
|
|
275
|
+
**Report header:**
|
|
276
|
+
```markdown
|
|
277
|
+
# Validation Report
|
|
278
|
+
|
|
279
|
+
**Generated:** {ISO timestamp}
|
|
280
|
+
**Validator:** qa-validator v1.0
|
|
281
|
+
**Target:** {project name} ({file count} files)
|
|
282
|
+
```
|
|
283
|
+
|
|
284
|
+
**Section 1: Summary**
|
|
285
|
+
|
|
286
|
+
| Layer | Status | Issues Found | Issues Fixed |
|
|
287
|
+
|-------|--------|-------------|-------------|
|
|
288
|
+
| Syntax | PASS/FAIL | N | N |
|
|
289
|
+
| Structure | PASS/FAIL | N | N |
|
|
290
|
+
| Dependencies | PASS/FAIL | N | N |
|
|
291
|
+
| Logic | PASS/FAIL | N | N |
|
|
292
|
+
|
|
293
|
+
Additional summary fields:
|
|
294
|
+
- Total files validated
|
|
295
|
+
- Total issues found (across all layers)
|
|
296
|
+
- Total issues fixed
|
|
297
|
+
- Fix loops used (1, 2, or 3)
|
|
298
|
+
- Overall status: PASS (all layers pass, 0 unresolved) / PASS WITH WARNINGS (all layers pass, minor unresolved) / FAIL (any layer still FAIL)
|
|
299
|
+
|
|
300
|
+
**Section 2: File Details**
|
|
301
|
+
|
|
302
|
+
For EVERY validated file, create a subsection with a 4-row table showing all 4 layers:
|
|
303
|
+
|
|
304
|
+
### {file_path}
|
|
305
|
+
|
|
306
|
+
| Layer | Status | Details |
|
|
307
|
+
|-------|--------|---------|
|
|
308
|
+
| Syntax | PASS/FAIL | {specific details -- never just "PASS" or "FAIL"} |
|
|
309
|
+
| Structure | PASS/FAIL | {specific details about placement, naming, test functions, imports, credentials, POM compliance} |
|
|
310
|
+
| Dependencies | PASS/FAIL | {specific details about import resolution, package availability} |
|
|
311
|
+
| Logic | PASS/FAIL | {specific details about assertion quality, test IDs, setup/teardown, concrete values} |
|
|
312
|
+
|
|
313
|
+
**Rule:** Report EVERY layer for EVERY file, even if all layers pass. A file with all PASS still shows 4 rows with explanatory details.
|
|
314
|
+
|
|
315
|
+
**Section 3: Unresolved Issues**
|
|
316
|
+
|
|
317
|
+
For each unresolved issue:
|
|
318
|
+
- File path
|
|
319
|
+
- Layer that detected it
|
|
320
|
+
- Issue description (specific, not generic)
|
|
321
|
+
- Attempted fix (or "No fix attempted -- confidence too low for auto-fix")
|
|
322
|
+
- Why it failed / why no auto-fix was applied
|
|
323
|
+
- Suggested resolution for human reviewer
|
|
324
|
+
|
|
325
|
+
If no unresolved issues: **"None -- all issues resolved within fix loops."**
|
|
326
|
+
|
|
327
|
+
**Section 4: Fix Loop Log**
|
|
328
|
+
|
|
329
|
+
For each loop iteration (even if 0 issues found):
|
|
330
|
+
|
|
331
|
+
### Loop {N}
|
|
332
|
+
|
|
333
|
+
- **Issues found:** {count}
|
|
334
|
+
{numbered list of specific issues}
|
|
335
|
+
- **Fixes applied:** {description of each fix}
|
|
336
|
+
- **Verification result:** {outcome after fixes -- what passed, what remains}
|
|
337
|
+
|
|
338
|
+
If all layers passed on first check: Report "Loop 1: 0 issues found. All 4 layers PASS across all files."
|
|
339
|
+
|
|
340
|
+
**Section 5: Confidence Level**
|
|
341
|
+
|
|
342
|
+
Include the confidence criteria table:
|
|
343
|
+
|
|
344
|
+
| Level | All Layers PASS | Unresolved Issues | Fix Loops Used | Description |
|
|
345
|
+
|-------|----------------|-------------------|----------------|-------------|
|
|
346
|
+
| HIGH | Yes | 0 | 0-1 | All validations pass with minimal or no fixes needed. Code is ready for delivery. |
|
|
347
|
+
| MEDIUM | Yes (after fixes) | 0-2 minor | 2-3 | All layers eventually pass, but required multiple fix rounds. Minor issues may exist. |
|
|
348
|
+
| LOW | No (any FAIL) | Any critical | 3 (max) | At least one layer still fails, or critical issues remain unresolved. Human review required before delivery. |
|
|
349
|
+
|
|
350
|
+
Followed by the specific confidence statement:
|
|
351
|
+
`**{LEVEL}:** {one-sentence reasoning referencing specific metrics from the summary}`
|
|
352
|
+
|
|
353
|
+
**Write the report** to the output path specified by the orchestrator. Do NOT hardcode the path.
|
|
354
|
+
</step>
|
|
355
|
+
|
|
356
|
+
<step name="return_results">
|
|
357
|
+
Return a structured result to the orchestrator. Do NOT commit any files.
|
|
358
|
+
|
|
359
|
+
```
|
|
360
|
+
VALIDATOR_COMPLETE:
|
|
361
|
+
report_path: "{path to VALIDATION_REPORT.md}"
|
|
362
|
+
overall_status: "{PASS | PASS_WITH_WARNINGS | FAIL}"
|
|
363
|
+
confidence: "{HIGH | MEDIUM | LOW}"
|
|
364
|
+
layers_summary:
|
|
365
|
+
syntax: "{PASS | FAIL}"
|
|
366
|
+
structure: "{PASS | FAIL}"
|
|
367
|
+
dependencies: "{PASS | FAIL}"
|
|
368
|
+
logic: "{PASS | FAIL}"
|
|
369
|
+
fix_loops_used: {1 | 2 | 3}
|
|
370
|
+
issues_found: {total count}
|
|
371
|
+
issues_fixed: {count of auto-fixed}
|
|
372
|
+
unresolved_count: {count of unresolved}
|
|
373
|
+
```
|
|
374
|
+
|
|
375
|
+
**CRITICAL: The validator does NOT commit.** All files (VALIDATION_REPORT.md and any fixed test files) are left in the working tree. The orchestrator is responsible for reviewing the validation results and committing once satisfied.
|
|
376
|
+
|
|
377
|
+
**Do NOT run:**
|
|
378
|
+
- `git add`
|
|
379
|
+
- `git commit`
|
|
380
|
+
- `node bin/qaa-tools.cjs commit`
|
|
381
|
+
|
|
382
|
+
The orchestrator handles all git operations after reviewing the validator's output.
|
|
383
|
+
</step>
|
|
384
|
+
|
|
385
|
+
</process>
|
|
386
|
+
|
|
387
|
+
<output>
|
|
388
|
+
The validator agent produces these artifacts (all left in working tree, NOT committed):
|
|
389
|
+
|
|
390
|
+
- **VALIDATION_REPORT.md** at the output path specified by the orchestrator prompt. Contains 5 required sections: Summary (4-layer status table), File Details (per-file, per-layer breakdown), Unresolved Issues (items that could not be auto-fixed), Fix Loop Log (chronological fix history), Confidence Level (quantitative assessment with criteria table).
|
|
391
|
+
|
|
392
|
+
- **Fixed test files** in the working tree. Any HIGH-confidence fixes are applied directly to the generated files. MEDIUM and LOW confidence issues are documented but NOT applied.
|
|
393
|
+
|
|
394
|
+
**Return values to orchestrator:**
|
|
395
|
+
|
|
396
|
+
```
|
|
397
|
+
VALIDATOR_COMPLETE:
|
|
398
|
+
report_path: "{path to VALIDATION_REPORT.md}"
|
|
399
|
+
overall_status: "{PASS | PASS_WITH_WARNINGS | FAIL}"
|
|
400
|
+
confidence: "{HIGH | MEDIUM | LOW}"
|
|
401
|
+
layers_summary:
|
|
402
|
+
syntax: "{PASS | FAIL}"
|
|
403
|
+
structure: "{PASS | FAIL}"
|
|
404
|
+
dependencies: "{PASS | FAIL}"
|
|
405
|
+
logic: "{PASS | FAIL}"
|
|
406
|
+
fix_loops_used: {N}
|
|
407
|
+
issues_found: {N}
|
|
408
|
+
issues_fixed: {N}
|
|
409
|
+
unresolved_count: {N}
|
|
410
|
+
```
|
|
411
|
+
|
|
412
|
+
**NOT committed:** The validator does NOT commit any files. The orchestrator commits VALIDATION_REPORT.md and fixed files after reviewing results. This separation ensures the orchestrator can inspect fixes before they become permanent.
|
|
413
|
+
</output>
|
|
414
|
+
|
|
415
|
+
<quality_gate>
|
|
416
|
+
Before considering validation complete, verify ALL of the following.
|
|
417
|
+
|
|
418
|
+
**From templates/validation-report.md quality gate (all 7 items -- VERBATIM):**
|
|
419
|
+
|
|
420
|
+
- [ ] All 5 required sections are present (Summary, File Details, Unresolved Issues, Fix Loop Log, Confidence Level)
|
|
421
|
+
- [ ] Summary table shows all 4 layers (Syntax, Structure, Dependencies, Logic) with counts
|
|
422
|
+
- [ ] Every validated file has its own File Details subsection with all 4 layers reported
|
|
423
|
+
- [ ] Unresolved Issues section is present (either with issues or "None" statement)
|
|
424
|
+
- [ ] Fix Loop Log documents every loop iteration with issues found, fixes applied, and verification result
|
|
425
|
+
- [ ] Confidence Level includes the criteria table and a specific confidence statement with reasoning
|
|
426
|
+
- [ ] No file details entry says just "PASS" or "FAIL" without explanatory details
|
|
427
|
+
|
|
428
|
+
**Additional validator-specific checks:**
|
|
429
|
+
|
|
430
|
+
- [ ] Only generated files were validated (not pre-existing test files) -- verify every file in the report appears in the generation plan file list
|
|
431
|
+
- [ ] Layer 4 cross-checked existing test files for duplicate IDs and overlapping selectors to prevent collisions
|
|
432
|
+
- [ ] Fix confidence correctly classified (HIGH auto-applied, MEDIUM/LOW flagged for review but NOT auto-applied)
|
|
433
|
+
- [ ] Fix loop count did not exceed 3 iterations
|
|
434
|
+
- [ ] If 3 loops exhausted with unresolved issues: CHECKPOINT_RETURN was provided to escalate to user
|
|
435
|
+
- [ ] Validator did NOT commit any files (no git add, no git commit, no qaa-tools commit)
|
|
436
|
+
|
|
437
|
+
If any check fails, fix the issue before finalizing the output. Do not deliver a validation report that fails its own quality gate.
|
|
438
|
+
</quality_gate>
|
|
439
|
+
|
|
440
|
+
<success_criteria>
|
|
441
|
+
The validator agent has completed successfully when:
|
|
442
|
+
|
|
443
|
+
1. VALIDATION_REPORT.md exists at the output path specified by the orchestrator
|
|
444
|
+
2. All 5 required sections are populated with data specific to the validated files
|
|
445
|
+
3. Fix loop log includes all iterations (even if 0 issues found -- report "Loop 1: 0 issues found")
|
|
446
|
+
4. Confidence level is calculated correctly using the quantitative criteria table (HIGH/MEDIUM/LOW based on layers passing, unresolved count, loop count)
|
|
447
|
+
5. All generated files are left in the working tree -- NOT committed by the validator
|
|
448
|
+
6. Return values provided to orchestrator: report_path, overall_status, confidence, layers_summary, fix_loops_used, issues_found, issues_fixed, unresolved_count
|
|
449
|
+
7. All quality gate checks pass (7 template items + 6 validator-specific items)
|
|
450
|
+
</success_criteria>
|
package/bin/install.cjs
ADDED
|
@@ -0,0 +1,176 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
/**
|
|
3
|
+
* QAA - QA Automation Agent Installer
|
|
4
|
+
* Run with: npx qaa-agent
|
|
5
|
+
*/
|
|
6
|
+
|
|
7
|
+
const fs = require('fs');
const os = require('os');
const path = require('path');
const readline = require('readline');
|
|
10
|
+
|
|
11
|
+
// Installer version, read from the package manifest; shown in the banner and
// written to the VERSION marker file.
const VERSION = require('../package.json').version;
// Package root (one level above bin/): source of commands, skills, agents, templates.
const ROOT = path.resolve(__dirname, '..');
// User home directory. Prefer the conventional env vars, but fall back to
// os.homedir() so path.join() below never receives undefined (some Windows
// shells and minimal environments define neither HOME nor USERPROFILE).
const HOME = process.env.HOME || process.env.USERPROFILE || os.homedir();

// Runtime configs: menu choice -> { display name, global install directory }.
const RUNTIMES = {
  '1': { name: 'Claude Code', dir: path.join(HOME, '.claude') },
  '2': { name: 'OpenCode', dir: path.join(HOME, '.config', 'opencode') },
};
|
|
20
|
+
|
|
21
|
+
/**
 * Prompt the user on stdin and resolve with the trimmed answer.
 * An empty (or whitespace-only) answer resolves to `defaultVal`.
 *
 * @param {string} question - Prompt text written to stdout.
 * @param {string} defaultVal - Value used when the user just presses Enter.
 * @returns {Promise<string>} The trimmed answer or the default.
 */
function ask(question, defaultVal) {
  const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
  return new Promise((resolve) => {
    rl.question(question, (answer) => {
      rl.close();
      const trimmed = answer.trim();
      resolve(trimmed === '' ? defaultVal : trimmed);
    });
  });
}
|
|
30
|
+
|
|
31
|
+
/**
 * Recursively copy the contents of `src` into `dest`, creating target
 * directories as needed.
 *
 * @param {string} src - Source directory; if it does not exist, nothing happens.
 * @param {string} dest - Destination directory (created recursively).
 * @returns {number} Count of files copied (0 when `src` is missing).
 */
function copyDir(src, dest) {
  if (!fs.existsSync(src)) return 0;
  fs.mkdirSync(dest, { recursive: true });
  const entries = fs.readdirSync(src, { withFileTypes: true });
  // Fold over the directory entries, recursing into subdirectories.
  return entries.reduce((copied, entry) => {
    const from = path.join(src, entry.name);
    const to = path.join(dest, entry.name);
    if (entry.isDirectory()) {
      return copied + copyDir(from, to);
    }
    fs.copyFileSync(from, to);
    return copied + 1;
  }, 0);
}
|
|
47
|
+
|
|
48
|
+
/**
 * Copy a single file, creating the destination's parent directory first.
 *
 * @param {string} src - Source file path.
 * @param {string} dest - Destination file path.
 * @returns {boolean} true when copied; false when `src` does not exist.
 */
function copyFile(src, dest) {
  if (!fs.existsSync(src)) {
    return false;
  }
  fs.mkdirSync(path.dirname(dest), { recursive: true });
  fs.copyFileSync(src, dest);
  return true;
}
|
|
54
|
+
|
|
55
|
+
// Success line: green ANSI check mark followed by the message.
function ok(msg) {
  console.log(` \x1b[32m✓\x1b[0m ${msg}`);
}

// Plain informational line, indented to align with ok() output.
function info(msg) {
  console.log(` ${msg}`);
}
|
|
57
|
+
|
|
58
|
+
/**
 * Interactive installer entry point.
 *
 * Flow: print a banner, ask which runtime to target (Claude Code or
 * OpenCode) and whether to install globally (runtime dir) or locally
 * (./.claude), then copy commands, skills, agents, templates, and CLI
 * tooling into place, merge permission settings, and print a summary.
 *
 * Reads: stdin (two prompts), package files under ROOT.
 * Writes: files under the chosen base/qaa directories, settings.json.
 */
async function main() {
  // ASCII-art banner and one-line description.
  console.log('');
  console.log(' \x1b[36m ██████╗ █████╗ █████╗ \x1b[0m');
  console.log(' \x1b[36m██╔═══██╗██╔══██╗██╔══██╗\x1b[0m');
  console.log(' \x1b[36m██║ ██║███████║███████║\x1b[0m');
  console.log(' \x1b[36m██║▄▄ ██║██╔══██║██╔══██║\x1b[0m');
  console.log(' \x1b[36m╚██████╔╝██║ ██║██║ ██║\x1b[0m');
  console.log(' \x1b[36m ╚══▀▀═╝ ╚═╝ ╚═╝╚═╝ ╚═╝\x1b[0m');
  console.log('');
  console.log(` \x1b[1mQA Automation Agent\x1b[0m v${VERSION}`);
  console.log(' Multi-agent QA pipeline for Claude Code.');
  console.log(' Analyzes repos, generates tests, validates, and creates PRs.');
  console.log('');

  // Ask runtime
  console.log(' Which runtime would you like to install for?');
  console.log('');
  console.log(' 1) Claude Code (~/.claude)');
  console.log(' 2) OpenCode (~/.config/opencode)');
  console.log('');
  const runtimeChoice = await ask(' Choice [1]: ', '1');
  // Any unrecognized answer silently falls back to Claude Code.
  const runtime = RUNTIMES[runtimeChoice] || RUNTIMES['1'];

  // Ask scope
  console.log('');
  console.log(' Where would you like to install?');
  console.log('');
  console.log(` 1) Global (~/${path.relative(HOME, runtime.dir)}) - available in all projects`);
  console.log(' 2) Local (./.claude) - this project only');
  console.log('');
  const scopeChoice = await ask(' Choice [1]: ', '1');
  // Only an explicit "2" selects local; anything else (including default) is global.
  const isGlobal = scopeChoice !== '2';
  // NOTE(review): local installs always target ./.claude even when the
  // OpenCode runtime was chosen — confirm this is intended.
  const baseDir = isGlobal ? runtime.dir : path.join(process.cwd(), '.claude');
  const qaaDir = isGlobal ? path.join(runtime.dir, 'qaa') : path.join(process.cwd(), '.claude', 'qaa');

  console.log('');
  console.log(` Installing for ${runtime.name} to ${isGlobal ? '~/' + path.relative(HOME, runtime.dir) : './.claude'}`);
  console.log('');

  // Install commands (slash commands shared at the runtime level).
  const commandsSrc = path.join(ROOT, '.claude', 'commands');
  const commandsDest = path.join(baseDir, 'commands');
  const cmdCount = copyDir(commandsSrc, commandsDest);
  ok(`Installed ${cmdCount} slash commands`);

  // Install skills
  const skillsSrc = path.join(ROOT, '.claude', 'skills');
  const skillsDest = path.join(baseDir, 'skills');
  const skillCount = copyDir(skillsSrc, skillsDest);
  ok(`Installed ${skillCount} skill files (6 skills)`);

  // Install agents (into the qaa/ subtree, not the shared runtime dirs).
  const agentsSrc = path.join(ROOT, 'agents');
  const agentsDest = path.join(qaaDir, 'agents');
  const agentCount = copyDir(agentsSrc, agentsDest);
  ok(`Installed ${agentCount} agent definitions`);

  // Install templates
  const templatesSrc = path.join(ROOT, 'templates');
  const templatesDest = path.join(qaaDir, 'templates');
  const templateCount = copyDir(templatesSrc, templatesDest);
  ok(`Installed ${templateCount} templates`);

  // Install bin (CLI tooling). The installer itself is removed from the copy;
  // the unlink is best-effort — a missing file is ignored.
  const binSrc = path.join(ROOT, 'bin');
  const binDest = path.join(qaaDir, 'bin');
  const binCount = copyDir(binSrc, binDest);
  try { fs.unlinkSync(path.join(binDest, 'install.cjs')); } catch {}
  ok(`Installed CLI tooling`);

  // Install CLAUDE.md
  copyFile(path.join(ROOT, 'CLAUDE.md'), path.join(qaaDir, 'CLAUDE.md'));
  ok('Installed QA standards (CLAUDE.md)');

  // Write version marker so future runs can detect the installed release.
  fs.writeFileSync(path.join(qaaDir, 'VERSION'), VERSION);
  ok(`Wrote VERSION (${VERSION})`);

  // Merge settings: union our permission allow-list into any existing
  // settings.json, de-duplicating entries. Other keys in the existing file
  // are preserved untouched.
  const settingsSrc = path.join(ROOT, '.claude', 'settings.json');
  const settingsDest = path.join(baseDir, 'settings.json');
  if (fs.existsSync(settingsSrc)) {
    let existing = {};
    if (fs.existsSync(settingsDest)) {
      // An unparsable existing file is treated as empty rather than aborting.
      try { existing = JSON.parse(fs.readFileSync(settingsDest, 'utf8')); } catch {}
    }
    const qaaSettings = JSON.parse(fs.readFileSync(settingsSrc, 'utf8'));
    if (qaaSettings.permissions) {
      existing.permissions = existing.permissions || {};
      existing.permissions.allow = [...new Set([
        ...(existing.permissions.allow || []),
        ...(qaaSettings.permissions.allow || [])
      ])];
    }
    fs.writeFileSync(settingsDest, JSON.stringify(existing, null, 2));
    ok('Merged permissions into settings.json');
  }

  // Done
  // NOTE(review): binCount still includes the install.cjs copy deleted above,
  // so `total` can overstate the installed-file count by one — confirm.
  const total = cmdCount + skillCount + agentCount + templateCount + binCount;
  console.log('');
  console.log(` \x1b[32m✓ Done!\x1b[0m Installed ${total} files.`);
  console.log('');
  console.log(' Open Claude Code in any project and run:');
  console.log('');
  console.log(' \x1b[1m/qa-start\x1b[0m Full QA pipeline (multi-agent)');
  console.log(' \x1b[1m/qa-analyze\x1b[0m Analysis only');
  console.log(' \x1b[1m/create-test\x1b[0m Tests for a feature');
  console.log(' \x1b[1m/qa-from-ticket\x1b[0m Tests from a Jira/Linear ticket');
  console.log(' \x1b[1m/qa-validate\x1b[0m Validate existing tests');
  console.log('');
  console.log(' 14 commands + 6 skills + 8 agents ready.');
  console.log('');
}
|
|
172
|
+
|
|
173
|
+
// Kick off the installer; any unhandled failure prints a short message
// and exits with a non-zero status.
main().catch(function onFatal(err) {
  console.error('Installation failed:', err.message);
  process.exit(1);
});
|