@nhonh/qabot 0.5.1 → 0.6.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 Hoài Nhớ
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,406 @@
1
+ <p align="center">
2
+ <img src="https://img.shields.io/badge/QABot-AI%20Powered%20QA-7c3aed?style=for-the-badge&logo=data:image/svg+xml;base64,PHN2ZyB4bWxucz0iaHR0cDovL3d3dy53My5vcmcvMjAwMC9zdmciIHZpZXdCb3g9IjAgMCAyNCAyNCIgZmlsbD0id2hpdGUiPjxwYXRoIGQ9Ik05LjQgMTYuNkw0LjggMTJsNC42LTQuNkw4IDZsLTYgNiA2IDYgMS40LTEuNHptNS4yIDBsNC42LTQuNi00LjYtNC42TDE2IDZsNiA2LTYgNi0xLjQtMS40eiIvPjwvc3ZnPg==&logoColor=white" alt="QABot" />
3
+ </p>
4
+
5
+ <h1 align="center">QABot</h1>
6
+
7
+ <p align="center">
8
+ <strong>AI-Powered Universal QA Automation Tool</strong>
9
+ </p>
10
+
11
+ <p align="center">
12
+ Import any project. AI analyzes structure. Run tests across all layers. Zero config.
13
+ </p>
14
+
15
+ <p align="center">
16
+ <a href="https://www.npmjs.com/package/@nhonh/qabot"><img src="https://img.shields.io/npm/v/@nhonh/qabot?color=cb3837&logo=npm" alt="npm version" /></a>
17
+ <a href="https://www.npmjs.com/package/@nhonh/qabot"><img src="https://img.shields.io/npm/dw/@nhonh/qabot?color=cb3837&logo=npm&label=downloads%2Fweek" alt="npm downloads/week" /></a>
18
+ <a href="https://www.npmjs.com/package/@nhonh/qabot"><img src="https://img.shields.io/npm/dt/@nhonh/qabot?color=cb3837&logo=npm&label=total%20downloads" alt="total downloads" /></a>
19
+ <a href="https://github.com/hoainho/qabot/blob/main/LICENSE"><img src="https://img.shields.io/npm/l/@nhonh/qabot?color=blue" alt="license" /></a>
20
+ <a href="https://nodejs.org"><img src="https://img.shields.io/node/v/@nhonh/qabot?color=339933&logo=node.js&logoColor=white" alt="node version" /></a>
21
+ <a href="https://github.com/hoainho/qabot"><img src="https://img.shields.io/github/stars/hoainho/qabot?style=social" alt="GitHub stars" /></a>
22
+ </p>
23
+
24
+ ---
25
+
26
+ ## Why QABot?
27
+
28
+ Testing is the most critical part of software development, but setting up and maintaining test infrastructure across frameworks, layers, and environments is painfully repetitive. **QABot eliminates that friction.**
29
+
30
+ Point QABot at **any** project — React, Next.js, Vue, Angular, .NET, Python, or Node.js — and it will **auto-detect** your framework, test runners, features, and environments. Then it runs tests across **unit, integration, and E2E layers** from a single CLI. Need test cases? QABot's AI engine analyzes your code and **generates production-ready tests** using OpenAI, Claude, Gemini, or 4 other providers.
31
+
32
+ ---
33
+
34
+ ## Features
35
+
36
+ - **AI-Powered Test Analysis** — AI analyzes your source code and generates comprehensive test cases. Supports OpenAI, Anthropic Claude, Google Gemini, DeepSeek, Groq, Ollama (local), and custom proxy endpoints.
37
+
38
+ - **Smart Project Detection** — Auto-detects project type (React, Next.js, Vue, Angular, .NET, Python, Node.js), framework, bundler, state management, and auth provider. Zero configuration needed.
39
+
40
+ - **Universal Test Runner** — Runs Jest, Vitest, Playwright, Cypress, pytest, xUnit, and dotnet-test from a single CLI. No more switching between different test commands.
41
+
42
+ - **Beautiful HTML Reports** — Generates interactive HTML + JSON reports with pass/fail/skip statistics, duration tracking, and historical run comparison.
43
+
44
+ - **E2E Test Generation** — AI generates Playwright E2E tests from feature descriptions with automatic error detection and self-healing fix capabilities.
45
+
46
+ - **Multi-Layer Testing** — Run unit, integration, and E2E tests separately or together. Filter by layer with `--layer unit` or run everything at once.
47
+
48
+ - **Environment Management** — Detect and switch between local, staging, and production environments. Each environment gets its own config and credentials.
49
+
50
+ - **Use Case Driven** — Import use cases from Markdown, Gherkin Feature files, or plain text docs. Map them directly to test coverage.
51
+
52
+ - **Extensible Runner System** — Plugin-based architecture makes it easy to add support for new test frameworks. Each runner extends a common `BaseRunner` interface.
53
+
54
+ - **Multi-AI Provider** — Switch AI providers instantly. Configure during `qabot init` or change anytime in `qabot.config.json`. Supports 7 providers including local Ollama.
55
+
56
+ ---
57
+
58
+ ## Quick Start
59
+
60
+ ### Install
61
+
62
+ ```bash
63
+ # Install globally
64
+ npm install -g @nhonh/qabot
65
+
66
+ # Or use npx (no install needed)
67
+ npx @nhonh/qabot --help
68
+ ```
69
+
70
+ ### Initialize
71
+
72
+ ```bash
73
+ cd your-project
74
+ qabot init
75
+ ```
76
+
77
+ QABot scans your project and generates a `qabot.config.json`:
78
+
79
+ ```
80
+ ╔═══════════════════════════════════╗
81
+ ║ QABot - Project Analysis ║
82
+ ╠═══════════════════════════════════╣
83
+ ║ Project: my-react-app ║
84
+ ║ Type: react-spa ║
85
+ ║ Framework: React 18 ║
86
+ ║ Runners: Jest, Playwright ║
87
+ ║ Features: 12 detected ║
88
+ ║ Unit Tests: 45 files ║
89
+ ║ E2E Tests: 8 files ║
90
+ ╚═══════════════════════════════════╝
91
+ ```
92
+
93
+ ### Run Tests
94
+
95
+ ```bash
96
+ # Run all tests
97
+ qabot run
98
+
99
+ # Run tests for a specific feature
100
+ qabot run auth
101
+
102
+ # Run only unit tests
103
+ qabot run --layer unit
104
+
105
+ # Run E2E tests against staging
106
+ qabot run --layer e2e --env staging
107
+
108
+ # Run with coverage
109
+ qabot run --coverage
110
+
111
+ # Verbose output
112
+ qabot run --verbose
113
+ ```
114
+
115
+ ### Generate Tests with AI
116
+
117
+ ```bash
118
+ # Generate test cases for a feature
119
+ qabot generate login
120
+
121
+ # Generate E2E tests
122
+ qabot generate checkout --layer e2e
123
+ ```
124
+
125
+ ---
126
+
127
+ ## CLI Commands
128
+
129
+ | Command | Description |
130
+ |---|---|
131
+ | `qabot init` | Analyze project and generate configuration |
132
+ | `qabot init -y` | Initialize with all defaults (no prompts) |
133
+ | `qabot run [feature]` | Run tests for all or a specific feature |
134
+ | `qabot run --layer <layers>` | Run specific test layers (unit, integration, e2e) |
135
+ | `qabot test` | Run tests with advanced options |
136
+ | `qabot list` | Show detected features, test frameworks, and coverage |
137
+ | `qabot generate <feature>` | AI-generate test cases for a feature |
138
+ | `qabot report` | Open the latest HTML test report |
139
+ | `qabot auth` | Configure authentication for E2E tests |
140
+
141
+ ---
142
+
143
+ ## Supported Frameworks
144
+
145
+ | Category | Supported |
146
+ |---|---|
147
+ | **Frontend** | React, Next.js, Vue, Angular |
148
+ | **Backend** | Node.js, .NET, Python |
149
+ | **Test Runners** | Jest, Vitest, Playwright, Cypress, pytest, xUnit, dotnet-test |
150
+ | **AI Providers** | OpenAI, Anthropic Claude, Google Gemini, DeepSeek, Groq, Ollama, Custom Proxy |
151
+ | **Languages** | JavaScript, TypeScript, Python, C# |
152
+ | **Package Managers** | npm, yarn, pnpm, pip, dotnet |
153
+
154
+ ---
155
+
156
+ ## Multi-Layer Testing
157
+
158
+ QABot organizes tests into three distinct layers, each serving a specific purpose:
159
+
160
+ ### Unit Tests
161
+ Validate individual functions, components, and modules in isolation. Fast feedback loop, run in milliseconds.
162
+
163
+ ```bash
164
+ qabot run --layer unit
165
+ ```
166
+
167
+ ### Integration Tests
168
+ Test how modules interact with each other, including API calls, database queries, and service communication.
169
+
170
+ ```bash
171
+ qabot run --layer integration
172
+ ```
173
+
174
+ ### E2E Tests
175
+ Full user journey simulation in a real browser. Tests the complete application stack from the user's perspective.
176
+
177
+ ```bash
178
+ qabot run --layer e2e --env staging
179
+ ```
180
+
181
+ ### Test Priority System
182
+
183
+ QABot assigns priorities to help you focus on what matters most:
184
+
185
+ | Priority | Meaning | Example |
186
+ |---|---|---|
187
+ | **P0** | Critical — Must pass before deploy | Login, Payment, Core API |
188
+ | **P1** | High — Important user journeys | Registration, Search, Profile |
189
+ | **P2** | Medium — Standard features | Settings, Notifications |
190
+ | **P3** | Low — Edge cases and cosmetic | Tooltips, Animations |
191
+
192
+ ---
193
+
194
+ ## AI Test Generation
195
+
196
+ QABot's AI engine doesn't just suggest test names — it generates **complete, runnable test code**.
197
+
198
+ ### How It Works
199
+
200
+ 1. **Analyze** — AI reads your source code and understands component behavior, API contracts, and edge cases
201
+ 2. **Generate** — Produces full test files compatible with your test runner (Jest, Vitest, Playwright, etc.)
202
+ 3. **Auto-Fix** — If generated tests fail on first run, AI reads the error output and automatically fixes the code
203
+ 4. **Batch Support** — Handles large codebases by batching test generation (up to 8 test cases per batch)
204
+
205
+ ### Supported AI Providers
206
+
207
+ | Provider | Model | Setup |
208
+ |---|---|---|
209
+ | **OpenAI** | GPT-4o | `OPENAI_API_KEY` |
210
+ | **Anthropic** | Claude Sonnet 4 | `ANTHROPIC_API_KEY` |
211
+ | **Google** | Gemini 2.5 Flash | `GEMINI_API_KEY` |
212
+ | **DeepSeek** | DeepSeek Chat | `DEEPSEEK_API_KEY` |
213
+ | **Groq** | Llama 3.3 70B | `GROQ_API_KEY` |
214
+ | **Ollama** | Llama 3 (local) | No key needed |
215
+ | **Custom Proxy** | Any model | `PROXY_API_KEY` + `baseUrl` |
216
+
217
+ Configure during initialization:
218
+
219
+ ```bash
220
+ qabot init
221
+ # Select your AI provider when prompted
222
+ ```
223
+
224
+ Or set in `qabot.config.json`:
225
+
226
+ ```json
227
+ {
228
+ "ai": {
229
+ "provider": "anthropic",
230
+ "model": "claude-sonnet-4-20250514",
231
+ "apiKeyEnv": "ANTHROPIC_API_KEY"
232
+ }
233
+ }
234
+ ```
235
+
236
+ ---
237
+
238
+ ## Configuration
239
+
240
+ QABot uses a `qabot.config.json` file in your project root. Generated automatically by `qabot init`.
241
+
242
+ ```json
243
+ {
244
+ "project": {
245
+ "name": "my-app",
246
+ "type": "react-spa"
247
+ },
248
+ "environments": {
249
+ "local": { "url": "http://localhost:3000" },
250
+ "staging": { "url": "https://staging.myapp.com" }
251
+ },
252
+ "layers": {
253
+ "unit": {
254
+ "runner": "jest",
255
+ "command": "npx jest {pattern}",
256
+ "testMatch": "**/*.test.*"
257
+ },
258
+ "e2e": {
259
+ "runner": "playwright",
260
+ "command": "npx playwright test {pattern}",
261
+ "testDir": "e2e/tests"
262
+ }
263
+ },
264
+ "features": {
265
+ "auth": { "src": "src/features/auth", "priority": "P0" },
266
+ "dashboard": { "src": "src/pages/Dashboard", "priority": "P1" },
267
+ "settings": { "src": "src/pages/Settings", "priority": "P2" }
268
+ },
269
+ "reporting": {
270
+ "outputDir": "./qabot-reports",
271
+ "openAfterRun": true,
272
+ "history": true,
273
+ "formats": ["html", "json"]
274
+ },
275
+ "ai": {
276
+ "provider": "openai",
277
+ "model": "gpt-4o",
278
+ "apiKeyEnv": "OPENAI_API_KEY"
279
+ },
280
+ "useCases": {
281
+ "dir": "./docs/use-cases",
282
+ "formats": ["md", "feature", "txt"]
283
+ }
284
+ }
285
+ ```
286
+
287
+ ---
288
+
289
+ ## Reports
290
+
291
+ QABot generates beautiful interactive reports after each test run:
292
+
293
+ - **HTML Report** — Visual dashboard with pass/fail charts, duration breakdown, and test details
294
+ - **JSON Report** — Machine-readable results for CI/CD integration
295
+ - **Historical Tracking** — Compare results across runs to spot regressions
296
+ - **Auto-Open** — Reports open in your browser automatically (configurable)
297
+
298
+ View reports anytime:
299
+
300
+ ```bash
301
+ qabot report
302
+ ```
303
+
304
+ Reports are saved to `./qabot-reports/` organized by date and feature.
305
+
306
+ ---
307
+
308
+ ## Environment Variables
309
+
310
+ Create a `.env` file in your project root (see `.env.example`):
311
+
312
+ ```env
313
+ # AI Providers (set the one you use)
314
+ OPENAI_API_KEY=sk-your-key-here
315
+ ANTHROPIC_API_KEY=sk-ant-your-key-here
316
+ GEMINI_API_KEY=your-gemini-key-here
317
+ DEEPSEEK_API_KEY=your-deepseek-key-here
318
+ GROQ_API_KEY=gsk_your-groq-key-here
319
+ PROXY_API_KEY=your-proxy-key-here
320
+
321
+ # E2E Test Credentials
322
+ E2E_TEST_EMAIL=testuser@example.com
323
+ E2E_TEST_PASSWORD=your-test-password
324
+ ```
325
+
326
+ ---
327
+
328
+ ## Architecture
329
+
330
+ ```
331
+ qabot/
332
+ ├── bin/
333
+ │ └── qabot.js # CLI entry point
334
+ ├── src/
335
+ │ ├── ai/ # AI engine & prompt builders
336
+ │ │ ├── ai-engine.js # Multi-provider AI client
337
+ │ │ ├── prompt-builder.js # Test analysis/generation prompts
338
+ │ │ └── usecase-parser.js # Use case file parser
339
+ │ ├── analyzers/ # Project analysis
340
+ │ │ ├── project-analyzer.js
341
+ │ │ ├── test-detector.js
342
+ │ │ ├── feature-detector.js
343
+ │ │ └── env-detector.js
344
+ │ ├── cli/commands/ # CLI command handlers
345
+ │ ├── core/ # Config, constants, logger
346
+ │ ├── e2e/ # E2E test generation
347
+ │ ├── executor/ # Test execution engine
348
+ │ ├── reporter/ # HTML/JSON report generation
349
+ │ ├── runners/ # Test framework runners
350
+ │ │ ├── base-runner.js # Runner interface
351
+ │ │ ├── jest-runner.js
352
+ │ │ ├── vitest-runner.js
353
+ │ │ ├── playwright-runner.js
354
+ │ │ ├── pytest-runner.js
355
+ │ │ └── dotnet-runner.js
356
+ │ └── utils/
357
+ ├── templates/ # Config templates
358
+ └── tests/ # QABot's own test suite
359
+ ```
360
+
361
+ ---
362
+
363
+ ## Contributing
364
+
365
+ Contributions are welcome! Here's how to get started:
366
+
367
+ 1. Fork the repository
368
+ 2. Create a feature branch: `git checkout -b feature/my-feature`
369
+ 3. Make your changes
370
+ 4. Run the test suite: `npm test`
371
+ 5. Commit your changes: `git commit -m "feat: add my feature"`
372
+ 6. Push to the branch: `git push origin feature/my-feature`
373
+ 7. Open a Pull Request
374
+
375
+ ### Development
376
+
377
+ ```bash
378
+ git clone https://github.com/hoainho/qabot.git
379
+ cd qabot
380
+ npm install
381
+ npm test
382
+ npm run dev
383
+ ```
384
+
385
+ ---
386
+
387
+ ## Roadmap
388
+
389
+ - [ ] GitHub Actions integration (auto-run on PR)
390
+ - [ ] Slack/Discord notifications for test results
391
+ - [ ] Visual regression testing support
392
+ - [ ] Test coverage mapping and gap analysis
393
+ - [ ] Custom reporter plugins
394
+ - [ ] CI/CD pipeline templates
395
+
396
+ ---
397
+
398
+ ## License
399
+
400
+ [MIT](./LICENSE) - Copyright (c) 2025 Hoài Nhớ
401
+
402
+ ---
403
+
404
+ <p align="center">
405
+ Made with ❤️ by <a href="https://github.com/hoainho">Hoai Nho</a>
406
+ </p>
package/bin/qabot.js CHANGED
@@ -8,6 +8,7 @@ import { registerListCommand } from "../src/cli/commands/list.js";
8
8
  import { registerGenerateCommand } from "../src/cli/commands/generate.js";
9
9
  import { registerReportCommand } from "../src/cli/commands/report.js";
10
10
  import { registerAuthCommand } from "../src/cli/commands/auth.js";
11
+ import { registerTestCommand } from "../src/cli/commands/test.js";
11
12
 
12
13
  const program = new Command();
13
14
 
@@ -18,6 +19,7 @@ program
18
19
 
19
20
  registerInitCommand(program);
20
21
  registerRunCommand(program);
22
+ registerTestCommand(program);
21
23
  registerListCommand(program);
22
24
  registerGenerateCommand(program);
23
25
  registerReportCommand(program);
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@nhonh/qabot",
3
- "version": "0.5.1",
3
+ "version": "0.6.1",
4
4
  "description": "AI-powered universal QA automation tool. Import any project, AI analyzes and runs tests across all layers.",
5
5
  "main": "src/index.js",
6
6
  "bin": {
@@ -28,8 +28,16 @@
28
28
  "test-report",
29
29
  "cli"
30
30
  ],
31
- "author": "GearGames",
31
+ "author": "Hoài Nhớ <nhoxtvt@gmail.com> (https://github.com/hoainho)",
32
32
  "license": "MIT",
33
+ "homepage": "https://hoainho.github.io/qabot",
34
+ "repository": {
35
+ "type": "git",
36
+ "url": "git+https://github.com/hoainho/qabot.git"
37
+ },
38
+ "bugs": {
39
+ "url": "https://github.com/hoainho/qabot/issues"
40
+ },
33
41
  "engines": {
34
42
  "node": ">=18.0.0"
35
43
  },
@@ -0,0 +1,467 @@
1
+ import chalk from "chalk";
2
+ import ora from "ora";
3
+ import path from "node:path";
4
+ import { writeFile, readFile } from "node:fs/promises";
5
+ import { execSync } from "node:child_process";
6
+ import { logger } from "../../core/logger.js";
7
+ import { loadConfig } from "../../core/config.js";
8
+ import { analyzeProject } from "../../analyzers/project-analyzer.js";
9
+ import { AIEngine } from "../../ai/ai-engine.js";
10
+ import { UseCaseParser } from "../../ai/usecase-parser.js";
11
+ import {
12
+ ensurePlaywright,
13
+ ensureE2EStructure,
14
+ writePlaywrightConfig,
15
+ writeAuthHelper,
16
+ } from "../../e2e/playwright-setup.js";
17
+ import { generateE2ESpec, fixE2ESpec } from "../../e2e/e2e-generator.js";
18
+ import {
19
+ findFiles,
20
+ safeReadFile,
21
+ ensureDir,
22
+ fileExists,
23
+ } from "../../utils/file-utils.js";
24
+ import { ReportGenerator } from "../../reporter/report-generator.js";
25
+
26
+ const MAX_FIX_ATTEMPTS = 3;
27
+
28
/**
 * Register the `qabot test [feature]` CLI command on the given commander
 * program. The command drives Playwright-based E2E automation: optional AI
 * spec generation, execution, auto-fix, and reporting (see runTest).
 *
 * @param {import("commander").Command} program - Root commander program.
 */
export function registerTestCommand(program) {
  const testCommand = program.command("test [feature]");
  testCommand.description("Run E2E automation tests (Playwright)");
  testCommand.option("-e, --env <environment>", "Target environment", "default");
  testCommand.option("--headed", "Run browser in headed mode (visible)");
  testCommand.option("--no-fix", "Skip auto-fix on failure");
  testCommand.option("--skip-gen", "Skip test generation, run existing specs only");
  testCommand.option("-u, --url <url>", "Target URL (overrides environment config)");
  testCommand.option("--use-cases <dir>", "Directory containing use case documents");
  testCommand.option("--model <model>", "AI model to use");
  testCommand.option("-d, --dir <path>", "Project directory", process.cwd());
  testCommand.action(runTest);
}
42
+
43
/**
 * Handler for `qabot test [feature]`.
 *
 * Flow: load config → set up Playwright scaffolding → either run existing
 * specs (--skip-gen) or AI-generate a spec for the feature, then run it
 * with up to MAX_FIX_ATTEMPTS AI-assisted fix/retry cycles (unless --no-fix),
 * and finally emit an HTML/JSON report.
 *
 * @param {string|undefined} feature - Feature name from the CLI (optional).
 * @param {object} options - Parsed commander options (env, headed, fix,
 *   skipGen, url, useCases, model, dir).
 */
async function runTest(feature, options) {
  const projectDir = options.dir;
  const { config, isEmpty } = await loadConfig(projectDir);

  // Without a config there is nothing to target; bail early.
  if (isEmpty) {
    logger.warn("No qabot.config.json found. Run `qabot init` first.");
    return;
  }

  // NOTE(review): `profile` is never used below — presumably analyzeProject
  // has needed side effects or this is leftover; confirm before removing.
  const profile = await analyzeProject(projectDir);
  // URL resolution precedence: --url flag > selected env > default env > localhost.
  const envConfig =
    config.environments?.[options.env] || config.environments?.default;
  const baseUrl = options.url || envConfig?.url || "http://localhost:3000";

  logger.header("QABot \u2014 E2E Automation Test");
  logger.blank();
  logger.info(`Feature: ${chalk.bold(feature || "all")}`);
  logger.info(`Target: ${chalk.bold.cyan(baseUrl)}`);
  logger.info(`Environment: ${chalk.bold(options.env)}`);
  logger.info(
    `Browser: ${options.headed ? "Headed (visible)" : "Headless"}`,
  );
  logger.blank();
  console.log(chalk.dim(` ${"\u2500".repeat(50)}`));

  // Ensure Playwright is installed and the e2e/ directory, config, and auth
  // helper exist before any spec is generated or executed.
  const spinner = ora("Setting up Playwright...").start();
  try {
    await ensurePlaywright(projectDir);
    await ensureE2EStructure(projectDir);
    await writePlaywrightConfig(projectDir, config);
    await writeAuthHelper(projectDir, config);
    spinner.succeed("Playwright ready");
  } catch (err) {
    spinner.fail(`Playwright setup failed: ${err.message}`);
    return;
  }

  const featureConfig = feature ? config.features?.[feature] : null;
  const specDir = path.join(projectDir, "e2e", "tests");
  let specFile;

  if (options.skipGen) {
    // --skip-gen: run specs already on disk; pick one matching the feature
    // name (case-insensitive substring) or run all when no feature given.
    const existing = await findFiles(specDir, "*.spec.js");
    if (existing.length === 0) {
      logger.error(
        "No existing E2E specs found. Run without --skip-gen to generate.",
      );
      return;
    }
    specFile = feature
      ? existing.find((f) => f.toLowerCase().includes(feature.toLowerCase()))
      : null;
    logger.info(
      `Running ${specFile ? path.basename(specFile) : "all"} existing specs`,
    );
  } else {
    // Generation path requires a concrete feature to describe to the AI.
    if (!feature) {
      logger.error(
        "Feature name required for test generation. Usage: qabot test <feature>",
      );
      return;
    }

    // Copy the AI config so a --model override never mutates the loaded config.
    const aiConfig = { ...config.ai };
    if (options.model) aiConfig.model = options.model;
    const ai = new AIEngine(aiConfig);

    if (!ai.isAvailable()) {
      logger.error("AI not configured. Run `qabot auth` first.");
      return;
    }

    const spinner2 = ora(
      "AI is analyzing feature and generating E2E tests...",
    ).start();

    // Gather up to 8 non-test source files for the feature as AI context.
    let sourceCode = "";
    if (featureConfig?.src) {
      const sourceFiles = await findFiles(
        projectDir,
        `${featureConfig.src}/**/*.{js,jsx,ts,tsx}`,
      );
      const filtered = sourceFiles.filter(
        (f) => !f.includes("/tests/") && !f.includes(".test."),
      );
      sourceCode = (
        await Promise.all(filtered.slice(0, 8).map((f) => safeReadFile(f)))
      )
        .filter(Boolean)
        .join("\n\n---\n\n");
    }

    // Optionally parse use-case documents (md/feature/txt); unparseable
    // files are skipped silently — use cases are best-effort context.
    let useCases = [];
    const useCaseDir = options.useCases || config.useCases?.dir;
    if (useCaseDir) {
      const parser = new UseCaseParser();
      const ucFiles = await findFiles(
        projectDir,
        `${useCaseDir}/**/*.{md,feature,txt}`,
      );
      for (const f of ucFiles) {
        try {
          useCases.push(...(await parser.parse(f)));
        } catch {
          /* skip */
        }
      }
    }

    const route = featureConfig?.route || guessRoute(feature);

    try {
      const spec = await generateE2ESpec(ai, feature, {
        baseUrl,
        sourceCode,
        route,
        authProvider: config.auth?.provider || "none",
        useCases,
      });

      specFile = path.join(specDir, `${feature}.spec.js`);
      await writeFile(specFile, spec, "utf-8");
      spinner2.succeed(
        `E2E spec generated: ${chalk.underline(path.relative(projectDir, specFile))}`,
      );
    } catch (err) {
      spinner2.fail(`E2E generation failed: ${err.message}`);
      return;
    }

    // Auto-fix loop (default on; disabled via --no-fix → options.fix === false):
    // run the generated spec, and on failure feed the error back to the AI to
    // rewrite the spec, up to MAX_FIX_ATTEMPTS runs. Every success/exhaustion
    // path returns from inside this loop after reporting.
    if (options.fix !== false) {
      const aiForFix = ai;

      for (let attempt = 1; attempt <= MAX_FIX_ATTEMPTS; attempt++) {
        logger.blank();
        logger.step(
          attempt,
          MAX_FIX_ATTEMPTS,
          `Running E2E tests (attempt ${attempt})...`,
        );

        const {
          exitCode,
          error: testError,
          stdout,
        } = runPlaywright(projectDir, specFile, options);

        if (exitCode === 0) {
          printPlaywrightSummary(stdout);
          logger.blank();
          logger.success(`E2E tests passed on attempt ${attempt}!`);
          await generateE2EReport(
            projectDir,
            config,
            feature,
            options.env,
            baseUrl,
          );
          return;
        }

        // Last attempt failed: report, hint at manual debugging, stop.
        if (attempt >= MAX_FIX_ATTEMPTS) {
          printPlaywrightSummary(stdout || testError);
          logger.blank();
          logger.warn(
            `E2E tests still failing after ${MAX_FIX_ATTEMPTS} attempts.`,
          );
          logger.dim(` Review: cat ${path.relative(projectDir, specFile)}`);
          logger.dim(` Debug: qabot test ${feature} --headed`);
          await generateE2EReport(
            projectDir,
            config,
            feature,
            options.env,
            baseUrl,
          );
          return;
        }

        logger.dim(` ${extractFirstError(testError || stdout)}`);
        logger.step(attempt, MAX_FIX_ATTEMPTS, "AI is fixing E2E test...");

        // Ask the AI to repair the spec in place; if the fix call itself
        // fails, break out — the fall-through below re-runs once more and
        // reports whatever state the spec is in.
        try {
          const currentCode = await readFile(specFile, "utf-8");
          const fixedCode = await fixE2ESpec(
            aiForFix,
            currentCode,
            testError || stdout,
            {
              baseUrl,
              authProvider: config.auth?.provider,
            },
          );
          await writeFile(specFile, fixedCode, "utf-8");
          logger.dim(" Fix applied. Re-running...");
        } catch (err) {
          logger.warn(` AI fix failed: ${err.message}`);
          break;
        }
      }
    }
  }

  // Reached via --skip-gen, --no-fix, or a broken AI-fix loop. A missing
  // specFile here is only valid in --skip-gen mode (meaning "run all specs").
  if (!specFile && !options.skipGen) return;

  logger.blank();
  logger.info("Running E2E tests...");

  const {
    exitCode,
    stdout,
    error: testError,
  } = runPlaywright(projectDir, specFile, options);
  printPlaywrightSummary(stdout || testError);
  await generateE2EReport(
    projectDir,
    config,
    feature || "all",
    options.env,
    baseUrl,
  );

  // Propagate test failure to the shell for CI consumers.
  if (exitCode !== 0) process.exit(1);
}
267
+
268
/**
 * Execute Playwright synchronously for the project's e2e config.
 *
 * Builds `npx playwright test [spec] --config=... --project=chromium
 * [--headed]` and runs it via execSync with a 120s timeout. Colors are
 * disabled (FORCE_COLOR=0) so output can be parsed by the summary printer,
 * and E2E_ENV carries the selected environment into the Playwright config.
 *
 * @param {string} projectDir - Project root (also the cwd for the run).
 * @param {string|undefined} specFile - Absolute spec path, or undefined to run all specs.
 * @param {object} options - CLI options (reads `headed` and `env`).
 * @returns {{exitCode: number, stdout: string, error: string}} Captured result;
 *   exitCode 0 on success, Playwright's exit status (or 1) on failure.
 */
function runPlaywright(projectDir, specFile, options) {
  const configPath = path.join(projectDir, "e2e", "playwright.config.js");
  const parts = ["npx", "playwright", "test"];
  // Quote the spec path: the command line is joined with spaces and run
  // through a shell, so an unquoted path containing spaces would be split
  // into separate arguments (the config path below is quoted for the same
  // reason).
  if (specFile)
    parts.push(`"${path.relative(path.join(projectDir, "e2e"), specFile)}"`);
  parts.push(`--config="${configPath}"`);
  parts.push("--project=chromium");
  if (options.headed) parts.push("--headed");

  const env = {
    ...process.env,
    E2E_ENV: options.env || "default",
    FORCE_COLOR: "0",
  };

  try {
    const stdout = execSync(parts.join(" "), {
      cwd: projectDir,
      stdio: "pipe",
      timeout: 120000,
      env,
    }).toString();
    return { exitCode: 0, stdout, error: "" };
  } catch (err) {
    // execSync throws on non-zero exit; recover the captured streams so the
    // caller can print a summary and feed the error to the AI fixer.
    return {
      exitCode: err.status || 1,
      stdout: err.stdout?.toString() || "",
      error: err.stderr?.toString() || err.stdout?.toString() || "",
    };
  }
}
299
+
300
/**
 * Print a colorized per-line summary of Playwright output.
 *
 * Each non-empty line is classified by simple substring heuristics:
 * pass markers (✓ / "passed") → success, failure markers (✗ / "failed" /
 * "Error") → error, structural/progress lines (rules, "Running", "test")
 * → dim. Anything else is dropped.
 *
 * @param {string} output - Raw stdout/stderr from a Playwright run (may be empty).
 */
function printPlaywrightSummary(output) {
  if (!output) return;
  for (const rawLine of output.split("\n")) {
    const text = rawLine.trim();
    if (!text) continue;
    const looksPassed = text.includes("\u2713") || text.includes("passed");
    const looksFailed =
      text.includes("\u2717") ||
      text.includes("failed") ||
      text.includes("Error");
    if (looksPassed) {
      logger.success(text);
    } else if (looksFailed) {
      logger.error(text);
    } else if (
      text.includes("─") ||
      text.includes("Running") ||
      text.includes("test")
    ) {
      logger.dim(text);
    }
  }
}
323
+
324
/**
 * Pull a short, single-line error summary out of raw test output.
 *
 * Scans non-blank lines for the first one mentioning a typical Playwright
 * failure marker ("Error", "expect", "Timeout", "locator"); falls back to
 * the first non-blank line. The result is trimmed and capped at 150 chars.
 *
 * @param {string|null|undefined} output - Raw test output.
 * @returns {string} A one-line summary, "Unknown error" for empty input,
 *   or "Test failed" when the output has no non-blank lines.
 */
function extractFirstError(output) {
  if (!output) return "Unknown error";
  const markers = ["Error", "expect", "Timeout", "locator"];
  const candidates = output.split("\n").filter((line) => line.trim());
  const flagged = candidates.find((line) =>
    markers.some((marker) => line.includes(marker)),
  );
  const chosen = flagged ?? candidates[0];
  return chosen ? chosen.trim().slice(0, 150) : "Test failed";
}
340
+
341
/**
 * Map a feature name to a best-guess application route.
 *
 * Known feature names resolve through a fixed table; anything else falls
 * back to "/<featureName>".
 *
 * @param {string} featureName - CLI feature name (e.g. "auth", "lobby").
 * @returns {string} Route path beginning with "/".
 */
function guessRoute(featureName) {
  const routes = {
    home: "/",
    lobby: "/lobby",
    auth: "/signin",
    account: "/account",
    redemption: "/redemption",
    "refer-a-friend": "/refer-a-friend",
    faq: "/faq",
    promotions: "/promotions",
    "game-details": "/game-details",
    "game-zone": "/game-zone",
    "privacy-policy": "/privacy-policy",
    "terms-of-service": "/terms-of-service",
  };
  // Object.hasOwn guards against inherited Object.prototype keys: a bare
  // `routes[featureName] ||` lookup would return e.g. the Object constructor
  // function for featureName "constructor" instead of "/constructor".
  return Object.hasOwn(routes, featureName)
    ? routes[featureName]
    : `/${featureName}`;
}
358
+
359
/**
 * Build HTML/JSON reports from the latest Playwright JSON results, if any.
 *
 * Reads qabot-reports/playwright/results.json (written by the Playwright
 * config's JSON reporter — assumed; confirm against playwright-setup),
 * flattens its suite tree into per-test records, aggregates a summary, and
 * hands everything to ReportGenerator. Entirely best-effort: any failure is
 * swallowed so reporting never breaks the test command.
 *
 * @param {string} projectDir - Project root.
 * @param {object} config - Loaded qabot config (passed to ReportGenerator).
 * @param {string} feature - Feature label for the report ("all" for full runs).
 * @param {string} env - Environment label for the report.
 * @param {string} baseUrl - Target URL. NOTE(review): currently unused here.
 */
async function generateE2EReport(projectDir, config, feature, env, baseUrl) {
  try {
    const screenshotsDir = path.join(projectDir, "e2e", "screenshots");
    // NOTE(review): `screenshots` is collected but never used below —
    // per-test screenshots come from result attachments in flattenSuites.
    const screenshots = await findFiles(screenshotsDir, "*.png").catch(
      () => [],
    );

    const reporter = new ReportGenerator(config);
    // Empty skeleton used verbatim when no Playwright results exist.
    const results = {
      summary: {
        totalTests: 0,
        totalPassed: 0,
        totalFailed: 0,
        totalSkipped: 0,
        overallPassRate: 0,
        byLayer: {},
      },
      results: [],
    };

    const pwResultsPath = path.join(
      projectDir,
      "qabot-reports",
      "playwright",
      "results.json",
    );
    if (await fileExists(pwResultsPath)) {
      try {
        const pwResults = JSON.parse(await readFile(pwResultsPath, "utf-8"));
        if (pwResults.suites) {
          const tests = [];
          flattenSuites(pwResults.suites, tests);
          // Single runner entry: everything here is Playwright / e2e layer.
          results.results = [
            {
              runner: "playwright",
              layer: "e2e",
              feature,
              tests,
              summary: {
                total: tests.length,
                passed: tests.filter((t) => t.status === "passed").length,
                failed: tests.filter((t) => t.status === "failed").length,
                skipped: tests.filter((t) => t.status === "skipped").length,
              },
            },
          ];
          results.summary.totalTests = tests.length;
          results.summary.totalPassed = tests.filter(
            (t) => t.status === "passed",
          ).length;
          results.summary.totalFailed = tests.filter(
            (t) => t.status === "failed",
          ).length;
          // Integer percentage; guarded against divide-by-zero.
          results.summary.overallPassRate =
            tests.length > 0
              ? Math.round((results.summary.totalPassed / tests.length) * 100)
              : 0;
        }
      } catch {
        /* skip malformed results */
      }
    }

    const reportPaths = await reporter.generate(results, {
      feature,
      environment: env,
      projectName: config.project?.name || "unknown",
      timestamp: new Date().toISOString(),
      duration: 0,
    });

    logger.blank();
    logger.info(`Report: ${chalk.underline(reportPaths.htmlPath)}`);
  } catch {
    /* report generation is best-effort */
  }
}
436
+
437
/**
 * Recursively flatten Playwright's nested suite tree into flat test records.
 *
 * For each test, the LAST entry in its `results` array (i.e. the final
 * retry) supplies duration, error, and screenshot attachments. Playwright's
 * "expected" status maps to "passed", "skipped" stays "skipped", and every
 * other status is recorded as "failed".
 *
 * @param {Array<object>} suites - Playwright suite objects (may be falsy).
 * @param {Array<object>} tests - Accumulator; flattened records are pushed here.
 */
function flattenSuites(suites, tests) {
  for (const suite of suites || []) {
    for (const spec of suite.specs || []) {
      for (const test of spec.tests || []) {
        const finalRun = test.results?.[test.results.length - 1];

        let status = "failed";
        if (test.status === "expected") {
          status = "passed";
        } else if (test.status === "skipped") {
          status = "skipped";
        }

        const errorInfo = finalRun?.error
          ? {
              message: finalRun.error.message || "",
              stack: finalRun.error.stack || "",
            }
          : null;

        const images = (finalRun?.attachments || [])
          .filter((attachment) => attachment.contentType?.startsWith("image/"))
          .map((attachment) => attachment.path);

        tests.push({
          name: spec.title,
          suite: suite.title,
          file: suite.file || "",
          status,
          duration: finalRun?.duration || 0,
          error: errorInfo,
          screenshots: images,
        });
      }
    }
    if (suite.suites) flattenSuites(suite.suites, tests);
  }
}
@@ -1,4 +1,4 @@
1
- export const VERSION = "0.5.1";
1
+ export const VERSION = "0.6.1";
2
2
  export const TOOL_NAME = "qabot";
3
3
 
4
4
  export const PROJECT_TYPES = [
@@ -0,0 +1,61 @@
1
+ import { buildE2EPrompt } from "./e2e-prompts.js";
2
+
3
/**
 * Generates a Playwright E2E spec for a feature via the AI client.
 * Temporarily raises the client's max-token budget to at least 8192
 * (restored in `finally`) so long specs are not truncated.
 *
 * @param {object} ai - AI client exposing `complete(prompt)` and a mutable `maxTokens`.
 * @param {string} featureName - Feature the spec should cover.
 * @param {object} context - Prompt context (baseUrl, route, sourceCode, useCases, ...).
 * @returns {Promise<string>} Cleaned spec source (fences stripped, newline-terminated).
 */
export async function generateE2ESpec(ai, featureName, context) {
  const prompt = buildE2EPrompt(featureName, context);

  const previousBudget = ai.maxTokens;
  ai.maxTokens = Math.max(previousBudget, 8192);
  try {
    return cleanSpec(await ai.complete(prompt));
  } finally {
    // Always restore the caller's token budget, even if the request throws.
    ai.maxTokens = previousBudget;
  }
}
15
+
16
/**
 * Asks the AI client to repair a failing Playwright spec, given the test
 * runner's error output. Raises the max-token budget to at least 8192 for
 * the duration of the call (restored in `finally`) so the complete fixed
 * file fits in one completion.
 *
 * @param {object} ai - AI client exposing `complete(prompt)` and a mutable `maxTokens`.
 * @param {string} code - Current (broken) test source.
 * @param {string} errorMessage - Runner failure output; truncated to 2000 chars for the prompt.
 * @param {object} context - Prompt context ({ baseUrl, authProvider, ... }).
 * @returns {Promise<string>} Cleaned, fence-free fixed spec source.
 */
export async function fixE2ESpec(ai, code, errorMessage, context) {
  const prompt = `You are fixing a broken Playwright E2E test.

## Error
\`\`\`
${errorMessage.slice(0, 2000)}
\`\`\`

## Current test code
\`\`\`javascript
${code}
\`\`\`

## Context
- Base URL: ${context.baseUrl || "http://localhost:3000"}
- Auth: ${context.authProvider || "none"}

## Common Playwright fixes
1. Wrong selector — use getByRole, getByText, getByTestId instead of CSS selectors
2. Timing — add waitForLoadState("networkidle") or waitForSelector
3. Element not visible — scroll into view or wait for animation
4. Navigation — ensure page.goto uses correct path
5. Auth — ensure login completed before testing

Return the COMPLETE fixed test file. No markdown fences.`;

  const previousBudget = ai.maxTokens;
  ai.maxTokens = Math.max(previousBudget, 8192);
  try {
    return cleanSpec(await ai.complete(prompt));
  } finally {
    // Always restore the caller's token budget, even if the request throws.
    ai.maxTokens = previousBudget;
  }
}
51
+
52
/**
 * Normalizes an AI completion into raw spec source: strips a leading and a
 * trailing markdown code fence if present, trims surrounding whitespace,
 * and guarantees exactly one trailing newline.
 *
 * Fix: when the completion begins with a fence that has no newline after it
 * (e.g. a truncated single-line "```js ..."), the old `indexOf("\n") + 1`
 * slice was a no-op and the fence marker leaked into the returned "code";
 * such degenerate fence-only output now yields an empty spec instead.
 *
 * @param {string} code - Raw model output, possibly wrapped in ``` fences.
 * @returns {string} Fence-free source ending in "\n".
 */
function cleanSpec(code) {
  let cleaned = code.trim();
  if (cleaned.startsWith("```")) {
    const newline = cleaned.indexOf("\n");
    // Drop the whole opening fence line ("```" plus optional language tag).
    cleaned = newline === -1 ? "" : cleaned.slice(newline + 1);
  }
  if (cleaned.endsWith("```")) {
    cleaned = cleaned.slice(0, cleaned.lastIndexOf("```"));
  }
  return cleaned.trim() + "\n";
}
@@ -0,0 +1,66 @@
1
/**
 * Builds the AI prompt used to generate a Playwright E2E spec for a feature.
 *
 * @param {string} featureName - Feature under test; interpolated into the
 *   prompt headings, the describe-block name, and screenshot paths.
 * @param {object} context - Prompt inputs:
 *   - sourceCode {string=} page source, truncated to 6000 chars
 *   - route {string=} page route to advertise to the model
 *   - useCases {Array<{scenario: string, steps: string[]}>=} QA scenarios
 *   - baseUrl {string=} falls back to "http://localhost:3000"
 *   - authProvider {string=} falls back to "none"
 * @returns {string} Complete prompt text (plain code expected back, no fences).
 */
export function buildE2EPrompt(featureName, context) {
  // Optional sections are pre-rendered as strings (empty when the matching
  // context field is absent) so the main template below stays flat.
  const sourceSection = context.sourceCode
    ? `\n## Source Code (for understanding page structure)\n\`\`\`\n${context.sourceCode.slice(0, 6000)}\n\`\`\`\n`
    : "";

  const routeSection = context.route
    ? `\n## Page Route: ${context.route}\n`
    : "";

  // Each use case becomes a "### scenario" heading followed by numbered steps.
  // NOTE(review): assumes every use case has a `steps` array — a missing one
  // would throw here; confirm against the use-case generator.
  const useCaseSection = context.useCases?.length
    ? `\n## QA Use Cases\n${context.useCases.map((uc) => `### ${uc.scenario}\n${uc.steps.map((s, i) => `${i + 1}. ${s}`).join("\n")}`).join("\n\n")}\n`
    : "";

  return `You are a senior QA automation engineer writing Playwright E2E tests.

## Feature: ${featureName}
## Base URL: ${context.baseUrl || "http://localhost:3000"}
## Auth Provider: ${context.authProvider || "none"}
${routeSection}${sourceSection}${useCaseSection}
## Task
Write a Playwright test spec for the "${featureName}" feature. The test should:
1. Navigate to the correct page
2. Verify the page loads correctly (key elements visible)
3. Test main user interactions
4. Take screenshots at key checkpoints
5. Verify expected outcomes

## Playwright Rules
1. Use \`const { test, expect } = require("@playwright/test");\`
2. Use \`test.describe\` for grouping, \`test\` for individual tests
3. Use accessible selectors: \`page.getByRole()\`, \`page.getByText()\`, \`page.getByTestId()\`
4. Use \`await page.waitForLoadState("networkidle")\` after navigation
5. Use \`await page.screenshot({ path: "e2e/screenshots/${featureName}-{step}.png" })\` at key points
6. Use \`expect(page).toHaveURL()\` for navigation verification
7. Use \`expect(locator).toBeVisible()\` for element verification
8. Handle authentication if needed:
\`\`\`
const { login } = require("../helpers/auth.js");
test.beforeEach(async ({ page, baseURL }) => { await login(page, baseURL); });
\`\`\`
9. Each test should be independent
10. Add reasonable timeouts for slow operations

## Structure
\`\`\`
test.describe("${featureName}", () => {
  test.beforeEach(async ({ page, baseURL }) => {
    // login if needed
    // navigate to feature page
  });

  test("page loads correctly", async ({ page }) => {
    // verify key elements
    // take screenshot
  });

  test("main interaction works", async ({ page }) => {
    // perform action
    // verify outcome
    // take screenshot
  });
});
\`\`\`

Return ONLY the JavaScript code. No markdown fences. No explanation.`;
}
@@ -0,0 +1,144 @@
1
+ import path from "node:path";
2
+ import { execSync } from "node:child_process";
3
+ import { writeFile } from "node:fs/promises";
4
+ import { fileExists, ensureDir } from "../utils/file-utils.js";
5
+
6
/**
 * Ensures @playwright/test and a browser binary are installed in the target
 * project: installs the package with npm when the local CLI shim is missing,
 * then installs Chromium, falling back to a full `playwright install` (all
 * browsers) if the Chromium-only install fails.
 *
 * Fixes:
 * - On Windows the npm `.bin` shim is `playwright.cmd`, so checking the bare
 *   name always missed and triggered a redundant reinstall on every run.
 * - The npm install step had no timeout (unlike the browser installs), so an
 *   unreachable registry could hang the run indefinitely.
 *
 * @param {string} projectDir - Root of the project to install into.
 * @returns {Promise<boolean>} Always true on success; throws if the npm
 *   install fails or both browser-install attempts fail.
 */
export async function ensurePlaywright(projectDir) {
  const binName = process.platform === "win32" ? "playwright.cmd" : "playwright";
  const pwBin = path.join(projectDir, "node_modules", ".bin", binName);

  if (!(await fileExists(pwBin))) {
    execSync("npm install -D @playwright/test", {
      cwd: projectDir,
      stdio: "pipe",
      // Match the browser-install timeout below so this step cannot hang forever.
      timeout: 120000,
    });
  }

  try {
    execSync("npx playwright install chromium", {
      cwd: projectDir,
      stdio: "pipe",
      timeout: 120000,
    });
  } catch {
    // Chromium-only install can fail on some platforms; retry with the full
    // browser set. A failure here propagates to the caller.
    execSync("npx playwright install", {
      cwd: projectDir,
      stdio: "pipe",
      timeout: 120000,
    });
  }

  return true;
}
32
+
33
/**
 * Creates the on-disk directory layout used by the generated E2E suite
 * (tests, page objects, helpers, stored auth state, screenshots).
 * Existing directories are left untouched.
 *
 * @param {string} projectDir - Project root the e2e/ tree is created under.
 */
export async function ensureE2EStructure(projectDir) {
  const subdirs = ["tests", "pages", "helpers", ".auth", "screenshots"];
  for (const name of subdirs) {
    await ensureDir(path.join(projectDir, "e2e", name));
  }
}
45
+
46
/**
 * Writes e2e/playwright.config.js for the project unless one already exists.
 * Environment URLs from the qabot config are baked into the generated file
 * as an ENV_URLS map, selected at runtime via the E2E_ENV variable.
 *
 * @param {string} projectDir - Project root containing the e2e/ directory.
 * @param {object} config - qabot config; `config.environments[name].url` entries are collected.
 * @returns {Promise<string>} Absolute path of the (new or pre-existing) config file.
 */
export async function writePlaywrightConfig(projectDir, config) {
  const configPath = path.join(projectDir, "e2e", "playwright.config.js");
  // Never clobber a config the user may have customized.
  if (await fileExists(configPath)) return configPath;

  // Collect name -> url for every environment that declares a url.
  const envUrls = Object.fromEntries(
    Object.entries(config.environments || {})
      .filter(([, env]) => env.url)
      .map(([name, env]) => [name, env.url]),
  );

  const content = `const { defineConfig, devices } = require("@playwright/test");

const ENV_URLS = ${JSON.stringify(envUrls, null, 2)};

module.exports = defineConfig({
  testDir: "./tests",
  fullyParallel: false,
  retries: 1,
  workers: 1,
  reporter: [
    ["html", { open: "never", outputFolder: "../qabot-reports/playwright" }],
    ["json", { outputFile: "../qabot-reports/playwright/results.json" }],
  ],
  use: {
    baseURL: ENV_URLS[process.env.E2E_ENV || "default"] || "http://localhost:3000",
    trace: "on-first-retry",
    screenshot: "on",
    ignoreHTTPSErrors: true,
    actionTimeout: 10000,
    navigationTimeout: 30000,
  },
  projects: [
    {
      name: "chromium",
      use: { ...devices["Desktop Chrome"] },
    },
  ],
});
`;

  await writeFile(configPath, content, "utf-8");
  return configPath;
}
90
+
91
/**
 * Writes e2e/helpers/auth.js — a `login(page, baseURL)` helper the generated
 * specs can require — unless one already exists. The emitted helper reads
 * credentials from E2E_TEST_EMAIL / E2E_TEST_PASSWORD and skips login (with
 * a warning) when they are unset. The login flow is specialized at
 * generation time: an Auth0-style flow when `config.auth.provider` is
 * "auth0", otherwise a generic email/password form fill.
 *
 * @param {string} projectDir - Project root containing the e2e/ directory.
 * @param {object} config - qabot config; only `config.auth.provider` is read.
 */
export async function writeAuthHelper(projectDir, config) {
  const helperPath = path.join(projectDir, "e2e", "helpers", "auth.js");
  // Never clobber a helper the user may have customized.
  if (await fileExists(helperPath)) return;

  const authProvider = config.auth?.provider || "none";
  // NOTE(review): the generated helper requires `expect` but never uses it —
  // harmless in the output, but the require could be dropped.
  const content = `const { expect } = require("@playwright/test");

async function login(page, baseURL) {
  const email = process.env.E2E_TEST_EMAIL || "";
  const password = process.env.E2E_TEST_PASSWORD || "";

  if (!email || !password) {
    console.warn("E2E_TEST_EMAIL or E2E_TEST_PASSWORD not set — skipping login");
    return;
  }

  await page.goto(baseURL || "/");
  await page.waitForLoadState("networkidle");

  ${
    authProvider === "auth0"
      ? `
  const signInBtn = page.getByRole("button", { name: /sign in/i })
    .or(page.getByRole("link", { name: /sign in/i }))
    .or(page.locator("[data-testid='sign-in-button']"));

  if (await signInBtn.isVisible({ timeout: 5000 }).catch(() => false)) {
    await signInBtn.click();
    await page.waitForLoadState("networkidle");
  }

  const emailInput = page.getByLabel(/email/i).or(page.locator("input[name='email']")).or(page.locator("input[type='email']"));
  if (await emailInput.isVisible({ timeout: 5000 }).catch(() => false)) {
    await emailInput.fill(email);
    const passwordInput = page.getByLabel(/password/i).or(page.locator("input[name='password']")).or(page.locator("input[type='password']"));
    await passwordInput.fill(password);
    const submitBtn = page.getByRole("button", { name: /continue|log in|sign in|submit/i });
    await submitBtn.click();
    await page.waitForLoadState("networkidle");
  }`
      : `
  // Generic login — customize for your auth provider
  await page.getByLabel(/email/i).fill(email);
  await page.getByLabel(/password/i).fill(password);
  await page.getByRole("button", { name: /sign in|log in|submit/i }).click();
  await page.waitForLoadState("networkidle");`
  }
}

module.exports = { login };
`;

  await writeFile(helperPath, content, "utf-8");
}