e2e-ai 1.3.0 → 1.4.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -10,9 +10,12 @@ Includes a **codebase scanner** that builds a QA map of your application's featu
10
10
  # Install
11
11
  npm install e2e-ai
12
12
 
13
- # Initialize config + project context
13
+ # Initialize config + copy agents
14
14
  npx e2e-ai init
15
15
 
16
+ # Generate project context (use init-agent in your AI tool)
17
+ # → produces .e2e-ai/context.md
18
+
16
19
  # Show all commands
17
20
  npx e2e-ai --help
18
21
 
@@ -57,7 +60,7 @@ Scans your codebase structure (routes, components, hooks), uses AI to identify f
57
60
 
58
61
  ### `init` - Project Setup
59
62
 
60
- Interactive wizard that scans your codebase and generates configuration + project context.
63
+ Interactive wizard that creates `.e2e-ai/config.ts` and copies agent prompts to `.e2e-ai/agents/`.
61
64
 
62
65
  ```bash
63
66
  npx e2e-ai init
@@ -66,6 +69,8 @@ npx e2e-ai init
66
69
  npx e2e-ai init --non-interactive
67
70
  ```
68
71
 
72
+ After init, use the **init-agent** in your AI tool (Claude Code, Cursor, etc.) to generate `.e2e-ai/context.md`. This context file teaches all downstream agents about your project's test conventions. If you have the MCP server configured, the AI tool can call `e2e_ai_scan_codebase` to analyze your project automatically.
73
+
69
74
  ---
70
75
 
71
76
  ### `record` - Browser Recording
@@ -409,7 +414,7 @@ npx e2e-ai analyze
409
414
 
410
415
  ## Configuration
411
416
 
412
- Run `npx e2e-ai init` to generate `e2e-ai.config.ts`:
417
+ Run `npx e2e-ai init` to generate `.e2e-ai/config.ts`:
413
418
 
414
419
  ```typescript
415
420
  import { defineConfig } from 'e2e-ai/config';
@@ -463,12 +468,10 @@ export default defineConfig({
463
468
  retries: 0,
464
469
  traceMode: 'on',
465
470
  },
466
-
467
- contextFile: '.e2e-ai/context.md',
468
471
  });
469
472
  ```
470
473
 
471
- See `templates/e2e-ai.context.example.md` for the project context file format.
474
+ All configuration lives inside the `.e2e-ai/` directory — no files at project root.
472
475
 
473
476
  ## Global Options
474
477
 
@@ -524,7 +527,7 @@ You can customize agent behavior by editing the `.md` files directly. The frontm
524
527
 
525
528
  ## Output Directory Structure
526
529
 
527
- Default paths (configurable via `e2e-ai.config.ts`):
530
+ Default paths (configurable via `.e2e-ai/config.ts`):
528
531
 
529
532
  ```
530
533
  e2e/
@@ -534,12 +537,85 @@ e2e/
534
537
  qa/ # QA documentation .md files
535
538
 
536
539
  .e2e-ai/
540
+ config.ts # project configuration
541
+ context.md # project context (generated by init-agent)
542
+ agents/ # agent prompt definitions (.md files)
537
543
  <KEY>/ # per-issue working dir: codegen, recordings/, intermediate files
538
544
  ast-scan.json # scan command output
539
545
  qa-map.json # analyze command output
540
546
  scan-cache/ # file-level parse cache (gitignored)
541
547
  ```
542
548
 
549
+ ## MCP Server
550
+
551
+ e2e-ai ships an MCP (Model Context Protocol) server that lets AI assistants interact with your project's test infrastructure directly. The server binary is `e2e-ai-mcp`.
552
+
553
+ ### Setup
554
+
555
+ Add to your MCP client configuration:
556
+
557
+ **Claude Desktop** (`~/Library/Application Support/Claude/claude_desktop_config.json`):
558
+
559
+ ```json
560
+ {
561
+ "mcpServers": {
562
+ "e2e-ai": {
563
+ "command": "npx",
564
+ "args": ["e2e-ai-mcp"],
565
+ "cwd": "/path/to/your/project"
566
+ }
567
+ }
568
+ }
569
+ ```
570
+
571
+ **Claude Code:**
572
+
573
+ ```bash
574
+ claude mcp add e2e-ai -- npx e2e-ai-mcp
575
+ ```
576
+
577
+ **Cursor** (`.cursor/mcp.json`):
578
+
579
+ ```json
580
+ {
581
+ "mcpServers": {
582
+ "e2e-ai": {
583
+ "command": "npx",
584
+ "args": ["e2e-ai-mcp"]
585
+ }
586
+ }
587
+ }
588
+ ```
589
+
590
+ If e2e-ai is installed globally or as a project dependency, you can use the binary path directly instead of `npx`:
591
+
592
+ ```json
593
+ {
594
+ "command": "node",
595
+ "args": ["node_modules/.bin/e2e-ai-mcp"]
596
+ }
597
+ ```
598
+
599
+ ### Available Tools
600
+
601
+ | Tool | Description | Input |
602
+ |------|-------------|-------|
603
+ | `e2e_ai_scan_codebase` | Scan project for test files, configs, fixtures, path aliases, and sample test content | `projectRoot?` (defaults to cwd) |
604
+ | `e2e_ai_validate_context` | Validate that a context markdown file has all required sections | `content` (markdown string) |
605
+ | `e2e_ai_read_agent` | Load an agent prompt by name — returns system prompt + config | `agentName` (e.g. `scenario-agent`) |
606
+ | `e2e_ai_get_example` | Get the example context markdown template | (none) |
607
+
608
+ ### Usage with AI Assistants
609
+
610
+ Once configured, an AI assistant can:
611
+
612
+ 1. **Scan your project** to understand its test structure, fixtures, and conventions
613
+ 2. **Read agent prompts** to understand how each pipeline step works
614
+ 3. **Validate context files** to ensure they have the right format before running commands
615
+ 4. **Get the example template** as a starting point for writing `.e2e-ai/context.md`
616
+
617
+ This enables AI assistants to help you set up e2e-ai, debug pipeline issues, and generate better project context files.
618
+
543
619
  ## Library API
544
620
 
545
621
  e2e-ai also exports types and config helpers for programmatic use:
@@ -1,9 +1,5 @@
1
1
  ---
2
2
  agent: feature-analyzer-agent
3
- version: "1.0"
4
- model: gpt-4o
5
- max_tokens: 8192
6
- temperature: 0.1
7
3
  ---
8
4
 
9
5
  # System Prompt
@@ -1,20 +1,24 @@
1
1
  ---
2
2
  agent: init-agent
3
- version: "1.0"
4
- model: gpt-4o
5
- max_tokens: 8192
6
- temperature: 0.3
7
3
  ---
8
4
 
9
5
  # System Prompt
10
6
 
11
- You are a codebase analysis assistant for the e2e-ai test automation tool. Your job is to analyze a project's test infrastructure and produce a well-structured context document (`e2e-ai.context.md`) that will guide AI agents when generating, refining, and healing Playwright tests for this specific project.
7
+ You are a codebase analysis assistant for the e2e-ai test automation tool. Your job is to analyze a project's test infrastructure and produce a well-structured context document (`.e2e-ai/context.md`) that will guide AI agents when generating, refining, and healing Playwright tests for this specific project.
12
8
 
13
- You will receive scan results from the target codebase and engage in a conversation to clarify patterns you're uncertain about.
9
+ ## How to Use This Agent
10
+
11
+ This agent is designed to be used directly in your AI tool (Claude Code, Cursor, Gemini CLI, etc.). Start a conversation and ask it to generate your project context.
12
+
13
+ **If the e2e-ai MCP server is configured**, call `e2e_ai_scan_codebase` to get scan results, then follow this agent's instructions to produce the context file.
14
+
15
+ **If no MCP server**, manually explore the codebase: look at test files, fixtures, playwright config, tsconfig paths, and helper modules.
14
16
 
15
17
  ## Your Task
16
18
 
17
- Analyze the provided codebase scan and produce a context document covering:
19
+ Analyze the project codebase and produce a file at `.e2e-ai/context.md` that documents the project's test infrastructure, conventions, and patterns. This context file is consumed by downstream AI agents (scenario, generator, refiner, healer, QA) to produce Playwright tests that match the project's existing style.
20
+
21
+ Cover these areas:
18
22
 
19
23
  1. **Application Overview**: What the app does, tech stack, key pages/routes
20
24
  2. **Test Infrastructure**: Fixtures, custom test helpers, step counters, auth patterns
@@ -26,13 +30,13 @@ Analyze the provided codebase scan and produce a context document covering:
26
30
 
27
31
  ## Output Format
28
32
 
29
- When you have enough information, produce the final context as a markdown document with these sections:
33
+ Produce the context document with these sections and save it to `.e2e-ai/context.md`:
30
34
 
31
35
  ```markdown
32
36
  # Project Context for e2e-ai
33
37
 
34
38
  ## Application
35
- <name, description, tech stack>
39
+ <name, description, tech stack, base URL>
36
40
 
37
41
  ## Test Infrastructure
38
42
  <fixtures, helpers, auth pattern>
@@ -53,6 +57,22 @@ When you have enough information, produce the final context as a markdown docume
53
57
  <timeouts, waits, assertion patterns>
54
58
  ```
55
59
 
60
+ All sections are required. The file should be 100-300 lines, self-contained, and use actual code from the project (not generic Playwright examples).
61
+
62
+ ## How Context is Used
63
+
64
+ Each pipeline agent reads `.e2e-ai/context.md` to understand project conventions:
65
+
66
+ | Agent | Uses context for |
67
+ |-------|-----------------|
68
+ | **scenario-agent** | Structuring test steps to match project patterns |
69
+ | **playwright-generator-agent** | Generating code with correct imports, fixtures, selectors |
70
+ | **refactor-agent** | Applying project-specific refactoring patterns |
71
+ | **self-healing-agent** | Understanding expected test structure when fixing failures |
72
+ | **qa-testcase-agent** | Formatting QA documentation to match conventions |
73
+ | **feature-analyzer-agent** | Understanding app structure for QA map generation |
74
+ | **scenario-planner-agent** | Generating realistic test scenarios from codebase analysis |
75
+
56
76
  ## Rules
57
77
 
58
78
  1. Ask clarifying questions if the scan data is ambiguous — do NOT guess
@@ -61,15 +81,3 @@ When you have enough information, produce the final context as a markdown docume
61
81
  4. The context file should be self-contained — an AI agent reading only this file should understand all project conventions
62
82
  5. Keep the document concise but complete — aim for 100-300 lines
63
83
  6. If you need to see specific files to complete the analysis, list them explicitly
64
-
65
- ## Conversation Flow
66
-
67
- 1. **First turn**: Receive scan results, analyze them, ask clarifying questions if needed
68
- 2. **Middle turns**: Receive answers, refine understanding
69
- 3. **Final turn**: When you have enough info, produce the complete context document wrapped in a `<context>` tag:
70
- ```
71
- <context>
72
- # Project Context for e2e-ai
73
- ...
74
- </context>
75
- ```
@@ -1,9 +1,5 @@
1
1
  ---
2
2
  agent: playwright-generator-agent
3
- version: "1.0"
4
- model: gpt-4o
5
- max_tokens: 8192
6
- temperature: 0.2
7
3
  ---
8
4
 
9
5
  # System Prompt
@@ -1,9 +1,5 @@
1
1
  ---
2
2
  agent: qa-testcase-agent
3
- version: "1.0"
4
- model: gpt-4o
5
- max_tokens: 8192
6
- temperature: 0.2
7
3
  ---
8
4
 
9
5
  # System Prompt
@@ -1,9 +1,5 @@
1
1
  ---
2
2
  agent: refactor-agent
3
- version: "1.0"
4
- model: gpt-4o
5
- max_tokens: 8192
6
- temperature: 0.2
7
3
  ---
8
4
 
9
5
  # System Prompt
@@ -1,9 +1,5 @@
1
1
  ---
2
2
  agent: scenario-agent
3
- version: "1.0"
4
- model: gpt-4o
5
- max_tokens: 4096
6
- temperature: 0.2
7
3
  ---
8
4
 
9
5
  # System Prompt
@@ -1,9 +1,5 @@
1
1
  ---
2
2
  agent: scenario-planner-agent
3
- version: "1.0"
4
- model: gpt-4o
5
- max_tokens: 8192
6
- temperature: 0.2
7
3
  ---
8
4
 
9
5
  # System Prompt
@@ -1,9 +1,5 @@
1
1
  ---
2
2
  agent: self-healing-agent
3
- version: "1.0"
4
- model: gpt-4o
5
- max_tokens: 8192
6
- temperature: 0.2
7
3
  ---
8
4
 
9
5
  # System Prompt
@@ -1,9 +1,5 @@
1
1
  ---
2
2
  agent: transcript-agent
3
- version: "1.0"
4
- model: gpt-4o
5
- max_tokens: 4096
6
- temperature: 0.2
7
3
  ---
8
4
 
9
5
  # System Prompt
@@ -0,0 +1,101 @@
1
+ import {
2
+ getPackageRoot,
3
+ getProjectRoot
4
+ } from "./cli-kx32qnf3.js";
5
+
6
+ // src/agents/loadAgent.ts
7
+ import { readFileSync, existsSync } from "node:fs";
8
+ import { join } from "node:path";
9
+ function loadAgent(agentName, config) {
10
+ const localPath = join(getProjectRoot(), ".e2e-ai", "agents", `${agentName}.md`);
11
+ const packagePath = join(getPackageRoot(), "agents", `${agentName}.md`);
12
+ const filePath = existsSync(localPath) ? localPath : packagePath;
13
+ let content;
14
+ try {
15
+ content = readFileSync(filePath, "utf-8");
16
+ } catch {
17
+ throw new Error(`Agent file not found: ${filePath}`);
18
+ }
19
+ const { frontmatter, body } = parseFrontmatter(content);
20
+ const agentConfig = extractConfig(frontmatter);
21
+ let systemPrompt = body;
22
+ if (config) {
23
+ const contextPath = join(getProjectRoot(), ".e2e-ai", "context.md");
24
+ if (existsSync(contextPath)) {
25
+ const projectContext = readFileSync(contextPath, "utf-8").trim();
26
+ if (projectContext) {
27
+ systemPrompt = `${body}
28
+
29
+ ## Project Context
30
+
31
+ ${projectContext}`;
32
+ }
33
+ }
34
+ if (config.llm.agentModels[agentName]) {
35
+ agentConfig.model = config.llm.agentModels[agentName];
36
+ }
37
+ }
38
+ const sections = parseSections(body);
39
+ return {
40
+ name: frontmatter.agent ?? agentName,
41
+ systemPrompt,
42
+ inputSchema: sections["Input Schema"],
43
+ outputSchema: sections["Output Schema"],
44
+ rules: sections["Rules"],
45
+ example: sections["Example"],
46
+ config: agentConfig
47
+ };
48
+ }
49
+ function parseFrontmatter(content) {
50
+ const match = content.match(/^---\n([\s\S]*?)\n---\n([\s\S]*)$/);
51
+ if (!match)
52
+ return { frontmatter: {}, body: content };
53
+ const frontmatter = {};
54
+ for (const line of match[1].split(`
55
+ `)) {
56
+ const colonIdx = line.indexOf(":");
57
+ if (colonIdx === -1)
58
+ continue;
59
+ const key = line.slice(0, colonIdx).trim();
60
+ let value = line.slice(colonIdx + 1).trim();
61
+ if (value.startsWith('"') && value.endsWith('"'))
62
+ value = value.slice(1, -1);
63
+ if (value === "true")
64
+ value = true;
65
+ if (value === "false")
66
+ value = false;
67
+ if (!isNaN(Number(value)) && value !== "")
68
+ value = Number(value);
69
+ frontmatter[key] = value;
70
+ }
71
+ return { frontmatter, body: match[2] };
72
+ }
73
+ function extractConfig(frontmatter) {
74
+ return {
75
+ model: frontmatter.model,
76
+ maxTokens: frontmatter.max_tokens ?? 4096,
77
+ temperature: frontmatter.temperature ?? 0.2
78
+ };
79
+ }
80
+ function parseSections(body) {
81
+ const sections = {};
82
+ const headingRegex = /^##\s+(.+)$/gm;
83
+ const headings = [];
84
+ let match;
85
+ while ((match = headingRegex.exec(body)) !== null) {
86
+ headings.push({ title: match[1].trim(), index: match.index });
87
+ }
88
+ const systemMatch = body.match(/^#\s+System Prompt\n([\s\S]*?)(?=\n##\s|$)/m);
89
+ if (systemMatch) {
90
+ sections["System Prompt"] = systemMatch[1].trim();
91
+ }
92
+ for (let i = 0;i < headings.length; i++) {
93
+ const start = headings[i].index + body.slice(headings[i].index).indexOf(`
94
+ `) + 1;
95
+ const end = i + 1 < headings.length ? headings[i + 1].index : body.length;
96
+ sections[headings[i].title] = body.slice(start, end).trim();
97
+ }
98
+ return sections;
99
+ }
100
+
101
+ export { loadAgent };
@@ -0,0 +1,67 @@
1
+ import {
2
+ E2eAiConfigSchema
3
+ } from "./cli-fgp618yt.js";
4
+
5
+ // src/config/loader.ts
6
+ import { existsSync } from "node:fs";
7
+ import { dirname, join, resolve } from "node:path";
8
+ import { pathToFileURL } from "node:url";
9
+ var CONFIG_DIR = ".e2e-ai";
10
+ var CONFIG_FILENAMES = ["config.ts", "config.js", "config.mjs"];
11
+ var cachedConfig = null;
12
+ var cachedProjectRoot = null;
13
+ function findConfigDir(startDir) {
14
+ let dir = resolve(startDir);
15
+ const root = dirname(dir) === dir ? dir : undefined;
16
+ while (true) {
17
+ const e2eDir = join(dir, CONFIG_DIR);
18
+ for (const name of CONFIG_FILENAMES) {
19
+ if (existsSync(join(e2eDir, name))) {
20
+ return dir;
21
+ }
22
+ }
23
+ const parent = dirname(dir);
24
+ if (parent === dir || dir === root)
25
+ return null;
26
+ dir = parent;
27
+ }
28
+ }
29
+ function getProjectRoot() {
30
+ if (cachedProjectRoot)
31
+ return cachedProjectRoot;
32
+ const found = findConfigDir(process.cwd());
33
+ cachedProjectRoot = found ?? process.cwd();
34
+ return cachedProjectRoot;
35
+ }
36
+ function getPackageRoot() {
37
+ let dir = import.meta.dirname;
38
+ while (!existsSync(join(dir, "package.json"))) {
39
+ const parent = dirname(dir);
40
+ if (parent === dir)
41
+ return dir;
42
+ dir = parent;
43
+ }
44
+ return dir;
45
+ }
46
+ async function loadConfig() {
47
+ if (cachedConfig)
48
+ return cachedConfig;
49
+ const projectRoot = getProjectRoot();
50
+ const e2eDir = join(projectRoot, CONFIG_DIR);
51
+ let userConfig = {};
52
+ for (const name of CONFIG_FILENAMES) {
53
+ const configPath = join(e2eDir, name);
54
+ if (existsSync(configPath)) {
55
+ try {
56
+ const fileUrl = pathToFileURL(configPath).href;
57
+ const mod = await import(fileUrl);
58
+ userConfig = mod.default ?? mod;
59
+ break;
60
+ } catch {}
61
+ }
62
+ }
63
+ cachedConfig = E2eAiConfigSchema.parse(userConfig);
64
+ return cachedConfig;
65
+ }
66
+
67
+ export { getProjectRoot, getPackageRoot, loadConfig };