@postxl/generators 1.16.0 → 1.17.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/base/template/.claude/commands/README.md +65 -0
- package/dist/base/template/.claude/settings.json +3 -1
- package/dist/base/template/.github/.copilot-prompts.json +22 -0
- package/dist/e2e/e2e.generator.js +43 -1
- package/dist/e2e/e2e.generator.js.map +1 -1
- package/dist/e2e/template/.claude/commands/prepare-e2e-tests.md +251 -0
- package/dist/e2e/template/.claude/commands/run-e2e-tests.md +221 -0
- package/dist/e2e/template/scripts/e2e.sh +398 -0
- package/package.json +2 -2
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
# Custom Commands for Claude Code
|
|
2
|
+
|
|
3
|
+
This directory contains custom command documentation that Claude Code can reference when assisting with development tasks.
|
|
4
|
+
|
|
5
|
+
## Available Commands
|
|
6
|
+
|
|
7
|
+
### 1. Run E2E Tests (`run-e2e-tests.md`)
|
|
8
|
+
Comprehensive instructions for running end-to-end tests using Playwright.
|
|
9
|
+
|
|
10
|
+
**Trigger phrases:**
|
|
11
|
+
- "run e2e tests"
|
|
12
|
+
- "run end-to-end tests"
|
|
13
|
+
- "execute e2e test suite"
|
|
14
|
+
|
|
15
|
+
### 2. Fix CI (`fix-ci.md`)
|
|
16
|
+
Guidance for troubleshooting and fixing CI/CD pipeline issues.
|
|
17
|
+
|
|
18
|
+
**Trigger phrases:**
|
|
19
|
+
- "fix ci"
|
|
20
|
+
- "help with continuous integration"
|
|
21
|
+
|
|
22
|
+
### 3. Browser Observe (`browser-observe.md`)
|
|
23
|
+
Instructions for browser-based testing and observation workflows.
|
|
24
|
+
|
|
25
|
+
**Trigger phrases:**
|
|
26
|
+
- "observe browser"
|
|
27
|
+
- "start browser observation"
|
|
28
|
+
|
|
29
|
+
### 4. Prepare E2E Tests (`prepare-e2e-tests.md`)
|
|
30
|
+
Analyzes code changes, determines needed E2E test coverage, writes test specs, and runs them to verify they pass.
|
|
31
|
+
|
|
32
|
+
**Trigger phrases:**
|
|
33
|
+
- "prepare e2e tests"
|
|
34
|
+
- "write e2e tests for my changes"
|
|
35
|
+
- "add e2e test coverage"
|
|
36
|
+
|
|
37
|
+
## How It Works
|
|
38
|
+
|
|
39
|
+
When you ask Claude to perform one of these tasks, it will:
|
|
40
|
+
1. Automatically detect the request based on your natural language
|
|
41
|
+
2. Reference the appropriate command file
|
|
42
|
+
3. Follow the detailed instructions in that file
|
|
43
|
+
4. Execute the necessary steps to complete the task
|
|
44
|
+
|
|
45
|
+
## Adding New Commands
|
|
46
|
+
|
|
47
|
+
To add a new custom command:
|
|
48
|
+
|
|
49
|
+
1. Create a new markdown file in this directory with a descriptive name (e.g., `deploy-staging.md`)
|
|
50
|
+
2. Write clear, detailed instructions including:
|
|
51
|
+
- Overview of the task
|
|
52
|
+
- Prerequisites
|
|
53
|
+
- Step-by-step instructions
|
|
54
|
+
- Common issues and troubleshooting
|
|
55
|
+
- Examples
|
|
56
|
+
3. Update this README to list the new command
|
|
57
|
+
4. Commit all files to version control
|
|
58
|
+
|
|
59
|
+
## Best Practices
|
|
60
|
+
|
|
61
|
+
- **Be specific**: Include exact commands, file paths, and configuration details
|
|
62
|
+
- **Include context**: Explain why certain steps are necessary
|
|
63
|
+
- **Add troubleshooting**: Document common errors and solutions
|
|
64
|
+
- **Use examples**: Show concrete examples of usage
|
|
65
|
+
- **Keep updated**: Review and update commands as the project evolves
|
|
@@ -4,6 +4,8 @@
|
|
|
4
4
|
"Read CLAUDE.md for project-specific guidance before making changes",
|
|
5
5
|
"This is a PostXL-generated project - see the main PostXL monorepo CLAUDE.md for full documentation",
|
|
6
6
|
"Always run type checks after code changes: pnpm run test:types",
|
|
7
|
-
"Use @postxl/ui-components for all UI elements - never create custom generic UI primitives"
|
|
7
|
+
"Use @postxl/ui-components for all UI elements - never create custom generic UI primitives",
|
|
8
|
+
"When asked to 'run e2e tests', 'run end-to-end tests', or 'execute e2e': ALWAYS read the detailed instructions from .claude/commands/run-e2e-tests.md first, then run the automated script at scripts/e2e.sh from the project root",
|
|
9
|
+
"When significant frontend or backend changes are made (new pages, routes, forms, tables, or API endpoints), proactively suggest running /prepare-e2e-tests to ensure E2E test coverage"
|
|
8
10
|
]
|
|
9
11
|
}
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
{
|
|
2
|
+
"prompts": [
|
|
3
|
+
{
|
|
4
|
+
"name": "Run E2E Tests",
|
|
5
|
+
"description": "Run end-to-end tests using Playwright",
|
|
6
|
+
"path": "../.claude/commands/run-e2e-tests.md",
|
|
7
|
+
"tags": ["testing", "e2e", "playwright"]
|
|
8
|
+
},
|
|
9
|
+
{
|
|
10
|
+
"name": "Fix CI",
|
|
11
|
+
"description": "Help fix CI/CD pipeline issues",
|
|
12
|
+
"path": "../.claude/commands/fix-ci.md",
|
|
13
|
+
"tags": ["ci", "devops"]
|
|
14
|
+
},
|
|
15
|
+
{
|
|
16
|
+
"name": "Browser Observe",
|
|
17
|
+
"description": "Browser testing and observation workflows",
|
|
18
|
+
"path": "../.claude/commands/browser-observe.md",
|
|
19
|
+
"tags": ["testing", "browser"]
|
|
20
|
+
}
|
|
21
|
+
]
|
|
22
|
+
}
|
|
@@ -54,7 +54,22 @@ exports.generator = {
|
|
|
54
54
|
},
|
|
55
55
|
generate: async (context) => {
|
|
56
56
|
const vfs = new Generator.VirtualFileSystem();
|
|
57
|
-
const
|
|
57
|
+
const { projectType, slug } = context.schema;
|
|
58
|
+
const isWorkspace = projectType === 'workspace';
|
|
59
|
+
// Template context with project-type-aware values
|
|
60
|
+
const templateContext = {
|
|
61
|
+
schema: context.schema,
|
|
62
|
+
// For e2e.sh
|
|
63
|
+
monorepoRootExpr: isWorkspace ? '"$(cd "$PROJECT_DIR/../.." && pwd)"' : '"$PROJECT_DIR"',
|
|
64
|
+
dockerWorkDir: isWorkspace ? `/pxl/projects/${slug}/e2e` : '/pxl/e2e',
|
|
65
|
+
nodeShimsCleanup: isWorkspace
|
|
66
|
+
? 'rm -f /pxl/node_modules/.bin/node /pxl/projects/*/node_modules/.bin/node 2>/dev/null'
|
|
67
|
+
: 'rm -f /pxl/node_modules/.bin/node 2>/dev/null',
|
|
68
|
+
// For run-e2e-tests.md
|
|
69
|
+
cdProjectDir: isWorkspace ? `cd projects/${slug}\n` : '',
|
|
70
|
+
backendPath: isWorkspace ? `projects/${slug}/backend` : 'backend',
|
|
71
|
+
frontendPath: isWorkspace ? `projects/${slug}/frontend` : 'frontend',
|
|
72
|
+
};
|
|
58
73
|
// Load e2e folder, excluding files that need template substitution
|
|
59
74
|
await vfs.loadFolder({
|
|
60
75
|
diskPath: path.resolve(__dirname, './template/e2e'),
|
|
@@ -70,6 +85,33 @@ exports.generator = {
|
|
|
70
85
|
throw new Error(`Failed to generate package.json: ${packageJsonContent.unwrapErr().message}`);
|
|
71
86
|
}
|
|
72
87
|
vfs.write(`/e2e/${PACKAGE_JSON_FILENAME}`, packageJsonContent.unwrap());
|
|
88
|
+
// Generate e2e.sh with project-type-aware paths
|
|
89
|
+
const e2eShContent = await Generator.generateFromTemplate({
|
|
90
|
+
file: path.resolve(__dirname, './template/scripts/e2e.sh'),
|
|
91
|
+
context: templateContext,
|
|
92
|
+
});
|
|
93
|
+
if (e2eShContent.isErr()) {
|
|
94
|
+
throw new Error(`Failed to generate e2e.sh: ${e2eShContent.unwrapErr().message}`);
|
|
95
|
+
}
|
|
96
|
+
vfs.write('/scripts/e2e.sh', e2eShContent.unwrap());
|
|
97
|
+
// Generate run-e2e-tests.md with project-type-aware paths
|
|
98
|
+
const runE2eTestsMdContent = await Generator.generateFromTemplate({
|
|
99
|
+
file: path.resolve(__dirname, './template/.claude/commands/run-e2e-tests.md'),
|
|
100
|
+
context: templateContext,
|
|
101
|
+
});
|
|
102
|
+
if (runE2eTestsMdContent.isErr()) {
|
|
103
|
+
throw new Error(`Failed to generate run-e2e-tests.md: ${runE2eTestsMdContent.unwrapErr().message}`);
|
|
104
|
+
}
|
|
105
|
+
vfs.write('/.claude/commands/run-e2e-tests.md', runE2eTestsMdContent.unwrap());
|
|
106
|
+
// Generate prepare-e2e-tests.md with project-type-aware paths
|
|
107
|
+
const prepareE2eTestsMdContent = await Generator.generateFromTemplate({
|
|
108
|
+
file: path.resolve(__dirname, './template/.claude/commands/prepare-e2e-tests.md'),
|
|
109
|
+
context: templateContext,
|
|
110
|
+
});
|
|
111
|
+
if (prepareE2eTestsMdContent.isErr()) {
|
|
112
|
+
throw new Error(`Failed to generate prepare-e2e-tests.md: ${prepareE2eTestsMdContent.unwrapErr().message}`);
|
|
113
|
+
}
|
|
114
|
+
vfs.write('/.claude/commands/prepare-e2e-tests.md', prepareE2eTestsMdContent.unwrap());
|
|
73
115
|
// write dynamic files
|
|
74
116
|
vfs.write('/e2e/support/model-test-ids.ts', (0, model_test_id_generator_1.generateModelTestIds)(context.e2e));
|
|
75
117
|
vfs.write('/scripts/docker.sh', (0, docker_sh_generator_1.generateDockerSh)(context));
|
|
@@ -1 +1 @@
|
|
|
1
|
-
{"version":3,"file":"e2e.generator.js","sourceRoot":"","sources":["../../src/e2e/e2e.generator.ts"],"names":[],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA,gDAAiC;AAEjC,6DAA8C;AAI9C,0EAAmE;AACnE,kFAA2E;AAE3E,MAAM,qBAAqB,GAAG,cAAc,CAAA;AAsB/B,QAAA,WAAW,GAAG,SAAS,CAAC,sBAAsB,CAAC,KAAK,CAAC,CAAA;AAErD,QAAA,SAAS,GAAiC;IACrD,EAAE,EAAE,mBAAW;IACf,QAAQ,EAAE,CAAC,mBAAW,CAAC;IAEvB,QAAQ,EAAE,CAAiD,GAAY,EAAiB,EAAE;QACxF,GAAG,CAAC,OAAO,CAAC,WAAW,CAAC,eAAe,CAAC,IAAI,CAAC,EAAE,WAAW,EAAE,IAAI,EAAE,OAAO,EAAE,SAAS,EAAE,CAAC,CAAA;QACvF,OAAO;YACL,GAAG,GAAG;YACN,GAAG,EAAE;gBACH,OAAO,EAAE,EAAE;aACZ;SACF,CAAA;IACH,CAAC;IAED,QAAQ,EAAE,KAAK,EAAiC,OAAgB,EAAoB,EAAE;QACpF,MAAM,GAAG,GAAG,IAAI,SAAS,CAAC,iBAAiB,EAAE,CAAA;QAC7C,MAAM,eAAe,GAAG,EAAE,MAAM,EAAE,
|
|
1
|
+
{"version":3,"file":"e2e.generator.js","sourceRoot":"","sources":["../../src/e2e/e2e.generator.ts"],"names":[],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;AAAA,gDAAiC;AAEjC,6DAA8C;AAI9C,0EAAmE;AACnE,kFAA2E;AAE3E,MAAM,qBAAqB,GAAG,cAAc,CAAA;AAsB/B,QAAA,WAAW,GAAG,SAAS,CAAC,sBAAsB,CAAC,KAAK,CAAC,CAAA;AAErD,QAAA,SAAS,GAAiC;IACrD,EAAE,EAAE,mBAAW;IACf,QAAQ,EAAE,CAAC,mBAAW,CAAC;IAEvB,QAAQ,EAAE,CAAiD,GAAY,EAAiB,EAAE;QACxF,GAAG,CAAC,OAAO,CAAC,WAAW,CAAC,eAAe,CAAC,IAAI,CAAC,EAAE,WAAW,EAAE,IAAI,EAAE,OAAO,EAAE,SAAS,EAAE,CAAC,CAAA;QACvF,OAAO;YACL,GAAG,GAAG;YACN,GAAG,EAAE;gBACH,OAAO,EAAE,EAAE;aACZ;SACF,CAAA;IACH,CAAC;IAED,QAAQ,EAAE,KAAK,EAAiC,OAAgB,EAAoB,EAAE;QACpF,MAAM,GAAG,GAAG,IAAI,SAAS,CAAC,iBAAiB,EAAE,CAAA;QAC7C,MAAM,EAAE,WAAW,EAAE,IAAI,EAAE,GAAG,OAAO,CAAC,MAAM,CAAA;QAC5C,MAAM,WAAW,GAAG,WAAW,KAAK,WAAW,CAAA;QAE/C,kDAAkD;QAClD,MAAM,eAAe,GAAG;YACtB,MAAM,EAAE,OAAO,CAAC,MAAM;YACtB,aAAa;YACb,gBAAgB,EAAE,WAAW,CAAC,CAAC,CAAC,qCAAqC,CAAC,CAAC,CAAC,gBAAgB;YACxF,aAAa,EAAE,WAAW,CAAC,CAAC,CAAC,iBAAiB,IAAI,MAAM,CAAC,CAAC,CAAC,UAAU;YACrE,gBAAgB,EAAE,WAAW;gBAC3B,CAAC,CAAC,sFAAsF;gBACxF,CAAC,CAAC,+CAA+C;YACnD,uBAAuB;YACvB,YAAY,EAAE,WAAW,CAAC,CAAC,CAAC,eAAe,IAAI,IAAI,CAAC,CAAC,CAAC,EAAE;YACxD,WAAW,EAAE,WAAW,CAAC,CAAC,CAAC,YAAY,IAAI,UAAU,CAAC,CAAC,CAAC,SAAS;YACjE,YAAY,EAAE,WAAW,CAAC,CAAC,CAAC,YAAY,IAAI,WAAW,CAAC,CAAC,CAAC,UAAU;SACrE,CAAA;QAED,mEAAmE;QACnE,MAAM,GAAG,CAAC,UAAU,CAAC;YACnB,QAAQ,EAAE,IAAI,CAAC,OAAO,CAAC,SAAS,EAAE,gBAAgB,CAAC;YACnD,UAAU,EAAE,MAAM;YAClB,MAAM,EAAE,CAAC,QAAQ,EAAE,EAAE,CAAC,CAAC,QAAQ,CAAC,QAAQ,CAAC,qBAAqB,CAAC;SAChE,CAAC,CAAA;QAEF,wCAAwC;QACxC,MAAM,kBAAkB,GAAG,MAAM,SAAS,CAAC,oBAAoB,CAAC;YAC9D,IAAI,EAAE,IAAI,CAAC,OAAO,CAAC,SAAS,EAAE,gBAAgB,EAAE,qBAAqB,CAAC;YACtE,OAAO,EAAE,eAAe;SACzB,CAAC,CAAA;QACF,IAAI,kBAAkB,CAAC,KAAK,EAAE,EAAE,CAAC;YAC/B,MAAM,IAAI,KAAK,CAAC,oCAAoC,kBAAkB,CAAC,SAAS,EAAE,CAAC,OAAO,EAAE,CAAC,CAAA;QAC/F,CAAC;QACD,GAAG,CAAC,KAAK,CAAC,QAAQ,qBAAqB,EAAE,EAAE,kBAAkB,CAAC,MAAM,EAAE,CAAC,CAAA;QAEvE,gDAAgD;QAChD,MAAM,YAAY,GAAG,MAAM,SAAS,CAAC,oBAAoB,CAAC;YACxD,IA
AI,EAAE,IAAI,CAAC,OAAO,CAAC,SAAS,EAAE,2BAA2B,CAAC;YAC1D,OAAO,EAAE,eAAe;SACzB,CAAC,CAAA;QACF,IAAI,YAAY,CAAC,KAAK,EAAE,EAAE,CAAC;YACzB,MAAM,IAAI,KAAK,CAAC,8BAA8B,YAAY,CAAC,SAAS,EAAE,CAAC,OAAO,EAAE,CAAC,CAAA;QACnF,CAAC;QACD,GAAG,CAAC,KAAK,CAAC,iBAAiB,EAAE,YAAY,CAAC,MAAM,EAAE,CAAC,CAAA;QAEnD,0DAA0D;QAC1D,MAAM,oBAAoB,GAAG,MAAM,SAAS,CAAC,oBAAoB,CAAC;YAChE,IAAI,EAAE,IAAI,CAAC,OAAO,CAAC,SAAS,EAAE,8CAA8C,CAAC;YAC7E,OAAO,EAAE,eAAe;SACzB,CAAC,CAAA;QACF,IAAI,oBAAoB,CAAC,KAAK,EAAE,EAAE,CAAC;YACjC,MAAM,IAAI,KAAK,CAAC,wCAAwC,oBAAoB,CAAC,SAAS,EAAE,CAAC,OAAO,EAAE,CAAC,CAAA;QACrG,CAAC;QACD,GAAG,CAAC,KAAK,CAAC,oCAAoC,EAAE,oBAAoB,CAAC,MAAM,EAAE,CAAC,CAAA;QAE9E,8DAA8D;QAC9D,MAAM,wBAAwB,GAAG,MAAM,SAAS,CAAC,oBAAoB,CAAC;YACpE,IAAI,EAAE,IAAI,CAAC,OAAO,CAAC,SAAS,EAAE,kDAAkD,CAAC;YACjF,OAAO,EAAE,eAAe;SACzB,CAAC,CAAA;QACF,IAAI,wBAAwB,CAAC,KAAK,EAAE,EAAE,CAAC;YACrC,MAAM,IAAI,KAAK,CAAC,4CAA4C,wBAAwB,CAAC,SAAS,EAAE,CAAC,OAAO,EAAE,CAAC,CAAA;QAC7G,CAAC;QACD,GAAG,CAAC,KAAK,CAAC,wCAAwC,EAAE,wBAAwB,CAAC,MAAM,EAAE,CAAC,CAAA;QAEtF,sBAAsB;QACtB,GAAG,CAAC,KAAK,CAAC,gCAAgC,EAAE,IAAA,8CAAoB,EAAC,OAAO,CAAC,GAAG,CAAC,CAAC,CAAA;QAC9E,GAAG,CAAC,KAAK,CAAC,oBAAoB,EAAE,IAAA,sCAAgB,EAAC,OAAO,CAAC,CAAC,CAAA;QAE1D,OAAO,CAAC,GAAG,CAAC,aAAa,CAAC,EAAE,GAAG,EAAE,UAAU,EAAE,GAAG,EAAE,CAAC,CAAA;QAEnD,OAAO,OAAO,CAAA;IAChB,CAAC;CACF,CAAA"}
|
|
@@ -0,0 +1,251 @@
|
|
|
1
|
+
# Prepare E2E Tests
|
|
2
|
+
|
|
3
|
+
You are tasked with analyzing code changes and preparing end-to-end (E2E) tests for the <% schema.slug %> project.
|
|
4
|
+
|
|
5
|
+
## Arguments
|
|
6
|
+
|
|
7
|
+
- `$ARGUMENTS` - Optional: specific features or areas to focus testing on. If not provided, analyze the git diff automatically.
|
|
8
|
+
|
|
9
|
+
## Overview
|
|
10
|
+
|
|
11
|
+
This command analyzes your recent code changes, determines what E2E test coverage is needed, writes the test specs following existing patterns, and runs them to verify they pass.
|
|
12
|
+
|
|
13
|
+
## Step 1: Analyze Changes
|
|
14
|
+
|
|
15
|
+
### 1.1 Get Changed Files
|
|
16
|
+
|
|
17
|
+
Determine what has changed by examining the git diff:
|
|
18
|
+
|
|
19
|
+
```bash
|
|
20
|
+
# If on a feature branch, diff against main
|
|
21
|
+
git diff main...HEAD --name-only
|
|
22
|
+
|
|
23
|
+
# Also check for any uncommitted changes
|
|
24
|
+
git diff --name-only
|
|
25
|
+
git diff --staged --name-only
|
|
26
|
+
```
|
|
27
|
+
|
|
28
|
+
If `$ARGUMENTS` was provided, focus analysis on the specified features or areas instead of the full diff.
|
|
29
|
+
|
|
30
|
+
### 1.2 Categorize Changes
|
|
31
|
+
|
|
32
|
+
Read each changed file and categorize it:
|
|
33
|
+
|
|
34
|
+
- **Frontend pages/routes**: Files in `<% frontendPath %>/src/routes/` → need navigation tests, content verification, and visual snapshots
|
|
35
|
+
- **Frontend components**: Files in `<% frontendPath %>/src/components/` or `<% frontendPath %>/src/pages/` → need interaction tests
|
|
36
|
+
- **Backend endpoints/routers**: Files in `<% backendPath %>/libs/router-trpc/` or `<% backendPath %>/libs/actions/` → need frontend integration tests verifying data display
|
|
37
|
+
- **Backend views**: Files in `<% backendPath %>/libs/view/` → need data display verification tests
|
|
38
|
+
- **Shared types**: Files in `<% backendPath %>/libs/types/` → may affect existing tests
|
|
39
|
+
- **Schema changes**: `postxl-schema.json` → affect models and may need comprehensive new tests
|
|
40
|
+
- **Forms**: Components containing form elements (TanStack Form, inputs, selects) → need form interaction tests
|
|
41
|
+
- **Tables/DataGrids**: Components using DataGrid → need data display, sorting, and filtering tests
|
|
42
|
+
|
|
43
|
+
### 1.3 Read Changed Files
|
|
44
|
+
|
|
45
|
+
For each changed file, read its content to understand:
|
|
46
|
+
|
|
47
|
+
- What routes/URLs are added or modified
|
|
48
|
+
- What data is displayed (model names, field names)
|
|
49
|
+
- What user interactions are possible (clicks, form fills, navigation)
|
|
50
|
+
- What test IDs are used (`data-test-id` attributes)
|
|
51
|
+
|
|
52
|
+
Also read these reference files for context:
|
|
53
|
+
|
|
54
|
+
```
|
|
55
|
+
e2e/support/model-test-ids.ts # Available test IDs
|
|
56
|
+
e2e/fixtures/test-fixtures.ts # Available test fixtures
|
|
57
|
+
e2e/support/wait-for-page-loaded.ts # Page loading utility
|
|
58
|
+
e2e/support/page-stability.ts # API stability utility
|
|
59
|
+
```
|
|
60
|
+
|
|
61
|
+
### 1.4 Check Existing Test Coverage
|
|
62
|
+
|
|
63
|
+
Read all existing spec files in `e2e/specs/` to understand what is already covered. Do not duplicate existing tests.
|
|
64
|
+
|
|
65
|
+
## Step 2: Determine Test Coverage
|
|
66
|
+
|
|
67
|
+
Based on the analysis, determine which tests to write:
|
|
68
|
+
|
|
69
|
+
### For New Pages/Routes
|
|
70
|
+
|
|
71
|
+
- **Navigation test**: Verify the page is accessible at its URL
|
|
72
|
+
- **Content verification test**: Verify key content elements are visible
|
|
73
|
+
- **Visual regression snapshot**: Capture a baseline screenshot with `toHaveScreenshot()`
|
|
74
|
+
|
|
75
|
+
### For New Forms
|
|
76
|
+
|
|
77
|
+
- **Form display test**: Verify form fields are rendered correctly
|
|
78
|
+
- **Form interaction test**: Fill fields, submit, and verify success feedback
|
|
79
|
+
- **Form validation test**: Submit with invalid data and verify error messages
|
|
80
|
+
|
|
81
|
+
### For New Tables/Data Views
|
|
82
|
+
|
|
83
|
+
- **Data display test**: Verify table renders with expected columns and data
|
|
84
|
+
- **Sorting test**: Click column headers and verify sort order (if sortable)
|
|
85
|
+
- **Empty state test**: Verify empty state message when no data exists
|
|
86
|
+
|
|
87
|
+
### For API/Backend Changes
|
|
88
|
+
|
|
89
|
+
- **Data integration test**: Verify the frontend correctly displays API data
|
|
90
|
+
- **Error state test**: Verify error handling when API returns errors (if applicable)
|
|
91
|
+
|
|
92
|
+
### For UI Component Changes
|
|
93
|
+
|
|
94
|
+
- **Visual regression snapshot**: Capture screenshots of affected pages
|
|
95
|
+
- **Interaction test**: Verify component interactions work correctly
|
|
96
|
+
|
|
97
|
+
## Step 3: Write Tests
|
|
98
|
+
|
|
99
|
+
### 3.1 Test File Structure
|
|
100
|
+
|
|
101
|
+
Create new test files in `e2e/specs/` following the naming convention `<feature>.spec.ts`.
|
|
102
|
+
|
|
103
|
+
**CRITICAL**: Always use the project's custom test fixtures, NOT bare Playwright imports:
|
|
104
|
+
|
|
105
|
+
```typescript
|
|
106
|
+
import { expect, test } from '../fixtures/test-fixtures'
|
|
107
|
+
```
|
|
108
|
+
|
|
109
|
+
### 3.2 Test Patterns to Follow
|
|
110
|
+
|
|
111
|
+
**Reference the existing examples:**
|
|
112
|
+
|
|
113
|
+
- `e2e/specs/example.spec.ts` → basic navigation, visual snapshots, content verification
|
|
114
|
+
- `e2e/specs/example-backend-isolation.spec.ts` → backend isolation pattern
|
|
115
|
+
|
|
116
|
+
**Navigation and Snapshot Test Pattern:**
|
|
117
|
+
|
|
118
|
+
```typescript
|
|
119
|
+
import { expect, test } from '../fixtures/test-fixtures'
|
|
120
|
+
|
|
121
|
+
test.describe('<Feature Name>', () => {
|
|
122
|
+
test('<Page name> page is accessible', async ({ page }) => {
|
|
123
|
+
await page.goto('/<route>')
|
|
124
|
+
await expect(page).toHaveURL(/.*\/<route>$/)
|
|
125
|
+
})
|
|
126
|
+
|
|
127
|
+
test('<Page name> page visual snapshot', async ({ page }) => {
|
|
128
|
+
await page.goto('/<route>')
|
|
129
|
+
await expect(page).toHaveScreenshot()
|
|
130
|
+
})
|
|
131
|
+
})
|
|
132
|
+
```
|
|
133
|
+
|
|
134
|
+
**Content Verification Pattern:**
|
|
135
|
+
|
|
136
|
+
```typescript
|
|
137
|
+
test('<Page> displays expected content', async ({ page }) => {
|
|
138
|
+
await page.goto('/<route>')
|
|
139
|
+
await expect(page.getByText('Expected Heading')).toBeVisible()
|
|
140
|
+
await expect(page.getByTestId('some-test-id')).toBeVisible()
|
|
141
|
+
})
|
|
142
|
+
```
|
|
143
|
+
|
|
144
|
+
**Form Interaction Pattern:**
|
|
145
|
+
|
|
146
|
+
```typescript
|
|
147
|
+
test.describe('<Form Name> Form', () => {
|
|
148
|
+
test('can fill and submit form', async ({ page }) => {
|
|
149
|
+
await page.goto('/<form-route>')
|
|
150
|
+
|
|
151
|
+
// Fill form fields
|
|
152
|
+
await page.getByLabel('Field Name').fill('Test Value')
|
|
153
|
+
await page.getByRole('combobox').click()
|
|
154
|
+
await page.getByRole('option', { name: 'Option' }).click()
|
|
155
|
+
|
|
156
|
+
// Submit
|
|
157
|
+
await page.getByRole('button', { name: 'Submit' }).click()
|
|
158
|
+
|
|
159
|
+
// Verify success
|
|
160
|
+
await expect(page.getByText('Success')).toBeVisible()
|
|
161
|
+
})
|
|
162
|
+
})
|
|
163
|
+
```
|
|
164
|
+
|
|
165
|
+
**Table/DataGrid Verification Pattern:**
|
|
166
|
+
|
|
167
|
+
```typescript
|
|
168
|
+
test.describe('<Model> Table', () => {
|
|
169
|
+
test('displays data in table', async ({ page }) => {
|
|
170
|
+
await page.goto('/<admin-route>')
|
|
171
|
+
|
|
172
|
+
// Verify table headers
|
|
173
|
+
await expect(page.getByText('Column Header')).toBeVisible()
|
|
174
|
+
|
|
175
|
+
// Verify data is loaded
|
|
176
|
+
await expect(page.locator('table tbody tr').first()).toBeVisible()
|
|
177
|
+
})
|
|
178
|
+
})
|
|
179
|
+
```
|
|
180
|
+
|
|
181
|
+
### 3.3 Available Utilities
|
|
182
|
+
|
|
183
|
+
Use these utilities from the `support/` and `fixtures/` directories:
|
|
184
|
+
|
|
185
|
+
- `waitForPageLoaded(page)` from `support/wait-for-page-loaded` → wait for DOM + images
|
|
186
|
+
- `waitForAppIdle(page)` from `support/page-stability` → wait for API requests to complete
|
|
187
|
+
- `resetData()` from `support/reset-data` → reset backend data (stateful mode only)
|
|
188
|
+
- `setData(data)` from `support/set-data` → set specific test data (stateful mode only)
|
|
189
|
+
- `maskToast(page)` from `support/mask-toast` → hide toast notifications for screenshots
|
|
190
|
+
- `clickCheckboxByName(page, name)` from `support/checkbox-click` → click checkboxes reliably
|
|
191
|
+
- `MODEL_TEST_IDS` from `support/model-test-ids` → generated test IDs for models
|
|
192
|
+
- `DataMocker` from `@mock-data/dataMocker.class` → create mock data
|
|
193
|
+
|
|
194
|
+
### 3.4 Best Practices
|
|
195
|
+
|
|
196
|
+
1. **Always use custom test fixtures**: `import { expect, test } from '../fixtures/test-fixtures'`
|
|
197
|
+
2. **Use descriptive test names**: Explain what the test verifies, not how
|
|
198
|
+
3. **Use `data-test-id` attributes**: Prefer `getByTestId()` for stable selectors
|
|
199
|
+
4. **Use role-based selectors**: `getByRole('button', { name: '...' })` for accessibility
|
|
200
|
+
5. **Avoid `waitForTimeout()`**: Use condition-based waits instead
|
|
201
|
+
6. **Group related tests**: Use `test.describe()` blocks
|
|
202
|
+
7. **Mask toasts for screenshots**: Use `maskToast(page)` before `toHaveScreenshot()` if toasts interfere
|
|
203
|
+
8. **Do not modify existing test files** unless the changes specifically affect those tests
|
|
204
|
+
|
|
205
|
+
## Step 4: Run and Verify
|
|
206
|
+
|
|
207
|
+
### 4.1 Run the Tests
|
|
208
|
+
|
|
209
|
+
Execute the E2E tests using the automated script:
|
|
210
|
+
|
|
211
|
+
```bash
|
|
212
|
+
<% cdProjectDir %>./scripts/e2e.sh
|
|
213
|
+
```
|
|
214
|
+
|
|
215
|
+
### 4.2 Handle First-Run Snapshot Baselines
|
|
216
|
+
|
|
217
|
+
New tests with `toHaveScreenshot()` will fail on the first run because no baseline snapshot exists yet. This is expected. Re-run the tests to create the baseline and confirm the snapshot is correct.
|
|
218
|
+
|
|
219
|
+
### 4.3 Investigate Failures
|
|
220
|
+
|
|
221
|
+
If tests fail for reasons other than missing baselines, follow the investigation workflow from the `run-e2e-tests` command:
|
|
222
|
+
|
|
223
|
+
1. **Parse test output** to identify which tests failed
|
|
224
|
+
2. **Read the last run status**: `cat e2e/test-results/.last-run.json`
|
|
225
|
+
3. **Read monocart report**: `cat e2e/monocart-report/index.json`
|
|
226
|
+
4. **Analyze failure type**: snapshot mismatch, element not found, timeout, assertion failure
|
|
227
|
+
5. **Read screenshots** from `e2e/test-results/` for visual context
|
|
228
|
+
6. **Fix and re-run** until all tests pass
|
|
229
|
+
|
|
230
|
+
### 4.4 Common Fixes
|
|
231
|
+
|
|
232
|
+
- **Element not found**: Add `waitForPageLoaded()` or check the selector matches the DOM
|
|
233
|
+
- **Timeout**: Add proper waits for API responses using `waitForAppIdle()`
|
|
234
|
+
- **Toast interfering with screenshot**: Add `maskToast(page)` before the screenshot assertion
|
|
235
|
+
- **Animation causing flaky snapshots**: The fixture auto-disables them, but custom CSS animations may need attention
|
|
236
|
+
|
|
237
|
+
## Step 5: Report Summary
|
|
238
|
+
|
|
239
|
+
After completing the process, provide:
|
|
240
|
+
|
|
241
|
+
- **Changes analyzed**: List of files categorized by type
|
|
242
|
+
- **Tests created**: List of new spec files with test names
|
|
243
|
+
- **Test results**: Pass/fail status
|
|
244
|
+
- **Coverage gaps**: Any areas that could not be automatically tested (e.g., require auth, complex multi-step workflows)
|
|
245
|
+
|
|
246
|
+
## Important Notes
|
|
247
|
+
|
|
248
|
+
- **Tests run in Docker** for consistent snapshots — never update snapshots outside Docker
|
|
249
|
+
- **Use stateless mode** as default for E2E tests
|
|
250
|
+
- **Follow existing patterns**: Match the coding style and structure of `example.spec.ts`
|
|
251
|
+
- **CRITICAL**: Always run tests in a NEW terminal separate from backend and frontend
|
|
@@ -0,0 +1,221 @@
|
|
|
1
|
+
# Run E2E Tests
|
|
2
|
+
|
|
3
|
+
You are tasked with helping the user run end-to-end (E2E) tests in the Claude Code environment (VS Code with Claude assistance).
|
|
4
|
+
|
|
5
|
+
## Overview
|
|
6
|
+
|
|
7
|
+
The E2E tests use Playwright to verify that the application works as expected from the user's perspective. This command works for the <% schema.slug %> project.
|
|
8
|
+
|
|
9
|
+
**Tests always run inside a Docker container** to ensure snapshot consistency across different developer machines and CI. Docker guarantees identical rendering (fonts, DPI, browser version) so snapshot comparisons are deterministic regardless of the host OS.
|
|
10
|
+
|
|
11
|
+
> **⚠️ TERMINAL MANAGEMENT IN CLAUDE CODE**: When running commands through Claude, terminal reuse can cause background services to be interrupted. To avoid this, manually open separate terminal windows using the VS Code Terminal menu before starting each service (Backend, Frontend, Tests). This ensures each service runs in its own isolated terminal.
|
|
12
|
+
|
|
13
|
+
## E2E Test Modes
|
|
14
|
+
|
|
15
|
+
The project supports two types of E2E tests:
|
|
16
|
+
|
|
17
|
+
- **Stateful**: Tests that can change data. Each test runs a data reset before execution. Slower, uses limited seed data.
|
|
18
|
+
- **Stateless**: Tests that don't alter server data, only test views. Faster, can use larger/more complex seed data.
|
|
19
|
+
|
|
20
|
+
**For Claude Code environment, use stateless mode** as the default.
|
|
21
|
+
|
|
22
|
+
## Prerequisites
|
|
23
|
+
|
|
24
|
+
Before running E2E tests, ensure:
|
|
25
|
+
|
|
26
|
+
1. **Docker is running** on the host machine
|
|
27
|
+
2. **Backend is running** in E2E stateless mode (or let the script start it)
|
|
28
|
+
3. **Frontend is built and running** in E2E mode with Docker URLs (or let the script build it)
|
|
29
|
+
|
|
30
|
+
## Running E2E Tests
|
|
31
|
+
|
|
32
|
+
### Option 1: Automated Script (Recommended)
|
|
33
|
+
|
|
34
|
+
The easiest way to run E2E tests is using the automated script:
|
|
35
|
+
|
|
36
|
+
```bash
|
|
37
|
+
<% cdProjectDir %>./scripts/e2e.sh
|
|
38
|
+
```
|
|
39
|
+
|
|
40
|
+
**What the script does:**
|
|
41
|
+
|
|
42
|
+
1. ✅ Auto-detects pnpm and activates Node.js 24+ (via nvm/fnm/volta)
|
|
43
|
+
2. ✅ Checks if backend/frontend are already running (reuses them if available)
|
|
44
|
+
3. 🚀 Starts backend in E2E stateless mode (if needed)
|
|
45
|
+
4. 🏗️ Builds frontend with Docker URLs and starts preview server (if needed)
|
|
46
|
+
5. 🧪 Runs E2E tests inside a Docker container
|
|
47
|
+
6. 🧹 Cleans up only script-started processes on exit (Ctrl+C safe)
|
|
48
|
+
|
|
49
|
+
**Benefits:**
|
|
50
|
+
|
|
51
|
+
- Single command to run everything
|
|
52
|
+
- Automatic service detection and reuse
|
|
53
|
+
- Only cleans up processes it started (your manually-started services are preserved)
|
|
54
|
+
- Detailed logs saved to `.e2e-backend.log` and `.e2e-frontend.log`
|
|
55
|
+
- Works great with Claude Code
|
|
56
|
+
|
|
57
|
+
```bash
|
|
58
|
+
# Run E2E tests (Docker mode)
|
|
59
|
+
<% cdProjectDir %>./scripts/e2e.sh
|
|
60
|
+
|
|
61
|
+
# Just build the Docker image
|
|
62
|
+
<% cdProjectDir %>./scripts/e2e.sh --build-image
|
|
63
|
+
```
|
|
64
|
+
|
|
65
|
+
### Option 2: Manual Step-by-Step Process
|
|
66
|
+
|
|
67
|
+
If you prefer to start services manually before running the script:
|
|
68
|
+
|
|
69
|
+
#### 1. Start Backend in E2E Stateless Mode
|
|
70
|
+
|
|
71
|
+
**Open a new terminal** and run:
|
|
72
|
+
|
|
73
|
+
```bash
|
|
74
|
+
cd <% backendPath %>
|
|
75
|
+
pnpm e2e:stateless
|
|
76
|
+
```
|
|
77
|
+
|
|
78
|
+
**Note**: Keep this terminal running.
|
|
79
|
+
|
|
80
|
+
#### 2. Build and Start Frontend in E2E Mode
|
|
81
|
+
|
|
82
|
+
**Open a new terminal** and run:
|
|
83
|
+
|
|
84
|
+
```bash
|
|
85
|
+
cd <% frontendPath %>
|
|
86
|
+
pnpm e2e:build
|
|
87
|
+
```
|
|
88
|
+
|
|
89
|
+
**Note**: This builds the production version with `host.docker.internal` URLs required for Docker-based testing. Keep this terminal running.
|
|
90
|
+
|
|
91
|
+
**Important**: Do NOT use `pnpm dev` for E2E tests โ the Vite dev server uses `localhost` URLs which are not reachable from inside the Docker container.
|
|
92
|
+
|
|
93
|
+
#### 3. Run E2E Tests
|
|
94
|
+
|
|
95
|
+
**Open a new terminal** and run:
|
|
96
|
+
|
|
97
|
+
```bash
|
|
98
|
+
<% cdProjectDir %>./scripts/e2e.sh
|
|
99
|
+
```
|
|
100
|
+
|
|
101
|
+
The script will detect the running backend and frontend, skip starting them, and run the Docker test container directly.
|
|
102
|
+
|
|
103
|
+
## Test Results and Reports
|
|
104
|
+
|
|
105
|
+
### View Test Results
|
|
106
|
+
|
|
107
|
+
- Detailed test results are saved in the `test-results/` directory
|
|
108
|
+
- HTML report available in `playwright-report/`
|
|
109
|
+
- Test coverage data in `monocart-report/`
|
|
110
|
+
|
|
111
|
+
## Investigating Test Failures
|
|
112
|
+
|
|
113
|
+
When tests fail, follow this investigation workflow **automatically** before asking the user for help.
|
|
114
|
+
|
|
115
|
+
### Step 1: Identify Failed Tests
|
|
116
|
+
|
|
117
|
+
1. Parse the test runner output to identify which tests failed and their error messages
|
|
118
|
+
2. Read the last run status file for a quick summary:
|
|
119
|
+
```bash
|
|
120
|
+
cat e2e/test-results/.last-run.json
|
|
121
|
+
```
|
|
122
|
+
3. Read the monocart report for detailed structured results (flaky detection, retry info):
|
|
123
|
+
```bash
|
|
124
|
+
cat e2e/monocart-report/index.json
|
|
125
|
+
```
|
|
126
|
+
|
|
127
|
+
### Step 2: Analyze Failure Type
|
|
128
|
+
|
|
129
|
+
Determine the failure category:
|
|
130
|
+
|
|
131
|
+
- **Snapshot mismatch**: Expected vs actual screenshot differs. Look for `toHaveScreenshot` errors and compare images in `e2e/test-results/` and `e2e/specs/*-snapshots/`
|
|
132
|
+
- **Element not found / Timeout**: A locator couldn't find an element or an action timed out. Usually indicates a selector change, missing wait, or race condition
|
|
133
|
+
- **Assertion failure**: A `toEqual`, `toContain`, `toBeVisible` etc. check failed. Check if the expected value or app behavior changed
|
|
134
|
+
- **Network/Connection error**: Backend or frontend unreachable. See Troubleshooting section below
|
|
135
|
+
- **Flaky test**: Test passed on retry (monocart marks these as `caseType: "flaky"`). Needs stabilization
|
|
136
|
+
|
|
137
|
+
### Step 3: Read Source Code and Artifacts
|
|
138
|
+
|
|
139
|
+
1. **Read the failing spec file** to understand what the test does:
|
|
140
|
+
```
|
|
141
|
+
e2e/specs/<test-file>.spec.ts
|
|
142
|
+
```
|
|
143
|
+
2. **Check screenshots** for visual context — read any `.png` files in `e2e/test-results/` to see what the page actually looked like
|
|
144
|
+
3. **Check trace files** if available (captured on first retry): `e2e/test-results/*/trace.zip`
|
|
145
|
+
4. **Read related page/component code** if the test interacts with specific frontend components or backend endpoints
|
|
146
|
+
|
|
147
|
+
### Step 4: Fix and Re-run
|
|
148
|
+
|
|
149
|
+
Based on the failure type:
|
|
150
|
+
|
|
151
|
+
**For snapshot mismatches:**
|
|
152
|
+
- If the visual change is intentional (e.g., you modified a component), update the snapshot:
|
|
153
|
+
```bash
|
|
154
|
+
<% cdProjectDir %>./scripts/e2e.sh # re-run โ snapshots update inside Docker for consistency
|
|
155
|
+
```
|
|
156
|
+
- If unintentional, investigate what caused the rendering difference
|
|
157
|
+
|
|
158
|
+
**For flaky tests (timing/race conditions):**
|
|
159
|
+
- Add explicit waits using the project's `waitForPageLoaded()` utility or `page.waitForSelector()`
|
|
160
|
+
- Replace `page.waitForTimeout()` with condition-based waits (e.g., `expect(locator).toBeVisible()`)
|
|
161
|
+
- Use the `pageStability` fixture to wait for API requests to complete
|
|
162
|
+
- Check if animations or transitions need to be disabled (the fixture auto-disables them, but custom CSS animations may need attention)
|
|
163
|
+
|
|
164
|
+
**For element not found errors:**
|
|
165
|
+
- Verify the selector matches the current DOM structure
|
|
166
|
+
- Check if a data-testid was renamed or removed
|
|
167
|
+
- Add a `waitForPageLoaded()` call before interacting with the element
|
|
168
|
+
|
|
169
|
+
**For assertion failures:**
|
|
170
|
+
- Check if the expected values need updating due to intentional code changes
|
|
171
|
+
- Verify test data setup (seed data, mocked data) is correct
|
|
172
|
+
|
|
173
|
+
### Step 5: Verify the Fix
|
|
174
|
+
|
|
175
|
+
After applying fixes, re-run the tests:
|
|
176
|
+
```bash
|
|
177
|
+
<% cdProjectDir %>./scripts/e2e.sh
|
|
178
|
+
```
|
|
179
|
+
|
|
180
|
+
If a test was flaky, consider running it multiple times to confirm stability. You can run a single test file:
|
|
181
|
+
```bash
|
|
182
|
+
# Re-run the full suite via the script; to target a single test, pass its spec path to the Playwright command inside the container
|
|
183
|
+
<% cdProjectDir %>./scripts/e2e.sh # full suite to confirm no regressions
|
|
184
|
+
```
|
|
185
|
+
|
|
186
|
+
## Troubleshooting
|
|
187
|
+
|
|
188
|
+
### Connection Refused Errors
|
|
189
|
+
|
|
190
|
+
If tests fail with connection errors like "connect ECONNREFUSED":
|
|
191
|
+
|
|
192
|
+
1. **Verify Docker is running**: The test container needs Docker to be available
|
|
193
|
+
2. **Verify backend is running**: Check that the backend is listening on port 4000 and E2E helper on port 3001
|
|
194
|
+
3. **Verify frontend is running**: Check that the frontend preview server is on port 3000
|
|
195
|
+
4. **Verify frontend was built with Docker URLs**: The frontend must be built with `VITE_PUBLIC_API_URL=http://host.docker.internal:3001`. If you started it with `pnpm dev`, restart with `pnpm e2e:build`
|
|
196
|
+
|
|
197
|
+
### Greeting Test Shows "Hi !" (Empty User)
|
|
198
|
+
|
|
199
|
+
This means the frontend cannot reach the backend API from inside the Docker container. The frontend was likely built with `localhost` URLs instead of `host.docker.internal`. Fix by:
|
|
200
|
+
|
|
201
|
+
1. Stopping the frontend
|
|
202
|
+
2. Rebuilding with `pnpm e2e:build` (uses Docker URLs)
|
|
203
|
+
3. Or letting `./scripts/e2e.sh` handle everything automatically
|
|
204
|
+
|
|
205
|
+
### Database Connection Issues
|
|
206
|
+
|
|
207
|
+
If tests fail with database authentication errors:
|
|
208
|
+
|
|
209
|
+
1. Ensure your local database is running
|
|
210
|
+
2. Check database credentials in the backend configuration
|
|
211
|
+
3. Verify the database exists and migrations have been run
|
|
212
|
+
|
|
213
|
+
## Important Notes
|
|
214
|
+
|
|
215
|
+
- **Tests always run in Docker** for consistent snapshots and CI parity
|
|
216
|
+
- **The script preserves your services**: Cleanup only kills processes the script started
|
|
217
|
+
- **Frontend must use Docker URLs**: Built with `host.docker.internal` (via `pnpm e2e:build` or the script)
|
|
218
|
+
- **Keep backend and frontend running** while executing tests
|
|
219
|
+
- **Use stateless mode** as default for better performance
|
|
220
|
+
- **Test results** are saved in `test-results/` directory
|
|
221
|
+
- **CRITICAL**: Always run tests in a NEW terminal separate from backend and frontend terminals to avoid interrupting the servers
|
|
@@ -0,0 +1,398 @@
|
|
|
1
|
+
#!/bin/bash
|
|
2
|
+
|
|
3
|
+
# E2E Test Runner Script
|
|
4
|
+
# This script starts backend, frontend, and runs E2E tests in Docker automatically.
|
|
5
|
+
#
|
|
6
|
+
# The script auto-detects pnpm and Node.js 24+ regardless of how it's invoked
|
|
7
|
+
# (interactive shell, non-interactive subprocess, CI, Claude Code, etc.)
|
|
8
|
+
|
|
9
|
+
set -e # Exit on error
|
|
10
|
+
|
|
11
|
+
# Colors for output
|
|
12
|
+
RED='\033[0;31m'
|
|
13
|
+
GREEN='\033[0;32m'
|
|
14
|
+
YELLOW='\033[1;33m'
|
|
15
|
+
BLUE='\033[0;34m'
|
|
16
|
+
NC='\033[0m' # No Color
|
|
17
|
+
|
|
18
|
+
# Get the directory where this script is located
|
|
19
|
+
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
|
20
|
+
PROJECT_DIR="$(dirname "$SCRIPT_DIR")"
|
|
21
|
+
MONOREPO_ROOT=<% monorepoRootExpr %>
|
|
22
|
+
|
|
23
|
+
# Ports
|
|
24
|
+
BACKEND_PORT=3001
|
|
25
|
+
FRONTEND_PORT=3000
|
|
26
|
+
BACKEND_MAIN_PORT=4000
|
|
27
|
+
|
|
28
|
+
# PID file paths
|
|
29
|
+
BACKEND_PID_FILE="$PROJECT_DIR/.e2e-backend.pid"
|
|
30
|
+
FRONTEND_PID_FILE="$PROJECT_DIR/.e2e-frontend.pid"
|
|
31
|
+
|
|
32
|
+
# Docker image name
|
|
33
|
+
DOCKER_IMAGE="e2e-pxl-env"
|
|
34
|
+
|
|
35
|
+
# --------------------------------------------------------------------------
# Environment setup: resolve pnpm and Node.js 24+
# --------------------------------------------------------------------------
# Ensures pnpm and the correct Node.js version are available even in
# non-interactive shells (e.g., Claude Code, CI, cron, subprocesses).
# Sets the global PNPM_BIN; exits the script if pnpm cannot be found.
setup_environment() {
  # Common paths where pnpm might be installed
  local search_paths=(
    "$HOME/.local/share/pnpm"
    "$HOME/.pnpm"
    "/opt/homebrew/bin"
    "/usr/local/bin"
    "$HOME/.corepack/bin"
  )

  # Add search paths to PATH if they exist and aren't already in PATH
  for p in "${search_paths[@]}"; do
    if [[ -d "$p" ]] && [[ ":$PATH:" != *":$p:"* ]]; then
      export PATH="$p:$PATH"
    fi
  done

  # Resolve pnpm
  PNPM_BIN=$(command -v pnpm 2>/dev/null || true)
  if [[ -z "$PNPM_BIN" ]]; then
    echo -e "${RED}❌ pnpm not found. Please install pnpm (https://pnpm.io/installation)${NC}"
    exit 1
  fi

  # Check current Node.js version - we need 24+ for ESM require() support
  local node_version
  node_version=$(node --version 2>/dev/null | sed 's/^v//' | cut -d. -f1)

  if [[ -n "$node_version" ]] && [[ "$node_version" -ge 24 ]]; then
    echo -e "${GREEN}✅ Node.js $(node --version) detected${NC}"
    return 0
  fi

  # Try to activate Node.js 24+ via nvm
  if [[ -s "$HOME/.nvm/nvm.sh" ]]; then
    export NVM_DIR="$HOME/.nvm"
    # shellcheck source=/dev/null
    source "$NVM_DIR/nvm.sh" 2>/dev/null

    # Find the newest Node.js 24+ version installed via nvm
    local nvm_node
    nvm_node=$(nvm ls --no-alias --no-colors 2>/dev/null | grep -oE 'v2[4-9]\.[0-9]+\.[0-9]+|v[3-9][0-9]\.[0-9]+\.[0-9]+' | sort -V | tail -1 || true)

    if [[ -n "$nvm_node" ]]; then
      echo -e "${BLUE}🔄 Activating $nvm_node via nvm...${NC}"
      nvm use "$nvm_node" >/dev/null 2>&1
      # Ensure nvm's bin dir is first in PATH so pnpm picks it up.
      # Guard: NVM_BIN may be unset on older nvm versions, and prepending an
      # empty segment would put "" (the cwd) on PATH.
      if [[ -n "${NVM_BIN:-}" ]]; then
        export PATH="$NVM_BIN:$PATH"
      fi
      echo -e "${GREEN}✅ Node.js $(node --version) activated via nvm${NC}"
      return 0
    fi
  fi

  # Try to activate Node.js 24+ via fnm
  if command -v fnm >/dev/null 2>&1; then
    local fnm_node
    fnm_node=$(fnm list 2>/dev/null | grep -oE 'v2[4-9]\.[0-9]+\.[0-9]+|v[3-9][0-9]\.[0-9]+\.[0-9]+' | sort -V | tail -1 || true)

    if [[ -n "$fnm_node" ]]; then
      echo -e "${BLUE}🔄 Activating $fnm_node via fnm...${NC}"
      eval "$(fnm env)" 2>/dev/null
      fnm use "$fnm_node" >/dev/null 2>&1
      echo -e "${GREEN}✅ Node.js $(node --version) activated via fnm${NC}"
      return 0
    fi
  fi

  # Try to activate Node.js 24+ via volta
  if command -v volta >/dev/null 2>&1; then
    echo -e "${BLUE}🔄 Using volta for Node.js management...${NC}"
    # volta manages node automatically based on package.json
    local volta_node
    volta_node=$(node --version 2>/dev/null | sed 's/^v//' | cut -d. -f1)
    if [[ "$volta_node" -ge 24 ]]; then
      echo -e "${GREEN}✅ Node.js $(node --version) via volta${NC}"
      return 0
    fi
  fi

  # Node.js 24+ not found - warn but don't fail (pnpm devEngines might handle it)
  echo -e "${YELLOW}⚠️  Node.js 24+ not found (current: $(node --version 2>/dev/null || echo 'none')).${NC}"
  echo -e "${YELLOW}   The project requires Node.js 24+ for ESM support.${NC}"
  echo -e "${YELLOW}   Install via: nvm install 24 / fnm install 24 / volta install node@24${NC}"
  echo -e "${YELLOW}   Continuing anyway (pnpm devEngines.runtime may auto-download it)...${NC}"
}
|
|
126
|
+
|
|
127
|
+
# Recursively terminate a process together with every descendant
# (children first, then the parent itself). Missing PIDs are ignored.
kill_tree() {
  local parent=$1
  local kid
  for kid in $(pgrep -P "$parent" 2>/dev/null || true); do
    kill_tree "$kid"
  done
  kill "$parent" 2>/dev/null || true
}
|
|
137
|
+
|
|
138
|
+
# Terminate whatever is still listening on the given TCP port, if anything.
kill_port() {
  local port=$1
  local listeners
  listeners=$(lsof -Pi :"$port" -sTCP:LISTEN -t 2>/dev/null || true)
  if [[ -n "$listeners" ]]; then
    echo -e "${BLUE}   Killing remaining process(es) on port $port...${NC}"
    echo "$listeners" | xargs kill 2>/dev/null || true
  fi
}
|
|
148
|
+
|
|
149
|
+
# Stop one service this script started (tracked via its PID file), then make
# sure its port(s) are released. Extra args after the name are ports to free.
# Does nothing if the PID file is absent (i.e. we never started the service).
_stop_tracked_service() {
  local pid_file=$1
  local name=$2
  shift 2

  if [[ ! -f "$pid_file" ]]; then
    return 0
  fi

  local pid
  pid=$(cat "$pid_file")
  if ps -p "$pid" > /dev/null 2>&1; then
    echo -e "${BLUE}⏹️  Stopping $name (PID: $pid)...${NC}"
    kill_tree "$pid"
  fi
  rm -f "$pid_file"

  # Ensure ports are fully released before anything tries to reuse them
  sleep 0.5
  local port
  for port in "$@"; do
    kill_port "$port"
  done
}

# Cleanup function: stops only the processes this script started (pre-existing
# backend/frontend instances have no PID file and are left untouched).
cleanup() {
  echo -e "\n${YELLOW}🧹 Cleaning up...${NC}"

  _stop_tracked_service "$BACKEND_PID_FILE" "backend" "$BACKEND_PORT" "$BACKEND_MAIN_PORT"
  _stop_tracked_service "$FRONTEND_PID_FILE" "frontend" "$FRONTEND_PORT"

  echo -e "${GREEN}✅ Cleanup complete${NC}"
}
|
|
182
|
+
|
|
183
|
+
# Register cleanup on script exit — runs on normal exit, Ctrl+C (INT), or TERM
trap cleanup EXIT INT TERM
|
|
185
|
+
|
|
186
|
+
# Detect services already listening on our ports so we reuse them instead of
# starting duplicates. Sets the globals BACKEND_RUNNING / FRONTEND_RUNNING,
# which start_backend / start_frontend consult.
check_services() {
  echo -e "${BLUE}🔍 Checking for existing services...${NC}"

  BACKEND_RUNNING=false
  FRONTEND_RUNNING=false

  if lsof -Pi :"$BACKEND_PORT" -sTCP:LISTEN -t >/dev/null 2>&1; then
    echo -e "${YELLOW}⚠️  Backend already running on port $BACKEND_PORT${NC}"
    BACKEND_RUNNING=true
  fi

  if lsof -Pi :"$FRONTEND_PORT" -sTCP:LISTEN -t >/dev/null 2>&1; then
    echo -e "${YELLOW}⚠️  Frontend already running on port $FRONTEND_PORT${NC}"
    FRONTEND_RUNNING=true
  fi
}
|
|
203
|
+
|
|
204
|
+
# Poll until something is listening on the given port, or time out.
# $1 = port, $2 = human-readable service name (used in log messages).
# Returns 0 once the port is open, 1 after the timeout elapses.
wait_for_service() {
  local port=$1
  local service_name=$2
  local max_attempts=60 # 60 seconds timeout
  local attempt=0

  echo -e "${BLUE}⏳ Waiting for $service_name to be ready on port $port...${NC}"

  while ! lsof -Pi :"$port" -sTCP:LISTEN -t >/dev/null 2>&1; do
    attempt=$((attempt + 1))
    if [[ $attempt -gt $max_attempts ]]; then
      echo -e "${RED}❌ Timeout waiting for $service_name to start${NC}"
      return 1
    fi
    sleep 1
  done

  echo -e "${GREEN}✅ $service_name is ready${NC}"
  return 0
}
|
|
225
|
+
|
|
226
|
+
# Start the backend in E2E stateless mode, unless check_services found one
# already running (then it is reused). Logs go to .e2e-backend.log; the PID
# is saved so cleanup can stop exactly what we started.
start_backend() {
  if [[ "$BACKEND_RUNNING" == "true" ]]; then
    echo -e "${GREEN}✅ Using existing backend on port $BACKEND_PORT${NC}"
    return 0
  fi

  echo -e "${BLUE}🚀 Starting backend in E2E stateless mode...${NC}"
  cd "$PROJECT_DIR/backend"

  # Start backend in background and save PID
  "$PNPM_BIN" e2e:stateless > "$PROJECT_DIR/.e2e-backend.log" 2>&1 &
  echo $! > "$BACKEND_PID_FILE"

  # Wait for backend to be ready; dump the log tail on failure for debugging
  if ! wait_for_service $BACKEND_PORT "Backend"; then
    echo -e "${RED}❌ Failed to start backend${NC}"
    echo -e "${YELLOW}📋 Backend log (last 20 lines):${NC}"
    tail -n 20 "$PROJECT_DIR/.e2e-backend.log"
    return 1
  fi
}
|
|
248
|
+
|
|
249
|
+
# Build the frontend with Docker-reachable URLs and serve it via
# `vite preview`, unless check_services found a frontend already running.
# The host.docker.internal URLs are required because the tests execute
# inside a Docker container and must reach services on the host.
start_frontend() {
  if [[ "$FRONTEND_RUNNING" == "true" ]]; then
    echo -e "${GREEN}✅ Using existing frontend on port $FRONTEND_PORT${NC}"
    return 0
  fi

  echo -e "${BLUE}🏗️  Building frontend for E2E...${NC}"
  cd "$PROJECT_DIR/frontend"

  # Build frontend with Docker URLs (tests run inside Docker container accessing host services)
  if ! VITE_PUBLIC_API_URL=http://host.docker.internal:3001 VITE_PUBLIC_BASE_URL=http://host.docker.internal:3000 NODE_V8_COVERAGE=.v8-coverage VITE_PUBLIC_MAPBOX_DISABLE_TRANSITIONS=true VITE_PUBLIC_USE_TEST_ASSETS=true VITE_AUTH=false "$PNPM_BIN" exec vite build; then
    echo -e "${RED}❌ Failed to build frontend${NC}"
    return 1
  fi

  echo -e "${BLUE}🚀 Starting frontend preview server...${NC}"

  # Start frontend preview server in background and save PID
  "$PNPM_BIN" exec vite preview --port 3000 > "$PROJECT_DIR/.e2e-frontend.log" 2>&1 &
  echo $! > "$FRONTEND_PID_FILE"

  # Wait for frontend to be ready; dump the log tail on failure for debugging
  if ! wait_for_service $FRONTEND_PORT "Frontend"; then
    echo -e "${RED}❌ Failed to start frontend${NC}"
    echo -e "${YELLOW}📋 Frontend log (last 20 lines):${NC}"
    tail -n 20 "$PROJECT_DIR/.e2e-frontend.log"
    return 1
  fi
}
|
|
279
|
+
|
|
280
|
+
# Build the E2E test-runner Docker image from ./e2e/.
# Returns docker build's success/failure so callers can abort on error.
build_docker_image() {
  echo -e "${BLUE}🐳 Building Docker image...${NC}"
  cd "$PROJECT_DIR"

  if docker build -t $DOCKER_IMAGE ./e2e/; then
    echo -e "${GREEN}✅ Docker image built successfully${NC}"
    return 0
  else
    echo -e "${RED}❌ Failed to build Docker image${NC}"
    return 1
  fi
}
|
|
293
|
+
|
|
294
|
+
# Run the E2E suite inside the Docker container, building the image first if
# it does not exist yet. Returns the test run's exit code.
run_tests() {
  echo -e "${BLUE}🧪 Running E2E tests in Docker...${NC}"

  # Build Docker image if it doesn't exist
  if ! docker image inspect $DOCKER_IMAGE >/dev/null 2>&1; then
    echo -e "${YELLOW}Docker image not found, building...${NC}"
    if ! build_docker_image; then
      return 1
    fi
  fi

  echo -e "${BLUE}📦 Running tests in Docker container...${NC}"

  # Clean test output directories so they can be recreated with correct permissions
  rm -rf "$MONOREPO_ROOT/projects/demo/e2e/test-results" "$MONOREPO_ROOT/projects/demo/e2e/playwright-report" "$MONOREPO_ROOT/projects/demo/e2e/monocart-report"

  # Run tests in Docker with proper volume mounting.
  # BUGFIX: capture the exit code with `|| test_exit_code=$?`. With `set -e`
  # active, a bare `docker run` followed by `local test_exit_code=$?` never
  # reaches the capture on failure — the script aborts first.
  local test_exit_code=0
  docker run --rm \
    -v "$MONOREPO_ROOT:/pxl" \
    -w <% dockerWorkDir %> \
    --add-host=host.docker.internal:host-gateway \
    $DOCKER_IMAGE \
    /bin/bash -c "<% nodeShimsCleanup %>; pnpm e2e" || test_exit_code=$?

  if [[ $test_exit_code -eq 0 ]]; then
    echo -e "${GREEN}✅ All tests passed!${NC}"
  else
    echo -e "${RED}❌ Tests failed with exit code $test_exit_code${NC}"
  fi

  return $test_exit_code
}
|
|
330
|
+
|
|
331
|
+
# Main execution: set up the environment, start (or reuse) backend and
# frontend, run the tests in Docker, and exit with the tests' exit code.
main() {
  echo -e "${GREEN}╔═══════════════════╗${NC}"
  echo -e "${GREEN}║  E2E Test Runner  ║${NC}"
  echo -e "${GREEN}╚═══════════════════╝${NC}\n"

  echo -e "${BLUE}🐳 Running in Docker mode${NC}\n"

  # Setup environment (resolve pnpm, activate Node.js 24+)
  setup_environment

  # Check for existing services
  check_services

  # Start backend
  if ! start_backend; then
    echo -e "${RED}❌ Failed to start backend. Exiting.${NC}"
    exit 1
  fi

  # Start frontend
  if ! start_frontend; then
    echo -e "${RED}❌ Failed to start frontend. Exiting.${NC}"
    exit 1
  fi

  # Run tests in Docker.
  # BUGFIX: capture via `||` — with `set -e`, `run_tests` followed by
  # `local exit_code=$?` never reaches the capture when tests fail.
  local exit_code=0
  run_tests || exit_code=$?

  exit $exit_code
}
|
|
363
|
+
|
|
364
|
+
# Parse command line arguments: --help prints usage, --build-image only
# builds the Docker image, anything else runs the full pipeline.
arg="${1:-}"
if [[ "$arg" == "--help" || "$arg" == "-h" ]]; then
  echo "Usage: $0 [OPTIONS]"
  echo ""
  echo "Options:"
  echo "  --help, -h       Show this help message"
  echo "  --build-image    Build Docker image and exit"
  echo ""
  echo "This script automatically:"
  echo "  1. Detects pnpm and activates Node.js 24+ (via nvm/fnm/volta)"
  echo "  2. Checks if backend/frontend are already running (reuses them)"
  echo "  3. Starts backend in E2E stateless mode (if needed)"
  echo "  4. Builds and starts frontend preview server (if needed)"
  echo "  5. Runs E2E tests in Docker"
  echo "  6. Cleans up all processes on exit (Ctrl+C safe)"
  echo ""
  echo "Examples:"
  echo "  $0                 # Run E2E tests"
  echo "  $0 --build-image   # Just build the Docker image"
  echo ""
  echo "Logs:"
  echo "  Backend log:  .e2e-backend.log"
  echo "  Frontend log: .e2e-frontend.log"
  exit 0
elif [[ "$arg" == "--build-image" ]]; then
  setup_environment
  build_docker_image
  exit $?
else
  main
fi
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@postxl/generators",
|
|
3
|
-
"version": "1.
|
|
3
|
+
"version": "1.17.0",
|
|
4
4
|
"description": "Code generators for PXL - generates backend, frontend, Prisma schemas, and more",
|
|
5
5
|
"main": "./dist/index.js",
|
|
6
6
|
"module": "./dist/index.js",
|
|
@@ -46,7 +46,7 @@
|
|
|
46
46
|
"exceljs": "^4.4.0",
|
|
47
47
|
"@postxl/generator": "^1.3.5",
|
|
48
48
|
"@postxl/schema": "^1.6.0",
|
|
49
|
-
"@postxl/ui-components": "^1.5.
|
|
49
|
+
"@postxl/ui-components": "^1.5.2",
|
|
50
50
|
"@postxl/utils": "^1.3.3"
|
|
51
51
|
},
|
|
52
52
|
"devDependencies": {},
|