codeceptjs 3.7.4 → 3.7.5-beta.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -64,6 +64,8 @@ You don't need to worry about asynchronous nature of NodeJS or about various API
 - Also plays nice with TypeScript.
 - </> Smart locators: use names, labels, matching text, CSS or XPath to locate elements.
 - 🌐 Interactive debugging shell: pause test at any point and try different commands in a browser.
+- ⚡ **Parallel testing** with dynamic test pooling for optimal load balancing and performance.
+- 📊 **Built-in HTML Reporter** with interactive dashboard, step-by-step execution details, and comprehensive test analytics.
 - Easily create tests, pageobjects, stepobjects with CLI generators.
 
 ## Installation
@@ -233,6 +235,49 @@ Scenario('test title', () => {
 })
 ```
 
+## HTML Reporter
+
+CodeceptJS includes a powerful built-in HTML Reporter that generates comprehensive, interactive test reports with detailed information about your test runs. The HTML reporter is **enabled by default** for all new projects and provides:
+
+### Features
+
+- **Interactive Dashboard**: Visual statistics, pie charts, and expandable test details
+- **Step-by-Step Execution**: Shows individual test steps with timing and status indicators
+- **BDD/Gherkin Support**: Full support for feature files with proper scenario formatting
+- **System Information**: Comprehensive environment details including browser versions
+- **Advanced Filtering**: Real-time filtering by status, tags, features, and test types
+- **History Tracking**: Multi-run history with trend visualization
+- **Error Details**: Clean formatting of error messages and stack traces
+- **Artifacts Support**: Display screenshots and other test artifacts
+
+### Visual Examples
+
+#### Interactive Test Dashboard
+
+The main dashboard provides a complete overview with interactive statistics and pie charts:
+
+![HTML Reporter Dashboard](docs/shared/html-reporter-main-dashboard.png)
+
+#### Detailed Test Results
+
+Each test shows comprehensive execution details with expandable step information:
+
+![HTML Reporter Test Details](docs/shared/html-reporter-test-details.png)
+
+#### Advanced Filtering Capabilities
+
+Real-time filtering allows quick navigation through test results:
+
+![HTML Reporter Filtering](docs/shared/html-reporter-filtering.png)
+
+#### BDD/Gherkin Support
+
+Full support for Gherkin scenarios with proper feature formatting:
+
+![HTML Reporter BDD Details](docs/shared/html-reporter-bdd-details.png)
+
+The HTML reporter generates self-contained reports that can be easily shared with your team. Learn more about configuration and features in the [HTML Reporter documentation](https://codecept.io/plugins/#htmlreporter).
+
 ## PageObjects
 
 CodeceptJS provides the most simple way to create and use page objects in your test.
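For reference, the default scaffolded by this release (see the defaultConfig hunk further down) translates into a codecept.conf.js along these lines. This is a minimal sketch: only the `plugins.htmlReporter` key and its `enabled` flag come from this diff; the helper settings are invented for illustration.

```js
// codecept.conf.js — minimal sketch; helper settings below are illustrative assumptions
exports.config = {
  tests: './*_test.js',
  output: './output',
  helpers: {
    Playwright: { url: 'http://localhost', browser: 'chromium' }, // assumed helper config
  },
  plugins: {
    htmlReporter: {
      enabled: true, // scaffolded as true by default in this release
    },
  },
}
```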
package/bin/codecept.js CHANGED
@@ -165,6 +165,7 @@ program
   .option('--no-timeouts', 'disable all timeouts')
   .option('-p, --plugins <k=v,k2=v2,...>', 'enable plugins, comma-separated')
   .option('--shuffle', 'Shuffle the order in which test files run')
+  .option('--shard <index/total>', 'run only a fraction of tests (e.g., --shard 1/4)')
 
   // mocha options
   .option('--colors', 'force enabling of colors')
@@ -196,6 +197,7 @@ program
   .option('-i, --invert', 'inverts --grep matches')
   .option('-o, --override [value]', 'override current config options')
   .option('--suites', 'parallel execution of suites not single tests')
+  .option('--by <strategy>', 'test distribution strategy: "test" (pre-assign individual tests), "suite" (pre-assign test suites), or "pool" (dynamic distribution for optimal load balancing, recommended)')
   .option(commandFlags.debug.flag, commandFlags.debug.description)
   .option(commandFlags.verbose.flag, commandFlags.verbose.description)
   .option('--features', 'run only *.feature files and skip tests')
@@ -294,6 +296,29 @@ program
 
   .action(require('../lib/command/run-rerun'))
 
+program
+  .command('run-failed-tests')
+  .description('Re-run tests that failed in the previous test run')
+  .option(commandFlags.config.flag, commandFlags.config.description)
+  .option(commandFlags.profile.flag, commandFlags.profile.description)
+  .option(commandFlags.verbose.flag, commandFlags.verbose.description)
+  .option(commandFlags.debug.flag, commandFlags.debug.description)
+  .option(commandFlags.steps.flag, commandFlags.steps.description)
+  .option('-o, --override [value]', 'override current config options')
+  .option('-f, --file [path]', 'path to failed tests file (default: ./failed-tests.json)')
+  .option('-g, --grep <pattern>', 'only run failed tests matching <pattern>')
+  .option('-p, --plugins <k=v,k2=v2,...>', 'enable plugins, comma-separated')
+  .option('--features', 'run only *.feature files and skip tests')
+  .option('--tests', 'run only JS test files and skip features')
+  .option('--colors', 'force enabling of colors')
+  .option('--no-colors', 'force disabling of colors')
+  .option('-R, --reporter <name>', 'specify the reporter to use')
+  .option('-O, --reporter-options <k=v,k2=v2,...>', 'reporter-specific options')
+  .option('--workers <number>', 'run failed tests in parallel using specified number of workers')
+  .option('--suites', 'parallel execution of suites not single tests (when using --workers)')
+  .option('--by <strategy>', 'test distribution strategy when using --workers: "test", "suite", or "pool"')
+  .action(errorHandler(require('../lib/command/run-failed-tests')))
+
 program.on('command:*', cmd => {
   console.log(`\nUnknown command ${cmd}\n`)
   program.outputHelp()
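Taken together, the new CLI surface can be exercised roughly as follows. This is a sketch based only on the flags added above; worker counts and shard indices are arbitrary.

```sh
# Run only the first of four shards of the test suite
npx codeceptjs run --shard 1/4

# Re-run whatever failed last time, reading ./failed-tests.json by default
npx codeceptjs run-failed-tests

# Re-run failures in parallel, distributing them dynamically across 4 workers
npx codeceptjs run-failed-tests --workers 4 --by pool
```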
@@ -0,0 +1,53 @@
+#!/usr/bin/env node
+
+/**
+ * Standalone test server script to replace json-server
+ */
+
+const path = require('path')
+const TestServer = require('../lib/test-server')
+
+// Parse command line arguments
+const args = process.argv.slice(2)
+let dbFile = path.join(__dirname, '../test/data/rest/db.json')
+let port = 8010
+let host = '0.0.0.0'
+
+// Simple argument parsing
+for (let i = 0; i < args.length; i++) {
+  const arg = args[i]
+
+  if (arg === '-p' || arg === '--port') {
+    port = parseInt(args[++i])
+  } else if (arg === '--host') {
+    host = args[++i]
+  } else if (!arg.startsWith('-')) {
+    dbFile = path.resolve(arg)
+  }
+}
+
+// Create and start server
+const server = new TestServer({ port, host, dbFile })
+
+console.log(`Starting test server with db file: ${dbFile}`)
+
+server
+  .start()
+  .then(() => {
+    console.log(`Test server is ready and listening on http://${host}:${port}`)
+  })
+  .catch(err => {
+    console.error('Failed to start test server:', err)
+    process.exit(1)
+  })
+
+// Graceful shutdown
+process.on('SIGINT', () => {
+  console.log('\nShutting down test server...')
+  server.stop().then(() => process.exit(0))
+})
+
+process.on('SIGTERM', () => {
+  console.log('\nShutting down test server...')
+  server.stop().then(() => process.exit(0))
+})
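The new file's path is not shown in this diff, so the invocation below assumes a hypothetical bin/test-server.js location; the flags (`-p`/`--port`, `--host`, positional db file) and defaults (port 8010, host 0.0.0.0) are taken from the script itself.

```sh
# Hypothetical invocation — the script's actual path is not visible in this diff
node bin/test-server.js --port 8010 --host 0.0.0.0 test/data/rest/db.json
```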
package/lib/codecept.js CHANGED
@@ -111,6 +111,7 @@ class Codecept {
     runHook(require('./listener/helpers'))
     runHook(require('./listener/globalTimeout'))
     runHook(require('./listener/globalRetry'))
+    runHook(require('./listener/retryEnhancer'))
     runHook(require('./listener/exit'))
     runHook(require('./listener/emptyRun'))
 
@@ -185,6 +186,46 @@ class Codecept {
     if (this.opts.shuffle) {
       this.testFiles = shuffle(this.testFiles)
     }
+
+    if (this.opts.shard) {
+      this.testFiles = this._applySharding(this.testFiles, this.opts.shard)
+    }
+  }
+
+  /**
+   * Apply sharding to test files based on shard configuration
+   *
+   * @param {Array<string>} testFiles - Array of test file paths
+   * @param {string} shardConfig - Shard configuration in format "index/total" (e.g., "1/4")
+   * @returns {Array<string>} - Filtered array of test files for this shard
+   */
+  _applySharding(testFiles, shardConfig) {
+    const shardMatch = shardConfig.match(/^(\d+)\/(\d+)$/)
+    if (!shardMatch) {
+      throw new Error('Invalid shard format. Expected format: "index/total" (e.g., "1/4")')
+    }
+
+    const shardIndex = parseInt(shardMatch[1], 10)
+    const shardTotal = parseInt(shardMatch[2], 10)
+
+    if (shardTotal < 1) {
+      throw new Error('Shard total must be at least 1')
+    }
+
+    if (shardIndex < 1 || shardIndex > shardTotal) {
+      throw new Error(`Shard index ${shardIndex} must be between 1 and ${shardTotal}`)
+    }
+
+    if (testFiles.length === 0) {
+      return testFiles
+    }
+
+    // Calculate which tests belong to this shard
+    const shardSize = Math.ceil(testFiles.length / shardTotal)
+    const startIndex = (shardIndex - 1) * shardSize
+    const endIndex = Math.min(startIndex + shardSize, testFiles.length)
+
+    return testFiles.slice(startIndex, endIndex)
   }
 
   /**
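To make the shard arithmetic concrete, here is a small standalone sketch of the same slicing that _applySharding performs above; the file names are hypothetical, only the arithmetic mirrors the diff.

```js
// Standalone sketch of the slicing performed by _applySharding (file names invented)
const testFiles = Array.from({ length: 10 }, (_, i) => `test_${i + 1}.js`)

function shardSlice(files, index, total) {
  const shardSize = Math.ceil(files.length / total) // 10 files, 4 shards -> 3 per shard
  const start = (index - 1) * shardSize
  return files.slice(start, Math.min(start + shardSize, files.length))
}

console.log(shardSlice(testFiles, 1, 4)) // [ 'test_1.js', 'test_2.js', 'test_3.js' ]
console.log(shardSlice(testFiles, 4, 4)) // [ 'test_10.js' ] — the last shard may be smaller
```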
@@ -18,6 +18,11 @@ const defaultConfig = {
   output: '',
   helpers: {},
   include: {},
+  plugins: {
+    htmlReporter: {
+      enabled: true,
+    },
+  },
 }
 
 const helpers = ['Playwright', 'WebDriver', 'Puppeteer', 'REST', 'GraphQL', 'Appium', 'TestCafe']
@@ -0,0 +1,216 @@
+const fs = require('fs')
+const path = require('path')
+const { getConfig, printError, getTestRoot, createOutputDir } = require('./utils')
+const Config = require('../config')
+const store = require('../store')
+const Codecept = require('../codecept')
+const output = require('../output')
+const Workers = require('../workers')
+const { tryOrDefault } = require('../utils')
+
+module.exports = async function (options) {
+  // registering options globally to use in config
+  if (options.profile) {
+    process.env.profile = options.profile
+  }
+  if (options.verbose || options.debug) store.debugMode = true
+
+  const configFile = options.config
+  let config = getConfig(configFile)
+
+  if (options.override) {
+    config = Config.append(JSON.parse(options.override))
+  }
+
+  const testRoot = getTestRoot(configFile)
+  createOutputDir(config, testRoot)
+
+  // Determine failed tests file path
+  const failedTestsFile = options.file || './failed-tests.json'
+  const failedTestsPath = path.resolve(process.cwd(), failedTestsFile)
+
+  // Check if failed tests file exists
+  if (!fs.existsSync(failedTestsPath)) {
+    output.error(`Failed tests file not found: ${failedTestsPath}`)
+    output.print('Run tests first to generate a failed tests file, or specify a different file with --file option')
+    process.exitCode = 1
+    return
+  }
+
+  let failedTestsData
+  try {
+    const fileContent = fs.readFileSync(failedTestsPath, 'utf8')
+    failedTestsData = JSON.parse(fileContent)
+  } catch (error) {
+    output.error(`Failed to read or parse failed tests file: ${error.message}`)
+    process.exitCode = 1
+    return
+  }
+
+  if (!failedTestsData.tests || failedTestsData.tests.length === 0) {
+    output.print('No failed tests found in the file')
+    return
+  }
+
+  output.print(`Found ${failedTestsData.tests.length} failed tests from ${failedTestsData.timestamp}`)
+
+  // Build test patterns from failed tests
+  const testPatterns = []
+  const testsByFile = new Map()
+
+  // Group tests by file for more efficient execution
+  failedTestsData.tests.forEach(test => {
+    if (test.file) {
+      if (!testsByFile.has(test.file)) {
+        testsByFile.set(test.file, [])
+      }
+      testsByFile.get(test.file).push(test)
+    }
+  })
+
+  // If we have specific test files, use them
+  if (testsByFile.size > 0) {
+    for (const [file, tests] of testsByFile) {
+      if (options.grep) {
+        // If grep is specified, combine with file pattern
+        testPatterns.push(file)
+      } else {
+        // Try to be more specific with test titles if possible
+        testPatterns.push(file)
+      }
+    }
+  } else {
+    // Fallback: use test titles with grep
+    const testTitles = failedTestsData.tests.map(test => test.title).filter(Boolean)
+    if (testTitles.length > 0) {
+      // Create a regex pattern to match any of the failed test titles
+      const grepPattern = testTitles.map(title => title.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')).join('|')
+      options.grep = grepPattern
+    }
+  }
+
+  // Check if user wants to run with workers
+  if (options.workers) {
+    await runWithWorkers(config, options, testPatterns, failedTestsData)
+  } else {
+    await runWithoutWorkers(config, options, testPatterns, failedTestsData, testRoot)
+  }
+}
+
+async function runWithWorkers(config, options, testPatterns, failedTestsData) {
+  const numberOfWorkers = parseInt(options.workers, 10)
+  const overrideConfigs = tryOrDefault(() => JSON.parse(options.override || '{}'), {})
+
+  // Determine test split strategy
+  let by = 'test' // default for failed tests
+  if (options.by) {
+    by = options.by
+  } else if (options.suites) {
+    by = 'suite'
+  }
+
+  // Validate the by option
+  const validStrategies = ['test', 'suite', 'pool']
+  if (!validStrategies.includes(by)) {
+    throw new Error(`Invalid --by strategy: ${by}. Valid options are: ${validStrategies.join(', ')}`)
+  }
+
+  const workerConfig = {
+    by,
+    testConfig: options.config,
+    options,
+    selectedRuns: undefined,
+  }
+
+  output.print(`CodeceptJS v${require('../codecept').version()}`)
+  output.print(`Re-running ${failedTestsData.tests.length} failed tests in ${output.styles.bold(numberOfWorkers)} workers...`)
+  output.print()
+  store.hasWorkers = true
+
+  const workers = new Workers(numberOfWorkers, workerConfig)
+  workers.overrideConfig(overrideConfigs)
+
+  // Set up event listeners for worker output
+  workers.on('test.failed', test => {
+    output.test.failed(test)
+  })
+
+  workers.on('test.passed', test => {
+    output.test.passed(test)
+  })
+
+  workers.on('test.skipped', test => {
+    output.test.skipped(test)
+  })
+
+  workers.on('all.result', result => {
+    workers.printResults()
+  })
+
+  try {
+    if (options.verbose || options.debug) store.debugMode = true
+
+    if (options.verbose) {
+      output.print('\nFailed tests to re-run with workers:')
+      failedTestsData.tests.forEach((test, index) => {
+        output.print(` ${index + 1}. ${test.fullTitle || test.title} (${test.file || 'unknown file'})`)
+        if (test.error && test.error.message) {
+          output.print(` Error: ${test.error.message}`)
+        }
+      })
+      output.print('')
+
+      const { getMachineInfo } = require('./info')
+      await getMachineInfo()
+    }
+
+    await workers.bootstrapAll()
+    await workers.run()
+  } catch (err) {
+    printError(err)
+    process.exitCode = 1
+  } finally {
+    await workers.teardownAll()
+  }
+}
+
+async function runWithoutWorkers(config, options, testPatterns, failedTestsData, testRoot) {
+  const codecept = new Codecept(config, options)
+
+  try {
+    codecept.init(testRoot)
+    await codecept.bootstrap()
+
+    // Load tests - if we have specific patterns, use them, otherwise load all and filter with grep
+    if (testPatterns.length > 0) {
+      codecept.loadTests(testPatterns.join(' '))
+    } else {
+      codecept.loadTests()
+    }
+
+    if (options.verbose) {
+      global.debugMode = true
+      const { getMachineInfo } = require('./info')
+      await getMachineInfo()
+    }
+
+    // Display information about what we're running
+    if (options.verbose) {
+      output.print('\nFailed tests to re-run:')
+      failedTestsData.tests.forEach((test, index) => {
+        output.print(` ${index + 1}. ${test.fullTitle || test.title} (${test.file || 'unknown file'})`)
+        if (test.error && test.error.message) {
+          output.print(` Error: ${test.error.message}`)
+        }
+      })
+      output.print('')
+    }
+
+    await codecept.run()
+  } catch (err) {
+    printError(err)
+    process.exitCode = 1
+  } finally {
+    await codecept.teardown()
+  }
+}
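This command reads a failed-tests file whose writer is not shown in this diff; the fields it consumes (timestamp, tests[].title, tests[].fullTitle, tests[].file, tests[].error.message) suggest a shape roughly like the following. The example is hypothetical, with invented values.

```json
{
  "timestamp": "2024-01-01T00:00:00.000Z",
  "tests": [
    {
      "title": "login with valid credentials",
      "fullTitle": "Authentication: login with valid credentials",
      "file": "tests/auth_test.js",
      "error": { "message": "expected element \"#dashboard\" to be visible" }
    }
  ]
}
```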
@@ -10,7 +10,22 @@ module.exports = async function (workerCount, selectedRuns, options) {
 
   const { config: testConfig, override = '' } = options
   const overrideConfigs = tryOrDefault(() => JSON.parse(override), {})
-  const by = options.suites ? 'suite' : 'test'
+
+  // Determine test split strategy
+  let by = 'test' // default
+  if (options.by) {
+    // Explicit --by option takes precedence
+    by = options.by
+  } else if (options.suites) {
+    // Legacy --suites option
+    by = 'suite'
+  }
+
+  // Validate the by option
+  const validStrategies = ['test', 'suite', 'pool']
+  if (!validStrategies.includes(by)) {
+    throw new Error(`Invalid --by strategy: ${by}. Valid options are: ${validStrategies.join(', ')}`)
+  }
   delete options.parent
   const config = {
     by,
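The same strategy switch applies to parallel runs. Assuming this hunk belongs to the run-workers command (its file header is not shown in this diff), a pool-based run would look roughly like this:

```sh
# Dynamic test pooling: each worker pulls the next test as soon as it finishes the previous one
npx codeceptjs run-workers 4 --by pool

# Legacy suite-based distribution is still available
npx codeceptjs run-workers 4 --suites
```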