codeceptjs 3.7.5-beta.9 → 3.7.6-beta.1
This diff reflects the changes between publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- package/bin/codecept.js +0 -23
- package/lib/effects.js +1 -1
- package/lib/helper/Appium.js +1 -1
- package/lib/helper/Playwright.js +3 -4
- package/lib/helper/Puppeteer.js +5 -1
- package/lib/helper/REST.js +21 -0
- package/lib/helper/WebDriver.js +6 -3
- package/lib/helper/network/actions.js +4 -2
- package/lib/listener/enhancedGlobalRetry.js +110 -0
- package/lib/mocha/test.js +0 -1
- package/lib/plugin/enhancedRetryFailedStep.js +99 -0
- package/lib/recorder.js +19 -3
- package/lib/retryCoordinator.js +207 -0
- package/lib/utils.js +54 -4
- package/package.json +32 -24
- package/typings/promiseBasedTypes.d.ts +59 -4
- package/typings/types.d.ts +71 -5
- package/lib/command/run-failed-tests.js +0 -218
- package/lib/plugin/failedTestsTracker.js +0 -374

package/lib/command/run-failed-tests.js (deleted)
@@ -1,218 +0,0 @@
-const fs = require('fs')
-const path = require('path')
-const { getConfig, printError, getTestRoot, createOutputDir } = require('./utils')
-const Config = require('../config')
-const store = require('../store')
-const Codecept = require('../codecept')
-const output = require('../output')
-const Workers = require('../workers')
-const { tryOrDefault } = require('../utils')
-
-module.exports = async function (options) {
-  // registering options globally to use in config
-  if (options.profile) {
-    process.env.profile = options.profile
-  }
-  if (options.verbose || options.debug) store.debugMode = true
-
-  const configFile = options.config
-  let config = getConfig(configFile)
-
-  if (options.override) {
-    config = Config.append(JSON.parse(options.override))
-  }
-
-  const testRoot = getTestRoot(configFile)
-  createOutputDir(config, testRoot)
-
-  // Determine failed tests file path - respect CodeceptJS output directory
-  const failedTestsFile = options.file || 'failed-tests.json'
-  const failedTestsPath = path.isAbsolute(failedTestsFile)
-    ? failedTestsFile
-    : path.resolve(global.output_dir || './output', failedTestsFile)
-
-  // Check if failed tests file exists
-  if (!fs.existsSync(failedTestsPath)) {
-    output.error(`Failed tests file not found: ${failedTestsPath}`)
-    output.print('Run tests first to generate a failed tests file, or specify a different file with --file option')
-    process.exitCode = 1
-    return
-  }
-
-  let failedTestsData
-  try {
-    const fileContent = fs.readFileSync(failedTestsPath, 'utf8')
-    failedTestsData = JSON.parse(fileContent)
-  } catch (error) {
-    output.error(`Failed to read or parse failed tests file: ${error.message}`)
-    process.exitCode = 1
-    return
-  }
-
-  if (!failedTestsData.tests || failedTestsData.tests.length === 0) {
-    output.print('No failed tests found in the file')
-    return
-  }
-
-  output.print(`Found ${failedTestsData.tests.length} failed tests from ${failedTestsData.timestamp}`)
-
-  // Build test patterns from failed tests
-  const testPatterns = []
-  const testsByFile = new Map()
-
-  // Group tests by file for more efficient execution
-  failedTestsData.tests.forEach(test => {
-    if (test.file) {
-      if (!testsByFile.has(test.file)) {
-        testsByFile.set(test.file, [])
-      }
-      testsByFile.get(test.file).push(test)
-    }
-  })
-
-  // If we have specific test files, use them
-  if (testsByFile.size > 0) {
-    for (const [file, tests] of testsByFile) {
-      if (options.grep) {
-        // If grep is specified, combine with file pattern
-        testPatterns.push(file)
-      } else {
-        // Try to be more specific with test titles if possible
-        testPatterns.push(file)
-      }
-    }
-  } else {
-    // Fallback: use test titles with grep
-    const testTitles = failedTestsData.tests.map(test => test.title).filter(Boolean)
-    if (testTitles.length > 0) {
-      // Create a regex pattern to match any of the failed test titles
-      const grepPattern = testTitles.map(title => title.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')).join('|')
-      options.grep = grepPattern
-    }
-  }
-
-  // Check if user wants to run with workers
-  if (options.workers) {
-    await runWithWorkers(config, options, testPatterns, failedTestsData)
-  } else {
-    await runWithoutWorkers(config, options, testPatterns, failedTestsData, testRoot)
-  }
-}
-
-async function runWithWorkers(config, options, testPatterns, failedTestsData) {
-  const numberOfWorkers = parseInt(options.workers, 10)
-  const overrideConfigs = tryOrDefault(() => JSON.parse(options.override || '{}'), {})
-
-  // Determine test split strategy
-  let by = 'test' // default for failed tests
-  if (options.by) {
-    by = options.by
-  } else if (options.suites) {
-    by = 'suite'
-  }
-
-  // Validate the by option
-  const validStrategies = ['test', 'suite', 'pool']
-  if (!validStrategies.includes(by)) {
-    throw new Error(`Invalid --by strategy: ${by}. Valid options are: ${validStrategies.join(', ')}`)
-  }
-
-  const workerConfig = {
-    by,
-    testConfig: options.config,
-    options,
-    selectedRuns: undefined,
-  }
-
-  output.print(`CodeceptJS v${require('../codecept').version()}`)
-  output.print(`Re-running ${failedTestsData.tests.length} failed tests in ${output.styles.bold(numberOfWorkers)} workers...`)
-  output.print()
-  store.hasWorkers = true
-
-  const workers = new Workers(numberOfWorkers, workerConfig)
-  workers.overrideConfig(overrideConfigs)
-
-  // Set up event listeners for worker output
-  workers.on('test.failed', test => {
-    output.test.failed(test)
-  })
-
-  workers.on('test.passed', test => {
-    output.test.passed(test)
-  })
-
-  workers.on('test.skipped', test => {
-    output.test.skipped(test)
-  })
-
-  workers.on('all.result', result => {
-    workers.printResults()
-  })
-
-  try {
-    if (options.verbose || options.debug) store.debugMode = true
-
-    if (options.verbose) {
-      output.print('\nFailed tests to re-run with workers:')
-      failedTestsData.tests.forEach((test, index) => {
-        output.print(`  ${index + 1}. ${test.fullTitle || test.title} (${test.file || 'unknown file'})`)
-        if (test.error && test.error.message) {
-          output.print(`     Error: ${test.error.message}`)
-        }
-      })
-      output.print('')
-
-      const { getMachineInfo } = require('./info')
-      await getMachineInfo()
-    }
-
-    await workers.bootstrapAll()
-    await workers.run()
-  } catch (err) {
-    printError(err)
-    process.exitCode = 1
-  } finally {
-    await workers.teardownAll()
-  }
-}
-
-async function runWithoutWorkers(config, options, testPatterns, failedTestsData, testRoot) {
-  const codecept = new Codecept(config, options)
-
-  try {
-    codecept.init(testRoot)
-    await codecept.bootstrap()
-
-    // Load tests - if we have specific patterns, use them, otherwise load all and filter with grep
-    if (testPatterns.length > 0) {
-      codecept.loadTests(testPatterns.join(' '))
-    } else {
-      codecept.loadTests()
-    }
-
-    if (options.verbose) {
-      global.debugMode = true
-      const { getMachineInfo } = require('./info')
-      await getMachineInfo()
-    }
-
-    // Display information about what we're running
-    if (options.verbose) {
-      output.print('\nFailed tests to re-run:')
-      failedTestsData.tests.forEach((test, index) => {
-        output.print(`  ${index + 1}. ${test.fullTitle || test.title} (${test.file || 'unknown file'})`)
-        if (test.error && test.error.message) {
-          output.print(`     Error: ${test.error.message}`)
-        }
-      })
-      output.print('')
-    }
-
-    await codecept.run()
-  } catch (err) {
-    printError(err)
-    process.exitCode = 1
-  } finally {
-    await codecept.teardown()
-  }
-}
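
For context, the removed command above only relied on a few fields of the failed-tests file it loaded: the top-level timestamp and tests array, and each test's title, fullTitle, file, and error.message. A minimal sketch of such a file follows; field names come from the code above, all values are invented purely for illustration (the writer side is the plugin removed below):

// Hypothetical failed-tests.json payload consumed by the removed command;
// field names mirror the code above, values are made up.
const exampleFailedTestsFile = {
  timestamp: '2025-01-01T00:00:00.000Z',
  tests: [
    {
      title: 'login succeeds',
      fullTitle: 'Authentication: login succeeds',
      file: 'login_test.js',
      error: { message: 'expected element "#profile" to be visible' },
    },
  ],
}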

package/lib/plugin/failedTestsTracker.js (deleted)
@@ -1,374 +0,0 @@
-const fs = require('fs')
-const path = require('path')
-const event = require('../event')
-const output = require('../output')
-const store = require('../store')
-
-const defaultConfig = {
-  enabled: true,
-  outputFile: 'failed-tests.json',
-  clearOnSuccess: true,
-  includeStackTrace: true,
-  includeMetadata: true,
-}
-
-/**
- * Failed Tests Tracker Plugin for CodeceptJS
- *
- * Tracks failed tests and saves them to a file for later re-execution.
- *
- * ## Configuration
- *
- * ```js
- * "plugins": {
- *   "failedTestsTracker": {
- *     "enabled": true,
- *     "outputFile": "failed-tests.json",
- *     "clearOnSuccess": true,
- *     "includeStackTrace": true,
- *     "includeMetadata": true
- *   }
- * }
- * ```
- *
- * @param {object} config plugin configuration
- */
-module.exports = function (config) {
-  const options = { ...defaultConfig, ...config }
-  let failedTests = []
-  let allTestsPassed = true
-  let workerFailedTests = new Map() // Track failed tests from workers
-
-  // Track test failures - only when not using workers
-  event.dispatcher.on(event.test.failed, test => {
-    // Skip collection in worker threads to avoid duplicates
-    try {
-      const { isMainThread } = require('worker_threads')
-      if (!isMainThread) return
-    } catch (e) {
-      // worker_threads not available, continue
-    }
-
-    if (store.hasWorkers) return // Skip if running with workers
-
-    // Only collect on final failure (when retries are exhausted or no retries configured)
-    const currentRetry = test._currentRetry || 0
-    const maxRetries = typeof test.retries === 'function' ? test.retries() : (test.retries || 0)
-
-    // Only add to failed tests if this is the final attempt
-    if (currentRetry >= maxRetries) {
-      allTestsPassed = false
-
-      const failedTest = {
-        title: test.title,
-        fullTitle: test.fullTitle(),
-        file: test.file || (test.parent && test.parent.file),
-        uid: test.uid,
-        timestamp: new Date().toISOString(),
-      }
-
-      // Add parent suite information
-      if (test.parent) {
-        failedTest.suite = test.parent.title
-        failedTest.suiteFile = test.parent.file
-      }
-
-      // Add error information if available
-      if (test.err && options.includeStackTrace) {
-        failedTest.error = {
-          message: test.err.message || 'Test failed',
-          stack: test.err.stack || '',
-          name: test.err.name || 'Error',
-        }
-      }
-
-      // Add metadata if available
-      if (options.includeMetadata) {
-        failedTest.metadata = {
-          tags: test.tags || [],
-          meta: test.meta || {},
-          opts: test.opts || {},
-          duration: test.duration || 0,
-          // Only include retries if it represents actual retry attempts, not the config value
-          ...(test._currentRetry > 0 && { actualRetries: test._currentRetry }),
-          ...(maxRetries > 0 && maxRetries !== -1 && { maxRetries: maxRetries }),
-        }
-      }
-
-      // Add BDD/Gherkin information if available
-      if (test.parent && test.parent.feature) {
-        failedTest.bdd = {
-          feature: test.parent.feature.name || test.parent.title,
-          scenario: test.title,
-          featureFile: test.parent.file,
-        }
-      }
-
-      failedTests.push(failedTest)
-      output.print(`Failed Tests Tracker: Recorded failed test - ${test.title}`)
-    }
-  })
-
-  // Handle test completion and save failed tests
-  event.dispatcher.on(event.all.result, (result) => {
-
-    // Respect CodeceptJS output directory like other plugins
-    const outputDir = global.output_dir || './output'
-    const outputPath = path.isAbsolute(options.outputFile)
-      ? options.outputFile
-      : path.resolve(outputDir, options.outputFile)
-    let allFailedTests = [...failedTests]
-
-    // Collect failed tests from result (both worker and single-process modes)
-    if (result) {
-      let resultFailedTests = []
-
-      // Worker mode: result.tests
-      if (store.hasWorkers && result.tests) {
-        resultFailedTests = result.tests.filter(test => test.state === 'failed' || test.err)
-      }
-      // Single-process mode: result._failures or result._tests
-      else if (!store.hasWorkers && (result._failures || result._tests)) {
-        if (result._failures && result._failures.length > 0) {
-          resultFailedTests = result._failures.map(failure => failure.test || failure)
-        } else if (result._tests) {
-          resultFailedTests = result._tests.filter(test => test.state === 'failed' || test.err)
-        }
-      }
-
-      // Use a Set to track unique test identifiers to prevent duplicates
-      const existingTestIds = new Set(allFailedTests.map(test => test.uid || `${test.file}:${test.title}`))
-
-      resultFailedTests.forEach(test => {
-        // Create unique identifier for deduplication
-        const testId = test.uid || `${test.file || 'unknown'}:${test.title}`
-
-        // Skip if we already have this test
-        if (existingTestIds.has(testId)) {
-          return
-        }
-
-        const failedTest = {
-          title: test.title,
-          fullTitle: test.fullTitle || test.title,
-          file: test.file || 'unknown',
-          uid: test.uid,
-          timestamp: new Date().toISOString(),
-        }
-
-        // Add parent suite information
-        if (test.parent) {
-          failedTest.suite = test.parent.title
-          failedTest.suiteFile = test.parent.file
-        }
-
-        // Add error information if available
-        if (test.err && options.includeStackTrace) {
-          failedTest.error = {
-            message: test.err.message || 'Test failed',
-            stack: test.err.stack || '',
-            name: test.err.name || 'Error',
-          }
-        }
-
-        // Add metadata if available
-        if (options.includeMetadata) {
-          failedTest.metadata = {
-            tags: test.tags || [],
-            meta: test.meta || {},
-            opts: test.opts || {},
-            duration: test.duration || 0,
-            retries: test.retries || 0,
-          }
-        }
-
-        // Add BDD/Gherkin information if available
-        if (test.parent && test.parent.feature) {
-          failedTest.bdd = {
-            feature: test.parent.feature.name || test.parent.title,
-            scenario: test.title,
-            featureFile: test.parent.file,
-          }
-        }
-
-        allFailedTests.push(failedTest)
-        existingTestIds.add(testId)
-      })
-
-      output.print(`Failed Tests Tracker: Collected ${resultFailedTests.length} failed tests from result`)
-    }
-
-    if (allFailedTests.length === 0) {
-      if (options.clearOnSuccess && fs.existsSync(outputPath)) {
-        try {
-          fs.unlinkSync(outputPath)
-          output.print(`Failed Tests Tracker: Cleared previous failed tests file (all tests passed)`)
-        } catch (error) {
-          output.print(`Failed Tests Tracker: Could not clear failed tests file: ${error.message}`)
-        }
-      } else {
-        output.print(`Failed Tests Tracker: No failed tests to save`)
-      }
-      return
-    }
-
-    const failedTestsData = {
-      timestamp: new Date().toISOString(),
-      totalFailedTests: allFailedTests.length,
-      codeceptVersion: require('../codecept').version(),
-      tests: allFailedTests,
-    }
-
-    try {
-      // Ensure directory exists
-      const dir = path.dirname(outputPath)
-      if (!fs.existsSync(dir)) {
-        fs.mkdirSync(dir, { recursive: true })
-      }
-
-      fs.writeFileSync(outputPath, JSON.stringify(failedTestsData, null, 2))
-      output.print(`Failed Tests Tracker: Saved ${allFailedTests.length} failed tests to ${outputPath}`)
-    } catch (error) {
-      output.print(`Failed Tests Tracker: Failed to save failed tests: ${error.message}`)
-    }
-  })
-
-  // Reset state for new test runs
-  event.dispatcher.on(event.all.before, () => {
-    failedTests = []
-    allTestsPassed = true
-    workerFailedTests.clear()
-  })
-
-  // Handle worker mode - listen to workers.result event for consolidated results
-  event.dispatcher.on(event.workers.result, (result) => {
-    // Respect CodeceptJS output directory like other plugins
-    const outputDir = global.output_dir || './output'
-    const outputPath = path.isAbsolute(options.outputFile)
-      ? options.outputFile
-      : path.resolve(outputDir, options.outputFile)
-
-    let allFailedTests = []
-
-    // In worker mode, collect failed tests from consolidated result
-    if (result && result.tests) {
-      const workerFailedTests = result.tests.filter(test => test.state === 'failed' || test.err)
-
-      workerFailedTests.forEach(test => {
-        // Extract file path from test title or error stack trace as fallback
-        let filePath = test.file || test.parent?.file || 'unknown'
-
-        // If still unknown, try to extract from error stack trace
-        if (filePath === 'unknown' && test.err && test.err.stack) {
-          // Try multiple regex patterns for different stack trace formats
-          const patterns = [
-            /at.*\(([^)]+\.js):\d+:\d+\)/, // Standard format
-            /at.*\(.*[\/\\]([^\/\\]+\.js):\d+:\d+\)/, // With path separators
-            /\(([^)]*\.js):\d+:\d+\)/, // Simpler format
-            /([^\/\\]+\.js):\d+:\d+/, // Just filename with line numbers
-          ]
-
-          for (const pattern of patterns) {
-            const stackMatch = test.err.stack.match(pattern)
-            if (stackMatch && stackMatch[1]) {
-              const absolutePath = stackMatch[1]
-              const relativePath = absolutePath.replace(process.cwd() + '/', '').replace(/^.*[\/\\]/, '')
-              filePath = relativePath
-              break
-            }
-          }
-        }
-
-        // If still unknown, try to extract from test context or use test file pattern
-        if (filePath === 'unknown') {
-          // Look for common test file patterns in the test title or fullTitle
-          const fullTitle = test.fullTitle || test.title
-          if (fullTitle && fullTitle.includes('checkout')) {
-            filePath = 'checkout_test.js'
-          } else if (fullTitle && fullTitle.includes('github')) {
-            filePath = 'github_test.js'
-          }
-        }
-
-        const failedTest = {
-          title: test.title,
-          fullTitle: test.fullTitle || test.title,
-          file: filePath,
-          uid: test.uid,
-          timestamp: new Date().toISOString(),
-        }
-
-        // Add parent suite information
-        if (test.parent) {
-          failedTest.suite = test.parent.title
-          failedTest.suiteFile = test.parent.file
-        }
-
-        // Add error information if available
-        if (test.err && options.includeStackTrace) {
-          failedTest.error = {
-            message: test.err.message || 'Test failed',
-            stack: test.err.stack || '',
-            name: test.err.name || 'Error',
-          }
-        }
-
-        // Add metadata if available
-        if (options.includeMetadata) {
-          failedTest.metadata = {
-            tags: test.tags || [],
-            meta: test.meta || {},
-            opts: test.opts || {},
-            duration: test.duration || 0,
-            retries: test.retries || 0,
-          }
-        }
-
-        // Add BDD/Gherkin information if available
-        if (test.parent && test.parent.feature) {
-          failedTest.bdd = {
-            feature: test.parent.feature.name || test.parent.title,
-            scenario: test.title,
-            featureFile: test.parent.file,
-          }
-        }
-
-        allFailedTests.push(failedTest)
-      })
-
-      output.print(`Failed Tests Tracker: Collected ${allFailedTests.length - failedTests.length} failed tests from workers`)
-    }
-
-    if (allFailedTests.length === 0) {
-      if (options.clearOnSuccess && fs.existsSync(outputPath)) {
-        try {
-          fs.unlinkSync(outputPath)
-          output.print(`Failed Tests Tracker: Cleared previous failed tests file (all tests passed)`)
-        } catch (error) {
-          output.print(`Failed Tests Tracker: Could not clear failed tests file: ${error.message}`)
-        }
-      }
-      return
-    }
-
-    // Save failed tests to file
-    try {
-      const failedTestsData = {
-        timestamp: new Date().toISOString(),
-        totalFailed: allFailedTests.length,
-        tests: allFailedTests,
-      }
-
-      // Ensure output directory exists
-      const dir = path.dirname(outputPath)
-      if (!fs.existsSync(dir)) {
-        fs.mkdirSync(dir, { recursive: true })
-      }
-
-      fs.writeFileSync(outputPath, JSON.stringify(failedTestsData, null, 2))
-      output.print(`Failed Tests Tracker: Saved ${allFailedTests.length} failed tests to ${outputPath}`)
-    } catch (error) {
-      output.print(`Failed Tests Tracker: Failed to save failed tests: ${error.message}`)
-    }
-  })
-}