@bugzy-ai/bugzy 1.18.2 → 1.18.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -21
- package/README.md +273 -273
- package/dist/cli/index.cjs +295 -210
- package/dist/cli/index.cjs.map +1 -1
- package/dist/cli/index.js +294 -209
- package/dist/cli/index.js.map +1 -1
- package/dist/index.cjs +291 -206
- package/dist/index.cjs.map +1 -1
- package/dist/index.js +291 -206
- package/dist/index.js.map +1 -1
- package/dist/subagents/index.cjs +115 -137
- package/dist/subagents/index.cjs.map +1 -1
- package/dist/subagents/index.js +115 -137
- package/dist/subagents/index.js.map +1 -1
- package/dist/subagents/metadata.cjs +12 -18
- package/dist/subagents/metadata.cjs.map +1 -1
- package/dist/subagents/metadata.js +12 -18
- package/dist/subagents/metadata.js.map +1 -1
- package/dist/tasks/index.cjs +142 -54
- package/dist/tasks/index.cjs.map +1 -1
- package/dist/tasks/index.js +142 -54
- package/dist/tasks/index.js.map +1 -1
- package/dist/templates/init/.bugzy/runtime/knowledge-base.md +61 -0
- package/dist/templates/init/.bugzy/runtime/knowledge-maintenance-guide.md +97 -0
- package/dist/templates/init/.bugzy/runtime/project-context.md +35 -0
- package/dist/templates/init/.bugzy/runtime/subagent-memory-guide.md +87 -0
- package/dist/templates/init/.bugzy/runtime/templates/test-plan-template.md +50 -0
- package/dist/templates/init/.bugzy/runtime/templates/test-result-schema.md +498 -0
- package/dist/templates/init/.bugzy/runtime/test-execution-strategy.md +535 -0
- package/dist/templates/init/.bugzy/runtime/testing-best-practices.md +632 -0
- package/dist/templates/init/.gitignore-template +25 -0
- package/package.json +95 -95
- package/templates/init/.bugzy/runtime/knowledge-base.md +61 -61
- package/templates/init/.bugzy/runtime/knowledge-maintenance-guide.md +97 -97
- package/templates/init/.bugzy/runtime/project-context.md +35 -35
- package/templates/init/.bugzy/runtime/subagent-memory-guide.md +87 -87
- package/templates/init/.bugzy/runtime/templates/event-examples.md +194 -194
- package/templates/init/.bugzy/runtime/templates/test-plan-template.md +50 -50
- package/templates/init/.bugzy/runtime/templates/test-result-schema.md +498 -498
- package/templates/init/.claude/settings.json +28 -28
- package/templates/init/.env.testdata +18 -18
- package/templates/init/.gitignore-template +24 -24
- package/templates/init/AGENTS.md +155 -155
- package/templates/init/CLAUDE.md +157 -157
- package/templates/init/test-runs/README.md +45 -45
- package/templates/init/tests/CLAUDE.md +193 -193
- package/templates/init/tests/docs/test-execution-strategy.md +535 -535
- package/templates/init/tests/docs/testing-best-practices.md +724 -724
- package/templates/playwright/BasePage.template.ts +190 -190
- package/templates/playwright/auth.setup.template.ts +89 -89
- package/templates/playwright/dataGenerators.helper.template.ts +148 -148
- package/templates/playwright/dateUtils.helper.template.ts +96 -96
- package/templates/playwright/pages.fixture.template.ts +50 -50
- package/templates/playwright/playwright.config.template.ts +97 -97
- package/templates/playwright/reporters/__tests__/bugzy-reporter-failure-classification.test.ts +299 -299
- package/templates/playwright/reporters/__tests__/bugzy-reporter-manifest-merge.test.ts +329 -329
- package/templates/playwright/reporters/__tests__/playwright.config.ts +5 -5
- package/templates/playwright/reporters/bugzy-reporter.ts +784 -784
- package/templates/init/.bugzy/runtime/handlers/messages/feedback.md +0 -178
- package/templates/init/.bugzy/runtime/handlers/messages/question.md +0 -122
- package/templates/init/.bugzy/runtime/handlers/messages/status.md +0 -146
|
@@ -1,784 +1,784 @@
|
|
|
1
|
-
import type {
|
|
2
|
-
Reporter,
|
|
3
|
-
FullConfig,
|
|
4
|
-
Suite,
|
|
5
|
-
TestCase,
|
|
6
|
-
TestResult,
|
|
7
|
-
FullResult,
|
|
8
|
-
TestStep,
|
|
9
|
-
} from '@playwright/test/reporter';
|
|
10
|
-
import * as fs from 'fs';
|
|
11
|
-
import * as path from 'path';
|
|
12
|
-
|
|
13
|
-
/**
 * Step data for steps.json
 *
 * One entry per `test.step()` call, recorded so a viewer can seek the
 * execution video to the moment a step began.
 */
interface StepData {
  // 1-based position of the step within the test
  index: number;
  // ISO-8601 wall-clock time when the step started
  timestamp: string;
  // Whole seconds elapsed since the first tracked step — used to seek video.webm
  videoTimeSeconds: number;
  // Step title as passed to test.step()
  action: string;
  status: 'success' | 'failed' | 'skipped';
  description: string;
  technicalDetails: string;
  // Step duration in milliseconds; filled in when the step ends
  duration?: number;
}
|
|
26
|
-
|
|
27
|
-
/**
 * Manifest execution entry
 *
 * Summary of a single execution attempt (one exec-{number} directory)
 * of a test case.
 */
interface ManifestExecution {
  // Execution attempt number (1-based, matches the exec-{number} folder)
  number: number;
  status: string;
  // Execution duration in milliseconds
  duration: number;
  // Relative video file name ('video.webm') or null when no video was recorded
  videoFile: string | null;
  hasTrace: boolean;
  hasScreenshots: boolean;
  // First error message of the attempt, or null when there were no errors
  error: string | null;
}
|
|
39
|
-
|
|
40
|
-
/**
 * Manifest test case entry
 */
interface ManifestTestCase {
  // Test case identifier in TC-XXX-{test-name} form
  id: string;
  name: string;
  totalExecutions: number;
  // Status of the last execution attempt
  finalStatus: string;
  // All execution attempts, ordered by execution number
  executions: ManifestExecution[];
}
|
|
50
|
-
|
|
51
|
-
/**
 * Failure classification entry for new vs known failures
 */
interface FailureClassification {
  id: string;
  name: string;
  // Error message from the last execution, or null if none was captured
  error: string | null;
  // Timestamp of the most recent previous run in which this test passed,
  // or null when no such run exists
  lastPassedRun: string | null;
}
|
|
60
|
-
|
|
61
|
-
/**
 * Manifest structure for test run sessions
 */
interface Manifest {
  // Session identifier (BUGZY_EXECUTION_ID or a generated 'local-...' id)
  bugzyExecutionId: string;
  // Session timestamp in YYYYMMDD-HHMMSS form (also the directory name)
  timestamp: string;
  startTime: string;
  endTime: string;
  status: string;
  // Aggregate counters across all test cases in the session
  stats: {
    totalTests: number;
    passed: number;
    failed: number;
    totalExecutions: number;
  };
  testCases: ManifestTestCase[];
  // Failures of tests that passed recently or have no history — optional
  new_failures?: FailureClassification[];
  // Failures of tests that failed in all recent runs — optional
  known_failures?: FailureClassification[];
}
|
|
80
|
-
|
|
81
|
-
/**
|
|
82
|
-
* Classify failures as new or known by checking previous test run manifests.
|
|
83
|
-
*
|
|
84
|
-
* A failure is "new" if the test passed in any of the last N runs.
|
|
85
|
-
* A failure is "known" if the test failed in ALL of the last N runs (or no prior data exists for that specific test).
|
|
86
|
-
* If there are no previous runs at all (first run), all failures are treated as "new".
|
|
87
|
-
*
|
|
88
|
-
* @param currentManifest - The current run's manifest
|
|
89
|
-
* @param testRunsRoot - Path to the test-runs/ directory
|
|
90
|
-
* @returns Object with newFailures and knownFailures arrays
|
|
91
|
-
*/
|
|
92
|
-
export function classifyFailures(
|
|
93
|
-
currentManifest: Manifest,
|
|
94
|
-
testRunsRoot: string
|
|
95
|
-
): { newFailures: FailureClassification[]; knownFailures: FailureClassification[] } {
|
|
96
|
-
const lookback = parseInt(process.env.BUGZY_FAILURE_LOOKBACK || '5', 10);
|
|
97
|
-
const newFailures: FailureClassification[] = [];
|
|
98
|
-
const knownFailures: FailureClassification[] = [];
|
|
99
|
-
|
|
100
|
-
// Get failed test cases from current manifest
|
|
101
|
-
const failedTests = currentManifest.testCases.filter(
|
|
102
|
-
tc => tc.finalStatus === 'failed' || tc.finalStatus === 'timedOut'
|
|
103
|
-
);
|
|
104
|
-
|
|
105
|
-
if (failedTests.length === 0) {
|
|
106
|
-
return { newFailures, knownFailures };
|
|
107
|
-
}
|
|
108
|
-
|
|
109
|
-
// Read previous manifests
|
|
110
|
-
const previousManifests: Manifest[] = [];
|
|
111
|
-
if (fs.existsSync(testRunsRoot)) {
|
|
112
|
-
const dirs = fs.readdirSync(testRunsRoot)
|
|
113
|
-
.filter(d => {
|
|
114
|
-
try {
|
|
115
|
-
return fs.statSync(path.join(testRunsRoot, d)).isDirectory();
|
|
116
|
-
} catch {
|
|
117
|
-
return false;
|
|
118
|
-
}
|
|
119
|
-
})
|
|
120
|
-
.sort()
|
|
121
|
-
.reverse(); // Latest first
|
|
122
|
-
|
|
123
|
-
for (const dir of dirs) {
|
|
124
|
-
// Skip current run
|
|
125
|
-
if (dir === currentManifest.timestamp) continue;
|
|
126
|
-
|
|
127
|
-
if (previousManifests.length >= lookback) break;
|
|
128
|
-
|
|
129
|
-
const manifestPath = path.join(testRunsRoot, dir, 'manifest.json');
|
|
130
|
-
if (fs.existsSync(manifestPath)) {
|
|
131
|
-
try {
|
|
132
|
-
const manifest: Manifest = JSON.parse(fs.readFileSync(manifestPath, 'utf-8'));
|
|
133
|
-
previousManifests.push(manifest);
|
|
134
|
-
} catch {
|
|
135
|
-
// Skip invalid manifests
|
|
136
|
-
}
|
|
137
|
-
}
|
|
138
|
-
}
|
|
139
|
-
}
|
|
140
|
-
|
|
141
|
-
// If no previous runs exist, all failures are new (first run)
|
|
142
|
-
if (previousManifests.length === 0) {
|
|
143
|
-
for (const tc of failedTests) {
|
|
144
|
-
const lastExec = tc.executions[tc.executions.length - 1];
|
|
145
|
-
newFailures.push({
|
|
146
|
-
id: tc.id,
|
|
147
|
-
name: tc.name,
|
|
148
|
-
error: lastExec?.error || null,
|
|
149
|
-
lastPassedRun: null,
|
|
150
|
-
});
|
|
151
|
-
}
|
|
152
|
-
return { newFailures, knownFailures };
|
|
153
|
-
}
|
|
154
|
-
|
|
155
|
-
// For each failed test, check if it passed in any previous run
|
|
156
|
-
for (const tc of failedTests) {
|
|
157
|
-
const lastExec = tc.executions[tc.executions.length - 1];
|
|
158
|
-
let lastPassedRun: string | null = null;
|
|
159
|
-
|
|
160
|
-
for (const prevManifest of previousManifests) {
|
|
161
|
-
const prevTc = prevManifest.testCases.find(ptc => ptc.id === tc.id);
|
|
162
|
-
if (prevTc && (prevTc.finalStatus === 'passed')) {
|
|
163
|
-
lastPassedRun = prevManifest.timestamp;
|
|
164
|
-
break;
|
|
165
|
-
}
|
|
166
|
-
}
|
|
167
|
-
|
|
168
|
-
if (lastPassedRun) {
|
|
169
|
-
// Test passed recently, so this is a new failure
|
|
170
|
-
newFailures.push({
|
|
171
|
-
id: tc.id,
|
|
172
|
-
name: tc.name,
|
|
173
|
-
error: lastExec?.error || null,
|
|
174
|
-
lastPassedRun,
|
|
175
|
-
});
|
|
176
|
-
} else {
|
|
177
|
-
// Check if test exists in any previous run at all
|
|
178
|
-
const existsInPrevious = previousManifests.some(
|
|
179
|
-
pm => pm.testCases.some(ptc => ptc.id === tc.id)
|
|
180
|
-
);
|
|
181
|
-
|
|
182
|
-
if (!existsInPrevious) {
|
|
183
|
-
// New test that doesn't exist in history - treat as new failure
|
|
184
|
-
newFailures.push({
|
|
185
|
-
id: tc.id,
|
|
186
|
-
name: tc.name,
|
|
187
|
-
error: lastExec?.error || null,
|
|
188
|
-
lastPassedRun: null,
|
|
189
|
-
});
|
|
190
|
-
} else {
|
|
191
|
-
// Failed in all previous runs - known failure
|
|
192
|
-
knownFailures.push({
|
|
193
|
-
id: tc.id,
|
|
194
|
-
name: tc.name,
|
|
195
|
-
error: lastExec?.error || null,
|
|
196
|
-
lastPassedRun: null,
|
|
197
|
-
});
|
|
198
|
-
}
|
|
199
|
-
}
|
|
200
|
-
}
|
|
201
|
-
|
|
202
|
-
return { newFailures, knownFailures };
|
|
203
|
-
}
|
|
204
|
-
|
|
205
|
-
/**
|
|
206
|
-
* Merge an existing manifest with the current run's manifest.
|
|
207
|
-
* If existing is null, returns current as-is.
|
|
208
|
-
* Deduplicates executions by number (current run wins on collision).
|
|
209
|
-
* Recalculates stats from the merged data.
|
|
210
|
-
*/
|
|
211
|
-
export function mergeManifests(existing: Manifest | null, current: Manifest): Manifest {
|
|
212
|
-
if (!existing) {
|
|
213
|
-
return current;
|
|
214
|
-
}
|
|
215
|
-
|
|
216
|
-
// Build map of test cases by id from existing manifest
|
|
217
|
-
const testCaseMap = new Map<string, ManifestTestCase>();
|
|
218
|
-
for (const tc of existing.testCases) {
|
|
219
|
-
testCaseMap.set(tc.id, { ...tc, executions: [...tc.executions] });
|
|
220
|
-
}
|
|
221
|
-
|
|
222
|
-
// Merge current run's test cases
|
|
223
|
-
for (const tc of current.testCases) {
|
|
224
|
-
const existingTc = testCaseMap.get(tc.id);
|
|
225
|
-
if (existingTc) {
|
|
226
|
-
// Merge executions: build a map keyed by execution number
|
|
227
|
-
const execMap = new Map<number, ManifestExecution>();
|
|
228
|
-
for (const exec of existingTc.executions) {
|
|
229
|
-
execMap.set(exec.number, exec);
|
|
230
|
-
}
|
|
231
|
-
// Current run's executions overwrite on collision
|
|
232
|
-
for (const exec of tc.executions) {
|
|
233
|
-
execMap.set(exec.number, exec);
|
|
234
|
-
}
|
|
235
|
-
// Sort by execution number
|
|
236
|
-
const mergedExecs = Array.from(execMap.values()).sort((a, b) => a.number - b.number);
|
|
237
|
-
const finalStatus = mergedExecs[mergedExecs.length - 1].status;
|
|
238
|
-
|
|
239
|
-
testCaseMap.set(tc.id, {
|
|
240
|
-
id: tc.id,
|
|
241
|
-
name: tc.name,
|
|
242
|
-
totalExecutions: mergedExecs.length,
|
|
243
|
-
finalStatus,
|
|
244
|
-
executions: mergedExecs,
|
|
245
|
-
});
|
|
246
|
-
} else {
|
|
247
|
-
// New test case from current run
|
|
248
|
-
testCaseMap.set(tc.id, { ...tc, executions: [...tc.executions] });
|
|
249
|
-
}
|
|
250
|
-
}
|
|
251
|
-
|
|
252
|
-
// Build merged test cases array
|
|
253
|
-
const mergedTestCases = Array.from(testCaseMap.values());
|
|
254
|
-
|
|
255
|
-
// Recalculate stats
|
|
256
|
-
let totalTests = 0;
|
|
257
|
-
let totalExecutions = 0;
|
|
258
|
-
let passedTests = 0;
|
|
259
|
-
let failedTests = 0;
|
|
260
|
-
|
|
261
|
-
for (const tc of mergedTestCases) {
|
|
262
|
-
totalTests++;
|
|
263
|
-
totalExecutions += tc.executions.length;
|
|
264
|
-
if (tc.finalStatus === 'passed') {
|
|
265
|
-
passedTests++;
|
|
266
|
-
} else {
|
|
267
|
-
failedTests++;
|
|
268
|
-
}
|
|
269
|
-
}
|
|
270
|
-
|
|
271
|
-
// Use earliest startTime, latest endTime
|
|
272
|
-
const startTime = new Date(existing.startTime) < new Date(current.startTime)
|
|
273
|
-
? existing.startTime
|
|
274
|
-
: current.startTime;
|
|
275
|
-
const endTime = new Date(existing.endTime) > new Date(current.endTime)
|
|
276
|
-
? existing.endTime
|
|
277
|
-
: current.endTime;
|
|
278
|
-
|
|
279
|
-
// Status: if any test case failed, overall is failed
|
|
280
|
-
const hasFailure = mergedTestCases.some(tc => tc.finalStatus === 'failed' || tc.finalStatus === 'timedOut');
|
|
281
|
-
const status = hasFailure ? 'failed' : current.status;
|
|
282
|
-
|
|
283
|
-
const merged: Manifest = {
|
|
284
|
-
bugzyExecutionId: current.bugzyExecutionId,
|
|
285
|
-
timestamp: existing.timestamp, // Keep original session timestamp
|
|
286
|
-
startTime,
|
|
287
|
-
endTime,
|
|
288
|
-
status,
|
|
289
|
-
stats: {
|
|
290
|
-
totalTests,
|
|
291
|
-
passed: passedTests,
|
|
292
|
-
failed: failedTests,
|
|
293
|
-
totalExecutions,
|
|
294
|
-
},
|
|
295
|
-
testCases: mergedTestCases,
|
|
296
|
-
};
|
|
297
|
-
|
|
298
|
-
// Preserve failure classification (current run's classification wins)
|
|
299
|
-
if (current.new_failures) {
|
|
300
|
-
merged.new_failures = current.new_failures;
|
|
301
|
-
} else if (existing.new_failures) {
|
|
302
|
-
merged.new_failures = existing.new_failures;
|
|
303
|
-
}
|
|
304
|
-
|
|
305
|
-
if (current.known_failures) {
|
|
306
|
-
merged.known_failures = current.known_failures;
|
|
307
|
-
} else if (existing.known_failures) {
|
|
308
|
-
merged.known_failures = existing.known_failures;
|
|
309
|
-
}
|
|
310
|
-
|
|
311
|
-
return merged;
|
|
312
|
-
}
|
|
313
|
-
|
|
314
|
-
/**
 * Bugzy Custom Playwright Reporter
 *
 * Records test executions in hierarchical structure:
 * test-runs/YYYYMMDD-HHMMSS/TC-{id}/exec-{num}/
 *
 * Features:
 * - Groups multiple test runs under same directory when BUGZY_EXECUTION_ID matches
 * - Checks latest directory's manifest to reuse existing session directory
 * - Tracks multiple execution attempts per test
 * - Records videos for all tests
 * - Captures traces/screenshots for failures only
 * - Links to BUGZY_EXECUTION_ID for session tracking
 * - Generates manifest.json with execution summary
 * - Generates steps.json with video timestamps for test.step() calls
 */
class BugzyReporter implements Reporter {
  // Absolute path of the session output directory (assigned in onBegin)
  private testRunDir!: string;
  // Session timestamp in YYYYMMDD-HHMMSS form (assigned in onBegin)
  private timestamp!: string;
  // Session identifier: BUGZY_EXECUTION_ID or a generated 'local-...' id
  private bugzyExecutionId!: string;
  // Wall-clock start of this run (assigned in onBegin)
  private startTime!: Date;
  // testId -> execution summaries accumulated for the manifest
  private testResults: Map<string, Array<any>> = new Map();
  // test key -> recorded test.step() entries for steps.json
  private testSteps: Map<string, Array<StepData>> = new Map();
  // test key -> epoch millis of the first tracked step (video time zero)
  private testStartTimes: Map<string, number> = new Map();

  constructor() {
    // No longer need to read execution number from environment
    // It will be auto-detected per test case
  }
|
|
343
|
-
|
|
344
|
-
/**
 * Called once before running tests.
 *
 * Decides which session directory to write into: either reuses an
 * existing test-runs/<timestamp>/ directory whose manifest matches this
 * session, or creates a fresh one named after the current timestamp.
 */
onBegin(config: FullConfig, suite: Suite): void {
  this.startTime = new Date();

  // Generate timestamp in YYYYMMDD-HHMMSS format
  this.timestamp = this.startTime
    .toISOString()
    .replace(/[-:]/g, '')
    .replace(/T/, '-')
    .slice(0, 15);

  const testRunsRoot = path.join(process.cwd(), 'test-runs');

  // Check if we should reuse an existing session
  let reuseDir: string | null = null;

  // If BUGZY_EXECUTION_ID is provided, use it directly
  if (process.env.BUGZY_EXECUTION_ID) {
    this.bugzyExecutionId = process.env.BUGZY_EXECUTION_ID;
  } else {
    // For local runs, check if we can reuse the latest session
    // Reuse if the latest manifest is within 60 minutes
    if (fs.existsSync(testRunsRoot)) {
      const dirs = fs.readdirSync(testRunsRoot)
        .filter(d => fs.statSync(path.join(testRunsRoot, d)).isDirectory())
        .sort()
        .reverse(); // Sort descending (latest first)

      if (dirs.length > 0) {
        const latestDir = dirs[0];
        const manifestPath = path.join(testRunsRoot, latestDir, 'manifest.json');

        if (fs.existsSync(manifestPath)) {
          try {
            const manifest = JSON.parse(fs.readFileSync(manifestPath, 'utf-8'));
            const manifestTime = new Date(manifest.startTime).getTime();
            const currentTime = this.startTime.getTime();
            const minutesDiff = (currentTime - manifestTime) / (1000 * 60);

            // Reuse if within 60 minutes and has a local execution ID
            if (minutesDiff <= 60 && manifest.bugzyExecutionId?.startsWith('local-')) {
              this.bugzyExecutionId = manifest.bugzyExecutionId;
              reuseDir = latestDir;
            }
          } catch (err) {
            // Ignore parsing errors
          }
        }
      }
    }

    // If no session to reuse, generate new local ID
    if (!this.bugzyExecutionId) {
      this.bugzyExecutionId = 'local-' + this.timestamp;
    }
  }

  // If we have a specific execution ID but haven't found a reuse dir yet, check for matching session
  if (!reuseDir && fs.existsSync(testRunsRoot)) {
    const dirs = fs.readdirSync(testRunsRoot)
      .filter(d => fs.statSync(path.join(testRunsRoot, d)).isDirectory())
      .sort()
      .reverse();

    for (const dir of dirs) {
      const manifestPath = path.join(testRunsRoot, dir, 'manifest.json');
      if (fs.existsSync(manifestPath)) {
        try {
          const manifest = JSON.parse(fs.readFileSync(manifestPath, 'utf-8'));
          if (manifest.bugzyExecutionId === this.bugzyExecutionId) {
            reuseDir = dir;
            break;
          }
        } catch (err) {
          // Ignore parsing errors
        }
      }
    }
  }

  if (reuseDir) {
    this.testRunDir = path.join(testRunsRoot, reuseDir);
    console.log(`\n🔄 Continuing test run: ${reuseDir}`);
    console.log(`📋 Execution ID: ${this.bugzyExecutionId}`);
    console.log(`📁 Output directory: ${this.testRunDir}\n`);
  } else {
    this.testRunDir = path.join(testRunsRoot, this.timestamp);
    fs.mkdirSync(this.testRunDir, { recursive: true });
    console.log(`\n🆕 New test run: ${this.timestamp}`);
    console.log(`📋 Execution ID: ${this.bugzyExecutionId}`);
    console.log(`📁 Output directory: ${this.testRunDir}\n`);
  }
}
|
|
439
|
-
|
|
440
|
-
/**
 * Called after each test completes.
 *
 * Persists the attempt under test-runs/<session>/<testId>/exec-<n>/:
 * result.json always; video.webm when a video attachment exists;
 * trace.zip and screenshots/ only for failed/timed-out attempts;
 * steps.json when the test recorded test.step() calls. The attempt
 * number <n> is auto-detected from the existing exec-* folders.
 */
onTestEnd(test: TestCase, result: TestResult): void {
  // Extract test ID from test title or file path
  const testId = this.extractTestId(test);

  // Create test case directory
  const testCaseDir = path.join(this.testRunDir, testId);
  fs.mkdirSync(testCaseDir, { recursive: true });

  // Auto-detect execution number from existing folders
  let executionNum = 1;
  if (fs.existsSync(testCaseDir)) {
    const existingExecs = fs.readdirSync(testCaseDir)
      .filter(d => d.startsWith('exec-') && fs.statSync(path.join(testCaseDir, d)).isDirectory())
      .map(d => parseInt(d.replace('exec-', ''), 10))
      .filter(n => !isNaN(n));

    if (existingExecs.length > 0) {
      executionNum = Math.max(...existingExecs) + 1;
    }
  }

  // Create execution directory
  const execDir = path.join(testCaseDir, `exec-${executionNum}`);
  fs.mkdirSync(execDir, { recursive: true });

  // Prepare result data in Playwright format
  const resultData = {
    status: result.status,
    duration: result.duration,
    errors: result.errors,
    retry: result.retry,
    startTime: result.startTime.toISOString(),
    attachments: [] as Array<{ name: string; path: string; contentType: string }>,
  };

  // Handle attachments (videos, traces, screenshots)
  let hasVideo = false;
  let hasTrace = false;
  let hasScreenshots = false;

  for (const attachment of result.attachments) {
    if (attachment.name === 'video' && attachment.path) {
      // Copy video file to execution directory
      const videoFileName = 'video.webm';
      const videoDestPath = path.join(execDir, videoFileName);

      try {
        fs.copyFileSync(attachment.path, videoDestPath);
        resultData.attachments.push({
          name: 'video',
          path: videoFileName,
          contentType: attachment.contentType || 'video/webm',
        });
        hasVideo = true;
      } catch (err) {
        // Best-effort copy: record nothing for this attachment on failure
        console.error(`Failed to copy video: ${err}`);
      }
    } else if (attachment.name === 'trace' && attachment.path) {
      // Copy trace file to execution directory (only for failures)
      if (result.status === 'failed' || result.status === 'timedOut') {
        const traceFileName = 'trace.zip';
        const traceDestPath = path.join(execDir, traceFileName);

        try {
          fs.copyFileSync(attachment.path, traceDestPath);
          resultData.attachments.push({
            name: 'trace',
            path: traceFileName,
            contentType: attachment.contentType || 'application/zip',
          });
          hasTrace = true;
        } catch (err) {
          console.error(`Failed to copy trace: ${err}`);
        }
      }
    } else if (attachment.name === 'screenshot' && attachment.path) {
      // Copy screenshots to execution directory (only for failures)
      if (result.status === 'failed' || result.status === 'timedOut') {
        const screenshotsDir = path.join(execDir, 'screenshots');
        fs.mkdirSync(screenshotsDir, { recursive: true });

        const screenshotFileName = path.basename(attachment.path);
        const screenshotDestPath = path.join(screenshotsDir, screenshotFileName);

        try {
          fs.copyFileSync(attachment.path, screenshotDestPath);
          resultData.attachments.push({
            name: 'screenshot',
            path: path.join('screenshots', screenshotFileName),
            contentType: attachment.contentType || 'image/png',
          });
          hasScreenshots = true;
        } catch (err) {
          console.error(`Failed to copy screenshot: ${err}`);
        }
      }
    }
  }

  // Write result.json
  const resultPath = path.join(execDir, 'result.json');
  fs.writeFileSync(resultPath, JSON.stringify(resultData, null, 2));

  // Store execution info for manifest
  if (!this.testResults.has(testId)) {
    this.testResults.set(testId, []);
  }

  this.testResults.get(testId)!.push({
    number: executionNum,
    status: result.status,
    duration: result.duration,
    videoFile: hasVideo ? 'video.webm' : null,
    hasTrace,
    hasScreenshots,
    error: result.errors.length > 0 ? result.errors[0].message : null,
  });

  // Generate steps.json if test has steps
  const testKey = this.getTestKey(test);
  const steps = this.testSteps.get(testKey);
  if (steps && steps.length > 0) {
    const stepsData = {
      steps,
      summary: {
        totalSteps: steps.length,
        successfulSteps: steps.filter(s => s.status === 'success').length,
        failedSteps: steps.filter(s => s.status === 'failed').length,
        skippedSteps: steps.filter(s => s.status === 'skipped').length,
      },
    };

    const stepsPath = path.join(execDir, 'steps.json');
    fs.writeFileSync(stepsPath, JSON.stringify(stepsData, null, 2));
  }

  // Log execution result
  const statusIcon = result.status === 'passed' ? '✅' : result.status === 'failed' ? '❌' : '⚠️';
  console.log(`${statusIcon} ${testId} [exec-${executionNum}] - ${result.status} (${result.duration}ms)`);
}
|
|
583
|
-
|
|
584
|
-
/**
|
|
585
|
-
* Called when a test step begins
|
|
586
|
-
*/
|
|
587
|
-
onStepBegin(test: TestCase, _result: TestResult, step: TestStep): void {
|
|
588
|
-
// Only track test.step() calls (not hooks, fixtures, or expects)
|
|
589
|
-
if (step.category !== 'test.step') {
|
|
590
|
-
return;
|
|
591
|
-
}
|
|
592
|
-
|
|
593
|
-
const testKey = this.getTestKey(test);
|
|
594
|
-
|
|
595
|
-
// Record test start time on first step
|
|
596
|
-
if (!this.testStartTimes.has(testKey)) {
|
|
597
|
-
this.testStartTimes.set(testKey, step.startTime.getTime());
|
|
598
|
-
}
|
|
599
|
-
|
|
600
|
-
// Initialize steps array for this test
|
|
601
|
-
if (!this.testSteps.has(testKey)) {
|
|
602
|
-
this.testSteps.set(testKey, []);
|
|
603
|
-
}
|
|
604
|
-
|
|
605
|
-
const steps = this.testSteps.get(testKey)!;
|
|
606
|
-
const testStartTime = this.testStartTimes.get(testKey)!;
|
|
607
|
-
const videoTimeSeconds = Math.floor((step.startTime.getTime() - testStartTime) / 1000);
|
|
608
|
-
|
|
609
|
-
steps.push({
|
|
610
|
-
index: steps.length + 1,
|
|
611
|
-
timestamp: step.startTime.toISOString(),
|
|
612
|
-
videoTimeSeconds,
|
|
613
|
-
action: step.title,
|
|
614
|
-
status: 'success', // Will be updated in onStepEnd if it fails
|
|
615
|
-
description: `${step.title} - in progress`,
|
|
616
|
-
technicalDetails: 'test.step',
|
|
617
|
-
});
|
|
618
|
-
}
|
|
619
|
-
|
|
620
|
-
/**
|
|
621
|
-
* Called when a test step ends
|
|
622
|
-
*/
|
|
623
|
-
onStepEnd(test: TestCase, _result: TestResult, step: TestStep): void {
|
|
624
|
-
// Only track test.step() calls
|
|
625
|
-
if (step.category !== 'test.step') {
|
|
626
|
-
return;
|
|
627
|
-
}
|
|
628
|
-
|
|
629
|
-
const testKey = this.getTestKey(test);
|
|
630
|
-
const steps = this.testSteps.get(testKey);
|
|
631
|
-
|
|
632
|
-
if (!steps || steps.length === 0) {
|
|
633
|
-
return;
|
|
634
|
-
}
|
|
635
|
-
|
|
636
|
-
// Update the last step with final status and duration
|
|
637
|
-
const lastStep = steps[steps.length - 1];
|
|
638
|
-
lastStep.duration = step.duration;
|
|
639
|
-
|
|
640
|
-
if (step.error) {
|
|
641
|
-
lastStep.status = 'failed';
|
|
642
|
-
lastStep.description = `${step.title} - failed: ${step.error.message}`;
|
|
643
|
-
} else {
|
|
644
|
-
lastStep.status = 'success';
|
|
645
|
-
lastStep.description = `${step.title} - completed successfully`;
|
|
646
|
-
}
|
|
647
|
-
}
|
|
648
|
-
|
|
649
|
-
/**
 * Called after all tests complete.
 *
 * Builds this run's manifest from the recorded executions, merges it
 * with any manifest already present in the session directory, runs the
 * new-vs-known failure classification, and writes manifest.json
 * atomically (temp file + rename).
 */
onEnd(result: FullResult): void {
  const endTime = new Date();

  // Calculate statistics
  let totalTests = 0;
  let totalExecutions = 0;
  let passedTests = 0;
  let failedTests = 0;

  const testCases: Array<any> = [];

  for (const [testId, executions] of this.testResults.entries()) {
    totalTests++;
    totalExecutions += executions.length;

    // A test's final status is the status of its last attempt
    const finalStatus = executions[executions.length - 1].status;
    if (finalStatus === 'passed') {
      passedTests++;
    } else {
      failedTests++;
    }

    testCases.push({
      id: testId,
      // Human-readable name derived from the id: strip TC-prefix, de-dash
      name: testId.replace(/^TC-\d+-/, '').replace(/-/g, ' '),
      totalExecutions: executions.length,
      finalStatus,
      executions,
    });
  }

  // Build current run's manifest
  const currentManifest: Manifest = {
    bugzyExecutionId: this.bugzyExecutionId,
    timestamp: this.timestamp,
    startTime: this.startTime.toISOString(),
    endTime: endTime.toISOString(),
    status: result.status,
    stats: {
      totalTests,
      passed: passedTests,
      failed: failedTests,
      totalExecutions,
    },
    testCases,
  };

  // Read existing manifest for merge (if session is being reused)
  const manifestPath = path.join(this.testRunDir, 'manifest.json');
  let existingManifest: Manifest | null = null;
  if (fs.existsSync(manifestPath)) {
    try {
      existingManifest = JSON.parse(fs.readFileSync(manifestPath, 'utf-8'));
    } catch (err) {
      console.warn(`⚠️ Could not parse existing manifest, will overwrite: ${err}`);
    }
  }

  // Merge with existing manifest data
  const merged = mergeManifests(existingManifest, currentManifest);

  // Classify failures as new vs known
  if (merged.stats.failed > 0) {
    try {
      const testRunsRoot = path.join(process.cwd(), 'test-runs');
      const { newFailures, knownFailures } = classifyFailures(merged, testRunsRoot);
      if (newFailures.length > 0) {
        merged.new_failures = newFailures;
      }
      if (knownFailures.length > 0) {
        merged.known_failures = knownFailures;
      }

      console.log(`\n🔍 Failure Classification:`);
      console.log(` New failures: ${newFailures.length}`);
      console.log(` Known failures: ${knownFailures.length}`);
    } catch (err) {
      // Classification is best-effort; the manifest is still written below
      console.warn(`⚠️ Could not classify failures: ${err}`);
    }
  }

  // Write atomically (temp file + rename)
  const tmpPath = manifestPath + '.tmp';
  fs.writeFileSync(tmpPath, JSON.stringify(merged, null, 2));
  fs.renameSync(tmpPath, manifestPath);

  console.log(`\n📊 Test Run Summary (this run):`);
  console.log(` Total tests: ${totalTests}`);
  console.log(` Passed: ${passedTests}`);
  console.log(` Failed: ${failedTests}`);
  console.log(` Total executions: ${totalExecutions}`);

  if (existingManifest) {
    console.log(`\n🔗 Merged with previous session data:`);
    console.log(` Session total tests: ${merged.stats.totalTests}`);
    console.log(` Session total executions: ${merged.stats.totalExecutions}`);
  }

  console.log(` Manifest: ${manifestPath}\n`);
}
|
|
752
|
-
|
|
753
|
-
/**
|
|
754
|
-
* Extract test ID from test case
|
|
755
|
-
* Generates TC-XXX-{test-name} format
|
|
756
|
-
*/
|
|
757
|
-
private extractTestId(test: TestCase): string {
|
|
758
|
-
// Try to extract from test title
|
|
759
|
-
const title = test.title.toLowerCase().replace(/\s+/g, '-');
|
|
760
|
-
|
|
761
|
-
// Get test file name without extension
|
|
762
|
-
const fileName = path.basename(test.location.file, path.extname(test.location.file));
|
|
763
|
-
|
|
764
|
-
// Extract number from filename if it follows TC-XXX pattern
|
|
765
|
-
const tcMatch = fileName.match(/TC-(\d+)/i);
|
|
766
|
-
if (tcMatch) {
|
|
767
|
-
return `TC-${tcMatch[1]}-${title}`;
|
|
768
|
-
}
|
|
769
|
-
|
|
770
|
-
// Otherwise generate from index
|
|
771
|
-
// This is a simple fallback - you may want to improve this
|
|
772
|
-
const testIndex = String(test.parent.tests.indexOf(test) + 1).padStart(3, '0');
|
|
773
|
-
return `TC-${testIndex}-${title}`;
|
|
774
|
-
}
|
|
775
|
-
|
|
776
|
-
/**
|
|
777
|
-
* Generate unique key for test to track steps across retries
|
|
778
|
-
*/
|
|
779
|
-
private getTestKey(test: TestCase): string {
|
|
780
|
-
return `${test.location.file}::${test.title}`;
|
|
781
|
-
}
|
|
782
|
-
}
|
|
783
|
-
|
|
784
|
-
export default BugzyReporter;
|
|
1
|
+
import type {
|
|
2
|
+
Reporter,
|
|
3
|
+
FullConfig,
|
|
4
|
+
Suite,
|
|
5
|
+
TestCase,
|
|
6
|
+
TestResult,
|
|
7
|
+
FullResult,
|
|
8
|
+
TestStep,
|
|
9
|
+
} from '@playwright/test/reporter';
|
|
10
|
+
import * as fs from 'fs';
|
|
11
|
+
import * as path from 'path';
|
|
12
|
+
|
|
13
|
+
/**
 * Step data for steps.json
 *
 * One recorded test.step() call, with a video-relative timestamp so the
 * step can be located in the execution's recording.
 */
interface StepData {
  index: number;            // 1-based position of the step within the test
  timestamp: string;        // ISO-8601 wall-clock time the step started
  videoTimeSeconds: number; // offset into the recorded video, in whole seconds
  action: string;           // step title as passed to test.step()
  status: 'success' | 'failed' | 'skipped'; // final outcome (updated in onStepEnd)
  description: string;      // human-readable outcome summary
  technicalDetails: string; // step category (currently always 'test.step')
  duration?: number;        // step duration in ms, filled in when the step ends
}
|
|
26
|
+
|
|
27
|
+
/**
 * Manifest execution entry
 *
 * One attempt (an exec-N directory) of a single test case.
 */
interface ManifestExecution {
  number: number;            // attempt number (1-based, matches the exec-{n} directory)
  status: string;            // Playwright result status ('passed', 'failed', 'timedOut', ...)
  duration: number;          // attempt duration in milliseconds
  videoFile: string | null;  // 'video.webm' when a recording was saved, otherwise null
  hasTrace: boolean;         // true when trace.zip was captured (failures only)
  hasScreenshots: boolean;   // true when screenshots/ was populated (failures only)
  error: string | null;      // first error message of the attempt, if any
}
|
|
39
|
+
|
|
40
|
+
/**
 * Manifest test case entry
 *
 * Aggregates all execution attempts of one test case within a session.
 */
interface ManifestTestCase {
  id: string;                      // TC-XXX-{slug} identifier
  name: string;                    // human-readable name derived from the id
  totalExecutions: number;         // executions.length
  finalStatus: string;             // status of the last (highest-numbered) attempt
  executions: ManifestExecution[]; // attempts, sorted by ascending execution number
}
|
|
50
|
+
|
|
51
|
+
/**
 * Failure classification entry for new vs known failures
 */
interface FailureClassification {
  id: string;                   // failing test case id
  name: string;                 // failing test case name
  error: string | null;         // error message from the last execution attempt, if any
  lastPassedRun: string | null; // timestamp of the most recent run where this test passed; null if none found
}
|
|
60
|
+
|
|
61
|
+
/**
 * Manifest structure for test run sessions
 *
 * Persisted as manifest.json at the root of each test-runs/<timestamp>/
 * directory; merged across runs that share a bugzyExecutionId.
 */
interface Manifest {
  bugzyExecutionId: string; // session id (env BUGZY_EXECUTION_ID or 'local-' + timestamp)
  timestamp: string;        // YYYYMMDD-HHMMSS session directory name
  startTime: string;        // ISO-8601 start of the (possibly merged) session
  endTime: string;          // ISO-8601 end of the (possibly merged) session
  status: string;           // overall status ('failed' when any test case failed)
  stats: {
    totalTests: number;      // distinct test cases in the session
    passed: number;          // test cases whose final status is 'passed'
    failed: number;          // test cases with any other final status
    totalExecutions: number; // sum of attempts across all test cases
  };
  testCases: ManifestTestCase[];
  new_failures?: FailureClassification[];   // failures that passed recently (or have no history)
  known_failures?: FailureClassification[]; // failures that failed in all recent runs
}
|
|
80
|
+
|
|
81
|
+
/**
|
|
82
|
+
* Classify failures as new or known by checking previous test run manifests.
|
|
83
|
+
*
|
|
84
|
+
* A failure is "new" if the test passed in any of the last N runs.
|
|
85
|
+
* A failure is "known" if the test failed in ALL of the last N runs (or no prior data exists for that specific test).
|
|
86
|
+
* If there are no previous runs at all (first run), all failures are treated as "new".
|
|
87
|
+
*
|
|
88
|
+
* @param currentManifest - The current run's manifest
|
|
89
|
+
* @param testRunsRoot - Path to the test-runs/ directory
|
|
90
|
+
* @returns Object with newFailures and knownFailures arrays
|
|
91
|
+
*/
|
|
92
|
+
export function classifyFailures(
|
|
93
|
+
currentManifest: Manifest,
|
|
94
|
+
testRunsRoot: string
|
|
95
|
+
): { newFailures: FailureClassification[]; knownFailures: FailureClassification[] } {
|
|
96
|
+
const lookback = parseInt(process.env.BUGZY_FAILURE_LOOKBACK || '5', 10);
|
|
97
|
+
const newFailures: FailureClassification[] = [];
|
|
98
|
+
const knownFailures: FailureClassification[] = [];
|
|
99
|
+
|
|
100
|
+
// Get failed test cases from current manifest
|
|
101
|
+
const failedTests = currentManifest.testCases.filter(
|
|
102
|
+
tc => tc.finalStatus === 'failed' || tc.finalStatus === 'timedOut'
|
|
103
|
+
);
|
|
104
|
+
|
|
105
|
+
if (failedTests.length === 0) {
|
|
106
|
+
return { newFailures, knownFailures };
|
|
107
|
+
}
|
|
108
|
+
|
|
109
|
+
// Read previous manifests
|
|
110
|
+
const previousManifests: Manifest[] = [];
|
|
111
|
+
if (fs.existsSync(testRunsRoot)) {
|
|
112
|
+
const dirs = fs.readdirSync(testRunsRoot)
|
|
113
|
+
.filter(d => {
|
|
114
|
+
try {
|
|
115
|
+
return fs.statSync(path.join(testRunsRoot, d)).isDirectory();
|
|
116
|
+
} catch {
|
|
117
|
+
return false;
|
|
118
|
+
}
|
|
119
|
+
})
|
|
120
|
+
.sort()
|
|
121
|
+
.reverse(); // Latest first
|
|
122
|
+
|
|
123
|
+
for (const dir of dirs) {
|
|
124
|
+
// Skip current run
|
|
125
|
+
if (dir === currentManifest.timestamp) continue;
|
|
126
|
+
|
|
127
|
+
if (previousManifests.length >= lookback) break;
|
|
128
|
+
|
|
129
|
+
const manifestPath = path.join(testRunsRoot, dir, 'manifest.json');
|
|
130
|
+
if (fs.existsSync(manifestPath)) {
|
|
131
|
+
try {
|
|
132
|
+
const manifest: Manifest = JSON.parse(fs.readFileSync(manifestPath, 'utf-8'));
|
|
133
|
+
previousManifests.push(manifest);
|
|
134
|
+
} catch {
|
|
135
|
+
// Skip invalid manifests
|
|
136
|
+
}
|
|
137
|
+
}
|
|
138
|
+
}
|
|
139
|
+
}
|
|
140
|
+
|
|
141
|
+
// If no previous runs exist, all failures are new (first run)
|
|
142
|
+
if (previousManifests.length === 0) {
|
|
143
|
+
for (const tc of failedTests) {
|
|
144
|
+
const lastExec = tc.executions[tc.executions.length - 1];
|
|
145
|
+
newFailures.push({
|
|
146
|
+
id: tc.id,
|
|
147
|
+
name: tc.name,
|
|
148
|
+
error: lastExec?.error || null,
|
|
149
|
+
lastPassedRun: null,
|
|
150
|
+
});
|
|
151
|
+
}
|
|
152
|
+
return { newFailures, knownFailures };
|
|
153
|
+
}
|
|
154
|
+
|
|
155
|
+
// For each failed test, check if it passed in any previous run
|
|
156
|
+
for (const tc of failedTests) {
|
|
157
|
+
const lastExec = tc.executions[tc.executions.length - 1];
|
|
158
|
+
let lastPassedRun: string | null = null;
|
|
159
|
+
|
|
160
|
+
for (const prevManifest of previousManifests) {
|
|
161
|
+
const prevTc = prevManifest.testCases.find(ptc => ptc.id === tc.id);
|
|
162
|
+
if (prevTc && (prevTc.finalStatus === 'passed')) {
|
|
163
|
+
lastPassedRun = prevManifest.timestamp;
|
|
164
|
+
break;
|
|
165
|
+
}
|
|
166
|
+
}
|
|
167
|
+
|
|
168
|
+
if (lastPassedRun) {
|
|
169
|
+
// Test passed recently, so this is a new failure
|
|
170
|
+
newFailures.push({
|
|
171
|
+
id: tc.id,
|
|
172
|
+
name: tc.name,
|
|
173
|
+
error: lastExec?.error || null,
|
|
174
|
+
lastPassedRun,
|
|
175
|
+
});
|
|
176
|
+
} else {
|
|
177
|
+
// Check if test exists in any previous run at all
|
|
178
|
+
const existsInPrevious = previousManifests.some(
|
|
179
|
+
pm => pm.testCases.some(ptc => ptc.id === tc.id)
|
|
180
|
+
);
|
|
181
|
+
|
|
182
|
+
if (!existsInPrevious) {
|
|
183
|
+
// New test that doesn't exist in history - treat as new failure
|
|
184
|
+
newFailures.push({
|
|
185
|
+
id: tc.id,
|
|
186
|
+
name: tc.name,
|
|
187
|
+
error: lastExec?.error || null,
|
|
188
|
+
lastPassedRun: null,
|
|
189
|
+
});
|
|
190
|
+
} else {
|
|
191
|
+
// Failed in all previous runs - known failure
|
|
192
|
+
knownFailures.push({
|
|
193
|
+
id: tc.id,
|
|
194
|
+
name: tc.name,
|
|
195
|
+
error: lastExec?.error || null,
|
|
196
|
+
lastPassedRun: null,
|
|
197
|
+
});
|
|
198
|
+
}
|
|
199
|
+
}
|
|
200
|
+
}
|
|
201
|
+
|
|
202
|
+
return { newFailures, knownFailures };
|
|
203
|
+
}
|
|
204
|
+
|
|
205
|
+
/**
|
|
206
|
+
* Merge an existing manifest with the current run's manifest.
|
|
207
|
+
* If existing is null, returns current as-is.
|
|
208
|
+
* Deduplicates executions by number (current run wins on collision).
|
|
209
|
+
* Recalculates stats from the merged data.
|
|
210
|
+
*/
|
|
211
|
+
export function mergeManifests(existing: Manifest | null, current: Manifest): Manifest {
|
|
212
|
+
if (!existing) {
|
|
213
|
+
return current;
|
|
214
|
+
}
|
|
215
|
+
|
|
216
|
+
// Build map of test cases by id from existing manifest
|
|
217
|
+
const testCaseMap = new Map<string, ManifestTestCase>();
|
|
218
|
+
for (const tc of existing.testCases) {
|
|
219
|
+
testCaseMap.set(tc.id, { ...tc, executions: [...tc.executions] });
|
|
220
|
+
}
|
|
221
|
+
|
|
222
|
+
// Merge current run's test cases
|
|
223
|
+
for (const tc of current.testCases) {
|
|
224
|
+
const existingTc = testCaseMap.get(tc.id);
|
|
225
|
+
if (existingTc) {
|
|
226
|
+
// Merge executions: build a map keyed by execution number
|
|
227
|
+
const execMap = new Map<number, ManifestExecution>();
|
|
228
|
+
for (const exec of existingTc.executions) {
|
|
229
|
+
execMap.set(exec.number, exec);
|
|
230
|
+
}
|
|
231
|
+
// Current run's executions overwrite on collision
|
|
232
|
+
for (const exec of tc.executions) {
|
|
233
|
+
execMap.set(exec.number, exec);
|
|
234
|
+
}
|
|
235
|
+
// Sort by execution number
|
|
236
|
+
const mergedExecs = Array.from(execMap.values()).sort((a, b) => a.number - b.number);
|
|
237
|
+
const finalStatus = mergedExecs[mergedExecs.length - 1].status;
|
|
238
|
+
|
|
239
|
+
testCaseMap.set(tc.id, {
|
|
240
|
+
id: tc.id,
|
|
241
|
+
name: tc.name,
|
|
242
|
+
totalExecutions: mergedExecs.length,
|
|
243
|
+
finalStatus,
|
|
244
|
+
executions: mergedExecs,
|
|
245
|
+
});
|
|
246
|
+
} else {
|
|
247
|
+
// New test case from current run
|
|
248
|
+
testCaseMap.set(tc.id, { ...tc, executions: [...tc.executions] });
|
|
249
|
+
}
|
|
250
|
+
}
|
|
251
|
+
|
|
252
|
+
// Build merged test cases array
|
|
253
|
+
const mergedTestCases = Array.from(testCaseMap.values());
|
|
254
|
+
|
|
255
|
+
// Recalculate stats
|
|
256
|
+
let totalTests = 0;
|
|
257
|
+
let totalExecutions = 0;
|
|
258
|
+
let passedTests = 0;
|
|
259
|
+
let failedTests = 0;
|
|
260
|
+
|
|
261
|
+
for (const tc of mergedTestCases) {
|
|
262
|
+
totalTests++;
|
|
263
|
+
totalExecutions += tc.executions.length;
|
|
264
|
+
if (tc.finalStatus === 'passed') {
|
|
265
|
+
passedTests++;
|
|
266
|
+
} else {
|
|
267
|
+
failedTests++;
|
|
268
|
+
}
|
|
269
|
+
}
|
|
270
|
+
|
|
271
|
+
// Use earliest startTime, latest endTime
|
|
272
|
+
const startTime = new Date(existing.startTime) < new Date(current.startTime)
|
|
273
|
+
? existing.startTime
|
|
274
|
+
: current.startTime;
|
|
275
|
+
const endTime = new Date(existing.endTime) > new Date(current.endTime)
|
|
276
|
+
? existing.endTime
|
|
277
|
+
: current.endTime;
|
|
278
|
+
|
|
279
|
+
// Status: if any test case failed, overall is failed
|
|
280
|
+
const hasFailure = mergedTestCases.some(tc => tc.finalStatus === 'failed' || tc.finalStatus === 'timedOut');
|
|
281
|
+
const status = hasFailure ? 'failed' : current.status;
|
|
282
|
+
|
|
283
|
+
const merged: Manifest = {
|
|
284
|
+
bugzyExecutionId: current.bugzyExecutionId,
|
|
285
|
+
timestamp: existing.timestamp, // Keep original session timestamp
|
|
286
|
+
startTime,
|
|
287
|
+
endTime,
|
|
288
|
+
status,
|
|
289
|
+
stats: {
|
|
290
|
+
totalTests,
|
|
291
|
+
passed: passedTests,
|
|
292
|
+
failed: failedTests,
|
|
293
|
+
totalExecutions,
|
|
294
|
+
},
|
|
295
|
+
testCases: mergedTestCases,
|
|
296
|
+
};
|
|
297
|
+
|
|
298
|
+
// Preserve failure classification (current run's classification wins)
|
|
299
|
+
if (current.new_failures) {
|
|
300
|
+
merged.new_failures = current.new_failures;
|
|
301
|
+
} else if (existing.new_failures) {
|
|
302
|
+
merged.new_failures = existing.new_failures;
|
|
303
|
+
}
|
|
304
|
+
|
|
305
|
+
if (current.known_failures) {
|
|
306
|
+
merged.known_failures = current.known_failures;
|
|
307
|
+
} else if (existing.known_failures) {
|
|
308
|
+
merged.known_failures = existing.known_failures;
|
|
309
|
+
}
|
|
310
|
+
|
|
311
|
+
return merged;
|
|
312
|
+
}
|
|
313
|
+
|
|
314
|
+
/**
 * Bugzy Custom Playwright Reporter
 *
 * Records test executions in hierarchical structure:
 * test-runs/YYYYMMDD-HHMMSS/TC-{id}/exec-{num}/
 *
 * Features:
 * - Groups multiple test runs under same directory when BUGZY_EXECUTION_ID matches
 * - Checks latest directory's manifest to reuse existing session directory
 * - Tracks multiple execution attempts per test
 * - Records videos for all tests
 * - Captures traces/screenshots for failures only
 * - Links to BUGZY_EXECUTION_ID for session tracking
 * - Generates manifest.json with execution summary
 * - Generates steps.json with video timestamps for test.step() calls
 */
class BugzyReporter implements Reporter {
  private testRunDir!: string;       // absolute path of this session's output directory
  private timestamp!: string;        // YYYYMMDD-HHMMSS id derived from startTime (UTC)
  private bugzyExecutionId!: string; // session id: env var, reused manifest id, or 'local-' + timestamp
  private startTime!: Date;          // wall-clock start of the run (set in onBegin)
  private testResults: Map<string, Array<any>> = new Map();    // testId -> execution summaries for the manifest
  private testSteps: Map<string, Array<StepData>> = new Map(); // testKey -> recorded test.step() entries
  private testStartTimes: Map<string, number> = new Map();     // testKey -> epoch ms of the first step seen

  constructor() {
    // No longer need to read execution number from environment
    // It will be auto-detected per test case (see onTestEnd)
  }

  /**
   * Called once before running tests.
   *
   * Decides whether to reuse an existing session directory (same
   * BUGZY_EXECUTION_ID, or a recent 'local-' session) or create a new one.
   */
  onBegin(config: FullConfig, suite: Suite): void {
    this.startTime = new Date();

    // Generate timestamp in YYYYMMDD-HHMMSS format (from the UTC ISO string)
    this.timestamp = this.startTime
      .toISOString()
      .replace(/[-:]/g, '')
      .replace(/T/, '-')
      .slice(0, 15);

    const testRunsRoot = path.join(process.cwd(), 'test-runs');

    // Check if we should reuse an existing session
    let reuseDir: string | null = null;

    // If BUGZY_EXECUTION_ID is provided, use it directly
    if (process.env.BUGZY_EXECUTION_ID) {
      this.bugzyExecutionId = process.env.BUGZY_EXECUTION_ID;
    } else {
      // For local runs, check if we can reuse the latest session
      // Reuse if the latest manifest is within 60 minutes
      if (fs.existsSync(testRunsRoot)) {
        const dirs = fs.readdirSync(testRunsRoot)
          .filter(d => fs.statSync(path.join(testRunsRoot, d)).isDirectory())
          .sort()
          .reverse(); // Sort descending (latest first)

        if (dirs.length > 0) {
          const latestDir = dirs[0];
          const manifestPath = path.join(testRunsRoot, latestDir, 'manifest.json');

          if (fs.existsSync(manifestPath)) {
            try {
              const manifest = JSON.parse(fs.readFileSync(manifestPath, 'utf-8'));
              const manifestTime = new Date(manifest.startTime).getTime();
              const currentTime = this.startTime.getTime();
              const minutesDiff = (currentTime - manifestTime) / (1000 * 60);

              // Reuse if within 60 minutes and has a local execution ID
              if (minutesDiff <= 60 && manifest.bugzyExecutionId?.startsWith('local-')) {
                this.bugzyExecutionId = manifest.bugzyExecutionId;
                reuseDir = latestDir;
              }
            } catch (err) {
              // Ignore parsing errors
            }
          }
        }
      }

      // If no session to reuse, generate new local ID
      if (!this.bugzyExecutionId) {
        this.bugzyExecutionId = 'local-' + this.timestamp;
      }
    }

    // If we have a specific execution ID but haven't found a reuse dir yet, check for matching session
    if (!reuseDir && fs.existsSync(testRunsRoot)) {
      const dirs = fs.readdirSync(testRunsRoot)
        .filter(d => fs.statSync(path.join(testRunsRoot, d)).isDirectory())
        .sort()
        .reverse();

      for (const dir of dirs) {
        const manifestPath = path.join(testRunsRoot, dir, 'manifest.json');
        if (fs.existsSync(manifestPath)) {
          try {
            const manifest = JSON.parse(fs.readFileSync(manifestPath, 'utf-8'));
            if (manifest.bugzyExecutionId === this.bugzyExecutionId) {
              reuseDir = dir;
              break;
            }
          } catch (err) {
            // Ignore parsing errors
          }
        }
      }
    }

    if (reuseDir) {
      // Continue writing into the matched session directory
      this.testRunDir = path.join(testRunsRoot, reuseDir);
      console.log(`\n🔄 Continuing test run: ${reuseDir}`);
      console.log(`📋 Execution ID: ${this.bugzyExecutionId}`);
      console.log(`📁 Output directory: ${this.testRunDir}\n`);
    } else {
      // Fresh session directory named by this run's timestamp
      this.testRunDir = path.join(testRunsRoot, this.timestamp);
      fs.mkdirSync(this.testRunDir, { recursive: true });
      console.log(`\n🆕 New test run: ${this.timestamp}`);
      console.log(`📋 Execution ID: ${this.bugzyExecutionId}`);
      console.log(`📁 Output directory: ${this.testRunDir}\n`);
    }
  }

  /**
   * Called after each test completes.
   *
   * Creates TC-{id}/exec-{n}/, copies attachments into it, writes
   * result.json (and steps.json when steps were recorded), and records an
   * execution summary for the final manifest.
   */
  onTestEnd(test: TestCase, result: TestResult): void {
    // Extract test ID from test title or file path
    const testId = this.extractTestId(test);

    // Create test case directory
    const testCaseDir = path.join(this.testRunDir, testId);
    fs.mkdirSync(testCaseDir, { recursive: true });

    // Auto-detect execution number from existing folders
    let executionNum = 1;
    if (fs.existsSync(testCaseDir)) {
      const existingExecs = fs.readdirSync(testCaseDir)
        .filter(d => d.startsWith('exec-') && fs.statSync(path.join(testCaseDir, d)).isDirectory())
        .map(d => parseInt(d.replace('exec-', ''), 10))
        .filter(n => !isNaN(n));

      if (existingExecs.length > 0) {
        // Next attempt number is one past the highest existing exec-N
        executionNum = Math.max(...existingExecs) + 1;
      }
    }

    // Create execution directory
    const execDir = path.join(testCaseDir, `exec-${executionNum}`);
    fs.mkdirSync(execDir, { recursive: true });

    // Prepare result data in Playwright format
    const resultData = {
      status: result.status,
      duration: result.duration,
      errors: result.errors,
      retry: result.retry,
      startTime: result.startTime.toISOString(),
      attachments: [] as Array<{ name: string; path: string; contentType: string }>,
    };

    // Handle attachments (videos, traces, screenshots)
    let hasVideo = false;
    let hasTrace = false;
    let hasScreenshots = false;

    for (const attachment of result.attachments) {
      if (attachment.name === 'video' && attachment.path) {
        // Copy video file to execution directory (videos kept for all results)
        const videoFileName = 'video.webm';
        const videoDestPath = path.join(execDir, videoFileName);

        try {
          fs.copyFileSync(attachment.path, videoDestPath);
          resultData.attachments.push({
            name: 'video',
            path: videoFileName,
            contentType: attachment.contentType || 'video/webm',
          });
          hasVideo = true;
        } catch (err) {
          console.error(`Failed to copy video: ${err}`);
        }
      } else if (attachment.name === 'trace' && attachment.path) {
        // Copy trace file to execution directory (only for failures)
        if (result.status === 'failed' || result.status === 'timedOut') {
          const traceFileName = 'trace.zip';
          const traceDestPath = path.join(execDir, traceFileName);

          try {
            fs.copyFileSync(attachment.path, traceDestPath);
            resultData.attachments.push({
              name: 'trace',
              path: traceFileName,
              contentType: attachment.contentType || 'application/zip',
            });
            hasTrace = true;
          } catch (err) {
            console.error(`Failed to copy trace: ${err}`);
          }
        }
      } else if (attachment.name === 'screenshot' && attachment.path) {
        // Copy screenshots to execution directory (only for failures)
        if (result.status === 'failed' || result.status === 'timedOut') {
          const screenshotsDir = path.join(execDir, 'screenshots');
          fs.mkdirSync(screenshotsDir, { recursive: true });

          const screenshotFileName = path.basename(attachment.path);
          const screenshotDestPath = path.join(screenshotsDir, screenshotFileName);

          try {
            fs.copyFileSync(attachment.path, screenshotDestPath);
            resultData.attachments.push({
              name: 'screenshot',
              // Stored relative to execDir so the manifest stays portable
              path: path.join('screenshots', screenshotFileName),
              contentType: attachment.contentType || 'image/png',
            });
            hasScreenshots = true;
          } catch (err) {
            console.error(`Failed to copy screenshot: ${err}`);
          }
        }
      }
    }

    // Write result.json
    const resultPath = path.join(execDir, 'result.json');
    fs.writeFileSync(resultPath, JSON.stringify(resultData, null, 2));

    // Store execution info for manifest
    if (!this.testResults.has(testId)) {
      this.testResults.set(testId, []);
    }

    this.testResults.get(testId)!.push({
      number: executionNum,
      status: result.status,
      duration: result.duration,
      videoFile: hasVideo ? 'video.webm' : null,
      hasTrace,
      hasScreenshots,
      error: result.errors.length > 0 ? result.errors[0].message : null,
    });

    // Generate steps.json if test has steps
    // NOTE(review): testSteps is keyed per test (not per attempt), so entries
    // accumulate across retries — confirm this is the intended behavior.
    const testKey = this.getTestKey(test);
    const steps = this.testSteps.get(testKey);
    if (steps && steps.length > 0) {
      const stepsData = {
        steps,
        summary: {
          totalSteps: steps.length,
          successfulSteps: steps.filter(s => s.status === 'success').length,
          failedSteps: steps.filter(s => s.status === 'failed').length,
          skippedSteps: steps.filter(s => s.status === 'skipped').length,
        },
      };

      const stepsPath = path.join(execDir, 'steps.json');
      fs.writeFileSync(stepsPath, JSON.stringify(stepsData, null, 2));
    }

    // Log execution result
    const statusIcon = result.status === 'passed' ? '✅' : result.status === 'failed' ? '❌' : '⚠️';
    console.log(`${statusIcon} ${testId} [exec-${executionNum}] - ${result.status} (${result.duration}ms)`);
  }

  /**
   * Called when a test step begins.
   *
   * Records a StepData entry with a video-relative timestamp; only
   * test.step() calls are tracked (hooks/expects are ignored).
   */
  onStepBegin(test: TestCase, _result: TestResult, step: TestStep): void {
    // Only track test.step() calls (not hooks, fixtures, or expects)
    if (step.category !== 'test.step') {
      return;
    }

    const testKey = this.getTestKey(test);

    // Record test start time on first step (used as the video time origin)
    if (!this.testStartTimes.has(testKey)) {
      this.testStartTimes.set(testKey, step.startTime.getTime());
    }

    // Initialize steps array for this test
    if (!this.testSteps.has(testKey)) {
      this.testSteps.set(testKey, []);
    }

    const steps = this.testSteps.get(testKey)!;
    const testStartTime = this.testStartTimes.get(testKey)!;
    const videoTimeSeconds = Math.floor((step.startTime.getTime() - testStartTime) / 1000);

    steps.push({
      index: steps.length + 1,
      timestamp: step.startTime.toISOString(),
      videoTimeSeconds,
      action: step.title,
      status: 'success', // Will be updated in onStepEnd if it fails
      description: `${step.title} - in progress`,
      technicalDetails: 'test.step',
    });
  }

  /**
   * Called when a test step ends.
   *
   * Finalizes the most recently recorded step's status, description and
   * duration.
   */
  onStepEnd(test: TestCase, _result: TestResult, step: TestStep): void {
    // Only track test.step() calls
    if (step.category !== 'test.step') {
      return;
    }

    const testKey = this.getTestKey(test);
    const steps = this.testSteps.get(testKey);

    if (!steps || steps.length === 0) {
      return;
    }

    // Update the last step with final status and duration
    const lastStep = steps[steps.length - 1];
    lastStep.duration = step.duration;

    if (step.error) {
      lastStep.status = 'failed';
      lastStep.description = `${step.title} - failed: ${step.error.message}`;
    } else {
      lastStep.status = 'success';
      lastStep.description = `${step.title} - completed successfully`;
    }
  }

  /**
   * Called after all tests complete.
   *
   * Builds this run's manifest, merges it with any existing session
   * manifest, classifies failures as new vs known, and writes
   * manifest.json atomically.
   */
  onEnd(result: FullResult): void {
    const endTime = new Date();

    // Calculate statistics
    let totalTests = 0;
    let totalExecutions = 0;
    let passedTests = 0;
    let failedTests = 0;

    const testCases: Array<any> = [];

    for (const [testId, executions] of this.testResults.entries()) {
      totalTests++;
      totalExecutions += executions.length;

      // A test's final status is that of its last attempt
      const finalStatus = executions[executions.length - 1].status;
      if (finalStatus === 'passed') {
        passedTests++;
      } else {
        failedTests++;
      }

      testCases.push({
        id: testId,
        // De-slugify the id into a readable name
        name: testId.replace(/^TC-\d+-/, '').replace(/-/g, ' '),
        totalExecutions: executions.length,
        finalStatus,
        executions,
      });
    }

    // Build current run's manifest
    const currentManifest: Manifest = {
      bugzyExecutionId: this.bugzyExecutionId,
      timestamp: this.timestamp,
      startTime: this.startTime.toISOString(),
      endTime: endTime.toISOString(),
      status: result.status,
      stats: {
        totalTests,
        passed: passedTests,
        failed: failedTests,
        totalExecutions,
      },
      testCases,
    };

    // Read existing manifest for merge (if session is being reused)
    const manifestPath = path.join(this.testRunDir, 'manifest.json');
    let existingManifest: Manifest | null = null;
    if (fs.existsSync(manifestPath)) {
      try {
        existingManifest = JSON.parse(fs.readFileSync(manifestPath, 'utf-8'));
      } catch (err) {
        console.warn(`⚠️ Could not parse existing manifest, will overwrite: ${err}`);
      }
    }

    // Merge with existing manifest data
    const merged = mergeManifests(existingManifest, currentManifest);

    // Classify failures as new vs known
    if (merged.stats.failed > 0) {
      try {
        const testRunsRoot = path.join(process.cwd(), 'test-runs');
        const { newFailures, knownFailures } = classifyFailures(merged, testRunsRoot);
        if (newFailures.length > 0) {
          merged.new_failures = newFailures;
        }
        if (knownFailures.length > 0) {
          merged.known_failures = knownFailures;
        }

        console.log(`\n🔍 Failure Classification:`);
        console.log(`   New failures: ${newFailures.length}`);
        console.log(`   Known failures: ${knownFailures.length}`);
      } catch (err) {
        // Classification is best-effort; never fail the run over it
        console.warn(`⚠️ Could not classify failures: ${err}`);
      }
    }

    // Write atomically (temp file + rename) so readers never see a partial manifest
    const tmpPath = manifestPath + '.tmp';
    fs.writeFileSync(tmpPath, JSON.stringify(merged, null, 2));
    fs.renameSync(tmpPath, manifestPath);

    console.log(`\n📊 Test Run Summary (this run):`);
    console.log(`   Total tests: ${totalTests}`);
    console.log(`   Passed: ${passedTests}`);
    console.log(`   Failed: ${failedTests}`);
    console.log(`   Total executions: ${totalExecutions}`);

    if (existingManifest) {
      console.log(`\n🔗 Merged with previous session data:`);
      console.log(`   Session total tests: ${merged.stats.totalTests}`);
      console.log(`   Session total executions: ${merged.stats.totalExecutions}`);
    }

    console.log(`   Manifest: ${manifestPath}\n`);
  }

  /**
   * Extract test ID from test case
   * Generates TC-XXX-{test-name} format
   */
  private extractTestId(test: TestCase): string {
    // Try to extract from test title
    const title = test.title.toLowerCase().replace(/\s+/g, '-');

    // Get test file name without extension
    const fileName = path.basename(test.location.file, path.extname(test.location.file));

    // Extract number from filename if it follows TC-XXX pattern
    const tcMatch = fileName.match(/TC-(\d+)/i);
    if (tcMatch) {
      return `TC-${tcMatch[1]}-${title}`;
    }

    // Otherwise generate from index
    // This is a simple fallback - you may want to improve this
    const testIndex = String(test.parent.tests.indexOf(test) + 1).padStart(3, '0');
    return `TC-${testIndex}-${title}`;
  }

  /**
   * Generate unique key for test to track steps across retries
   */
  private getTestKey(test: TestCase): string {
    return `${test.location.file}::${test.title}`;
  }
}
|
|
783
|
+
|
|
784
|
+
export default BugzyReporter;
|