@empiricalrun/test-run 0.13.1 → 0.14.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/CHANGELOG.md +18 -0
  2. package/README.md +42 -3
  3. package/dist/bin/commands/estimate-time-shard.d.ts +3 -0
  4. package/dist/bin/commands/estimate-time-shard.d.ts.map +1 -0
  5. package/dist/bin/commands/estimate-time-shard.js +122 -0
  6. package/dist/bin/commands/failed-list.d.ts +3 -0
  7. package/dist/bin/commands/failed-list.d.ts.map +1 -0
  8. package/dist/bin/commands/failed-list.js +34 -0
  9. package/dist/bin/commands/merge.d.ts +3 -0
  10. package/dist/bin/commands/merge.d.ts.map +1 -0
  11. package/dist/bin/commands/merge.js +20 -0
  12. package/dist/bin/commands/optimize-shards.d.ts +3 -0
  13. package/dist/bin/commands/optimize-shards.d.ts.map +1 -0
  14. package/dist/bin/commands/optimize-shards.js +400 -0
  15. package/dist/bin/commands/run.d.ts +3 -0
  16. package/dist/bin/commands/run.d.ts.map +1 -0
  17. package/dist/bin/commands/run.js +132 -0
  18. package/dist/bin/index.js +15 -132
  19. package/dist/cmd.d.ts +2 -0
  20. package/dist/cmd.d.ts.map +1 -0
  21. package/dist/cmd.js +5 -0
  22. package/dist/failed-test-list.d.ts +35 -0
  23. package/dist/failed-test-list.d.ts.map +1 -0
  24. package/dist/failed-test-list.js +267 -0
  25. package/dist/index.d.ts +1 -0
  26. package/dist/index.d.ts.map +1 -1
  27. package/dist/index.js +3 -1
  28. package/dist/lib/cancellation-watcher.js +1 -1
  29. package/dist/lib/merge-reports/index.d.ts.map +1 -1
  30. package/dist/lib/merge-reports/index.js +36 -7
  31. package/dist/lib/merge-reports/types.d.ts +2 -0
  32. package/dist/lib/merge-reports/types.d.ts.map +1 -1
  33. package/package.json +8 -4
  34. package/tsconfig.tsbuildinfo +1 -1
  35. package/dist/bin/merge-reports.d.ts +0 -3
  36. package/dist/bin/merge-reports.d.ts.map +0 -1
  37. package/dist/bin/merge-reports.js +0 -26
@@ -0,0 +1,400 @@
1
+ "use strict";
2
+ var __importDefault = (this && this.__importDefault) || function (mod) {
3
+ return (mod && mod.__esModule) ? mod : { "default": mod };
4
+ };
5
+ Object.defineProperty(exports, "__esModule", { value: true });
6
+ exports.registerOptimizeShardsCommand = registerOptimizeShardsCommand;
7
+ const reporter_1 = require("@empiricalrun/reporter");
8
+ const child_process_1 = require("child_process");
9
+ const fs_1 = __importDefault(require("fs"));
10
+ const path_1 = __importDefault(require("path"));
11
+ const SUITES_DELIMITER = " › ";
12
+ const BATCH_SIZE = 100;
13
+ async function fetchTestHistoryBatch(testCaseIds, dashboardUrl, apiKey, cacheKey) {
14
+ const url = `${dashboardUrl}/api/test-cases/history/batch`;
15
+ const response = await fetch(url, {
16
+ method: "POST",
17
+ headers: {
18
+ "Content-Type": "application/json",
19
+ Authorization: `Bearer ${apiKey}`,
20
+ },
21
+ body: JSON.stringify({
22
+ test_case_ids: testCaseIds,
23
+ ...(cacheKey && { cache_key: cacheKey }),
24
+ }),
25
+ });
26
+ if (!response.ok) {
27
+ throw new Error(`Failed to fetch test history: ${response.status}`);
28
+ }
29
+ return response.json();
30
+ }
31
+ async function fetchTestHistory(testCaseIds, dashboardUrl, cacheKey) {
32
+ const apiKey = process.env.EMPIRICALRUN_API_KEY;
33
+ if (!apiKey) {
34
+ throw new Error("EMPIRICALRUN_API_KEY environment variable is required");
35
+ }
36
+ const result = {};
37
+ for (let i = 0; i < testCaseIds.length; i += BATCH_SIZE) {
38
+ const batch = testCaseIds.slice(i, i + BATCH_SIZE);
39
+ const batchResult = await fetchTestHistoryBatch(batch, dashboardUrl, apiKey, cacheKey);
40
+ Object.assign(result, batchResult);
41
+ }
42
+ return result;
43
+ }
44
+ function getPlaywrightTestList(shard) {
45
+ const shardArg = shard ? `--shard ${shard}` : "";
46
+ try {
47
+ const output = (0, child_process_1.execSync)(`npx playwright test ${shardArg} --list --reporter=json`.trim(), {
48
+ encoding: "utf-8",
49
+ stdio: ["pipe", "pipe", "pipe"],
50
+ });
51
+ return JSON.parse(output);
52
+ }
53
+ catch (error) {
54
+ if (error.stdout) {
55
+ try {
56
+ return JSON.parse(error.stdout);
57
+ }
58
+ catch {
59
+ throw new Error(`Failed to parse playwright JSON output: ${error.message}`);
60
+ }
61
+ }
62
+ throw new Error(`Failed to get test list from playwright: ${error.message}`);
63
+ }
64
+ }
65
+ function getShardInfoReporterPath() {
66
+ try {
67
+ const reporterPath = require.resolve("@empiricalrun/reporter/shard-info-reporter");
68
+ return reporterPath;
69
+ }
70
+ catch {
71
+ const fallbackPath = path_1.default.resolve(__dirname, "../../../../reporter/dist/shard-info-reporter.js");
72
+ if (fs_1.default.existsSync(fallbackPath)) {
73
+ return fallbackPath;
74
+ }
75
+ throw new Error("Could not resolve @empiricalrun/reporter/shard-info-reporter. " +
76
+ "Make sure @empiricalrun/reporter is installed.");
77
+ }
78
+ }
79
+ function getShardInfoReport() {
80
+ const reporterPath = getShardInfoReporterPath();
81
+ try {
82
+ const output = (0, child_process_1.execSync)(`npx playwright test --list --reporter=${reporterPath}`, {
83
+ encoding: "utf-8",
84
+ stdio: ["pipe", "pipe", "pipe"],
85
+ });
86
+ return JSON.parse(output);
87
+ }
88
+ catch (error) {
89
+ if (error.stdout) {
90
+ try {
91
+ return JSON.parse(error.stdout);
92
+ }
93
+ catch {
94
+ throw new Error(`Failed to parse shard info output: ${error.message}`);
95
+ }
96
+ }
97
+ throw new Error(`Failed to get shard info from playwright: ${error.message}`);
98
+ }
99
+ }
100
+ function extractSuiteMetadata(suites, configFullyParallel) {
101
+ const metadata = new Map();
102
+ function traverse(suite, parentParallelMode) {
103
+ let effectiveMode;
104
+ if (suite.parallelMode !== "none") {
105
+ effectiveMode = suite.parallelMode;
106
+ }
107
+ else if (parentParallelMode !== "none") {
108
+ effectiveMode = parentParallelMode;
109
+ }
110
+ else if (configFullyParallel) {
111
+ effectiveMode = "parallel";
112
+ }
113
+ else {
114
+ effectiveMode = "default";
115
+ }
116
+ const specIds = suite.specs.map((s) => s.id);
117
+ if (specIds.length > 0) {
118
+ const key = `${suite.file}:${suite.line}`;
119
+ metadata.set(key, {
120
+ file: suite.file,
121
+ line: suite.line,
122
+ parallelMode: effectiveMode,
123
+ hasBeforeAllHooks: suite.hasBeforeAllHooks,
124
+ hasAfterAllHooks: suite.hasAfterAllHooks,
125
+ specIds,
126
+ });
127
+ }
128
+ for (const child of suite.suites) {
129
+ traverse(child, effectiveMode);
130
+ }
131
+ }
132
+ for (const suite of suites) {
133
+ traverse(suite, "none");
134
+ }
135
+ return metadata;
136
+ }
137
+ function buildSpecToGroupKeyMap(suiteMetadata) {
138
+ const specToGroupKey = new Map();
139
+ for (const [suiteKey, meta] of suiteMetadata) {
140
+ for (const specId of meta.specIds) {
141
+ if (meta.parallelMode === "serial" || meta.parallelMode === "default") {
142
+ specToGroupKey.set(specId, `file:${meta.file}`);
143
+ }
144
+ else if (meta.hasBeforeAllHooks || meta.hasAfterAllHooks) {
145
+ specToGroupKey.set(specId, `hooks:${suiteKey}`);
146
+ }
147
+ else {
148
+ specToGroupKey.set(specId, `test:${specId}`);
149
+ }
150
+ }
151
+ }
152
+ return specToGroupKey;
153
+ }
154
+ function flattenedSpecsToTestInfos(specs, historyResponse, includeRetries, defaultDuration, specToGroupKey) {
155
+ const tests = [];
156
+ for (const spec of specs) {
157
+ for (const test of spec.tests) {
158
+ const history = historyResponse[spec.id] || [];
159
+ let estimatedDuration = defaultDuration;
160
+ if (history.length > 0) {
161
+ const durationField = includeRetries
162
+ ? "duration_total"
163
+ : "duration_per_retry";
164
+ const durations = history
165
+ .map((r) => r[durationField])
166
+ .sort((a, b) => a - b);
167
+ const p75Index = Math.floor(durations.length * 0.75);
168
+ estimatedDuration =
169
+ durations[Math.min(p75Index, durations.length - 1)] ??
170
+ defaultDuration;
171
+ }
172
+ const baseGroupKey = specToGroupKey?.get(spec.id) ?? `test:${spec.id}`;
173
+ const groupKey = `${test.projectName}:${baseGroupKey}`;
174
+ tests.push({
175
+ id: spec.id,
176
+ title: spec.title,
177
+ file: spec.file,
178
+ projectName: test.projectName,
179
+ nesting: spec.nesting,
180
+ suitesString: spec.suitesString,
181
+ estimatedDuration,
182
+ groupKey,
183
+ });
184
+ }
185
+ }
186
+ return tests;
187
+ }
188
+ function buildTestGroups(tests) {
189
+ const groupMap = new Map();
190
+ for (const test of tests) {
191
+ let group = groupMap.get(test.groupKey);
192
+ if (!group) {
193
+ group = {
194
+ key: test.groupKey,
195
+ tests: [],
196
+ totalDuration: 0,
197
+ };
198
+ groupMap.set(test.groupKey, group);
199
+ }
200
+ group.tests.push(test);
201
+ group.totalDuration += test.estimatedDuration;
202
+ }
203
+ return [...groupMap.values()];
204
+ }
205
+ function packGroupsIntoShards(groups, numShards) {
206
+ // Deterministic sort: by duration DESC, then by key ASC for stability
207
+ const sorted = [...groups].sort((a, b) => {
208
+ const durationDiff = b.totalDuration - a.totalDuration;
209
+ if (durationDiff !== 0)
210
+ return durationDiff;
211
+ return a.key.localeCompare(b.key);
212
+ });
213
+ const shards = Array.from({ length: numShards }, (_, i) => ({
214
+ index: i + 1,
215
+ groups: [],
216
+ tests: [],
217
+ totalDuration: 0,
218
+ }));
219
+ if (shards.length === 0) {
220
+ return shards;
221
+ }
222
+ for (const group of sorted) {
223
+ // Deterministic selection: pick lowest index shard when durations are equal
224
+ let targetShard = shards[0];
225
+ for (const shard of shards) {
226
+ if (shard.totalDuration < targetShard.totalDuration) {
227
+ targetShard = shard;
228
+ }
229
+ }
230
+ targetShard.groups.push(group);
231
+ targetShard.tests.push(...group.tests);
232
+ targetShard.totalDuration += group.totalDuration;
233
+ }
234
+ return shards;
235
+ }
236
+ function formatTestListLine(test) {
237
+ const suites = test.nesting.slice(1, -1);
238
+ const suitesAndTitle = suites.length > 0
239
+ ? [...suites, test.title].join(SUITES_DELIMITER)
240
+ : test.title;
241
+ return `[${test.projectName}] › ${test.file} › ${suitesAndTitle}`;
242
+ }
243
+ function generateTestListContent(shard, workers) {
244
+ const parallelizedDuration = shard.totalDuration / workers;
245
+ const totalSeconds = Math.round(parallelizedDuration / 1000);
246
+ const minutes = Math.floor(totalSeconds / 60);
247
+ const seconds = totalSeconds % 60;
248
+ const lines = [
249
+ `# Shard ${shard.index} - Optimized bin packing`,
250
+ `# Tests: ${shard.tests.length}`,
251
+ `# Total duration: ${Math.round(shard.totalDuration)}ms`,
252
+ `# Estimated with ${workers} workers: ${minutes}m ${seconds}s`,
253
+ `# Generated: ${new Date().toISOString()}`,
254
+ "",
255
+ ];
256
+ for (const test of shard.tests) {
257
+ lines.push(formatTestListLine(test));
258
+ }
259
+ return lines.join("\n");
260
+ }
261
+ function formatDuration(ms, workers) {
262
+ const parallelizedDuration = ms / workers;
263
+ const totalSeconds = Math.round(parallelizedDuration / 1000);
264
+ const minutes = Math.floor(totalSeconds / 60);
265
+ const seconds = totalSeconds % 60;
266
+ return `${minutes}m ${seconds}s`;
267
+ }
268
+ function registerOptimizeShardsCommand(program) {
269
+ program
270
+ .command("optimize-shards")
271
+ .description("Generate optimized shard test-list files using bin packing algorithm")
272
+ .requiredOption("--shards <count>", "Number of shards to create")
273
+ .option("--dashboard-url <url>", "Dashboard URL for fetching test history", process.env.DASHBOARD_DOMAIN || "https://dash.empirical.run")
274
+ .option("--workers <workers>", "Number of parallel workers per shard", "8")
275
+ .option("--include-retries", "Use total duration including retries (accounts for flakiness)", false)
276
+ .option("--default-duration <ms>", "Default duration for tests without history", "30000")
277
+ .option("--output-dir <dir>", "Output directory for test-list files", "./shards")
278
+ .option("--cache-key <key>", "Cache key for duration data (e.g., commit SHA). Ensures all workers see same data.")
279
+ .action(async (options) => {
280
+ const { shards: shardCountStr, dashboardUrl, includeRetries, outputDir, cacheKey, } = options;
281
+ const workers = parseInt(options.workers, 10);
282
+ const shardCount = parseInt(shardCountStr, 10);
283
+ const defaultDuration = parseInt(options.defaultDuration, 10);
284
+ if (shardCount < 1) {
285
+ console.error("Shard count must be at least 1");
286
+ process.exit(1);
287
+ }
288
+ console.log(`Fetching test metadata from playwright...`);
289
+ const shardInfoReport = getShardInfoReport();
290
+ const allTestsReport = getPlaywrightTestList();
291
+ const allSpecs = (0, reporter_1.getFlattenedTestList)(allTestsReport.suites);
292
+ if (allSpecs.length === 0) {
293
+ console.log("No tests found.");
294
+ process.exit(0);
295
+ }
296
+ const testCaseIds = allSpecs.map((spec) => spec.id);
297
+ console.log(`Found ${testCaseIds.length} tests. Fetching history...`);
298
+ let historyResponse = {};
299
+ try {
300
+ historyResponse = await fetchTestHistory(testCaseIds, dashboardUrl, cacheKey);
301
+ }
302
+ catch (error) {
303
+ console.error("Failed to fetch test history:", error.message);
304
+ process.exit(1);
305
+ }
306
+ const testsWithHistory = testCaseIds.filter((id) => (historyResponse[id]?.length ?? 0) > 0).length;
307
+ console.log(`History found for ${testsWithHistory}/${testCaseIds.length} tests`);
308
+ const configFullyParallel = shardInfoReport.config.fullyParallel;
309
+ console.log(`Config fullyParallel: ${configFullyParallel}`);
310
+ const suiteMetadata = extractSuiteMetadata(shardInfoReport.suites, configFullyParallel);
311
+ const specToGroupKey = buildSpecToGroupKeyMap(suiteMetadata);
312
+ const serialCount = [...specToGroupKey.values()].filter((k) => k.startsWith("file:")).length;
313
+ const parallelCount = [...specToGroupKey.values()].filter((k) => k.startsWith("test:")).length;
314
+ const hooksCount = [...specToGroupKey.values()].filter((k) => k.startsWith("hooks:")).length;
315
+ console.log(`Test grouping: ${parallelCount} parallel, ${serialCount} serial/default, ${hooksCount} with hooks`);
316
+ const allTests = flattenedSpecsToTestInfos(allSpecs, historyResponse, includeRetries, defaultDuration, specToGroupKey);
317
+ console.log(`\n--- Playwright Default Sharding ---`);
318
+ const playwrightShardEstimates = [];
319
+ for (let i = 1; i <= shardCount; i++) {
320
+ const shardReport = getPlaywrightTestList(`${i}/${shardCount}`);
321
+ const shardSpecs = (0, reporter_1.getFlattenedTestList)(shardReport.suites);
322
+ const shardTests = flattenedSpecsToTestInfos(shardSpecs, historyResponse, includeRetries, defaultDuration);
323
+ const totalDuration = shardTests.reduce((sum, t) => sum + t.estimatedDuration, 0);
324
+ playwrightShardEstimates.push({
325
+ shard: i,
326
+ tests: shardTests.length,
327
+ durationMs: totalDuration,
328
+ });
329
+ console.log(` Shard ${i}/${shardCount}: ${shardTests.length} tests, ~${formatDuration(totalDuration, workers)}`);
330
+ }
331
+ const playwrightMaxDuration = Math.max(...playwrightShardEstimates.map((s) => s.durationMs));
332
+ const playwrightMinDuration = Math.min(...playwrightShardEstimates.map((s) => s.durationMs));
333
+ console.log(` Makespan (max shard): ${formatDuration(playwrightMaxDuration, workers)}`);
334
+ console.log(` Imbalance: ${formatDuration(playwrightMaxDuration - playwrightMinDuration, workers)}`);
335
+ console.log(`\n--- Optimized Bin Packing (LPT) ---`);
336
+ const testGroups = buildTestGroups(allTests);
337
+ console.log(` Built ${testGroups.length} test groups from ${allTests.length} tests`);
338
+ const optimizedShards = packGroupsIntoShards(testGroups, shardCount);
339
+ for (const shard of optimizedShards) {
340
+ console.log(` Shard ${shard.index}/${shardCount}: ${shard.tests.length} tests, ~${formatDuration(shard.totalDuration, workers)}`);
341
+ }
342
+ const optimizedMaxDuration = Math.max(...optimizedShards.map((s) => s.totalDuration));
343
+ const optimizedMinDuration = Math.min(...optimizedShards.map((s) => s.totalDuration));
344
+ console.log(` Makespan (max shard): ${formatDuration(optimizedMaxDuration, workers)}`);
345
+ console.log(` Imbalance: ${formatDuration(optimizedMaxDuration - optimizedMinDuration, workers)}`);
346
+ const improvement = ((playwrightMaxDuration - optimizedMaxDuration) /
347
+ playwrightMaxDuration) *
348
+ 100;
349
+ console.log(`\n Improvement: ${improvement.toFixed(1)}% faster makespan`);
350
+ const absoluteOutputDir = path_1.default.isAbsolute(outputDir)
351
+ ? outputDir
352
+ : path_1.default.join(process.cwd(), outputDir);
353
+ if (!fs_1.default.existsSync(absoluteOutputDir)) {
354
+ fs_1.default.mkdirSync(absoluteOutputDir, { recursive: true });
355
+ }
356
+ console.log(`\n--- Writing Test List Files ---`);
357
+ for (const shard of optimizedShards) {
358
+ const content = generateTestListContent(shard, workers);
359
+ const filePath = path_1.default.join(absoluteOutputDir, `shard-${shard.index}.txt`);
360
+ fs_1.default.writeFileSync(filePath, content, "utf-8");
361
+ console.log(` Written: ${filePath}`);
362
+ }
363
+ const comparison = optimizedShards.map((os) => {
364
+ const pw = playwrightShardEstimates.find((p) => p.shard === os.index);
365
+ return {
366
+ shardIndex: os.index,
367
+ playwrightTestCount: pw.tests,
368
+ playwrightEstimatedMs: Math.round(pw.durationMs / workers),
369
+ optimizedTestCount: os.tests.length,
370
+ optimizedEstimatedMs: Math.round(os.totalDuration / workers),
371
+ };
372
+ });
373
+ const summaryPath = path_1.default.join(absoluteOutputDir, "summary.json");
374
+ fs_1.default.writeFileSync(summaryPath, JSON.stringify({
375
+ generatedAt: new Date().toISOString(),
376
+ shardCount,
377
+ workers,
378
+ totalTests: allTests.length,
379
+ testsWithHistory,
380
+ testsWithoutHistory: allTests.length - testsWithHistory,
381
+ includeRetries,
382
+ defaultDurationMs: defaultDuration,
383
+ playwright: {
384
+ makespanMs: Math.round(playwrightMaxDuration / workers),
385
+ imbalanceMs: Math.round((playwrightMaxDuration - playwrightMinDuration) / workers),
386
+ },
387
+ optimized: {
388
+ makespanMs: Math.round(optimizedMaxDuration / workers),
389
+ imbalanceMs: Math.round((optimizedMaxDuration - optimizedMinDuration) / workers),
390
+ },
391
+ improvementPercent: parseFloat(improvement.toFixed(1)),
392
+ shards: comparison,
393
+ }, null, 2), "utf-8");
394
+ console.log(` Written: ${summaryPath}`);
395
+ console.log(`\nDone! Run shards with:`);
396
+ for (let i = 1; i <= shardCount; i++) {
397
+ console.log(` npx playwright test --test-list ${path_1.default.join(outputDir, `shard-${i}.txt`)}`);
398
+ }
399
+ });
400
+ }
@@ -0,0 +1,3 @@
1
+ import type { Command } from "commander";
2
+ export declare function registerRunCommand(program: Command): void;
3
+ //# sourceMappingURL=run.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"run.d.ts","sourceRoot":"","sources":["../../../src/bin/commands/run.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,EAAE,OAAO,EAAE,MAAM,WAAW,CAAC;AAkBzC,wBAAgB,kBAAkB,CAAC,OAAO,EAAE,OAAO,QAwJlD"}
@@ -0,0 +1,132 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.registerRunCommand = registerRunCommand;
4
+ const dashboard_1 = require("../../dashboard");
5
+ const cmd_1 = require("../../lib/cmd");
6
+ const run_all_tests_1 = require("../../lib/run-all-tests");
7
+ const run_specific_test_1 = require("../../lib/run-specific-test");
8
+ const types_1 = require("../../types");
9
+ const utils_1 = require("../../utils");
10
+ const config_parser_1 = require("../../utils/config-parser");
11
+ function registerRunCommand(program) {
12
+ program
13
+ .command("run", { isDefault: true })
14
+ .description("Run Playwright tests")
15
+ .option("-n, --name <test-name>", "Name of the test to run")
16
+ .option("-s, --suites <suites>", "Suites under which the test is defined")
17
+ .option("-d, --dir <test-dir>", "Path to the test directory")
18
+ .option("-f, --file <test-file-path>", "Path to the test file")
19
+ .option("-p, --project <project-name...>", "Test projects to run")
20
+ .option("--payload <payload>", "Payload to run tests")
21
+ .option("--skip-teardown", "This options skips running teardown tests")
22
+ .option("--forbid-only", `This options forbids the use of ".only" in the test files`)
23
+ .allowUnknownOption()
24
+ .action(async (options, command) => {
25
+ const repoDir = process.cwd();
26
+ if (options.name && options.forbidOnly) {
27
+ console.error("--name and --forbid-only options cannot be used together");
28
+ process.exit(1);
29
+ }
30
+ options.project = options.project || ["*"];
31
+ const optionsToStrip = [
32
+ "-n",
33
+ "--name",
34
+ "--skip-teardown",
35
+ "-f",
36
+ "--file",
37
+ "-d",
38
+ "--dir",
39
+ "-p",
40
+ "--project",
41
+ "-s",
42
+ "--suites",
43
+ "--payload",
44
+ options.skipTeardown,
45
+ options.name,
46
+ options.dir,
47
+ options.file,
48
+ options.suites,
49
+ options.payload,
50
+ ...options.project,
51
+ ];
52
+ const pwOptions = command.args.filter((arg) => !optionsToStrip.includes(arg));
53
+ const projectName = process.env.PROJECT_NAME || (await (0, utils_1.pickNameFromPackageJson)());
54
+ if (!projectName) {
55
+ throw new Error("Project name is required");
56
+ }
57
+ const directory = options.dir || "tests";
58
+ const suites = options.suites && options.suites.trim() !== ""
59
+ ? options.suites?.split(",")
60
+ : undefined;
61
+ let tests = (0, config_parser_1.parseToken)(options.payload)?.tests ||
62
+ (options.name
63
+ ? [
64
+ {
65
+ name: options.name,
66
+ dir: directory,
67
+ filePath: options.file,
68
+ suites,
69
+ },
70
+ ]
71
+ : undefined);
72
+ const environmentSlug = process.env.TEST_RUN_ENVIRONMENT || "";
73
+ const environmentVariables = await (0, dashboard_1.fetchEnvironmentVariables)();
74
+ const envOverrides = {};
75
+ environmentVariables.forEach((envVar) => {
76
+ envOverrides[envVar.name] = envVar.value;
77
+ });
78
+ if (Object.keys(envOverrides).length > 0) {
79
+ console.log(`Loaded environment variables: ${Object.keys(envOverrides).join(", ")}`);
80
+ }
81
+ let environmentSpecificProjects = [];
82
+ let platform = types_1.Platform.WEB;
83
+ try {
84
+ if (environmentSlug) {
85
+ const { environment, build: latestBuild } = await (0, dashboard_1.fetchEnvironmentAndBuild)(projectName, environmentSlug);
86
+ platform = environment.platform;
87
+ environmentSpecificProjects = environment.playwright_projects;
88
+ if (!process.env.BUILD_URL) {
89
+ process.env.BUILD_URL = latestBuild?.build_url;
90
+ }
91
+ const buildUrl = process.env.BUILD_URL;
92
+ await (0, utils_1.downloadBuild)(buildUrl);
93
+ }
94
+ const projectFilters = await (0, utils_1.generateProjectFilters)({
95
+ platform,
96
+ filteringSets: [...options.project, ...environmentSpecificProjects],
97
+ repoDir,
98
+ });
99
+ if (options.skipTeardown) {
100
+ await (0, utils_1.handleTeardownSkipFlag)(directory, repoDir);
101
+ }
102
+ const hasTestsFilter = tests && tests.length > 0;
103
+ let commandToRun;
104
+ if (hasTestsFilter) {
105
+ commandToRun = await (0, run_specific_test_1.runSpecificTestsCmd)({
106
+ tests,
107
+ projects: projectFilters,
108
+ passthroughArgs: pwOptions.join(" "),
109
+ platform,
110
+ envOverrides,
111
+ repoDir,
112
+ });
113
+ }
114
+ else {
115
+ commandToRun = (0, run_all_tests_1.runAllTestsCmd)({
116
+ projects: projectFilters,
117
+ passthroughArgs: pwOptions.join(" "),
118
+ platform,
119
+ envOverrides,
120
+ });
121
+ }
122
+ const { hasTestPassed } = await (0, cmd_1.runTestsForCmd)(commandToRun, repoDir);
123
+ if (!hasTestPassed) {
124
+ process.exit(1);
125
+ }
126
+ }
127
+ catch (error) {
128
+ console.error("Error while running playwright test:", error.message);
129
+ process.exit(1);
130
+ }
131
+ });
132
+ }