@invariant-ai/cli 0.1.0 → 0.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -219,268 +219,293 @@ function categorizeTypeErrors(result, projectDir, changedFiles, changeSource, ch
  };
  }
  async function runCommand(options) {
- console.log(chalk_1.default.bold('\n🧪 Invariant - Running Tests\n'));
- const cwd = process.cwd();
- // Find playwright config (could be in current dir or frontend subdir)
- const configInfo = (0, detect_js_1.findPlaywrightConfig)(cwd);
- if (!configInfo.found) {
- console.log(chalk_1.default.red('✗ No playwright.config.ts found\n'));
- console.log(chalk_1.default.white('To fix this:'));
- console.log(chalk_1.default.gray(' 1. Run: ') + chalk_1.default.cyan('invariant init'));
- console.log(chalk_1.default.gray(' 2. Or create playwright.config.ts manually\n'));
- process.exit(1);
- }
- const testDir = configInfo.dir;
- // Clean up old runs (retention policy)
- const deletedRuns = cleanupOldRuns(testDir);
- if (deletedRuns > 0) {
- console.log(chalk_1.default.gray(`♻ Cleaned up ${deletedRuns} old run${deletedRuns > 1 ? 's' : ''} (>${RETENTION_DAYS} days)`));
- }
- // Check for test files
- const testsDir = (0, path_1.join)(testDir, 'tests', 'e2e');
- const testFileCount = countTestFiles(testsDir);
- if (testFileCount === 0) {
- console.log(chalk_1.default.red('✗ No test files found\n'));
- console.log(chalk_1.default.white('Expected test files in: ') + chalk_1.default.cyan('tests/e2e/*.spec.ts'));
- console.log(chalk_1.default.gray('\nTo fix this:'));
- console.log(chalk_1.default.gray(' 1. Create a test file: ') + chalk_1.default.cyan('tests/e2e/example.spec.ts'));
- console.log(chalk_1.default.gray(' 2. Or run: ') + chalk_1.default.cyan('invariant init') + chalk_1.default.gray(' to generate a sample test\n'));
- process.exit(1);
- }
- // Show pre-run checklist
- console.log(chalk_1.default.green('✓') + chalk_1.default.gray(` Project: ${testDir}`));
- console.log(chalk_1.default.green('✓') + chalk_1.default.gray(` Found ${testFileCount} test file${testFileCount > 1 ? 's' : ''}`));
- // Check for .invariant directory
- const invariantDir = (0, path_1.join)(testDir, '.invariant');
- if ((0, fs_1.existsSync)(invariantDir)) {
- console.log(chalk_1.default.green('✓') + chalk_1.default.gray(' Invariant configured'));
- }
- else {
- console.log(chalk_1.default.yellow('!') + chalk_1.default.gray(' No .invariant directory (run `invariant init`)'));
- }
- // Compute changed files and line ranges once (shared by TypeCheck + Lint)
- const { files: changedFiles, source: changeSource } = (0, changes_js_1.getChangedFiles)(testDir);
- const changedLineRanges = (0, changes_js_1.getChangedLineRanges)(testDir);
- // Run TypeScript type check if project uses TypeScript
- let categorizedTypeCheck = null;
- if (projectUsesTypeScript(testDir)) {
- const typeSpinner = (0, ora_1.default)({
- text: 'Running TypeScript type check...',
- spinner: 'dots'
- }).start();
- const rawResult = runTypeCheck(testDir);
- categorizedTypeCheck = categorizeTypeErrors(rawResult, testDir, changedFiles, changeSource, changedLineRanges);
- if (categorizedTypeCheck.totalErrorCount === 0) {
- typeSpinner.succeed('TypeScript: 0 errors');
- }
- else if (categorizedTypeCheck.noChangesDetected) {
- typeSpinner.warn(`TypeScript: ${categorizedTypeCheck.existingErrorCount} existing error${categorizedTypeCheck.existingErrorCount > 1 ? 's' : ''} (no tracked edits)`);
- }
- else if (categorizedTypeCheck.newErrorCount === 0) {
- typeSpinner.warn(`TypeScript: ${categorizedTypeCheck.existingErrorCount} existing error${categorizedTypeCheck.existingErrorCount > 1 ? 's' : ''} (warnings)`);
- }
- else if (categorizedTypeCheck.existingErrorCount === 0) {
- typeSpinner.fail(`TypeScript: ${categorizedTypeCheck.newErrorCount} new error${categorizedTypeCheck.newErrorCount > 1 ? 's' : ''}`);
+ const jsonMode = Boolean(options.json || options.ci);
+ const startedAt = new Date().toISOString();
+ let exitCode = 0;
+ let report;
+ let runDirForEnvelope;
+ let setupError;
+ if (options.ci) {
+ chalk_1.default.level = 0;
+ }
+ const log = (...args) => {
+ if (!jsonMode)
+ console.log(...args);
+ };
+ const createSpinner = (text) => {
+ if (jsonMode)
+ return null;
+ return (0, ora_1.default)({ text, spinner: 'dots' }).start();
+ };
+ try {
+ if (options.ci && options.headed) {
+ if (!jsonMode) {
+ console.log(chalk_1.default.red('✗ Invalid arguments: --ci cannot be combined with --headed\n'));
+ }
+ exitCode = 2;
+ setupError = { code: 'INVALID_ARGS', message: '--ci cannot be combined with --headed' };
+ return;
  }
- else {
- typeSpinner.fail(`TypeScript: ${categorizedTypeCheck.newErrorCount} new, ${categorizedTypeCheck.existingErrorCount} existing`);
+ log(chalk_1.default.bold('\n🧪 Invariant - Running Tests\n'));
+ const cwd = process.cwd();
+ const configInfo = (0, detect_js_1.findPlaywrightConfig)(cwd);
+ if (!configInfo.found) {
+ if (!jsonMode) {
+ console.log(chalk_1.default.red('✗ No playwright.config.ts found\n'));
+ console.log(chalk_1.default.white('To fix this:'));
+ console.log(chalk_1.default.gray(' 1. Run: ') + chalk_1.default.cyan('invariant init'));
+ console.log(chalk_1.default.gray(' 2. Or create playwright.config.ts manually\n'));
+ }
+ exitCode = 2;
+ setupError = { code: 'NO_CONFIG', message: 'No playwright.config.ts found' };
+ return;
  }
- }
- // Run lint check if linters are detected
- let lintResult = null;
- const detectedLinters = (0, lint_js_1.detectLinters)(testDir);
- if (detectedLinters.eslint || detectedLinters.pylint) {
- const lintSpinner = (0, ora_1.default)({
- text: 'Running lint checks...',
- spinner: 'dots'
- }).start();
- try {
- lintResult = (0, lint_js_1.runLintCheck)(testDir, changedFiles, changedLineRanges);
- if (lintResult.blockerCountTotal > 0) {
- lintSpinner.fail(`Lint: ${lintResult.blockerCountTotal} blockers`);
+ const testDir = configInfo.dir;
+ const deletedRuns = cleanupOldRuns(testDir);
+ if (deletedRuns > 0) {
+ log(chalk_1.default.gray(`♻ Cleaned up ${deletedRuns} old run${deletedRuns > 1 ? 's' : ''} (>${RETENTION_DAYS} days)`));
+ }
+ const testsDir = (0, path_1.join)(testDir, 'tests', 'e2e');
+ const testFileCount = countTestFiles(testsDir);
+ if (testFileCount === 0) {
+ if (!jsonMode) {
+ console.log(chalk_1.default.red('✗ No test files found\n'));
+ console.log(chalk_1.default.white('Expected test files in: ') + chalk_1.default.cyan('tests/e2e/*.spec.ts'));
+ console.log(chalk_1.default.gray('\nTo fix this:'));
+ console.log(chalk_1.default.gray(' 1. Create a test file: ') + chalk_1.default.cyan('tests/e2e/example.spec.ts'));
+ console.log(chalk_1.default.gray(' 2. Or run: ') + chalk_1.default.cyan('invariant init') + chalk_1.default.gray(' to generate a sample test\n'));
  }
- else if (lintResult.warningCountTotal > 0) {
- const existWarnCount = (lintResult.tools.eslint?.existingWarnings?.length || 0) + (lintResult.tools.pylint?.existingWarnings?.length || 0);
- const existBlockCount = (lintResult.tools.eslint?.existingBlockers?.length || 0) + (lintResult.tools.pylint?.existingBlockers?.length || 0);
- const parts = [];
- if (lintResult.newWarningCountTotal > 0)
- parts.push(`${lintResult.newWarningCountTotal} new`);
- if (existWarnCount > 0)
- parts.push(`${existWarnCount} existing warning${existWarnCount > 1 ? 's' : ''}`);
- if (existBlockCount > 0)
- parts.push(`${existBlockCount} existing severity-2`);
- lintSpinner.warn(`Lint: ${parts.join(', ')}`);
+ exitCode = 2;
+ setupError = { code: 'NO_TESTS', message: 'No test files found in tests/e2e' };
+ return;
+ }
+ log(chalk_1.default.green('✓') + chalk_1.default.gray(` Project: ${testDir}`));
+ log(chalk_1.default.green('✓') + chalk_1.default.gray(` Found ${testFileCount} test file${testFileCount > 1 ? 's' : ''}`));
+ const invariantDir = (0, path_1.join)(testDir, '.invariant');
+ if ((0, fs_1.existsSync)(invariantDir)) {
+ log(chalk_1.default.green('✓') + chalk_1.default.gray(' Invariant configured'));
+ }
+ else {
+ log(chalk_1.default.yellow('!') + chalk_1.default.gray(' No .invariant directory (run `invariant init`)'));
+ }
+ const { files: changedFiles, source: changeSource } = (0, changes_js_1.getChangedFiles)(testDir);
+ const changedLineRanges = (0, changes_js_1.getChangedLineRanges)(testDir);
+ let categorizedTypeCheck = null;
+ if (projectUsesTypeScript(testDir)) {
+ const typeSpinner = createSpinner('Running TypeScript type check...');
+ const rawResult = runTypeCheck(testDir);
+ categorizedTypeCheck = categorizeTypeErrors(rawResult, testDir, changedFiles, changeSource, changedLineRanges);
+ if (categorizedTypeCheck.totalErrorCount === 0) {
+ typeSpinner?.succeed('TypeScript: 0 errors');
+ }
+ else if (categorizedTypeCheck.noChangesDetected) {
+ typeSpinner?.warn(`TypeScript: ${categorizedTypeCheck.existingErrorCount} existing error${categorizedTypeCheck.existingErrorCount > 1 ? 's' : ''} (no tracked edits)`);
+ }
+ else if (categorizedTypeCheck.newErrorCount === 0) {
+ typeSpinner?.warn(`TypeScript: ${categorizedTypeCheck.existingErrorCount} existing error${categorizedTypeCheck.existingErrorCount > 1 ? 's' : ''} (warnings)`);
+ }
+ else if (categorizedTypeCheck.existingErrorCount === 0) {
+ typeSpinner?.fail(`TypeScript: ${categorizedTypeCheck.newErrorCount} new error${categorizedTypeCheck.newErrorCount > 1 ? 's' : ''}`);
  }
  else {
- const allSkipped = (lintResult.tools.eslint?.skipped ?? true) &&
- (lintResult.tools.pylint?.skipped ?? true);
- if (allSkipped) {
- lintSpinner.info('Lint: Skipped (no targets)');
+ typeSpinner?.fail(`TypeScript: ${categorizedTypeCheck.newErrorCount} new, ${categorizedTypeCheck.existingErrorCount} existing`);
+ }
+ }
+ let lintResult = null;
+ const detectedLinters = (0, lint_js_1.detectLinters)(testDir);
+ if (detectedLinters.eslint || detectedLinters.pylint) {
+ const lintSpinner = createSpinner('Running lint checks...');
+ try {
+ lintResult = (0, lint_js_1.runLintCheck)(testDir, changedFiles, changedLineRanges);
+ if (lintResult.blockerCountTotal > 0) {
+ lintSpinner?.fail(`Lint: ${lintResult.blockerCountTotal} blockers`);
+ }
+ else if (lintResult.warningCountTotal > 0) {
+ const existWarnCount = (lintResult.tools.eslint?.existingWarnings?.length || 0) + (lintResult.tools.pylint?.existingWarnings?.length || 0);
+ const existBlockCount = (lintResult.tools.eslint?.existingBlockers?.length || 0) + (lintResult.tools.pylint?.existingBlockers?.length || 0);
+ const parts = [];
+ if (lintResult.newWarningCountTotal > 0)
+ parts.push(`${lintResult.newWarningCountTotal} new`);
+ if (existWarnCount > 0)
+ parts.push(`${existWarnCount} existing warning${existWarnCount > 1 ? 's' : ''}`);
+ if (existBlockCount > 0)
+ parts.push(`${existBlockCount} existing severity-2`);
+ lintSpinner?.warn(`Lint: ${parts.join(', ')}`);
  }
  else {
- lintSpinner.succeed('Lint: No issues');
+ const allSkipped = (lintResult.tools.eslint?.skipped ?? true) &&
+ (lintResult.tools.pylint?.skipped ?? true);
+ if (allSkipped) {
+ lintSpinner?.info('Lint: Skipped (no targets)');
+ }
+ else {
+ lintSpinner?.succeed('Lint: No issues');
+ }
+ }
+ if (!jsonMode && lintResult.tools.eslint?.skipReason === 'error') {
+ console.log(chalk_1.default.yellow(`\n⚠ ESLint did not run: ${lintResult.tools.eslint.runError?.slice(0, 100)}`));
+ console.log(chalk_1.default.gray(' Proof may be weaker - ESLint errors not checked.\n'));
+ }
+ if (!jsonMode && lintResult.tools.pylint?.skipReason === 'error') {
+ console.log(chalk_1.default.yellow(`\n⚠ Pylint did not run: ${lintResult.tools.pylint.runError?.slice(0, 100)}`));
+ console.log(chalk_1.default.gray(' Proof may be weaker - Pylint errors not checked.\n'));
+ }
+ if (!jsonMode && lintResult.context.setupWarnings) {
+ for (const warning of lintResult.context.setupWarnings) {
+ console.log(chalk_1.default.yellow(`\n⚠ ${warning}`));
+ }
  }
  }
- // Surface "did not run" warnings
- if (lintResult.tools.eslint?.skipReason === 'error') {
- console.log(chalk_1.default.yellow(`\n⚠ ESLint did not run: ${lintResult.tools.eslint.runError?.slice(0, 100)}`));
- console.log(chalk_1.default.gray(' Proof may be weaker - ESLint errors not checked.\n'));
- }
- if (lintResult.tools.pylint?.skipReason === 'error') {
- console.log(chalk_1.default.yellow(`\n⚠ Pylint did not run: ${lintResult.tools.pylint.runError?.slice(0, 100)}`));
- console.log(chalk_1.default.gray(' Proof may be weaker - Pylint errors not checked.\n'));
+ catch (error) {
+ lintSpinner?.warn(`Lint: Error - ${error.message?.slice(0, 100)}`);
  }
- // Surface setup warnings (e.g. TS files skipped because no parser)
- if (lintResult.context.setupWarnings) {
- for (const warning of lintResult.context.setupWarnings) {
- console.log(chalk_1.default.yellow(`\n⚠ ${warning}`));
+ }
+ log();
+ const args = ['playwright', 'test'];
+ if (options.headed) {
+ args.push('--headed');
+ }
+ args.push('--reporter=list,json');
+ if (options.quick) {
+ args.push('--max-failures=1');
+ }
+ const timestamp = new Date().toISOString().replace(/[:.]/g, '-').slice(0, 19);
+ const runDir = (0, path_1.join)(testDir, '.invariant', 'runs', timestamp);
+ runDirForEnvelope = runDir;
+ (0, fs_1.mkdirSync)(runDir, { recursive: true });
+ const spinner = createSpinner('Running Playwright tests...');
+ const startTime = Date.now();
+ try {
+ const result = (0, child_process_1.execSync)(`npx ${args.join(' ')}`, {
+ cwd: testDir,
+ encoding: 'utf-8',
+ stdio: ['pipe', 'pipe', 'pipe'],
+ env: {
+ ...process.env,
+ PLAYWRIGHT_OUTPUT_DIR: (0, path_1.join)(testDir, 'test-results'),
+ PLAYWRIGHT_JSON_OUTPUT_NAME: (0, path_1.join)(testDir, 'test-results', 'report.json'),
+ INVARIANT_RUN_DIR: runDir,
  }
+ });
+ const duration = ((Date.now() - startTime) / 1000).toFixed(1);
+ spinner?.succeed(`Tests completed in ${duration}s`);
+ const visualResult = (0, visual_diff_js_1.processVisualRegression)(testDir, runDir);
+ report = await (0, report_js_1.generateProofReport)(testDir, runDir, {
+ passed: true,
+ duration: parseFloat(duration),
+ output: result,
+ typeCheck: categorizedTypeCheck ? {
+ enabled: true,
+ passed: categorizedTypeCheck.passed,
+ totalErrorCount: categorizedTypeCheck.totalErrorCount,
+ newErrorCount: categorizedTypeCheck.newErrorCount,
+ existingErrorCount: categorizedTypeCheck.existingErrorCount,
+ newErrors: categorizedTypeCheck.newErrors,
+ existingErrors: categorizedTypeCheck.existingErrors,
+ noChangesDetected: categorizedTypeCheck.noChangesDetected,
+ } : undefined,
+ lintCheck: lintResult ?? undefined,
+ visual: visualResult.enabled ? visualResult : undefined,
+ });
+ if (!jsonMode) {
+ printProofReport(report, runDir, testDir);
  }
+ await (0, auto_sync_js_1.autoSyncRun)(runDir, timestamp, jsonMode);
+ exitCode = report.passed ? 0 : 1;
  }
  catch (error) {
- lintSpinner.warn(`Lint: Error - ${error.message?.slice(0, 100)}`);
+ const duration = ((Date.now() - startTime) / 1000).toFixed(1);
+ const errorOutput = error.stderr || error.message || '';
+ const stdout = error.stdout || '';
+ if (errorOutput.includes('Executable doesn\'t exist') || errorOutput.includes('browserType.launch')) {
+ spinner?.fail('Playwright browser not installed');
+ if (!jsonMode) {
+ console.log(chalk_1.default.yellow('\nBrowsers are required to run tests.\n'));
+ console.log(chalk_1.default.white('To fix this, run:'));
+ console.log(chalk_1.default.cyan(` cd ${testDir} && npx playwright install chromium\n`));
+ }
+ exitCode = 2;
+ setupError = { code: 'NO_BROWSER', message: 'Playwright browser not installed' };
+ return;
+ }
+ const playwrightTimeoutPatterns = [
+ /Test timeout of \d+ms exceeded/i,
+ /Timeout \d+ms exceeded/i,
+ /waiting for locator.*Timeout/i,
+ /page\.waitFor.*Timeout/i,
+ /locator\..*Timeout \d+ms exceeded/i,
+ /exceeded timeout/i,
+ ];
+ const combinedOutput = errorOutput + stdout;
+ const isPlaywrightTimeout = playwrightTimeoutPatterns.some(pattern => pattern.test(combinedOutput));
+ if (isPlaywrightTimeout) {
+ spinner?.fail(`Tests timed out after ${duration}s`);
+ if (!jsonMode) {
+ console.log(chalk_1.default.yellow('\nTests took too long to complete.\n'));
+ console.log(chalk_1.default.white('Possible causes:'));
+ console.log(chalk_1.default.gray(' - Web server not running (start with npm run dev)'));
+ console.log(chalk_1.default.gray(' - Slow network or application'));
+ console.log(chalk_1.default.gray(' - Incorrect selectors in tests\n'));
+ }
+ }
+ else {
+ spinner?.stop();
+ }
+ const visualResult = (0, visual_diff_js_1.processVisualRegression)(testDir, runDir);
+ report = await (0, report_js_1.generateProofReport)(testDir, runDir, {
+ passed: false,
+ duration: parseFloat(duration),
+ output: stdout,
+ error: errorOutput,
+ typeCheck: categorizedTypeCheck ? {
+ enabled: true,
+ passed: categorizedTypeCheck.passed,
+ totalErrorCount: categorizedTypeCheck.totalErrorCount,
+ newErrorCount: categorizedTypeCheck.newErrorCount,
+ existingErrorCount: categorizedTypeCheck.existingErrorCount,
+ newErrors: categorizedTypeCheck.newErrors,
+ existingErrors: categorizedTypeCheck.existingErrors,
+ noChangesDetected: categorizedTypeCheck.noChangesDetected,
+ } : undefined,
+ lintCheck: lintResult ?? undefined,
+ visual: visualResult.enabled ? visualResult : undefined,
+ });
+ if (report.passed) {
+ spinner?.warn(`Tests completed with warnings in ${duration}s`);
+ }
+ else {
+ spinner?.fail(`Tests failed after ${duration}s`);
+ }
+ if (!jsonMode) {
+ printProofReport(report, runDir, testDir);
+ }
+ await (0, auto_sync_js_1.autoSyncRun)(runDir, timestamp, jsonMode);
+ exitCode = report.passed ? 0 : 1;
  }
  }
- console.log();
- // Build playwright command
- const args = ['playwright', 'test'];
- if (options.headed) {
- args.push('--headed');
- }
- // Always include JSON reporter for structured proof data alongside list for terminal output
- args.push('--reporter=list,json');
- if (options.quick) {
- args.push('--max-failures=1');
- }
- // Create timestamp for this run
- const timestamp = new Date().toISOString().replace(/[:.]/g, '-').slice(0, 19);
- const runDir = (0, path_1.join)(testDir, '.invariant', 'runs', timestamp);
- (0, fs_1.mkdirSync)(runDir, { recursive: true });
- const spinner = (0, ora_1.default)({
- text: 'Running Playwright tests...',
- spinner: 'dots'
- }).start();
- const startTime = Date.now();
- try {
- // Run playwright tests
- const result = (0, child_process_1.execSync)(`npx ${args.join(' ')}`, {
- cwd: testDir,
- encoding: 'utf-8',
- stdio: ['pipe', 'pipe', 'pipe'],
- env: {
- ...process.env,
- // Ensure artifacts go to the right place
- PLAYWRIGHT_OUTPUT_DIR: (0, path_1.join)(testDir, 'test-results'),
- // JSON reporter output for structured proof data
- PLAYWRIGHT_JSON_OUTPUT_NAME: (0, path_1.join)(testDir, 'test-results', 'report.json'),
- // Tell visual fixture where to save screenshots
- INVARIANT_RUN_DIR: runDir,
- }
- });
- const duration = ((Date.now() - startTime) / 1000).toFixed(1);
- spinner.succeed(`Tests completed in ${duration}s`);
- // Process visual regression (auto-capture screenshots compared to baselines)
- const visualResult = (0, visual_diff_js_1.processVisualRegression)(testDir, runDir);
- // Parse results and generate proof report
- const report = await (0, report_js_1.generateProofReport)(testDir, runDir, {
- passed: true,
- duration: parseFloat(duration),
- output: result,
- typeCheck: categorizedTypeCheck ? {
- enabled: true,
- passed: categorizedTypeCheck.passed,
- totalErrorCount: categorizedTypeCheck.totalErrorCount,
- newErrorCount: categorizedTypeCheck.newErrorCount,
- existingErrorCount: categorizedTypeCheck.existingErrorCount,
- newErrors: categorizedTypeCheck.newErrors,
- existingErrors: categorizedTypeCheck.existingErrors,
- noChangesDetected: categorizedTypeCheck.noChangesDetected,
- } : undefined,
- lintCheck: lintResult ?? undefined,
- visual: visualResult.enabled ? visualResult : undefined,
- });
- printProofReport(report, runDir, testDir);
- // Auto-sync to cloud if logged in
- await (0, auto_sync_js_1.autoSyncRun)(runDir, timestamp);
+ catch (err) {
+ exitCode = 2;
+ setupError = {
+ code: 'INTERNAL_ERROR',
+ message: err?.message || 'Unexpected internal error',
+ };
  }
- catch (error) {
- const duration = ((Date.now() - startTime) / 1000).toFixed(1);
- // Check for common error types
- const errorOutput = error.stderr || error.message || '';
- const stdout = error.stdout || '';
- // Browser not installed
- if (errorOutput.includes('Executable doesn\'t exist') || errorOutput.includes('browserType.launch')) {
- spinner.fail('Playwright browser not installed');
- console.log(chalk_1.default.yellow('\nBrowsers are required to run tests.\n'));
- console.log(chalk_1.default.white('To fix this, run:'));
- console.log(chalk_1.default.cyan(` cd ${testDir} && npx playwright install chromium\n`));
- process.exit(1);
- }
- // Timeout error - match Playwright-specific timeout patterns, not just any text with "Timeout"
- const playwrightTimeoutPatterns = [
- /Test timeout of \d+ms exceeded/i,
- /Timeout \d+ms exceeded/i,
- /waiting for locator.*Timeout/i,
- /page\.waitFor.*Timeout/i,
- /locator\..*Timeout \d+ms exceeded/i,
- /exceeded timeout/i,
- ];
- const combinedOutput = errorOutput + stdout;
- const isPlaywrightTimeout = playwrightTimeoutPatterns.some(pattern => pattern.test(combinedOutput));
- if (isPlaywrightTimeout) {
- spinner.fail(`Tests timed out after ${duration}s`);
- console.log(chalk_1.default.yellow('\nTests took too long to complete.\n'));
- console.log(chalk_1.default.white('Possible causes:'));
- console.log(chalk_1.default.gray(' - Web server not running (start with npm run dev)'));
- console.log(chalk_1.default.gray(' - Slow network or application'));
- console.log(chalk_1.default.gray(' - Incorrect selectors in tests\n'));
- }
- else {
- // Don't show fail message yet - we need to check if it's just warnings
- spinner.stop();
- }
- // Process visual regression even on failure (captures may still exist)
- const visualResult = (0, visual_diff_js_1.processVisualRegression)(testDir, runDir);
- // Generate proof report for failures
- const report = await (0, report_js_1.generateProofReport)(testDir, runDir, {
- passed: false,
- duration: parseFloat(duration),
- output: stdout,
- error: errorOutput,
- typeCheck: categorizedTypeCheck ? {
- enabled: true,
- passed: categorizedTypeCheck.passed,
- totalErrorCount: categorizedTypeCheck.totalErrorCount,
- newErrorCount: categorizedTypeCheck.newErrorCount,
- existingErrorCount: categorizedTypeCheck.existingErrorCount,
- newErrors: categorizedTypeCheck.newErrors,
- existingErrors: categorizedTypeCheck.existingErrors,
- noChangesDetected: categorizedTypeCheck.noChangesDetected,
- } : undefined,
- lintCheck: lintResult ?? undefined,
- visual: visualResult.enabled ? visualResult : undefined,
- });
- // Now show appropriate spinner message based on actual result
- if (report.passed) {
- // Only warnings (visual/a11y) - show warning message, not failure
- spinner.warn(`Tests completed with warnings in ${duration}s`);
- }
- else {
- // Real failures
- spinner.fail(`Tests failed after ${duration}s`);
- }
- printProofReport(report, runDir, testDir);
- // Check if proof actually passed (only warnings like visual/a11y, no real failures)
- // If so, don't exit with error code - AI agents can continue
- if (report.passed) {
- // Auto-sync to cloud if logged in
- await (0, auto_sync_js_1.autoSyncRun)(runDir, timestamp);
- // Proof passed (only warnings) - exit cleanly
- return;
+ finally {
+ if (jsonMode) {
+ const envelope = {
+ envelopeVersion: 1,
+ ok: exitCode === 0,
+ exitCode,
+ timestamp: new Date().toISOString(),
+ ...(report ? { report } : {}),
+ ...(setupError ? { error: setupError } : {}),
+ ...(runDirForEnvelope ? { runDir: runDirForEnvelope } : {}),
+ };
+ process.stdout.write(`${JSON.stringify(envelope)}\n`);
+ }
+ if (exitCode !== 0) {
+ process.exit(exitCode);
  }
- // Real failures - still sync, then exit with error code
- await (0, auto_sync_js_1.autoSyncRun)(runDir, timestamp);
- process.exit(1);
  }
  }
  // === Box formatting helpers ===
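For readers wiring the new CI behaviour into automation: the hunk above shows runCommand emitting a single-line JSON envelope on stdout (envelopeVersion, ok, exitCode, timestamp, plus optional report, error, and runDir) and reserving exit code 2 for setup problems (INVALID_ARGS, NO_CONFIG, NO_TESTS, NO_BROWSER, INTERNAL_ERROR). The snippet below is a minimal consumer sketch, not part of the package; it assumes the run subcommand is invoked as `npx invariant run --ci` (the subcommand name is not visible in this hunk) and relies only on the envelope fields shown above.

// Hypothetical CI wrapper (illustration only, not shipped with @invariant-ai/cli).
// Assumes `invariant run --ci` prints the JSON envelope from the diff as the
// last line of stdout; everything else is suppressed in jsonMode.
const { spawnSync } = require('node:child_process');

const proc = spawnSync('npx', ['invariant', 'run', '--ci'], { encoding: 'utf-8' });
const lastLine = (proc.stdout || '').trim().split('\n').pop() || '{}';

let envelope = {};
try {
  envelope = JSON.parse(lastLine);
} catch {
  console.error('Could not parse envelope from stdout:', lastLine);
  process.exit(1);
}

if (envelope.ok) {
  // Exit code 0: proof passed; runDir points at .invariant/runs/<timestamp>.
  console.log('Proof passed', envelope.runDir ?? '');
} else if (envelope.exitCode === 2 && envelope.error) {
  // Exit code 2: setup error (NO_CONFIG, NO_TESTS, NO_BROWSER, INVALID_ARGS, INTERNAL_ERROR).
  console.error(`Setup error ${envelope.error.code}: ${envelope.error.message}`);
} else {
  // Exit code 1: tests ran but the proof failed; details are in envelope.report.
  console.error('Tests failed; see the attached report.');
}
process.exit(proc.status ?? 1);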