@zibby/cli 0.1.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,926 @@
1
+ import { runTest, logger, DEFAULT_OUTPUT_BASE, SESSIONS_DIR, DEFAULT_MODELS } from '@zibby/core';
2
+ import { readFileSync, existsSync, statSync } from 'fs';
3
+ import { resolve, join } from 'path';
4
+ import { glob } from 'glob';
5
+ import chalk from 'chalk';
6
+ import ora from 'ora';
7
+ import dotenv from 'dotenv';
8
+ import fetch from 'node-fetch';
9
+ import open from 'open';
10
+ import { getApiUrl, getAccountApiUrl, getCurrentEnvironment, getFrontendUrl } from '../config/environments.js';
11
+ import { getSessionToken, getUserInfo } from '../config/config.js';
12
+
13
// Suppress dotenv's informational logging before any .env file is parsed.
process.env.DOTENV_CONFIG_QUIET = 'true';

// Layered environment loading: .env.local takes precedence over
// .env.<NODE_ENV>, which takes precedence over plain .env. Files are loaded
// in priority order with override disabled, so earlier (higher-priority)
// values are never clobbered by later files.
const nodeEnv = process.env.NODE_ENV || 'development';
const envFiles = [
  resolve(process.cwd(), '.env.local'),
  resolve(process.cwd(), `.env.${nodeEnv}`),
  resolve(process.cwd(), '.env')
];

for (const envFile of envFiles) {
  if (existsSync(envFile)) {
    dotenv.config({ path: envFile, override: false });
  }
}
27
+
28
/**
 * Map a spec file path to the path of its generated Playwright test.
 *
 * Swaps the configured specs directory for the generated-tests directory
 * (first occurrence) and rewrites a trailing `.txt` to `.spec.js`.
 *
 * @param {string} specPath - Spec path, e.g. "test-specs/auth/login.txt".
 * @param {object} [config] - Optional config; reads paths.specs / paths.generated.
 * @returns {string} Generated test path, e.g. "tests/auth/login.spec.js".
 */
function getGeneratedTestPath(specPath, config) {
  const fromDir = config?.paths?.specs || 'test-specs';
  const toDir = config?.paths?.generated || 'tests';
  const relocated = specPath.replace(fromDir, toDir);
  return relocated.replace(/\.txt$/, '.spec.js');
}
36
/**
 * Open the cloud results page for a finished execution in the default browser.
 *
 * Both IDs are validated against strict formats (ULID or UUID for the
 * execution, UUID for the project) before being interpolated into the URL,
 * and a non-localhost frontend URL must use HTTPS. On any validation failure
 * an error is printed and `false` is returned instead of throwing.
 *
 * @param {string} executionId - ULID or UUID identifying the execution.
 * @param {string} projectId - UUID identifying the owning project.
 * @returns {Promise<boolean>} true when the browser was launched successfully.
 */
async function openTestResultsInBrowser(executionId, projectId) {
  const frontendUrl = getFrontendUrl();

  // Strict ID patterns prevent crafted IDs from injecting path segments.
  const ulidPattern = /^[0-9A-HJKMNP-TV-Z]{26}$/;
  const uuidPattern = /^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i;

  const executionIdOk = ulidPattern.test(executionId) || uuidPattern.test(executionId);
  if (!executionIdOk) {
    console.log(chalk.red(`\n❌ Invalid execution ID format (security check)\n`));
    return false;
  }

  if (!uuidPattern.test(projectId)) {
    console.log(chalk.red(`\n❌ Invalid project ID format (security check)\n`));
    return false;
  }

  // Anything that is not loopback is treated as production and must be HTTPS.
  const parsedUrl = new URL(frontendUrl);
  const isLocal = frontendUrl.includes('localhost') || frontendUrl.includes('127.0.0.1');
  if (!isLocal && parsedUrl.protocol !== 'https:') {
    console.log(chalk.red(`\n❌ Frontend URL must use HTTPS in production\n`));
    return false;
  }

  const url = `${frontendUrl}/projects/${projectId}/runs/${executionId}`;

  try {
    console.log(chalk.cyan(`\nOpening test results in browser...`));
    console.log(chalk.gray(` ${url}\n`));
    await open(url);
    return true;
  } catch (_error) {
    // Launching a browser can fail (e.g. headless hosts); fall back to
    // printing the URL for the user to open manually.
    console.log(chalk.yellow(`Could not open browser automatically`));
    console.log(chalk.gray(` Please open manually: ${url}\n`));
    return false;
  }
}
74
+
75
/**
 * Map a generated Playwright test path back to its source spec path.
 *
 * Inverse of getGeneratedTestPath: swaps the generated-tests directory for
 * the specs directory (first occurrence) and rewrites a trailing `.spec.js`
 * to `.txt`.
 *
 * @param {string} generatedPath - Generated test path, e.g. "tests/auth/login.spec.js".
 * @param {object} [config] - Optional config; reads paths.specs / paths.generated.
 * @returns {string} Source spec path, e.g. "test-specs/auth/login.txt".
 */
function getSourceTestPath(generatedPath, config) {
  const specsDir = config?.paths?.specs || 'test-specs';
  const generatedDir = config?.paths?.generated || 'tests';
  const relocated = generatedPath.replace(generatedDir, specsDir);
  return relocated.replace(/\.spec\.js$/, '.txt');
}
83
/**
 * Upload a completed test run to Zibby Cloud: locates the session folder and
 * its artifacts (video, events.json, result.json, title.txt, generated test,
 * source instructions), validates sizes, requests signed upload URLs from the
 * API, PUTs each artifact, then confirms the upload.
 *
 * Sequence (order matters — confirm must follow all artifact PUTs):
 *   1. locate session folder,  2. resolve optional collection,
 *   3. size + local limit check (video ≤ 50MB, total ≤ 60MB),
 *   4. POST /executions/upload for signed URLs (handles 413/429 errors),
 *   5. PUT video (required), events, generated test, instructions, result.json,
 *   6. POST /executions/:id/confirm.
 *
 * @param {object} args
 * @param {string} args.apiKey - Project API token, sent as Bearer auth.
 * @param {string} args.specPath - Spec path relative to cwd (used to derive
 *   generated-test and instruction paths).
 * @param {string} args.fullSpecPath - Absolute spec path (fallback for specContent).
 * @param {string} [args.collectionIdOrName] - Collection to attach to (resolved
 *   or created server-side); omitted → orphan execution.
 * @param {string} [args.folder] - Folder name recorded in metadata.
 * @param {object|string} [args.agent] - Agent config object or provider string.
 * @param {object} args.config - Loaded CLI config (paths, etc.).
 * @param {object} [args.result] - runTest result; result.state.sessionPath, when
 *   present, pins the session folder for this run.
 * @returns {Promise<{success: boolean, executionId?: string, projectId?: (string|null)}|undefined>}
 *   undefined when skipped (no session/video); {success:false} on error —
 *   errors are reported but never rethrown, so the CLI continues.
 */
async function uploadExecution({ apiKey, specPath, fullSpecPath, collectionIdOrName, folder, agent, config, result, shouldOpen: _shouldOpen }) {
  const apiUrl = getApiUrl();
  const env = getCurrentEnvironment();

  // ZIBBY_USER_TOKEN (CI personal access token) takes precedence over the
  // locally stored login session.
  const sessionToken = process.env.ZIBBY_USER_TOKEN || getSessionToken();
  const userInfo = getUserInfo();

  if (userInfo) {
    console.log(chalk.gray(`Authenticated: ${userInfo.email} (${userInfo.name || 'User'})`));
  } else {
    console.log(chalk.gray('Authenticated: Using Personal Access Token'));
  }

  const uploadSpinner = ora(`Uploading to ${env.name}...`).start();

  try {
    const cwd = process.cwd();

    // Use session path from the current run's result (not latest folder),
    // normalized to be relative to cwd when possible.
    let latestSession = null;
    if (result?.state?.sessionPath) {
      const absSessionPath = result.state.sessionPath;
      latestSession = absSessionPath.startsWith(cwd)
        ? absSessionPath.slice(cwd.length + 1)
        : absSessionPath;
    }

    // Fallback: pick the most recently modified session folder on disk.
    if (!latestSession) {
      const outputBase = config.paths?.output || DEFAULT_OUTPUT_BASE;
      const sessionsPattern = `${outputBase}/${SESSIONS_DIR}/*`;

      const sessionFolders = glob.sync(sessionsPattern, { cwd })
        .filter(f => {
          const stat = statSync(join(cwd, f));
          // Directories only; names containing "session_" are excluded.
          return stat.isDirectory() && !/session_/.test(f);
        })
        .map(f => ({ path: f, stat: statSync(join(cwd, f)) }))
        .sort((a, b) => b.stat.mtimeMs - a.stat.mtimeMs);

      if (sessionFolders.length === 0) {
        uploadSpinner.warn('No session folder found, skipping upload');
        return;
      }

      latestSession = sessionFolders[0].path;
    }
    // Check multiple locations for video files (first match wins):
    // 1. execute_live/videos/*.webm (Cursor MCP browser)
    // 2. execute_live/*.webm (legacy location)
    // 3. *.webm (Claude Agent SDK MCP server)
    let videoFiles = glob.sync(`${latestSession}/execute_live/videos/*.webm`, { cwd });
    if (videoFiles.length === 0) {
      videoFiles = glob.sync(`${latestSession}/execute_live/*.webm`, { cwd });
    }
    if (videoFiles.length === 0) {
      videoFiles = glob.sync(`${latestSession}/*.webm`, { cwd });
    }
    const eventsPath = join(cwd, latestSession, 'execute_live', 'events.json');
    const resultPath = join(cwd, latestSession, 'execute_live', 'result.json');
    const titlePath = join(cwd, latestSession, 'title.txt');

    // Get generated test from configured path (e.g., tests/auth/login.spec.js)
    const generatedTestPath = getGeneratedTestPath(specPath, config);
    const fullGeneratedTestPath = join(cwd, generatedTestPath);
    const generatedTestExists = existsSync(fullGeneratedTestPath);

    // The video is the one mandatory artifact — bail out without it.
    if (videoFiles.length === 0) {
      uploadSpinner.warn('No video file found in session folder, skipping upload');
      return;
    }

    const videoPath = join(cwd, videoFiles[0]);
    const executionTitle = existsSync(titlePath) ? readFileSync(titlePath, 'utf-8').trim() : null;

    logger.debug(`Video exists: ${existsSync(videoPath)}`);
    logger.debug(`Events exists: ${existsSync(eventsPath)}`);

    // Handle collection creation if needed (optional)
    let collectionId = null;
    let collectionName = null;

    if (collectionIdOrName) {
      uploadSpinner.text = 'Resolving collection...';
      const accountApiUrl = getAccountApiUrl();
      // Server resolves an existing collection by id/name or creates a new one.
      const collectionResponse = await fetch(`${accountApiUrl}/collections/resolve`, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          'Authorization': `Bearer ${apiKey}`,
        },
        body: JSON.stringify({
          collectionIdOrName
        }),
      });

      if (!collectionResponse.ok) {
        // Body may not be JSON on failure; fall back to a generic message.
        const errorData = await collectionResponse.json().catch(() => ({}));
        throw new Error(errorData.error || 'Failed to resolve collection');
      }

      const collectionData = await collectionResponse.json();
      collectionId = collectionData.collectionId;
      collectionName = collectionData.collectionName;

      if (collectionData.isNew) {
        uploadSpinner.text = `Created new collection: ${collectionName}`;
      }
    }

    // Step 1: Calculate file sizes first (for quota check)
    uploadSpinner.text = 'Preparing upload...';

    // Sizes are byte lengths of the on-disk files (0 when absent).
    const fileSizes = {
      video: existsSync(videoPath) ? readFileSync(videoPath).length : 0,
      events: existsSync(eventsPath) ? readFileSync(eventsPath).length : 0,
      instructions: (() => {
        const sourceTestPath = getSourceTestPath(specPath, config);
        const fullSourceTestPath = join(cwd, sourceTestPath);
        return existsSync(fullSourceTestPath) ? readFileSync(fullSourceTestPath).length : 0;
      })(),
      result: existsSync(resultPath) ? readFileSync(resultPath).length : 0,
    };

    // Validate file sizes locally before requesting upload
    const totalSize = fileSizes.video + fileSizes.events + fileSizes.instructions + fileSizes.result;
    const totalSizeMB = (totalSize / 1024 / 1024).toFixed(2);

    // Local hard limits: 50MB for the video alone, 60MB combined.
    if (fileSizes.video > 50 * 1024 * 1024) {
      throw new Error(`Video file too large: ${(fileSizes.video / 1024 / 1024).toFixed(2)}MB (max: 50MB)`);
    }
    if (totalSize > 60 * 1024 * 1024) {
      throw new Error(`Total upload size too large: ${totalSizeMB}MB (max: 60MB)`);
    }

    // Step 2: Initiate upload (with file sizes for quota check)
    uploadSpinner.text = `Getting upload URLs... (${totalSizeMB}MB)`;

    const headers = {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${apiKey}`,
    };

    // Add user session token for two-layer auth (if logged in)
    if (sessionToken) {
      headers['X-User-Token'] = sessionToken;
    }

    // agent may be a config object ({ provider, claude: { model }, ... }) or a
    // bare provider string. Model 'auto' maps to the default Claude model.
    const agentProvider = typeof agent === 'object' ? agent.provider : (agent || 'cursor');
    const rawModel = typeof agent === 'object' ? (agent[agentProvider]?.model || null) : null;
    const agentModel = (rawModel === 'auto' && agentProvider === 'claude')
      ? DEFAULT_MODELS.CLAUDE
      : rawModel;

    const initResponse = await fetch(`${apiUrl}/executions/upload`, {
      method: 'POST',
      headers,
      body: JSON.stringify({
        collectionId: collectionId || null,
        specPath,
        // Prefer the source .txt instructions; fall back to the spec file itself.
        specContent: (() => {
          const sourceTestPath = getSourceTestPath(specPath, config);
          const fullSourceTestPath = join(cwd, sourceTestPath);
          if (existsSync(fullSourceTestPath)) {
            return readFileSync(fullSourceTestPath, 'utf-8');
          }
          return existsSync(fullSpecPath) ? readFileSync(fullSpecPath, 'utf-8') : '';
        })(),
        title: executionTitle || null,
        agentType: agentProvider,
        model: agentModel,
        metadata: {
          agent: agent || 'cursor',
          timestamp: new Date().toISOString(),
          folder: folder || null,
          collectionName: collectionName || null,
        },
        fileSizes,
      }),
    });

    if (!initResponse.ok) {
      const error = await initResponse.json();

      // Handle file size limit errors (413 Payload Too Large)
      if (initResponse.status === 413) {
        if (error.details && Array.isArray(error.details)) {
          throw new Error(
            `File size limit exceeded:\n${
              error.details.map(d => ` • ${d}`).join('\n')
            }\n\nCompress your files or reduce recording quality.`
          );
        }
        throw new Error(error.error || 'Upload files too large');
      }

      // Handle quota exceeded errors (429): either a per-period verification
      // quota (quotaInfo) or a storage quota (storageInfo).
      if (initResponse.status === 429) {
        if (error.quotaInfo) {
          throw new Error(
            `Video verification quota exceeded (${error.quotaInfo.used}/${error.quotaInfo.limit}).\n` +
            `Plan: ${error.quotaInfo.planId}. Resets: ${new Date(error.quotaInfo.periodEnd).toLocaleDateString()}\n` +
            `Upgrade at: https://studio.zibby.app/billing`
          );
        } else if (error.storageInfo) {
          const usedGB = (error.storageInfo.used / (1024 * 1024 * 1024)).toFixed(2);
          const limitGB = (error.storageInfo.limit / (1024 * 1024 * 1024)).toFixed(2);
          const uploadMB = (error.storageInfo.uploadSize / (1024 * 1024)).toFixed(2);
          throw new Error(
            `Storage quota would be exceeded.\n` +
            `Current usage: ${usedGB}GB / ${limitGB}GB\n` +
            `This upload: ${uploadMB}MB\n` +
            `Plan: ${error.storageInfo.planId}\n` +
            `Upgrade at: https://studio.zibby.app/billing`
          );
        }
      }

      throw new Error(error.error || 'Failed to initiate upload');
    }

    // Signed/direct upload URLs for each artifact, plus execution identity.
    const { executionId, uploadUrls, isOrphan, projectId } = await initResponse.json();

    // Step 3: Upload video (mandatory — failure aborts the whole upload)
    uploadSpinner.text = `Uploading video... (${executionId})`;
    const videoBuffer = readFileSync(videoPath);
    logger.debug(`Video buffer size: ${videoBuffer.length} bytes`);
    logger.debug('Upload URL received (redacted)');

    const videoUpload = await fetch(uploadUrls.video, {
      method: 'PUT',
      body: videoBuffer,
      headers: {
        'Content-Type': 'video/webm',
        'Content-Length': videoBuffer.length.toString(),
      },
    });

    logger.debug(`Video upload status: ${videoUpload.status} ${videoUpload.statusText}`);

    if (!videoUpload.ok) {
      const errorText = await videoUpload.text();
      throw new Error(`Failed to upload video: ${videoUpload.status} ${videoUpload.statusText} - ${errorText}`);
    }

    logger.info('Video uploaded successfully');

    // Step 4: Upload events (if exists)
    // Events are already enriched with AI descriptions by result-handler.js
    if (existsSync(eventsPath) && uploadUrls.events) {
      uploadSpinner.text = 'Uploading events...';
      const eventsBuffer = readFileSync(eventsPath);

      const eventsUpload = await fetch(uploadUrls.events, {
        method: 'PUT',
        body: eventsBuffer,
        headers: {
          'Content-Type': 'application/json',
          'Content-Length': eventsBuffer.length.toString(),
        },
      });

      logger.debug(`Events upload status: ${eventsUpload.status} ${eventsUpload.statusText}`);

      if (!eventsUpload.ok) {
        throw new Error('Failed to upload events');
      }

      logger.info('Events uploaded successfully');
    } else {
      logger.debug(`Skipping events upload - exists: ${existsSync(eventsPath)}, hasUrl: ${!!uploadUrls.events}`);
    }

    // Step 5: Upload generated test (if exists; failure aborts)
    if (generatedTestExists && uploadUrls.generatedTest) {
      uploadSpinner.text = 'Uploading generated test...';
      const generatedTestBuffer = readFileSync(fullGeneratedTestPath);
      const generatedTestUpload = await fetch(uploadUrls.generatedTest, {
        method: 'PUT',
        body: generatedTestBuffer,
        headers: {
          'Content-Type': 'application/javascript',
          'Content-Length': generatedTestBuffer.length.toString(),
        },
      });

      if (!generatedTestUpload.ok) {
        throw new Error('Failed to upload generated test');
      }
    }

    // Step 6: Upload test instructions (original .txt file if exists).
    // Non-blocking: a failed PUT is only logged.
    if (uploadUrls.testInstructions) {
      const sourceTestPath = getSourceTestPath(specPath, config);
      const fullSourceTestPath = join(cwd, sourceTestPath);

      if (existsSync(fullSourceTestPath)) {
        uploadSpinner.text = 'Uploading test instructions...';
        const testInstructionsBuffer = readFileSync(fullSourceTestPath);
        const testInstructionsUpload = await fetch(uploadUrls.testInstructions, {
          method: 'PUT',
          body: testInstructionsBuffer,
          headers: {
            'Content-Type': 'text/plain',
            'Content-Length': testInstructionsBuffer.length.toString(),
          },
        });

        if (!testInstructionsUpload.ok) {
          logger.warn('Failed to upload test instructions (non-blocking)');
        } else {
          logger.debug('Test instructions uploaded successfully');
        }
      } else {
        console.log(`ℹ️ No source test instructions found at ${sourceTestPath}`);
      }
    }

    // Step 7: Upload result.json (execution results with assertions).
    // Non-blocking: a failed PUT is only logged.
    if (uploadUrls.result && existsSync(resultPath)) {
      uploadSpinner.text = 'Uploading test results...';
      const resultBuffer = readFileSync(resultPath);
      const resultUpload = await fetch(uploadUrls.result, {
        method: 'PUT',
        body: resultBuffer,
        headers: {
          'Content-Type': 'application/json',
          'Content-Length': resultBuffer.length.toString(),
        },
      });

      if (!resultUpload.ok) {
        logger.warn('Failed to upload result.json (non-blocking)');
      } else {
        logger.debug('result.json uploaded successfully');
      }
    }

    // Step 8: Confirm upload (fileSizes already calculated above)
    uploadSpinner.text = 'Confirming upload...';

    const confirmResponse = await fetch(`${apiUrl}/executions/${executionId}/confirm`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${apiKey}`,
      },
      body: JSON.stringify({
        status: 'completed',
        metadata: {
          uploadedAt: new Date().toISOString(),
        },
        fileSizes,
      }),
    });

    if (!confirmResponse.ok) {
      throw new Error('Failed to confirm upload');
    }

    uploadSpinner.succeed('Uploaded to Zibby Cloud!');

    console.log(chalk.gray(`\n${ '━'.repeat(50)}`));
    console.log(chalk.white(`Execution ID: ${chalk.cyan(executionId)}`));

    const frontendUrl = getFrontendUrl();

    if (isOrphan) {
      console.log(chalk.white(`Location: ${chalk.yellow('History tab')}`));
    } else {
      console.log(chalk.white(`Collection: ${chalk.cyan(collectionId)}`));
    }

    const executionUrl = projectId
      ? `${frontendUrl}/projects/${projectId}/runs/${executionId}`
      : `${frontendUrl}/history?execution=${executionId}`;
    console.log(chalk.white(`View execution: ${chalk.cyan(executionUrl)}`));
    console.log(chalk.gray(`${'━'.repeat(50) }\n`));

    // Return execution info for --open flag
    return {
      success: true,
      executionId,
      projectId: projectId || null,
    };

  } catch (error) {
    // Upload is best-effort: report the failure and let the CLI continue.
    uploadSpinner.fail('Upload failed');
    console.log(chalk.red(` ${error.message}`));
    if (process.env.DEBUG) {
      console.log(chalk.gray(` Stack: ${error.stack}`));
    }
    console.log(chalk.gray(' Continuing without upload...\n'));
    return { success: false };
  }
}
478
+
479
/**
 * Fetch test-case records attached to a cloud execution.
 *
 * @param {string} executionId - Execution whose test cases to fetch.
 * @param {string[]} sourceIds - Test-case IDs to retrieve.
 * @param {string} apiKey - Project API token, sent as Bearer auth.
 * @returns {Promise<object[]>} The returned test cases ([] when none).
 * @throws {Error} When the API responds with a non-2xx status.
 */
async function fetchSourceTestCases(executionId, sourceIds, apiKey) {
  const apiUrl = getApiUrl();
  // Build the URL with URL/URLSearchParams so the execution ID and the
  // comma-joined ID list are percent-encoded; previously they were
  // concatenated raw into the path and query string, so any reserved
  // character in an ID would corrupt the request.
  const url = new URL(`${apiUrl}/executions/${encodeURIComponent(executionId)}/test-cases`);
  url.searchParams.set('ids', sourceIds.join(','));
  const response = await fetch(url.toString(), {
    headers: { 'Authorization': `Bearer ${apiKey}` }
  });
  if (!response.ok) {
    throw new Error(`Failed to fetch test cases: ${response.status} ${response.statusText}`);
  }
  const data = await response.json();
  return data.testCases || [];
}
491
+
492
+ export async function runCommand(specPath, options) {
493
+ const _env = getCurrentEnvironment();
494
+
495
+ // Set up graceful shutdown on Ctrl+C
496
+ // Note: We don't handle SIGINT at this level anymore
497
+ // Let the agent's executePrompt handle it and propagate naturally
498
+
499
+ console.log(chalk.bold.cyan('\nZibby Test Automation\n'));
500
+ console.log(chalk.gray('━'.repeat(50)));
501
+
502
+ // --sources mode: fetch test cases from cloud and run them
503
+ if (options.sources) {
504
+ if (!options.execution) {
505
+ console.log(chalk.red('\n❌ --execution <id> is required when using --sources\n'));
506
+ process.exit(1);
507
+ }
508
+ const apiKey = options.project || process.env.ZIBBY_API_KEY;
509
+ if (!apiKey) {
510
+ console.log(chalk.red('\n❌ --project <id> or ZIBBY_API_KEY required when using --sources\n'));
511
+ process.exit(1);
512
+ }
513
+
514
+ const sourceIds = options.sources.split(',').map(s => s.trim());
515
+ console.log(chalk.white(`Fetching ${sourceIds.length} test case(s) from execution ${chalk.cyan(options.execution)}...`));
516
+
517
+ const testCases = await fetchSourceTestCases(options.execution, sourceIds, apiKey);
518
+ if (testCases.length === 0) {
519
+ console.log(chalk.red('\nNo test cases found for the given IDs\n'));
520
+ process.exit(1);
521
+ }
522
+
523
+ console.log(chalk.green(`✓ Fetched ${testCases.length} test case(s)`));
524
+
525
+ // Write each test case to a temp spec file and run sequentially
526
+ const { mkdirSync, writeFileSync } = await import('fs');
527
+ const tmpDir = resolve(process.cwd(), '.zibby-tmp');
528
+ mkdirSync(tmpDir, { recursive: true });
529
+
530
+ for (const tc of testCases) {
531
+ const specContent = `# ${tc.title}\n\n${tc.content}`;
532
+ const tmpSpec = resolve(tmpDir, `${tc.id}.txt`);
533
+ writeFileSync(tmpSpec, specContent, 'utf-8');
534
+ console.log(chalk.gray('━'.repeat(50)));
535
+ console.log(chalk.white(`\nRunning: ${chalk.cyan(tc.title)} (${tc.id})`));
536
+
537
+ // Recurse into runCommand with the temp spec file, carrying over options but without --sources
538
+ const childOptions = { ...options, sources: undefined, execution: undefined };
539
+ // Force sync on
540
+ childOptions.sync = true;
541
+ await runCommand(tmpSpec, childOptions);
542
+ }
543
+
544
+ // Cleanup
545
+ const { rmSync } = await import('fs');
546
+ rmSync(tmpDir, { recursive: true, force: true });
547
+ return;
548
+ }
549
+
550
+ if (!specPath && !options.sources) {
551
+ console.log(chalk.red('\n❌ Missing test spec path\n'));
552
+ console.log(chalk.white('Usage:'));
553
+ console.log(chalk.gray(' zibby run <spec-path> Run a test spec file'));
554
+ console.log(chalk.gray(' zibby run test-specs/login.txt Example\n'));
555
+ process.exit(1);
556
+ }
557
+
558
+ const fullSpecPath = resolve(process.cwd(), specPath);
559
+
560
+ if (!existsSync(fullSpecPath)) {
561
+ console.log(chalk.red(`\n❌ Test spec not found: ${specPath}\n`));
562
+ process.exit(1);
563
+ }
564
+
565
+ let config = {
566
+ headless: options.headless || false,
567
+ playwrightArtifacts: true, // Enable trace.zip generation for exact selectors
568
+ };
569
+
570
+ const configPath = resolve(process.cwd(), options.config);
571
+ if (existsSync(configPath)) {
572
+ try {
573
+ const userConfig = await import(configPath);
574
+ // Infer provider from agent config (claude/cursor key presence)
575
+ const inferProvider = (agentConfig) => {
576
+ if (agentConfig?.provider) return agentConfig.provider;
577
+ if (agentConfig?.claude) return 'claude';
578
+ if (agentConfig?.cursor) return 'cursor';
579
+ return 'claude';
580
+ };
581
+
582
+ config = {
583
+ ...config,
584
+ ...userConfig.default,
585
+ // Preserve full agent object, infer or override provider
586
+ agent: userConfig.default?.agent ? {
587
+ ...userConfig.default.agent,
588
+ provider: options.agent || inferProvider(userConfig.default.agent)
589
+ } : { provider: options.agent || 'claude' },
590
+ mcp: userConfig.default?.browser?.mcp || 'playwright',
591
+ headless: options.headless !== undefined ? options.headless : userConfig.default?.browser?.headless,
592
+ paths: userConfig.default?.paths || { specs: 'test-specs', generated: 'tests' },
593
+ cloudSync: userConfig.default?.cloudSync || false,
594
+ };
595
+ } catch (_error) {
596
+ console.log(chalk.yellow(`⚠️ Could not load config from ${options.config}`));
597
+ }
598
+ } else {
599
+ // No config file, use defaults
600
+ config.agent = { provider: options.agent || 'claude' };
601
+ config.cloudSync = false;
602
+ }
603
+
604
+ // Check cloud sync requirements BEFORE running test
605
+ let apiKey = process.env.ZIBBY_API_KEY;
606
+ const shouldSync = options.sync || (config.cloudSync === true && options.sync !== false);
607
+
608
+ // If --project flag is a token (starts with zby_), use it as API key override
609
+ if (options.project && options.project.startsWith('zby_')) {
610
+ apiKey = options.project;
611
+ console.log(chalk.gray('Using --project as API token (overriding .env)'));
612
+ }
613
+
614
+ if (shouldSync && !apiKey) {
615
+ console.log(chalk.red('\n❌ Error: Cloud sync is enabled but ZIBBY_API_KEY not set\n'));
616
+ console.log(chalk.white('Cloud sync is enabled in .zibby.config.js'));
617
+ console.log(chalk.white('You need to set ZIBBY_API_KEY in .env file\n'));
618
+ console.log(chalk.white('Option 1: Add to .env file:'));
619
+ console.log(chalk.gray(' ZIBBY_API_KEY=zby_your_key_here\n'));
620
+ console.log(chalk.white('Option 2: Disable cloud sync:'));
621
+ console.log(chalk.gray(' Edit .zibby.config.js → cloudSync: false\n'));
622
+ console.log(chalk.white('Get your API key from: https://zibby.dev/dashboard\n'));
623
+ process.exit(1);
624
+ }
625
+
626
+ if (shouldSync && apiKey) {
627
+ let sessionToken = process.env.ZIBBY_USER_TOKEN;
628
+ let userInfo;
629
+
630
+ if (sessionToken) {
631
+ console.log(chalk.gray('Using Personal Access Token from ZIBBY_USER_TOKEN'));
632
+ } else {
633
+ sessionToken = getSessionToken();
634
+ userInfo = getUserInfo();
635
+
636
+ if (!sessionToken || !userInfo) {
637
+ console.log(chalk.red('\n❌ Error: User authentication required for uploads\n'));
638
+ console.log(chalk.gray('Choose one of these options:\n'));
639
+ console.log(chalk.cyan(' Option 1 (Local Development):'));
640
+ console.log(chalk.gray(' zibby login\n'));
641
+ console.log(chalk.cyan(' Option 2 (CI/CD):'));
642
+ console.log(chalk.gray(' 1. Generate a Personal Access Token at:'));
643
+ console.log(chalk.gray(' https://zibby.app/settings/tokens'));
644
+ console.log(chalk.gray(' 2. Set environment variable:'));
645
+ console.log(chalk.gray(' export ZIBBY_USER_TOKEN=zby_pat_xxxxx\n'));
646
+ process.exit(1);
647
+ }
648
+
649
+ if (userInfo) {
650
+ console.log(chalk.gray(`Authenticated: ${userInfo.email} (${userInfo.name || 'User'})`));
651
+ }
652
+ }
653
+ }
654
+
655
+ if (shouldSync && apiKey) {
656
+ const apiUrl = getApiUrl();
657
+
658
+ try {
659
+ if (!apiKey.startsWith('zby_')) {
660
+ console.log(chalk.red(`\n❌ Invalid API token format\n`));
661
+ console.log(chalk.white('This CLI requires a project API token (starts with "zby_").'));
662
+ console.log(chalk.gray(` Format: zby_***\n`));
663
+ console.log(chalk.white('Get your project API token from settings:\n'));
664
+ console.log(chalk.gray(' Local: http://localhost:3001/settings'));
665
+ console.log(chalk.gray(' Prod: https://app.zibby.app/settings\n'));
666
+ process.exit(1);
667
+ }
668
+
669
+ const projectsResponse = await fetch(`${apiUrl}/projects`, {
670
+ headers: { 'Authorization': `Bearer ${apiKey}` }
671
+ });
672
+
673
+ if (!projectsResponse.ok) {
674
+ console.log(chalk.red(`\n❌ Unauthorized: Invalid project API token\n`));
675
+ console.log(chalk.white('Your project API token is invalid or does not have access.'));
676
+ console.log(chalk.gray(` Token format: zby_***\n`));
677
+ console.log(chalk.white('Get your project API token from settings:\n'));
678
+ console.log(chalk.gray(' Local: http://localhost:3001/settings'));
679
+ console.log(chalk.gray(' Prod: https://app.zibby.app/settings\n'));
680
+ process.exit(1);
681
+ }
682
+
683
+ const projectsData = await projectsResponse.json();
684
+ const projectsList = projectsData.projects || [];
685
+
686
+ if (!projectsList || projectsList.length === 0) {
687
+ console.log(chalk.red(`\n❌ No projects found for this token\n`));
688
+ process.exit(1);
689
+ }
690
+
691
+ // Use the first project (project tokens are scoped to a single project)
692
+ const project = projectsList[0];
693
+ console.log(chalk.green(`✓ Authorized for project: ${project.name} (${project.projectId})`));
694
+ } catch (error) {
695
+ console.log(chalk.red(`\n❌ Failed to validate API credentials: ${error.message}\n`));
696
+ process.exit(1);
697
+ }
698
+ }
699
+
700
+ if (config.agent === 'claude' && !process.env.ANTHROPIC_API_KEY) {
701
+ console.log(chalk.red('\n❌ Error: ANTHROPIC_API_KEY not set\n'));
702
+ console.log(chalk.white('Set your API key:'));
703
+ console.log(chalk.gray(' export ANTHROPIC_API_KEY="sk-ant-..."\n'));
704
+ console.log(chalk.white('Or add to .env file:'));
705
+ console.log(chalk.gray(' ANTHROPIC_API_KEY=sk-ant-...\n'));
706
+ console.log(chalk.white('Get your API key from: https://console.anthropic.com/\n'));
707
+ process.exit(1);
708
+ }
709
+
710
+ if (config.agent === 'cursor') {
711
+ const { checkCursorAgentInstalled, getCursorAgentInstallInstructions } = await import('@zibby/core');
712
+ const installed = await checkCursorAgentInstalled();
713
+
714
+ if (!installed) {
715
+ console.log(chalk.red(getCursorAgentInstallInstructions()));
716
+ process.exit(1);
717
+ }
718
+
719
+ // Check for CURSOR_API_KEY (required in CI, optional locally if Cursor IDE is installed)
720
+ const isCI = process.env.CI || process.env.GITLAB_CI || process.env.CIRCLECI || process.env.GITHUB_ACTIONS;
721
+
722
+ if (!process.env.CURSOR_API_KEY) {
723
+ if (isCI) {
724
+ // Required in CI environments
725
+ console.log(chalk.red('\n❌ Error: CURSOR_API_KEY not set\n'));
726
+ console.log(chalk.white('In CI/CD, you must provide your Cursor API key:'));
727
+ console.log(chalk.gray(' export CURSOR_API_KEY="your-cursor-token-here"\n'));
728
+ console.log(chalk.white('Get your API key from: https://cursor.com/settings\n'));
729
+ console.log(chalk.white('Add it to your CI/CD environment variables.\n'));
730
+ process.exit(1);
731
+ } else {
732
+ // Optional locally - show warning
733
+ console.log(chalk.yellow('⚠️ CURSOR_API_KEY not set (using locally stored Cursor credentials)\n'));
734
+ }
735
+ }
736
+ }
737
+
738
+ console.log(chalk.white(`Test Spec: ${chalk.cyan(specPath)}`));
739
+ const agentProvider = typeof config.agent === 'object' ? config.agent.provider : config.agent;
740
+ console.log(chalk.white(`Agent: ${chalk.cyan(agentProvider)}`));
741
+ console.log(chalk.white(`Browser: ${chalk.cyan(config.mcp || 'playwright')} ${config.headless ? chalk.gray('(headless)') : chalk.gray('(headed)')}`));
742
+ if (config.cloudSync) {
743
+ console.log(chalk.white(`Cloud Sync: ${chalk.green('enabled')}`));
744
+ }
745
+ if (options.autoApprove) {
746
+ console.log(chalk.white(`Auto-approve: ${chalk.yellow('enabled (CI/CD mode)')}`));
747
+ }
748
+ if (options.collection) {
749
+ console.log(chalk.white(`Collection: ${chalk.cyan(options.collection)}${options.folder ? ` / ${options.folder}` : ''}`));
750
+ } else {
751
+ console.log(chalk.gray(`Mode: Orphan execution (History tab)`));
752
+ }
753
+
754
+ if (process.env.ZIBBY_MEMORY) {
755
+ try {
756
+ const { DoltDB } = await import('@zibby/memory');
757
+ const doltAvailable = DoltDB.isAvailable();
758
+ const dbExists = existsSync(join(process.cwd(), '.zibby', 'memory', '.dolt'));
759
+
760
+ if (!doltAvailable) {
761
+ console.log(chalk.red(`\n❌ Memory requires Dolt but it's not installed\n`));
762
+ console.log(chalk.white(` Install:`));
763
+ console.log(chalk.gray(` brew install dolt # macOS`));
764
+ console.log(chalk.gray(` curl -L https://github.com/dolthub/dolt/releases/latest/download/install.sh | sudo bash # Linux\n`));
765
+ console.log(chalk.white(` Then initialize:`));
766
+ console.log(chalk.gray(` zibby init --mem\n`));
767
+ process.exit(1);
768
+ } else if (!dbExists) {
769
+ console.log(chalk.red(`\n❌ Memory database not initialized\n`));
770
+ console.log(chalk.white(` Run:`));
771
+ console.log(chalk.gray(` zibby init --mem\n`));
772
+ process.exit(1);
773
+ } else {
774
+ console.log(chalk.white(`Memory: ${chalk.green('enabled')}`));
775
+ }
776
+ } catch {
777
+ console.log(chalk.red(`\n❌ Memory requested but @zibby/memory package is not available\n`));
778
+ process.exit(1);
779
+ }
780
+ }
781
+
782
+ console.log(chalk.gray('━'.repeat(50)));
783
+
784
+ const spinner = ora('Initializing...').start();
785
+
786
+ try {
787
+ // Stop spinner before test execution to allow clean streaming output
788
+ spinner.stop();
789
+
790
+ const fallbackAgentModule = await import('@zibby/core/templates/browser-test-automation/graph.js').catch(() => null);
791
+
792
+ const result = await runTest(fullSpecPath, {
793
+ ...config,
794
+ specPath,
795
+ apiKey: process.env.ANTHROPIC_API_KEY,
796
+ autoApprove: options.autoApprove,
797
+ force: options.autoApprove,
798
+ singleNode: options.node,
799
+ sessionId: options.session,
800
+ workflow: options.workflow,
801
+ contextConfig: config.context,
802
+ fallbackAgentModule,
803
+ });
804
+
805
+ // Normalize output path - remove double slashes and make relative
806
+ let normalizedPath = result.outputPath || null;
807
+ if (normalizedPath) {
808
+ normalizedPath = normalizedPath.replace(/\/+/g, '/');
809
+ if (normalizedPath.includes(process.cwd())) {
810
+ normalizedPath = normalizedPath.replace(`${process.cwd() }/`, '');
811
+ }
812
+ }
813
+
814
+ // Only show success message if test was actually generated
815
+ const testGenerated = result.state?.generate_script?.scriptPath ||
816
+ result.state?.verify_script?.success;
817
+
818
+ console.log(chalk.gray(`\n${ '━'.repeat(50)}`));
819
+
820
+ if (testGenerated && normalizedPath) {
821
+ console.log(chalk.green('✓ Test execution completed'));
822
+ console.log(chalk.white(`Generated test: ${chalk.cyan(normalizedPath)}`));
823
+ if (result.toolCount !== undefined) {
824
+ console.log(chalk.white(`Tools used: ${chalk.cyan(result.toolCount)}`));
825
+ }
826
+ console.log(chalk.gray(`${'━'.repeat(50) }\n`));
827
+
828
+ console.log(chalk.cyan('Next steps:'));
829
+ console.log(chalk.white(` 1. Review generated test: ${normalizedPath}`));
830
+ console.log(chalk.white(` 2. Run with Playwright: npx playwright test ${normalizedPath}`));
831
+ console.log(chalk.white(` 3. View report: npx playwright show-report\n`));
832
+ } else {
833
+ // Workflow stopped early - check if execute_live completed
834
+ const executeLiveResult = result.state?.execute_live;
835
+
836
+ if (executeLiveResult) {
837
+ // execute_live ran but returned success=false
838
+ const hasRealExecution = executeLiveResult.steps?.length > 0 || executeLiveResult.actions?.length > 0;
839
+
840
+ if (hasRealExecution) {
841
+ // Test executed but found failures
842
+ console.log(chalk.white(`\nExecution Summary:`));
843
+ console.log(chalk.white(` Steps: ${executeLiveResult.steps?.length || 0}`));
844
+ console.log(chalk.white(` Actions: ${executeLiveResult.actions?.length || 0}`));
845
+ if (executeLiveResult.finalUrl) {
846
+ console.log(chalk.white(` Final URL: ${executeLiveResult.finalUrl}`));
847
+ }
848
+ if (executeLiveResult.notes) {
849
+ console.log(chalk.white(`\n📝 ${executeLiveResult.notes}`));
850
+ }
851
+ } else {
852
+ // execute_live couldn't run (no tools, etc.)
853
+ console.log(chalk.red('✖ Workflow did not complete'));
854
+ console.log(chalk.yellow(`\n⚠️ ${executeLiveResult.notes || 'Test execution failed'}`));
855
+ }
856
+ } else {
857
+ // Unknown failure
858
+ console.log(chalk.red('✖ Workflow did not complete'));
859
+ }
860
+
861
+ console.log(chalk.gray(`${'━'.repeat(50) }\n`));
862
+ }
863
+
864
+ // Auto-sync to cloud if enabled (already declared above)
865
+ // Skip upload for single node runs (incomplete artifacts)
866
+ if (options.node) {
867
+ console.log(chalk.gray('\nNote: Upload skipped for single node execution'));
868
+ console.log(chalk.gray(' Run full workflow to upload all artifacts\n'));
869
+ } else if (shouldSync) {
870
+ // Use --project flag as API key (preferred), otherwise fall back to ZIBBY_API_KEY
871
+ const uploadApiKey = options.project || process.env.ZIBBY_API_KEY;
872
+
873
+ if (!uploadApiKey) {
874
+ console.log(chalk.yellow('\nCloud sync enabled but no API key provided'));
875
+ console.log(chalk.gray(' Option 1: Use --project flag: --project zby_xxx'));
876
+ console.log(chalk.gray(' Option 2: Add to .env: ZIBBY_API_KEY=zby_xxx\n'));
877
+ } else {
878
+ const uploadResult = await uploadExecution({
879
+ apiKey: uploadApiKey,
880
+ specPath,
881
+ fullSpecPath,
882
+ projectId: null, // Backend extracts from API key
883
+ collectionIdOrName: options.collection || null,
884
+ folder: options.folder,
885
+ agent: config.agent,
886
+ config,
887
+ result, // Pass the result with codegenFiles
888
+ shouldOpen: options.open,
889
+ });
890
+
891
+ // Open browser if --open flag is set and upload succeeded
892
+ if (options.open && uploadResult?.success && uploadResult.executionId && uploadResult.projectId) {
893
+ await openTestResultsInBrowser(uploadResult.executionId, uploadResult.projectId);
894
+ }
895
+ }
896
+ }
897
+
898
+ } catch (error) {
899
+ // Stop spinner if still running
900
+ if (spinner.isSpinning) {
901
+ spinner.stop();
902
+ }
903
+
904
+ if (process.env.ZIBBY_MEMORY) {
905
+ try {
906
+ const { memoryEndRun } = await import('@zibby/memory');
907
+ memoryEndRun(process.cwd(), { passed: false });
908
+ } catch { /* @zibby/memory not available */ }
909
+ }
910
+
911
+ console.log(chalk.red('\n✖ Test failed\n'));
912
+
913
+ // Check if error is from user interruption
914
+ if (error.message.includes('Interrupted by user')) {
915
+ console.log(chalk.yellow('Test execution was interrupted\n'));
916
+ process.exit(130);
917
+ }
918
+
919
+ console.log(chalk.red(`Error: ${error.message}\n`));
920
+ if (error.stack) {
921
+ console.log(chalk.gray(error.stack));
922
+ }
923
+ process.exit(1);
924
+ }
925
+ }
926
+