@claude-flow/cli 3.0.0-alpha.120 → 3.0.0-alpha.122

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43) hide show
  1. package/README.md +163 -3
  2. package/dist/src/commands/index.d.ts +2 -0
  3. package/dist/src/commands/index.d.ts.map +1 -1
  4. package/dist/src/commands/index.js +7 -0
  5. package/dist/src/commands/index.js.map +1 -1
  6. package/dist/src/commands/ruvector/backup.d.ts +11 -0
  7. package/dist/src/commands/ruvector/backup.d.ts.map +1 -0
  8. package/dist/src/commands/ruvector/backup.js +746 -0
  9. package/dist/src/commands/ruvector/backup.js.map +1 -0
  10. package/dist/src/commands/ruvector/benchmark.d.ts +11 -0
  11. package/dist/src/commands/ruvector/benchmark.d.ts.map +1 -0
  12. package/dist/src/commands/ruvector/benchmark.js +480 -0
  13. package/dist/src/commands/ruvector/benchmark.js.map +1 -0
  14. package/dist/src/commands/ruvector/import.d.ts +18 -0
  15. package/dist/src/commands/ruvector/import.d.ts.map +1 -0
  16. package/dist/src/commands/ruvector/import.js +349 -0
  17. package/dist/src/commands/ruvector/import.js.map +1 -0
  18. package/dist/src/commands/ruvector/index.d.ts +29 -0
  19. package/dist/src/commands/ruvector/index.d.ts.map +1 -0
  20. package/dist/src/commands/ruvector/index.js +129 -0
  21. package/dist/src/commands/ruvector/index.js.map +1 -0
  22. package/dist/src/commands/ruvector/init.d.ts +11 -0
  23. package/dist/src/commands/ruvector/init.d.ts.map +1 -0
  24. package/dist/src/commands/ruvector/init.js +431 -0
  25. package/dist/src/commands/ruvector/init.js.map +1 -0
  26. package/dist/src/commands/ruvector/migrate.d.ts +11 -0
  27. package/dist/src/commands/ruvector/migrate.d.ts.map +1 -0
  28. package/dist/src/commands/ruvector/migrate.js +481 -0
  29. package/dist/src/commands/ruvector/migrate.js.map +1 -0
  30. package/dist/src/commands/ruvector/optimize.d.ts +11 -0
  31. package/dist/src/commands/ruvector/optimize.d.ts.map +1 -0
  32. package/dist/src/commands/ruvector/optimize.js +503 -0
  33. package/dist/src/commands/ruvector/optimize.js.map +1 -0
  34. package/dist/src/commands/ruvector/setup.d.ts +18 -0
  35. package/dist/src/commands/ruvector/setup.d.ts.map +1 -0
  36. package/dist/src/commands/ruvector/setup.js +765 -0
  37. package/dist/src/commands/ruvector/setup.js.map +1 -0
  38. package/dist/src/commands/ruvector/status.d.ts +11 -0
  39. package/dist/src/commands/ruvector/status.d.ts.map +1 -0
  40. package/dist/src/commands/ruvector/status.js +456 -0
  41. package/dist/src/commands/ruvector/status.js.map +1 -0
  42. package/dist/tsconfig.tsbuildinfo +1 -1
  43. package/package.json +1 -1
@@ -0,0 +1,746 @@
1
+ /**
2
+ * V3 CLI RuVector Backup Command
3
+ * Backup and restore for RuVector PostgreSQL data
4
+ */
5
+ import { output } from '../../output.js';
6
+ import { confirm, input } from '../../prompt.js';
7
/**
 * Build the PostgreSQL connection settings for a command invocation.
 *
 * Resolution order for every field: explicit CLI flag, then the standard
 * libpq environment variable (PGHOST/PGPORT/PGDATABASE/PGUSER/PGPASSWORD/
 * PGSSLMODE), then a hard-coded default. `||` (not `??`) is intentional:
 * an empty-string or `false` flag value falls through to the environment,
 * matching how the flags' defaults are declared on the commands.
 *
 * @param {object} ctx - Command context carrying a `flags` object.
 * @returns {{host: string, port: number, database: string, user: string,
 *            password: string, ssl: boolean, schema: string}}
 */
function getConnectionConfig(ctx) {
    const { flags } = ctx;
    const env = process.env;
    // PGPORT arrives as a string; flags may already be numeric — parseInt
    // normalizes both (radix 10).
    const port = parseInt(flags.port || env.PGPORT || '5432', 10);
    return {
        host: flags.host || env.PGHOST || 'localhost',
        port,
        database: flags.database || env.PGDATABASE || '',
        user: flags.user || env.PGUSER || 'postgres',
        password: flags.password || env.PGPASSWORD || '',
        // SSL is on when the flag says so, or when libpq-style env requires it.
        ssl: flags.ssl || env.PGSSLMODE === 'require',
        schema: flags.schema || 'claude_flow',
    };
}
21
/**
 * Format a byte count as a human-readable string, e.g. 1536 -> "1.5 KB".
 *
 * @param {number} bytes - Non-negative byte count.
 * @returns {string} Value rounded to at most 2 decimals plus a unit suffix.
 */
function formatBytes(bytes) {
    if (bytes === 0)
        return '0 B';
    const k = 1024;
    const sizes = ['B', 'KB', 'MB', 'GB', 'TB'];
    // Clamp the unit index: without it, sizes >= 1024^5 indexed past the
    // array and produced strings like "1 undefined". Petabyte-scale values
    // now render in TB instead.
    const i = Math.min(Math.floor(Math.log(bytes) / Math.log(k)), sizes.length - 1);
    return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i];
}
32
/**
 * RuVector backup subcommand (`ruvector backup create`).
 *
 * Connects to PostgreSQL, exports every BASE TABLE in the target schema
 * (or a comma-separated subset via --tables) plus, optionally, non-primary
 * index definitions, and writes the result to one file as SQL INSERT
 * statements, a JSON document with metadata, or concatenated CSV sections.
 * Output may be gzip-compressed (`.gz` suffix is appended if missing).
 *
 * Returns a CLI result object `{ success, data?, exitCode? }`.
 */
const backupSubcommand = {
    name: 'create',
    description: 'Create a backup of RuVector data',
    options: [
        {
            name: 'output',
            short: 'o',
            description: 'Output file path',
            type: 'string',
            required: true,
        },
        {
            name: 'tables',
            short: 't',
            description: 'Specific tables (comma-separated)',
            type: 'string',
        },
        {
            name: 'format',
            short: 'f',
            description: 'Output format',
            type: 'string',
            default: 'sql',
            choices: ['sql', 'json', 'csv'],
        },
        {
            name: 'compress',
            short: 'c',
            description: 'Compress output (gzip)',
            type: 'boolean',
            default: false,
        },
        {
            name: 'include-indexes',
            description: 'Include index definitions',
            type: 'boolean',
            default: true,
        },
        {
            name: 'host',
            short: 'h',
            description: 'PostgreSQL host',
            type: 'string',
            default: 'localhost',
        },
        {
            name: 'port',
            short: 'p',
            description: 'PostgreSQL port',
            type: 'number',
            default: 5432,
        },
        {
            name: 'database',
            short: 'd',
            description: 'Database name',
            type: 'string',
        },
        {
            name: 'user',
            short: 'u',
            description: 'Database user',
            type: 'string',
        },
        {
            name: 'password',
            description: 'Database password',
            type: 'string',
        },
        {
            name: 'ssl',
            description: 'Enable SSL',
            type: 'boolean',
            default: false,
        },
        {
            name: 'schema',
            short: 's',
            description: 'Schema name',
            type: 'string',
            default: 'claude_flow',
        },
    ],
    examples: [
        { command: 'claude-flow ruvector backup create -o backup.sql', description: 'Create SQL backup' },
        { command: 'claude-flow ruvector backup create -o backup.json --format json', description: 'Create JSON backup' },
        { command: 'claude-flow ruvector backup create -o backup.sql.gz --compress', description: 'Compressed backup' },
    ],
    action: async (ctx) => {
        const config = getConnectionConfig(ctx);
        let outputPath = ctx.flags.output;
        const tablesFilter = ctx.flags.tables;
        const format = ctx.flags.format || 'sql';
        const compress = ctx.flags.compress;
        // Only an explicit `false` disables index export (defaults to true).
        const includeIndexes = ctx.flags['include-indexes'] !== false;
        output.writeln();
        output.writeln(output.bold('RuVector Backup'));
        output.writeln(output.dim('='.repeat(60)));
        output.writeln();
        if (!config.database) {
            output.printError('Database name is required. Use --database or -d flag, or set PGDATABASE env.');
            return { success: false, exitCode: 1 };
        }
        // Interactive mode: prompt for a date-stamped default output path.
        if (!outputPath && ctx.interactive) {
            outputPath = await input({
                message: 'Output file path:',
                default: `ruvector_backup_${new Date().toISOString().split('T')[0]}.${format}`,
                validate: (v) => v.length > 0 || 'Output path is required',
            });
        }
        if (!outputPath) {
            output.printError('Output path is required. Use --output or -o flag.');
            return { success: false, exitCode: 1 };
        }
        const spinner = output.createSpinner({ text: 'Connecting to PostgreSQL...', spinner: 'dots' });
        spinner.start();
        try {
            // Lazy imports keep CLI startup cheap; `pg` is an optional peer
            // dependency, so its absence is reported as a user-fixable error.
            const fs = await import('fs');
            const path = await import('path');
            const { promisify } = await import('util');
            let pg = null;
            try {
                pg = await import('pg');
            }
            catch {
                spinner.fail('PostgreSQL driver not found');
                output.printError('Install pg package: npm install pg');
                return { success: false, exitCode: 1 };
            }
            const client = new pg.Client({
                host: config.host,
                port: config.port,
                database: config.database,
                user: config.user,
                password: config.password,
                // NOTE(review): rejectUnauthorized:false accepts any server
                // certificate — convenient, but not verified TLS.
                ssl: config.ssl ? { rejectUnauthorized: false } : false,
            });
            await client.connect();
            spinner.succeed('Connected to PostgreSQL');
            // Get tables to backup: either the user-supplied list or every
            // BASE TABLE in the schema, in name order.
            spinner.setText('Discovering tables...');
            spinner.start();
            let tables = [];
            if (tablesFilter) {
                tables = tablesFilter.split(',').map(t => t.trim());
            }
            else {
                const tablesResult = await client.query(`
        SELECT table_name FROM information_schema.tables
        WHERE table_schema = $1 AND table_type = 'BASE TABLE'
        ORDER BY table_name
      `, [config.schema]);
                tables = tablesResult.rows.map(r => r.table_name);
            }
            spinner.succeed(`Found ${tables.length} tables to backup`);
            // Accumulates everything needed for the JSON format; SQL/CSV are
            // rendered from the same structure below.
            const backupData = {
                metadata: {
                    backupDate: new Date().toISOString(),
                    database: config.database,
                    schema: config.schema,
                    format,
                    version: '1.0.0',
                },
                schema: config.schema,
                tables: [],
                indexes: [],
            };
            let totalRows = 0;
            // Export each table: column list (for stable ordering) + full rows.
            // NOTE(review): schema/table names are interpolated into SQL —
            // identifiers cannot be bound as parameters, so this assumes
            // trusted input; consider quoting identifiers. Also loads each
            // table fully into memory — fine for small data, TODO confirm
            // acceptable for large vector stores.
            for (const tableName of tables) {
                spinner.setText(`Exporting ${tableName}...`);
                spinner.start();
                const columnsResult = await client.query(`
        SELECT column_name, data_type
        FROM information_schema.columns
        WHERE table_schema = $1 AND table_name = $2
        ORDER BY ordinal_position
      `, [config.schema, tableName]);
                const columns = columnsResult.rows.map(r => r.column_name);
                const dataResult = await client.query(`
        SELECT * FROM ${config.schema}.${tableName}
      `);
                backupData.tables.push({
                    name: tableName,
                    columns,
                    rows: dataResult.rows,
                });
                totalRows += dataResult.rows.length;
                spinner.setText(`Exporting ${tableName}... (${dataResult.rows.length} rows)`);
            }
            spinner.succeed(`Exported ${totalRows.toLocaleString()} rows from ${tables.length} tables`);
            // Export secondary-index DDL (primary keys excluded — they are
            // part of the table definition).
            if (includeIndexes) {
                spinner.setText('Exporting index definitions...');
                spinner.start();
                const indexResult = await client.query(`
        SELECT pg_get_indexdef(i.oid) as indexdef
        FROM pg_index idx
        JOIN pg_class i ON i.oid = idx.indexrelid
        JOIN pg_class t ON t.oid = idx.indrelid
        JOIN pg_namespace n ON n.oid = t.relnamespace
        WHERE n.nspname = $1
          AND NOT idx.indisprimary
      `, [config.schema]);
                backupData.indexes = indexResult.rows.map(r => r.indexdef);
                spinner.succeed(`Exported ${backupData.indexes.length} index definitions`);
            }
            await client.end();
            // Render and write the backup file in the requested format.
            spinner.setText(`Writing backup to ${outputPath}...`);
            spinner.start();
            let content;
            if (format === 'sql') {
                // SQL format: header comments, CREATE SCHEMA, then one INSERT
                // per row. NOTE(review): no CREATE TABLE statements are
                // emitted, so a restore presumes the tables already exist —
                // TODO confirm this is the intended contract.
                const lines = [];
                lines.push(`-- RuVector Backup`);
                lines.push(`-- Generated: ${backupData.metadata.backupDate}`);
                lines.push(`-- Database: ${config.database}`);
                lines.push(`-- Schema: ${config.schema}`);
                lines.push('');
                lines.push(`CREATE SCHEMA IF NOT EXISTS ${config.schema};`);
                lines.push('');
                for (const table of backupData.tables) {
                    lines.push(`-- Table: ${table.name}`);
                    lines.push(`-- Rows: ${table.rows.length}`);
                    lines.push('');
                    if (table.rows.length > 0) {
                        for (const row of table.rows) {
                            const values = table.columns.map(col => {
                                const val = row[col];
                                if (val === null || val === undefined)
                                    return 'NULL';
                                // Single quotes are doubled per SQL string rules;
                                // objects (json/jsonb/arrays) are serialized first.
                                if (typeof val === 'string')
                                    return `'${val.replace(/'/g, "''")}'`;
                                if (typeof val === 'object')
                                    return `'${JSON.stringify(val).replace(/'/g, "''")}'`;
                                return String(val);
                            });
                            lines.push(`INSERT INTO ${config.schema}.${table.name} (${table.columns.join(', ')}) VALUES (${values.join(', ')});`);
                        }
                        lines.push('');
                    }
                }
                if (includeIndexes && backupData.indexes.length > 0) {
                    lines.push('-- Indexes');
                    for (const idx of backupData.indexes) {
                        lines.push(`${idx};`);
                    }
                }
                content = lines.join('\n');
            }
            else if (format === 'json') {
                content = JSON.stringify(backupData, null, 2);
            }
            else {
                // CSV format — one file per table would be better, but we
                // concatenate sections separated by `# Table:` headers.
                const lines = [];
                for (const table of backupData.tables) {
                    lines.push(`# Table: ${table.name}`);
                    lines.push(table.columns.join(','));
                    for (const row of table.rows) {
                        const values = table.columns.map(col => {
                            const val = row[col];
                            if (val === null || val === undefined)
                                return '';
                            const str = typeof val === 'object' ? JSON.stringify(val) : String(val);
                            // Quote fields containing separators, quotes, or line
                            // breaks (the original check missed embedded newlines,
                            // which silently corrupted CSV row structure).
                            const needsQuoting = str.includes(',') || str.includes('"')
                                || str.includes('\n') || str.includes('\r');
                            return needsQuoting ? `"${str.replace(/"/g, '""')}"` : str;
                        });
                        lines.push(values.join(','));
                    }
                    lines.push('');
                }
                content = lines.join('\n');
            }
            // Compress if requested; normalize the extension to `.gz`.
            if (compress) {
                const zlib = await import('zlib');
                const gzip = promisify(zlib.gzip);
                const compressed = await gzip(Buffer.from(content, 'utf-8'));
                outputPath = outputPath.endsWith('.gz') ? outputPath : `${outputPath}.gz`;
                fs.writeFileSync(outputPath, compressed);
            }
            else {
                fs.writeFileSync(outputPath, content, 'utf-8');
            }
            const fileSize = fs.statSync(outputPath).size;
            spinner.succeed(`Backup written to ${outputPath} (${formatBytes(fileSize)})`);
            output.writeln();
            output.printSuccess('Backup completed successfully!');
            output.writeln();
            output.printBox([
                `Output: ${outputPath}`,
                `Format: ${format.toUpperCase()}${compress ? ' (gzip compressed)' : ''}`,
                `Size: ${formatBytes(fileSize)}`,
                `Tables: ${tables.length}`,
                `Total Rows: ${totalRows.toLocaleString()}`,
                `Indexes: ${backupData.indexes.length}`,
            ].join('\n'), 'Backup Summary');
            return {
                success: true,
                data: {
                    outputPath,
                    format,
                    compressed: compress,
                    tables: tables.length,
                    totalRows,
                    indexes: backupData.indexes.length,
                    fileSize,
                },
            };
        }
        catch (error) {
            spinner.fail('Backup failed');
            output.printError(error instanceof Error ? error.message : String(error));
            return { success: false, exitCode: 1 };
        }
    },
};
359
/**
 * RuVector restore subcommand (`ruvector backup restore`).
 *
 * Reads a backup file (gzip is detected by a `.gz` extension, JSON vs SQL
 * by a leading `{`), optionally previews it (--dry-run) or drops and
 * recreates the target schema first (--clean), then replays the data:
 * JSON backups are restored row-by-row with `ON CONFLICT DO NOTHING`,
 * SQL backups by executing each `;`-separated statement.
 *
 * Returns a CLI result object `{ success, data?, exitCode? }`.
 */
const restoreSubcommand = {
    name: 'restore',
    description: 'Restore RuVector data from backup',
    options: [
        {
            name: 'input',
            short: 'i',
            description: 'Input file path',
            type: 'string',
            required: true,
        },
        {
            name: 'clean',
            description: 'Drop existing tables first',
            type: 'boolean',
            default: false,
        },
        {
            name: 'dry-run',
            description: 'Show what would be restored without executing',
            type: 'boolean',
            default: false,
        },
        {
            name: 'host',
            short: 'h',
            description: 'PostgreSQL host',
            type: 'string',
            default: 'localhost',
        },
        {
            name: 'port',
            short: 'p',
            description: 'PostgreSQL port',
            type: 'number',
            default: 5432,
        },
        {
            name: 'database',
            short: 'd',
            description: 'Database name',
            type: 'string',
        },
        {
            name: 'user',
            short: 'u',
            description: 'Database user',
            type: 'string',
        },
        {
            name: 'password',
            description: 'Database password',
            type: 'string',
        },
        {
            name: 'ssl',
            description: 'Enable SSL',
            type: 'boolean',
            default: false,
        },
        {
            name: 'schema',
            short: 's',
            description: 'Schema name',
            type: 'string',
            default: 'claude_flow',
        },
    ],
    examples: [
        { command: 'claude-flow ruvector backup restore -i backup.sql', description: 'Restore from SQL backup' },
        { command: 'claude-flow ruvector backup restore -i backup.json --clean', description: 'Clean restore' },
        { command: 'claude-flow ruvector backup restore -i backup.sql --dry-run', description: 'Preview restore' },
    ],
    action: async (ctx) => {
        const config = getConnectionConfig(ctx);
        const inputPath = ctx.flags.input;
        const clean = ctx.flags.clean;
        const dryRun = ctx.flags['dry-run'];
        output.writeln();
        output.writeln(output.bold('RuVector Restore'));
        output.writeln(output.dim('='.repeat(60)));
        output.writeln();
        if (!config.database) {
            output.printError('Database name is required. Use --database or -d flag, or set PGDATABASE env.');
            return { success: false, exitCode: 1 };
        }
        if (!inputPath) {
            output.printError('Input path is required. Use --input or -i flag.');
            return { success: false, exitCode: 1 };
        }
        const spinner = output.createSpinner({ text: 'Reading backup file...', spinner: 'dots' });
        spinner.start();
        try {
            const fs = await import('fs');
            const path = await import('path');
            const { promisify } = await import('util');
            if (!fs.existsSync(inputPath)) {
                spinner.fail('Backup file not found');
                output.printError(`File not found: ${inputPath}`);
                return { success: false, exitCode: 1 };
            }
            // Read file, transparently gunzipping `.gz` backups.
            let content;
            if (inputPath.endsWith('.gz')) {
                const zlib = await import('zlib');
                const gunzip = promisify(zlib.gunzip);
                const compressed = fs.readFileSync(inputPath);
                const decompressed = await gunzip(compressed);
                content = decompressed.toString('utf-8');
            }
            else {
                content = fs.readFileSync(inputPath, 'utf-8');
            }
            const fileSize = fs.statSync(inputPath).size;
            spinner.succeed(`Read backup file (${formatBytes(fileSize)})`);
            // Determine format by sniffing content: JSON backups start with `{`.
            const isJson = content.trim().startsWith('{');
            const format = isJson ? 'json' : 'sql';
            // Dry run: report what the backup contains and exit without
            // touching the database.
            if (dryRun) {
                output.printInfo('Dry run mode: showing what would be restored');
                output.writeln();
                if (isJson) {
                    const data = JSON.parse(content);
                    output.writeln(output.highlight('Backup metadata:'));
                    output.printTable({
                        columns: [
                            { key: 'property', header: 'Property', width: 20 },
                            { key: 'value', header: 'Value', width: 40 },
                        ],
                        data: [
                            { property: 'Backup Date', value: data.metadata?.backupDate || 'Unknown' },
                            { property: 'Database', value: data.metadata?.database || 'Unknown' },
                            { property: 'Schema', value: data.schema || 'Unknown' },
                            { property: 'Tables', value: String(data.tables?.length || 0) },
                            { property: 'Total Rows', value: String(data.tables?.reduce((sum, t) => sum + t.rows.length, 0) || 0) },
                            { property: 'Indexes', value: String(data.indexes?.length || 0) },
                        ],
                    });
                }
                else {
                    // Count SQL statements for a rough preview.
                    const insertCount = (content.match(/INSERT INTO/gi) || []).length;
                    const createCount = (content.match(/CREATE (TABLE|INDEX)/gi) || []).length;
                    output.writeln(`SQL statements: ${insertCount} inserts, ${createCount} creates`);
                }
                return { success: true, data: { dryRun: true } };
            }
            // --clean is destructive; require confirmation when interactive.
            if (clean && ctx.interactive) {
                const confirmClean = await confirm({
                    message: 'This will drop existing tables. Continue?',
                    default: false,
                });
                if (!confirmClean) {
                    output.printInfo('Restore cancelled');
                    return { success: false, exitCode: 0 };
                }
            }
            // Connect and restore (`pg` is an optional peer dependency).
            let pg = null;
            try {
                pg = await import('pg');
            }
            catch {
                spinner.fail('PostgreSQL driver not found');
                output.printError('Install pg package: npm install pg');
                return { success: false, exitCode: 1 };
            }
            const client = new pg.Client({
                host: config.host,
                port: config.port,
                database: config.database,
                user: config.user,
                password: config.password,
                ssl: config.ssl ? { rejectUnauthorized: false } : false,
            });
            await client.connect();
            spinner.succeed('Connected to PostgreSQL');
            // Clean if requested: drop the whole schema (CASCADE) and recreate
            // it empty.
            if (clean) {
                spinner.setText(`Dropping schema "${config.schema}"...`);
                spinner.start();
                await client.query(`DROP SCHEMA IF EXISTS ${config.schema} CASCADE`);
                await client.query(`CREATE SCHEMA ${config.schema}`);
                spinner.succeed('Schema cleaned');
            }
            let restoredRows = 0;
            let restoredTables = 0;
            let restoredIndexes = 0;
            if (isJson) {
                // Restore from JSON: one parameterized INSERT per row, with
                // ON CONFLICT DO NOTHING so re-running is idempotent. Assumes
                // the tables already exist with a matching schema — TODO
                // confirm against the create command's contract.
                const data = JSON.parse(content);
                for (const table of data.tables || []) {
                    spinner.setText(`Restoring ${table.name}...`);
                    spinner.start();
                    for (const row of table.rows) {
                        const columns = Object.keys(row);
                        const values = columns.map((col, idx) => `$${idx + 1}`);
                        const params = columns.map(col => {
                            const val = row[col];
                            return typeof val === 'object' ? JSON.stringify(val) : val;
                        });
                        try {
                            await client.query(`
            INSERT INTO ${config.schema}.${table.name} (${columns.join(', ')})
            VALUES (${values.join(', ')})
            ON CONFLICT DO NOTHING
          `, params);
                            restoredRows++;
                        }
                        catch {
                            // Skip conflicts — best-effort restore by design.
                        }
                    }
                    restoredTables++;
                    spinner.setText(`Restoring ${table.name}... (${table.rows.length} rows)`);
                }
                spinner.succeed(`Restored ${restoredTables} tables, ${restoredRows} rows`);
                // Recreate secondary indexes; failures are tolerated since the
                // index may already exist.
                if (data.indexes && data.indexes.length > 0) {
                    spinner.setText('Restoring indexes...');
                    spinner.start();
                    for (const indexDef of data.indexes) {
                        try {
                            await client.query(indexDef);
                            restoredIndexes++;
                        }
                        catch {
                            // Index may already exist
                        }
                    }
                    spinner.succeed(`Restored ${restoredIndexes} indexes`);
                }
            }
            else {
                // Restore from SQL
                spinner.setText('Executing SQL restore...');
                spinner.start();
                // Split on `;` and strip `--` comment LINES inside each chunk.
                // The previous code discarded any chunk whose trimmed text
                // began with `--`, which silently dropped the CREATE SCHEMA
                // statement and the first INSERT after every `-- Table:`
                // header that the create command emits.
                // NOTE(review): splitting on `;` still breaks on semicolons
                // embedded in string literals — TODO consider a real parser.
                const statements = content
                    .split(';')
                    .map(s => s
                        .split('\n')
                        .filter(line => !line.trim().startsWith('--'))
                        .join('\n')
                        .trim())
                    .filter(s => s.length > 0);
                let executed = 0;
                for (const stmt of statements) {
                    try {
                        await client.query(stmt);
                        executed++;
                        if (stmt.toUpperCase().includes('INSERT INTO')) {
                            restoredRows++;
                        }
                        else if (stmt.toUpperCase().includes('CREATE INDEX')) {
                            restoredIndexes++;
                        }
                    }
                    catch (error) {
                        // Log but continue — partial restores are tolerated.
                        if (process.env.DEBUG) {
                            console.error('Statement failed:', stmt.substring(0, 100));
                        }
                    }
                    if (executed % 100 === 0) {
                        spinner.setText(`Executing SQL restore... ${executed}/${statements.length}`);
                    }
                }
                spinner.succeed(`Executed ${executed} SQL statements`);
            }
            await client.end();
            output.writeln();
            output.printSuccess('Restore completed successfully!');
            output.writeln();
            output.printBox([
                `Source: ${inputPath}`,
                `Format: ${format.toUpperCase()}`,
                `Tables Restored: ${restoredTables}`,
                `Rows Restored: ${restoredRows.toLocaleString()}`,
                `Indexes Restored: ${restoredIndexes}`,
            ].join('\n'), 'Restore Summary');
            return {
                success: true,
                data: {
                    inputPath,
                    format,
                    restoredTables,
                    restoredRows,
                    restoredIndexes,
                },
            };
        }
        catch (error) {
            spinner.fail('Restore failed');
            output.printError(error instanceof Error ? error.message : String(error));
            return { success: false, exitCode: 1 };
        }
    },
};
661
/**
 * RuVector backup main command.
 *
 * Parent command grouping the `create` and `restore` subcommands. The
 * PostgreSQL connection options are declared once here so both subcommands
 * and the parent advertise the same flags. Invoked without a subcommand,
 * the action prints an overview box and exits successfully.
 */
const connectionOptions = [
    { name: 'host', short: 'h', description: 'PostgreSQL host', type: 'string', default: 'localhost' },
    { name: 'port', short: 'p', description: 'PostgreSQL port', type: 'number', default: 5432 },
    { name: 'database', short: 'd', description: 'Database name', type: 'string' },
    { name: 'user', short: 'u', description: 'Database user', type: 'string' },
    { name: 'password', description: 'Database password', type: 'string' },
    { name: 'ssl', description: 'Enable SSL', type: 'boolean', default: false },
    { name: 'schema', short: 's', description: 'Schema name', type: 'string', default: 'claude_flow' },
];

export const backupCommand = {
    name: 'backup',
    description: 'Backup and restore RuVector data',
    subcommands: [backupSubcommand, restoreSubcommand],
    options: connectionOptions,
    examples: [
        { command: 'claude-flow ruvector backup create -o backup.sql', description: 'Create backup' },
        { command: 'claude-flow ruvector backup restore -i backup.sql', description: 'Restore backup' },
    ],
    action: async (ctx) => {
        const helpLines = [
            'RuVector Backup provides data backup and restore capabilities:',
            '',
            ' create Create a backup of RuVector data',
            ' restore Restore RuVector data from backup',
            '',
            'Supported formats:',
            ' SQL - PostgreSQL-compatible SQL statements',
            ' JSON - Portable JSON format with metadata',
            ' CSV - Comma-separated values',
            '',
            'Features:',
            ' - Selective table backup',
            ' - Gzip compression',
            ' - Index preservation',
            ' - Incremental restore',
        ];
        output.writeln();
        output.writeln(output.bold('RuVector Backup'));
        output.writeln(output.dim('='.repeat(60)));
        output.writeln();
        output.printBox(helpLines.join('\n'), 'Backup Commands');
        output.writeln();
        output.printInfo('Run `claude-flow ruvector backup <command> --help` for details');
        return { success: true };
    },
};
745
// Re-exported as default so both `import { backupCommand }` and
// `import backupCommand from ...` resolve to the same command object.
export default backupCommand;
//# sourceMappingURL=backup.js.map