genbox 1.0.65 → 1.0.66
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/commands/db-sync.js +55 -14
- package/dist/db-utils.js +120 -0
- package/package.json +1 -1
package/dist/commands/db-sync.js
CHANGED
|
@@ -147,7 +147,34 @@ exports.dbSyncCommand
|
|
|
147
147
|
if (snapshotChoice === 'existing') {
|
|
148
148
|
snapshotId = existingSnapshot._id;
|
|
149
149
|
snapshotS3Key = existingSnapshot.s3Key;
|
|
150
|
-
|
|
150
|
+
// Download the existing snapshot from S3
|
|
151
|
+
const downloadSpinner = (0, ora_1.default)('Downloading existing snapshot...').start();
|
|
152
|
+
try {
|
|
153
|
+
const downloadUrlResponse = await (0, api_1.getSnapshotDownloadUrl)(snapshotId);
|
|
154
|
+
const downloadResult = await (0, db_utils_1.downloadSnapshotFromS3)(downloadUrlResponse.downloadUrl, {
|
|
155
|
+
onProgress: (msg) => downloadSpinner.text = msg,
|
|
156
|
+
});
|
|
157
|
+
if (!downloadResult.success) {
|
|
158
|
+
downloadSpinner.fail(chalk_1.default.red('Failed to download snapshot'));
|
|
159
|
+
console.log(chalk_1.default.dim(` Error: ${downloadResult.error}`));
|
|
160
|
+
console.log(chalk_1.default.dim(' Creating fresh snapshot instead...'));
|
|
161
|
+
// Reset to create fresh
|
|
162
|
+
snapshotId = undefined;
|
|
163
|
+
snapshotS3Key = undefined;
|
|
164
|
+
}
|
|
165
|
+
else {
|
|
166
|
+
downloadSpinner.succeed(chalk_1.default.green('Snapshot downloaded'));
|
|
167
|
+
localDumpPath = downloadResult.dumpPath;
|
|
168
|
+
}
|
|
169
|
+
}
|
|
170
|
+
catch (error) {
|
|
171
|
+
downloadSpinner.fail(chalk_1.default.red('Failed to download snapshot'));
|
|
172
|
+
console.log(chalk_1.default.dim(` Error: ${error.message}`));
|
|
173
|
+
console.log(chalk_1.default.dim(' Creating fresh snapshot instead...'));
|
|
174
|
+
// Reset to create fresh
|
|
175
|
+
snapshotId = undefined;
|
|
176
|
+
snapshotS3Key = undefined;
|
|
177
|
+
}
|
|
151
178
|
}
|
|
152
179
|
}
|
|
153
180
|
}
|
|
@@ -215,20 +242,37 @@ exports.dbSyncCommand
|
|
|
215
242
|
}
|
|
216
243
|
}
|
|
217
244
|
}
|
|
218
|
-
//
|
|
219
|
-
if (!
|
|
220
|
-
console.log(chalk_1.default.red('No
|
|
245
|
+
// Restore directly via SCP/SSH (user's own SSH key, more reliable than API async job)
|
|
246
|
+
if (!localDumpPath) {
|
|
247
|
+
console.log(chalk_1.default.red('No dump file available to restore'));
|
|
221
248
|
return;
|
|
222
249
|
}
|
|
223
|
-
const
|
|
250
|
+
const ipAddress = genbox.ipAddress;
|
|
251
|
+
if (!ipAddress) {
|
|
252
|
+
console.log(chalk_1.default.red('Genbox has no IP address'));
|
|
253
|
+
return;
|
|
254
|
+
}
|
|
255
|
+
const restoreSpinner = (0, ora_1.default)('Uploading dump to genbox...').start();
|
|
224
256
|
try {
|
|
225
|
-
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
snapshotId,
|
|
229
|
-
s3Key: snapshotS3Key,
|
|
230
|
-
}),
|
|
257
|
+
// SCP dump file to genbox
|
|
258
|
+
const uploadResult = await (0, db_utils_1.uploadDumpToGenbox)(localDumpPath, ipAddress, {
|
|
259
|
+
onProgress: (msg) => restoreSpinner.text = msg,
|
|
231
260
|
});
|
|
261
|
+
if (!uploadResult.success) {
|
|
262
|
+
restoreSpinner.fail(chalk_1.default.red('Upload failed'));
|
|
263
|
+
console.error(chalk_1.default.red(` Error: ${uploadResult.error}`));
|
|
264
|
+
return;
|
|
265
|
+
}
|
|
266
|
+
restoreSpinner.text = 'Restoring database...';
|
|
267
|
+
// SSH to run mongorestore with dynamic port detection
|
|
268
|
+
const restoreResult = await (0, db_utils_1.runRemoteMongoRestoreDynamic)(ipAddress, {
|
|
269
|
+
onProgress: (msg) => restoreSpinner.text = msg,
|
|
270
|
+
});
|
|
271
|
+
if (!restoreResult.success) {
|
|
272
|
+
restoreSpinner.fail(chalk_1.default.red('Restore failed'));
|
|
273
|
+
console.error(chalk_1.default.red(` Error: ${restoreResult.error}`));
|
|
274
|
+
return;
|
|
275
|
+
}
|
|
232
276
|
restoreSpinner.succeed(chalk_1.default.green('Database sync completed!'));
|
|
233
277
|
console.log('');
|
|
234
278
|
console.log(chalk_1.default.dim(` Database has been restored from ${source} snapshot.`));
|
|
@@ -236,9 +280,6 @@ exports.dbSyncCommand
|
|
|
236
280
|
catch (error) {
|
|
237
281
|
restoreSpinner.fail(chalk_1.default.red('Database restore failed'));
|
|
238
282
|
console.error(chalk_1.default.red(` Error: ${error.message}`));
|
|
239
|
-
if (error instanceof api_1.AuthenticationError) {
|
|
240
|
-
console.log(chalk_1.default.yellow('\nRun: genbox login'));
|
|
241
|
-
}
|
|
242
283
|
}
|
|
243
284
|
}
|
|
244
285
|
catch (error) {
|
package/dist/db-utils.js
CHANGED
|
@@ -47,9 +47,11 @@ exports.getMongoDumpInstallInstructions = getMongoDumpInstallInstructions;
|
|
|
47
47
|
exports.runLocalMongoDump = runLocalMongoDump;
|
|
48
48
|
exports.uploadDumpToGenbox = uploadDumpToGenbox;
|
|
49
49
|
exports.runRemoteMongoRestore = runRemoteMongoRestore;
|
|
50
|
+
exports.runRemoteMongoRestoreDynamic = runRemoteMongoRestoreDynamic;
|
|
50
51
|
exports.cleanupDump = cleanupDump;
|
|
51
52
|
exports.formatBytes = formatBytes;
|
|
52
53
|
exports.waitForSshAccess = waitForSshAccess;
|
|
54
|
+
exports.downloadSnapshotFromS3 = downloadSnapshotFromS3;
|
|
53
55
|
exports.uploadDumpToS3 = uploadDumpToS3;
|
|
54
56
|
exports.createAndUploadSnapshot = createAndUploadSnapshot;
|
|
55
57
|
const child_process_1 = require("child_process");
|
|
@@ -276,6 +278,94 @@ async function runRemoteMongoRestore(ipAddress, dbName, options = {}) {
|
|
|
276
278
|
});
|
|
277
279
|
});
|
|
278
280
|
}
|
|
281
|
+
/**
 * Run mongorestore on a genbox host over SSH, detecting the MongoDB port at runtime.
 *
 * Unlike runRemoteMongoRestore, nothing is hardcoded: the remote script inspects
 * `docker ps` port mappings to find which host port forwards to the container's
 * 27017, waits for MongoDB to answer a ping, then restores the previously
 * uploaded archive at /home/dev/.db-dump.gz and deletes it afterwards.
 *
 * @param {string} ipAddress - Address of the genbox; SSH login is `dev@<ipAddress>`.
 * @param {{ onProgress?: (msg: string) => void }} [options] - Optional progress callback.
 * @returns {Promise<{ success: boolean, error?: string }>} Never rejects; failures
 *   (including a failed ssh spawn) are reported via `success: false` plus `error`.
 */
async function runRemoteMongoRestoreDynamic(ipAddress, options = {}) {
    return new Promise((resolve) => {
        options.onProgress?.('Detecting MongoDB and restoring database...');
        // The restore command - detects MongoDB port dynamically.
        // NOTE(review): the script relies on `grep -P` (PCRE, GNU grep), `mongosh`
        // and `mongorestore` being installed on the remote host — confirm the
        // genbox image provides all three.
        const restoreCmd = `
            set -e

            # Detect MongoDB port from running docker containers
            # Different projects use different port mappings (e.g., 27037, 27117, etc.)
            MONGO_PORT=$(docker ps --format '{{.Ports}}' | grep -oP '\\d+(?=->27017)' | head -1)

            if [ -z "$MONGO_PORT" ]; then
              echo "ERROR: Could not detect MongoDB port from running containers"
              docker ps
              exit 1
            fi

            echo "MongoDB detected on port: $MONGO_PORT"

            # Wait for MongoDB to be responsive
            for i in {1..30}; do
              if mongosh --quiet --host localhost --port $MONGO_PORT --eval "db.runCommand({ping:1})" 2>/dev/null; then
                echo "MongoDB is ready"
                break
              fi
              if [ $i -eq 30 ]; then
                echo "ERROR: MongoDB not responding after 30 attempts"
                exit 1
              fi
              echo "Waiting for MongoDB... ($i/30)"
              sleep 2
            done

            # Restore the database from the uploaded dump
            if [ -f /home/dev/.db-dump.gz ]; then
              echo "Restoring database..."
              # The dump is a raw mongodump --archive file (gzipped), not a tar archive
              # Don't use --db flag with --archive as it causes issues
              mongorestore --host localhost:$MONGO_PORT --drop --gzip --archive=/home/dev/.db-dump.gz
              rm -f /home/dev/.db-dump.gz
              echo "Database restored successfully"
            else
              echo "Error: Dump file not found at /home/dev/.db-dump.gz"
              exit 1
            fi
        `;
        // Run the script via `bash -c` on the remote side. ssh joins its trailing
        // arguments into one command string evaluated by the remote login shell,
        // so the script is wrapped in single quotes with embedded quotes escaped
        // as '\'' to survive that extra shell-evaluation layer.
        const proc = (0, child_process_1.spawn)('ssh', [
            '-o', 'StrictHostKeyChecking=no',
            '-o', 'UserKnownHostsFile=/dev/null',
            '-o', 'ConnectTimeout=30',
            `dev@${ipAddress}`,
            'bash', '-c', `'${restoreCmd.replace(/'/g, "'\\''")}'`,
        ], {
            stdio: ['ignore', 'pipe', 'pipe'],
        });
        let stdout = '';
        let stderr = '';
        proc.stdout?.on('data', (data) => {
            const line = data.toString();
            stdout += line;
            // Report progress: surface only the milestone `echo` lines emitted by
            // the remote script, not raw mongorestore output.
            if (line.includes('MongoDB detected') || line.includes('Restoring') || line.includes('restored') || line.includes('Waiting')) {
                options.onProgress?.(line.trim());
            }
        });
        proc.stderr?.on('data', (data) => {
            stderr += data.toString();
        });
        proc.on('close', (code) => {
            if (code === 0) {
                resolve({ success: true });
            }
            else {
                // Prefer stderr for diagnostics; fall back to stdout, then a generic message.
                resolve({
                    success: false,
                    error: stderr || stdout || 'Restore failed',
                });
            }
        });
        proc.on('error', (err) => {
            // spawn itself failed (e.g. ssh binary not found) — report, don't reject.
            resolve({ success: false, error: `Failed to run ssh: ${err.message}` });
        });
    });
}
|
|
279
369
|
/**
|
|
280
370
|
* Clean up temporary dump files
|
|
281
371
|
*/
|
|
@@ -324,6 +414,36 @@ async function waitForSshAccess(ipAddress, maxWaitSeconds = 300, onProgress) {
|
|
|
324
414
|
}
|
|
325
415
|
return false;
|
|
326
416
|
}
|
|
417
|
+
/**
 * Download a snapshot archive from S3 via a pre-signed URL.
 *
 * Creates a fresh temporary directory, fetches the whole archive into memory,
 * and writes it to `<tmpdir>/dump.gz`. On any failure (non-OK HTTP status or a
 * thrown error) the temporary directory is removed and an error result is
 * returned — this function never throws.
 *
 * @param {string} downloadUrl - Pre-signed S3 URL for the snapshot.
 * @param {{ onProgress?: (msg: string) => void }} [options] - Optional progress callback.
 * @returns {Promise<{ success: boolean, dumpPath?: string, error?: string }>}
 *   On success, `dumpPath` points at the downloaded archive; the caller owns
 *   the containing temp directory and is responsible for cleaning it up.
 */
async function downloadSnapshotFromS3(downloadUrl, options = {}) {
    const workDir = fs.mkdtempSync(path.join(os.tmpdir(), 'genbox-dbdump-'));
    const dumpPath = path.join(workDir, 'dump.gz');
    // Remove the temp directory on any failure path so we never leak it.
    const discardWorkDir = () => fs.rmSync(workDir, { recursive: true, force: true });
    options.onProgress?.('Downloading snapshot from cloud...');
    try {
        const response = await fetch(downloadUrl);
        if (!response.ok) {
            discardWorkDir();
            return {
                success: false,
                error: `Download failed: ${response.status} ${response.statusText}`,
            };
        }
        // Buffer the full body, then persist it to disk.
        const payload = Buffer.from(await response.arrayBuffer());
        fs.writeFileSync(dumpPath, payload);
        const stats = fs.statSync(dumpPath);
        options.onProgress?.(`Downloaded snapshot (${formatBytes(stats.size)})`);
        return { success: true, dumpPath };
    }
    catch (error) {
        discardWorkDir();
        return {
            success: false,
            error: `Download failed: ${error.message}`,
        };
    }
}
|
|
327
447
|
/**
|
|
328
448
|
* Upload dump file to S3 using pre-signed URL
|
|
329
449
|
*/
|