genbox 1.0.63 → 1.0.64
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/commands/rebuild.js +506 -8
- package/package.json +1 -1
package/dist/commands/rebuild.js
CHANGED
|
@@ -44,12 +44,351 @@ const ora_1 = __importDefault(require("ora"));
|
|
|
44
44
|
const fs = __importStar(require("fs"));
|
|
45
45
|
const path = __importStar(require("path"));
|
|
46
46
|
const os = __importStar(require("os"));
|
|
47
|
+
const child_process_1 = require("child_process");
|
|
47
48
|
const config_loader_1 = require("../config-loader");
|
|
48
49
|
const profile_resolver_1 = require("../profile-resolver");
|
|
49
50
|
const api_1 = require("../api");
|
|
50
51
|
const genbox_selector_1 = require("../genbox-selector");
|
|
51
52
|
const schema_v4_1 = require("../schema-v4");
|
|
52
53
|
const db_utils_1 = require("../db-utils");
|
|
54
|
+
// ============================================================================
|
|
55
|
+
// SSH Utilities for Soft Rebuild
|
|
56
|
+
// ============================================================================
|
|
57
|
+
/**
 * Locate the user's SSH private key for connecting to a genbox.
 *
 * Checks ~/.ssh/id_ed25519 first, then ~/.ssh/id_rsa, and returns the
 * first key file that exists on disk.
 *
 * @returns {string} Absolute path to the private key file.
 * @throws {Error} If neither candidate key exists.
 */
function getPrivateSshKeyPath() {
    const sshDir = path.join(os.homedir(), '.ssh');
    // Preference order matters: ed25519 is tried before rsa.
    const candidates = ['id_ed25519', 'id_rsa'].map((name) => path.join(sshDir, name));
    const found = candidates.find((candidate) => fs.existsSync(candidate));
    if (!found) {
        throw new Error('No SSH private key found in ~/.ssh/');
    }
    return found;
}
|
|
70
|
+
/**
 * Run a command on the genbox over SSH, synchronously, and capture its output.
 *
 * The command is passed to ssh as a single argv element via execFileSync
 * (mirroring sshExecStream's spawn usage) instead of being interpolated into
 * a local shell string. The previous execSync implementation wrapped the
 * command in double quotes and escaped only `"`, so the LOCAL shell expanded
 * `$var`, `$(...)` and backticks before ssh ever ran — breaking remote
 * scripts such as the MongoDB readiness loop, which relies on `$container`
 * and `$(docker ps ...)` expanding on the remote host.
 *
 * @param {string} ip - Genbox IP address (connects as user `dev`).
 * @param {string} keyPath - Path to the SSH private key.
 * @param {string} command - Shell command to execute on the remote host.
 * @param {number} [timeoutSecs=30] - SSH connect timeout; overall process
 *   timeout is (timeoutSecs + 10) seconds.
 * @returns {{success: boolean, output: string, error?: string}} Trimmed
 *   stdout on success; on failure, whatever stdout/stderr was produced.
 */
function sshExec(ip, keyPath, command, timeoutSecs = 30) {
    const sshArgs = [
        '-i', keyPath,
        '-o', 'IdentitiesOnly=yes',
        '-o', 'StrictHostKeyChecking=no',
        '-o', 'UserKnownHostsFile=/dev/null',
        '-o', 'LogLevel=ERROR',
        '-o', `ConnectTimeout=${timeoutSecs}`,
        `dev@${ip}`,
        command,
    ];
    try {
        // execFileSync bypasses the local shell entirely; ssh forwards the
        // command verbatim for the REMOTE shell to interpret.
        const result = (0, child_process_1.execFileSync)('ssh', sshArgs, {
            encoding: 'utf8',
            timeout: (timeoutSecs + 10) * 1000,
            stdio: ['pipe', 'pipe', 'pipe'],
        });
        return { success: true, output: result.trim() };
    }
    catch (error) {
        // Non-zero remote exit, connection failure, or timeout all land here.
        const stderr = error.stderr?.toString().trim() || '';
        const stdout = error.stdout?.toString().trim() || '';
        return { success: false, output: stdout, error: stderr || error.message };
    }
}
|
|
86
|
+
/**
 * Run a command on the genbox over SSH, streaming output line-by-line.
 *
 * Spawns `ssh` with the command as a single argv element and forwards each
 * non-blank line of stdout/stderr to the optional callbacks. Never rejects:
 * resolves with the child's exit code, or code -1 if ssh could not be
 * spawned at all.
 *
 * @param {string} ip - Genbox IP address (connects as user `dev`).
 * @param {string} keyPath - Path to the SSH private key.
 * @param {string} command - Shell command to execute remotely.
 * @param {{onStdout?: (line: string) => void,
 *          onStderr?: (line: string) => void,
 *          timeoutSecs?: number}} [options] - Line callbacks and SSH
 *   connect timeout (default 30s; no overall execution timeout is applied).
 * @returns {Promise<{success: boolean, code: number}>}
 */
async function sshExecStream(ip, keyPath, command, options = {}) {
    return new Promise((resolve) => {
        const args = [
            '-i', keyPath,
            '-o', 'IdentitiesOnly=yes',
            '-o', 'StrictHostKeyChecking=no',
            '-o', 'UserKnownHostsFile=/dev/null',
            '-o', 'LogLevel=ERROR',
            '-o', `ConnectTimeout=${options.timeoutSecs || 30}`,
            `dev@${ip}`,
            command,
        ];
        const proc = (0, child_process_1.spawn)('ssh', args, { stdio: ['pipe', 'pipe', 'pipe'] });
        // Split a raw chunk into lines and hand each non-blank one to the
        // given callback (no-op when no callback was supplied).
        const forwardLines = (chunk, handler) => {
            for (const line of chunk.toString().split('\n')) {
                if (line.trim() && handler) {
                    handler(line);
                }
            }
        };
        proc.stdout?.on('data', (chunk) => forwardLines(chunk, options.onStdout));
        proc.stderr?.on('data', (chunk) => forwardLines(chunk, options.onStderr));
        proc.on('close', (code) => resolve({ success: code === 0, code: code || 0 }));
        // Spawn failure (e.g. ssh binary missing) — report as code -1.
        proc.on('error', () => resolve({ success: false, code: -1 }));
    });
}
|
|
123
|
+
/**
 * Copy a local file to the genbox via scp.
 *
 * Never rejects: resolves `{success: true}` on exit code 0, otherwise
 * `{success: false, error}` carrying scp's stderr (or a generic message),
 * including the case where scp itself could not be spawned.
 *
 * @param {string} localPath - Path of the file on this machine.
 * @param {string} ip - Genbox IP address (uploads as user `dev`).
 * @param {string} remotePath - Destination path on the genbox.
 * @param {string} keyPath - Path to the SSH private key.
 * @returns {Promise<{success: boolean, error?: string}>}
 */
async function scpUpload(localPath, ip, remotePath, keyPath) {
    return new Promise((resolve) => {
        const args = [
            '-o', 'StrictHostKeyChecking=no',
            '-o', 'UserKnownHostsFile=/dev/null',
            '-o', 'ConnectTimeout=30',
            '-i', keyPath,
            localPath,
            `dev@${ip}:${remotePath}`,
        ];
        const proc = (0, child_process_1.spawn)('scp', args, { stdio: ['ignore', 'pipe', 'pipe'] });
        const errChunks = [];
        proc.stderr?.on('data', (chunk) => {
            errChunks.push(chunk.toString());
        });
        proc.on('close', (code) => {
            if (code === 0) {
                resolve({ success: true });
                return;
            }
            resolve({ success: false, error: errChunks.join('').trim() || 'SCP failed' });
        });
        proc.on('error', (err) => resolve({ success: false, error: err.message }));
    });
}
|
|
151
|
+
/**
 * Perform an in-place ("soft") rebuild of an existing genbox over SSH,
 * without reinstalling the OS:
 *   1. stop PM2 and Docker Compose services,
 *   2. delete and re-clone each configured repository,
 *   3. upload and place .env files,
 *   4. reinstall npm/pnpm dependencies,
 *   5. restart Docker Compose,
 *   6. optionally restore a MongoDB snapshot,
 *   7. restart apps under PM2 and save the process list.
 *
 * Most per-step failures are logged as warnings and the rebuild continues;
 * only a failed clone (or an unexpected exception) aborts with
 * `{success: false}`.
 *
 * @param {{genbox: object, resolved: object, config: object,
 *          keyPath: string, envFiles: Array<{stagingName: string,
 *          remotePath: string, content: string}>, snapshotId?: string,
 *          snapshotS3Key?: string, gitToken?: string,
 *          onStep?: (step: string) => void,
 *          onLog?: (line: string, type?: string) => void}} options
 * @returns {Promise<{success: boolean, error?: string}>}
 */
async function runSoftRebuild(options) {
    const { genbox, resolved, config, keyPath, envFiles, snapshotId, snapshotS3Key, gitToken, onStep, onLog } = options;
    const ip = genbox.ipAddress;
    // Forward a log line to the caller's callback, if one was provided.
    const log = (line, type) => {
        if (onLog) {
            onLog(line, type);
        }
    };
    try {
        // Step 1: Stop all services
        onStep?.('Stopping services...');
        log('Stopping PM2 processes...', 'dim');
        // `|| true` keeps this best-effort: a missing pm2 must not abort the rebuild.
        // NOTE(review): sshExec is synchronous; the `await` here is harmless.
        await sshExec(ip, keyPath, 'source ~/.nvm/nvm.sh 2>/dev/null; pm2 kill 2>/dev/null || true', 30);
        // Find and stop docker compose in repo directories
        for (const repo of resolved.repos) {
            log(`Stopping Docker Compose in ${repo.path}...`, 'dim');
            await sshExec(ip, keyPath, `cd ${repo.path} 2>/dev/null && docker compose down 2>/dev/null || true`, 60);
        }
        // Step 2: Clean up repo directories
        onStep?.('Cleaning up repositories...');
        for (const repo of resolved.repos) {
            log(`Removing ${repo.path}...`, 'dim');
            const cleanResult = await sshExec(ip, keyPath, `rm -rf ${repo.path}`, 30);
            if (!cleanResult.success) {
                // Non-fatal: the subsequent clone may still succeed into a dirty dir.
                log(`Warning: Failed to clean ${repo.path}: ${cleanResult.error}`, 'error');
            }
        }
        // Step 3: Clone repositories with correct branches
        onStep?.('Cloning repositories...');
        for (const repo of resolved.repos) {
            // Embed the git token into HTTPS URLs for private-repo auth.
            const repoUrl = gitToken && repo.url.startsWith('https://')
                ? repo.url.replace('https://', `https://${gitToken}@`)
                : repo.url;
            // Determine branch to checkout
            const sourceBranch = repo.sourceBranch || repo.branch || 'main';
            // NOTE(review): targetBranch is computed but never used — presumably
            // superseded by the repo.newBranch checkout below; confirm and remove.
            const targetBranch = repo.newBranch || repo.branch || 'main';
            log(`Cloning ${repo.name} (branch: ${sourceBranch})...`, 'info');
            // Create parent directory if needed
            const parentDir = path.dirname(repo.path);
            await sshExec(ip, keyPath, `mkdir -p ${parentDir}`, 10);
            // Clone the repo (shallow, single branch, to keep transfer small).
            const cloneCmd = `git clone --depth=1 --single-branch --branch ${sourceBranch} '${repoUrl}' ${repo.path}`;
            const cloneResult = await sshExecStream(ip, keyPath, cloneCmd, {
                onStdout: (line) => log(line, 'dim'),
                onStderr: (line) => {
                    // Git outputs progress to stderr, filter out non-errors
                    if (!line.includes('Cloning into') && !line.includes('Receiving objects') && !line.includes('Resolving deltas')) {
                        log(line, 'dim');
                    }
                },
                timeoutSecs: 120,
            });
            if (!cloneResult.success) {
                // A failed clone is fatal — everything downstream needs the checkout.
                return { success: false, error: `Failed to clone ${repo.name}` };
            }
            // Create new branch if needed
            if (repo.newBranch && repo.newBranch !== sourceBranch) {
                log(`Creating new branch: ${repo.newBranch}`, 'info');
                const branchResult = await sshExec(ip, keyPath, `cd ${repo.path} && git checkout -b ${repo.newBranch}`, 10);
                if (!branchResult.success) {
                    log(`Warning: Failed to create branch ${repo.newBranch}: ${branchResult.error}`, 'error');
                }
            }
            log(`✓ Cloned ${repo.name}`, 'success');
        }
        // Step 4: Upload and place .env files
        if (envFiles.length > 0) {
            onStep?.('Setting up environment files...');
            // Create staging directory
            await sshExec(ip, keyPath, 'mkdir -p ~/.env-staging', 10);
            for (const envFile of envFiles) {
                log(`Writing ${envFile.stagingName}...`, 'dim');
                // Write env file content to a temp file locally, then SCP it
                const tempLocalPath = path.join(os.tmpdir(), `genbox-env-${Date.now()}-${envFile.stagingName}`);
                fs.writeFileSync(tempLocalPath, envFile.content);
                const scpResult = await scpUpload(tempLocalPath, ip, `~/.env-staging/${envFile.stagingName}`, keyPath);
                fs.unlinkSync(tempLocalPath);
                if (!scpResult.success) {
                    log(`Warning: Failed to upload ${envFile.stagingName}: ${scpResult.error}`, 'error');
                    continue;
                }
                // Move to final location
                const parentDir = path.dirname(envFile.remotePath);
                await sshExec(ip, keyPath, `mkdir -p ${parentDir}`, 10);
                const moveResult = await sshExec(ip, keyPath, `mv ~/.env-staging/${envFile.stagingName} ${envFile.remotePath}`, 10);
                if (!moveResult.success) {
                    log(`Warning: Failed to move ${envFile.stagingName} to ${envFile.remotePath}`, 'error');
                }
            }
        }
        // Step 5: Install dependencies
        onStep?.('Installing dependencies...');
        for (const repo of resolved.repos) {
            // Check for package.json
            const hasPackageJson = await sshExec(ip, keyPath, `test -f ${repo.path}/package.json && echo yes || echo no`, 10);
            if (hasPackageJson.output === 'yes') {
                log(`Installing npm dependencies in ${repo.name}...`, 'info');
                // Determine package manager (prefer pnpm)
                const hasPnpmLock = await sshExec(ip, keyPath, `test -f ${repo.path}/pnpm-lock.yaml && echo yes || echo no`, 10);
                const installCmd = hasPnpmLock.output === 'yes'
                    ? `cd ${repo.path} && source ~/.nvm/nvm.sh && pnpm install --frozen-lockfile`
                    : `cd ${repo.path} && source ~/.nvm/nvm.sh && npm install`;
                const installResult = await sshExecStream(ip, keyPath, installCmd, {
                    onStdout: (line) => {
                        // Only surface summary lines; full install output is too noisy.
                        if (line.includes('Packages:') || line.includes('added') || line.includes('Done')) {
                            log(line, 'dim');
                        }
                    },
                    timeoutSecs: 300,
                });
                if (!installResult.success) {
                    log(`Warning: npm install failed in ${repo.name}`, 'error');
                }
                else {
                    log(`✓ Dependencies installed in ${repo.name}`, 'success');
                }
            }
        }
        // Step 6: Start Docker Compose services
        onStep?.('Starting Docker services...');
        for (const repo of resolved.repos) {
            // Accept any of the three conventional compose file names.
            const hasDockerCompose = await sshExec(ip, keyPath, `test -f ${repo.path}/docker-compose.yml -o -f ${repo.path}/docker-compose.yaml -o -f ${repo.path}/compose.yaml && echo yes || echo no`, 10);
            if (hasDockerCompose.output === 'yes') {
                log(`Starting Docker Compose in ${repo.name}...`, 'info');
                const composeResult = await sshExecStream(ip, keyPath, `cd ${repo.path} && docker compose up -d`, {
                    onStdout: (line) => log(line, 'dim'),
                    timeoutSecs: 180,
                });
                if (!composeResult.success) {
                    log(`Warning: Docker Compose failed in ${repo.name}`, 'error');
                }
                else {
                    log(`✓ Docker services started in ${repo.name}`, 'success');
                }
            }
        }
        // Step 7: Restore database if snapshot provided
        if (snapshotId && snapshotS3Key) {
            onStep?.('Restoring database...');
            log('Fetching database snapshot...', 'info');
            try {
                // Get download URL from API
                const downloadInfo = await (0, api_1.getSnapshotDownloadUrl)(snapshotId);
                // Download snapshot on the server (server pulls directly; the
                // snapshot never passes through this machine).
                log('Downloading snapshot to server...', 'dim');
                const downloadCmd = `curl -sL -o /tmp/db-snapshot.gz '${downloadInfo.downloadUrl}'`;
                const downloadResult = await sshExecStream(ip, keyPath, downloadCmd, {
                    timeoutSecs: 300,
                });
                if (!downloadResult.success) {
                    log('Warning: Failed to download database snapshot', 'error');
                }
                else {
                    // Wait for MongoDB to be ready (poll up to 30 x 2s).
                    // NOTE(review): `$container`/`$(...)` here must expand on the
                    // REMOTE host — verify sshExec's quoting preserves them.
                    log('Waiting for MongoDB...', 'dim');
                    await sshExec(ip, keyPath, `
            for i in {1..30}; do
              if docker ps --format '{{.Names}}' | grep -q mongodb; then
                container=$(docker ps --format '{{.Names}}' | grep mongodb | head -1)
                if docker exec $container mongosh --quiet --eval "db.runCommand({ping:1})" 2>/dev/null; then
                  break
                fi
              fi
              sleep 2
            done
          `, 120);
                    // Get MongoDB container name and port
                    const containerResult = await sshExec(ip, keyPath, "docker ps --format '{{.Names}}' | grep -i mongo | head -1", 10);
                    const mongoContainer = containerResult.output || 'mongodb';
                    // Get the MongoDB port (falls back to the default 27017).
                    const portResult = await sshExec(ip, keyPath, `docker port ${mongoContainer} 27017 2>/dev/null | cut -d: -f2 || echo 27017`, 10);
                    const mongoPort = portResult.output.trim() || '27017';
                    // Restore the database (--drop replaces existing collections).
                    log('Restoring database from snapshot...', 'info');
                    const restoreCmd = `mongorestore --host localhost --port ${mongoPort} --archive=/tmp/db-snapshot.gz --gzip --drop`;
                    const restoreResult = await sshExecStream(ip, keyPath, restoreCmd, {
                        onStdout: (line) => log(line, 'dim'),
                        timeoutSecs: 300,
                    });
                    if (!restoreResult.success) {
                        log('Warning: Database restore may have failed (non-zero exit)', 'error');
                    }
                    else {
                        log('✓ Database restored', 'success');
                    }
                    // Cleanup
                    await sshExec(ip, keyPath, 'rm -f /tmp/db-snapshot.gz', 10);
                }
            }
            catch (error) {
                // DB restore is best-effort: the rebuild continues without it.
                log(`Warning: Database restore failed: ${error.message}`, 'error');
            }
        }
        // Step 8: Start PM2 services
        onStep?.('Starting application services...');
        for (const app of resolved.apps) {
            const appConfig = config.apps[app.name];
            if (!appConfig)
                continue;
            // Find the repo path for this app: a repo named after the app, or a
            // subdirectory of the first repo (monorepo layout).
            const appPath = appConfig.path || app.name;
            const repoPath = resolved.repos.find(r => r.name === app.name)?.path ||
                (resolved.repos[0]?.path ? `${resolved.repos[0].path}/${appPath}` : null);
            if (!repoPath)
                continue;
            // Check for PM2 ecosystem file or package.json scripts
            const hasPm2Config = await sshExec(ip, keyPath, `test -f ${repoPath}/ecosystem.config.js -o -f ${repoPath}/ecosystem.config.cjs && echo yes || echo no`, 10);
            if (hasPm2Config.output === 'yes') {
                log(`Starting ${app.name} with PM2...`, 'info');
                const pm2Result = await sshExecStream(ip, keyPath, `cd ${repoPath} && source ~/.nvm/nvm.sh && pm2 start ecosystem.config.js 2>/dev/null || pm2 start ecosystem.config.cjs`, {
                    onStdout: (line) => log(line, 'dim'),
                    timeoutSecs: 60,
                });
                if (pm2Result.success) {
                    log(`✓ Started ${app.name}`, 'success');
                }
            }
            else {
                // Check for start script in package.json
                const hasStartScript = await sshExec(ip, keyPath, `grep -q '"start"' ${repoPath}/package.json 2>/dev/null && echo yes || echo no`, 10);
                if (hasStartScript.output === 'yes') {
                    log(`Starting ${app.name} with PM2 (npm start)...`, 'info');
                    const pm2Result = await sshExecStream(ip, keyPath, `cd ${repoPath} && source ~/.nvm/nvm.sh && pm2 start npm --name ${app.name} -- start`, {
                        onStdout: (line) => log(line, 'dim'),
                        timeoutSecs: 60,
                    });
                    if (pm2Result.success) {
                        log(`✓ Started ${app.name}`, 'success');
                    }
                }
            }
        }
        // Step 9: Save PM2 process list (so processes survive a server reboot).
        log('Saving PM2 process list...', 'dim');
        await sshExec(ip, keyPath, 'source ~/.nvm/nvm.sh && pm2 save 2>/dev/null || true', 10);
        return { success: true };
    }
    catch (error) {
        return { success: false, error: error.message };
    }
}
|
|
53
392
|
function getPublicSshKey() {
|
|
54
393
|
const home = os.homedir();
|
|
55
394
|
const potentialKeys = [
|
|
@@ -253,8 +592,6 @@ function buildRebuildPayload(resolved, config, publicKey, privateKey, configLoad
|
|
|
253
592
|
// Build repos
|
|
254
593
|
const repos = {};
|
|
255
594
|
for (const repo of resolved.repos) {
|
|
256
|
-
// DEBUG: Log repo values
|
|
257
|
-
console.log(chalk_1.default.dim(` [DEBUG] Repo ${repo.name}: branch=${repo.branch}, newBranch=${repo.newBranch}, sourceBranch=${repo.sourceBranch}`));
|
|
258
595
|
repos[repo.name] = {
|
|
259
596
|
url: repo.url,
|
|
260
597
|
path: repo.path,
|
|
@@ -387,6 +724,7 @@ exports.rebuildCommand = new commander_1.Command('rebuild')
|
|
|
387
724
|
.option('--db-source <source>', 'Database source for copy mode: staging, production')
|
|
388
725
|
.option('--db-dump <path>', 'Path to existing database dump file')
|
|
389
726
|
.option('-y, --yes', 'Skip interactive prompts')
|
|
727
|
+
.option('--hard', 'Full rebuild (reinstall OS) instead of soft rebuild')
|
|
390
728
|
.action(async (name, options) => {
|
|
391
729
|
try {
|
|
392
730
|
// Select genbox (interactive if no name provided)
|
|
@@ -522,11 +860,20 @@ exports.rebuildCommand = new commander_1.Command('rebuild')
|
|
|
522
860
|
// Confirm rebuild
|
|
523
861
|
if (!options.yes) {
|
|
524
862
|
console.log('');
|
|
525
|
-
|
|
526
|
-
|
|
863
|
+
if (options.hard) {
|
|
864
|
+
console.log(chalk_1.default.yellow('Warning: Hard rebuild will reinstall the OS and rerun setup.'));
|
|
865
|
+
console.log(chalk_1.default.yellow('All unsaved work on the server will be lost.'));
|
|
866
|
+
}
|
|
867
|
+
else {
|
|
868
|
+
console.log(chalk_1.default.blue('Soft rebuild will:'));
|
|
869
|
+
console.log(chalk_1.default.dim(' • Stop services (PM2, Docker Compose)'));
|
|
870
|
+
console.log(chalk_1.default.dim(' • Delete and re-clone repositories'));
|
|
871
|
+
console.log(chalk_1.default.dim(' • Reinstall dependencies and restart services'));
|
|
872
|
+
console.log(chalk_1.default.yellow('Note: Uncommitted changes in repos will be lost.'));
|
|
873
|
+
}
|
|
527
874
|
console.log('');
|
|
528
875
|
const confirm = await prompts.confirm({
|
|
529
|
-
message:
|
|
876
|
+
message: `${options.hard ? 'Hard rebuild' : 'Rebuild'} genbox '${selectedName}'?`,
|
|
530
877
|
default: false,
|
|
531
878
|
});
|
|
532
879
|
if (!confirm) {
|
|
@@ -698,6 +1045,157 @@ exports.rebuildCommand = new commander_1.Command('rebuild')
|
|
|
698
1045
|
return;
|
|
699
1046
|
}
|
|
700
1047
|
}
|
|
1048
|
+
// ================================================================
|
|
1049
|
+
// SOFT REBUILD (default)
|
|
1050
|
+
// ================================================================
|
|
1051
|
+
if (!options.hard) {
|
|
1052
|
+
// Verify server is accessible
|
|
1053
|
+
if (!genbox.ipAddress) {
|
|
1054
|
+
console.log(chalk_1.default.red('Error: Genbox has no IP address. It may still be provisioning.'));
|
|
1055
|
+
console.log(chalk_1.default.dim(' Use --hard for a full rebuild, or wait for provisioning to complete.'));
|
|
1056
|
+
return;
|
|
1057
|
+
}
|
|
1058
|
+
// Get SSH key path
|
|
1059
|
+
let sshKeyPath;
|
|
1060
|
+
try {
|
|
1061
|
+
sshKeyPath = getPrivateSshKeyPath();
|
|
1062
|
+
}
|
|
1063
|
+
catch (error) {
|
|
1064
|
+
console.log(chalk_1.default.red(error.message));
|
|
1065
|
+
return;
|
|
1066
|
+
}
|
|
1067
|
+
// Test SSH connection
|
|
1068
|
+
console.log('');
|
|
1069
|
+
console.log(chalk_1.default.dim('Testing SSH connection...'));
|
|
1070
|
+
const testResult = sshExec(genbox.ipAddress, sshKeyPath, 'echo ok', 10);
|
|
1071
|
+
if (!testResult.success || testResult.output !== 'ok') {
|
|
1072
|
+
console.log(chalk_1.default.red('Error: Cannot connect to genbox via SSH.'));
|
|
1073
|
+
console.log(chalk_1.default.dim(` ${testResult.error || 'Connection failed'}`));
|
|
1074
|
+
console.log(chalk_1.default.dim(' Use --hard for a full rebuild if the server is unresponsive.'));
|
|
1075
|
+
return;
|
|
1076
|
+
}
|
|
1077
|
+
console.log(chalk_1.default.green('✓ SSH connection verified'));
|
|
1078
|
+
// Build env files for soft rebuild
|
|
1079
|
+
const envFilesForSoftRebuild = [];
|
|
1080
|
+
const envGenboxPath = path.join(process.cwd(), '.env.genbox');
|
|
1081
|
+
if (fs.existsSync(envGenboxPath)) {
|
|
1082
|
+
const rawEnvContent = fs.readFileSync(envGenboxPath, 'utf-8');
|
|
1083
|
+
const sections = parseEnvGenboxSections(rawEnvContent);
|
|
1084
|
+
const globalSection = sections.get('GLOBAL') || '';
|
|
1085
|
+
const envVarsFromFile = {};
|
|
1086
|
+
for (const line of globalSection.split('\n')) {
|
|
1087
|
+
const match = line.match(/^([A-Z_][A-Z0-9_]*)=(.*)$/);
|
|
1088
|
+
if (match) {
|
|
1089
|
+
let value = match[2].trim();
|
|
1090
|
+
if ((value.startsWith('"') && value.endsWith('"')) ||
|
|
1091
|
+
(value.startsWith("'") && value.endsWith("'"))) {
|
|
1092
|
+
value = value.slice(1, -1);
|
|
1093
|
+
}
|
|
1094
|
+
envVarsFromFile[match[1]] = value;
|
|
1095
|
+
}
|
|
1096
|
+
}
|
|
1097
|
+
let connectTo;
|
|
1098
|
+
if (resolved.profile && config.profiles?.[resolved.profile]) {
|
|
1099
|
+
const profile = config.profiles[resolved.profile];
|
|
1100
|
+
connectTo = (0, config_loader_1.getProfileConnection)(profile);
|
|
1101
|
+
}
|
|
1102
|
+
const serviceUrlMap = buildServiceUrlMap(envVarsFromFile, connectTo);
|
|
1103
|
+
for (const app of resolved.apps) {
|
|
1104
|
+
const appConfig = config.apps[app.name];
|
|
1105
|
+
const appPath = appConfig?.path || app.name;
|
|
1106
|
+
const repoPath = resolved.repos.find(r => r.name === app.name)?.path ||
|
|
1107
|
+
(resolved.repos[0]?.path ? `${resolved.repos[0].path}/${appPath}` : `/home/dev/${config.project.name}/${appPath}`);
|
|
1108
|
+
const servicesSections = Array.from(sections.keys()).filter(s => s.startsWith(`${app.name}/`));
|
|
1109
|
+
if (servicesSections.length > 0) {
|
|
1110
|
+
for (const serviceSectionName of servicesSections) {
|
|
1111
|
+
const serviceName = serviceSectionName.split('/')[1];
|
|
1112
|
+
const serviceEnvContent = buildAppEnvContent(sections, serviceSectionName, serviceUrlMap);
|
|
1113
|
+
envFilesForSoftRebuild.push({
|
|
1114
|
+
stagingName: `${app.name}-${serviceName}.env`,
|
|
1115
|
+
remotePath: `${repoPath}/apps/${serviceName}/.env`,
|
|
1116
|
+
content: serviceEnvContent,
|
|
1117
|
+
});
|
|
1118
|
+
}
|
|
1119
|
+
}
|
|
1120
|
+
else {
|
|
1121
|
+
const appEnvContent = buildAppEnvContent(sections, app.name, serviceUrlMap);
|
|
1122
|
+
envFilesForSoftRebuild.push({
|
|
1123
|
+
stagingName: `${app.name}.env`,
|
|
1124
|
+
remotePath: `${repoPath}/.env`,
|
|
1125
|
+
content: appEnvContent,
|
|
1126
|
+
});
|
|
1127
|
+
}
|
|
1128
|
+
}
|
|
1129
|
+
}
|
|
1130
|
+
// Get git token from env
|
|
1131
|
+
const envVars = configLoader.loadEnvVars(process.cwd());
|
|
1132
|
+
// Run soft rebuild
|
|
1133
|
+
console.log('');
|
|
1134
|
+
console.log(chalk_1.default.blue('=== Starting Soft Rebuild ==='));
|
|
1135
|
+
console.log('');
|
|
1136
|
+
let currentStep = '';
|
|
1137
|
+
const rebuildResult = await runSoftRebuild({
|
|
1138
|
+
genbox,
|
|
1139
|
+
resolved,
|
|
1140
|
+
config,
|
|
1141
|
+
keyPath: sshKeyPath,
|
|
1142
|
+
envFiles: envFilesForSoftRebuild,
|
|
1143
|
+
snapshotId,
|
|
1144
|
+
snapshotS3Key,
|
|
1145
|
+
gitToken: envVars.GIT_TOKEN,
|
|
1146
|
+
onStep: (step) => {
|
|
1147
|
+
currentStep = step;
|
|
1148
|
+
console.log(chalk_1.default.blue(`\n=== ${step} ===`));
|
|
1149
|
+
},
|
|
1150
|
+
onLog: (line, type) => {
|
|
1151
|
+
switch (type) {
|
|
1152
|
+
case 'error':
|
|
1153
|
+
console.log(chalk_1.default.red(` ${line}`));
|
|
1154
|
+
break;
|
|
1155
|
+
case 'success':
|
|
1156
|
+
console.log(chalk_1.default.green(` ${line}`));
|
|
1157
|
+
break;
|
|
1158
|
+
case 'info':
|
|
1159
|
+
console.log(chalk_1.default.cyan(` ${line}`));
|
|
1160
|
+
break;
|
|
1161
|
+
case 'dim':
|
|
1162
|
+
default:
|
|
1163
|
+
console.log(chalk_1.default.dim(` ${line}`));
|
|
1164
|
+
break;
|
|
1165
|
+
}
|
|
1166
|
+
},
|
|
1167
|
+
});
|
|
1168
|
+
console.log('');
|
|
1169
|
+
if (rebuildResult.success) {
|
|
1170
|
+
console.log(chalk_1.default.green(`✓ Soft rebuild completed successfully!`));
|
|
1171
|
+
console.log('');
|
|
1172
|
+
console.log(`Run ${chalk_1.default.cyan(`genbox status ${selectedName}`)} to check service status.`);
|
|
1173
|
+
}
|
|
1174
|
+
else {
|
|
1175
|
+
console.log(chalk_1.default.red(`✗ Soft rebuild failed: ${rebuildResult.error}`));
|
|
1176
|
+
console.log(chalk_1.default.dim(` Failed during: ${currentStep}`));
|
|
1177
|
+
console.log(chalk_1.default.dim(` Use --hard for a full OS reinstall if needed.`));
|
|
1178
|
+
}
|
|
1179
|
+
// Notify API about the rebuild (for tracking)
|
|
1180
|
+
try {
|
|
1181
|
+
await (0, api_1.fetchApi)(`/genboxes/${genbox._id}/soft-rebuild-completed`, {
|
|
1182
|
+
method: 'POST',
|
|
1183
|
+
body: JSON.stringify({
|
|
1184
|
+
success: rebuildResult.success,
|
|
1185
|
+
branch: resolved.repos[0]?.branch,
|
|
1186
|
+
newBranch: resolved.repos[0]?.newBranch,
|
|
1187
|
+
sourceBranch: resolved.repos[0]?.sourceBranch,
|
|
1188
|
+
}),
|
|
1189
|
+
});
|
|
1190
|
+
}
|
|
1191
|
+
catch {
|
|
1192
|
+
// Silently ignore API notification failures
|
|
1193
|
+
}
|
|
1194
|
+
return;
|
|
1195
|
+
}
|
|
1196
|
+
// ================================================================
|
|
1197
|
+
// HARD REBUILD (--hard flag)
|
|
1198
|
+
// ================================================================
|
|
701
1199
|
// Build payload
|
|
702
1200
|
const payload = buildRebuildPayload(resolved, config, publicKey, privateKeyContent, configLoader);
|
|
703
1201
|
// Add database info to payload if we have a snapshot
|
|
@@ -712,11 +1210,11 @@ exports.rebuildCommand = new commander_1.Command('rebuild')
|
|
|
712
1210
|
else if (dbMode === 'local' || dbMode === 'fresh') {
|
|
713
1211
|
payload.database = { mode: 'local' };
|
|
714
1212
|
}
|
|
715
|
-
// Execute rebuild
|
|
716
|
-
const rebuildSpinner = (0, ora_1.default)(`
|
|
1213
|
+
// Execute hard rebuild via API
|
|
1214
|
+
const rebuildSpinner = (0, ora_1.default)(`Hard rebuilding Genbox '${selectedName}'...`).start();
|
|
717
1215
|
try {
|
|
718
1216
|
await rebuildGenbox(genbox._id, payload);
|
|
719
|
-
rebuildSpinner.succeed(chalk_1.default.green(`Genbox '${selectedName}' rebuild initiated!`));
|
|
1217
|
+
rebuildSpinner.succeed(chalk_1.default.green(`Genbox '${selectedName}' hard rebuild initiated!`));
|
|
720
1218
|
console.log('');
|
|
721
1219
|
console.log(chalk_1.default.dim('Server is rebuilding. This may take a few minutes.'));
|
|
722
1220
|
console.log(chalk_1.default.dim('SSH connection will be temporarily unavailable.'));
|