ante-erp-cli 1.10.2 → 1.10.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/ante-cli.js +1 -0
- package/package.json +1 -1
- package/src/commands/backup.js +74 -2
- package/src/commands/clone-db.js +8 -2
- package/src/commands/restore.js +32 -17
- package/src/utils/docker.js +38 -3
package/bin/ante-cli.js
CHANGED
@@ -94,6 +94,7 @@ const backupCmd = program
   .description('Create backup of ANTE ERP databases (PostgreSQL and MongoDB)')
   .option('-o, --output <path>', 'Output file path')
   .option('-k, --keyword <text>', 'Add keyword to backup filename for easier identification')
+  .option('--clean', 'Clean up backups older than 24 hours before creating new backup')
   .action(backup);
 
 backupCmd
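Note: the new `--clean` option reaches the backup action handler as `options.clean` via commander. A minimal, self-contained sketch of that wiring, purely as an illustration (the invocation name `ante-cli` is assumed from the bin script; this is not code copied from the package):

// Hypothetical sketch of the commander wiring for the new flag; not part of ante-erp-cli.
import { Command } from 'commander';

const program = new Command();

program
  .command('backup')
  .description('Create backup of ANTE ERP databases (PostgreSQL and MongoDB)')
  .option('--clean', 'Clean up backups older than 24 hours before creating new backup')
  .action((options) => {
    // `ante-cli backup --clean` (assumed invocation) arrives here with options.clean === true
    console.log('clean requested:', Boolean(options.clean));
  });

// Simulates `node ante-cli.js backup --clean`; the default parse mode skips the first two argv entries.
program.parse(['node', 'ante-cli.js', 'backup', '--clean']);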
package/package.json
CHANGED
package/src/commands/backup.js
CHANGED
@@ -2,7 +2,7 @@ import chalk from 'chalk';
 import ora from 'ora';
 import inquirer from 'inquirer';
 import { join } from 'path';
-import { readdirSync, readFileSync } from 'fs';
+import { readdirSync, readFileSync, statSync } from 'fs';
 import { execa } from 'execa';
 import Table from 'cli-table3';
 import { getInstallDir } from '../utils/config.js';
@@ -31,6 +31,37 @@ function generateTimestamp() {
   return `${year}-${month}-${day}_${hoursStr}-${minutes}-${seconds}-${ampm}`;
 }
 
+/**
+ * Clean up backups older than 24 hours
+ * @param {string} backupDir - Path to backups directory
+ * @returns {Promise<Array>} Array of deleted backup info
+ */
+async function cleanOldBackups(backupDir) {
+  try {
+    const files = readdirSync(backupDir).filter(f => f.endsWith('.tar.gz'));
+    const now = Date.now();
+    const twentyFourHours = 24 * 60 * 60 * 1000; // 24 hours in milliseconds
+
+    const deleted = [];
+
+    for (const file of files) {
+      const filePath = join(backupDir, file);
+      const stats = statSync(filePath);
+      const fileAge = now - stats.mtimeMs;
+
+      if (fileAge > twentyFourHours) {
+        await execa('rm', [filePath]);
+        const ageInHours = Math.floor(fileAge / (60 * 60 * 1000));
+        deleted.push({ file, age: ageInHours });
+      }
+    }
+
+    return deleted;
+  } catch (error) {
+    throw new Error(`Cleanup failed: ${error.message}`);
+  }
+}
+
 /**
  * Create backup of ANTE ERP databases (PostgreSQL and MongoDB)
  */
@@ -40,6 +71,27 @@ export async function backup(options) {
   const composeFile = join(installDir, 'docker-compose.yml');
   const backupDir = join(installDir, 'backups');
 
+  // Clean old backups if --clean flag is provided
+  if (options.clean) {
+    console.log(chalk.cyan('\n🧹 Cleaning up old backups...\n'));
+
+    try {
+      const deleted = await cleanOldBackups(backupDir);
+
+      if (deleted.length > 0) {
+        console.log(chalk.green(`✓ Deleted ${deleted.length} backup(s) older than 24 hours:\n`));
+        deleted.forEach(({ file, age }) => {
+          console.log(chalk.gray(`  - ${file} (${age} hours old)`));
+        });
+        console.log();
+      } else {
+        console.log(chalk.gray('No backups older than 24 hours found.\n'));
+      }
+    } catch (cleanupError) {
+      console.log(chalk.yellow(`⚠ Warning: ${cleanupError.message}\n`));
+    }
+  }
+
   // Prompt for keyword if not provided via flag
   let keyword = options.keyword;
   if (!keyword) {
@@ -87,7 +139,18 @@ export async function backup(options) {
       'ante_db'
     ]);
 
+    // Get PostgreSQL dump size for progress indication
+    const { stdout: pgSizeOutput } = await execa('docker', [
+      'exec',
+      'ante-postgres',
+      'sh',
+      '-c',
+      'du -h /tmp/postgres.dump | cut -f1'
+    ]);
+    const pgSize = pgSizeOutput.trim();
+
     // Copy dump file from container to temp directory
+    spinner.text = `Copying PostgreSQL dump from container (${pgSize})...`;
     await execa('docker', [
       'cp',
       'ante-postgres:/tmp/postgres.dump',
@@ -139,7 +202,12 @@ export async function backup(options) {
      throw new Error(`MongoDB backup failed: ${mongoResult.error}`);
     }
 
+    // Get MongoDB dump size for progress indication
+    const { stdout: mongoSizeOutput } = await execa('du', ['-sh', join(mongoDumpTempDir, mongoInfo.database)]);
+    const mongoSize = mongoSizeOutput.split('\t')[0];
+
     // Move MongoDB dump to final structure (mongodb/ folder)
+    spinner.text = `Organizing MongoDB dump (${mongoSize})...`;
     await execa('mv', [
       join(mongoDumpTempDir, mongoInfo.database),
       join(tempDir, 'mongodb')
@@ -148,8 +216,12 @@ export async function backup(options) {
     // Remove temporary MongoDB dump directory
     await execa('rm', ['-rf', mongoDumpTempDir]);
 
+    // Get total size before compression
+    const { stdout: totalSizeOutput } = await execa('du', ['-sh', tempDir]);
+    const totalSize = totalSizeOutput.split('\t')[0];
+
     // Create tar.gz archive
-    spinner.text =
+    spinner.text = `Creating compressed archive (${totalSize})...`;
     await execa('mkdir', ['-p', backupDir]);
     await execa('tar', [
       '-czf',
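The cleanup added above keys off file modification time: any `.tar.gz` in the backups directory whose mtime is more than 24 hours old is deleted. A hypothetical dry-run counterpart to the same check, purely for illustration (not part of the package), that only reports candidates instead of removing them:

// Hypothetical dry-run counterpart to cleanOldBackups; illustrative only, not in ante-erp-cli.
import { readdirSync, statSync } from 'fs';
import { join } from 'path';

function listStaleBackups(backupDir, maxAgeMs = 24 * 60 * 60 * 1000) {
  const now = Date.now();
  return readdirSync(backupDir)
    .filter(file => file.endsWith('.tar.gz'))
    .map(file => {
      const ageMs = now - statSync(join(backupDir, file)).mtimeMs;
      return { file, ageHours: Math.floor(ageMs / (60 * 60 * 1000)), stale: ageMs > maxAgeMs };
    })
    .filter(entry => entry.stale);
}

// Example (path is illustrative): console.table(listStaleBackups('/opt/ante-erp/backups'));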
package/src/commands/clone-db.js
CHANGED
@@ -261,32 +261,38 @@ export async function cloneDb(sourceUrl, options = {}) {
 
   // Generate Prisma client
   const generateSpinner = ora('Generating Prisma client...').start();
+  const generateStartTime = Date.now();
   try {
     await execInContainer(
       composeFile,
       'backend',
       ['npx', 'prisma', 'generate']
     );
-
+    const generateDuration = ((Date.now() - generateStartTime) / 1000).toFixed(1);
+    generateSpinner.succeed(chalk.green(`Prisma client generated (${generateDuration}s)`));
     generateSuccess = true;
   } catch (error) {
     generateSpinner.fail(chalk.red('Prisma generate failed'));
     console.log(chalk.yellow('  Warning: You may need to run "npx prisma generate" manually in the backend container'));
+    console.log(chalk.gray(`  Error: ${error.message}`));
   }
 
   // Run Prisma migrations
   const migrateSpinner = ora('Running Prisma migrations...').start();
+  const migrateStartTime = Date.now();
   try {
     await execInContainer(
       composeFile,
       'backend',
       ['npx', 'prisma', 'migrate', 'deploy']
     );
-
+    const migrateDuration = ((Date.now() - migrateStartTime) / 1000).toFixed(1);
+    migrateSpinner.succeed(chalk.green(`Prisma migrations completed (${migrateDuration}s)`));
     migrateSuccess = true;
   } catch (error) {
     migrateSpinner.fail(chalk.red('Prisma migrate failed'));
     console.log(chalk.yellow('  Note: Database schema already exists from restore. Migrations may not be needed.'));
+    console.log(chalk.gray(`  Error: ${error.message}`));
   }
   console.log('');
 }
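Both Prisma steps now follow the same pattern: capture a start time, run the step in the backend container, and report the elapsed seconds on the spinner's success message, surfacing the underlying error on failure. A generic helper capturing that pattern, purely as an illustration (the package inlines the logic rather than using such a helper):

// Hypothetical timing wrapper illustrating the pattern above; not part of ante-erp-cli.
import ora from 'ora';
import chalk from 'chalk';

async function runTimed(label, doneLabel, fn) {
  const spinner = ora(label).start();
  const startTime = Date.now();
  try {
    const result = await fn();
    const duration = ((Date.now() - startTime) / 1000).toFixed(1);
    spinner.succeed(chalk.green(`${doneLabel} (${duration}s)`));
    return result;
  } catch (error) {
    spinner.fail(chalk.red(`${label} failed`));
    console.log(chalk.gray(`  Error: ${error.message}`));
    throw error;
  }
}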
package/src/commands/restore.js
CHANGED
@@ -2,10 +2,10 @@ import chalk from 'chalk';
 import ora from 'ora';
 import inquirer from 'inquirer';
 import { join, basename } from 'path';
-import { existsSync, mkdirSync, readdirSync } from 'fs';
+import { existsSync, mkdirSync, readdirSync, statSync } from 'fs';
 import { execa } from 'execa';
 import { getInstallDir } from '../utils/config.js';
-import { execInContainer, stopServices, startServices } from '../utils/docker.js';
+import { execInContainer, stopServices, startServices, waitForHealthy } from '../utils/docker.js';
 
 /**
  * Restore ANTE ERP databases from backup (PostgreSQL and MongoDB only)
@@ -96,46 +96,62 @@ export async function restore(backupFile, options = {}) {
     spinner.text = 'Stopping services...';
     await stopServices(composeFile);
 
-    // Wait for services to stop
-    await new Promise(resolve => setTimeout(resolve, 3000));
-
     // Start only database services for restore
     spinner.text = 'Starting database services...';
     await startServices(composeFile, ['postgres', 'mongodb', 'redis']);
 
-    // Wait for databases to be
-
+    // Wait for databases to be healthy (replaces fixed waits)
+    spinner.text = 'Waiting for databases to be ready...';
+    await waitForHealthy(composeFile, ['postgres', 'mongodb', 'redis'], 60);
 
     // Restore PostgreSQL
-    spinner.text = 'Restoring PostgreSQL database...';
     const pgDumpFile = join(tempDir, 'postgres.dump');
     if (existsSync(pgDumpFile)) {
-      //
+      // Get dump size for progress indication
+      const stats = statSync(pgDumpFile);
+      const sizeInMB = Math.round(stats.size / (1024 * 1024));
+
+      // Copy dump file to container (file-based restore, zero memory usage)
+      spinner.text = `Copying PostgreSQL dump to container (${sizeInMB}MB)...`;
+      await execa('docker', [
+        'cp',
+        pgDumpFile,
+        'ante-postgres:/tmp/postgres.dump'
+      ]);
+
+      // Drop database (connect to postgres database to avoid connection errors)
+      spinner.text = 'Preparing PostgreSQL database...';
       await execInContainer(
         composeFile,
         'postgres',
-        ['psql', '-U', 'ante', '-c', 'DROP DATABASE IF EXISTS ante_db;']
+        ['psql', '-U', 'ante', '-d', 'postgres', '-c', 'DROP DATABASE IF EXISTS ante_db;']
      );
+
+      // Create database (connect to postgres database)
       await execInContainer(
         composeFile,
         'postgres',
-        ['psql', '-U', 'ante', '-c', 'CREATE DATABASE ante_db;']
+        ['psql', '-U', 'ante', '-d', 'postgres', '-c', 'CREATE DATABASE ante_db;']
       );
 
-      // Restore from
-
+      // Restore from file inside container (streaming, zero memory)
+      spinner.text = `Restoring PostgreSQL database (${sizeInMB}MB)...`;
       await execInContainer(
         composeFile,
         'postgres',
-        ['
-        { input: dumpContent.stdout }
+        ['sh', '-c', 'pg_restore -U ante -d ante_db /tmp/postgres.dump && rm /tmp/postgres.dump']
       );
     }
 
     // Restore MongoDB
-    spinner.text = 'Restoring MongoDB database...';
     const mongoDumpDir = join(tempDir, 'mongodb');
     if (existsSync(mongoDumpDir)) {
+      // Get folder size for progress indication
+      const { stdout: mongoSize } = await execa('du', ['-sh', mongoDumpDir]);
+      const sizeStr = mongoSize.split('\t')[0];
+
+      spinner.text = `Restoring MongoDB database (${sizeStr})...`;
+
       // Copy MongoDB dump directory to container
       // The backup structure is: mongodb/ contains the database files directly
       await execa('docker', [
@@ -145,7 +161,6 @@ export async function restore(backupFile, options = {}) {
       ]);
 
       // Restore from the mongodb directory (contains BSON files)
-      // Use --nsInclude to restore all collections from the dump
       await execInContainer(
         composeFile,
         'mongodb',
package/src/utils/docker.js
CHANGED
@@ -182,18 +182,53 @@ export async function isServiceHealthy(composeFile, service) {
  */
 export async function waitForServiceHealthy(composeFile, service, timeout = 120) {
   const startTime = Date.now();
-
+
   while (Date.now() - startTime < timeout * 1000) {
     if (await isServiceHealthy(composeFile, service)) {
       return true;
     }
-
+
     await new Promise(resolve => setTimeout(resolve, 2000));
   }
-
+
   return false;
 }
 
+/**
+ * Wait for multiple services to be healthy
+ * @param {string} composeFile - Path to docker-compose.yml
+ * @param {string[]} services - Array of service names
+ * @param {number} timeout - Timeout in seconds (default: 60)
+ * @returns {Promise<boolean>} True if all services are healthy
+ */
+export async function waitForHealthy(composeFile, services, timeout = 60) {
+  const endTime = Date.now() + (timeout * 1000);
+
+  while (Date.now() < endTime) {
+    try {
+      const serviceStatuses = await getServiceStatus(composeFile);
+
+      const allHealthy = services.every(serviceName => {
+        const service = serviceStatuses.find(s => s.Service === serviceName);
+        if (!service) return false;
+
+        // Service is healthy if: running AND (healthy OR no healthcheck)
+        return service.State === 'running' &&
+          (service.Health === 'healthy' || service.Health === undefined || service.Health === '');
+      });
+
+      if (allHealthy) return true;
+
+    } catch (error) {
+      // Continue waiting
+    }
+
+    await new Promise(resolve => setTimeout(resolve, 2000)); // Check every 2s
+  }
+
+  throw new Error(`Services failed to become healthy within ${timeout}s: ${services.join(', ')}`);
+}
+
 /**
  * Down all services and optionally remove volumes
  * @param {string} composeFile - Path to docker-compose.yml