smoonb 0.0.88 → 0.0.89
package/package.json
CHANGED
@@ -1,10 +1,17 @@
 const chalk = require('chalk');
 const path = require('path');
 const fs = require('fs').promises;
+const readline = require('readline');
 const { spawn } = require('child_process');
 const { t } = require('../../../i18n');
 const { getPostgresServerMajor } = require('../utils');
 
+const DUMP_SIZE_FACTOR_DEFAULT = 1.4;
+const BAR_WIDTH = 24;
+const EMA_ALPHA = 0.25;
+const ETA_MIN_TICKS = 4;
+const ESTIMATE_TIMEOUT_MS = 10000;
+
 function formatBytes(bytes) {
   if (bytes === 0) return '0 B';
   const k = 1024;
@@ -32,6 +39,79 @@ async function exists(filePath) {
   }
 }
 
+/**
+ * Estimates the total cluster size (sum of all databases) via SQL. 10 s timeout; on failure returns null (fallback without an estimate).
+ * @returns {Promise<number|null>} bytes, or null
+ */
+async function estimateClusterBytes({ postgresImage, username, password, host, port }) {
+  const query = "SELECT COALESCE(sum(pg_database_size(datname)),0) FROM pg_database WHERE datistemplate = false;";
+  const args = [
+    'run', '--rm', '--network', 'host',
+    '-e', `PGPASSWORD=${password}`,
+    postgresImage, 'psql',
+    '-h', host, '-p', port, '-U', username, '-d', 'postgres',
+    '-t', '-A', '-c', query
+  ];
+  return new Promise((resolve) => {
+    const proc = spawn('docker', args, { stdio: ['ignore', 'pipe', 'pipe'] });
+    let stdout = '';
+    let done = false;
+    const timeout = setTimeout(() => {
+      if (done) return;
+      done = true;
+      try { proc.kill('SIGKILL'); } catch { }
+      resolve(null); // timeout: continue without an estimate
+    }, ESTIMATE_TIMEOUT_MS);
+    proc.stdout.on('data', (chunk) => { stdout += chunk.toString(); });
+    proc.on('close', (code) => {
+      if (done) return;
+      done = true;
+      clearTimeout(timeout);
+      if (code !== 0) {
+        resolve(null);
+        return;
+      }
+      const trimmed = stdout.trim();
+      const bytes = parseInt(trimmed, 10);
+      if (Number.isNaN(bytes) || bytes < 0) {
+        resolve(null);
+        return;
+      }
+      resolve(bytes);
+    });
+    proc.on('error', () => {
+      if (done) return;
+      done = true;
+      clearTimeout(timeout);
+      resolve(null);
+    });
+  });
+}
+
+/**
+ * Builds a progress line: optional bar + size, elapsed time, speed, and ETA (when an estimate exists).
+ * percent/etaSeconds null = indeterminate mode (no % and no ETA).
+ */
+function renderProgressLine({ percent, width, sizeBytes, elapsedMs, speedBps, etaSeconds, estimated, getT }) {
+  const sizeStr = formatBytes(sizeBytes);
+  const elapsedStr = formatDuration(elapsedMs);
+  const speedStr = formatBytes(Math.max(0, speedBps)) + '/s';
+  let bar = '';
+  if (estimated && percent != null && percent >= 0) {
+    const filled = Math.round((percent / 100) * width);
+    const n = Math.min(filled, width);
+    bar = `[${'#'.repeat(n)}${'-'.repeat(width - n)}] ${Math.min(100, Math.floor(percent))}% (~) `;
+  } else {
+    bar = ' … ';
+  }
+  let etaStr = '';
+  if (estimated && etaSeconds != null && etaSeconds > 0) {
+    const etaLabel = getT ? getT('backup.steps.database.progress.eta') : 'ETA';
+    etaStr = ` | ${etaLabel} ~${formatDuration(etaSeconds * 1000)}`;
+  }
+  return ` ${bar}| ${sizeStr} | ${elapsedStr} | ${speedStr}${etaStr}`;
+}
+
 /**
  * Step 2: full database backup via pg_dumpall in Docker (identical to the Dashboard)
  * With progress feedback: file size, speed, and elapsed time.
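For a concrete sense of the new helpers, a minimal usage sketch (not part of the package diff; it assumes renderProgressLine and the module's own formatBytes/formatDuration are in scope, and the exact size/duration formatting is approximate):

    // Hypothetical call, mirroring what the polling loop passes in:
    const line = renderProgressLine({
      percent: 45,
      width: 24,                      // BAR_WIDTH
      sizeBytes: 450 * 1024 * 1024,
      elapsedMs: 90000,
      speedBps: 5 * 1024 * 1024,
      etaSeconds: 110,
      estimated: true,
      getT: null                      // falls back to the plain 'ETA' label
    });
    // Roughly: " [###########-------------] 45% (~) | 450 MB | 1m 30s | 5 MB/s | ETA ~1m 50s"

The "(~)" marker flags the percentage as an estimate, since expectedBytes is itself only the cluster size times a heuristic factor.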
@@ -75,6 +155,18 @@ module.exports = async ({ databaseUrl, backupDir, postgresMajor: contextMajor })
   const backupDirAbs = path.resolve(backupDir);
   const outputPath = path.join(backupDirAbs, fileName);
 
+  // Optional estimate: cluster size * factor (a logical dump is usually larger). On failure: no % / ETA.
+  let expectedBytes = null;
+  try {
+    const clusterBytes = await estimateClusterBytes({ postgresImage, username, password, host, port });
+    if (clusterBytes != null && clusterBytes > 0) {
+      const factor = parseFloat(process.env.SMOONB_DUMP_SIZE_FACTOR || '') || DUMP_SIZE_FACTOR_DEFAULT;
+      expectedBytes = Math.floor(clusterBytes * factor);
+    }
+  } catch {
+    expectedBytes = null;
+  }
+
   const dockerArgs = [
     'run', '--rm', '--network', 'host',
     '-v', `${backupDirAbs}:/host`,
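Worked numbers for the estimate, under the assumption of a 2 GiB cluster (hypothetical figures):

    // Sum of pg_database_size reported by estimateClusterBytes: 2 GiB.
    const clusterBytes = 2 * 1024 ** 3;                      // 2147483648 bytes
    const factor = parseFloat(process.env.SMOONB_DUMP_SIZE_FACTOR || '') || 1.4;
    // With the default factor the dump is expected at ~2.8 GiB:
    const expectedBytes = Math.floor(clusterBytes * factor); // 3006477107

Setting SMOONB_DUMP_SIZE_FACTOR lets users correct the heuristic when their logical dumps diverge from the 1.4x default.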
@@ -92,39 +184,107 @@ module.exports = async ({ databaseUrl, backupDir, postgresMajor: contextMajor })
   let lastSize = 0;
   let lastTime = startTime;
   let ticker = null;
+  let tickCount = 0;
+  let smoothedSpeed = 0;
+  const useTty = Boolean(process.stdout.isTTY);
+  let lastProgressLine = '';
 
   const runDump = () => new Promise((resolve, reject) => {
     const proc = spawn('docker', dockerArgs, { stdio: ['ignore', 'pipe', 'pipe'] });
 
-    proc.stderr.on('data', (chunk) =>
+    proc.stderr.on('data', (chunk) => {
+      process.stderr.write(chunk);
+      if (useTty && lastProgressLine) {
+        readline.cursorTo(process.stdout, 0);
+        process.stdout.write(lastProgressLine);
+      }
+    });
 
     const pollFile = async () => {
       if (!(await exists(outputPath))) return;
       const stat = await fs.stat(outputPath).catch(() => null);
       if (!stat) return;
-
-      const
-      const
-      const
-
-
-
+      tickCount++;
+      const currentSize = stat.size;
+      const now = Date.now();
+      const elapsed = now - startTime;
+      const deltaTime = (now - lastTime) / 1000;
+      const speed = deltaTime > 0 ? (currentSize - lastSize) / deltaTime : 0;
+      if (speed > 0) {
+        smoothedSpeed = tickCount === 1 ? speed : EMA_ALPHA * speed + (1 - EMA_ALPHA) * smoothedSpeed;
+      }
+      lastSize = currentSize;
+      lastTime = now;
+
+      const estimated = expectedBytes != null && expectedBytes > 0;
+      let percent = null;
+      let etaSeconds = null;
+      if (estimated) {
+        percent = Math.min(99, Math.floor((currentSize / expectedBytes) * 100));
+        if (smoothedSpeed > 0 && currentSize < expectedBytes && tickCount >= ETA_MIN_TICKS) {
+          etaSeconds = (expectedBytes - currentSize) / smoothedSpeed;
+        }
+      }
+
+      const line = renderProgressLine({
+        percent,
+        width: BAR_WIDTH,
+        sizeBytes: currentSize,
+        elapsedMs: elapsed,
+        speedBps: smoothedSpeed || speed,
+        etaSeconds,
+        estimated,
+        getT
+      });
+      lastProgressLine = line;
+
+      if (useTty) {
+        readline.clearLine(process.stdout, 0);
+        readline.cursorTo(process.stdout, 0);
+        process.stdout.write(chalk.white(line));
+      } else if (tickCount % 30 === 1 && elapsed >= 15000) {
+        process.stdout.write(chalk.white(line) + '\n');
+      }
     };
 
     ticker = setInterval(pollFile, 500);
 
-    proc.on('close', (code) => {
+    proc.on('close', async (code) => {
       if (ticker) {
         clearInterval(ticker);
         ticker = null;
       }
-
+      if (useTty) {
+        readline.clearLine(process.stdout, 0);
+        readline.cursorTo(process.stdout, 0);
+      }
       if (code !== 0) {
         reject(new Error(`pg_dumpall exited with code ${code}`));
-
-        resolve();
+        return;
       }
+      // Success: show 100% once (estimated) with the real final size
+      if (expectedBytes != null && expectedBytes > 0) {
+        let finalSize = lastSize;
+        try {
+          const stat = await fs.stat(outputPath).catch(() => null);
+          if (stat) finalSize = stat.size;
+        } catch { }
+        const finalLine = renderProgressLine({
+          percent: 100,
+          width: BAR_WIDTH,
+          sizeBytes: finalSize,
+          elapsedMs: Date.now() - startTime,
+          speedBps: smoothedSpeed,
+          etaSeconds: null,
+          estimated: true,
+          getT
+        });
+        process.stdout.write(chalk.white(finalLine) + '\n');
+      }
+      if (useTty) {
+        readline.cursorTo(process.stdout, 0);
+      }
+      resolve();
     });
 
     proc.on('error', (err) => {
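The speed readout is smoothed with a standard exponential moving average, which is why a single slow or fast poll barely moves the ETA. A self-contained sketch with made-up per-tick speeds:

    const EMA_ALPHA = 0.25;
    let smoothedSpeed = 0;
    let tickCount = 0;
    // Made-up speeds in MB/s; the spike at tick 3 is damped.
    for (const speed of [10, 10, 40, 10]) {
      tickCount++;
      smoothedSpeed = tickCount === 1 ? speed : EMA_ALPHA * speed + (1 - EMA_ALPHA) * smoothedSpeed;
      console.log(tickCount, smoothedSpeed.toFixed(2)); // 1 10.00, 2 10.00, 3 17.50, 4 15.63
    }

The ETA is additionally held back until ETA_MIN_TICKS polls have passed, and in the non-TTY branch the 500 ms ticker plus the tickCount % 30 === 1 guard works out to at most one printed line every 15 seconds.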
@@ -157,7 +317,12 @@ module.exports = async ({ databaseUrl, backupDir, postgresMajor: contextMajor })
       if (!stat) return;
       const size = stat.size;
       const elapsed = Date.now() - gzipStart;
-      process.stdout.
+      if (process.stdout.isTTY) {
+        readline.clearLine(process.stdout, 0);
+        readline.cursorTo(process.stdout, 0);
+      }
+      process.stdout.write(` 📦 ${formatBytes(size)} | ${formatDuration(elapsed)}`);
+      if (process.stdout.isTTY) process.stdout.write('\r');
     };
 
     gzipTicker = setInterval(pollGzip, 300);
@@ -167,7 +332,10 @@ module.exports = async ({ databaseUrl, backupDir, postgresMajor: contextMajor })
       clearInterval(gzipTicker);
       gzipTicker = null;
     }
-    process.stdout.
+    if (process.stdout.isTTY) {
+      readline.clearLine(process.stdout, 0);
+      readline.cursorTo(process.stdout, 0);
+    }
     if (code !== 0) {
       reject(new Error(`gzip exited with code ${code}`));
     } else {
package/src/i18n/locales/en.json
CHANGED

@@ -334,6 +334,7 @@
   "backup.steps.database.creating": "Creating full backup via pg_dumpall...",
   "backup.steps.database.postgresImage": "Postgres server major {major} detected. Using image: {image}",
   "backup.steps.database.executing": "Executing pg_dumpall via Docker...",
+  "backup.steps.database.progress.eta": "ETA",
   "backup.steps.database.separated.title": "PostgreSQL Database Backup (separate SQL files)...",
   "backup.steps.database.separated.creating": "Creating separate SQL backups via Supabase CLI...",
   "backup.steps.database.separated.exportingSchema": "Exporting schema...",

@@ -334,6 +334,7 @@
   "backup.steps.database.creating": "Criando backup completo via pg_dumpall...",
   "backup.steps.database.postgresImage": "Postgres servidor major {major} detectado. Usando imagem: {image}",
   "backup.steps.database.executing": "Executando pg_dumpall via Docker...",
+  "backup.steps.database.progress.eta": "ETA",
   "backup.steps.database.separated.title": "Backup da Database PostgreSQL (arquivos SQL separados)...",
   "backup.steps.database.separated.creating": "Criando backups SQL separados via Supabase CLI...",
   "backup.steps.database.separated.exportingSchema": "Exportando schema...",
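The new progress.eta key is what renderProgressLine looks up through its getT parameter; a minimal sketch of the expected wiring (assuming getT simply delegates to the package's t helper):

    const { t } = require('../../../i18n');
    const getT = (key) => t(key);               // hypothetical wrapper passed into renderProgressLine
    getT('backup.steps.database.progress.eta'); // "ETA" in both the English and Portuguese locales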