smoonb 1.0.5 → 1.0.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -4,6 +4,21 @@ All notable changes to this project will be documented in this file.
 
  The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).
 
+ ## [1.0.6] - 2026-02-21
+
+ ### Changed
+
+ - **Storage backup (step 6) — robustness and feedback:**
+ - Added `withTimeout` helper: each Storage `.list()` call times out after 30s; each file `.download()` times out after 6 minutes.
+ - Added `withRetry` helper: up to 7 attempts with exponential backoff (2s → 4s → 8s → …) on both listing and download failures.
+ - Inline progress during file listing: a live `\r`-updated line shows the current folder being scanned and the running file count (`→ Scanning bucket/folder (N found)`). No more silent hangs.
+ - After listing completes, a summary line shows the total number of files found.
+ - Retry warnings show the attempt number, the failure reason and the next retry delay.
+ - Download failures after all retries are counted as `filesSkipped` and reported in the bucket summary.
+ - The bucket summary now distinguishes full success from partial success with skips.
+
+ ---
+
  ## [1.0.5] - 2026-02-21
 
  ### Added
@@ -49,5 +64,6 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).
 
  ---
 
+ [1.0.6]: https://github.com/almmello/smoonb/releases/tag/v1.0.6
  [1.0.5]: https://github.com/almmello/smoonb/releases/tag/v1.0.5
  [1.0.4]: https://github.com/almmello/smoonb/releases/tag/v1.0.4
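
The inline progress described in the 1.0.6 notes above relies on rewriting a single terminal line with a carriage return instead of printing one line per folder. A minimal, self-contained sketch of that idea (the folder names and the `reportProgress` helper are illustrative only, not the package's code):

```js
// Minimal sketch of a single-line progress indicator driven by \r (illustrative only).
// Each write returns the cursor to column 0 and overwrites the previous text;
// one final '\n' must be printed when scanning ends, or the next log line overwrites it.
function reportProgress(folder, count) {
  process.stdout.write(`\r → Scanning ${folder} (${count} found)   `);
}

let found = 0;
for (const folder of ['avatars/', 'avatars/2024/', 'docs/']) {
  for (let i = 0; i < 3; i++) {
    found++;
    reportProgress(folder, found); // the same line is rewritten on every call
  }
}
process.stdout.write('\n'); // close the progress line, as the step-6 caller does
```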
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "smoonb",
- "version": "1.0.5",
+ "version": "1.0.6",
  "description": "Complete Supabase backup and migration tool. https://www.smoonb.com/#price",
  "preferGlobal": false,
  "preventGlobalInstall": true,
@@ -7,9 +7,52 @@ const { ensureDir, writeJson } = require('../../../utils/fsx');
  const { confirm } = require('../../../utils/prompt');
  const { t } = require('../../../i18n');
 
+ const TIMEOUT_LIST_MS = 30_000; // 30s per listing call
+ const TIMEOUT_DOWNLOAD_MS = 360_000; // 6 min per file download
+ const MAX_RETRIES = 7;
+ const RETRY_BASE_DELAY_MS = 2_000; // exponential backoff: 2s → 4s → 8s → ...
+
+ /**
+ * Runs a promise with a timeout. Throws an Error when the time runs out.
+ * @param {Promise} promise
+ * @param {number} ms
+ * @returns {Promise}
+ */
+ function withTimeout(promise, ms) {
+ let id;
+ const timer = new Promise((_, reject) => {
+ id = setTimeout(() => reject(new Error(`Timeout (${ms / 1000}s)`)), ms);
+ });
+ return Promise.race([promise, timer]).finally(() => clearTimeout(id));
+ }
+
+ /**
+ * Runs fn() with automatic retries and exponential backoff.
+ * @param {Function} fn - Promise factory (called on every attempt)
+ * @param {number} maxAttempts
+ * @param {number} baseDelayMs
+ * @param {Function} [onRetry] - Callback(attempt, max, err, delayMs)
+ */
+ async function withRetry(fn, maxAttempts = MAX_RETRIES, baseDelayMs = RETRY_BASE_DELAY_MS, onRetry) {
+ let lastError;
+ for (let attempt = 1; attempt <= maxAttempts; attempt++) {
+ try {
+ return await fn();
+ } catch (err) {
+ lastError = err;
+ if (attempt < maxAttempts) {
+ const delay = baseDelayMs * Math.pow(2, attempt - 1);
+ if (onRetry) onRetry(attempt, maxAttempts, err, delay);
+ await new Promise(r => setTimeout(r, delay));
+ }
+ }
+ }
+ throw lastError;
+ }
+
  /**
  * Step 6: Storage backup via the Supabase API
- * Now does a full backup: metadata + download of all files + ZIP in the Dashboard format
+ * Full backup: metadata + download of all files + ZIP in the Dashboard format
  */
  module.exports = async ({ projectId, accessToken, backupDir, supabaseUrl, supabaseServiceKey }) => {
  try {
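
The two helpers added above are designed to be composed: `withRetry` receives a factory so that every attempt builds a fresh promise, and `withTimeout` bounds each individual attempt. A small usage sketch under that assumption, with the helpers and constants above in scope (`slowCall` is a stand-in, not part of the package):

```js
// Hypothetical slow operation standing in for a Storage .list()/.download() call.
const slowCall = () => new Promise(resolve => setTimeout(() => resolve('ok'), 5_000));

// Each attempt gets its own withTimeout wrapper, so the 30s / 6 min budget applies
// per attempt, not to the whole retry sequence. With MAX_RETRIES = 7 and a 2s base,
// the waits between failed attempts grow 2s, 4s, 8s, 16s, 32s, 64s.
withRetry(
  () => withTimeout(slowCall(), TIMEOUT_LIST_MS),
  MAX_RETRIES,
  RETRY_BASE_DELAY_MS,
  (attempt, max, err, delay) =>
    console.log(`attempt ${attempt}/${max} failed (${err.message}), retrying in ${delay / 1000}s`)
).then(console.log, console.error);
```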
@@ -18,15 +61,14 @@ module.exports = async ({ projectId, accessToken, backupDir, supabaseUrl, supaba
  await ensureDir(storageDir);
 
  console.log(chalk.white(` - ${getT('backup.steps.storage.listing')}`));
-
- // Use fetch directly against the Management API with a Personal Access Token
+
  const storageResponse = await fetch(`https://api.supabase.com/v1/projects/${projectId}/storage/buckets`, {
- headers: {
+ headers: {
  'Authorization': `Bearer ${accessToken}`,
  'Content-Type': 'application/json'
  }
  });
-
+
  if (!storageResponse.ok) {
  console.log(chalk.yellow(` ⚠️ ${getT('backup.steps.storage.listBucketsError', { status: storageResponse.status, statusText: storageResponse.statusText })}`));
  return { success: false, buckets: [] };
@@ -44,20 +86,16 @@ module.exports = async ({ projectId, accessToken, backupDir, supabaseUrl, supaba
 
  console.log(chalk.white(` - ${getT('backup.steps.storage.found', { count: buckets.length })}`));
 
- // Validate the Supabase credentials needed to download files
  if (!supabaseUrl || !supabaseServiceKey) {
  console.log(chalk.yellow(` ⚠️ ${getT('backup.steps.storage.credentialsNotAvailable')}`));
  return await backupMetadataOnly(buckets, storageDir, projectId, accessToken);
  }
 
- // Create a Supabase client for file downloads
  const supabase = createClient(supabaseUrl, supabaseServiceKey);
 
- // Create a temporary structure for the downloaded files
  const tempStorageDir = path.join(backupDir, 'storage_temp');
  await ensureDir(tempStorageDir);
-
- // Create the structure: storage_temp/project-id/bucket-name/files...
+
  const projectStorageDir = path.join(tempStorageDir, projectId);
  await ensureDir(projectStorageDir);
 
@@ -67,10 +105,9 @@ module.exports = async ({ projectId, accessToken, backupDir, supabaseUrl, supaba
  for (const bucket of buckets || []) {
  try {
  console.log(chalk.white(` - ${getT('backup.steps.storage.processing', { bucketName: bucket.name })}`));
-
- // List the bucket objects via the Management API with a Personal Access Token
+
  const objectsResponse = await fetch(`https://api.supabase.com/v1/projects/${projectId}/storage/buckets/${bucket.name}/objects`, {
- headers: {
+ headers: {
  'Authorization': `Bearer ${accessToken}`,
  'Content-Type': 'application/json'
  }
@@ -90,51 +127,85 @@ module.exports = async ({ projectId, accessToken, backupDir, supabaseUrl, supaba
  objects: objects || []
  };
 
- // Save the bucket information
  const bucketPath = path.join(storageDir, `${bucket.name}.json`);
  await writeJson(bucketPath, bucketInfo);
 
- // Download every file in the bucket
  const bucketDir = path.join(projectStorageDir, bucket.name);
  await ensureDir(bucketDir);
-
- // List all files recursively using the Supabase client
+
+ // ── Listing with inline progress ──────────────────────────────
  console.log(chalk.white(` - ${getT('backup.steps.storage.listingFiles', { bucketName: bucket.name })}`));
- const allFiles = await listAllFilesRecursively(supabase, bucket.name, '');
-
+ const counter = { total: 0 };
+ let allFiles = [];
+ let listingFailed = false;
+
+ try {
+ allFiles = await listAllFilesRecursively(supabase, bucket.name, '', counter, getT);
+ } catch (listErr) {
+ listingFailed = true;
+ console.log(chalk.yellow(` ⚠️ ${getT('backup.steps.storage.listFailed', { bucketName: bucket.name, message: listErr.message })}`));
+ } finally {
+ // Close the progress line (\r) started by listAllFilesRecursively
+ process.stdout.write('\n');
+ }
+
+ if (!listingFailed) {
+ console.log(chalk.white(` - ${getT('backup.steps.storage.totalFound', { count: allFiles.length })}`));
+ }
+
+ // ── Download with retry ──────────────────────────────────────────
  let filesDownloaded = 0;
+ let filesSkipped = 0;
+
  if (allFiles.length > 0) {
  console.log(chalk.white(` - ${getT('backup.steps.storage.downloading', { count: allFiles.length, bucketName: bucket.name })}`));
-
+
  for (const filePath of allFiles) {
  try {
- // Download the file from Storage
- const { data: fileData, error: downloadError } = await supabase.storage
- .from(bucket.name)
- .download(filePath);
+ let fileData = null;
+ let downloadError = null;
+
+ await withRetry(
+ async () => {
+ const result = await withTimeout(
+ supabase.storage.from(bucket.name).download(filePath),
+ TIMEOUT_DOWNLOAD_MS
+ );
+ // Structural Supabase errors (e.g. 404) must not be retried
+ if (result.error) {
+ downloadError = result.error;
+ fileData = null;
+ } else {
+ fileData = result.data;
+ downloadError = null;
+ }
+ },
+ MAX_RETRIES,
+ RETRY_BASE_DELAY_MS,
+ (attempt, max, err, delay) => {
+ console.log(chalk.yellow(` ⚠️ ${getT('backup.steps.storage.downloadRetry', { path: filePath, attempt, max, delay: delay / 1000 })}`));
+ }
+ );
 
  if (downloadError) {
+ filesSkipped++;
  console.log(chalk.yellow(` ⚠️ ${getT('backup.steps.storage.downloadError', { path: filePath, message: downloadError.message })}`));
  continue;
  }
 
- // Create the local folder structure if needed
  const localFilePath = path.join(bucketDir, filePath);
- const localFileDir = path.dirname(localFilePath);
- await ensureDir(localFileDir);
+ await ensureDir(path.dirname(localFilePath));
 
- // Save the file locally
- const arrayBuffer = await fileData.arrayBuffer();
- const buffer = Buffer.from(arrayBuffer);
+ const buffer = Buffer.from(await fileData.arrayBuffer());
  await fs.writeFile(localFilePath, buffer);
  filesDownloaded++;
 
- // Show progress every 10 files or on the last one
  if (filesDownloaded % 10 === 0 || filesDownloaded === allFiles.length) {
  console.log(chalk.white(` - ${getT('backup.steps.storage.downloaded', { current: filesDownloaded, total: allFiles.length })}`));
  }
  } catch (fileError) {
- console.log(chalk.yellow(` ⚠️ ${getT('backup.steps.storage.processFileError', { path: filePath, message: fileError.message })}`));
+ filesSkipped++;
+ console.log(chalk.yellow(` ⚠️ ${getT('backup.steps.storage.downloadFailed', { path: filePath, message: fileError.message })}`));
  }
  }
  }
@@ -143,37 +214,36 @@ module.exports = async ({ projectId, accessToken, backupDir, supabaseUrl, supaba
  processedBuckets.push({
  name: bucket.name,
  objectCount: objects?.length || 0,
- filesDownloaded: filesDownloaded,
+ filesDownloaded,
+ filesSkipped,
  totalFiles: allFiles.length
  });
 
- console.log(chalk.green(` ✅ ${getT('backup.steps.storage.bucketDone', { bucketName: bucket.name, downloaded: filesDownloaded, total: allFiles.length })}`));
+ if (filesSkipped > 0) {
+ console.log(chalk.yellow(` ⚠️ ${getT('backup.steps.storage.bucketDoneWithSkips', { bucketName: bucket.name, downloaded: filesDownloaded, skipped: filesSkipped, total: allFiles.length })}`));
+ } else {
+ console.log(chalk.green(` ✅ ${getT('backup.steps.storage.bucketDone', { bucketName: bucket.name, downloaded: filesDownloaded, total: allFiles.length })}`));
+ }
  } catch (error) {
  console.log(chalk.yellow(` ⚠️ ${getT('backup.steps.storage.processBucketError', { bucketName: bucket.name, message: error.message })}`));
  }
  }
 
- // Create the ZIP in the Dashboard format: {project-id}.storage.zip
+ // ── Create the ZIP in the Dashboard format ───────────────────────────────
  console.log(chalk.white(`\n - ${getT('backup.steps.storage.creatingZip')}`));
  const zipFileName = `${projectId}.storage.zip`;
  const zipFilePath = path.join(backupDir, zipFileName);
-
+
  const zip = new AdmZip();
-
- // Add the whole folder structure to the ZIP
- // Structure: project-id/bucket-name/files...
  await addDirectoryToZip(zip, projectStorageDir, projectId);
-
- // Save the ZIP
  zip.writeZip(zipFilePath);
  const zipStats = await fs.stat(zipFilePath);
  const zipSizeMB = (zipStats.size / (1024 * 1024)).toFixed(2);
-
+
  console.log(chalk.green(` ✅ ${getT('backup.steps.storage.zipCreated', { fileName: zipFileName, size: zipSizeMB })}`));
 
- // Ask the user whether to clean up the temporary structure
  const shouldCleanup = await confirm(` ${getT('backup.steps.storage.cleanup')}`, false);
-
+
  if (shouldCleanup) {
  console.log(chalk.white(` - ${getT('backup.steps.storage.cleanupRemoving')}`));
  try {
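
The ZIP step above walks storage_temp with the project's own addDirectoryToZip and adds each file individually. For comparison, adm-zip can also ingest a whole folder in one call; a hedged sketch of an equivalent Dashboard-style archive (not the package's code, and `zipProjectStorage` is a hypothetical name):

```js
const AdmZip = require('adm-zip');
const path = require('path');

// Sketch: produce the same {project-id}.storage.zip layout using adm-zip's folder
// ingestion instead of walking the tree manually.
function zipProjectStorage(projectStorageDir, backupDir, projectId) {
  const zip = new AdmZip();
  // addLocalFolder(localPath, zipPath) mirrors the folder under the given entry prefix,
  // so entries come out as <project-id>/<bucket>/<file>, matching addDirectoryToZip.
  zip.addLocalFolder(projectStorageDir, projectId);
  const zipFilePath = path.join(backupDir, `${projectId}.storage.zip`);
  zip.writeZip(zipFilePath);
  return zipFilePath;
}
```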
@@ -187,11 +257,11 @@ module.exports = async ({ projectId, accessToken, backupDir, supabaseUrl, supaba
  }
 
  console.log(chalk.green(`✅ ${getT('backup.steps.storage.done', { buckets: processedBuckets.length, files: totalFilesDownloaded })}`));
- return {
- success: true,
+ return {
+ success: true,
  buckets: processedBuckets,
  zipFile: zipFileName,
- zipSizeMB: zipSizeMB,
+ zipSizeMB,
  totalFiles: totalFilesDownloaded,
  tempDirCleaned: shouldCleanup
  };
@@ -202,8 +272,10 @@ module.exports = async ({ projectId, accessToken, backupDir, supabaseUrl, supaba
  }
  };
 
+ // ── Helper functions ────────────────────────────────────────────────────────
+
  /**
- * Metadata-only backup (fallback when Supabase credentials are not available)
+ * Metadata-only backup (fallback without Supabase credentials)
  */
  async function backupMetadataOnly(buckets, storageDir, projectId, accessToken) {
  const processedBuckets = [];
@@ -211,9 +283,9 @@ async function backupMetadataOnly(buckets, storageDir, projectId, accessToken) {
  for (const bucket of buckets || []) {
  try {
  console.log(chalk.white(` - Processando bucket: ${bucket.name}`));
-
+
  const objectsResponse = await fetch(`https://api.supabase.com/v1/projects/${projectId}/storage/buckets/${bucket.name}/objects`, {
- headers: {
+ headers: {
  'Authorization': `Bearer ${accessToken}`,
  'Content-Type': 'application/json'
  }
@@ -236,11 +308,7 @@ async function backupMetadataOnly(buckets, storageDir, projectId, accessToken) {
  const bucketPath = path.join(storageDir, `${bucket.name}.json`);
  await writeJson(bucketPath, bucketInfo);
 
- processedBuckets.push({
- name: bucket.name,
- objectCount: objects?.length || 0
- });
-
+ processedBuckets.push({ name: bucket.name, objectCount: objects?.length || 0 });
  console.log(chalk.green(` ✅ Bucket ${bucket.name}: ${objects?.length || 0} objetos`));
  } catch (error) {
  console.log(chalk.yellow(` ⚠️ Erro ao processar bucket ${bucket.name}: ${error.message}`));
@@ -252,59 +320,97 @@ async function backupMetadataOnly(buckets, storageDir, projectId, accessToken) {
  }
 
  /**
- * Lists all files of a Storage bucket recursively
+ * Recursively lists every file in a bucket with:
+ * - inline progress via \r (current folder + running total)
+ * - a timeout per call (TIMEOUT_LIST_MS)
+ * - retry with exponential backoff (MAX_RETRIES)
+ *
+ * The CALLER is responsible for emitting \n after the return to close the \r line.
+ *
+ * @param {object} supabase
+ * @param {string} bucketName
+ * @param {string} folderPath
+ * @param {{ total: number }} counter - Counter shared across recursive calls
+ * @param {Function} getT - Translation function
+ * @returns {Promise<string[]>}
  */
- async function listAllFilesRecursively(supabase, bucketName, folderPath = '') {
+ async function listAllFilesRecursively(supabase, bucketName, folderPath = '', counter = { total: 0 }, getT) {
  const allFiles = [];
- const getT = global.smoonbI18n?.t || t;
-
+ const label = folderPath ? `${bucketName}/${folderPath}` : `${bucketName}/`;
+ const displayLabel = label.length > 55 ? `...${label.slice(-52)}` : label;
+
+ // Inline progress: show the current folder + files found so far
+ process.stdout.write(
+ chalk.gray(`\r → ${getT('backup.steps.storage.scanningFolder', { path: displayLabel, count: counter.total })} `)
+ );
+
+ let result;
  try {
- // List the files and folders at the current path
- const { data: items, error } = await supabase.storage
- .from(bucketName)
- .list(folderPath, {
- limit: 1000,
- sortBy: { column: 'name', order: 'asc' }
- });
+ result = await withRetry(
+ () => withTimeout(
+ supabase.storage.from(bucketName).list(folderPath, {
+ limit: 1000,
+ sortBy: { column: 'name', order: 'asc' }
+ }),
+ TIMEOUT_LIST_MS
+ ),
+ MAX_RETRIES,
+ RETRY_BASE_DELAY_MS,
+ (attempt, max, err, delay) => {
+ // Close the \r line before printing the warning
+ process.stdout.write('\n');
+ console.log(chalk.yellow(` ⚠️ ${getT('backup.steps.storage.listRetry', { path: displayLabel, attempt, max, delay: delay / 1000, message: err.message })}`));
+ }
+ );
+ } catch (err) {
+ // All attempts exhausted; propagate so the caller can handle it
+ throw err;
+ }
 
- if (error) {
- console.log(chalk.yellow(` ⚠️ ${getT('backup.steps.storage.listError', { path: folderPath || 'raiz', message: error.message })}`));
- return allFiles;
- }
+ const { data: items, error } = result;
 
- if (!items || items.length === 0) {
- return allFiles;
- }
+ if (error) {
+ process.stdout.write('\n');
+ console.log(chalk.yellow(` ⚠️ ${getT('backup.steps.storage.listError', { path: label, message: error.message })}`));
+ return allFiles;
+ }
 
- for (const item of items) {
- const itemPath = folderPath ? `${folderPath}/${item.name}` : item.name;
-
- if (item.id === null) {
- // It is a folder, recurse into it
- const subFiles = await listAllFilesRecursively(supabase, bucketName, itemPath);
- allFiles.push(...subFiles);
- } else {
- // It is a file
- allFiles.push(itemPath);
- }
+ if (!items || items.length === 0) {
+ return allFiles;
+ }
+
+ for (const item of items) {
+ const itemPath = folderPath ? `${folderPath}/${item.name}` : item.name;
+
+ if (item.id === null) {
+ // Folder: recurse into it
+ const subFiles = await listAllFilesRecursively(supabase, bucketName, itemPath, counter, getT);
+ allFiles.push(...subFiles);
+ } else {
+ // File
+ allFiles.push(itemPath);
+ counter.total++;
+ // Refresh the display with the new total
+ const dl = label.length > 55 ? `...${label.slice(-52)}` : label;
+ process.stdout.write(
+ chalk.gray(`\r → ${getT('backup.steps.storage.scanningFolder', { path: dl, count: counter.total })} `)
+ );
  }
- } catch (error) {
- console.log(chalk.yellow(` ⚠️ ${getT('backup.steps.storage.processError', { path: folderPath || 'raiz', message: error.message })}`));
  }
 
  return allFiles;
  }
 
  /**
- * Recursively adds a directory to the ZIP, keeping the folder structure
+ * Recursively adds a directory to the ZIP while keeping the folder structure
  */
  async function addDirectoryToZip(zip, dirPath, basePath = '') {
  const entries = await fs.readdir(dirPath, { withFileTypes: true });
-
+
  for (const entry of entries) {
  const fullPath = path.join(dirPath, entry.name);
  const zipPath = basePath ? `${basePath}/${entry.name}` : entry.name;
-
+
  if (entry.isDirectory()) {
  await addDirectoryToZip(zip, fullPath, zipPath);
  } else {
@@ -313,4 +419,3 @@ async function addDirectoryToZip(zip, dirPath, basePath = '') {
  }
  }
  }
-
@@ -435,9 +435,16 @@
  "backup.steps.storage.found": "Found {count} buckets",
  "backup.steps.storage.processing": "Processing bucket: {bucketName}",
  "backup.steps.storage.listingFiles": "Listing files from bucket {bucketName}...",
+ "backup.steps.storage.scanningFolder": "Scanning {path} ({count} found)",
+ "backup.steps.storage.totalFound": "Total: {count} file(s) found",
+ "backup.steps.storage.listRetry": "Attempt {attempt}/{max} to list {path} failed ({message}). Retrying in {delay}s...",
+ "backup.steps.storage.listFailed": "Failed to list bucket {bucketName} after all attempts: {message}",
  "backup.steps.storage.downloading": "Downloading {count} file(s) from bucket {bucketName}...",
  "backup.steps.storage.downloaded": "Downloaded {current}/{total} file(s)...",
+ "backup.steps.storage.downloadRetry": "File {path}: attempt {attempt}/{max} failed. Retrying in {delay}s...",
+ "backup.steps.storage.downloadFailed": "File {path}: failed after all attempts: {message}",
  "backup.steps.storage.bucketDone": "Bucket {bucketName}: {downloaded}/{total} file(s) downloaded",
+ "backup.steps.storage.bucketDoneWithSkips": "Bucket {bucketName}: {downloaded}/{total} file(s) downloaded, {skipped} skipped",
  "backup.steps.storage.creatingZip": "Creating ZIP file in Dashboard format...",
  "backup.steps.storage.zipCreated": "ZIP file created: {fileName} ({size} MB)",
  "backup.steps.storage.cleanup": "Do you want to clean storage_temp after backup",
@@ -435,9 +435,16 @@
  "backup.steps.storage.found": "Encontrados {count} buckets",
  "backup.steps.storage.processing": "Processando bucket: {bucketName}",
  "backup.steps.storage.listingFiles": "Listando arquivos do bucket {bucketName}...",
+ "backup.steps.storage.scanningFolder": "Escaneando {path} ({count} encontrado(s))",
+ "backup.steps.storage.totalFound": "Total: {count} arquivo(s) encontrado(s)",
+ "backup.steps.storage.listRetry": "Tentativa {attempt}/{max} ao listar {path} falhou ({message}). Retentando em {delay}s...",
+ "backup.steps.storage.listFailed": "Falha ao listar bucket {bucketName} após todas as tentativas: {message}",
  "backup.steps.storage.downloading": "Baixando {count} arquivo(s) do bucket {bucketName}...",
  "backup.steps.storage.downloaded": "Baixados {current}/{total} arquivo(s)...",
+ "backup.steps.storage.downloadRetry": "Arquivo {path}: tentativa {attempt}/{max} falhou. Retentando em {delay}s...",
+ "backup.steps.storage.downloadFailed": "Arquivo {path}: falhou após todas as tentativas: {message}",
  "backup.steps.storage.bucketDone": "Bucket {bucketName}: {downloaded}/{total} arquivo(s) baixado(s)",
+ "backup.steps.storage.bucketDoneWithSkips": "Bucket {bucketName}: {downloaded}/{total} arquivo(s) baixado(s), {skipped} pulado(s)",
  "backup.steps.storage.creatingZip": "Criando arquivo ZIP no padrão do Dashboard...",
  "backup.steps.storage.zipCreated": "Arquivo ZIP criado: {fileName} ({size} MB)",
  "backup.steps.storage.cleanup": "Deseja limpar storage_temp após o backup",