underpost 3.1.3 → 3.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (92)
  1. package/.env.example +0 -2
  2. package/.github/workflows/ghpkg.ci.yml +4 -4
  3. package/.github/workflows/npmpkg.ci.yml +28 -11
  4. package/.github/workflows/publish.ci.yml +6 -0
  5. package/.github/workflows/pwa-microservices-template-page.cd.yml +4 -5
  6. package/.github/workflows/pwa-microservices-template-test.ci.yml +3 -3
  7. package/.github/workflows/release.cd.yml +13 -8
  8. package/CHANGELOG.md +396 -1
  9. package/CLI-HELP.md +53 -6
  10. package/Dockerfile +4 -2
  11. package/README.md +3 -2
  12. package/bin/build.js +18 -12
  13. package/bin/deploy.js +177 -124
  14. package/bin/file.js +3 -0
  15. package/conf.js +3 -2
  16. package/manifests/cronjobs/dd-cron/dd-cron-backup.yaml +5 -2
  17. package/manifests/cronjobs/dd-cron/dd-cron-dns.yaml +5 -2
  18. package/manifests/deployment/dd-default-development/deployment.yaml +2 -2
  19. package/manifests/deployment/dd-test-development/deployment.yaml +88 -74
  20. package/manifests/deployment/dd-test-development/proxy.yaml +13 -4
  21. package/manifests/deployment/playwright/deployment.yaml +1 -1
  22. package/nodemon.json +1 -1
  23. package/package.json +22 -15
  24. package/scripts/rhel-grpc-setup.sh +56 -0
  25. package/src/api/file/file.ref.json +18 -0
  26. package/src/api/user/user.service.js +8 -7
  27. package/src/cli/cluster.js +7 -7
  28. package/src/cli/db.js +726 -825
  29. package/src/cli/deploy.js +151 -93
  30. package/src/cli/env.js +19 -0
  31. package/src/cli/fs.js +5 -2
  32. package/src/cli/index.js +45 -2
  33. package/src/cli/kubectl.js +211 -0
  34. package/src/cli/release.js +284 -0
  35. package/src/cli/repository.js +434 -75
  36. package/src/cli/run.js +189 -34
  37. package/src/cli/secrets.js +73 -0
  38. package/src/cli/test.js +3 -3
  39. package/src/client/Default.index.js +3 -4
  40. package/src/client/components/core/AppStore.js +69 -0
  41. package/src/client/components/core/CalendarCore.js +2 -2
  42. package/src/client/components/core/DropDown.js +137 -17
  43. package/src/client/components/core/Keyboard.js +2 -2
  44. package/src/client/components/core/LogIn.js +2 -2
  45. package/src/client/components/core/LogOut.js +2 -2
  46. package/src/client/components/core/Modal.js +0 -1
  47. package/src/client/components/core/Panel.js +0 -1
  48. package/src/client/components/core/PanelForm.js +19 -19
  49. package/src/client/components/core/SocketIo.js +82 -29
  50. package/src/client/components/core/SocketIoHandler.js +75 -0
  51. package/src/client/components/core/Stream.js +143 -95
  52. package/src/client/components/core/Webhook.js +40 -7
  53. package/src/client/components/default/AppStoreDefault.js +5 -0
  54. package/src/client/components/default/LogInDefault.js +3 -3
  55. package/src/client/components/default/LogOutDefault.js +2 -2
  56. package/src/client/components/default/MenuDefault.js +5 -5
  57. package/src/client/components/default/SocketIoDefault.js +3 -51
  58. package/src/client/services/core/core.service.js +20 -8
  59. package/src/client/services/user/user.management.js +2 -2
  60. package/src/index.js +24 -1
  61. package/src/runtime/express/Dockerfile +4 -0
  62. package/src/runtime/express/Express.js +18 -1
  63. package/src/runtime/lampp/Dockerfile +13 -2
  64. package/src/runtime/lampp/Lampp.js +27 -4
  65. package/src/runtime/wp/Dockerfile +68 -0
  66. package/src/runtime/wp/Wp.js +639 -0
  67. package/src/server/auth.js +24 -1
  68. package/src/server/backup.js +57 -23
  69. package/src/server/client-build-docs.js +9 -2
  70. package/src/server/client-build.js +31 -31
  71. package/src/server/client-formatted.js +109 -57
  72. package/src/server/cron.js +23 -18
  73. package/src/server/ipfs-client.js +24 -1
  74. package/src/server/peer.js +8 -0
  75. package/src/server/runtime.js +25 -1
  76. package/src/server/start.js +3 -2
  77. package/src/ws/IoInterface.js +1 -10
  78. package/src/ws/IoServer.js +14 -33
  79. package/src/ws/core/channels/core.ws.chat.js +65 -20
  80. package/src/ws/core/channels/core.ws.mailer.js +113 -32
  81. package/src/ws/core/channels/core.ws.stream.js +90 -31
  82. package/src/ws/core/core.ws.connection.js +12 -33
  83. package/src/ws/core/core.ws.emit.js +10 -26
  84. package/src/ws/core/core.ws.server.js +25 -58
  85. package/src/ws/default/channels/default.ws.main.js +53 -12
  86. package/src/ws/default/default.ws.connection.js +26 -13
  87. package/src/ws/default/default.ws.server.js +30 -12
  88. package/src/client/components/default/ElementsDefault.js +0 -38
  89. package/src/ws/core/management/core.ws.chat.js +0 -8
  90. package/src/ws/core/management/core.ws.mailer.js +0 -16
  91. package/src/ws/core/management/core.ws.stream.js +0 -8
  92. package/src/ws/default/management/default.ws.main.js +0 -8
package/src/cli/db.js CHANGED
@@ -13,23 +13,10 @@ import fs from 'fs-extra';
13
13
  import { DataBaseProvider } from '../db/DataBaseProvider.js';
14
14
  import { loadReplicas, pathPortAssignmentFactory, loadCronDeployEnv } from '../server/conf.js';
15
15
  import Underpost from '../index.js';
16
+ import { timer } from '../client/components/core/CommonJs.js';
17
+ import isInsideContainer from 'is-inside-container';
16
18
  const logger = loggerFactory(import.meta);
17
19
 
18
- /**
19
- * Redacts credentials from shell command strings before logging.
20
- * Masks passwords in `-p<password>`, `--password=<password>`, and `-P <password>` patterns.
21
- * @param {string} cmd - The raw command string.
22
- * @memberof UnderpostDB
23
- * @returns {string} The command with credentials replaced by `***`.
24
- */
25
- const sanitizeCommand = (cmd) => {
26
- if (typeof cmd !== 'string') return cmd;
27
- return cmd
28
- .replace(/-p['"]?[^\s'"]+/g, '-p***')
29
- .replace(/--password=['"]?[^\s'"]+/g, '--password=***')
30
- .replace(/-P\s+['"]?[^\s'"]+/g, '-P ***');
31
- };
32
-
33
20
  /**
34
21
  * Constants for database operations
35
22
  * @constant {number} MAX_BACKUP_RETENTION - Maximum number of backups to retain
@@ -98,132 +85,6 @@ class UnderpostDB {
98
85
  * @memberof UnderpostDB
99
86
  */
100
87
  static API = {
101
- /**
102
- * Helper: Gets filtered pods based on criteria.
103
- * @method _getFilteredPods
104
- * @memberof UnderpostDB
105
- * @param {Object} criteria - Filter criteria.
106
- * @param {string} [criteria.podNames] - Comma-separated pod name patterns.
107
- * @param {string} [criteria.namespace='default'] - Kubernetes namespace.
108
- * @param {string} [criteria.deployId] - Deployment ID pattern.
109
- * @return {Array<PodInfo>} Filtered pod list.
110
- */
111
- _getFilteredPods(criteria = {}) {
112
- const { podNames, namespace = 'default', deployId } = criteria;
113
-
114
- try {
115
- // Get all pods using Underpost.deploy.get
116
- let pods = Underpost.deploy.get(deployId || '', 'pods', namespace);
117
-
118
- // Filter by pod names if specified
119
- if (podNames) {
120
- const patterns = podNames.split(',').map((p) => p.trim());
121
- pods = pods.filter((pod) => {
122
- return patterns.some((pattern) => {
123
- // Support wildcards
124
- const regex = new RegExp('^' + pattern.replace(/\*/g, '.*') + '$');
125
- return regex.test(pod.NAME);
126
- });
127
- });
128
- }
129
-
130
- logger.info(`Found ${pods.length} pod(s) matching criteria`, { criteria, podNames: pods.map((p) => p.NAME) });
131
- return pods;
132
- } catch (error) {
133
- logger.error('Error filtering pods', { error: error.message, criteria });
134
- return [];
135
- }
136
- },
137
-
138
- /**
139
- * Helper: Executes kubectl command with error handling.
140
- * @method _executeKubectl
141
- * @memberof UnderpostDB
142
- * @param {string} command - kubectl command to execute.
143
- * @param {Object} [options={}] - Execution options.
144
- * @param {string} [options.context=''] - Command context for logging.
145
- * @return {string|null} Command output or null on error.
146
- */
147
- _executeKubectl(command, options = {}) {
148
- const { context = '' } = options;
149
-
150
- try {
151
- logger.info(`Executing kubectl command`, { command: sanitizeCommand(command), context });
152
- return shellExec(command, { stdout: true, disableLog: true });
153
- } catch (error) {
154
- logger.error(`kubectl command failed`, { command: sanitizeCommand(command), error: error.message, context });
155
- throw error;
156
- }
157
- },
158
-
159
- /**
160
- * Helper: Copies file to pod.
161
- * @method _copyToPod
162
- * @memberof UnderpostDB
163
- * @param {Object} params - Copy parameters.
164
- * @param {string} params.sourcePath - Source file path.
165
- * @param {string} params.podName - Target pod name.
166
- * @param {string} params.namespace - Pod namespace.
167
- * @param {string} params.destPath - Destination path in pod.
168
- * @return {boolean} Success status.
169
- */
170
- _copyToPod({ sourcePath, podName, namespace, destPath }) {
171
- try {
172
- const command = `sudo kubectl cp ${sourcePath} ${namespace}/${podName}:${destPath}`;
173
- Underpost.db._executeKubectl(command, { context: `copy to pod ${podName}` });
174
- return true;
175
- } catch (error) {
176
- logger.error('Failed to copy file to pod', { sourcePath, podName, destPath, error: error.message });
177
- return false;
178
- }
179
- },
180
-
181
- /**
182
- * Helper: Copies file from pod.
183
- * @method _copyFromPod
184
- * @memberof UnderpostDB
185
- * @param {Object} params - Copy parameters.
186
- * @param {string} params.podName - Source pod name.
187
- * @param {string} params.namespace - Pod namespace.
188
- * @param {string} params.sourcePath - Source path in pod.
189
- * @param {string} params.destPath - Destination file path.
190
- * @return {boolean} Success status.
191
- */
192
- _copyFromPod({ podName, namespace, sourcePath, destPath }) {
193
- try {
194
- const command = `sudo kubectl cp ${namespace}/${podName}:${sourcePath} ${destPath}`;
195
- Underpost.db._executeKubectl(command, { context: `copy from pod ${podName}` });
196
- return true;
197
- } catch (error) {
198
- logger.error('Failed to copy file from pod', { podName, sourcePath, destPath, error: error.message });
199
- return false;
200
- }
201
- },
202
-
203
- /**
204
- * Helper: Executes command in pod.
205
- * @method _execInPod
206
- * @memberof UnderpostDB
207
- * @param {Object} params - Execution parameters.
208
- * @param {string} params.podName - Pod name.
209
- * @param {string} params.namespace - Pod namespace.
210
- * @param {string} params.command - Command to execute.
211
- * @return {string|null} Command output or null.
212
- */
213
- _execInPod({ podName, namespace, command }) {
214
- try {
215
- const kubectlCmd = `sudo kubectl exec -n ${namespace} -i ${podName} -- sh -c "${command}"`;
216
- return Underpost.db._executeKubectl(kubectlCmd, { context: `exec in pod ${podName}` });
217
- } catch (error) {
218
- logger.error('Failed to execute command in pod', {
219
- podName,
220
- command: sanitizeCommand(command),
221
- error: error.message,
222
- });
223
- throw error;
224
- }
225
- },
226
-
227
88
  /**
228
89
  * Helper: Resolves the latest backup timestamp from an existing backup directory.
229
90
  * Scans the directory for numeric (epoch) sub-folders and returns the most recent one.
@@ -239,76 +100,6 @@ class UnderpostDB {
239
100
  return entries.sort((a, b) => parseInt(b) - parseInt(a))[0];
240
101
  },
241
102
 
242
- /**
243
- * Helper: Manages Git repository for backups.
244
- * @method _manageGitRepo
245
- * @memberof UnderpostDB
246
- * @param {Object} params - Git parameters.
247
- * @param {string} params.repoName - Repository name.
248
- * @param {string} params.operation - Operation (clone, pull, commit, push).
249
- * @param {string} [params.message=''] - Commit message.
250
- * @param {boolean} [params.forceClone=false] - Force remove and re-clone repository.
251
- * @return {boolean} Success status.
252
- */
253
- _manageGitRepo({ repoName, operation, message = '', forceClone = false }) {
254
- try {
255
- const username = process.env.GITHUB_USERNAME;
256
- if (!username) {
257
- logger.error('GITHUB_USERNAME environment variable not set');
258
- return false;
259
- }
260
-
261
- const repoPath = `../${repoName}`;
262
-
263
- switch (operation) {
264
- case 'clone':
265
- if (forceClone && fs.existsSync(repoPath)) {
266
- logger.info(`Force clone enabled, removing existing repository: ${repoName}`);
267
- fs.removeSync(repoPath);
268
- }
269
- if (!fs.existsSync(repoPath)) {
270
- shellExec(`cd .. && underpost clone ${username}/${repoName}`);
271
- logger.info(`Cloned repository: ${repoName}`);
272
- }
273
- break;
274
-
275
- case 'pull':
276
- if (fs.existsSync(repoPath)) {
277
- shellExec(`cd ${repoPath} && git checkout . && git clean -f -d`);
278
- shellExec(`cd ${repoPath} && underpost pull . ${username}/${repoName}`, {
279
- silent: true,
280
- });
281
- logger.info(`Pulled repository: ${repoName}`);
282
- }
283
- break;
284
-
285
- case 'commit':
286
- if (fs.existsSync(repoPath)) {
287
- shellExec(`cd ${repoPath} && git add .`);
288
- shellExec(`underpost cmt ${repoPath} backup '' '${message}'`);
289
- logger.info(`Committed to repository: ${repoName}`, { message });
290
- }
291
- break;
292
-
293
- case 'push':
294
- if (fs.existsSync(repoPath)) {
295
- shellExec(`cd ${repoPath} && underpost push . ${username}/${repoName}`, { silent: true });
296
- logger.info(`Pushed repository: ${repoName}`);
297
- }
298
- break;
299
-
300
- default:
301
- logger.warn(`Unknown git operation: ${operation}`);
302
- return false;
303
- }
304
-
305
- return true;
306
- } catch (error) {
307
- logger.error(`Git operation failed`, { repoName, operation, error: error.message });
308
- return false;
309
- }
310
- },
311
-
312
103
  /**
313
104
  * Helper: Performs MariaDB import operation.
314
105
  * @method _importMariaDB
@@ -329,8 +120,20 @@ class UnderpostDB {
329
120
 
330
121
  logger.info('Importing MariaDB database', { podName, dbName });
331
122
 
123
+ // Always ensure the database exists first — required for WP even when no backup is available
124
+ Underpost.kubectl.run(
125
+ `kubectl exec -n ${namespace} -i ${podName} -- mariadb -p${password} -e 'CREATE DATABASE IF NOT EXISTS ${dbName};'`,
126
+ { context: `create database ${dbName}` },
127
+ );
128
+
129
+ // If no SQL file is available, the empty database is enough — return early
130
+ if (!sqlPath || !fs.existsSync(sqlPath)) {
131
+ logger.warn('No SQL backup file found — empty database ensured', { podName, dbName, sqlPath });
132
+ return true;
133
+ }
134
+
332
135
  // Remove existing SQL file in container
333
- Underpost.db._execInPod({
136
+ Underpost.kubectl.exec({
334
137
  podName,
335
138
  namespace,
336
139
  command: `rm -rf ${containerSqlPath}`,
@@ -338,7 +141,7 @@ class UnderpostDB {
338
141
 
339
142
  // Copy SQL file to pod
340
143
  if (
341
- !Underpost.db._copyToPod({
144
+ !Underpost.kubectl.cpTo({
342
145
  sourcePath: sqlPath,
343
146
  podName,
344
147
  namespace,
@@ -348,15 +151,9 @@ class UnderpostDB {
348
151
  return false;
349
152
  }
350
153
 
351
- // Create database if it doesn't exist
352
- Underpost.db._executeKubectl(
353
- `kubectl exec -n ${namespace} -i ${podName} -- mariadb -p${password} -e 'CREATE DATABASE IF NOT EXISTS ${dbName};'`,
354
- { context: `create database ${dbName}` },
355
- );
356
-
357
154
  // Import SQL file
358
155
  const importCmd = `mariadb -u ${user} -p${password} ${dbName} < ${containerSqlPath}`;
359
- Underpost.db._execInPod({ podName, namespace, command: importCmd });
156
+ Underpost.kubectl.exec({ podName, namespace, command: importCmd });
360
157
 
361
158
  logger.info('Successfully imported MariaDB database', { podName, dbName });
362
159
  return true;
@@ -387,7 +184,7 @@ class UnderpostDB {
387
184
  logger.info('Exporting MariaDB database', { podName, dbName });
388
185
 
389
186
  // Remove existing SQL file in container
390
- Underpost.db._execInPod({
187
+ Underpost.kubectl.exec({
391
188
  podName,
392
189
  namespace,
393
190
  command: `rm -rf ${containerSqlPath}`,
@@ -395,11 +192,11 @@ class UnderpostDB {
395
192
 
396
193
  // Dump database
397
194
  const dumpCmd = `mariadb-dump --user=${user} --password=${password} --lock-tables ${dbName} > ${containerSqlPath}`;
398
- Underpost.db._execInPod({ podName, namespace, command: dumpCmd });
195
+ Underpost.kubectl.exec({ podName, namespace, command: dumpCmd });
399
196
 
400
197
  // Copy SQL file from pod
401
198
  if (
402
- !Underpost.db._copyFromPod({
199
+ !Underpost.kubectl.cpFrom({
403
200
  podName,
404
201
  namespace,
405
202
  sourcePath: containerSqlPath,
@@ -442,8 +239,18 @@ class UnderpostDB {
442
239
 
443
240
  logger.info('Importing MongoDB database', { podName, dbName });
444
241
 
242
+ // If no BSON directory is available, skip — MongoDB creates the DB on first write
243
+ if (!bsonPath || !fs.existsSync(bsonPath)) {
244
+ logger.warn('No BSON backup directory found — database will be created on first write', {
245
+ podName,
246
+ dbName,
247
+ bsonPath,
248
+ });
249
+ return true;
250
+ }
251
+
445
252
  // Remove existing BSON directory in container
446
- Underpost.db._execInPod({
253
+ Underpost.kubectl.exec({
447
254
  podName,
448
255
  namespace,
449
256
  command: `rm -rf ${containerBsonPath}`,
@@ -451,7 +258,7 @@ class UnderpostDB {
451
258
 
452
259
  // Copy BSON directory to pod
453
260
  if (
454
- !Underpost.db._copyToPod({
261
+ !Underpost.kubectl.cpTo({
455
262
  sourcePath: bsonPath,
456
263
  podName,
457
264
  namespace,
@@ -465,7 +272,7 @@ class UnderpostDB {
465
272
  const restoreCmd = `mongorestore -d ${dbName} ${containerBsonPath}${drop ? ' --drop' : ''}${
466
273
  preserveUUID ? ' --preserveUUID' : ''
467
274
  }`;
468
- Underpost.db._execInPod({ podName, namespace, command: restoreCmd });
275
+ Underpost.kubectl.exec({ podName, namespace, command: restoreCmd });
469
276
 
470
277
  logger.info('Successfully imported MongoDB database', { podName, dbName });
471
278
  return true;
@@ -495,7 +302,7 @@ class UnderpostDB {
495
302
  logger.info('Exporting MongoDB database', { podName, dbName, collections });
496
303
 
497
304
  // Remove existing BSON directory in container
498
- Underpost.db._execInPod({
305
+ Underpost.kubectl.exec({
499
306
  podName,
500
307
  namespace,
501
308
  command: `rm -rf ${containerBsonPath}`,
@@ -506,16 +313,16 @@ class UnderpostDB {
506
313
  const collectionList = collections.split(',').map((c) => c.trim());
507
314
  for (const collection of collectionList) {
508
315
  const dumpCmd = `mongodump -d ${dbName} --collection ${collection} -o /`;
509
- Underpost.db._execInPod({ podName, namespace, command: dumpCmd });
316
+ Underpost.kubectl.exec({ podName, namespace, command: dumpCmd });
510
317
  }
511
318
  } else {
512
319
  const dumpCmd = `mongodump -d ${dbName} -o /`;
513
- Underpost.db._execInPod({ podName, namespace, command: dumpCmd });
320
+ Underpost.kubectl.exec({ podName, namespace, command: dumpCmd });
514
321
  }
515
322
 
516
323
  // Copy BSON directory from pod
517
324
  if (
518
- !Underpost.db._copyFromPod({
325
+ !Underpost.kubectl.cpFrom({
519
326
  podName,
520
327
  namespace,
521
328
  sourcePath: containerBsonPath,
@@ -743,6 +550,7 @@ class UnderpostDB {
743
550
  * @param {boolean} [options.k3s=false] - k3s cluster flag.
744
551
  * @param {boolean} [options.kubeadm=false] - kubeadm cluster flag.
745
552
  * @param {boolean} [options.kind=false] - kind cluster flag.
553
+ * @param {boolean} [options.repoBackup=false] - Backs up repositories (git commit+push) inside deployment pods via kubectl exec.
746
554
  * @return {Promise<void>} Resolves when operation is complete.
747
555
  */
748
556
  async callback(
@@ -771,346 +579,381 @@ class UnderpostDB {
771
579
  k3s: false,
772
580
  kubeadm: false,
773
581
  kind: false,
582
+ repoBackup: false,
774
583
  },
775
584
  ) {
776
- loadCronDeployEnv();
777
- const newBackupTimestamp = new Date().getTime();
778
- const namespace = options.ns && typeof options.ns === 'string' ? options.ns : 'default';
779
-
780
- if (deployList === 'dd') deployList = fs.readFileSync(`./engine-private/deploy/dd.router`, 'utf8');
781
-
782
- // Handle clean-fs-collection operation
783
- if (options.cleanFsCollection || options.cleanFsDryRun) {
784
- logger.info('Starting File collection cleanup operation', { deployList });
785
- await Underpost.db.cleanFsCollection(deployList, {
786
- hosts: options.hosts,
787
- paths: options.paths,
788
- dryRun: options.cleanFsDryRun,
789
- });
790
- return;
791
- }
585
+ // Ensure engine-private is available (clone ephemerally if inside a deployment
586
+ // container where globalSecretClean has already removed it).
587
+ const firstDeployId = deployList !== 'dd' ? deployList.split(',')[0].trim() : '';
588
+ const { ephemeral } = Underpost.repo.privateEngineRepoFactory(firstDeployId || undefined);
589
+ try {
590
+ loadCronDeployEnv();
591
+ const newBackupTimestamp = new Date().getTime();
592
+ const namespace = options.ns && typeof options.ns === 'string' ? options.ns : 'default';
593
+
594
+ if (deployList === 'dd') deployList = fs.readFileSync(`./engine-private/deploy/dd.router`, 'utf8');
595
+
596
+ // Handle repository backup (git commit+push inside deployment pod)
597
+ if (options.repoBackup) {
598
+ const namespace = options.ns && typeof options.ns === 'string' ? options.ns : 'default';
599
+ for (const _deployId of deployList.split(',')) {
600
+ const deployId = _deployId.trim();
601
+ if (!deployId) continue;
602
+ logger.info('Starting pod repository backup', { deployId, namespace });
603
+ Underpost.repo.backupPodRepositories({
604
+ deployId,
605
+ namespace,
606
+ env: options.dev ? 'development' : 'production',
607
+ });
608
+ }
609
+ return;
610
+ }
792
611
 
793
- logger.info('Starting database operation', {
794
- deployList,
795
- namespace,
796
- import: options.import,
797
- export: options.export,
798
- });
612
+ // Handle clean-fs-collection operation
613
+ if (options.cleanFsCollection || options.cleanFsDryRun) {
614
+ logger.info('Starting File collection cleanup operation', { deployList });
615
+ await Underpost.db.cleanFsCollection(deployList, {
616
+ hosts: options.hosts,
617
+ paths: options.paths,
618
+ dryRun: options.cleanFsDryRun,
619
+ });
620
+ return;
621
+ }
799
622
 
800
- if (options.primaryPodEnsure) {
801
- const primaryPodName = Underpost.db.getMongoPrimaryPodName({ namespace, podName: options.primaryPodEnsure });
802
- if (!primaryPodName) {
803
- const baseCommand = options.dev ? 'node bin' : 'underpost';
804
- const baseClusterCommand = options.dev ? ' --dev' : '';
805
- let clusterFlag = options.k3s ? ' --k3s' : options.kubeadm ? ' --kubeadm' : '';
806
- shellExec(`${baseCommand} cluster${baseClusterCommand}${clusterFlag} --mongodb`);
623
+ logger.info('Starting database operation', {
624
+ deployList,
625
+ namespace,
626
+ import: options.import,
627
+ export: options.export,
628
+ });
629
+
630
+ if (options.primaryPodEnsure) {
631
+ const primaryPodName = Underpost.db.getMongoPrimaryPodName({ namespace, podName: options.primaryPodEnsure });
632
+ if (!primaryPodName) {
633
+ const baseCommand = options.dev ? 'node bin' : 'underpost';
634
+ const baseClusterCommand = options.dev ? ' --dev' : '';
635
+ let clusterFlag = options.k3s ? ' --k3s' : options.kubeadm ? ' --kubeadm' : '';
636
+ shellExec(`${baseCommand} cluster${baseClusterCommand}${clusterFlag} --mongodb`);
637
+ }
638
+ return;
807
639
  }
808
- return;
809
- }
810
640
 
811
- // Track processed repositories to avoid duplicate Git operations
812
- const processedRepos = new Set();
813
- // Track processed host+path combinations to avoid duplicates
814
- const processedHostPaths = new Set();
641
+ // Track processed repositories to avoid duplicate Git operations
642
+ const processedRepos = new Set();
643
+ // Track processed host+path combinations to avoid duplicates
644
+ const processedHostPaths = new Set();
815
645
 
816
- for (const _deployId of deployList.split(',')) {
817
- const deployId = _deployId.trim();
818
- if (!deployId) continue;
646
+ for (const _deployId of deployList.split(',')) {
647
+ const deployId = _deployId.trim();
648
+ if (!deployId) continue;
819
649
 
820
- logger.info('Processing deployment', { deployId });
650
+ logger.info('Processing deployment', { deployId });
821
651
 
822
- /** @type {Object.<string, Object.<string, DatabaseConfig>>} */
823
- const dbs = {};
824
- const repoName = `engine-${deployId.includes('dd-') ? deployId.split('dd-')[1] : deployId}-cron-backups`;
652
+ /** @type {Object.<string, Object.<string, DatabaseConfig>>} */
653
+ const dbs = {};
654
+ const repoName = `engine-${deployId.includes('dd-') ? deployId.split('dd-')[1] : deployId}-cron-backups`;
825
655
 
826
- // Load server configuration
827
- const confServerPath = `./engine-private/conf/${deployId}/conf.server.json`;
828
- if (!fs.existsSync(confServerPath)) {
829
- logger.error('Configuration file not found', { path: confServerPath });
830
- continue;
831
- }
656
+ // Load server configuration
657
+ const confServerPath = `./engine-private/conf/${deployId}/conf.server.json`;
658
+ if (!fs.existsSync(confServerPath)) {
659
+ logger.error('Configuration file not found', { path: confServerPath });
660
+ continue;
661
+ }
832
662
 
833
- const confServer = loadConfServerJson(confServerPath, { resolve: true });
834
-
835
- // Build database configuration map
836
- for (const host of Object.keys(confServer)) {
837
- for (const path of Object.keys(confServer[host])) {
838
- const { db } = confServer[host][path];
839
- if (db) {
840
- const { provider, name, user, password } = db;
841
- if (!dbs[provider]) dbs[provider] = {};
842
-
843
- if (!(name in dbs[provider])) {
844
- dbs[provider][name] = {
845
- user,
846
- password,
847
- hostFolder: host + path.replaceAll('/', '-'),
848
- host,
849
- path,
850
- };
663
+ const confServer = loadConfServerJson(confServerPath, { resolve: true });
664
+
665
+ // Build database configuration map
666
+ for (const host of Object.keys(confServer)) {
667
+ for (const path of Object.keys(confServer[host])) {
668
+ const { db } = confServer[host][path];
669
+ if (db) {
670
+ const { provider, name, user, password } = db;
671
+ if (!dbs[provider]) dbs[provider] = {};
672
+
673
+ if (!(name in dbs[provider])) {
674
+ dbs[provider][name] = {
675
+ user,
676
+ password,
677
+ hostFolder: host + path.replaceAll('/', '-'),
678
+ host,
679
+ path,
680
+ };
681
+ }
851
682
  }
852
683
  }
853
684
  }
854
- }
855
-
856
- // Handle Git operations - execute only once per repository
857
- if (!processedRepos.has(repoName)) {
858
- logger.info('Processing Git operations for repository', { repoName, deployId });
859
- if (options.git === true) {
860
- Underpost.db._manageGitRepo({ repoName, operation: 'clone', forceClone: options.forceClone });
861
- Underpost.db._manageGitRepo({ repoName, operation: 'pull' });
862
- }
863
685
 
864
- if (options.macroRollbackExport) {
865
- // Only clone if not already done by git option above
866
- if (options.git !== true) {
867
- Underpost.db._manageGitRepo({ repoName, operation: 'clone', forceClone: options.forceClone });
868
- Underpost.db._manageGitRepo({ repoName, operation: 'pull' });
686
+ // Handle Git operations - execute only once per repository
687
+ if (!processedRepos.has(repoName)) {
688
+ logger.info('Processing Git operations for repository', { repoName, deployId });
689
+ if (options.git === true) {
690
+ Underpost.repo.manageBackupRepo({ repoName, operation: 'clone', forceClone: options.forceClone });
691
+ Underpost.repo.manageBackupRepo({ repoName, operation: 'pull' });
869
692
  }
870
693
 
871
- const nCommits = parseInt(options.macroRollbackExport);
872
- const repoPath = `../${repoName}`;
873
- const username = process.env.GITHUB_USERNAME;
874
-
875
- if (fs.existsSync(repoPath) && username) {
876
- logger.info('Executing macro rollback export', { repoName, nCommits });
877
- shellExec(`cd ${repoPath} && underpost cmt . reset ${nCommits}`);
878
- shellExec(`cd ${repoPath} && git reset`);
879
- shellExec(`cd ${repoPath} && git checkout .`);
880
- shellExec(`cd ${repoPath} && git clean -f -d`);
881
- shellExec(`cd ${repoPath} && underpost push . ${username}/${repoName} -f`);
882
- } else {
883
- if (!username) logger.error('GITHUB_USERNAME environment variable not set');
884
- logger.warn('Repository not found for macro rollback', { repoPath });
885
- }
886
- }
694
+ if (options.macroRollbackExport) {
695
+ // Only clone if not already done by git option above
696
+ if (options.git !== true) {
697
+ Underpost.repo.manageBackupRepo({ repoName, operation: 'clone', forceClone: options.forceClone });
698
+ Underpost.repo.manageBackupRepo({ repoName, operation: 'pull' });
699
+ }
887
700
 
888
- processedRepos.add(repoName);
889
- logger.info('Repository marked as processed', { repoName });
890
- } else {
891
- logger.info('Skipping Git operations for already processed repository', { repoName, deployId });
892
- }
701
+ const nCommits = parseInt(options.macroRollbackExport);
702
+ const repoPath = `../${repoName}`;
703
+ const username = process.env.GITHUB_USERNAME;
704
+
705
+ if (fs.existsSync(repoPath) && username) {
706
+ logger.info('Executing macro rollback export', { repoName, nCommits });
707
+ shellExec(`cd ${repoPath} && underpost cmt . reset ${nCommits}`);
708
+ shellExec(`cd ${repoPath} && git reset`);
709
+ shellExec(`cd ${repoPath} && git checkout .`);
710
+ shellExec(`cd ${repoPath} && git clean -f -d`);
711
+ shellExec(`cd ${repoPath} && underpost push . ${username}/${repoName} -f`);
712
+ } else {
713
+ if (!username) logger.error('GITHUB_USERNAME environment variable not set');
714
+ logger.warn('Repository not found for macro rollback', { repoPath });
715
+ }
716
+ }
893
717
 
894
- // Process each database provider
895
- for (const provider of Object.keys(dbs)) {
896
- for (const dbName of Object.keys(dbs[provider])) {
897
- const { hostFolder, user, password, host, path } = dbs[provider][dbName];
718
+ processedRepos.add(repoName);
719
+ logger.info('Repository marked as processed', { repoName });
720
+ } else {
721
+ logger.info('Skipping Git operations for already processed repository', { repoName, deployId });
722
+ }
898
723
 
899
- // Create unique identifier for host+path combination
900
- const hostPathKey = `${deployId}:${host}:${path}`;
724
+ // Process each database provider
725
+ for (const provider of Object.keys(dbs)) {
726
+ for (const dbName of Object.keys(dbs[provider])) {
727
+ const { hostFolder, user, password, host, path } = dbs[provider][dbName];
901
728
 
902
- // Skip if this host+path combination was already processed
903
- if (processedHostPaths.has(hostPathKey)) {
904
- logger.info('Skipping already processed host/path', { dbName, host, path, deployId });
905
- continue;
906
- }
729
+ // Create unique identifier for host+path combination
730
+ const hostPathKey = `${deployId}:${host}:${path}`;
907
731
 
908
- // Filter by hosts and paths if specified
909
- if (
910
- (options.hosts &&
911
- !options.hosts
912
- .split(',')
913
- .map((h) => h.trim())
914
- .includes(host)) ||
915
- (options.paths &&
916
- !options.paths
917
- .split(',')
918
- .map((p) => p.trim())
919
- .includes(path))
920
- ) {
921
- logger.info('Skipping database due to host/path filter', { dbName, host, path });
922
- continue;
923
- }
732
+ // Skip if this host+path combination was already processed
733
+ if (processedHostPaths.has(hostPathKey)) {
734
+ logger.info('Skipping already processed host/path', { dbName, host, path, deployId });
735
+ continue;
736
+ }
924
737
 
925
- if (!hostFolder) {
926
- logger.warn('No hostFolder defined for database', { dbName, provider });
927
- continue;
928
- }
738
+ // Filter by hosts and paths if specified
739
+ if (
740
+ (options.hosts &&
741
+ !options.hosts
742
+ .split(',')
743
+ .map((h) => h.trim())
744
+ .includes(host)) ||
745
+ (options.paths &&
746
+ !options.paths
747
+ .split(',')
748
+ .map((p) => p.trim())
749
+ .includes(path))
750
+ ) {
751
+ logger.info('Skipping database due to host/path filter', { dbName, host, path });
752
+ continue;
753
+ }
929
754
 
930
- logger.info('Processing database', { hostFolder, provider, dbName, deployId });
755
+ if (!hostFolder) {
756
+ logger.warn('No hostFolder defined for database', { dbName, provider });
757
+ continue;
758
+ }
931
759
 
932
- const latestBackupTimestamp = Underpost.db._getLatestBackupTimestamp(`../${repoName}/${hostFolder}`);
760
+ logger.info('Processing database', { hostFolder, provider, dbName, deployId });
933
761
 
934
- dbs[provider][dbName].currentBackupTimestamp = latestBackupTimestamp;
762
+ const latestBackupTimestamp = Underpost.db._getLatestBackupTimestamp(`../${repoName}/${hostFolder}`);
935
763
 
936
- const currentTimestamp = latestBackupTimestamp || newBackupTimestamp;
937
- const sqlContainerPath = `/home/${dbName}.sql`;
938
- const fromPartsPath = `../${repoName}/${hostFolder}/${currentTimestamp}/${dbName}-parths.json`;
939
- const toSqlPath = `../${repoName}/${hostFolder}/${currentTimestamp}/${dbName}.sql`;
940
- const toNewSqlPath = `../${repoName}/${hostFolder}/${newBackupTimestamp}/${dbName}.sql`;
941
- const toBsonPath = `../${repoName}/${hostFolder}/${currentTimestamp}/${dbName}`;
942
- const toNewBsonPath = `../${repoName}/${hostFolder}/${newBackupTimestamp}/${dbName}`;
764
+ dbs[provider][dbName].currentBackupTimestamp = latestBackupTimestamp;
943
765
 
944
- // Merge split SQL files if needed for import
945
- if (options.import === true && fs.existsSync(fromPartsPath) && !fs.existsSync(toSqlPath)) {
946
- const names = JSON.parse(fs.readFileSync(fromPartsPath, 'utf8')).map((_path) => {
947
- return `../${repoName}/${hostFolder}/${currentTimestamp}/${_path.split('/').pop()}`;
948
- });
949
- logger.info('Merging backup parts', { fromPartsPath, toSqlPath, parts: names.length });
950
- await mergeFile(names, toSqlPath);
951
- }
766
+ const currentTimestamp = latestBackupTimestamp || newBackupTimestamp;
767
+ const sqlContainerPath = `/home/${dbName}.sql`;
768
+ const fromPartsPath = `../${repoName}/${hostFolder}/${currentTimestamp}/${dbName}-parths.json`;
769
+ const toSqlPath = `../${repoName}/${hostFolder}/${currentTimestamp}/${dbName}.sql`;
770
+ const toNewSqlPath = `../${repoName}/${hostFolder}/${newBackupTimestamp}/${dbName}.sql`;
771
+ const toBsonPath = `../${repoName}/${hostFolder}/${currentTimestamp}/${dbName}`;
772
+ const toNewBsonPath = `../${repoName}/${hostFolder}/${newBackupTimestamp}/${dbName}`;
952
773
 
953
- // Get target pods based on provider and options
954
- let targetPods = [];
955
- const podCriteria = {
956
- podNames: options.podName,
957
- namespace,
958
- deployId: provider === 'mariadb' ? 'mariadb' : 'mongo',
959
- };
774
+ // Merge split SQL files if needed for import
775
+ if (options.import === true && fs.existsSync(fromPartsPath) && !fs.existsSync(toSqlPath)) {
776
+ const names = JSON.parse(fs.readFileSync(fromPartsPath, 'utf8')).map((_path) => {
777
+ return `../${repoName}/${hostFolder}/${currentTimestamp}/${_path.split('/').pop()}`;
778
+ });
779
+ logger.info('Merging backup parts', { fromPartsPath, toSqlPath, parts: names.length });
780
+ await mergeFile(names, toSqlPath);
781
+ }
960
782
 
961
- targetPods = Underpost.db._getFilteredPods(podCriteria);
783
+ // Get target pods based on provider and options
784
+ let targetPods = [];
785
+ const podCriteria = {
786
+ podNames: options.podName,
787
+ namespace,
788
+ deployId: provider === 'mariadb' ? 'mariadb' : 'mongo',
789
+ };
962
790
 
963
- // Fallback to default if no custom pods specified
964
- if (targetPods.length === 0 && !options.podName) {
965
- const defaultPods = Underpost.deploy.get(provider === 'mariadb' ? 'mariadb' : 'mongo', 'pods', namespace);
966
- console.log('defaultPods', defaultPods);
967
- targetPods = defaultPods;
968
- }
791
+ targetPods = Underpost.kubectl.getFilteredPods(podCriteria);
792
+
793
+ // Fallback to default if no custom pods specified
794
+ if (targetPods.length === 0 && !options.podName) {
795
+ const defaultPods = Underpost.kubectl.get(
796
+ provider === 'mariadb' ? 'mariadb' : 'mongo',
797
+ 'pods',
798
+ namespace,
799
+ );
800
+ console.log('defaultPods', defaultPods);
801
+ targetPods = defaultPods;
802
+ }
969
803
 
970
- if (targetPods.length === 0) {
971
- logger.warn('No pods found matching criteria', { provider, criteria: podCriteria });
972
- continue;
973
- }
804
+ if (targetPods.length === 0) {
805
+ logger.warn('No pods found matching criteria', { provider, criteria: podCriteria });
806
+ continue;
807
+ }
974
808
 
975
- // Handle primary pod detection for MongoDB
976
- let podsToProcess = [];
977
- if (provider === 'mongoose' && !options.allPods) {
978
- // For MongoDB, always use primary pod unless allPods is true
979
- if (!targetPods || targetPods.length === 0) {
980
- logger.warn('No MongoDB pods available to check for primary');
981
- podsToProcess = [];
982
- } else {
983
- const firstPod = targetPods[0].NAME;
984
- const primaryPodName = Underpost.db.getMongoPrimaryPodName({ namespace, podName: firstPod });
985
-
986
- if (primaryPodName) {
987
- const primaryPod = targetPods.find((p) => p.NAME === primaryPodName);
988
- if (primaryPod) {
989
- podsToProcess = [primaryPod];
990
- logger.info('Using MongoDB primary pod', { primaryPod: primaryPodName });
809
+ // Handle primary pod detection for MongoDB
810
+ let podsToProcess = [];
811
+ if (provider === 'mongoose' && !options.allPods) {
812
+ // For MongoDB, always use primary pod unless allPods is true
813
+ if (!targetPods || targetPods.length === 0) {
814
+ logger.warn('No MongoDB pods available to check for primary');
815
+ podsToProcess = [];
816
+ } else {
817
+ const firstPod = targetPods[0].NAME;
818
+ const primaryPodName = Underpost.db.getMongoPrimaryPodName({ namespace, podName: firstPod });
819
+
820
+ if (primaryPodName) {
821
+ const primaryPod = targetPods.find((p) => p.NAME === primaryPodName);
822
+ if (primaryPod) {
823
+ podsToProcess = [primaryPod];
824
+ logger.info('Using MongoDB primary pod', { primaryPod: primaryPodName });
825
+ } else {
826
+ logger.warn('Primary pod not in filtered list, using first pod', { primaryPodName });
827
+ podsToProcess = [targetPods[0]];
828
+ }
991
829
  } else {
992
- logger.warn('Primary pod not in filtered list, using first pod', { primaryPodName });
830
+ logger.warn('Could not detect primary pod, using first pod');
993
831
  podsToProcess = [targetPods[0]];
994
832
  }
995
- } else {
996
- logger.warn('Could not detect primary pod, using first pod');
997
- podsToProcess = [targetPods[0]];
998
833
  }
834
+ } else {
835
+ // For MariaDB or when allPods is true, limit to first pod unless allPods is true
836
+ podsToProcess = options.allPods === true ? targetPods : [targetPods[0]];
999
837
  }
1000
- } else {
1001
- // For MariaDB or when allPods is true, limit to first pod unless allPods is true
1002
- podsToProcess = options.allPods === true ? targetPods : [targetPods[0]];
1003
- }
1004
838
 
1005
- logger.info(`Processing ${podsToProcess.length} pod(s) for ${provider}`, {
1006
- dbName,
1007
- pods: podsToProcess.map((p) => p.NAME),
1008
- });
839
+ logger.info(`Processing ${podsToProcess.length} pod(s) for ${provider}`, {
840
+ dbName,
841
+ pods: podsToProcess.map((p) => p.NAME),
842
+ });
1009
843
 
1010
- // Process each pod
1011
- for (const pod of podsToProcess) {
1012
- logger.info('Processing pod', { podName: pod.NAME, node: pod.NODE, status: pod.STATUS });
1013
-
1014
- switch (provider) {
1015
- case 'mariadb': {
1016
- if (options.stats === true) {
1017
- const stats = Underpost.db._getMariaDBStats({
1018
- podName: pod.NAME,
1019
- namespace,
1020
- dbName,
1021
- user,
1022
- password,
1023
- });
1024
- if (stats) {
1025
- Underpost.db._displayStats({ provider, dbName, stats });
844
+ // Process each pod
845
+ for (const pod of podsToProcess) {
846
+ logger.info('Processing pod', { podName: pod.NAME, node: pod.NODE, status: pod.STATUS });
847
+
848
+ switch (provider) {
849
+ case 'mariadb': {
850
+ if (options.stats === true) {
851
+ const stats = Underpost.db._getMariaDBStats({
852
+ podName: pod.NAME,
853
+ namespace,
854
+ dbName,
855
+ user,
856
+ password,
857
+ });
858
+ if (stats) {
859
+ Underpost.db._displayStats({ provider, dbName, stats });
860
+ }
1026
861
  }
1027
- }
1028
862
 
1029
- if (options.import === true) {
1030
- Underpost.db._importMariaDB({
1031
- pod,
1032
- namespace,
1033
- dbName,
1034
- user,
1035
- password,
1036
- sqlPath: toSqlPath,
1037
- });
1038
- }
863
+ if (options.import === true) {
864
+ Underpost.db._importMariaDB({
865
+ pod,
866
+ namespace,
867
+ dbName,
868
+ user,
869
+ password,
870
+ sqlPath: toSqlPath,
871
+ });
872
+ }
1039
873
 
1040
- if (options.export === true) {
1041
- const outputPath = options.outPath || toNewSqlPath;
1042
- await Underpost.db._exportMariaDB({
1043
- pod,
1044
- namespace,
1045
- dbName,
1046
- user,
1047
- password,
1048
- outputPath,
1049
- });
874
+ if (options.export === true) {
875
+ const outputPath = options.outPath || toNewSqlPath;
876
+ await Underpost.db._exportMariaDB({
877
+ pod,
878
+ namespace,
879
+ dbName,
880
+ user,
881
+ password,
882
+ outputPath,
883
+ });
884
+ }
885
+ break;
1050
886
  }
1051
- break;
1052
- }
1053
887
 
1054
- case 'mongoose': {
1055
- if (options.stats === true) {
1056
- const stats = Underpost.db._getMongoStats({
1057
- podName: pod.NAME,
1058
- namespace,
1059
- dbName,
1060
- });
1061
- if (stats) {
1062
- Underpost.db._displayStats({ provider, dbName, stats });
888
+ case 'mongoose': {
889
+ if (options.stats === true) {
890
+ const stats = Underpost.db._getMongoStats({
891
+ podName: pod.NAME,
892
+ namespace,
893
+ dbName,
894
+ });
895
+ if (stats) {
896
+ Underpost.db._displayStats({ provider, dbName, stats });
897
+ }
1063
898
  }
1064
- }
1065
899
 
1066
- if (options.import === true) {
1067
- const bsonPath = options.outPath || toBsonPath;
1068
- Underpost.db._importMongoDB({
1069
- pod,
1070
- namespace,
1071
- dbName,
1072
- bsonPath,
1073
- drop: options.drop,
1074
- preserveUUID: options.preserveUUID,
1075
- });
1076
- }
900
+ if (options.import === true) {
901
+ const bsonPath = options.outPath || toBsonPath;
902
+ Underpost.db._importMongoDB({
903
+ pod,
904
+ namespace,
905
+ dbName,
906
+ bsonPath,
907
+ drop: options.drop,
908
+ preserveUUID: options.preserveUUID,
909
+ });
910
+ }
1077
911
 
1078
- if (options.export === true) {
1079
- const outputPath = options.outPath || toNewBsonPath;
1080
- Underpost.db._exportMongoDB({
1081
- pod,
1082
- namespace,
1083
- dbName,
1084
- outputPath,
1085
- collections: options.collections,
1086
- });
912
+ if (options.export === true) {
913
+ const outputPath = options.outPath || toNewBsonPath;
914
+ Underpost.db._exportMongoDB({
915
+ pod,
916
+ namespace,
917
+ dbName,
918
+ outputPath,
919
+ collections: options.collections,
920
+ });
921
+ }
922
+ break;
1087
923
  }
1088
- break;
1089
- }
1090
924
 
1091
- default:
1092
- logger.warn('Unsupported database provider', { provider });
1093
- break;
925
+ default:
926
+ logger.warn('Unsupported database provider', { provider });
927
+ break;
928
+ }
1094
929
  }
930
+
931
+ // Mark this host+path combination as processed
932
+ processedHostPaths.add(hostPathKey);
1095
933
  }
934
+ }
1096
935
 
1097
- // Mark this host+path combination as processed
1098
- processedHostPaths.add(hostPathKey);
936
+ // Commit and push to Git if enabled - execute only once per repository
937
+ if (options.export === true && options.git === true && !processedRepos.has(`${repoName}-committed`)) {
938
+ const commitMessage = `${new Date(newBackupTimestamp).toLocaleDateString()} ${new Date(
939
+ newBackupTimestamp,
940
+ ).toLocaleTimeString()}`;
941
+ Underpost.repo.manageBackupRepo({ repoName, operation: 'commit', message: commitMessage });
942
+ Underpost.repo.manageBackupRepo({ repoName, operation: 'push' });
943
+ processedRepos.add(`${repoName}-committed`);
1099
944
  }
1100
945
  }
1101
946
 
1102
- // Commit and push to Git if enabled - execute only once per repository
1103
- if (options.export === true && options.git === true && !processedRepos.has(`${repoName}-committed`)) {
1104
- const commitMessage = `${new Date(newBackupTimestamp).toLocaleDateString()} ${new Date(
1105
- newBackupTimestamp,
1106
- ).toLocaleTimeString()}`;
1107
- Underpost.db._manageGitRepo({ repoName, operation: 'commit', message: commitMessage });
1108
- Underpost.db._manageGitRepo({ repoName, operation: 'push' });
1109
- processedRepos.add(`${repoName}-committed`);
947
+ logger.info('Database operation completed successfully');
948
+ } catch (error) {
949
+ logger.error('Database operation failed', { error: error.message });
950
+ throw error;
951
+ } finally {
952
+ if (ephemeral && isInsideContainer()) {
953
+ Underpost.repo.cleanupPrivateEngineRepo();
954
+ Underpost.env.clean();
1110
955
  }
1111
956
  }
1112
-
1113
- logger.info('Database operation completed successfully');
1114
957
  },
1115
958
 
1116
959
  /**
@@ -1130,160 +973,185 @@ class UnderpostDB {
1130
973
  host = process.env.DEFAULT_DEPLOY_HOST,
1131
974
  path = process.env.DEFAULT_DEPLOY_PATH,
1132
975
  ) {
1133
- loadCronDeployEnv();
1134
- deployId = deployId ? deployId : process.env.DEFAULT_DEPLOY_ID;
1135
- host = host ? host : process.env.DEFAULT_DEPLOY_HOST;
1136
- path = path ? path : process.env.DEFAULT_DEPLOY_PATH;
976
+ const { ephemeral } = Underpost.repo.privateEngineRepoFactory(deployId || undefined);
977
+ try {
978
+ loadCronDeployEnv();
979
+ deployId = deployId ? deployId : process.env.DEFAULT_DEPLOY_ID;
980
+ host = host ? host : process.env.DEFAULT_DEPLOY_HOST;
981
+ path = path ? path : process.env.DEFAULT_DEPLOY_PATH;
1137
982
 
1138
- logger.info('Creating cluster metadata', { deployId, host, path });
983
+ logger.info('Creating cluster metadata', { deployId, host, path });
1139
984
 
1140
- const env = 'production';
1141
- const deployListPath = './engine-private/deploy/dd.router';
985
+ const env = 'production';
986
+ const deployListPath = './engine-private/deploy/dd.router';
1142
987
 
1143
- if (!fs.existsSync(deployListPath)) {
1144
- logger.error('Deploy router file not found', { path: deployListPath });
1145
- throw new Error(`Deploy router file not found: ${deployListPath}`);
1146
- }
988
+ if (!fs.existsSync(deployListPath)) {
989
+ logger.error('Deploy router file not found', { path: deployListPath });
990
+ throw new Error(`Deploy router file not found: ${deployListPath}`);
991
+ }
1147
992
 
1148
- const deployList = fs.readFileSync(deployListPath, 'utf8').split(',');
993
+ const deployList = fs.readFileSync(deployListPath, 'utf8').split(',');
1149
994
 
1150
- const confServerPath = `./engine-private/conf/${deployId}/conf.server.json`;
1151
- if (!fs.existsSync(confServerPath)) {
1152
- logger.error('Server configuration not found', { path: confServerPath });
1153
- throw new Error(`Server configuration not found: ${confServerPath}`);
1154
- }
995
+ const confServerPath = `./engine-private/conf/${deployId}/conf.server.json`;
996
+ if (!fs.existsSync(confServerPath)) {
997
+ logger.error('Server configuration not found', { path: confServerPath });
998
+ throw new Error(`Server configuration not found: ${confServerPath}`);
999
+ }
1155
1000
 
1156
- const { db } = loadConfServerJson(confServerPath, { resolve: true })[host][path];
1001
+ const { db } = loadConfServerJson(confServerPath, { resolve: true })[host][path];
1157
1002
 
1158
- try {
1159
- await DataBaseProvider.load({ apis: ['instance', 'cron'], host, path, db });
1003
+ const maxRetries = 5;
1004
+ const retryDelay = 3000;
1005
+ for (let attempt = 1; attempt <= maxRetries; attempt++) {
1006
+ try {
1007
+ await DataBaseProvider.load({ apis: ['instance', 'cron'], host, path, db });
1008
+ break;
1009
+ } catch (err) {
1010
+ if (attempt === maxRetries) {
1011
+ logger.error('Failed to connect to database after retries', { attempts: maxRetries, error: err.message });
1012
+ throw err;
1013
+ }
1014
+ logger.warn('Database connection failed, retrying...', { attempt, maxRetries, error: err.message });
1015
+ await timer(retryDelay);
1016
+ }
1017
+ }
1160
1018
 
1161
- /** @type {import('../api/instance/instance.model.js').InstanceModel} */
1162
- const Instance = DataBaseProvider.instance[`${host}${path}`].mongoose.models.Instance;
1019
+ try {
1020
+ /** @type {import('../api/instance/instance.model.js').InstanceModel} */
1021
+ const Instance = DataBaseProvider.instance[`${host}${path}`].mongoose.models.Instance;
1163
1022
 
1164
- await Instance.deleteMany();
1165
- logger.info('Cleared existing instance metadata');
1023
+ await Instance.deleteMany();
1024
+ logger.info('Cleared existing instance metadata');
1166
1025
 
1167
- for (const _deployId of deployList) {
1168
- const deployId = _deployId.trim();
1169
- if (!deployId) continue;
1026
+ for (const _deployId of deployList) {
1027
+ const deployId = _deployId.trim();
1028
+ if (!deployId) continue;
1170
1029
 
1171
- logger.info('Processing deployment for metadata', { deployId });
1030
+ logger.info('Processing deployment for metadata', { deployId });
1172
1031
 
1173
- const confServerPath = `./engine-private/conf/${deployId}/conf.server.json`;
1174
- if (!fs.existsSync(confServerPath)) {
1175
- logger.warn('Configuration not found for deployment', { deployId, path: confServerPath });
1176
- continue;
1177
- }
1032
+ const confServerPath = `./engine-private/conf/${deployId}/conf.server.json`;
1033
+ if (!fs.existsSync(confServerPath)) {
1034
+ logger.warn('Configuration not found for deployment', { deployId, path: confServerPath });
1035
+ continue;
1036
+ }
1178
1037
 
1179
- const confServer = loadReplicas(deployId, loadConfServerJson(confServerPath, { resolve: true }));
1180
- const router = await Underpost.deploy.routerFactory(deployId, env);
1181
- const pathPortAssignmentData = await pathPortAssignmentFactory(deployId, router, confServer);
1038
+ const confServer = loadReplicas(deployId, loadConfServerJson(confServerPath, { resolve: true }));
1039
+ const router = await Underpost.deploy.routerFactory(deployId, env);
1040
+ const pathPortAssignmentData = await pathPortAssignmentFactory(deployId, router, confServer);
1182
1041
 
1183
- for (const host of Object.keys(confServer)) {
1184
- for (const { path, port } of pathPortAssignmentData[host]) {
1185
- if (!confServer[host][path]) continue;
1042
+ for (const host of Object.keys(confServer)) {
1043
+ for (const { path, port } of pathPortAssignmentData[host]) {
1044
+ if (!confServer[host][path]) continue;
1186
1045
 
1187
- const { client, runtime, apis, peer } = confServer[host][path];
1046
+ const { client, runtime, apis, peer } = confServer[host][path];
1188
1047
 
1189
- // Save main instance
1190
- {
1191
- const body = {
1192
- deployId,
1193
- host,
1194
- path,
1195
- port,
1196
- client,
1197
- runtime,
1198
- apis,
1199
- };
1048
+ // Save main instance
1049
+ {
1050
+ const body = {
1051
+ deployId,
1052
+ host,
1053
+ path,
1054
+ port,
1055
+ client,
1056
+ runtime,
1057
+ apis,
1058
+ };
1059
+
1060
+ logger.info('Saving instance metadata', body);
1061
+ await new Instance(body).save();
1062
+ }
1200
1063
 
1201
- logger.info('Saving instance metadata', body);
1202
- await new Instance(body).save();
1064
+ // Save peer instance if exists
1065
+ if (peer) {
1066
+ const body = {
1067
+ deployId,
1068
+ host,
1069
+ path: path === '/' ? '/peer' : `${path}/peer`,
1070
+ port: port + 1,
1071
+ runtime: 'nodejs',
1072
+ };
1073
+
1074
+ logger.info('Saving peer instance metadata', body);
1075
+ await new Instance(body).save();
1076
+ }
1203
1077
  }
1078
+ }
1204
1079
 
1205
- // Save peer instance if exists
1206
- if (peer) {
1080
+ // Process additional instances
1081
+ const confInstancesPath = `./engine-private/conf/${deployId}/conf.instances.json`;
1082
+ if (fs.existsSync(confInstancesPath)) {
1083
+ const confInstances = JSON.parse(fs.readFileSync(confInstancesPath, 'utf8'));
1084
+ for (const instance of confInstances) {
1085
+ const { id, host, path, fromPort, metadata } = instance;
1086
+ const { runtime } = metadata;
1207
1087
  const body = {
1208
1088
  deployId,
1209
1089
  host,
1210
- path: path === '/' ? '/peer' : `${path}/peer`,
1211
- port: port + 1,
1212
- runtime: 'nodejs',
1090
+ path,
1091
+ port: fromPort,
1092
+ client: id,
1093
+ runtime,
1213
1094
  };
1214
-
1215
- logger.info('Saving peer instance metadata', body);
1095
+ logger.info('Saving additional instance metadata', body);
1216
1096
  await new Instance(body).save();
1217
1097
  }
1218
1098
  }
1219
1099
  }
1220
-
1221
- // Process additional instances
1222
- const confInstancesPath = `./engine-private/conf/${deployId}/conf.instances.json`;
1223
- if (fs.existsSync(confInstancesPath)) {
1224
- const confInstances = JSON.parse(fs.readFileSync(confInstancesPath, 'utf8'));
1225
- for (const instance of confInstances) {
1226
- const { id, host, path, fromPort, metadata } = instance;
1227
- const { runtime } = metadata;
1228
- const body = {
1229
- deployId,
1230
- host,
1231
- path,
1232
- port: fromPort,
1233
- client: id,
1234
- runtime,
1235
- };
1236
- logger.info('Saving additional instance metadata', body);
1237
- await new Instance(body).save();
1238
- }
1239
- }
1100
+ } catch (error) {
1101
+ logger.error('Failed to create instance metadata', { error: error.message });
1102
+ throw error;
1240
1103
  }
1241
- } catch (error) {
1242
- logger.error('Failed to create instance metadata', { error: error.message });
1243
- throw error;
1244
- }
1245
1104
 
1246
- try {
1247
- const cronDeployPath = './engine-private/deploy/dd.cron';
1248
- if (!fs.existsSync(cronDeployPath)) {
1249
- logger.warn('Cron deploy file not found', { path: cronDeployPath });
1250
- return;
1251
- }
1105
+ try {
1106
+ const cronDeployPath = './engine-private/deploy/dd.cron';
1107
+ if (!fs.existsSync(cronDeployPath)) {
1108
+ logger.warn('Cron deploy file not found', { path: cronDeployPath });
1109
+ return;
1110
+ }
1252
1111
 
1253
- const cronDeployId = fs.readFileSync(cronDeployPath, 'utf8').trim();
1254
- const confCronPath = `./engine-private/conf/${cronDeployId}/conf.cron.json`;
1112
+ const cronDeployId = fs.readFileSync(cronDeployPath, 'utf8').trim();
1113
+ const confCronPath = `./engine-private/conf/${cronDeployId}/conf.cron.json`;
1255
1114
 
1256
- if (!fs.existsSync(confCronPath)) {
1257
- logger.warn('Cron configuration not found', { path: confCronPath });
1258
- return;
1259
- }
1115
+ if (!fs.existsSync(confCronPath)) {
1116
+ logger.warn('Cron configuration not found', { path: confCronPath });
1117
+ return;
1118
+ }
1260
1119
 
1261
- const confCron = JSON.parse(fs.readFileSync(confCronPath, 'utf8'));
1120
+ const confCron = JSON.parse(fs.readFileSync(confCronPath, 'utf8'));
1262
1121
 
1263
- await DataBaseProvider.load({ apis: ['cron'], host, path, db });
1122
+ await DataBaseProvider.load({ apis: ['cron'], host, path, db });
1264
1123
 
1265
- /** @type {import('../api/cron/cron.model.js').CronModel} */
1266
- const Cron = DataBaseProvider.instance[`${host}${path}`].mongoose.models.Cron;
1124
+ /** @type {import('../api/cron/cron.model.js').CronModel} */
1125
+ const Cron = DataBaseProvider.instance[`${host}${path}`].mongoose.models.Cron;
1267
1126
 
1268
- await Cron.deleteMany();
1269
- logger.info('Cleared existing cron metadata');
1127
+ await Cron.deleteMany();
1128
+ logger.info('Cleared existing cron metadata');
1270
1129
 
1271
- for (const jobId of Object.keys(confCron.jobs)) {
1272
- const body = {
1273
- jobId,
1274
- deployId: Underpost.cron.getRelatedDeployIdList(jobId),
1275
- expression: confCron.jobs[jobId].expression,
1276
- enabled: confCron.jobs[jobId].enabled,
1277
- };
1278
- logger.info('Saving cron metadata', body);
1279
- await new Cron(body).save();
1130
+ for (const jobId of Object.keys(confCron.jobs)) {
1131
+ const body = {
1132
+ jobId,
1133
+ deployId: Underpost.cron.getRelatedDeployIdList(jobId),
1134
+ expression: confCron.jobs[jobId].expression,
1135
+ enabled: confCron.jobs[jobId].enabled,
1136
+ };
1137
+ logger.info('Saving cron metadata', body);
1138
+ await new Cron(body).save();
1139
+ }
1140
+ } catch (error) {
1141
+ logger.error('Failed to create cron metadata', { error: error.message });
1280
1142
  }
1143
+
1144
+ await DataBaseProvider.instance[`${host}${path}`].mongoose.close();
1145
+ logger.info('Cluster metadata creation completed');
1281
1146
  } catch (error) {
1282
- logger.error('Failed to create cron metadata', { error: error.message });
1147
+ logger.error('Cluster metadata creation failed', { error: error.message });
1148
+ throw error;
1149
+ } finally {
1150
+ if (ephemeral && isInsideContainer()) {
1151
+ Underpost.repo.cleanupPrivateEngineRepo();
1152
+ Underpost.env.clean();
1153
+ }
1283
1154
  }
1284
-
1285
- await DataBaseProvider.instance[`${host}${path}`].mongoose.close();
1286
- logger.info('Cluster metadata creation completed');
1287
1155
  },
1288
1156
 
1289
1157
  /**
@@ -1307,201 +1175,223 @@ class UnderpostDB {
1307
1175
  dryRun: false,
1308
1176
  },
1309
1177
  ) {
1310
- loadCronDeployEnv();
1311
- if (deployList === 'dd') deployList = fs.readFileSync(`./engine-private/deploy/dd.router`, 'utf8');
1312
-
1313
- logger.info('Starting File collection cleanup', { deployList, options });
1314
-
1315
- // Load file.ref.json to know which models reference File
1316
- const fileRefPath = './src/api/file/file.ref.json';
1317
- if (!fs.existsSync(fileRefPath)) {
1318
- logger.error('file.ref.json not found', { path: fileRefPath });
1319
- return;
1320
- }
1178
+ const firstDeployId = deployList !== 'dd' ? deployList.split(',')[0].trim() : '';
1179
+ const { ephemeral } = Underpost.repo.privateEngineRepoFactory(firstDeployId || undefined);
1180
+ try {
1181
+ loadCronDeployEnv();
1182
+ if (deployList === 'dd') deployList = fs.readFileSync(`./engine-private/deploy/dd.router`, 'utf8');
1321
1183
 
1322
- const fileRefData = JSON.parse(fs.readFileSync(fileRefPath, 'utf8'));
1323
- logger.info('Loaded file reference configuration', { apis: fileRefData.length });
1184
+ logger.info('Starting File collection cleanup', { deployList, options });
1324
1185
 
1325
- // Filter hosts and paths if specified
1326
- const filterHosts = options.hosts ? options.hosts.split(',').map((h) => h.trim()) : [];
1327
- const filterPaths = options.paths ? options.paths.split(',').map((p) => p.trim()) : [];
1186
+ // Load file.ref.json to know which models reference File
1187
+ const fileRefPath = './src/api/file/file.ref.json';
1188
+ if (!fs.existsSync(fileRefPath)) {
1189
+ logger.error('file.ref.json not found', { path: fileRefPath });
1190
+ return;
1191
+ }
1328
1192
 
1329
- // Track all connections to close them at the end
1330
- const connectionsToClose = [];
1193
+ const fileRefData = JSON.parse(fs.readFileSync(fileRefPath, 'utf8'));
1194
+ logger.info('Loaded file reference configuration', { apis: fileRefData.length });
1331
1195
 
1332
- for (const _deployId of deployList.split(',')) {
1333
- const deployId = _deployId.trim();
1334
- if (!deployId) continue;
1196
+ // Filter hosts and paths if specified
1197
+ const filterHosts = options.hosts ? options.hosts.split(',').map((h) => h.trim()) : [];
1198
+ const filterPaths = options.paths ? options.paths.split(',').map((p) => p.trim()) : [];
1335
1199
 
1336
- logger.info('Processing deployment for File cleanup', { deployId });
1200
+ // Track all connections to close them at the end
1201
+ const connectionsToClose = [];
1337
1202
 
1338
- // Load server configuration
1339
- const confServerPath = `./engine-private/conf/${deployId}/conf.server.json`;
1340
- if (!fs.existsSync(confServerPath)) {
1341
- logger.error('Configuration file not found', { path: confServerPath });
1342
- continue;
1343
- }
1203
+ for (const _deployId of deployList.split(',')) {
1204
+ const deployId = _deployId.trim();
1205
+ if (!deployId) continue;
1344
1206
 
1345
- const confServer = loadConfServerJson(confServerPath, { resolve: true });
1207
+ logger.info('Processing deployment for File cleanup', { deployId });
1346
1208
 
1347
- // Process each host+path combination
1348
- for (const host of Object.keys(confServer)) {
1349
- if (filterHosts.length > 0 && !filterHosts.includes(host)) continue;
1209
+ // Load server configuration
1210
+ const confServerPath = `./engine-private/conf/${deployId}/conf.server.json`;
1211
+ if (!fs.existsSync(confServerPath)) {
1212
+ logger.error('Configuration file not found', { path: confServerPath });
1213
+ continue;
1214
+ }
1350
1215
 
1351
- for (const path of Object.keys(confServer[host])) {
1352
- if (filterPaths.length > 0 && !filterPaths.includes(path)) continue;
1216
+ const confServer = loadConfServerJson(confServerPath, { resolve: true });
1353
1217
 
1354
- const { db, apis } = confServer[host][path];
1355
- if (!db || !apis) continue;
1218
+ // Process each host+path combination
1219
+ for (const host of Object.keys(confServer)) {
1220
+ if (filterHosts.length > 0 && !filterHosts.includes(host)) continue;
1356
1221
 
1357
- // Check if 'file' api is in the apis list
1358
- if (!apis.includes('file')) {
1359
- logger.info('Skipping - no file api in configuration', { host, path });
1360
- continue;
1361
- }
1222
+ for (const path of Object.keys(confServer[host])) {
1223
+ if (filterPaths.length > 0 && !filterPaths.includes(path)) continue;
1362
1224
 
1363
- // logger.info('Processing host+path with file api', { host, path, db: db.name });
1225
+ const { db, apis } = confServer[host][path];
1226
+ if (!db || !apis) continue;
1364
1227
 
1365
- try {
1366
- // Connect to database
1367
- const dbProvider = await DataBaseProvider.load({ apis, host, path, db });
1368
- if (!dbProvider || !dbProvider.models) {
1369
- logger.error('Failed to load database provider', { host, path });
1228
+ // Check if 'file' api is in the apis list
1229
+ if (!apis.includes('file')) {
1230
+ logger.info('Skipping - no file api in configuration', { host, path });
1370
1231
  continue;
1371
1232
  }
1372
1233
 
1373
- const { models } = dbProvider;
1234
+ // logger.info('Processing host+path with file api', { host, path, db: db.name });
1235
+
1236
+ try {
1237
+ // Connect to database with retry
1238
+ let dbProvider;
1239
+ for (let attempt = 1; attempt <= 3; attempt++) {
1240
+ try {
1241
+ dbProvider = await DataBaseProvider.load({ apis, host, path, db });
1242
+ break;
1243
+ } catch (err) {
1244
+ if (attempt === 3) throw err;
1245
+ logger.warn('Database connection failed, retrying...', { attempt, host, path, error: err.message });
1246
+ await timer(3000);
1247
+ }
1248
+ }
1249
+ if (!dbProvider || !dbProvider.models) {
1250
+ logger.error('Failed to load database provider', { host, path });
1251
+ continue;
1252
+ }
1374
1253
 
1375
- // Track this connection for cleanup
1376
- connectionsToClose.push({ host, path, dbProvider });
1254
+ const { models } = dbProvider;
1377
1255
 
1378
- // Check if File model exists
1379
- if (!models.File) {
1380
- logger.warn('File model not loaded', { host, path });
1381
- continue;
1382
- }
1256
+ // Track this connection for cleanup
1257
+ connectionsToClose.push({ host, path, dbProvider });
1383
1258
 
1384
- // Get all File documents
1385
- const allFiles = await models.File.find({}, '_id').lean();
1386
- logger.info('Found File documents', { count: allFiles.length, host, path });
1259
+ // Check if File model exists
1260
+ if (!models.File) {
1261
+ logger.warn('File model not loaded', { host, path });
1262
+ continue;
1263
+ }
1387
1264
 
1388
- if (allFiles.length === 0) continue;
1265
+ // Get all File documents
1266
+ const allFiles = await models.File.find({}, '_id').lean();
1267
+ logger.info('Found File documents', { count: allFiles.length, host, path });
1389
1268
 
1390
- // Track which File IDs are referenced
1391
- const referencedFileIds = new Set();
1269
+ if (allFiles.length === 0) continue;
1392
1270
 
1393
- // Check each API from file.ref.json
1394
- for (const refConfig of fileRefData) {
1395
- const { api, model: modelFields } = refConfig;
1271
+ // Track which File IDs are referenced
1272
+ const referencedFileIds = new Set();
1396
1273
 
1397
- // Check if this API is loaded in current context
1398
- const modelName = api
1399
- .split('-')
1400
- .map((w) => w.charAt(0).toUpperCase() + w.slice(1))
1401
- .join('');
1402
- const Model = models[modelName];
1274
+ // Check each API from file.ref.json
1275
+ for (const refConfig of fileRefData) {
1276
+ const { api, model: modelFields } = refConfig;
1403
1277
 
1404
- if (!Model) {
1405
- logger.debug('Model not loaded in current context', { api, modelName, host, path });
1406
- continue;
1407
- }
1278
+ // Check if this API is loaded in current context
1279
+ const modelName = api
1280
+ .split('-')
1281
+ .map((w) => w.charAt(0).toUpperCase() + w.slice(1))
1282
+ .join('');
1283
+ const Model = models[modelName];
1408
1284
 
1409
- logger.info('Checking references in model', { api, modelName });
1285
+ if (!Model) {
1286
+ logger.debug('Model not loaded in current context', { api, modelName, host, path });
1287
+ continue;
1288
+ }
1410
1289
 
1411
- // Helper function to recursively check field references
1412
- const checkFieldReferences = async (fieldPath, fieldConfig) => {
1413
- for (const [fieldName, fieldValue] of Object.entries(fieldConfig)) {
1414
- const currentPath = fieldPath ? `${fieldPath}.${fieldName}` : fieldName;
1290
+ logger.info('Checking references in model', { api, modelName });
1415
1291
 
1416
- if (fieldValue === true) {
1417
- // This is a File reference field
1418
- const query = {};
1419
- query[currentPath] = { $exists: true, $ne: null };
1292
+ // Helper function to recursively check field references
1293
+ const checkFieldReferences = async (fieldPath, fieldConfig) => {
1294
+ for (const [fieldName, fieldValue] of Object.entries(fieldConfig)) {
1295
+ const currentPath = fieldPath ? `${fieldPath}.${fieldName}` : fieldName;
1420
1296
 
1421
- const docs = await Model.find(query, currentPath).lean();
1297
+ if (fieldValue === true) {
1298
+ // This is a File reference field
1299
+ const query = {};
1300
+ query[currentPath] = { $exists: true, $ne: null };
1422
1301
 
1423
- for (const doc of docs) {
1424
- // Navigate to the nested field
1425
- const parts = currentPath.split('.');
1426
- let value = doc;
1427
- for (const part of parts) {
1428
- value = value?.[part];
1429
- }
1302
+ const docs = await Model.find(query, currentPath).lean();
1303
+
1304
+ for (const doc of docs) {
1305
+ // Navigate to the nested field
1306
+ const parts = currentPath.split('.');
1307
+ let value = doc;
1308
+ for (const part of parts) {
1309
+ value = value?.[part];
1310
+ }
1430
1311
 
1431
- if (value) {
1432
- if (Array.isArray(value)) {
1433
- value.forEach((id) => id && referencedFileIds.add(id.toString()));
1434
- } else {
1435
- referencedFileIds.add(value.toString());
1312
+ if (value) {
1313
+ if (Array.isArray(value)) {
1314
+ value.forEach((id) => id && referencedFileIds.add(id.toString()));
1315
+ } else {
1316
+ referencedFileIds.add(value.toString());
1317
+ }
1436
1318
  }
1437
1319
  }
1438
- }
1439
1320
 
1440
- logger.info('Found references', {
1441
- model: modelName,
1442
- field: currentPath,
1443
- count: docs.length,
1444
- });
1445
- } else if (typeof fieldValue === 'object') {
1446
- // Nested object, recurse
1447
- await checkFieldReferences(currentPath, fieldValue);
1321
+ logger.info('Found references', {
1322
+ model: modelName,
1323
+ field: currentPath,
1324
+ count: docs.length,
1325
+ });
1326
+ } else if (typeof fieldValue === 'object') {
1327
+ // Nested object, recurse
1328
+ await checkFieldReferences(currentPath, fieldValue);
1329
+ }
1448
1330
  }
1449
- }
1450
- };
1331
+ };
1451
1332
 
1452
- await checkFieldReferences('', modelFields);
1453
- }
1454
-
1455
- logger.info('Total referenced File IDs', { count: referencedFileIds.size, host, path });
1333
+ await checkFieldReferences('', modelFields);
1334
+ }
1456
1335
 
1457
- // Find orphaned files
1458
- const orphanedFiles = allFiles.filter((file) => !referencedFileIds.has(file._id.toString()));
1336
+ logger.info('Total referenced File IDs', { count: referencedFileIds.size, host, path });
1459
1337
 
1460
- if (orphanedFiles.length === 0) {
1461
- logger.info('No orphaned files found', { host, path });
1462
- } else {
1463
- logger.info('Found orphaned files', { count: orphanedFiles.length, host, path });
1338
+ // Find orphaned files
1339
+ const orphanedFiles = allFiles.filter((file) => !referencedFileIds.has(file._id.toString()));
1464
1340
 
1465
- if (options.dryRun) {
1466
- logger.info('Dry run - would delete files', {
1467
- count: orphanedFiles.length,
1468
- ids: orphanedFiles.map((f) => f._id.toString()),
1469
- });
1341
+ if (orphanedFiles.length === 0) {
1342
+ logger.info('No orphaned files found', { host, path });
1470
1343
  } else {
1471
- const orphanedIds = orphanedFiles.map((f) => f._id);
1472
- const deleteResult = await models.File.deleteMany({ _id: { $in: orphanedIds } });
1473
- logger.info('Deleted orphaned files', {
1474
- deletedCount: deleteResult.deletedCount,
1475
- host,
1476
- path,
1477
- });
1344
+ logger.info('Found orphaned files', { count: orphanedFiles.length, host, path });
1345
+
1346
+ if (options.dryRun) {
1347
+ logger.info('Dry run - would delete files', {
1348
+ count: orphanedFiles.length,
1349
+ ids: orphanedFiles.map((f) => f._id.toString()),
1350
+ });
1351
+ } else {
1352
+ const orphanedIds = orphanedFiles.map((f) => f._id);
1353
+ const deleteResult = await models.File.deleteMany({ _id: { $in: orphanedIds } });
1354
+ logger.info('Deleted orphaned files', {
1355
+ deletedCount: deleteResult.deletedCount,
1356
+ host,
1357
+ path,
1358
+ });
1359
+ }
1478
1360
  }
1361
+ } catch (error) {
1362
+ logger.error('Error processing host+path', {
1363
+ host,
1364
+ path,
1365
+ error: error.message,
1366
+ });
1479
1367
  }
1480
- } catch (error) {
1481
- logger.error('Error processing host+path', {
1482
- host,
1483
- path,
1484
- error: error.message,
1485
- });
1486
1368
  }
1487
1369
  }
1488
1370
  }
1489
- }
1490
1371
 
1491
- // Close all connections
1492
- logger.info('Closing all database connections', { count: connectionsToClose.length });
1493
- for (const { host, path, dbProvider } of connectionsToClose) {
1494
- try {
1495
- if (dbProvider && dbProvider.close) {
1496
- await dbProvider.close();
1497
- logger.info('Connection closed', { host, path });
1372
+ // Close all connections
1373
+ logger.info('Closing all database connections', { count: connectionsToClose.length });
1374
+ for (const { host, path, dbProvider } of connectionsToClose) {
1375
+ try {
1376
+ if (dbProvider && dbProvider.close) {
1377
+ await dbProvider.close();
1378
+ logger.info('Connection closed', { host, path });
1379
+ }
1380
+ } catch (error) {
1381
+ logger.error('Error closing connection', { host, path, error: error.message });
1498
1382
  }
1499
- } catch (error) {
1500
- logger.error('Error closing connection', { host, path, error: error.message });
1501
1383
  }
1502
- }
1503
1384
 
1504
- logger.info('File collection cleanup completed');
1385
+ logger.info('File collection cleanup completed');
1386
+ } catch (error) {
1387
+ logger.error('File collection cleanup failed', { error: error.message });
1388
+ throw error;
1389
+ } finally {
1390
+ if (ephemeral && isInsideContainer()) {
1391
+ Underpost.repo.cleanupPrivateEngineRepo();
1392
+ Underpost.env.clean();
1393
+ }
1394
+ }
1505
1395
  },
1506
1396
 
1507
1397
  /**
@@ -1535,68 +1425,79 @@ class UnderpostDB {
1535
1425
  crons: false,
1536
1426
  },
1537
1427
  ) {
1538
- loadCronDeployEnv();
1539
- deployId = deployId ? deployId : process.env.DEFAULT_DEPLOY_ID;
1540
- host = host ? host : process.env.DEFAULT_DEPLOY_HOST;
1541
- path = path ? path : process.env.DEFAULT_DEPLOY_PATH;
1542
-
1543
- logger.info('Starting cluster metadata backup operation', {
1544
- deployId,
1545
- host,
1546
- path,
1547
- options,
1548
- });
1549
-
1550
- if (options.generate === true) {
1551
- logger.info('Generating cluster metadata');
1552
- await Underpost.db.clusterMetadataFactory(deployId, host, path);
1553
- }
1428
+ const { ephemeral } = Underpost.repo.privateEngineRepoFactory(deployId || undefined);
1429
+ try {
1430
+ loadCronDeployEnv();
1431
+ deployId = deployId ? deployId : process.env.DEFAULT_DEPLOY_ID;
1432
+ host = host ? host : process.env.DEFAULT_DEPLOY_HOST;
1433
+ path = path ? path : process.env.DEFAULT_DEPLOY_PATH;
1434
+
1435
+ logger.info('Starting cluster metadata backup operation', {
1436
+ deployId,
1437
+ host,
1438
+ path,
1439
+ options,
1440
+ });
1554
1441
 
1555
- if (options.instances === true) {
1556
- const outputPath = './engine-private/instances';
1557
- if (!fs.existsSync(outputPath)) {
1558
- fs.mkdirSync(outputPath, { recursive: true });
1442
+ if (options.generate === true) {
1443
+ logger.info('Generating cluster metadata');
1444
+ await Underpost.db.clusterMetadataFactory(deployId, host, path);
1559
1445
  }
1560
- const collection = 'instances';
1561
1446
 
1562
- if (options.export === true) {
1563
- logger.info('Exporting instances collection', { outputPath });
1564
- shellExec(
1565
- `node bin db --export --primary-pod --collections ${collection} --out-path ${outputPath} --hosts ${host} --paths '${path}' ${deployId}`,
1566
- );
1567
- }
1447
+ if (options.instances === true) {
1448
+ const outputPath = './engine-private/instances';
1449
+ if (!fs.existsSync(outputPath)) {
1450
+ fs.mkdirSync(outputPath, { recursive: true });
1451
+ }
1452
+ const collection = 'instances';
1568
1453
 
1569
- if (options.import === true) {
1570
- logger.info('Importing instances collection', { outputPath });
1571
- shellExec(
1572
- `node bin db --import --primary-pod --drop --preserveUUID --out-path ${outputPath} --hosts ${host} --paths '${path}' ${deployId}`,
1573
- );
1574
- }
1575
- }
1454
+ if (options.export === true) {
1455
+ logger.info('Exporting instances collection', { outputPath });
1456
+ shellExec(
1457
+ `node bin db --export --primary-pod --collections ${collection} --out-path ${outputPath} --hosts ${host} --paths '${path}' ${deployId}`,
1458
+ );
1459
+ }
1576
1460
 
1577
- if (options.crons === true) {
1578
- const outputPath = './engine-private/crons';
1579
- if (!fs.existsSync(outputPath)) {
1580
- fs.mkdirSync(outputPath, { recursive: true });
1461
+ if (options.import === true) {
1462
+ logger.info('Importing instances collection', { outputPath });
1463
+ shellExec(
1464
+ `node bin db --import --primary-pod --drop --preserveUUID --out-path ${outputPath} --hosts ${host} --paths '${path}' ${deployId}`,
1465
+ );
1466
+ }
1581
1467
  }
1582
- const collection = 'crons';
1583
1468
 
1584
- if (options.export === true) {
1585
- logger.info('Exporting crons collection', { outputPath });
1586
- shellExec(
1587
- `node bin db --export --primary-pod --collections ${collection} --out-path ${outputPath} --hosts ${host} --paths '${path}' ${deployId}`,
1588
- );
1469
+ if (options.crons === true) {
1470
+ const outputPath = './engine-private/crons';
1471
+ if (!fs.existsSync(outputPath)) {
1472
+ fs.mkdirSync(outputPath, { recursive: true });
1473
+ }
1474
+ const collection = 'crons';
1475
+
1476
+ if (options.export === true) {
1477
+ logger.info('Exporting crons collection', { outputPath });
1478
+ shellExec(
1479
+ `node bin db --export --primary-pod --collections ${collection} --out-path ${outputPath} --hosts ${host} --paths '${path}' ${deployId}`,
1480
+ );
1481
+ }
1482
+
1483
+ if (options.import === true) {
1484
+ logger.info('Importing crons collection', { outputPath });
1485
+ shellExec(
1486
+ `node bin db --import --primary-pod --drop --preserveUUID --out-path ${outputPath} --hosts ${host} --paths '${path}' ${deployId}`,
1487
+ );
1488
+ }
1589
1489
  }
1590
1490
 
1591
- if (options.import === true) {
1592
- logger.info('Importing crons collection', { outputPath });
1593
- shellExec(
1594
- `node bin db --import --primary-pod --drop --preserveUUID --out-path ${outputPath} --hosts ${host} --paths '${path}' ${deployId}`,
1595
- );
1491
+ logger.info('Cluster metadata backup operation completed');
1492
+ } catch (error) {
1493
+ logger.error('Cluster metadata backup operation failed', { error: error.message });
1494
+ throw error;
1495
+ } finally {
1496
+ if (ephemeral && isInsideContainer()) {
1497
+ Underpost.repo.cleanupPrivateEngineRepo();
1498
+ Underpost.env.clean();
1596
1499
  }
1597
1500
  }
1598
-
1599
- logger.info('Cluster metadata backup operation completed');
1600
1501
  },
1601
1502
  };
1602
1503
  }