underpost 2.92.0 → 2.95.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. package/.github/workflows/pwa-microservices-template-page.cd.yml +5 -4
  2. package/README.md +4 -5
  3. package/bin/build.js +6 -1
  4. package/bin/deploy.js +2 -69
  5. package/cli.md +99 -92
  6. package/manifests/deployment/dd-default-development/deployment.yaml +4 -4
  7. package/manifests/deployment/dd-test-development/deployment.yaml +2 -2
  8. package/package.json +1 -1
  9. package/scripts/disk-clean.sh +216 -0
  10. package/scripts/ssh-cluster-info.sh +4 -3
  11. package/src/cli/cluster.js +1 -1
  12. package/src/cli/db.js +71 -80
  13. package/src/cli/deploy.js +77 -13
  14. package/src/cli/image.js +198 -133
  15. package/src/cli/index.js +59 -81
  16. package/src/cli/lxd.js +73 -74
  17. package/src/cli/monitor.js +20 -9
  18. package/src/cli/repository.js +86 -3
  19. package/src/cli/run.js +167 -63
  20. package/src/cli/ssh.js +351 -134
  21. package/src/index.js +1 -1
  22. package/src/monitor.js +11 -1
  23. package/src/server/backup.js +1 -1
  24. package/src/server/conf.js +1 -1
  25. package/src/server/dns.js +88 -1
  26. package/src/server/process.js +6 -1
  27. package/scripts/snap-clean.sh +0 -26
  28. package/src/client/public/default/plantuml/client-conf.svg +0 -1
  29. package/src/client/public/default/plantuml/client-schema.svg +0 -1
  30. package/src/client/public/default/plantuml/cron-conf.svg +0 -1
  31. package/src/client/public/default/plantuml/cron-schema.svg +0 -1
  32. package/src/client/public/default/plantuml/server-conf.svg +0 -1
  33. package/src/client/public/default/plantuml/server-schema.svg +0 -1
  34. package/src/client/public/default/plantuml/ssr-conf.svg +0 -1
  35. package/src/client/public/default/plantuml/ssr-schema.svg +0 -1
@@ -0,0 +1,216 @@
1
+ #!/usr/bin/env bash
2
+ # disk-clean.sh
3
+ # Safe, interactive disk cleanup for Rocky/RHEL-like systems.
4
+
5
+ set -u # Detect undefined variables (removed -e to handle errors manually where it matters)
6
+ IFS=$'\n\t'
7
+
8
+ AUTO_YES=0
9
+ LXD_FLAG=0
10
+ VACUUM_SIZE="500M"
11
+ TMP_AGE_DAYS=7
12
+ LOG_GZ_AGE_DAYS=90
13
+ ROOT_CACHE_AGE_DAYS=30
14
+ AGGRESSIVE=0
15
+
16
+ # Colors for better readability
17
+ RED='\033[0;31m'
18
+ GREEN='\033[0;32m'
19
+ YELLOW='\033[1;33m'
20
+ NC='\033[0m' # No Color
21
+
22
+ usage() {
23
+ cat <<EOF
24
+ Usage: $0 [--yes] [--aggressive] [--lxd] [--vacuum-size SIZE]
25
+
26
+ Options:
27
+ --yes run destructive actions without asking
28
+ --aggressive clean user caches (npm, pip, conda, root .cache)
29
+ --lxd enable lxc image prune
30
+ --vacuum-size X set journalctl --vacuum-size (default: $VACUUM_SIZE)
31
+ -h, --help show this help
32
+ EOF
33
+ }
34
+
35
+ # Parse args
36
+ while [[ $# -gt 0 ]]; do
37
+ case "$1" in
38
+ --yes) AUTO_YES=1; shift;;
39
+ --aggressive) AGGRESSIVE=1; shift;;
40
+ --lxd) LXD_FLAG=1; shift;;
41
+ --vacuum-size) VACUUM_SIZE="$2"; shift 2;;
42
+ -h|--help) usage; exit 0;;
43
+ *) echo "Unknown argument: $1"; usage; exit 2;;
44
+ esac
45
+ done
46
+
47
+ log() { echo -e "${GREEN}[INFO]${NC} $*"; }
48
+ warn() { echo -e "${YELLOW}[WARN]${NC} $*"; }
49
+ error() { echo -e "${RED}[ERROR]${NC} $*"; }
50
+
51
+ run() {
52
+ # Runs the command safely
53
+ echo "+ $*"
54
+ # Execute the command passed as arguments, preserving quotes/spaces
55
+ "$@" || {
56
+ warn "Command failed (non-critical): $*"
57
+ return 0
58
+ }
59
+ }
60
+
61
+ confirm() {
62
+ if [[ $AUTO_YES -eq 1 ]]; then
63
+ return 0
64
+ fi
65
+ # Use </dev/tty to ensure we read from user even inside a pipe loop
66
+ read -r -p "$1 [y/N]: " ans </dev/tty
67
+ case "$ans" in
68
+ [Yy]|[Yy][Ee][Ss]) return 0;;
69
+ *) return 1;;
70
+ esac
71
+ }
72
+
73
+ require_root() {
74
+ if [[ $EUID -ne 0 ]]; then
75
+ error "This script must be run as root."
76
+ exit 1
77
+ fi
78
+ }
79
+
80
+ command_exists() {
81
+ command -v "$1" >/dev/null 2>&1
82
+ }
83
+
84
+ require_root
85
+
86
+ log "Starting cleanup (aggressive=$AGGRESSIVE)"
87
+
88
+ # 1) Package Manager (DNF)
89
+ if command_exists dnf; then
90
+ log "Cleaning DNF caches"
91
+ run dnf clean all
92
+ run rm -rf /var/cache/dnf
93
+ if confirm "Run 'dnf autoremove -y' for orphan packages?"; then
94
+ run dnf autoremove -y
95
+ else
96
+ log "Skipped dnf autoremove"
97
+ fi
98
+ else
99
+ warn "dnf not found"
100
+ fi
101
+
102
+ # 2) Journal logs
103
+ if command_exists journalctl; then
104
+ log "Current Journal disk usage:"
105
+ journalctl --disk-usage || true
106
+
107
+ if confirm "Run 'journalctl --vacuum-size=$VACUUM_SIZE'?"; then
108
+ run journalctl --vacuum-size="$VACUUM_SIZE"
109
+ else
110
+ log "Skipped journal vacuum"
111
+ fi
112
+ fi
113
+
114
+ # 3) /var/tmp
115
+ if [[ -d /var/tmp ]]; then
116
+ if confirm "Delete files in /var/tmp older than $TMP_AGE_DAYS days?"; then
117
+ find /var/tmp -mindepth 1 -mtime +$TMP_AGE_DAYS -delete
118
+ fi
119
+ fi
120
+
121
+ # 4) Old compressed logs
122
+ if [[ -d /var/log ]]; then
123
+ if confirm "Delete compressed logs (.gz) in /var/log older than $LOG_GZ_AGE_DAYS days?"; then
124
+ find /var/log -type f -name '*.gz' -mtime +$LOG_GZ_AGE_DAYS -delete
125
+ fi
126
+ fi
127
+
128
+ # 5) Snap: disabled revisions
129
+ if command_exists snap; then
130
+ log "Searching for old Snap revisions"
131
+ # Save to variable only if successful
132
+ disabled_snaps=$(snap list --all 2>/dev/null | awk '/disabled/ {print $1, $3}') || disabled_snaps=""
133
+
134
+ if [[ -n "$disabled_snaps" ]]; then
135
+ echo "Disabled snap revisions found:"
136
+ echo "$disabled_snaps"
137
+ if confirm "Remove all disabled revisions?"; then
138
+ while read -r pkg rev; do
139
+ [[ -z "$pkg" ]] && continue
140
+ log "Removing snap $pkg (revision $rev)"
141
+ run snap remove "$pkg" --revision="$rev"
142
+ done <<< "$disabled_snaps"
143
+
144
+ log "Setting snap retention to 2"
145
+ run snap set system refresh.retain=2
146
+ fi
147
+ else
148
+ log "No disabled snap revisions found."
149
+ fi
150
+ fi
151
+
152
+ # 6) LXD
153
+ if command_exists lxc; then
154
+ if [[ $LXD_FLAG -eq 1 ]]; then
155
+ if confirm "Run 'lxc image prune'?"; then
156
+ run lxc image prune -f
157
+ fi
158
+ else
159
+ log "Skipping LXD (use --lxd to enable)"
160
+ fi
161
+ fi
162
+
163
+ # 7) Docker / Containerd
164
+ if command_exists docker; then
165
+ if confirm "Run 'docker system prune -a --volumes'? (WARNING: Removes stopped containers)"; then
166
+ run docker system prune -a --volumes -f
167
+ fi
168
+ elif command_exists crictl; then
169
+ if confirm "Attempt to remove images with crictl?"; then
170
+ run crictl rmi --prune
171
+ fi
172
+ fi
173
+
174
+ # 8) Large Files (>1G) - Completely rewritten logic
175
+ log "Scanning for files larger than 1G (this may take a while)..."
176
+
177
+ # Use while loop with find -print0 to handle filenames with spaces safely
178
+ FOUND_LARGE=0
179
+ # Note: find does not modify filesystem here, safe to run always
180
+ while IFS= read -r -d '' file; do
181
+ FOUND_LARGE=1
182
+ # Get readable size to show user
183
+ filesize=$(du -h "$file" | cut -f1)
184
+
185
+ echo -e "Large file found: ${YELLOW}$file${NC} (Size: $filesize)"
186
+
187
+ if confirm " -> Delete this file?"; then
188
+ run rm -vf "$file"
189
+ else
190
+ log "Skipped: $file"
191
+ fi
192
+ done < <(find / -xdev -type f -size +1G -print0 2>/dev/null)
193
+
194
+ if [[ $FOUND_LARGE -eq 0 ]]; then
195
+ log "No files >1G found in /"
196
+ fi
197
+
198
+ # 9) Aggressive Caches
199
+ if [[ $AGGRESSIVE -eq 1 ]]; then
200
+ log "Aggressive mode enabled"
201
+ command_exists npm && confirm "Run 'npm cache clean --force'?" && run npm cache clean --force
202
+ command_exists pip && confirm "Run 'pip cache purge'?" && run pip cache purge
203
+ command_exists conda && confirm "Run 'conda clean --all -y'?" && run conda clean --all -y
204
+
205
+ if [[ -d /root/.cache ]]; then
206
+ if confirm "Delete /root/.cache (> $ROOT_CACHE_AGE_DAYS days)?"; then
207
+ find /root/.cache -type f -mtime +$ROOT_CACHE_AGE_DAYS -delete
208
+ fi
209
+ fi
210
+ fi
211
+
212
+ # 10) Final
213
+ log "Final disk usage:"
214
+ df -h --total | grep total || df -h /
215
+
216
+ log "Cleanup finished."
@@ -3,12 +3,13 @@ set -euo pipefail
3
3
 
4
4
  REMOTE_USER=$(node bin config get --plain DEFAULT_SSH_USER)
5
5
  REMOTE_HOST=$(node bin config get --plain DEFAULT_SSH_HOST)
6
+ REMOTE_PORT=$(node bin config get --plain DEFAULT_SSH_PORT)
6
7
  SSH_KEY=$(node bin config get --plain DEFAULT_SSH_KEY_PATH)
7
8
 
8
9
  chmod 600 "$SSH_KEY"
9
10
 
10
- ssh -i "$SSH_KEY" -o BatchMode=yes "${REMOTE_USER}@${REMOTE_HOST}" sh <<EOF
11
+ ssh -i "$SSH_KEY" -o BatchMode=yes "${REMOTE_USER}@${REMOTE_HOST}" -p ${REMOTE_PORT} sh <<EOF
11
12
  cd /home/dd/engine
12
- node bin deploy dd production --status
13
- kubectl get pods -A
13
+ sudo -n -- /bin/bash -lc "node bin deploy dd production --status"
14
+ sudo -n -- /bin/bash -lc "kubectl get pods -A"
14
15
  EOF
@@ -558,7 +558,7 @@ net.ipv4.ip_forward = 1' | sudo tee ${iptableConfPath}`,
558
558
  }
559
559
 
560
560
  if (kubeconfigPath) {
561
- shellExec(`sudo -E cp -i ${kubeconfigPath} ~/.kube/config`);
561
+ shellExec(`sudo -E cp -i -f ${kubeconfigPath} ~/.kube/config`);
562
562
  shellExec(`sudo -E chown $(id -u):$(id -g) ~/.kube/config`);
563
563
  } else if (clusterType === 'kind') {
564
564
  // For Kind, the kubeconfig is usually merged automatically or can be explicitly exported
package/src/cli/db.js CHANGED
@@ -49,7 +49,6 @@ const KUBECTL_TIMEOUT = 300000; // 5 minutes
49
49
  * @property {string} [paths=''] - Comma-separated list of paths to include
50
50
  * @property {string} [labelSelector=''] - Kubernetes label selector for pods
51
51
  * @property {boolean} [allPods=false] - Flag to target all matching pods
52
- * @property {boolean} [dryRun=false] - Flag to simulate operations without executing
53
52
  * @property {boolean} [primaryPod=false] - Flag to automatically detect and use MongoDB primary pod
54
53
  * @property {boolean} [stats=false] - Flag to display collection/table statistics
55
54
  */
@@ -171,17 +170,11 @@ class UnderpostDB {
171
170
  * @private
172
171
  * @param {string} command - kubectl command to execute
173
172
  * @param {Object} options - Execution options
174
- * @param {boolean} [options.dryRun=false] - Dry run mode
175
173
  * @param {string} [options.context=''] - Command context for logging
176
174
  * @returns {string|null} Command output or null on error
177
175
  */
178
176
  _executeKubectl(command, options = {}) {
179
- const { dryRun = false, context = '' } = options;
180
-
181
- if (dryRun) {
182
- logger.info(`[DRY RUN] Would execute: ${command}`, { context });
183
- return null;
184
- }
177
+ const { context = '' } = options;
185
178
 
186
179
  try {
187
180
  logger.info(`Executing kubectl command`, { command, context });
@@ -200,13 +193,12 @@ class UnderpostDB {
200
193
  * @param {string} params.podName - Target pod name
201
194
  * @param {string} params.namespace - Pod namespace
202
195
  * @param {string} params.destPath - Destination path in pod
203
- * @param {boolean} [params.dryRun=false] - Dry run mode
204
196
  * @returns {boolean} Success status
205
197
  */
206
- _copyToPod({ sourcePath, podName, namespace, destPath, dryRun = false }) {
198
+ _copyToPod({ sourcePath, podName, namespace, destPath }) {
207
199
  try {
208
200
  const command = `sudo kubectl cp ${sourcePath} ${namespace}/${podName}:${destPath}`;
209
- UnderpostDB.API._executeKubectl(command, { dryRun, context: `copy to pod ${podName}` });
201
+ UnderpostDB.API._executeKubectl(command, { context: `copy to pod ${podName}` });
210
202
  return true;
211
203
  } catch (error) {
212
204
  logger.error('Failed to copy file to pod', { sourcePath, podName, destPath, error: error.message });
@@ -222,13 +214,12 @@ class UnderpostDB {
222
214
  * @param {string} params.namespace - Pod namespace
223
215
  * @param {string} params.sourcePath - Source path in pod
224
216
  * @param {string} params.destPath - Destination file path
225
- * @param {boolean} [params.dryRun=false] - Dry run mode
226
217
  * @returns {boolean} Success status
227
218
  */
228
- _copyFromPod({ podName, namespace, sourcePath, destPath, dryRun = false }) {
219
+ _copyFromPod({ podName, namespace, sourcePath, destPath }) {
229
220
  try {
230
221
  const command = `sudo kubectl cp ${namespace}/${podName}:${sourcePath} ${destPath}`;
231
- UnderpostDB.API._executeKubectl(command, { dryRun, context: `copy from pod ${podName}` });
222
+ UnderpostDB.API._executeKubectl(command, { context: `copy from pod ${podName}` });
232
223
  return true;
233
224
  } catch (error) {
234
225
  logger.error('Failed to copy file from pod', { podName, sourcePath, destPath, error: error.message });
@@ -243,13 +234,12 @@ class UnderpostDB {
243
234
  * @param {string} params.podName - Pod name
244
235
  * @param {string} params.namespace - Pod namespace
245
236
  * @param {string} params.command - Command to execute
246
- * @param {boolean} [params.dryRun=false] - Dry run mode
247
237
  * @returns {string|null} Command output or null
248
238
  */
249
- _execInPod({ podName, namespace, command, dryRun = false }) {
239
+ _execInPod({ podName, namespace, command }) {
250
240
  try {
251
241
  const kubectlCmd = `sudo kubectl exec -n ${namespace} -i ${podName} -- sh -c "${command}"`;
252
- return UnderpostDB.API._executeKubectl(kubectlCmd, { dryRun, context: `exec in pod ${podName}` });
242
+ return UnderpostDB.API._executeKubectl(kubectlCmd, { context: `exec in pod ${podName}` });
253
243
  } catch (error) {
254
244
  logger.error('Failed to execute command in pod', { podName, command, error: error.message });
255
245
  throw error;
@@ -286,7 +276,9 @@ class UnderpostDB {
286
276
  case 'pull':
287
277
  if (fs.existsSync(repoPath)) {
288
278
  shellExec(`cd ${repoPath} && git checkout . && git clean -f -d`);
289
- shellExec(`cd ${repoPath} && underpost pull . ${username}/${repoName}`);
279
+ shellExec(`cd ${repoPath} && underpost pull . ${username}/${repoName}`, {
280
+ silent: true,
281
+ });
290
282
  logger.info(`Pulled repository: ${repoName}`);
291
283
  }
292
284
  break;
@@ -301,7 +293,7 @@ class UnderpostDB {
301
293
 
302
294
  case 'push':
303
295
  if (fs.existsSync(repoPath)) {
304
- shellExec(`cd ${repoPath} && underpost push . ${username}/${repoName}`, { disableLog: true });
296
+ shellExec(`cd ${repoPath} && underpost push . ${username}/${repoName}`, { silent: true });
305
297
  logger.info(`Pushed repository: ${repoName}`);
306
298
  }
307
299
  break;
@@ -376,10 +368,9 @@ class UnderpostDB {
376
368
  * @param {string} params.user - Database user
377
369
  * @param {string} params.password - Database password
378
370
  * @param {string} params.sqlPath - SQL file path
379
- * @param {boolean} [params.dryRun=false] - Dry run mode
380
371
  * @returns {boolean} Success status
381
372
  */
382
- _importMariaDB({ pod, namespace, dbName, user, password, sqlPath, dryRun = false }) {
373
+ _importMariaDB({ pod, namespace, dbName, user, password, sqlPath }) {
383
374
  try {
384
375
  const podName = pod.NAME;
385
376
  const containerSqlPath = `/${dbName}.sql`;
@@ -391,7 +382,6 @@ class UnderpostDB {
391
382
  podName,
392
383
  namespace,
393
384
  command: `rm -rf ${containerSqlPath}`,
394
- dryRun,
395
385
  });
396
386
 
397
387
  // Copy SQL file to pod
@@ -401,7 +391,6 @@ class UnderpostDB {
401
391
  podName,
402
392
  namespace,
403
393
  destPath: containerSqlPath,
404
- dryRun,
405
394
  })
406
395
  ) {
407
396
  return false;
@@ -410,12 +399,12 @@ class UnderpostDB {
410
399
  // Create database if it doesn't exist
411
400
  UnderpostDB.API._executeKubectl(
412
401
  `kubectl exec -n ${namespace} -i ${podName} -- mariadb -p${password} -e 'CREATE DATABASE IF NOT EXISTS ${dbName};'`,
413
- { dryRun, context: `create database ${dbName}` },
402
+ { context: `create database ${dbName}` },
414
403
  );
415
404
 
416
405
  // Import SQL file
417
406
  const importCmd = `mariadb -u ${user} -p${password} ${dbName} < ${containerSqlPath}`;
418
- UnderpostDB.API._execInPod({ podName, namespace, command: importCmd, dryRun });
407
+ UnderpostDB.API._execInPod({ podName, namespace, command: importCmd });
419
408
 
420
409
  logger.info('Successfully imported MariaDB database', { podName, dbName });
421
410
  return true;
@@ -435,10 +424,9 @@ class UnderpostDB {
435
424
  * @param {string} params.user - Database user
436
425
  * @param {string} params.password - Database password
437
426
  * @param {string} params.outputPath - Output file path
438
- * @param {boolean} [params.dryRun=false] - Dry run mode
439
427
  * @returns {boolean} Success status
440
428
  */
441
- async _exportMariaDB({ pod, namespace, dbName, user, password, outputPath, dryRun = false }) {
429
+ async _exportMariaDB({ pod, namespace, dbName, user, password, outputPath }) {
442
430
  try {
443
431
  const podName = pod.NAME;
444
432
  const containerSqlPath = `/home/${dbName}.sql`;
@@ -450,12 +438,11 @@ class UnderpostDB {
450
438
  podName,
451
439
  namespace,
452
440
  command: `rm -rf ${containerSqlPath}`,
453
- dryRun,
454
441
  });
455
442
 
456
443
  // Dump database
457
444
  const dumpCmd = `mariadb-dump --user=${user} --password=${password} --lock-tables ${dbName} > ${containerSqlPath}`;
458
- UnderpostDB.API._execInPod({ podName, namespace, command: dumpCmd, dryRun });
445
+ UnderpostDB.API._execInPod({ podName, namespace, command: dumpCmd });
459
446
 
460
447
  // Copy SQL file from pod
461
448
  if (
@@ -464,14 +451,13 @@ class UnderpostDB {
464
451
  namespace,
465
452
  sourcePath: containerSqlPath,
466
453
  destPath: outputPath,
467
- dryRun,
468
454
  })
469
455
  ) {
470
456
  return false;
471
457
  }
472
458
 
473
459
  // Split file if it exists
474
- if (!dryRun && fs.existsSync(outputPath)) {
460
+ if (fs.existsSync(outputPath)) {
475
461
  await splitFileFactory(dbName, outputPath);
476
462
  }
477
463
 
@@ -493,10 +479,9 @@ class UnderpostDB {
493
479
  * @param {string} params.bsonPath - BSON directory path
494
480
  * @param {boolean} params.drop - Whether to drop existing database
495
481
  * @param {boolean} params.preserveUUID - Whether to preserve UUIDs
496
- * @param {boolean} [params.dryRun=false] - Dry run mode
497
482
  * @returns {boolean} Success status
498
483
  */
499
- _importMongoDB({ pod, namespace, dbName, bsonPath, drop, preserveUUID, dryRun = false }) {
484
+ _importMongoDB({ pod, namespace, dbName, bsonPath, drop, preserveUUID }) {
500
485
  try {
501
486
  const podName = pod.NAME;
502
487
  const containerBsonPath = `/${dbName}`;
@@ -508,7 +493,6 @@ class UnderpostDB {
508
493
  podName,
509
494
  namespace,
510
495
  command: `rm -rf ${containerBsonPath}`,
511
- dryRun,
512
496
  });
513
497
 
514
498
  // Copy BSON directory to pod
@@ -518,7 +502,6 @@ class UnderpostDB {
518
502
  podName,
519
503
  namespace,
520
504
  destPath: containerBsonPath,
521
- dryRun,
522
505
  })
523
506
  ) {
524
507
  return false;
@@ -528,7 +511,7 @@ class UnderpostDB {
528
511
  const restoreCmd = `mongorestore -d ${dbName} ${containerBsonPath}${drop ? ' --drop' : ''}${
529
512
  preserveUUID ? ' --preserveUUID' : ''
530
513
  }`;
531
- UnderpostDB.API._execInPod({ podName, namespace, command: restoreCmd, dryRun });
514
+ UnderpostDB.API._execInPod({ podName, namespace, command: restoreCmd });
532
515
 
533
516
  logger.info('Successfully imported MongoDB database', { podName, dbName });
534
517
  return true;
@@ -547,10 +530,9 @@ class UnderpostDB {
547
530
  * @param {string} params.dbName - Database name
548
531
  * @param {string} params.outputPath - Output directory path
549
532
  * @param {string} [params.collections=''] - Comma-separated collection list
550
- * @param {boolean} [params.dryRun=false] - Dry run mode
551
533
  * @returns {boolean} Success status
552
534
  */
553
- _exportMongoDB({ pod, namespace, dbName, outputPath, collections = '', dryRun = false }) {
535
+ _exportMongoDB({ pod, namespace, dbName, outputPath, collections = '' }) {
554
536
  try {
555
537
  const podName = pod.NAME;
556
538
  const containerBsonPath = `/${dbName}`;
@@ -562,7 +544,6 @@ class UnderpostDB {
562
544
  podName,
563
545
  namespace,
564
546
  command: `rm -rf ${containerBsonPath}`,
565
- dryRun,
566
547
  });
567
548
 
568
549
  // Dump database or specific collections
@@ -570,11 +551,11 @@ class UnderpostDB {
570
551
  const collectionList = collections.split(',').map((c) => c.trim());
571
552
  for (const collection of collectionList) {
572
553
  const dumpCmd = `mongodump -d ${dbName} --collection ${collection} -o /`;
573
- UnderpostDB.API._execInPod({ podName, namespace, command: dumpCmd, dryRun });
554
+ UnderpostDB.API._execInPod({ podName, namespace, command: dumpCmd });
574
555
  }
575
556
  } else {
576
557
  const dumpCmd = `mongodump -d ${dbName} -o /`;
577
- UnderpostDB.API._execInPod({ podName, namespace, command: dumpCmd, dryRun });
558
+ UnderpostDB.API._execInPod({ podName, namespace, command: dumpCmd });
578
559
  }
579
560
 
580
561
  // Copy BSON directory from pod
@@ -584,7 +565,6 @@ class UnderpostDB {
584
565
  namespace,
585
566
  sourcePath: containerBsonPath,
586
567
  destPath: outputPath,
587
- dryRun,
588
568
  })
589
569
  ) {
590
570
  return false;
@@ -782,33 +762,26 @@ class UnderpostDB {
782
762
  * database connections, backup storage, and optional Git integration for version control.
783
763
  * Supports targeting multiple specific pods, nodes, and namespaces with advanced filtering.
784
764
  * @param {string} [deployList='default'] - Comma-separated list of deployment IDs
785
- * @param {DatabaseOptions} [options] - Database operation options
786
- * @returns {Promise<void>}
765
+ * @param {Object} options - Backup options
766
+ * @param {boolean} [options.import=false] - Whether to perform import operation
767
+ * @param {boolean} [options.export=false] - Whether to perform export operation
768
+ * @param {string} [options.podName=''] - Comma-separated pod name patterns to target
769
+ * @param {string} [options.nodeName=''] - Comma-separated node names to target
770
+ * @param {string} [options.ns='default'] - Kubernetes namespace
771
+ * @param {string} [options.collections=''] - Comma-separated MongoDB collections for export
772
+ * @param {string} [options.outPath=''] - Output path for backups
773
+ * @param {boolean} [options.drop=false] - Whether to drop existing database on import
774
+ * @param {boolean} [options.preserveUUID=false] - Whether to preserve UUIDs on MongoDB import
775
+ * @param {boolean} [options.git=false] - Whether to use Git for backup versioning
776
+ * @param {string} [options.hosts=''] - Comma-separated list of hosts to filter databases
777
+ * @param {string} [options.paths=''] - Comma-separated list of paths to filter databases
778
+ * @param {string} [options.labelSelector=''] - Label selector for pod filtering
779
+ * @param {boolean} [options.allPods=false] - Whether to target all pods in deployment
780
+ * @param {boolean} [options.primaryPod=false] - Whether to target MongoDB primary pod only
781
+ * @param {boolean} [options.stats=false] - Whether to display database statistics
782
+ * @param {number} [options.macroRollbackExport=1] - Number of commits to rollback in macro export
783
+ * @returns {Promise<void>} Resolves when operation is complete
787
784
  * @memberof UnderpostDB
788
- * @example
789
- * // Export database from specific pods
790
- * await UnderpostDB.API.callback('dd-myapp', {
791
- * export: true,
792
- * podName: 'mariadb-statefulset-0,mariadb-statefulset-1',
793
- * ns: 'production'
794
- * });
795
- *
796
- * @example
797
- * // Import database to all matching pods on specific nodes
798
- * await UnderpostDB.API.callback('dd-myapp', {
799
- * import: true,
800
- * nodeName: 'node-1,node-2',
801
- * allPods: true,
802
- * ns: 'staging'
803
- * });
804
- *
805
- * @example
806
- * // Import to MongoDB primary pod only
807
- * await UnderpostDB.API.callback('dd-myapp', {
808
- * import: true,
809
- * primaryPod: true,
810
- * ns: 'production'
811
- * });
812
785
  */
813
786
  async callback(
814
787
  deployList = 'default',
@@ -827,9 +800,9 @@ class UnderpostDB {
827
800
  paths: '',
828
801
  labelSelector: '',
829
802
  allPods: false,
830
- dryRun: false,
831
803
  primaryPod: false,
832
804
  stats: false,
805
+ macroRollbackExport: 1,
833
806
  },
834
807
  ) {
835
808
  const newBackupTimestamp = new Date().getTime();
@@ -841,12 +814,13 @@ class UnderpostDB {
841
814
  throw new Error(`Invalid namespace: ${namespace}`);
842
815
  }
843
816
 
817
+ if (deployList === 'dd') deployList = fs.readFileSync(`./engine-private/deploy/dd.router`, 'utf8');
818
+
844
819
  logger.info('Starting database operation', {
845
820
  deployList,
846
821
  namespace,
847
822
  import: options.import,
848
823
  export: options.export,
849
- dryRun: options.dryRun,
850
824
  });
851
825
 
852
826
  for (const _deployId of deployList.split(',')) {
@@ -895,6 +869,27 @@ class UnderpostDB {
895
869
  UnderpostDB.API._manageGitRepo({ repoName, operation: 'pull' });
896
870
  }
897
871
 
872
+ if (options.macroRollbackExport) {
873
+ UnderpostDB.API._manageGitRepo({ repoName, operation: 'clone' });
874
+ UnderpostDB.API._manageGitRepo({ repoName, operation: 'pull' });
875
+
876
+ const nCommits = parseInt(options.macroRollbackExport);
877
+ const repoPath = `../${repoName}`;
878
+ const username = process.env.GITHUB_USERNAME;
879
+
880
+ if (fs.existsSync(repoPath) && username) {
881
+ logger.info('Executing macro rollback export', { repoName, nCommits });
882
+ shellExec(`cd ${repoPath} && underpost cmt . reset ${nCommits}`);
883
+ shellExec(`cd ${repoPath} && git reset`);
884
+ shellExec(`cd ${repoPath} && git checkout .`);
885
+ shellExec(`cd ${repoPath} && git clean -f -d`);
886
+ shellExec(`cd ${repoPath} && underpost push . ${username}/${repoName} -f`);
887
+ } else {
888
+ if (!username) logger.error('GITHUB_USERNAME environment variable not set');
889
+ logger.warn('Repository not found for macro rollback', { repoPath });
890
+ }
891
+ }
892
+
898
893
  // Process each database provider
899
894
  for (const provider of Object.keys(dbs)) {
900
895
  for (const dbName of Object.keys(dbs[provider])) {
@@ -1040,7 +1035,6 @@ class UnderpostDB {
1040
1035
  user,
1041
1036
  password,
1042
1037
  sqlPath: toSqlPath,
1043
- dryRun: options.dryRun,
1044
1038
  });
1045
1039
  }
1046
1040
 
@@ -1053,7 +1047,6 @@ class UnderpostDB {
1053
1047
  user,
1054
1048
  password,
1055
1049
  outputPath,
1056
- dryRun: options.dryRun,
1057
1050
  });
1058
1051
  }
1059
1052
  break;
@@ -1080,7 +1073,6 @@ class UnderpostDB {
1080
1073
  bsonPath,
1081
1074
  drop: options.drop,
1082
1075
  preserveUUID: options.preserveUUID,
1083
- dryRun: options.dryRun,
1084
1076
  });
1085
1077
  }
1086
1078
 
@@ -1092,7 +1084,6 @@ class UnderpostDB {
1092
1084
  dbName,
1093
1085
  outputPath,
1094
1086
  collections: options.collections,
1095
- dryRun: options.dryRun,
1096
1087
  });
1097
1088
  }
1098
1089
  break;
@@ -1136,9 +1127,9 @@ class UnderpostDB {
1136
1127
  host = process.env.DEFAULT_DEPLOY_HOST,
1137
1128
  path = process.env.DEFAULT_DEPLOY_PATH,
1138
1129
  ) {
1139
- deployId = deployId ?? process.env.DEFAULT_DEPLOY_ID;
1140
- host = host ?? process.env.DEFAULT_DEPLOY_HOST;
1141
- path = path ?? process.env.DEFAULT_DEPLOY_PATH;
1130
+ deployId = deployId ? deployId : process.env.DEFAULT_DEPLOY_ID;
1131
+ host = host ? host : process.env.DEFAULT_DEPLOY_HOST;
1132
+ path = path ? path : process.env.DEFAULT_DEPLOY_PATH;
1142
1133
 
1143
1134
  logger.info('Creating cluster metadata', { deployId, host, path });
1144
1135
 
@@ -1322,9 +1313,9 @@ class UnderpostDB {
1322
1313
  crons: false,
1323
1314
  },
1324
1315
  ) {
1325
- deployId = deployId ?? process.env.DEFAULT_DEPLOY_ID;
1326
- host = host ?? process.env.DEFAULT_DEPLOY_HOST;
1327
- path = path ?? process.env.DEFAULT_DEPLOY_PATH;
1316
+ deployId = deployId ? deployId : process.env.DEFAULT_DEPLOY_ID;
1317
+ host = host ? host : process.env.DEFAULT_DEPLOY_HOST;
1318
+ path = path ? path : process.env.DEFAULT_DEPLOY_PATH;
1328
1319
 
1329
1320
  logger.info('Starting cluster metadata backup operation', {
1330
1321
  deployId,