underpost 3.2.0 → 3.2.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/workflows/publish.ci.yml +6 -0
- package/.github/workflows/pwa-microservices-template-page.cd.yml +1 -1
- package/.github/workflows/release.cd.yml +10 -5
- package/CHANGELOG.md +73 -1
- package/CLI-HELP.md +5 -4
- package/Dockerfile +4 -2
- package/README.md +2 -2
- package/manifests/cronjobs/dd-cron/dd-cron-backup.yaml +5 -2
- package/manifests/cronjobs/dd-cron/dd-cron-dns.yaml +5 -2
- package/manifests/deployment/dd-default-development/deployment.yaml +2 -2
- package/manifests/deployment/dd-test-development/deployment.yaml +18 -26
- package/package.json +2 -2
- package/src/cli/db.js +687 -620
- package/src/cli/deploy.js +47 -28
- package/src/cli/env.js +18 -0
- package/src/cli/fs.js +3 -1
- package/src/cli/index.js +3 -1
- package/src/cli/repository.js +143 -0
- package/src/cli/run.js +1 -1
- package/src/cli/secrets.js +73 -0
- package/src/client/components/core/DropDown.js +13 -5
- package/src/index.js +1 -1
- package/src/runtime/express/Dockerfile +4 -0
- package/src/runtime/lampp/Dockerfile +4 -0
- package/src/runtime/lampp/Lampp.js +23 -1
- package/src/runtime/wp/Dockerfile +4 -0
- package/src/runtime/wp/Wp.js +148 -6
- package/src/server/backup.js +57 -41
- package/src/server/cron.js +23 -18
- package/src/server/start.js +2 -7
package/src/cli/db.js
CHANGED
```diff
@@ -14,6 +14,7 @@ import { DataBaseProvider } from '../db/DataBaseProvider.js';
 import { loadReplicas, pathPortAssignmentFactory, loadCronDeployEnv } from '../server/conf.js';
 import Underpost from '../index.js';
 import { timer } from '../client/components/core/CommonJs.js';
+import isInsideContainer from 'is-inside-container';
 const logger = loggerFactory(import.meta);

 /**
```
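The new `is-inside-container` import supports the acquire/clean-up pattern added throughout this file: each entry point obtains `engine-private` (cloning it ephemerally when needed) and removes the clone in a `finally` block only when running inside a container. A minimal sketch of that pattern, assuming the `Underpost.repo` helpers shown in the hunks below (the `withPrivateEngineRepo` wrapper itself is hypothetical — the diff inlines this logic in each method):

```js
import isInsideContainer from 'is-inside-container';
import Underpost from '../index.js';

async function withPrivateEngineRepo(deployId, task) {
  // privateEngineRepoFactory reports whether engine-private had to be
  // cloned ephemerally (e.g. inside a pod where secrets were cleaned).
  const { ephemeral } = Underpost.repo.privateEngineRepoFactory(deployId || undefined);
  try {
    return await task();
  } finally {
    // Only remove the clone when it was ephemeral and we are actually
    // running inside a container, so local checkouts are never deleted.
    if (ephemeral && isInsideContainer()) {
      Underpost.repo.cleanupPrivateEngineRepo();
      Underpost.env.clean();
    }
  }
}
```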
```diff
@@ -549,6 +550,7 @@ class UnderpostDB {
    * @param {boolean} [options.k3s=false] - k3s cluster flag.
    * @param {boolean} [options.kubeadm=false] - kubeadm cluster flag.
    * @param {boolean} [options.kind=false] - kind cluster flag.
+   * @param {boolean} [options.repoBackup=false] - Backs up repositories (git commit+push) inside deployment pods via kubectl exec.
    * @return {Promise<void>} Resolves when operation is complete.
    */
   async callback(
```
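For context, a hedged sketch of how the new option reaches this method — the exact call surface is not shown in this diff, so treat the entry point as an assumption; only the option names come from the hunks:

```js
// Hypothetical direct invocation of the db callback (entry point assumed,
// not confirmed by this diff); option names mirror the documented options.
await Underpost.db.callback('dd-core,dd-cron', {
  repoBackup: true, // short-circuits: only backupPodRepositories runs, then return
  ns: 'default',    // kubectl namespace, falls back to 'default'
  dev: false,       // false selects env: 'production' for the pod backup
});
```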
```diff
@@ -577,350 +579,381 @@ class UnderpostDB {
        k3s: false,
        kubeadm: false,
        kind: false,
+       repoBackup: false,
      },
    ) {
-     const
+     // Ensure engine-private is available (clone ephemerally if inside a deployment
+     // container where globalSecretClean has already removed it).
+     const firstDeployId = deployList !== 'dd' ? deployList.split(',')[0].trim() : '';
+     const { ephemeral } = Underpost.repo.privateEngineRepoFactory(firstDeployId || undefined);
+     try {
+       loadCronDeployEnv();
+       const newBackupTimestamp = new Date().getTime();
+       const namespace = options.ns && typeof options.ns === 'string' ? options.ns : 'default';
+
+       if (deployList === 'dd') deployList = fs.readFileSync(`./engine-private/deploy/dd.router`, 'utf8');
+
+       // Handle repository backup (git commit+push inside deployment pod)
+       if (options.repoBackup) {
+         const namespace = options.ns && typeof options.ns === 'string' ? options.ns : 'default';
+         for (const _deployId of deployList.split(',')) {
+           const deployId = _deployId.trim();
+           if (!deployId) continue;
+           logger.info('Starting pod repository backup', { deployId, namespace });
+           Underpost.repo.backupPodRepositories({
+             deployId,
+             namespace,
+             env: options.dev ? 'development' : 'production',
+           });
+         }
+         return;
+       }

+       // Handle clean-fs-collection operation
+       if (options.cleanFsCollection || options.cleanFsDryRun) {
+         logger.info('Starting File collection cleanup operation', { deployList });
+         await Underpost.db.cleanFsCollection(deployList, {
+           hosts: options.hosts,
+           paths: options.paths,
+           dryRun: options.cleanFsDryRun,
+         });
+         return;
+       }
+
+       logger.info('Starting database operation', {
+         deployList,
+         namespace,
+         import: options.import,
+         export: options.export,
+       });

+       if (options.primaryPodEnsure) {
+         const primaryPodName = Underpost.db.getMongoPrimaryPodName({ namespace, podName: options.primaryPodEnsure });
+         if (!primaryPodName) {
+           const baseCommand = options.dev ? 'node bin' : 'underpost';
+           const baseClusterCommand = options.dev ? ' --dev' : '';
+           let clusterFlag = options.k3s ? ' --k3s' : options.kubeadm ? ' --kubeadm' : '';
+           shellExec(`${baseCommand} cluster${baseClusterCommand}${clusterFlag} --mongodb`);
+         }
+         return;
       }
-       return;
-     }

+       // Track processed repositories to avoid duplicate Git operations
+       const processedRepos = new Set();
+       // Track processed host+path combinations to avoid duplicates
+       const processedHostPaths = new Set();

+       for (const _deployId of deployList.split(',')) {
+         const deployId = _deployId.trim();
+         if (!deployId) continue;

+         logger.info('Processing deployment', { deployId });

+         /** @type {Object.<string, Object.<string, DatabaseConfig>>} */
+         const dbs = {};
+         const repoName = `engine-${deployId.includes('dd-') ? deployId.split('dd-')[1] : deployId}-cron-backups`;

+         // Load server configuration
+         const confServerPath = `./engine-private/conf/${deployId}/conf.server.json`;
+         if (!fs.existsSync(confServerPath)) {
+           logger.error('Configuration file not found', { path: confServerPath });
+           continue;
+         }

+         const confServer = loadConfServerJson(confServerPath, { resolve: true });
+
+         // Build database configuration map
+         for (const host of Object.keys(confServer)) {
+           for (const path of Object.keys(confServer[host])) {
+             const { db } = confServer[host][path];
+             if (db) {
+               const { provider, name, user, password } = db;
+               if (!dbs[provider]) dbs[provider] = {};
+
+               if (!(name in dbs[provider])) {
+                 dbs[provider][name] = {
+                   user,
+                   password,
+                   hostFolder: host + path.replaceAll('/', '-'),
+                   host,
+                   path,
+                 };
+               }
             }
           }
         }
-       }
-
-       // Handle Git operations - execute only once per repository
-       if (!processedRepos.has(repoName)) {
-         logger.info('Processing Git operations for repository', { repoName, deployId });
-         if (options.git === true) {
-           Underpost.repo.manageBackupRepo({ repoName, operation: 'clone', forceClone: options.forceClone });
-           Underpost.repo.manageBackupRepo({ repoName, operation: 'pull' });
-         }

+         // Handle Git operations - execute only once per repository
+         if (!processedRepos.has(repoName)) {
+           logger.info('Processing Git operations for repository', { repoName, deployId });
+           if (options.git === true) {
             Underpost.repo.manageBackupRepo({ repoName, operation: 'clone', forceClone: options.forceClone });
             Underpost.repo.manageBackupRepo({ repoName, operation: 'pull' });
           }

-           shellExec(`cd ${repoPath} && underpost cmt . reset ${nCommits}`);
-           shellExec(`cd ${repoPath} && git reset`);
-           shellExec(`cd ${repoPath} && git checkout .`);
-           shellExec(`cd ${repoPath} && git clean -f -d`);
-           shellExec(`cd ${repoPath} && underpost push . ${username}/${repoName} -f`);
-         } else {
-           if (!username) logger.error('GITHUB_USERNAME environment variable not set');
-           logger.warn('Repository not found for macro rollback', { repoPath });
-         }
-       }
-
-         processedRepos.add(repoName);
-         logger.info('Repository marked as processed', { repoName });
-       } else {
-         logger.info('Skipping Git operations for already processed repository', { repoName, deployId });
-       }
+           if (options.macroRollbackExport) {
+             // Only clone if not already done by git option above
+             if (options.git !== true) {
+               Underpost.repo.manageBackupRepo({ repoName, operation: 'clone', forceClone: options.forceClone });
+               Underpost.repo.manageBackupRepo({ repoName, operation: 'pull' });
+             }

+             const nCommits = parseInt(options.macroRollbackExport);
+             const repoPath = `../${repoName}`;
+             const username = process.env.GITHUB_USERNAME;
+
+             if (fs.existsSync(repoPath) && username) {
+               logger.info('Executing macro rollback export', { repoName, nCommits });
+               shellExec(`cd ${repoPath} && underpost cmt . reset ${nCommits}`);
+               shellExec(`cd ${repoPath} && git reset`);
+               shellExec(`cd ${repoPath} && git checkout .`);
+               shellExec(`cd ${repoPath} && git clean -f -d`);
+               shellExec(`cd ${repoPath} && underpost push . ${username}/${repoName} -f`);
+             } else {
+               if (!username) logger.error('GITHUB_USERNAME environment variable not set');
+               logger.warn('Repository not found for macro rollback', { repoPath });
+             }
+           }

+           processedRepos.add(repoName);
+           logger.info('Repository marked as processed', { repoName });
+         } else {
+           logger.info('Skipping Git operations for already processed repository', { repoName, deployId });
+         }

-       }
+         // Process each database provider
+         for (const provider of Object.keys(dbs)) {
+           for (const dbName of Object.keys(dbs[provider])) {
+             const { hostFolder, user, password, host, path } = dbs[provider][dbName];

-             (options.hosts &&
-               !options.hosts
-                 .split(',')
-                 .map((h) => h.trim())
-                 .includes(host)) ||
-             (options.paths &&
-               !options.paths
-                 .split(',')
-                 .map((p) => p.trim())
-                 .includes(path))
-           ) {
-             logger.info('Skipping database due to host/path filter', { dbName, host, path });
-             continue;
-           }
+             // Create unique identifier for host+path combination
+             const hostPathKey = `${deployId}:${host}:${path}`;

+             // Skip if this host+path combination was already processed
+             if (processedHostPaths.has(hostPathKey)) {
+               logger.info('Skipping already processed host/path', { dbName, host, path, deployId });
+               continue;
+             }

+             // Filter by hosts and paths if specified
+             if (
+               (options.hosts &&
+                 !options.hosts
+                   .split(',')
+                   .map((h) => h.trim())
+                   .includes(host)) ||
+               (options.paths &&
+                 !options.paths
+                   .split(',')
+                   .map((p) => p.trim())
+                   .includes(path))
+             ) {
+               logger.info('Skipping database due to host/path filter', { dbName, host, path });
+               continue;
+             }

+             if (!hostFolder) {
+               logger.warn('No hostFolder defined for database', { dbName, provider });
+               continue;
+             }

+             logger.info('Processing database', { hostFolder, provider, dbName, deployId });

-             const sqlContainerPath = `/home/${dbName}.sql`;
-             const fromPartsPath = `../${repoName}/${hostFolder}/${currentTimestamp}/${dbName}-parths.json`;
-             const toSqlPath = `../${repoName}/${hostFolder}/${currentTimestamp}/${dbName}.sql`;
-             const toNewSqlPath = `../${repoName}/${hostFolder}/${newBackupTimestamp}/${dbName}.sql`;
-             const toBsonPath = `../${repoName}/${hostFolder}/${currentTimestamp}/${dbName}`;
-             const toNewBsonPath = `../${repoName}/${hostFolder}/${newBackupTimestamp}/${dbName}`;
+             const latestBackupTimestamp = Underpost.db._getLatestBackupTimestamp(`../${repoName}/${hostFolder}`);

-             if (options.import === true && fs.existsSync(fromPartsPath) && !fs.existsSync(toSqlPath)) {
-               const names = JSON.parse(fs.readFileSync(fromPartsPath, 'utf8')).map((_path) => {
-                 return `../${repoName}/${hostFolder}/${currentTimestamp}/${_path.split('/').pop()}`;
-               });
-               logger.info('Merging backup parts', { fromPartsPath, toSqlPath, parts: names.length });
-               await mergeFile(names, toSqlPath);
-             }
+             dbs[provider][dbName].currentBackupTimestamp = latestBackupTimestamp;

+             const currentTimestamp = latestBackupTimestamp || newBackupTimestamp;
+             const sqlContainerPath = `/home/${dbName}.sql`;
+             const fromPartsPath = `../${repoName}/${hostFolder}/${currentTimestamp}/${dbName}-parths.json`;
+             const toSqlPath = `../${repoName}/${hostFolder}/${currentTimestamp}/${dbName}.sql`;
+             const toNewSqlPath = `../${repoName}/${hostFolder}/${newBackupTimestamp}/${dbName}.sql`;
+             const toBsonPath = `../${repoName}/${hostFolder}/${currentTimestamp}/${dbName}`;
+             const toNewBsonPath = `../${repoName}/${hostFolder}/${newBackupTimestamp}/${dbName}`;

+             // Merge split SQL files if needed for import
+             if (options.import === true && fs.existsSync(fromPartsPath) && !fs.existsSync(toSqlPath)) {
+               const names = JSON.parse(fs.readFileSync(fromPartsPath, 'utf8')).map((_path) => {
+                 return `../${repoName}/${hostFolder}/${currentTimestamp}/${_path.split('/').pop()}`;
+               });
+               logger.info('Merging backup parts', { fromPartsPath, toSqlPath, parts: names.length });
+               await mergeFile(names, toSqlPath);
+             }

-             const
-               'pods',
+             // Get target pods based on provider and options
+             let targetPods = [];
+             const podCriteria = {
+               podNames: options.podName,
               namespace,
-             targetPods = defaultPods;
-           }
+               deployId: provider === 'mariadb' ? 'mariadb' : 'mongo',
+             };

+             targetPods = Underpost.kubectl.getFilteredPods(podCriteria);
+
+             // Fallback to default if no custom pods specified
+             if (targetPods.length === 0 && !options.podName) {
+               const defaultPods = Underpost.kubectl.get(
+                 provider === 'mariadb' ? 'mariadb' : 'mongo',
+                 'pods',
+                 namespace,
+               );
+               console.log('defaultPods', defaultPods);
+               targetPods = defaultPods;
+             }

+             if (targetPods.length === 0) {
+               logger.warn('No pods found matching criteria', { provider, criteria: podCriteria });
+               continue;
+             }
+
+             // Handle primary pod detection for MongoDB
+             let podsToProcess = [];
+             if (provider === 'mongoose' && !options.allPods) {
+               // For MongoDB, always use primary pod unless allPods is true
+               if (!targetPods || targetPods.length === 0) {
+                 logger.warn('No MongoDB pods available to check for primary');
+                 podsToProcess = [];
+               } else {
+                 const firstPod = targetPods[0].NAME;
+                 const primaryPodName = Underpost.db.getMongoPrimaryPodName({ namespace, podName: firstPod });
+
+                 if (primaryPodName) {
+                   const primaryPod = targetPods.find((p) => p.NAME === primaryPodName);
+                   if (primaryPod) {
+                     podsToProcess = [primaryPod];
+                     logger.info('Using MongoDB primary pod', { primaryPod: primaryPodName });
+                   } else {
+                     logger.warn('Primary pod not in filtered list, using first pod', { primaryPodName });
+                     podsToProcess = [targetPods[0]];
+                   }
                 } else {
-                   logger.warn('
+                   logger.warn('Could not detect primary pod, using first pod');
                   podsToProcess = [targetPods[0]];
                 }
-             } else {
-               logger.warn('Could not detect primary pod, using first pod');
-               podsToProcess = [targetPods[0]];
               }
+             } else {
+               // For MariaDB or when allPods is true, limit to first pod unless allPods is true
+               podsToProcess = options.allPods === true ? targetPods : [targetPods[0]];
             }
-           } else {
-             // For MariaDB or when allPods is true, limit to first pod unless allPods is true
-             podsToProcess = options.allPods === true ? targetPods : [targetPods[0]];
-           }

+             logger.info(`Processing ${podsToProcess.length} pod(s) for ${provider}`, {
+               dbName,
+               pods: podsToProcess.map((p) => p.NAME),
+             });

+             // Process each pod
+             for (const pod of podsToProcess) {
+               logger.info('Processing pod', { podName: pod.NAME, node: pod.NODE, status: pod.STATUS });
+
+               switch (provider) {
+                 case 'mariadb': {
+                   if (options.stats === true) {
+                     const stats = Underpost.db._getMariaDBStats({
+                       podName: pod.NAME,
+                       namespace,
+                       dbName,
+                       user,
+                       password,
+                     });
+                     if (stats) {
+                       Underpost.db._displayStats({ provider, dbName, stats });
+                     }
                   }
-                 }

+                   if (options.import === true) {
+                     Underpost.db._importMariaDB({
+                       pod,
+                       namespace,
+                       dbName,
+                       user,
+                       password,
+                       sqlPath: toSqlPath,
+                     });
+                   }

+                   if (options.export === true) {
+                     const outputPath = options.outPath || toNewSqlPath;
+                     await Underpost.db._exportMariaDB({
+                       pod,
+                       namespace,
+                       dbName,
+                       user,
+                       password,
+                       outputPath,
+                     });
+                   }
+                   break;
                 }
-                 break;
-               }

+                 case 'mongoose': {
+                   if (options.stats === true) {
+                     const stats = Underpost.db._getMongoStats({
+                       podName: pod.NAME,
+                       namespace,
+                       dbName,
+                     });
+                     if (stats) {
+                       Underpost.db._displayStats({ provider, dbName, stats });
+                     }
                   }
-                 }

+                   if (options.import === true) {
+                     const bsonPath = options.outPath || toBsonPath;
+                     Underpost.db._importMongoDB({
+                       pod,
+                       namespace,
+                       dbName,
+                       bsonPath,
+                       drop: options.drop,
+                       preserveUUID: options.preserveUUID,
+                     });
+                   }

+                   if (options.export === true) {
+                     const outputPath = options.outPath || toNewBsonPath;
+                     Underpost.db._exportMongoDB({
+                       pod,
+                       namespace,
+                       dbName,
+                       outputPath,
+                       collections: options.collections,
+                     });
+                   }
+                   break;
                 }
-                 break;
-               }

+                 default:
+                   logger.warn('Unsupported database provider', { provider });
+                   break;
+               }
             }
+
+             // Mark this host+path combination as processed
+             processedHostPaths.add(hostPathKey);
           }
+         }

-       Underpost.repo.
+         // Commit and push to Git if enabled - execute only once per repository
+         if (options.export === true && options.git === true && !processedRepos.has(`${repoName}-committed`)) {
+           const commitMessage = `${new Date(newBackupTimestamp).toLocaleDateString()} ${new Date(
+             newBackupTimestamp,
+           ).toLocaleTimeString()}`;
+           Underpost.repo.manageBackupRepo({ repoName, operation: 'commit', message: commitMessage });
+           Underpost.repo.manageBackupRepo({ repoName, operation: 'push' });
+           processedRepos.add(`${repoName}-committed`);
         }
       }

+       logger.info('Database operation completed successfully');
+     } catch (error) {
+       logger.error('Database operation failed', { error: error.message });
+       throw error;
+     } finally {
+       if (ephemeral && isInsideContainer()) {
+         Underpost.repo.cleanupPrivateEngineRepo();
+         Underpost.env.clean();
       }
     }
-
-     logger.info('Database operation completed successfully');
   },

   /**
```
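The rewritten callback derives every backup location from the per-deployment backup repo name plus a host+path folder and a timestamp. A small sketch of that layout, using made-up values (the template strings mirror the hunk above; `dd-core`, the host, and the timestamp are illustrative only):

```js
// Illustrative values only; the path templates come from the diff.
const deployId = 'dd-core'; // hypothetical deployment id
const repoName = `engine-${deployId.split('dd-')[1]}-cron-backups`; // engine-core-cron-backups
const hostFolder = 'example.com' + '/api'.replaceAll('/', '-'); // example.com-api
const newBackupTimestamp = 1700000000000;

// SQL dump written by --export for a MariaDB database named 'main':
const toNewSqlPath = `../${repoName}/${hostFolder}/${newBackupTimestamp}/main.sql`;
// BSON directory written by --export for a MongoDB database named 'main':
const toNewBsonPath = `../${repoName}/${hostFolder}/${newBackupTimestamp}/main`;

console.log(toNewSqlPath);
// ../engine-core-cron-backups/example.com-api/1700000000000/main.sql
```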
```diff
@@ -940,174 +973,185 @@ class UnderpostDB {
      host = process.env.DEFAULT_DEPLOY_HOST,
      path = process.env.DEFAULT_DEPLOY_PATH,
    ) {
+     const { ephemeral } = Underpost.repo.privateEngineRepoFactory(deployId || undefined);
+     try {
+       loadCronDeployEnv();
+       deployId = deployId ? deployId : process.env.DEFAULT_DEPLOY_ID;
+       host = host ? host : process.env.DEFAULT_DEPLOY_HOST;
+       path = path ? path : process.env.DEFAULT_DEPLOY_PATH;

-       const deployListPath = './engine-private/deploy/dd.router';
+       logger.info('Creating cluster metadata', { deployId, host, path });

-         throw new Error(`Deploy router file not found: ${deployListPath}`);
-       }
+       const env = 'production';
+       const deployListPath = './engine-private/deploy/dd.router';

+       if (!fs.existsSync(deployListPath)) {
+         logger.error('Deploy router file not found', { path: deployListPath });
+         throw new Error(`Deploy router file not found: ${deployListPath}`);
+       }

-       if (!fs.existsSync(confServerPath)) {
-         logger.error('Server configuration not found', { path: confServerPath });
-         throw new Error(`Server configuration not found: ${confServerPath}`);
-       }
+       const deployList = fs.readFileSync(deployListPath, 'utf8').split(',');

+       const confServerPath = `./engine-private/conf/${deployId}/conf.server.json`;
+       if (!fs.existsSync(confServerPath)) {
+         logger.error('Server configuration not found', { path: confServerPath });
+         throw new Error(`Server configuration not found: ${confServerPath}`);
+       }

+       const { db } = loadConfServerJson(confServerPath, { resolve: true })[host][path];
+
+       const maxRetries = 5;
+       const retryDelay = 3000;
+       for (let attempt = 1; attempt <= maxRetries; attempt++) {
+         try {
+           await DataBaseProvider.load({ apis: ['instance', 'cron'], host, path, db });
+           break;
+         } catch (err) {
+           if (attempt === maxRetries) {
+             logger.error('Failed to connect to database after retries', { attempts: maxRetries, error: err.message });
+             throw err;
+           }
+           logger.warn('Database connection failed, retrying...', { attempt, maxRetries, error: err.message });
+           await timer(retryDelay);
         }
-         logger.warn('Database connection failed, retrying...', { attempt, maxRetries, error: err.message });
-         await timer(retryDelay);
       }
-     }

+       try {
+         /** @type {import('../api/instance/instance.model.js').InstanceModel} */
+         const Instance = DataBaseProvider.instance[`${host}${path}`].mongoose.models.Instance;

+         await Instance.deleteMany();
+         logger.info('Cleared existing instance metadata');

+         for (const _deployId of deployList) {
+           const deployId = _deployId.trim();
+           if (!deployId) continue;

+           logger.info('Processing deployment for metadata', { deployId });

+           const confServerPath = `./engine-private/conf/${deployId}/conf.server.json`;
+           if (!fs.existsSync(confServerPath)) {
+             logger.warn('Configuration not found for deployment', { deployId, path: confServerPath });
+             continue;
+           }

+           const confServer = loadReplicas(deployId, loadConfServerJson(confServerPath, { resolve: true }));
+           const router = await Underpost.deploy.routerFactory(deployId, env);
+           const pathPortAssignmentData = await pathPortAssignmentFactory(deployId, router, confServer);

+           for (const host of Object.keys(confServer)) {
+             for (const { path, port } of pathPortAssignmentData[host]) {
+               if (!confServer[host][path]) continue;

+               const { client, runtime, apis, peer } = confServer[host][path];

+               // Save main instance
+               {
+                 const body = {
+                   deployId,
+                   host,
+                   path,
+                   port,
+                   client,
+                   runtime,
+                   apis,
+                 };
+
+                 logger.info('Saving instance metadata', body);
+                 await new Instance(body).save();
+               }

+               // Save peer instance if exists
+               if (peer) {
+                 const body = {
+                   deployId,
+                   host,
+                   path: path === '/' ? '/peer' : `${path}/peer`,
+                   port: port + 1,
+                   runtime: 'nodejs',
+                 };
+
+                 logger.info('Saving peer instance metadata', body);
+                 await new Instance(body).save();
+               }
             }
+           }

+           // Process additional instances
+           const confInstancesPath = `./engine-private/conf/${deployId}/conf.instances.json`;
+           if (fs.existsSync(confInstancesPath)) {
+             const confInstances = JSON.parse(fs.readFileSync(confInstancesPath, 'utf8'));
+             for (const instance of confInstances) {
+               const { id, host, path, fromPort, metadata } = instance;
+               const { runtime } = metadata;
               const body = {
                 deployId,
                 host,
-                 path
-                 port:
+                 path,
+                 port: fromPort,
+                 client: id,
+                 runtime,
               };
-               logger.info('Saving peer instance metadata', body);
+               logger.info('Saving additional instance metadata', body);
               await new Instance(body).save();
             }
           }
         }
-         if (fs.existsSync(confInstancesPath)) {
-           const confInstances = JSON.parse(fs.readFileSync(confInstancesPath, 'utf8'));
-           for (const instance of confInstances) {
-             const { id, host, path, fromPort, metadata } = instance;
-             const { runtime } = metadata;
-             const body = {
-               deployId,
-               host,
-               path,
-               port: fromPort,
-               client: id,
-               runtime,
-             };
-             logger.info('Saving additional instance metadata', body);
-             await new Instance(body).save();
-           }
-         }
+       } catch (error) {
+         logger.error('Failed to create instance metadata', { error: error.message });
+         throw error;
       }
-     } catch (error) {
-       logger.error('Failed to create instance metadata', { error: error.message });
-       throw error;
-     }

+       try {
+         const cronDeployPath = './engine-private/deploy/dd.cron';
+         if (!fs.existsSync(cronDeployPath)) {
+           logger.warn('Cron deploy file not found', { path: cronDeployPath });
+           return;
+         }

+         const cronDeployId = fs.readFileSync(cronDeployPath, 'utf8').trim();
+         const confCronPath = `./engine-private/conf/${cronDeployId}/conf.cron.json`;

+         if (!fs.existsSync(confCronPath)) {
+           logger.warn('Cron configuration not found', { path: confCronPath });
+           return;
+         }

+         const confCron = JSON.parse(fs.readFileSync(confCronPath, 'utf8'));

+         await DataBaseProvider.load({ apis: ['cron'], host, path, db });

+         /** @type {import('../api/cron/cron.model.js').CronModel} */
+         const Cron = DataBaseProvider.instance[`${host}${path}`].mongoose.models.Cron;

+         await Cron.deleteMany();
+         logger.info('Cleared existing cron metadata');

+         for (const jobId of Object.keys(confCron.jobs)) {
+           const body = {
+             jobId,
+             deployId: Underpost.cron.getRelatedDeployIdList(jobId),
+             expression: confCron.jobs[jobId].expression,
+             enabled: confCron.jobs[jobId].enabled,
+           };
+           logger.info('Saving cron metadata', body);
+           await new Cron(body).save();
+         }
+       } catch (error) {
+         logger.error('Failed to create cron metadata', { error: error.message });
       }
+
+       await DataBaseProvider.instance[`${host}${path}`].mongoose.close();
+       logger.info('Cluster metadata creation completed');
     } catch (error) {
-       logger.error('
+       logger.error('Cluster metadata creation failed', { error: error.message });
+       throw error;
+     } finally {
+       if (ephemeral && isInsideContainer()) {
+         Underpost.repo.cleanupPrivateEngineRepo();
+         Underpost.env.clean();
+       }
     }
-
-     await DataBaseProvider.instance[`${host}${path}`].mongoose.close();
-     logger.info('Cluster metadata creation completed');
   },

   /**
```
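The metadata factory now wraps `DataBaseProvider.load` in a bounded retry loop instead of failing on the first connection error. A generic sketch of that pattern, distilled from the hunk above (the `withRetries` helper is hypothetical — the diff inlines this logic rather than using a helper):

```js
// Promise-based delay, equivalent to the package's timer() utility.
const timer = (ms) => new Promise((resolve) => setTimeout(resolve, ms));

async function withRetries(task, { maxRetries = 5, retryDelay = 3000 } = {}) {
  for (let attempt = 1; attempt <= maxRetries; attempt++) {
    try {
      return await task(); // success: stop retrying
    } catch (err) {
      if (attempt === maxRetries) throw err; // give up after the last attempt
      console.warn(`attempt ${attempt}/${maxRetries} failed: ${err.message}`);
      await timer(retryDelay);
    }
  }
}

// Usage mirroring the diff:
// await withRetries(() => DataBaseProvider.load({ apis: ['instance', 'cron'], host, path, db }));
```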
```diff
@@ -1131,211 +1175,223 @@ class UnderpostDB {
       dryRun: false,
     },
   ) {
+   const firstDeployId = deployList !== 'dd' ? deployList.split(',')[0].trim() : '';
+   const { ephemeral } = Underpost.repo.privateEngineRepoFactory(firstDeployId || undefined);
+   try {
+     loadCronDeployEnv();
+     if (deployList === 'dd') deployList = fs.readFileSync(`./engine-private/deploy/dd.router`, 'utf8');

-     const fileRefPath = './src/api/file/file.ref.json';
-     if (!fs.existsSync(fileRefPath)) {
-       logger.error('file.ref.json not found', { path: fileRefPath });
-       return;
-     }
+     logger.info('Starting File collection cleanup', { deployList, options });

+     // Load file.ref.json to know which models reference File
+     const fileRefPath = './src/api/file/file.ref.json';
+     if (!fs.existsSync(fileRefPath)) {
+       logger.error('file.ref.json not found', { path: fileRefPath });
+       return;
+     }

-     const filterPaths = options.paths ? options.paths.split(',').map((p) => p.trim()) : [];
+     const fileRefData = JSON.parse(fs.readFileSync(fileRefPath, 'utf8'));
+     logger.info('Loaded file reference configuration', { apis: fileRefData.length });

+     // Filter hosts and paths if specified
+     const filterHosts = options.hosts ? options.hosts.split(',').map((h) => h.trim()) : [];
+     const filterPaths = options.paths ? options.paths.split(',').map((p) => p.trim()) : [];

-     const
-     if (!deployId) continue;
+     // Track all connections to close them at the end
+     const connectionsToClose = [];

+     for (const _deployId of deployList.split(',')) {
+       const deployId = _deployId.trim();
+       if (!deployId) continue;

-       const confServerPath = `./engine-private/conf/${deployId}/conf.server.json`;
-       if (!fs.existsSync(confServerPath)) {
-         logger.error('Configuration file not found', { path: confServerPath });
-         continue;
-       }
+       logger.info('Processing deployment for File cleanup', { deployId });

+       // Load server configuration
+       const confServerPath = `./engine-private/conf/${deployId}/conf.server.json`;
+       if (!fs.existsSync(confServerPath)) {
+         logger.error('Configuration file not found', { path: confServerPath });
+         continue;
+       }

-       for (const host of Object.keys(confServer)) {
-         if (filterHosts.length > 0 && !filterHosts.includes(host)) continue;
+       const confServer = loadConfServerJson(confServerPath, { resolve: true });

+       // Process each host+path combination
+       for (const host of Object.keys(confServer)) {
+         if (filterHosts.length > 0 && !filterHosts.includes(host)) continue;

-         const
+         for (const path of Object.keys(confServer[host])) {
+           if (filterPaths.length > 0 && !filterPaths.includes(path)) continue;

-           logger.info('Skipping - no file api in configuration', { host, path });
-           continue;
-         }
+           const { db, apis } = confServer[host][path];
+           if (!db || !apis) continue;

-           // Connect to database with retry
-           let dbProvider;
-           for (let attempt = 1; attempt <= 3; attempt++) {
-             try {
-               dbProvider = await DataBaseProvider.load({ apis, host, path, db });
-               break;
-             } catch (err) {
-               if (attempt === 3) throw err;
-               logger.warn('Database connection failed, retrying...', { attempt, host, path, error: err.message });
-               await timer(3000);
-             }
-           }
-           if (!dbProvider || !dbProvider.models) {
-             logger.error('Failed to load database provider', { host, path });
+           // Check if 'file' api is in the apis list
+           if (!apis.includes('file')) {
+             logger.info('Skipping - no file api in configuration', { host, path });
             continue;
           }

+           // logger.info('Processing host+path with file api', { host, path, db: db.name });
+
+           try {
+             // Connect to database with retry
+             let dbProvider;
+             for (let attempt = 1; attempt <= 3; attempt++) {
+               try {
+                 dbProvider = await DataBaseProvider.load({ apis, host, path, db });
+                 break;
+               } catch (err) {
+                 if (attempt === 3) throw err;
+                 logger.warn('Database connection failed, retrying...', { attempt, host, path, error: err.message });
+                 await timer(3000);
+               }
+             }
+             if (!dbProvider || !dbProvider.models) {
+               logger.error('Failed to load database provider', { host, path });
+               continue;
+             }

-           connectionsToClose.push({ host, path, dbProvider });
+             const { models } = dbProvider;

-             logger.warn('File model not loaded', { host, path });
-             continue;
-           }
+             // Track this connection for cleanup
+             connectionsToClose.push({ host, path, dbProvider });

+             // Check if File model exists
+             if (!models.File) {
+               logger.warn('File model not loaded', { host, path });
+               continue;
+             }

+             // Get all File documents
+             const allFiles = await models.File.find({}, '_id').lean();
+             logger.info('Found File documents', { count: allFiles.length, host, path });

-           const referencedFileIds = new Set();
+             if (allFiles.length === 0) continue;

-             const { api, model: modelFields } = refConfig;
+             // Track which File IDs are referenced
+             const referencedFileIds = new Set();

-             // Check
-             const
-               .map((w) => w.charAt(0).toUpperCase() + w.slice(1))
-               .join('');
-             const Model = models[modelName];
+             // Check each API from file.ref.json
+             for (const refConfig of fileRefData) {
+               const { api, model: modelFields } = refConfig;

+               // Check if this API is loaded in current context
+               const modelName = api
+                 .split('-')
+                 .map((w) => w.charAt(0).toUpperCase() + w.slice(1))
+                 .join('');
+               const Model = models[modelName];
+
+               if (!Model) {
+                 logger.debug('Model not loaded in current context', { api, modelName, host, path });
+                 continue;
+               }

+               logger.info('Checking references in model', { api, modelName });

+               // Helper function to recursively check field references
+               const checkFieldReferences = async (fieldPath, fieldConfig) => {
+                 for (const [fieldName, fieldValue] of Object.entries(fieldConfig)) {
+                   const currentPath = fieldPath ? `${fieldPath}.${fieldName}` : fieldName;

+                   if (fieldValue === true) {
+                     // This is a File reference field
+                     const query = {};
+                     query[currentPath] = { $exists: true, $ne: null };

+                     const docs = await Model.find(query, currentPath).lean();

+                     for (const doc of docs) {
+                       // Navigate to the nested field
+                       const parts = currentPath.split('.');
+                       let value = doc;
+                       for (const part of parts) {
+                         value = value?.[part];
+                       }

+                       if (value) {
+                         if (Array.isArray(value)) {
+                           value.forEach((id) => id && referencedFileIds.add(id.toString()));
+                         } else {
+                           referencedFileIds.add(value.toString());
+                         }
                       }
                     }
-                   }

+                     logger.info('Found references', {
+                       model: modelName,
+                       field: currentPath,
+                       count: docs.length,
+                     });
+                   } else if (typeof fieldValue === 'object') {
+                     // Nested object, recurse
+                     await checkFieldReferences(currentPath, fieldValue);
+                   }
                 }
-               }
-             };
-
-             await checkFieldReferences('', modelFields);
-           }
+               };

+               await checkFieldReferences('', modelFields);
+             }

-             const orphanedFiles = allFiles.filter((file) => !referencedFileIds.has(file._id.toString()));
+             logger.info('Total referenced File IDs', { count: referencedFileIds.size, host, path });

-           } else {
-             logger.info('Found orphaned files', { count: orphanedFiles.length, host, path });
+             // Find orphaned files
+             const orphanedFiles = allFiles.filter((file) => !referencedFileIds.has(file._id.toString()));

-             if (
-               logger.info('
-               count: orphanedFiles.length,
-               ids: orphanedFiles.map((f) => f._id.toString()),
-             });
+             if (orphanedFiles.length === 0) {
+               logger.info('No orphaned files found', { host, path });
             } else {
+               logger.info('Found orphaned files', { count: orphanedFiles.length, host, path });
+
+               if (options.dryRun) {
+                 logger.info('Dry run - would delete files', {
+                   count: orphanedFiles.length,
+                   ids: orphanedFiles.map((f) => f._id.toString()),
+                 });
+               } else {
+                 const orphanedIds = orphanedFiles.map((f) => f._id);
+                 const deleteResult = await models.File.deleteMany({ _id: { $in: orphanedIds } });
+                 logger.info('Deleted orphaned files', {
+                   deletedCount: deleteResult.deletedCount,
+                   host,
+                   path,
+                 });
+               }
             }
+           } catch (error) {
+             logger.error('Error processing host+path', {
+               host,
+               path,
+               error: error.message,
+             });
           }
-         } catch (error) {
-           logger.error('Error processing host+path', {
-             host,
-             path,
-             error: error.message,
-           });
         }
       }
     }
-   }

+     // Close all connections
+     logger.info('Closing all database connections', { count: connectionsToClose.length });
+     for (const { host, path, dbProvider } of connectionsToClose) {
+       try {
+         if (dbProvider && dbProvider.close) {
+           await dbProvider.close();
+           logger.info('Connection closed', { host, path });
+         }
+       } catch (error) {
+         logger.error('Error closing connection', { host, path, error: error.message });
       }
-     } catch (error) {
-       logger.error('Error closing connection', { host, path, error: error.message });
     }
-   }

+     logger.info('File collection cleanup completed');
+   } catch (error) {
+     logger.error('File collection cleanup failed', { error: error.message });
+     throw error;
+   } finally {
+     if (ephemeral && isInsideContainer()) {
+       Underpost.repo.cleanupPrivateEngineRepo();
+       Underpost.env.clean();
+     }
+   }
   },

   /**
```
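The cleanup walks each entry of file.ref.json, where a `true` leaf marks a field that holds File ObjectIds and a nested object recurses one level deeper. A hedged sketch of that traversal with a made-up ref entry (the real shape lives in `./src/api/file/file.ref.json`, which this diff does not show):

```js
// Sketch: collect the dotted field paths that a ref entry marks as File refs.
function collectRefPaths(fieldConfig, prefix = '') {
  const paths = [];
  for (const [name, value] of Object.entries(fieldConfig)) {
    const current = prefix ? `${prefix}.${name}` : name;
    if (value === true) paths.push(current); // leaf: a File reference field
    else if (value && typeof value === 'object') paths.push(...collectRefPaths(value, current));
  }
  return paths;
}

// Hypothetical entry, e.g. { "api": "user-profile", "model": { ... } };
// 'user-profile' maps to the model name 'UserProfile' in the hunk above.
console.log(collectRefPaths({ avatar: true, gallery: { images: true } }));
// -> [ 'avatar', 'gallery.images' ]
```

Each collected path is then queried as `{ [path]: { $exists: true, $ne: null } }`, every id found (scalar or array) lands in `referencedFileIds`, and File documents whose `_id` never appears there become the deletion candidates.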
```diff
@@ -1369,68 +1425,79 @@ class UnderpostDB {
       crons: false,
     },
   ) {
-     logger.info('Generating cluster metadata');
-     await Underpost.db.clusterMetadataFactory(deployId, host, path);
-   }
+   const { ephemeral } = Underpost.repo.privateEngineRepoFactory(deployId || undefined);
+   try {
+     loadCronDeployEnv();
+     deployId = deployId ? deployId : process.env.DEFAULT_DEPLOY_ID;
+     host = host ? host : process.env.DEFAULT_DEPLOY_HOST;
+     path = path ? path : process.env.DEFAULT_DEPLOY_PATH;
+
+     logger.info('Starting cluster metadata backup operation', {
+       deployId,
+       host,
+       path,
+       options,
+     });

-       fs.mkdirSync(outputPath, { recursive: true });
+     if (options.generate === true) {
+       logger.info('Generating cluster metadata');
+       await Underpost.db.clusterMetadataFactory(deployId, host, path);
     }
-     const collection = 'instances';

-     if (options.
+     if (options.instances === true) {
+       const outputPath = './engine-private/instances';
+       if (!fs.existsSync(outputPath)) {
+         fs.mkdirSync(outputPath, { recursive: true });
+       }
+       const collection = 'instances';

-     }
+       if (options.export === true) {
+         logger.info('Exporting instances collection', { outputPath });
+         shellExec(
+           `node bin db --export --primary-pod --collections ${collection} --out-path ${outputPath} --hosts ${host} --paths '${path}' ${deployId}`,
+         );
+       }

+       if (options.import === true) {
+         logger.info('Importing instances collection', { outputPath });
+         shellExec(
+           `node bin db --import --primary-pod --drop --preserveUUID --out-path ${outputPath} --hosts ${host} --paths '${path}' ${deployId}`,
+         );
+       }
     }
-     const collection = 'crons';

-     if (options.
+     if (options.crons === true) {
+       const outputPath = './engine-private/crons';
+       if (!fs.existsSync(outputPath)) {
+         fs.mkdirSync(outputPath, { recursive: true });
+       }
+       const collection = 'crons';
+
+       if (options.export === true) {
+         logger.info('Exporting crons collection', { outputPath });
+         shellExec(
+           `node bin db --export --primary-pod --collections ${collection} --out-path ${outputPath} --hosts ${host} --paths '${path}' ${deployId}`,
+         );
+       }
+
+       if (options.import === true) {
+         logger.info('Importing crons collection', { outputPath });
+         shellExec(
+           `node bin db --import --primary-pod --drop --preserveUUID --out-path ${outputPath} --hosts ${host} --paths '${path}' ${deployId}`,
+         );
+       }
     }

+     logger.info('Cluster metadata backup operation completed');
+   } catch (error) {
+     logger.error('Cluster metadata backup operation failed', { error: error.message });
+     throw error;
+   } finally {
+     if (ephemeral && isInsideContainer()) {
+       Underpost.repo.cleanupPrivateEngineRepo();
+       Underpost.env.clean();
     }
   }
-
-   logger.info('Cluster metadata backup operation completed');
   },
 };
}
```