underpost 2.90.4 → 2.92.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/workflows/release.cd.yml +7 -7
- package/README.md +5 -5
- package/bin/deploy.js +0 -127
- package/cli.md +93 -26
- package/manifests/deployment/dd-default-development/deployment.yaml +2 -2
- package/manifests/deployment/dd-test-development/deployment.yaml +2 -2
- package/package.json +1 -1
- package/scripts/rocky-setup.sh +1 -0
- package/src/cli/db.js +1148 -197
- package/src/cli/deploy.js +17 -12
- package/src/cli/env.js +2 -2
- package/src/cli/index.js +100 -11
- package/src/cli/repository.js +127 -3
- package/src/cli/run.js +40 -11
- package/src/cli/ssh.js +424 -13
- package/src/client/components/core/CommonJs.js +0 -1
- package/src/db/mongo/MongooseDB.js +5 -1
- package/src/index.js +1 -1
- package/src/server/dns.js +154 -0
- package/src/server/start.js +2 -0
package/src/cli/db.js
CHANGED
@@ -1,13 +1,16 @@
 /**
- * UnderpostDB CLI
+ * UnderpostDB CLI module
  * @module src/cli/db.js
  * @namespace UnderpostDB
+ * @description Manages database operations, backups, and cluster metadata for Kubernetes deployments.
+ * Supports MariaDB and MongoDB with import/export capabilities, Git integration, and multi-pod operations.
  */
 
 import { mergeFile, splitFileFactory } from '../server/conf.js';
 import { loggerFactory } from '../server/logger.js';
 import { shellExec } from '../server/process.js';
 import fs from 'fs-extra';
+import os from 'os';
 import UnderpostDeploy from './deploy.js';
 import UnderpostCron from './cron.js';
 import { DataBaseProvider } from '../db/DataBaseProvider.js';
@@ -16,42 +19,805 @@ import { loadReplicas, pathPortAssignmentFactory } from '../server/conf.js';
 const logger = loggerFactory(import.meta);
 
 /**
- *
- * @
- *
-
-
+ * Constants for database operations
+ * @constant {number} MAX_BACKUP_RETENTION - Maximum number of backups to retain
+ * @memberof UnderpostDB
+ */
+const MAX_BACKUP_RETENTION = 5;
+
+/**
+ * Timeout for kubectl operations in milliseconds
+ * @constant {number} KUBECTL_TIMEOUT
+ * @memberof UnderpostDB
+ */
+const KUBECTL_TIMEOUT = 300000; // 5 minutes
+
+/**
+ * @typedef {Object} DatabaseOptions
+ * @memberof UnderpostDB
+ * @property {boolean} [import=false] - Flag to import data from a backup
+ * @property {boolean} [export=false] - Flag to export data to a backup
+ * @property {string} [podName=''] - Comma-separated list of pod names or patterns
+ * @property {string} [nodeName=''] - Comma-separated list of node names for pod filtering
+ * @property {string} [ns='default'] - Kubernetes namespace
+ * @property {string} [collections=''] - Comma-separated list of collections to include
+ * @property {string} [outPath=''] - Output path for backup files
+ * @property {boolean} [drop=false] - Flag to drop the database before importing
+ * @property {boolean} [preserveUUID=false] - Flag to preserve UUIDs during import
+ * @property {boolean} [git=false] - Flag to enable Git integration
+ * @property {string} [hosts=''] - Comma-separated list of hosts to include
+ * @property {string} [paths=''] - Comma-separated list of paths to include
+ * @property {string} [labelSelector=''] - Kubernetes label selector for pods
+ * @property {boolean} [allPods=false] - Flag to target all matching pods
+ * @property {boolean} [dryRun=false] - Flag to simulate operations without executing
+ * @property {boolean} [primaryPod=false] - Flag to automatically detect and use MongoDB primary pod
+ * @property {boolean} [stats=false] - Flag to display collection/table statistics
+ */
+
+/**
+ * @typedef {Object} PodInfo
+ * @memberof UnderpostDB
+ * @property {string} NAME - Pod name
+ * @property {string} NAMESPACE - Pod namespace
+ * @property {string} NODE - Node where pod is running
+ * @property {string} STATUS - Pod status
+ * @property {string} [IP] - Pod IP address
+ */
+
+/**
+ * @typedef {Object} DatabaseConfig
  * @memberof UnderpostDB
+ * @property {string} provider - Database provider (mariadb, mongoose)
+ * @property {string} name - Database name
+ * @property {string} user - Database user
+ * @property {string} password - Database password
+ * @property {string} hostFolder - Host folder path
+ * @property {string} host - Host identifier
+ * @property {string} path - Path identifier
+ * @property {number} [currentBackupTimestamp] - Timestamp of current backup
+ */
+
+/**
+ * @class UnderpostDB
+ * @description Manages database operations and backups for Kubernetes-based deployments.
+ * Provides comprehensive database management including import/export, multi-pod targeting,
+ * Git integration, and cluster metadata management.
  */
 class UnderpostDB {
   static API = {
     /**
+     * Helper: Validates namespace name
+     * @private
+     * @param {string} namespace - Namespace to validate
+     * @returns {boolean} True if valid
+     */
+    _validateNamespace(namespace) {
+      if (!namespace || typeof namespace !== 'string') return false;
+      // Kubernetes namespace naming rules: lowercase alphanumeric, -, max 63 chars
+      return /^[a-z0-9]([-a-z0-9]*[a-z0-9])?$/.test(namespace) && namespace.length <= 63;
+    },
+
+    /**
+     * Helper: Validates pod name
+     * @private
+     * @param {string} podName - Pod name to validate
+     * @returns {boolean} True if valid
+     */
+    _validatePodName(podName) {
+      if (!podName || typeof podName !== 'string') return false;
+      // Kubernetes pod naming rules: lowercase alphanumeric, -, max 253 chars
+      return /^[a-z0-9]([-a-z0-9]*[a-z0-9])?$/.test(podName) && podName.length <= 253;
+    },
+
+    /**
+     * Helper: Gets filtered pods based on criteria
+     * @private
+     * @param {Object} criteria - Filter criteria
+     * @param {string} [criteria.podNames] - Comma-separated pod name patterns
+     * @param {string} [criteria.nodeNames] - Comma-separated node names
+     * @param {string} [criteria.namespace='default'] - Kubernetes namespace
+     * @param {string} [criteria.labelSelector] - Label selector
+     * @param {string} [criteria.deployId] - Deployment ID pattern
+     * @returns {Array<PodInfo>} Filtered pod list
+     */
+    _getFilteredPods(criteria = {}) {
+      const { podNames, nodeNames, namespace = 'default', labelSelector, deployId } = criteria;
+
+      try {
+        // Get all pods using UnderpostDeploy.API.get
+        let pods = UnderpostDeploy.API.get(deployId || '', 'pods', namespace);
+
+        // Filter by pod names if specified
+        if (podNames) {
+          const patterns = podNames.split(',').map((p) => p.trim());
+          pods = pods.filter((pod) => {
+            return patterns.some((pattern) => {
+              // Support wildcards
+              const regex = new RegExp('^' + pattern.replace(/\*/g, '.*') + '$');
+              return regex.test(pod.NAME);
+            });
+          });
+        }
+
+        // Filter by node names if specified (only if NODE is not '<none>')
+        if (nodeNames) {
+          const nodes = nodeNames.split(',').map((n) => n.trim());
+          pods = pods.filter((pod) => {
+            // Skip filtering if NODE is '<none>' or undefined
+            if (!pod.NODE || pod.NODE === '<none>') {
+              return true;
+            }
+            return nodes.includes(pod.NODE);
+          });
+        }
+
+        // Filter by label selector if specified
+        if (labelSelector) {
+          // Note: UnderpostDeploy.API.get doesn't support label selectors directly
+          // This would require a separate kubectl command
+          logger.warn('Label selector filtering requires additional implementation');
+        }
+
+        logger.info(`Found ${pods.length} pod(s) matching criteria`, { criteria, podNames: pods.map((p) => p.NAME) });
+        return pods;
+      } catch (error) {
+        logger.error('Error filtering pods', { error: error.message, criteria });
+        return [];
+      }
+    },
+
+    /**
+     * Helper: Executes kubectl command with error handling
+     * @private
+     * @param {string} command - kubectl command to execute
+     * @param {Object} options - Execution options
+     * @param {boolean} [options.dryRun=false] - Dry run mode
+     * @param {string} [options.context=''] - Command context for logging
+     * @returns {string|null} Command output or null on error
+     */
+    _executeKubectl(command, options = {}) {
+      const { dryRun = false, context = '' } = options;
+
+      if (dryRun) {
+        logger.info(`[DRY RUN] Would execute: ${command}`, { context });
+        return null;
+      }
+
+      try {
+        logger.info(`Executing kubectl command`, { command, context });
+        return shellExec(command, { stdout: true });
+      } catch (error) {
+        logger.error(`kubectl command failed`, { command, error: error.message, context });
+        throw error;
+      }
+    },
+
+    /**
+     * Helper: Copies file to pod
+     * @private
+     * @param {Object} params - Copy parameters
+     * @param {string} params.sourcePath - Source file path
+     * @param {string} params.podName - Target pod name
+     * @param {string} params.namespace - Pod namespace
+     * @param {string} params.destPath - Destination path in pod
+     * @param {boolean} [params.dryRun=false] - Dry run mode
+     * @returns {boolean} Success status
+     */
+    _copyToPod({ sourcePath, podName, namespace, destPath, dryRun = false }) {
+      try {
+        const command = `sudo kubectl cp ${sourcePath} ${namespace}/${podName}:${destPath}`;
+        UnderpostDB.API._executeKubectl(command, { dryRun, context: `copy to pod ${podName}` });
+        return true;
+      } catch (error) {
+        logger.error('Failed to copy file to pod', { sourcePath, podName, destPath, error: error.message });
+        return false;
+      }
+    },
+
+    /**
+     * Helper: Copies file from pod
+     * @private
+     * @param {Object} params - Copy parameters
+     * @param {string} params.podName - Source pod name
+     * @param {string} params.namespace - Pod namespace
+     * @param {string} params.sourcePath - Source path in pod
+     * @param {string} params.destPath - Destination file path
+     * @param {boolean} [params.dryRun=false] - Dry run mode
+     * @returns {boolean} Success status
+     */
+    _copyFromPod({ podName, namespace, sourcePath, destPath, dryRun = false }) {
+      try {
+        const command = `sudo kubectl cp ${namespace}/${podName}:${sourcePath} ${destPath}`;
+        UnderpostDB.API._executeKubectl(command, { dryRun, context: `copy from pod ${podName}` });
+        return true;
+      } catch (error) {
+        logger.error('Failed to copy file from pod', { podName, sourcePath, destPath, error: error.message });
+        return false;
+      }
+    },
+
+    /**
+     * Helper: Executes command in pod
+     * @private
+     * @param {Object} params - Execution parameters
+     * @param {string} params.podName - Pod name
+     * @param {string} params.namespace - Pod namespace
+     * @param {string} params.command - Command to execute
+     * @param {boolean} [params.dryRun=false] - Dry run mode
+     * @returns {string|null} Command output or null
+     */
+    _execInPod({ podName, namespace, command, dryRun = false }) {
+      try {
+        const kubectlCmd = `sudo kubectl exec -n ${namespace} -i ${podName} -- sh -c "${command}"`;
+        return UnderpostDB.API._executeKubectl(kubectlCmd, { dryRun, context: `exec in pod ${podName}` });
+      } catch (error) {
+        logger.error('Failed to execute command in pod', { podName, command, error: error.message });
+        throw error;
+      }
+    },
+
+    /**
+     * Helper: Manages Git repository for backups
+     * @private
+     * @param {Object} params - Git parameters
+     * @param {string} params.repoName - Repository name
+     * @param {string} params.operation - Operation (clone, pull, commit, push)
+     * @param {string} [params.message=''] - Commit message
+     * @returns {boolean} Success status
+     */
+    _manageGitRepo({ repoName, operation, message = '' }) {
+      try {
+        const username = process.env.GITHUB_USERNAME;
+        if (!username) {
+          logger.error('GITHUB_USERNAME environment variable not set');
+          return false;
+        }
+
+        const repoPath = `../${repoName}`;
+
+        switch (operation) {
+          case 'clone':
+            if (!fs.existsSync(repoPath)) {
+              shellExec(`cd .. && underpost clone ${username}/${repoName}`);
+              logger.info(`Cloned repository: ${repoName}`);
+            }
+            break;
+
+          case 'pull':
+            if (fs.existsSync(repoPath)) {
+              shellExec(`cd ${repoPath} && git checkout . && git clean -f -d`);
+              shellExec(`cd ${repoPath} && underpost pull . ${username}/${repoName}`);
+              logger.info(`Pulled repository: ${repoName}`);
+            }
+            break;
+
+          case 'commit':
+            if (fs.existsSync(repoPath)) {
+              shellExec(`cd ${repoPath} && git add .`);
+              shellExec(`underpost cmt ${repoPath} backup '' '${message}'`);
+              logger.info(`Committed to repository: ${repoName}`, { message });
+            }
+            break;
+
+          case 'push':
+            if (fs.existsSync(repoPath)) {
+              shellExec(`cd ${repoPath} && underpost push . ${username}/${repoName}`, { disableLog: true });
+              logger.info(`Pushed repository: ${repoName}`);
+            }
+            break;
+
+          default:
+            logger.warn(`Unknown git operation: ${operation}`);
+            return false;
+        }
+
+        return true;
+      } catch (error) {
+        logger.error(`Git operation failed`, { repoName, operation, error: error.message });
+        return false;
+      }
+    },
+
+    /**
+     * Helper: Manages backup timestamps and cleanup
+     * @private
+     * @param {string} backupPath - Backup directory path
+     * @param {number} newTimestamp - New backup timestamp
+     * @param {boolean} shouldCleanup - Whether to cleanup old backups
+     * @returns {Object} Backup info with current and removed timestamps
+     */
+    _manageBackupTimestamps(backupPath, newTimestamp, shouldCleanup) {
+      try {
+        if (!fs.existsSync(backupPath)) {
+          fs.mkdirSync(backupPath, { recursive: true });
+        }
+
+        // Delete empty folders
+        shellExec(`cd ${backupPath} && find . -type d -empty -delete`);
+
+        const times = fs.readdirSync(backupPath);
+        const validTimes = times.map((t) => parseInt(t)).filter((t) => !isNaN(t));
+
+        const currentBackupTimestamp = validTimes.length > 0 ? Math.max(...validTimes) : null;
+        const removeBackupTimestamp = validTimes.length > 0 ? Math.min(...validTimes) : null;
+
+        // Cleanup old backups if we have too many
+        if (shouldCleanup && validTimes.length >= MAX_BACKUP_RETENTION && removeBackupTimestamp) {
+          const removeDir = `${backupPath}/${removeBackupTimestamp}`;
+          logger.info('Removing old backup', { path: removeDir });
+          fs.removeSync(removeDir);
+        }
+
+        // Create new backup directory
+        if (shouldCleanup) {
+          const newBackupDir = `${backupPath}/${newTimestamp}`;
+          logger.info('Creating new backup directory', { path: newBackupDir });
+          fs.mkdirSync(newBackupDir, { recursive: true });
+        }
+
+        return {
+          current: currentBackupTimestamp,
+          removed: removeBackupTimestamp,
+          count: validTimes.length,
+        };
+      } catch (error) {
+        logger.error('Error managing backup timestamps', { backupPath, error: error.message });
+        return { current: null, removed: null, count: 0 };
+      }
+    },
+
+    /**
+     * Helper: Performs MariaDB import operation
+     * @private
+     * @param {Object} params - Import parameters
+     * @param {PodInfo} params.pod - Target pod
+     * @param {string} params.namespace - Namespace
+     * @param {string} params.dbName - Database name
+     * @param {string} params.user - Database user
+     * @param {string} params.password - Database password
+     * @param {string} params.sqlPath - SQL file path
+     * @param {boolean} [params.dryRun=false] - Dry run mode
+     * @returns {boolean} Success status
+     */
+    _importMariaDB({ pod, namespace, dbName, user, password, sqlPath, dryRun = false }) {
+      try {
+        const podName = pod.NAME;
+        const containerSqlPath = `/${dbName}.sql`;
+
+        logger.info('Importing MariaDB database', { podName, dbName });
+
+        // Remove existing SQL file in container
+        UnderpostDB.API._execInPod({
+          podName,
+          namespace,
+          command: `rm -rf ${containerSqlPath}`,
+          dryRun,
+        });
+
+        // Copy SQL file to pod
+        if (
+          !UnderpostDB.API._copyToPod({
+            sourcePath: sqlPath,
+            podName,
+            namespace,
+            destPath: containerSqlPath,
+            dryRun,
+          })
+        ) {
+          return false;
+        }
+
+        // Create database if it doesn't exist
+        UnderpostDB.API._executeKubectl(
+          `kubectl exec -n ${namespace} -i ${podName} -- mariadb -p${password} -e 'CREATE DATABASE IF NOT EXISTS ${dbName};'`,
+          { dryRun, context: `create database ${dbName}` },
+        );
+
+        // Import SQL file
+        const importCmd = `mariadb -u ${user} -p${password} ${dbName} < ${containerSqlPath}`;
+        UnderpostDB.API._execInPod({ podName, namespace, command: importCmd, dryRun });
+
+        logger.info('Successfully imported MariaDB database', { podName, dbName });
+        return true;
+      } catch (error) {
+        logger.error('MariaDB import failed', { podName: pod.NAME, dbName, error: error.message });
+        return false;
+      }
+    },
+
+    /**
+     * Helper: Performs MariaDB export operation
+     * @private
+     * @param {Object} params - Export parameters
+     * @param {PodInfo} params.pod - Source pod
+     * @param {string} params.namespace - Namespace
+     * @param {string} params.dbName - Database name
+     * @param {string} params.user - Database user
+     * @param {string} params.password - Database password
+     * @param {string} params.outputPath - Output file path
+     * @param {boolean} [params.dryRun=false] - Dry run mode
+     * @returns {boolean} Success status
+     */
+    async _exportMariaDB({ pod, namespace, dbName, user, password, outputPath, dryRun = false }) {
+      try {
+        const podName = pod.NAME;
+        const containerSqlPath = `/home/${dbName}.sql`;
+
+        logger.info('Exporting MariaDB database', { podName, dbName });
+
+        // Remove existing SQL file in container
+        UnderpostDB.API._execInPod({
+          podName,
+          namespace,
+          command: `rm -rf ${containerSqlPath}`,
+          dryRun,
+        });
+
+        // Dump database
+        const dumpCmd = `mariadb-dump --user=${user} --password=${password} --lock-tables ${dbName} > ${containerSqlPath}`;
+        UnderpostDB.API._execInPod({ podName, namespace, command: dumpCmd, dryRun });
+
+        // Copy SQL file from pod
+        if (
+          !UnderpostDB.API._copyFromPod({
+            podName,
+            namespace,
+            sourcePath: containerSqlPath,
+            destPath: outputPath,
+            dryRun,
+          })
+        ) {
+          return false;
+        }
+
+        // Split file if it exists
+        if (!dryRun && fs.existsSync(outputPath)) {
+          await splitFileFactory(dbName, outputPath);
+        }
+
+        logger.info('Successfully exported MariaDB database', { podName, dbName, outputPath });
+        return true;
+      } catch (error) {
+        logger.error('MariaDB export failed', { podName: pod.NAME, dbName, error: error.message });
+        return false;
+      }
+    },
+
+    /**
+     * Helper: Performs MongoDB import operation
+     * @private
+     * @param {Object} params - Import parameters
+     * @param {PodInfo} params.pod - Target pod
+     * @param {string} params.namespace - Namespace
+     * @param {string} params.dbName - Database name
+     * @param {string} params.bsonPath - BSON directory path
+     * @param {boolean} params.drop - Whether to drop existing database
+     * @param {boolean} params.preserveUUID - Whether to preserve UUIDs
+     * @param {boolean} [params.dryRun=false] - Dry run mode
+     * @returns {boolean} Success status
+     */
+    _importMongoDB({ pod, namespace, dbName, bsonPath, drop, preserveUUID, dryRun = false }) {
+      try {
+        const podName = pod.NAME;
+        const containerBsonPath = `/${dbName}`;
+
+        logger.info('Importing MongoDB database', { podName, dbName });
+
+        // Remove existing BSON directory in container
+        UnderpostDB.API._execInPod({
+          podName,
+          namespace,
+          command: `rm -rf ${containerBsonPath}`,
+          dryRun,
+        });
+
+        // Copy BSON directory to pod
+        if (
+          !UnderpostDB.API._copyToPod({
+            sourcePath: bsonPath,
+            podName,
+            namespace,
+            destPath: containerBsonPath,
+            dryRun,
+          })
+        ) {
+          return false;
+        }
+
+        // Restore database
+        const restoreCmd = `mongorestore -d ${dbName} ${containerBsonPath}${drop ? ' --drop' : ''}${
+          preserveUUID ? ' --preserveUUID' : ''
+        }`;
+        UnderpostDB.API._execInPod({ podName, namespace, command: restoreCmd, dryRun });
+
+        logger.info('Successfully imported MongoDB database', { podName, dbName });
+        return true;
+      } catch (error) {
+        logger.error('MongoDB import failed', { podName: pod.NAME, dbName, error: error.message });
+        return false;
+      }
+    },
+
+    /**
+     * Helper: Performs MongoDB export operation
+     * @private
+     * @param {Object} params - Export parameters
+     * @param {PodInfo} params.pod - Source pod
+     * @param {string} params.namespace - Namespace
+     * @param {string} params.dbName - Database name
+     * @param {string} params.outputPath - Output directory path
+     * @param {string} [params.collections=''] - Comma-separated collection list
+     * @param {boolean} [params.dryRun=false] - Dry run mode
+     * @returns {boolean} Success status
+     */
+    _exportMongoDB({ pod, namespace, dbName, outputPath, collections = '', dryRun = false }) {
+      try {
+        const podName = pod.NAME;
+        const containerBsonPath = `/${dbName}`;
+
+        logger.info('Exporting MongoDB database', { podName, dbName, collections });
+
+        // Remove existing BSON directory in container
+        UnderpostDB.API._execInPod({
+          podName,
+          namespace,
+          command: `rm -rf ${containerBsonPath}`,
+          dryRun,
+        });
+
+        // Dump database or specific collections
+        if (collections) {
+          const collectionList = collections.split(',').map((c) => c.trim());
+          for (const collection of collectionList) {
+            const dumpCmd = `mongodump -d ${dbName} --collection ${collection} -o /`;
+            UnderpostDB.API._execInPod({ podName, namespace, command: dumpCmd, dryRun });
+          }
+        } else {
+          const dumpCmd = `mongodump -d ${dbName} -o /`;
+          UnderpostDB.API._execInPod({ podName, namespace, command: dumpCmd, dryRun });
+        }
+
+        // Copy BSON directory from pod
+        if (
+          !UnderpostDB.API._copyFromPod({
+            podName,
+            namespace,
+            sourcePath: containerBsonPath,
+            destPath: outputPath,
+            dryRun,
+          })
+        ) {
+          return false;
+        }
+
+        logger.info('Successfully exported MongoDB database', { podName, dbName, outputPath });
+        return true;
+      } catch (error) {
+        logger.error('MongoDB export failed', { podName: pod.NAME, dbName, error: error.message });
+        return false;
+      }
+    },
+
+    /**
+     * Helper: Gets MongoDB collection statistics
+     * @private
+     * @param {Object} params - Parameters
+     * @param {string} params.podName - Pod name
+     * @param {string} params.namespace - Namespace
+     * @param {string} params.dbName - Database name
+     * @returns {Object|null} Collection statistics or null on error
+     */
+    _getMongoStats({ podName, namespace, dbName }) {
+      try {
+        logger.info('Getting MongoDB collection statistics', { podName, dbName });
+
+        // Use db.getSiblingDB() instead of 'use' command
+        const script = `db.getSiblingDB('${dbName}').getCollectionNames().map(function(c) { return { collection: c, count: db.getSiblingDB('${dbName}')[c].countDocuments() }; })`;
+
+        // Execute the script
+        const command = `sudo kubectl exec -n ${namespace} -i ${podName} -- mongosh --quiet --eval "${script}"`;
+        const output = shellExec(command, { stdout: true, silent: true });
+
+        if (!output || output.trim() === '') {
+          logger.warn('No collections found or empty output');
+          return null;
+        }
+
+        // Clean the output: remove newlines, handle EJSON format, replace single quotes with double quotes
+        let cleanedOutput = output
+          .trim()
+          .replace(/\n/g, '')
+          .replace(/\s+/g, ' ')
+          .replace(/NumberLong\("(\d+)"\)/g, '$1')
+          .replace(/NumberLong\((\d+)\)/g, '$1')
+          .replace(/NumberInt\("(\d+)"\)/g, '$1')
+          .replace(/NumberInt\((\d+)\)/g, '$1')
+          .replace(/ISODate\("([^"]+)"\)/g, '"$1"')
+          .replace(/'/g, '"')
+          .replace(/(\w+):/g, '"$1":');
+
+        try {
+          const stats = JSON.parse(cleanedOutput);
+          logger.info('MongoDB statistics retrieved', { dbName, collections: stats.length });
+          return stats;
+        } catch (parseError) {
+          logger.error('Failed to parse MongoDB output', {
+            podName,
+            dbName,
+            error: parseError.message,
+            rawOutput: output.substring(0, 200),
+            cleanedOutput: cleanedOutput.substring(0, 200),
+          });
+          return null;
+        }
+      } catch (error) {
+        logger.error('Failed to get MongoDB statistics', { podName, dbName, error: error.message });
+        return null;
+      }
+    },
+
+    /**
+     * Helper: Gets MariaDB table statistics
+     * @private
+     * @param {Object} params - Parameters
+     * @param {string} params.podName - Pod name
+     * @param {string} params.namespace - Namespace
+     * @param {string} params.dbName - Database name
+     * @param {string} params.user - Database user
+     * @param {string} params.password - Database password
+     * @returns {Object|null} Table statistics or null on error
+     */
+    _getMariaDBStats({ podName, namespace, dbName, user, password }) {
+      try {
+        logger.info('Getting MariaDB table statistics', { podName, dbName });
+
+        const command = `sudo kubectl exec -n ${namespace} -i ${podName} -- mariadb -u ${user} -p${password} ${dbName} -e "SELECT TABLE_NAME as 'table', TABLE_ROWS as 'count' FROM information_schema.TABLES WHERE TABLE_SCHEMA = '${dbName}' ORDER BY TABLE_NAME;" --skip-column-names --batch`;
+        const output = shellExec(command, { stdout: true, silent: true });
+
+        if (!output || output.trim() === '') {
+          logger.warn('No tables found or empty output');
+          return null;
+        }
+
+        // Parse the output (tab-separated values)
+        const lines = output.trim().split('\n');
+        const stats = lines.map((line) => {
+          const [table, count] = line.split('\t');
+          return { table, count: parseInt(count) || 0 };
+        });
+
+        logger.info('MariaDB statistics retrieved', { dbName, tables: stats.length });
+        return stats;
+      } catch (error) {
+        logger.error('Failed to get MariaDB statistics', { podName, dbName, error: error.message });
+        return null;
+      }
+    },
+
+    /**
+     * Helper: Displays database statistics in table format
+     * @private
+     * @param {Object} params - Parameters
+     * @param {string} params.provider - Database provider
+     * @param {string} params.dbName - Database name
+     * @param {Array<Object>} params.stats - Statistics array
+     */
+    _displayStats({ provider, dbName, stats }) {
+      if (!stats || stats.length === 0) {
+        logger.warn('No statistics to display', { provider, dbName });
+        return;
+      }
+
+      const title = provider === 'mongoose' ? 'Collections' : 'Tables';
+      const itemKey = provider === 'mongoose' ? 'collection' : 'table';
+
+      console.log('\n' + '='.repeat(70));
+      console.log(`DATABASE: ${dbName} (${provider.toUpperCase()})`);
+      console.log('='.repeat(70));
+      console.log(`${title.padEnd(50)} ${'Documents/Rows'.padStart(18)}`);
+      console.log('-'.repeat(70));
+
+      let totalCount = 0;
+      stats.forEach((item) => {
+        const name = item[itemKey] || 'Unknown';
+        const count = item.count || 0;
+        totalCount += count;
+        console.log(`${name.padEnd(50)} ${count.toString().padStart(18)}`);
+      });
+
+      console.log('-'.repeat(70));
+      console.log(`${'TOTAL'.padEnd(50)} ${totalCount.toString().padStart(18)}`);
+      console.log('='.repeat(70) + '\n');
+    },
+
+    /**
+     * Public API: Gets MongoDB primary pod name
+     * @public
+     * @param {Object} options - Options for getting primary pod
+     * @param {string} [options.namespace='default'] - Kubernetes namespace
+     * @param {string} [options.podName='mongodb-0'] - Initial pod name to query replica set status
+     * @returns {string|null} Primary pod name or null if not found
+     * @memberof UnderpostDB
+     * @example
+     * const primaryPod = UnderpostDB.API.getMongoPrimaryPodName({ namespace: 'production' });
+     * console.log(primaryPod); // 'mongodb-1'
+     */
+    getMongoPrimaryPodName(options = { namespace: 'default', podName: 'mongodb-0' }) {
+      const { namespace = 'default', podName = 'mongodb-0' } = options;
+
+      try {
+        logger.info('Checking for MongoDB primary pod', { namespace, checkingPod: podName });
+
+        const command = `sudo kubectl exec -n ${namespace} -i ${podName} -- mongosh --quiet --eval 'rs.status().members.filter(m => m.stateStr=="PRIMARY").map(m=>m.name)'`;
+        const output = shellExec(command, { stdout: true, silent: true });
+
+        if (!output || output.trim() === '') {
+          logger.warn('No primary pod found in replica set');
+          return null;
+        }
+
+        // Parse the output to get the primary pod name
+        // Output format: [ 'mongodb-0:27017' ] or [ 'mongodb-1.mongodb-service:27017' ] or similar
+        const match = output.match(/['"]([^'"]+)['"]/);
+        if (match && match[1]) {
+          let primaryName = match[1].split(':')[0]; // Extract pod name without port
+          // Remove service suffix if present (e.g., "mongodb-1.mongodb-service" -> "mongodb-1")
+          primaryName = primaryName.split('.')[0];
+          logger.info('Found MongoDB primary pod', { primaryPod: primaryName });
+          return primaryName;
+        }
+
+        logger.warn('Could not parse primary pod from replica set status', { output });
+        return null;
+      } catch (error) {
+        logger.error('Failed to get MongoDB primary pod', { error: error.message });
+        return null;
+      }
+    },
+
+    /**
+     * Main callback: Initiates database backup workflow
      * @method callback
-     * @description
-     * This method orchestrates the backup process for multiple deployments, handling
+     * @description Orchestrates the backup process for multiple deployments, handling
      * database connections, backup storage, and optional Git integration for version control.
-     *
-     * @param {
-     * @param {
-     * @
-     * @param {string} [options.podName=false] - The name of the Kubernetes pod to use for database operations.
-     * @param {string} [options.ns=false] - The namespace to use for database operations.
-     * @param {string} [options.collections=''] - Comma-separated list of collections to include in the backup.
-     * @param {string} [options.outPath=''] - Output path for the backup file.
-     * @param {boolean} [options.drop=false] - Flag to drop the database before importing.
-     * @param {boolean} [options.preserveUUID=false] - Flag to preserve UUIDs during import.
-     * @param {boolean} [options.git=false] - Flag to enable Git integration for version control.
-     * @param {string} [options.hosts=''] - Comma-separated list of hosts to include in the backup.
-     * @param {string} [options.paths=''] - Comma-separated list of paths to include in the backup.
+     * Supports targeting multiple specific pods, nodes, and namespaces with advanced filtering.
+     * @param {string} [deployList='default'] - Comma-separated list of deployment IDs
+     * @param {DatabaseOptions} [options] - Database operation options
+     * @returns {Promise<void>}
      * @memberof UnderpostDB
+     * @example
+     * // Export database from specific pods
+     * await UnderpostDB.API.callback('dd-myapp', {
+     *   export: true,
+     *   podName: 'mariadb-statefulset-0,mariadb-statefulset-1',
+     *   ns: 'production'
+     * });
+     *
+     * @example
+     * // Import database to all matching pods on specific nodes
+     * await UnderpostDB.API.callback('dd-myapp', {
+     *   import: true,
+     *   nodeName: 'node-1,node-2',
+     *   allPods: true,
+     *   ns: 'staging'
+     * });
+     *
+     * @example
+     * // Import to MongoDB primary pod only
+     * await UnderpostDB.API.callback('dd-myapp', {
+     *   import: true,
+     *   primaryPod: true,
+     *   ns: 'production'
+     * });
      */
     async callback(
       deployList = 'default',
       options = {
         import: false,
         export: false,
-        podName:
-
+        podName: '',
+        nodeName: '',
+        ns: 'default',
         collections: '',
         outPath: '',
         drop: false,
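Note on the new `_getFilteredPods` helper above: `--pod-name` patterns are matched by translating shell-style wildcards into anchored regular expressions. A minimal standalone sketch of that translation (the pod names below are illustrative, not from a real cluster; only the matching logic mirrors the diff):

// Sketch of the wildcard-to-regex translation used by _getFilteredPods.
// Hypothetical pod list; the filter logic is the same as in the diff above.
const pods = [{ NAME: 'mariadb-statefulset-0' }, { NAME: 'mongodb-0' }, { NAME: 'api-7f9c' }];
const patterns = 'mariadb-*,mongodb-0'.split(',').map((p) => p.trim());

const matched = pods.filter((pod) =>
  patterns.some((pattern) => {
    // '*' becomes '.*', and '^...$' anchors the match to the whole pod name
    const regex = new RegExp('^' + pattern.replace(/\*/g, '.*') + '$');
    return regex.test(pod.NAME);
  }),
);

console.log(matched.map((p) => p.NAME)); // [ 'mariadb-statefulset-0', 'mongodb-0' ]

One thing worth noting about this design: only `*` is translated, so other regex metacharacters in a pattern pass through unescaped.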
@@ -59,17 +825,50 @@ class UnderpostDB {
         git: false,
         hosts: '',
         paths: '',
+        labelSelector: '',
+        allPods: false,
+        dryRun: false,
+        primaryPod: false,
+        stats: false,
       },
     ) {
       const newBackupTimestamp = new Date().getTime();
-      const
+      const namespace = options.ns && typeof options.ns === 'string' ? options.ns : 'default';
+
+      // Validate namespace
+      if (!UnderpostDB.API._validateNamespace(namespace)) {
+        logger.error('Invalid namespace format', { namespace });
+        throw new Error(`Invalid namespace: ${namespace}`);
+      }
+
+      logger.info('Starting database operation', {
+        deployList,
+        namespace,
+        import: options.import,
+        export: options.export,
+        dryRun: options.dryRun,
+      });
+
      for (const _deployId of deployList.split(',')) {
        const deployId = _deployId.trim();
        if (!deployId) continue;
+
+        logger.info('Processing deployment', { deployId });
+
+        /** @type {Object.<string, Object.<string, DatabaseConfig>>} */
        const dbs = {};
        const repoName = `engine-${deployId.split('dd-')[1]}-cron-backups`;
 
-
+        // Load server configuration
+        const confServerPath = `./engine-private/conf/${deployId}/conf.server.json`;
+        if (!fs.existsSync(confServerPath)) {
+          logger.error('Configuration file not found', { path: confServerPath });
+          continue;
+        }
+
+        const confServer = JSON.parse(fs.readFileSync(confServerPath, 'utf8'));
+
+        // Build database configuration map
        for (const host of Object.keys(confServer)) {
          for (const path of Object.keys(confServer[host])) {
            const { db } = confServer[host][path];
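Note on the `_validateNamespace` guard now wired into `callback` above: the regex mirrors the RFC 1123 label rules Kubernetes itself enforces for namespace names (lowercase alphanumerics and hyphens, starting and ending alphanumeric, at most 63 characters). A quick sketch of its behavior, using the same regex as the diff with illustrative inputs:

// Behavior of the namespace check added above (same regex as in the diff).
const isValidNamespace = (ns) =>
  typeof ns === 'string' && /^[a-z0-9]([-a-z0-9]*[a-z0-9])?$/.test(ns) && ns.length <= 63;

console.log(isValidNamespace('production'));   // true
console.log(isValidNamespace('dd-test'));      // true
console.log(isValidNamespace('Prod'));         // false - uppercase not allowed
console.log(isValidNamespace('-leading'));     // false - must start alphanumeric
console.log(isValidNamespace('a'.repeat(64))); // false - exceeds 63 characters

Because `callback` throws on an invalid namespace before any pod is touched, a bad `--ns` value fails fast instead of producing a half-run kubectl session.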
@@ -77,178 +876,260 @@ class UnderpostDB {
              const { provider, name, user, password } = db;
              if (!dbs[provider]) dbs[provider] = {};
 
-              if (!(name in dbs[provider]))
-                dbs[provider][name] = {
+              if (!(name in dbs[provider])) {
+                dbs[provider][name] = {
+                  user,
+                  password,
+                  hostFolder: host + path.replaceAll('/', '-'),
+                  host,
+                  path,
+                };
+              }
            }
          }
        }
 
+        // Handle Git operations
        if (options.git === true) {
-
-
-        } else {
-          shellExec(`cd ../${repoName} && git checkout . && git clean -f -d`);
-          shellExec(`cd ../${repoName} && underpost pull . ${process.env.GITHUB_USERNAME}/${repoName}`);
-        }
+          UnderpostDB.API._manageGitRepo({ repoName, operation: 'clone' });
+          UnderpostDB.API._manageGitRepo({ repoName, operation: 'pull' });
        }
 
+        // Process each database provider
        for (const provider of Object.keys(dbs)) {
          for (const dbName of Object.keys(dbs[provider])) {
            const { hostFolder, user, password, host, path } = dbs[provider][dbName];
+
+            // Filter by hosts and paths if specified
            if (
-              (options.hosts &&
-
-
+              (options.hosts &&
+                !options.hosts
+                  .split(',')
+                  .map((h) => h.trim())
+                  .includes(host)) ||
+              (options.paths &&
+                !options.paths
+                  .split(',')
+                  .map((p) => p.trim())
+                  .includes(path))
+            ) {
+              logger.info('Skipping database due to host/path filter', { dbName, host, path });
              continue;
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            }
+
+            if (!hostFolder) {
+              logger.warn('No hostFolder defined for database', { dbName, provider });
+              continue;
+            }
+
+            logger.info('Processing database', { hostFolder, provider, dbName });
+
+            const backUpPath = `../${repoName}/${hostFolder}`;
+            const backupInfo = UnderpostDB.API._manageBackupTimestamps(
+              backUpPath,
+              newBackupTimestamp,
+              options.export === true,
+            );
+
+            dbs[provider][dbName].currentBackupTimestamp = backupInfo.current;
+
+            const currentTimestamp = backupInfo.current || newBackupTimestamp;
+            const sqlContainerPath = `/home/${dbName}.sql`;
+            const fromPartsPath = `../${repoName}/${hostFolder}/${currentTimestamp}/${dbName}-parths.json`;
+            const toSqlPath = `../${repoName}/${hostFolder}/${currentTimestamp}/${dbName}.sql`;
+            const toNewSqlPath = `../${repoName}/${hostFolder}/${newBackupTimestamp}/${dbName}.sql`;
+            const toBsonPath = `../${repoName}/${hostFolder}/${currentTimestamp}/${dbName}`;
+            const toNewBsonPath = `../${repoName}/${hostFolder}/${newBackupTimestamp}/${dbName}`;
+
+            // Merge split SQL files if needed for import
+            if (options.import === true && fs.existsSync(fromPartsPath) && !fs.existsSync(toSqlPath)) {
+              const names = JSON.parse(fs.readFileSync(fromPartsPath, 'utf8')).map((_path) => {
+                return `../${repoName}/${hostFolder}/${currentTimestamp}/${_path.split('/').pop()}`;
+              });
+              logger.info('Merging backup parts', { fromPartsPath, toSqlPath, parts: names.length });
+              await mergeFile(names, toSqlPath);
+            }
+
+            // Get target pods based on provider and options
+            let targetPods = [];
+            const podCriteria = {
+              podNames: options.podName,
+              nodeNames: options.nodeName,
+              namespace,
+              labelSelector: options.labelSelector,
+              deployId: provider === 'mariadb' ? 'mariadb' : 'mongo',
+            };
+
+            targetPods = UnderpostDB.API._getFilteredPods(podCriteria);
+
+            // Fallback to default if no custom pods specified
+            if (targetPods.length === 0 && !options.podName && !options.nodeName) {
+              const defaultPods = UnderpostDeploy.API.get(
+                provider === 'mariadb' ? 'mariadb' : 'mongo',
+                'pods',
+                namespace,
+              );
+              console.log('defaultPods', defaultPods);
+              targetPods = defaultPods;
+            }
+
+            if (targetPods.length === 0) {
+              logger.warn('No pods found matching criteria', { provider, criteria: podCriteria });
+              continue;
+            }
+
+            // Handle primary pod detection for MongoDB
+            let podsToProcess = [];
+            if (provider === 'mongoose' && !options.allPods) {
+              // For MongoDB, always use primary pod unless allPods is true
+              if (!targetPods || targetPods.length === 0) {
+                logger.warn('No MongoDB pods available to check for primary');
+                podsToProcess = [];
+              } else {
+                const firstPod = targetPods[0].NAME;
+                const primaryPodName = UnderpostDB.API.getMongoPrimaryPodName({ namespace, podName: firstPod });
 
-
-
-
-
-
+                if (primaryPodName) {
+                  const primaryPod = targetPods.find((p) => p.NAME === primaryPodName);
+                  if (primaryPod) {
+                    podsToProcess = [primaryPod];
+                    logger.info('Using MongoDB primary pod', { primaryPod: primaryPodName });
+                  } else {
+                    logger.warn('Primary pod not in filtered list, using first pod', { primaryPodName });
+                    podsToProcess = [targetPods[0]];
+                  }
+                } else {
+                  logger.warn('Could not detect primary pod, using first pod');
+                  podsToProcess = [targetPods[0]];
+                }
              }
+            } else {
+              // For MariaDB or when allPods is true, limit to first pod unless allPods is true
+              podsToProcess = options.allPods === true ? targetPods : [targetPods[0]];
+            }
+
+            logger.info(`Processing ${podsToProcess.length} pod(s) for ${provider}`, {
+              dbName,
+              pods: podsToProcess.map((p) => p.NAME),
+            });
+
+            // Process each pod
+            for (const pod of podsToProcess) {
+              logger.info('Processing pod', { podName: pod.NAME, node: pod.NODE, status: pod.STATUS });
 
              switch (provider) {
                case 'mariadb': {
-
-
-
-
-
-
-
-
-
-
-                  const cmd = `mariadb -u ${user} -p${password} ${dbName} < /${dbName}.sql`;
-                  shellExec(
-                    `kubectl exec -n ${nameSpace} -i ${podName} -- ${serviceName} -p${password} -e 'CREATE DATABASE ${dbName};'`,
-                  );
-                  shellExec(`sudo kubectl exec -n ${nameSpace} -i ${podName} -- sh -c "${cmd}"`);
-                }
-                if (options.export === true) {
-                  shellExec(
-                    `sudo kubectl exec -n ${nameSpace} -i ${podName} -- sh -c "rm -rf ${sqlContainerPath}"`,
-                  );
-                  const cmd = `mariadb-dump --user=${user} --password=${password} --lock-tables ${dbName} > ${sqlContainerPath}`;
-                  shellExec(`sudo kubectl exec -n ${nameSpace} -i ${podName} -- sh -c "${cmd}"`);
-                  shellExec(
-                    `sudo kubectl cp ${nameSpace}/${podName}:${sqlContainerPath} ${
-                      options.outPath ? options.outPath : _toNewSqlPath
-                    }`,
-                  );
-                  await splitFileFactory(dbName, options.outPath ? options.outPath : _toNewSqlPath);
+                  if (options.stats === true) {
+                    const stats = UnderpostDB.API._getMariaDBStats({
+                      podName: pod.NAME,
+                      namespace,
+                      dbName,
+                      user,
+                      password,
+                    });
+                    if (stats) {
+                      UnderpostDB.API._displayStats({ provider, dbName, stats });
                    }
                  }
+
+                  if (options.import === true) {
+                    UnderpostDB.API._importMariaDB({
+                      pod,
+                      namespace,
+                      dbName,
+                      user,
+                      password,
+                      sqlPath: toSqlPath,
+                      dryRun: options.dryRun,
+                    });
+                  }
+
+                  if (options.export === true) {
+                    const outputPath = options.outPath || toNewSqlPath;
+                    await UnderpostDB.API._exportMariaDB({
+                      pod,
+                      namespace,
+                      dbName,
+                      user,
+                      password,
+                      outputPath,
+                      dryRun: options.dryRun,
+                    });
+                  }
                  break;
                }
 
                case 'mongoose': {
-                  if (options.
-                  const
-
-
-
-
-
-
-                  shellExec(`sudo kubectl exec -n ${nameSpace} -i ${podName} -- sh -c "rm -rf /${dbName}"`);
-                  shellExec(
-                    `sudo kubectl cp ${
-                      options.outPath ? options.outPath : _toBsonPath
-                    } ${nameSpace}/${podName}:/${dbName}`,
-                  );
-                  const cmd = `mongorestore -d ${dbName} /${dbName}${options.drop ? ' --drop' : ''}${
-                    options.preserveUUID ? ' --preserveUUID' : ''
-                  }`;
-                  shellExec(`sudo kubectl exec -n ${nameSpace} -i ${podName} -- sh -c "${cmd}"`);
+                  if (options.stats === true) {
+                    const stats = UnderpostDB.API._getMongoStats({
+                      podName: pod.NAME,
+                      namespace,
+                      dbName,
+                    });
+                    if (stats) {
+                      UnderpostDB.API._displayStats({ provider, dbName, stats });
                    }
                  }
+
+                  if (options.import === true) {
+                    const bsonPath = options.outPath || toBsonPath;
+                    UnderpostDB.API._importMongoDB({
+                      pod,
+                      namespace,
+                      dbName,
+                      bsonPath,
+                      drop: options.drop,
+                      preserveUUID: options.preserveUUID,
+                      dryRun: options.dryRun,
+                    });
+                  }
+
                  if (options.export === true) {
-                    const
-
-
-
-
-
-
-
-
-                    shellExec(
-                      `sudo kubectl exec -n ${nameSpace} -i ${podName} -- sh -c "mongodump -d ${dbName} --collection ${collection} -o /"`,
-                    );
-                    else
-                      shellExec(
-                        `sudo kubectl exec -n ${nameSpace} -i ${podName} -- sh -c "mongodump -d ${dbName} -o /"`,
-                      );
-                    shellExec(
-                      `sudo kubectl cp ${nameSpace}/${podName}:/${dbName} ${
-                        options.outPath ? options.outPath : _toNewBsonPath
-                      }`,
-                    );
-                  }
+                    const outputPath = options.outPath || toNewBsonPath;
+                    UnderpostDB.API._exportMongoDB({
+                      pod,
+                      namespace,
+                      dbName,
+                      outputPath,
+                      collections: options.collections,
+                      dryRun: options.dryRun,
+                    });
                  }
                  break;
                }
 
                default:
+                  logger.warn('Unsupported database provider', { provider });
                  break;
              }
            }
          }
        }
+
+        // Commit and push to Git if enabled
        if (options.export === true && options.git === true) {
-
-
-
-
-
-          );
-          shellExec(`cd ../${repoName} && underpost push . ${process.env.GITHUB_USERNAME}/${repoName}`, {
-            disableLog: true,
-          });
+          const commitMessage = `${new Date(newBackupTimestamp).toLocaleDateString()} ${new Date(
+            newBackupTimestamp,
+          ).toLocaleTimeString()}`;
+          UnderpostDB.API._manageGitRepo({ repoName, operation: 'commit', message: commitMessage });
+          UnderpostDB.API._manageGitRepo({ repoName, operation: 'push' });
        }
      }
+
+      logger.info('Database operation completed successfully');
    },
 
    /**
+     * Creates cluster metadata for the specified deployment
     * @method clusterMetadataFactory
-     * @description
-     *
-     *
-     * @param {string} [
-     * @param {string} [
-     * @
+     * @description Loads database configuration and initializes cluster metadata including
+     * instances and cron jobs. This method populates the database with deployment information.
+     * @param {string} [deployId=process.env.DEFAULT_DEPLOY_ID] - The deployment ID
+     * @param {string} [host=process.env.DEFAULT_DEPLOY_HOST] - The host identifier
+     * @param {string} [path=process.env.DEFAULT_DEPLOY_PATH] - The path identifier
+     * @returns {Promise<void>}
     * @memberof UnderpostDB
+     * @throws {Error} If database configuration is invalid or connection fails
     */
    async clusterMetadataFactory(
      deployId = process.env.DEFAULT_DEPLOY_ID,
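Note on `_manageBackupTimestamps`, used by the export path above: each child directory named by an epoch-millisecond timestamp is one backup generation; the newest (max) becomes the restore source and the oldest (min) the eviction candidate once `MAX_BACKUP_RETENTION` (5) is reached. A sketch of the selection logic on a hypothetical directory listing:

// Selection logic from _manageBackupTimestamps, run on a hypothetical listing.
const MAX_BACKUP_RETENTION = 5;
const entries = ['1730000000000', '1731000000000', '1732000000000', 'README.md'];

// Non-numeric entries (e.g. README.md) are filtered out before comparison
const validTimes = entries.map((t) => parseInt(t)).filter((t) => !isNaN(t));

const current = validTimes.length > 0 ? Math.max(...validTimes) : null; // newest: restore source
const oldest = validTimes.length > 0 ? Math.min(...validTimes) : null; // oldest: eviction candidate

// Eviction only triggers when retention is exhausted and a new export is about to be written
const shouldEvict = validTimes.length >= MAX_BACKUP_RETENTION;
console.log({ current, oldest, shouldEvict }); // { current: 1732000000000, oldest: 1730000000000, shouldEvict: false }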
@@ -258,12 +1139,27 @@ class UnderpostDB {
      deployId = deployId ?? process.env.DEFAULT_DEPLOY_ID;
      host = host ?? process.env.DEFAULT_DEPLOY_HOST;
      path = path ?? process.env.DEFAULT_DEPLOY_PATH;
+
+      logger.info('Creating cluster metadata', { deployId, host, path });
+
      const env = 'production';
-      const
+      const deployListPath = './engine-private/deploy/dd.router';
+
+      if (!fs.existsSync(deployListPath)) {
+        logger.error('Deploy router file not found', { path: deployListPath });
+        throw new Error(`Deploy router file not found: ${deployListPath}`);
+      }
+
+      const deployList = fs.readFileSync(deployListPath, 'utf8').split(',');
+
+      const confServerPath = `./engine-private/conf/${deployId}/conf.server.json`;
+      if (!fs.existsSync(confServerPath)) {
+        logger.error('Server configuration not found', { path: confServerPath });
+        throw new Error(`Server configuration not found: ${confServerPath}`);
+      }
+
+      const { db } = JSON.parse(fs.readFileSync(confServerPath, 'utf8'))[host][path];
 
-      const { db } = JSON.parse(fs.readFileSync(`./engine-private/conf/${deployId}/conf.server.json`, 'utf8'))[host][
-        path
-      ];
      try {
        await DataBaseProvider.load({ apis: ['instance', 'cron'], host, path, db });
 
@@ -271,14 +1167,21 @@ class UnderpostDB {
        const Instance = DataBaseProvider.instance[`${host}${path}`].mongoose.models.Instance;
 
        await Instance.deleteMany();
+        logger.info('Cleared existing instance metadata');
 
        for (const _deployId of deployList) {
          const deployId = _deployId.trim();
          if (!deployId) continue;
-
-
-
-
+
+          logger.info('Processing deployment for metadata', { deployId });
+
+          const confServerPath = `./engine-private/conf/${deployId}/conf.server.json`;
+          if (!fs.existsSync(confServerPath)) {
+            logger.warn('Configuration not found for deployment', { deployId, path: confServerPath });
+            continue;
+          }
+
+          const confServer = loadReplicas(deployId, JSON.parse(fs.readFileSync(confServerPath, 'utf8')));
          const router = await UnderpostDeploy.API.routerFactory(deployId, env);
          const pathPortAssignmentData = await pathPortAssignmentFactory(deployId, router, confServer);
 
@@ -287,6 +1190,8 @@ class UnderpostDB {
               if (!confServer[host][path]) continue;
 
               const { client, runtime, apis, peer } = confServer[host][path];
+
+              // Save main instance
               {
                 const body = {
                   deployId,
@@ -298,10 +1203,11 @@ class UnderpostDB {
                   apis,
                 };
 
-                logger.info('
+                logger.info('Saving instance metadata', body);
                 await new Instance(body).save();
               }
 
+              // Save peer instance if exists
               if (peer) {
                 const body = {
                   deployId,
@@ -311,15 +1217,16 @@ class UnderpostDB {
                   runtime: 'nodejs',
                 };
 
-                logger.info('
+                logger.info('Saving peer instance metadata', body);
                 await new Instance(body).save();
               }
             }
           }
-
-
-
-
+
+          // Process additional instances
+          const confInstancesPath = `./engine-private/conf/${deployId}/conf.instances.json`;
+          if (fs.existsSync(confInstancesPath)) {
+            const confInstances = JSON.parse(fs.readFileSync(confInstancesPath, 'utf8'));
             for (const instance of confInstances) {
               const { id, host, path, fromPort, metadata } = instance;
               const { runtime } = metadata;
@@ -331,18 +1238,31 @@ class UnderpostDB {
                 client: id,
                 runtime,
               };
-              logger.info('
+              logger.info('Saving additional instance metadata', body);
               await new Instance(body).save();
             }
           }
         }
       } catch (error) {
-        logger.error(error, error.stack);
+        logger.error('Failed to create instance metadata', { error: error.message, stack: error.stack });
+        throw error;
       }
 
       try {
-        const
+        const cronDeployPath = './engine-private/deploy/dd.cron';
+        if (!fs.existsSync(cronDeployPath)) {
+          logger.warn('Cron deploy file not found', { path: cronDeployPath });
+          return;
+        }
+
+        const cronDeployId = fs.readFileSync(cronDeployPath, 'utf8').trim();
         const confCronPath = `./engine-private/conf/${cronDeployId}/conf.cron.json`;
+
+        if (!fs.existsSync(confCronPath)) {
+          logger.warn('Cron configuration not found', { path: confCronPath });
+          return;
+        }
+
         const confCron = JSON.parse(fs.readFileSync(confCronPath, 'utf8'));
 
         await DataBaseProvider.load({ apis: ['cron'], host, path, db });
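The cron loop that follows iterates `confCron.jobs` by job ID and reads `expression` and `enabled` from each entry, which suggests a `conf.cron.json` along these lines (job names and schedules below are hypothetical):

```js
// Hypothetical ./engine-private/conf/<cronDeployId>/conf.cron.json shape:
const confCron = {
  jobs: {
    backups: { expression: '0 2 * * *', enabled: true },
    healthCheck: { expression: '*/5 * * * *', enabled: false },
  },
};

// Mirrors the iteration pattern used in the code below.
for (const jobId of Object.keys(confCron.jobs)) {
  const { expression, enabled } = confCron.jobs[jobId];
  console.log(jobId, expression, enabled);
}
```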
@@ -351,6 +1271,7 @@ class UnderpostDB {
         const Cron = DataBaseProvider.instance[`${host}${path}`].mongoose.models.Cron;
 
         await Cron.deleteMany();
+        logger.info('Cleared existing cron metadata');
 
         for (const jobId of Object.keys(confCron.jobs)) {
           const body = {
@@ -359,33 +1280,36 @@ class UnderpostDB {
             expression: confCron.jobs[jobId].expression,
             enabled: confCron.jobs[jobId].enabled,
           };
-          logger.info('
+          logger.info('Saving cron metadata', body);
           await new Cron(body).save();
         }
       } catch (error) {
-        logger.error(error, error.stack);
+        logger.error('Failed to create cron metadata', { error: error.message, stack: error.stack });
       }
+
       await DataBaseProvider.instance[`${host}${path}`].mongoose.close();
+      logger.info('Cluster metadata creation completed');
     },
 
     /**
+     * Handles backup of cluster metadata
      * @method clusterMetadataBackupCallback
-     * @description
-     *
-     *
-     * @param {string} [
-     * @param {string} [
-     * @param {
-     * @param {
-     * @param {boolean} [options.
-     * @param {boolean} [options.
-     * @param {boolean} [options.
-     * @param {boolean} [options.
-     * @param {boolean} [options.
-     * @
+     * @description Orchestrates backup and restore operations for cluster metadata including
+     * instances and cron jobs. Supports import/export and metadata generation.
+     * @param {string} [deployId=process.env.DEFAULT_DEPLOY_ID] - The deployment ID
+     * @param {string} [host=process.env.DEFAULT_DEPLOY_HOST] - The host identifier
+     * @param {string} [path=process.env.DEFAULT_DEPLOY_PATH] - The path identifier
+     * @param {Object} [options] - Backup operation options
+     * @param {boolean} [options.generate=false] - Generate cluster metadata
+     * @param {boolean} [options.itc=false] - Execute in container context
+     * @param {boolean} [options.import=false] - Import metadata from backup
+     * @param {boolean} [options.export=false] - Export metadata to backup
+     * @param {boolean} [options.instances=false] - Process instances collection
+     * @param {boolean} [options.crons=false] - Process crons collection
+     * @returns {void}
      * @memberof UnderpostDB
      */
-    clusterMetadataBackupCallback(
+    async clusterMetadataBackupCallback(
       deployId = process.env.DEFAULT_DEPLOY_ID,
       host = process.env.DEFAULT_DEPLOY_HOST,
       path = process.env.DEFAULT_DEPLOY_PATH,
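Since `clusterMetadataBackupCallback` is now `async` (it awaits `clusterMetadataFactory` when `options.generate` is set, as the next hunk shows), existing callers should await it. A minimal sketch with hypothetical argument values:

```js
// Hypothetical invocation; deployId/host/path values are placeholders.
await UnderpostDB.API.clusterMetadataBackupCallback('dd-default', 'example.com', '/', {
  generate: true,
  export: true,
  instances: true,
  crons: true,
});
```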
@@ -402,36 +1326,63 @@ class UnderpostDB {
       host = host ?? process.env.DEFAULT_DEPLOY_HOST;
       path = path ?? process.env.DEFAULT_DEPLOY_PATH;
 
+      logger.info('Starting cluster metadata backup operation', {
+        deployId,
+        host,
+        path,
+        options,
+      });
+
       if (options.generate === true) {
-
+        logger.info('Generating cluster metadata');
+        await UnderpostDB.API.clusterMetadataFactory(deployId, host, path);
       }
 
       if (options.instances === true) {
         const outputPath = './engine-private/instances';
-        if (fs.existsSync(outputPath))
+        if (!fs.existsSync(outputPath)) {
+          fs.mkdirSync(outputPath, { recursive: true });
+        }
         const collection = 'instances';
-
+
+        if (options.export === true) {
+          logger.info('Exporting instances collection', { outputPath });
           shellExec(
-            `node bin db --export --collections ${collection} --out-path ${outputPath} --hosts ${host} --paths '${path}' ${deployId}`,
+            `node bin db --export --primary-pod --collections ${collection} --out-path ${outputPath} --hosts ${host} --paths '${path}' ${deployId}`,
           );
-
+        }
+
+        if (options.import === true) {
+          logger.info('Importing instances collection', { outputPath });
           shellExec(
-            `node bin db --import --drop --preserveUUID --out-path ${outputPath} --hosts ${host} --paths '${path}' ${deployId}`,
+            `node bin db --import --primary-pod --drop --preserveUUID --out-path ${outputPath} --hosts ${host} --paths '${path}' ${deployId}`,
           );
+        }
       }
+
       if (options.crons === true) {
         const outputPath = './engine-private/crons';
-        if (fs.existsSync(outputPath))
+        if (!fs.existsSync(outputPath)) {
+          fs.mkdirSync(outputPath, { recursive: true });
+        }
         const collection = 'crons';
-
+
+        if (options.export === true) {
+          logger.info('Exporting crons collection', { outputPath });
           shellExec(
-            `node bin db --export --collections ${collection} --out-path ${outputPath} --hosts ${host} --paths '${path}' ${deployId}`,
+            `node bin db --export --primary-pod --collections ${collection} --out-path ${outputPath} --hosts ${host} --paths '${path}' ${deployId}`,
           );
-
+        }
+
+        if (options.import === true) {
+          logger.info('Importing crons collection', { outputPath });
           shellExec(
-            `node bin db --import --drop --preserveUUID --out-path ${outputPath} --hosts ${host} --paths '${path}' ${deployId}`,
+            `node bin db --import --primary-pod --drop --preserveUUID --out-path ${outputPath} --hosts ${host} --paths '${path}' ${deployId}`,
           );
+        }
       }
+
+      logger.info('Cluster metadata backup operation completed');
     },
   };
 }
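The export/import commands above now pass `--primary-pod`, presumably so dumps and restores target the primary database pod rather than an arbitrary replica. A sketch of the equivalent standalone invocations (host, path, and deploy ID below are hypothetical):

```js
import { shellExec } from '../server/process.js';

// Export the instances collection from the primary pod:
shellExec(
  `node bin db --export --primary-pod --collections instances --out-path ./engine-private/instances --hosts example.com --paths '/' dd-default`,
);

// Restore it later, dropping existing data and preserving UUIDs:
shellExec(
  `node bin db --import --primary-pod --drop --preserveUUID --out-path ./engine-private/instances --hosts example.com --paths '/' dd-default`,
);
```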