cyberia 3.1.3 → 3.2.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (208)
  1. package/.env.example +0 -2
  2. package/.github/workflows/engine-cyberia.cd.yml +10 -8
  3. package/.github/workflows/engine-cyberia.ci.yml +12 -29
  4. package/.github/workflows/ghpkg.ci.yml +4 -4
  5. package/.github/workflows/npmpkg.ci.yml +28 -11
  6. package/.github/workflows/publish.ci.yml +21 -2
  7. package/.github/workflows/pwa-microservices-template-page.cd.yml +4 -5
  8. package/.github/workflows/pwa-microservices-template-test.ci.yml +3 -3
  9. package/.github/workflows/release.cd.yml +13 -8
  10. package/CHANGELOG.md +433 -1
  11. package/CLI-HELP.md +57 -7
  12. package/Dockerfile +4 -2
  13. package/README.md +347 -22
  14. package/bin/build.js +5 -2
  15. package/bin/cyberia.js +1789 -112
  16. package/bin/deploy.js +177 -124
  17. package/bin/file.js +3 -0
  18. package/bin/index.js +1789 -112
  19. package/conf.js +64 -8
  20. package/deployment.yaml +92 -20
  21. package/hardhat/hardhat.config.js +13 -13
  22. package/hardhat/ignition/modules/ObjectLayerToken.js +1 -1
  23. package/hardhat/package-lock.json +2554 -5859
  24. package/hardhat/package.json +13 -22
  25. package/hardhat/scripts/deployObjectLayerToken.js +1 -1
  26. package/hardhat/test/ObjectLayerToken.js +4 -2
  27. package/hardhat/types/ethers-contracts/ObjectLayerToken.ts +690 -0
  28. package/hardhat/types/ethers-contracts/common.ts +92 -0
  29. package/hardhat/types/ethers-contracts/factories/ObjectLayerToken__factory.ts +1055 -0
  30. package/hardhat/types/ethers-contracts/factories/index.ts +4 -0
  31. package/hardhat/types/ethers-contracts/hardhat.d.ts +47 -0
  32. package/hardhat/types/ethers-contracts/index.ts +6 -0
  33. package/jsdoc.dd-cyberia.json +64 -55
  34. package/jsdoc.json +64 -55
  35. package/manifests/cronjobs/dd-cron/dd-cron-backup.yaml +5 -4
  36. package/manifests/cronjobs/dd-cron/dd-cron-dns.yaml +5 -4
  37. package/manifests/deployment/dd-cyberia-development/deployment.yaml +92 -20
  38. package/manifests/deployment/dd-cyberia-development/proxy.yaml +54 -18
  39. package/manifests/deployment/dd-default-development/deployment.yaml +2 -2
  40. package/manifests/deployment/dd-test-development/deployment.yaml +88 -74
  41. package/manifests/deployment/dd-test-development/proxy.yaml +13 -4
  42. package/manifests/deployment/playwright/deployment.yaml +1 -1
  43. package/nodemon.json +1 -1
  44. package/package.json +22 -16
  45. package/proxy.yaml +54 -18
  46. package/scripts/rhel-grpc-setup.sh +56 -0
  47. package/src/api/atlas-sprite-sheet/atlas-sprite-sheet.controller.js +44 -0
  48. package/src/api/atlas-sprite-sheet/atlas-sprite-sheet.model.js +16 -0
  49. package/src/api/atlas-sprite-sheet/atlas-sprite-sheet.router.js +5 -0
  50. package/src/api/atlas-sprite-sheet/atlas-sprite-sheet.service.js +80 -7
  51. package/src/api/cyberia-dialogue/cyberia-dialogue.controller.js +93 -0
  52. package/src/api/cyberia-dialogue/cyberia-dialogue.model.js +36 -0
  53. package/src/api/cyberia-dialogue/cyberia-dialogue.router.js +29 -0
  54. package/src/api/cyberia-dialogue/cyberia-dialogue.service.js +51 -0
  55. package/src/api/cyberia-entity/cyberia-entity.controller.js +74 -0
  56. package/src/api/cyberia-entity/cyberia-entity.model.js +24 -0
  57. package/src/api/cyberia-entity/cyberia-entity.router.js +27 -0
  58. package/src/api/cyberia-entity/cyberia-entity.service.js +42 -0
  59. package/src/api/cyberia-instance/cyberia-fallback-world.js +368 -0
  60. package/src/api/cyberia-instance/cyberia-instance.controller.js +92 -0
  61. package/src/api/cyberia-instance/cyberia-instance.model.js +84 -0
  62. package/src/api/cyberia-instance/cyberia-instance.router.js +63 -0
  63. package/src/api/cyberia-instance/cyberia-instance.service.js +191 -0
  64. package/src/api/cyberia-instance/cyberia-portal-connector.js +486 -0
  65. package/src/api/cyberia-instance-conf/cyberia-instance-conf.controller.js +74 -0
  66. package/src/api/cyberia-instance-conf/cyberia-instance-conf.defaults.js +413 -0
  67. package/src/api/cyberia-instance-conf/cyberia-instance-conf.model.js +228 -0
  68. package/src/api/cyberia-instance-conf/cyberia-instance-conf.router.js +27 -0
  69. package/src/api/cyberia-instance-conf/cyberia-instance-conf.service.js +42 -0
  70. package/src/api/cyberia-map/cyberia-map.controller.js +79 -0
  71. package/src/api/cyberia-map/cyberia-map.model.js +30 -0
  72. package/src/api/cyberia-map/cyberia-map.router.js +40 -0
  73. package/src/api/cyberia-map/cyberia-map.service.js +74 -0
  74. package/src/api/file/file.ref.json +18 -0
  75. package/src/api/ipfs/ipfs.controller.js +4 -25
  76. package/src/api/ipfs/ipfs.model.js +43 -34
  77. package/src/api/ipfs/ipfs.router.js +8 -13
  78. package/src/api/ipfs/ipfs.service.js +54 -102
  79. package/src/api/object-layer/README.md +347 -22
  80. package/src/api/object-layer/object-layer.router.js +30 -0
  81. package/src/api/object-layer/object-layer.service.js +114 -31
  82. package/src/api/user/user.service.js +8 -7
  83. package/src/cli/cluster.js +7 -7
  84. package/src/cli/db.js +710 -827
  85. package/src/cli/deploy.js +151 -93
  86. package/src/cli/env.js +29 -0
  87. package/src/cli/fs.js +5 -2
  88. package/src/cli/index.js +48 -2
  89. package/src/cli/kubectl.js +211 -0
  90. package/src/cli/release.js +284 -0
  91. package/src/cli/repository.js +438 -75
  92. package/src/cli/run.js +195 -35
  93. package/src/cli/secrets.js +73 -0
  94. package/src/cli/test.js +3 -3
  95. package/src/client/Cryptokoyn.index.js +3 -4
  96. package/src/client/CyberiaPortal.index.js +3 -4
  97. package/src/client/Default.index.js +3 -4
  98. package/src/client/Itemledger.index.js +3 -4
  99. package/src/client/Underpost.index.js +3 -4
  100. package/src/client/components/core/AppStore.js +69 -0
  101. package/src/client/components/core/CalendarCore.js +2 -2
  102. package/src/client/components/core/DropDown.js +137 -17
  103. package/src/client/components/core/Keyboard.js +2 -2
  104. package/src/client/components/core/LogIn.js +2 -2
  105. package/src/client/components/core/LogOut.js +2 -2
  106. package/src/client/components/core/Modal.js +0 -1
  107. package/src/client/components/core/Panel.js +0 -1
  108. package/src/client/components/core/PanelForm.js +19 -19
  109. package/src/client/components/core/SocketIo.js +82 -29
  110. package/src/client/components/core/SocketIoHandler.js +75 -0
  111. package/src/client/components/core/Stream.js +143 -95
  112. package/src/client/components/core/Webhook.js +40 -7
  113. package/src/client/components/cryptokoyn/AppStoreCryptokoyn.js +5 -0
  114. package/src/client/components/cryptokoyn/LogInCryptokoyn.js +3 -3
  115. package/src/client/components/cryptokoyn/LogOutCryptokoyn.js +2 -2
  116. package/src/client/components/cryptokoyn/MenuCryptokoyn.js +3 -3
  117. package/src/client/components/cryptokoyn/SocketIoCryptokoyn.js +3 -51
  118. package/src/client/components/cyberia/InstanceEngineCyberia.js +700 -0
  119. package/src/client/components/cyberia/MapEngineCyberia.js +1359 -2
  120. package/src/client/components/cyberia/ObjectLayerEngineModal.js +17 -6
  121. package/src/client/components/cyberia/ObjectLayerEngineViewer.js +92 -54
  122. package/src/client/components/cyberia-portal/AppStoreCyberiaPortal.js +5 -0
  123. package/src/client/components/cyberia-portal/CommonCyberiaPortal.js +216 -30
  124. package/src/client/components/cyberia-portal/LogInCyberiaPortal.js +3 -3
  125. package/src/client/components/cyberia-portal/LogOutCyberiaPortal.js +2 -2
  126. package/src/client/components/cyberia-portal/MenuCyberiaPortal.js +40 -7
  127. package/src/client/components/cyberia-portal/RoutesCyberiaPortal.js +4 -0
  128. package/src/client/components/cyberia-portal/SocketIoCyberiaPortal.js +3 -49
  129. package/src/client/components/cyberia-portal/TranslateCyberiaPortal.js +4 -0
  130. package/src/client/components/default/AppStoreDefault.js +5 -0
  131. package/src/client/components/default/LogInDefault.js +3 -3
  132. package/src/client/components/default/LogOutDefault.js +2 -2
  133. package/src/client/components/default/MenuDefault.js +5 -5
  134. package/src/client/components/default/SocketIoDefault.js +3 -51
  135. package/src/client/components/itemledger/AppStoreItemledger.js +5 -0
  136. package/src/client/components/itemledger/LogInItemledger.js +3 -3
  137. package/src/client/components/itemledger/LogOutItemledger.js +2 -2
  138. package/src/client/components/itemledger/MenuItemledger.js +3 -3
  139. package/src/client/components/itemledger/SocketIoItemledger.js +3 -51
  140. package/src/client/components/underpost/AppStoreUnderpost.js +5 -0
  141. package/src/client/components/underpost/LogInUnderpost.js +3 -3
  142. package/src/client/components/underpost/LogOutUnderpost.js +2 -2
  143. package/src/client/components/underpost/MenuUnderpost.js +5 -5
  144. package/src/client/components/underpost/SocketIoUnderpost.js +3 -51
  145. package/src/client/services/core/core.service.js +20 -8
  146. package/src/client/services/cyberia-dialogue/cyberia-dialogue.service.js +105 -0
  147. package/src/client/services/cyberia-entity/cyberia-entity.management.js +57 -0
  148. package/src/client/services/cyberia-entity/cyberia-entity.service.js +105 -0
  149. package/src/client/services/cyberia-instance/cyberia-instance.management.js +194 -0
  150. package/src/client/services/cyberia-instance/cyberia-instance.service.js +122 -0
  151. package/src/client/services/cyberia-instance-conf/cyberia-instance-conf.service.js +105 -0
  152. package/src/client/services/cyberia-map/cyberia-map.management.js +193 -0
  153. package/src/client/services/cyberia-map/cyberia-map.service.js +126 -0
  154. package/src/client/services/instance/instance.management.js +2 -2
  155. package/src/client/services/ipfs/ipfs.service.js +3 -23
  156. package/src/client/services/object-layer/object-layer.management.js +3 -3
  157. package/src/client/services/object-layer/object-layer.service.js +21 -0
  158. package/src/client/services/user/user.management.js +2 -2
  159. package/src/client/ssr/pages/CyberiaServerMetrics.js +1 -1
  160. package/src/grpc/cyberia/OFF_CHAIN_ECONOMY.md +305 -0
  161. package/src/grpc/cyberia/README.md +326 -0
  162. package/src/grpc/cyberia/grpc-server.js +530 -0
  163. package/src/index.js +24 -1
  164. package/src/runtime/express/Dockerfile +4 -0
  165. package/src/runtime/express/Express.js +18 -1
  166. package/src/runtime/lampp/Dockerfile +13 -2
  167. package/src/runtime/lampp/Lampp.js +27 -4
  168. package/src/runtime/wp/Dockerfile +68 -0
  169. package/src/runtime/wp/Wp.js +639 -0
  170. package/src/server/auth.js +24 -1
  171. package/src/server/backup.js +37 -9
  172. package/src/server/client-build-docs.js +9 -2
  173. package/src/server/client-build.js +31 -31
  174. package/src/server/client-formatted.js +109 -57
  175. package/src/server/conf.js +24 -9
  176. package/src/server/cron.js +25 -23
  177. package/src/server/dns.js +2 -1
  178. package/src/server/ipfs-client.js +24 -1
  179. package/src/server/object-layer.js +149 -108
  180. package/src/server/peer.js +8 -0
  181. package/src/server/runtime.js +25 -1
  182. package/src/server/semantic-layer-generator-floor.js +359 -0
  183. package/src/server/semantic-layer-generator-skin.js +1294 -0
  184. package/src/server/semantic-layer-generator.js +116 -555
  185. package/src/server/start.js +2 -2
  186. package/src/ws/IoInterface.js +1 -10
  187. package/src/ws/IoServer.js +14 -33
  188. package/src/ws/core/channels/core.ws.chat.js +65 -20
  189. package/src/ws/core/channels/core.ws.mailer.js +113 -32
  190. package/src/ws/core/channels/core.ws.stream.js +90 -31
  191. package/src/ws/core/core.ws.connection.js +12 -33
  192. package/src/ws/core/core.ws.emit.js +10 -26
  193. package/src/ws/core/core.ws.server.js +25 -58
  194. package/src/ws/default/channels/default.ws.main.js +53 -12
  195. package/src/ws/default/default.ws.connection.js +26 -13
  196. package/src/ws/default/default.ws.server.js +30 -12
  197. package/src/client/components/cryptokoyn/CommonCryptokoyn.js +0 -29
  198. package/src/client/components/cryptokoyn/ElementsCryptokoyn.js +0 -38
  199. package/src/client/components/cyberia-portal/ElementsCyberiaPortal.js +0 -38
  200. package/src/client/components/default/ElementsDefault.js +0 -38
  201. package/src/client/components/itemledger/CommonItemledger.js +0 -29
  202. package/src/client/components/itemledger/ElementsItemledger.js +0 -38
  203. package/src/client/components/underpost/CommonUnderpost.js +0 -29
  204. package/src/client/components/underpost/ElementsUnderpost.js +0 -38
  205. package/src/ws/core/management/core.ws.chat.js +0 -8
  206. package/src/ws/core/management/core.ws.mailer.js +0 -16
  207. package/src/ws/core/management/core.ws.stream.js +0 -8
  208. package/src/ws/default/management/default.ws.main.js +0 -8
package/src/cli/db.js CHANGED
@@ -13,23 +13,9 @@ import fs from 'fs-extra';
13
13
  import { DataBaseProvider } from '../db/DataBaseProvider.js';
14
14
  import { loadReplicas, pathPortAssignmentFactory, loadCronDeployEnv } from '../server/conf.js';
15
15
  import Underpost from '../index.js';
16
+ import { timer } from '../client/components/core/CommonJs.js';
16
17
  const logger = loggerFactory(import.meta);
17
18
 
18
- /**
19
- * Redacts credentials from shell command strings before logging.
20
- * Masks passwords in `-p<password>`, `--password=<password>`, and `-P <password>` patterns.
21
- * @param {string} cmd - The raw command string.
22
- * @memberof UnderpostDB
23
- * @returns {string} The command with credentials replaced by `***`.
24
- */
25
- const sanitizeCommand = (cmd) => {
26
- if (typeof cmd !== 'string') return cmd;
27
- return cmd
28
- .replace(/-p['"]?[^\s'"]+/g, '-p***')
29
- .replace(/--password=['"]?[^\s'"]+/g, '--password=***')
30
- .replace(/-P\s+['"]?[^\s'"]+/g, '-P ***');
31
- };
32
-
33
19
  /**
34
20
  * Constants for database operations
35
21
  * @constant {number} MAX_BACKUP_RETENTION - Maximum number of backups to retain
@@ -98,132 +84,6 @@ class UnderpostDB {
98
84
  * @memberof UnderpostDB
99
85
  */
100
86
  static API = {
101
- /**
102
- * Helper: Gets filtered pods based on criteria.
103
- * @method _getFilteredPods
104
- * @memberof UnderpostDB
105
- * @param {Object} criteria - Filter criteria.
106
- * @param {string} [criteria.podNames] - Comma-separated pod name patterns.
107
- * @param {string} [criteria.namespace='default'] - Kubernetes namespace.
108
- * @param {string} [criteria.deployId] - Deployment ID pattern.
109
- * @return {Array<PodInfo>} Filtered pod list.
110
- */
111
- _getFilteredPods(criteria = {}) {
112
- const { podNames, namespace = 'default', deployId } = criteria;
113
-
114
- try {
115
- // Get all pods using Underpost.deploy.get
116
- let pods = Underpost.deploy.get(deployId || '', 'pods', namespace);
117
-
118
- // Filter by pod names if specified
119
- if (podNames) {
120
- const patterns = podNames.split(',').map((p) => p.trim());
121
- pods = pods.filter((pod) => {
122
- return patterns.some((pattern) => {
123
- // Support wildcards
124
- const regex = new RegExp('^' + pattern.replace(/\*/g, '.*') + '$');
125
- return regex.test(pod.NAME);
126
- });
127
- });
128
- }
129
-
130
- logger.info(`Found ${pods.length} pod(s) matching criteria`, { criteria, podNames: pods.map((p) => p.NAME) });
131
- return pods;
132
- } catch (error) {
133
- logger.error('Error filtering pods', { error: error.message, criteria });
134
- return [];
135
- }
136
- },
137
-
138
- /**
139
- * Helper: Executes kubectl command with error handling.
140
- * @method _executeKubectl
141
- * @memberof UnderpostDB
142
- * @param {string} command - kubectl command to execute.
143
- * @param {Object} [options={}] - Execution options.
144
- * @param {string} [options.context=''] - Command context for logging.
145
- * @return {string|null} Command output or null on error.
146
- */
147
- _executeKubectl(command, options = {}) {
148
- const { context = '' } = options;
149
-
150
- try {
151
- logger.info(`Executing kubectl command`, { command: sanitizeCommand(command), context });
152
- return shellExec(command, { stdout: true, disableLog: true });
153
- } catch (error) {
154
- logger.error(`kubectl command failed`, { command: sanitizeCommand(command), error: error.message, context });
155
- throw error;
156
- }
157
- },
158
-
159
- /**
160
- * Helper: Copies file to pod.
161
- * @method _copyToPod
162
- * @memberof UnderpostDB
163
- * @param {Object} params - Copy parameters.
164
- * @param {string} params.sourcePath - Source file path.
165
- * @param {string} params.podName - Target pod name.
166
- * @param {string} params.namespace - Pod namespace.
167
- * @param {string} params.destPath - Destination path in pod.
168
- * @return {boolean} Success status.
169
- */
170
- _copyToPod({ sourcePath, podName, namespace, destPath }) {
171
- try {
172
- const command = `sudo kubectl cp ${sourcePath} ${namespace}/${podName}:${destPath}`;
173
- Underpost.db._executeKubectl(command, { context: `copy to pod ${podName}` });
174
- return true;
175
- } catch (error) {
176
- logger.error('Failed to copy file to pod', { sourcePath, podName, destPath, error: error.message });
177
- return false;
178
- }
179
- },
180
-
181
- /**
182
- * Helper: Copies file from pod.
183
- * @method _copyFromPod
184
- * @memberof UnderpostDB
185
- * @param {Object} params - Copy parameters.
186
- * @param {string} params.podName - Source pod name.
187
- * @param {string} params.namespace - Pod namespace.
188
- * @param {string} params.sourcePath - Source path in pod.
189
- * @param {string} params.destPath - Destination file path.
190
- * @return {boolean} Success status.
191
- */
192
- _copyFromPod({ podName, namespace, sourcePath, destPath }) {
193
- try {
194
- const command = `sudo kubectl cp ${namespace}/${podName}:${sourcePath} ${destPath}`;
195
- Underpost.db._executeKubectl(command, { context: `copy from pod ${podName}` });
196
- return true;
197
- } catch (error) {
198
- logger.error('Failed to copy file from pod', { podName, sourcePath, destPath, error: error.message });
199
- return false;
200
- }
201
- },
202
-
203
- /**
204
- * Helper: Executes command in pod.
205
- * @method _execInPod
206
- * @memberof UnderpostDB
207
- * @param {Object} params - Execution parameters.
208
- * @param {string} params.podName - Pod name.
209
- * @param {string} params.namespace - Pod namespace.
210
- * @param {string} params.command - Command to execute.
211
- * @return {string|null} Command output or null.
212
- */
213
- _execInPod({ podName, namespace, command }) {
214
- try {
215
- const kubectlCmd = `sudo kubectl exec -n ${namespace} -i ${podName} -- sh -c "${command}"`;
216
- return Underpost.db._executeKubectl(kubectlCmd, { context: `exec in pod ${podName}` });
217
- } catch (error) {
218
- logger.error('Failed to execute command in pod', {
219
- podName,
220
- command: sanitizeCommand(command),
221
- error: error.message,
222
- });
223
- throw error;
224
- }
225
- },
226
-
227
87
  /**
228
88
  * Helper: Resolves the latest backup timestamp from an existing backup directory.
229
89
  * Scans the directory for numeric (epoch) sub-folders and returns the most recent one.
@@ -239,76 +99,6 @@ class UnderpostDB {
239
99
  return entries.sort((a, b) => parseInt(b) - parseInt(a))[0];
240
100
  },
241
101
 
242
- /**
243
- * Helper: Manages Git repository for backups.
244
- * @method _manageGitRepo
245
- * @memberof UnderpostDB
246
- * @param {Object} params - Git parameters.
247
- * @param {string} params.repoName - Repository name.
248
- * @param {string} params.operation - Operation (clone, pull, commit, push).
249
- * @param {string} [params.message=''] - Commit message.
250
- * @param {boolean} [params.forceClone=false] - Force remove and re-clone repository.
251
- * @return {boolean} Success status.
252
- */
253
- _manageGitRepo({ repoName, operation, message = '', forceClone = false }) {
254
- try {
255
- const username = process.env.GITHUB_USERNAME;
256
- if (!username) {
257
- logger.error('GITHUB_USERNAME environment variable not set');
258
- return false;
259
- }
260
-
261
- const repoPath = `../${repoName}`;
262
-
263
- switch (operation) {
264
- case 'clone':
265
- if (forceClone && fs.existsSync(repoPath)) {
266
- logger.info(`Force clone enabled, removing existing repository: ${repoName}`);
267
- fs.removeSync(repoPath);
268
- }
269
- if (!fs.existsSync(repoPath)) {
270
- shellExec(`cd .. && underpost clone ${username}/${repoName}`);
271
- logger.info(`Cloned repository: ${repoName}`);
272
- }
273
- break;
274
-
275
- case 'pull':
276
- if (fs.existsSync(repoPath)) {
277
- shellExec(`cd ${repoPath} && git checkout . && git clean -f -d`);
278
- shellExec(`cd ${repoPath} && underpost pull . ${username}/${repoName}`, {
279
- silent: true,
280
- });
281
- logger.info(`Pulled repository: ${repoName}`);
282
- }
283
- break;
284
-
285
- case 'commit':
286
- if (fs.existsSync(repoPath)) {
287
- shellExec(`cd ${repoPath} && git add .`);
288
- shellExec(`underpost cmt ${repoPath} backup '' '${message}'`);
289
- logger.info(`Committed to repository: ${repoName}`, { message });
290
- }
291
- break;
292
-
293
- case 'push':
294
- if (fs.existsSync(repoPath)) {
295
- shellExec(`cd ${repoPath} && underpost push . ${username}/${repoName}`, { silent: true });
296
- logger.info(`Pushed repository: ${repoName}`);
297
- }
298
- break;
299
-
300
- default:
301
- logger.warn(`Unknown git operation: ${operation}`);
302
- return false;
303
- }
304
-
305
- return true;
306
- } catch (error) {
307
- logger.error(`Git operation failed`, { repoName, operation, error: error.message });
308
- return false;
309
- }
310
- },
311
-
312
102
  /**
313
103
  * Helper: Performs MariaDB import operation.
314
104
  * @method _importMariaDB
@@ -329,8 +119,20 @@ class UnderpostDB {
329
119
 
330
120
  logger.info('Importing MariaDB database', { podName, dbName });
331
121
 
122
+ // Always ensure the database exists first — required for WP even when no backup is available
123
+ Underpost.kubectl.run(
124
+ `kubectl exec -n ${namespace} -i ${podName} -- mariadb -p${password} -e 'CREATE DATABASE IF NOT EXISTS ${dbName};'`,
125
+ { context: `create database ${dbName}` },
126
+ );
127
+
128
+ // If no SQL file is available, the empty database is enough — return early
129
+ if (!sqlPath || !fs.existsSync(sqlPath)) {
130
+ logger.warn('No SQL backup file found — empty database ensured', { podName, dbName, sqlPath });
131
+ return true;
132
+ }
133
+
332
134
  // Remove existing SQL file in container
333
- Underpost.db._execInPod({
135
+ Underpost.kubectl.exec({
334
136
  podName,
335
137
  namespace,
336
138
  command: `rm -rf ${containerSqlPath}`,
@@ -338,7 +140,7 @@ class UnderpostDB {
338
140
 
339
141
  // Copy SQL file to pod
340
142
  if (
341
- !Underpost.db._copyToPod({
143
+ !Underpost.kubectl.cpTo({
342
144
  sourcePath: sqlPath,
343
145
  podName,
344
146
  namespace,
@@ -348,15 +150,9 @@ class UnderpostDB {
348
150
  return false;
349
151
  }
350
152
 
351
- // Create database if it doesn't exist
352
- Underpost.db._executeKubectl(
353
- `kubectl exec -n ${namespace} -i ${podName} -- mariadb -p${password} -e 'CREATE DATABASE IF NOT EXISTS ${dbName};'`,
354
- { context: `create database ${dbName}` },
355
- );
356
-
357
153
  // Import SQL file
358
154
  const importCmd = `mariadb -u ${user} -p${password} ${dbName} < ${containerSqlPath}`;
359
- Underpost.db._execInPod({ podName, namespace, command: importCmd });
155
+ Underpost.kubectl.exec({ podName, namespace, command: importCmd });
360
156
 
361
157
  logger.info('Successfully imported MariaDB database', { podName, dbName });
362
158
  return true;
@@ -387,7 +183,7 @@ class UnderpostDB {
387
183
  logger.info('Exporting MariaDB database', { podName, dbName });
388
184
 
389
185
  // Remove existing SQL file in container
390
- Underpost.db._execInPod({
186
+ Underpost.kubectl.exec({
391
187
  podName,
392
188
  namespace,
393
189
  command: `rm -rf ${containerSqlPath}`,
@@ -395,11 +191,11 @@ class UnderpostDB {
395
191
 
396
192
  // Dump database
397
193
  const dumpCmd = `mariadb-dump --user=${user} --password=${password} --lock-tables ${dbName} > ${containerSqlPath}`;
398
- Underpost.db._execInPod({ podName, namespace, command: dumpCmd });
194
+ Underpost.kubectl.exec({ podName, namespace, command: dumpCmd });
399
195
 
400
196
  // Copy SQL file from pod
401
197
  if (
402
- !Underpost.db._copyFromPod({
198
+ !Underpost.kubectl.cpFrom({
403
199
  podName,
404
200
  namespace,
405
201
  sourcePath: containerSqlPath,
@@ -442,8 +238,18 @@ class UnderpostDB {
442
238
 
443
239
  logger.info('Importing MongoDB database', { podName, dbName });
444
240
 
241
+ // If no BSON directory is available, skip — MongoDB creates the DB on first write
242
+ if (!bsonPath || !fs.existsSync(bsonPath)) {
243
+ logger.warn('No BSON backup directory found — database will be created on first write', {
244
+ podName,
245
+ dbName,
246
+ bsonPath,
247
+ });
248
+ return true;
249
+ }
250
+
445
251
  // Remove existing BSON directory in container
446
- Underpost.db._execInPod({
252
+ Underpost.kubectl.exec({
447
253
  podName,
448
254
  namespace,
449
255
  command: `rm -rf ${containerBsonPath}`,
@@ -451,7 +257,7 @@ class UnderpostDB {
451
257
 
452
258
  // Copy BSON directory to pod
453
259
  if (
454
- !Underpost.db._copyToPod({
260
+ !Underpost.kubectl.cpTo({
455
261
  sourcePath: bsonPath,
456
262
  podName,
457
263
  namespace,
@@ -465,7 +271,7 @@ class UnderpostDB {
465
271
  const restoreCmd = `mongorestore -d ${dbName} ${containerBsonPath}${drop ? ' --drop' : ''}${
466
272
  preserveUUID ? ' --preserveUUID' : ''
467
273
  }`;
468
- Underpost.db._execInPod({ podName, namespace, command: restoreCmd });
274
+ Underpost.kubectl.exec({ podName, namespace, command: restoreCmd });
469
275
 
470
276
  logger.info('Successfully imported MongoDB database', { podName, dbName });
471
277
  return true;
@@ -495,7 +301,7 @@ class UnderpostDB {
495
301
  logger.info('Exporting MongoDB database', { podName, dbName, collections });
496
302
 
497
303
  // Remove existing BSON directory in container
498
- Underpost.db._execInPod({
304
+ Underpost.kubectl.exec({
499
305
  podName,
500
306
  namespace,
501
307
  command: `rm -rf ${containerBsonPath}`,
@@ -506,16 +312,16 @@ class UnderpostDB {
506
312
  const collectionList = collections.split(',').map((c) => c.trim());
507
313
  for (const collection of collectionList) {
508
314
  const dumpCmd = `mongodump -d ${dbName} --collection ${collection} -o /`;
509
- Underpost.db._execInPod({ podName, namespace, command: dumpCmd });
315
+ Underpost.kubectl.exec({ podName, namespace, command: dumpCmd });
510
316
  }
511
317
  } else {
512
318
  const dumpCmd = `mongodump -d ${dbName} -o /`;
513
- Underpost.db._execInPod({ podName, namespace, command: dumpCmd });
319
+ Underpost.kubectl.exec({ podName, namespace, command: dumpCmd });
514
320
  }
515
321
 
516
322
  // Copy BSON directory from pod
517
323
  if (
518
- !Underpost.db._copyFromPod({
324
+ !Underpost.kubectl.cpFrom({
519
325
  podName,
520
326
  namespace,
521
327
  sourcePath: containerBsonPath,
@@ -743,6 +549,7 @@ class UnderpostDB {
743
549
  * @param {boolean} [options.k3s=false] - k3s cluster flag.
744
550
  * @param {boolean} [options.kubeadm=false] - kubeadm cluster flag.
745
551
  * @param {boolean} [options.kind=false] - kind cluster flag.
552
+ * @param {boolean} [options.repoBackup=false] - Backs up repositories (git commit+push) inside deployment pods via kubectl exec.
746
553
  * @return {Promise<void>} Resolves when operation is complete.
747
554
  */
748
555
  async callback(
@@ -771,346 +578,375 @@ class UnderpostDB {
771
578
  k3s: false,
772
579
  kubeadm: false,
773
580
  kind: false,
581
+ repoBackup: false,
774
582
  },
775
583
  ) {
776
- loadCronDeployEnv();
777
- const newBackupTimestamp = new Date().getTime();
778
- const namespace = options.ns && typeof options.ns === 'string' ? options.ns : 'default';
779
-
780
- if (deployList === 'dd') deployList = fs.readFileSync(`./engine-private/deploy/dd.router`, 'utf8');
781
-
782
- // Handle clean-fs-collection operation
783
- if (options.cleanFsCollection || options.cleanFsDryRun) {
784
- logger.info('Starting File collection cleanup operation', { deployList });
785
- await Underpost.db.cleanFsCollection(deployList, {
786
- hosts: options.hosts,
787
- paths: options.paths,
788
- dryRun: options.cleanFsDryRun,
789
- });
790
- return;
791
- }
584
+ // Ensure engine-private is available (clone if inside a deployment
585
+ // container where globalSecretClean has already removed it).
586
+ const firstDeployId = deployList !== 'dd' ? deployList.split(',')[0].trim() : '';
587
+ try {
588
+ loadCronDeployEnv();
589
+ const newBackupTimestamp = new Date().getTime();
590
+ const namespace = options.ns && typeof options.ns === 'string' ? options.ns : 'default';
591
+
592
+ if (deployList === 'dd') deployList = fs.readFileSync(`./engine-private/deploy/dd.router`, 'utf8');
593
+
594
+ // Handle repository backup (git commit+push inside deployment pod)
595
+ if (options.repoBackup) {
596
+ const namespace = options.ns && typeof options.ns === 'string' ? options.ns : 'default';
597
+ for (const _deployId of deployList.split(',')) {
598
+ const deployId = _deployId.trim();
599
+ if (!deployId) continue;
600
+ logger.info('Starting pod repository backup', { deployId, namespace });
601
+ Underpost.repo.backupPodRepositories({
602
+ deployId,
603
+ namespace,
604
+ env: options.dev ? 'development' : 'production',
605
+ });
606
+ }
607
+ return;
608
+ }
792
609
 
793
- logger.info('Starting database operation', {
794
- deployList,
795
- namespace,
796
- import: options.import,
797
- export: options.export,
798
- });
610
+ // Handle clean-fs-collection operation
611
+ if (options.cleanFsCollection || options.cleanFsDryRun) {
612
+ logger.info('Starting File collection cleanup operation', { deployList });
613
+ await Underpost.db.cleanFsCollection(deployList, {
614
+ hosts: options.hosts,
615
+ paths: options.paths,
616
+ dryRun: options.cleanFsDryRun,
617
+ });
618
+ return;
619
+ }
620
+
621
+ logger.info('Starting database operation', {
622
+ deployList,
623
+ namespace,
624
+ import: options.import,
625
+ export: options.export,
626
+ });
799
627
 
800
- if (options.primaryPodEnsure) {
801
- const primaryPodName = Underpost.db.getMongoPrimaryPodName({ namespace, podName: options.primaryPodEnsure });
802
- if (!primaryPodName) {
803
- const baseCommand = options.dev ? 'node bin' : 'underpost';
804
- const baseClusterCommand = options.dev ? ' --dev' : '';
805
- let clusterFlag = options.k3s ? ' --k3s' : options.kubeadm ? ' --kubeadm' : '';
806
- shellExec(`${baseCommand} cluster${baseClusterCommand}${clusterFlag} --mongodb`);
628
+ if (options.primaryPodEnsure) {
629
+ const primaryPodName = Underpost.db.getMongoPrimaryPodName({ namespace, podName: options.primaryPodEnsure });
630
+ if (!primaryPodName) {
631
+ const baseCommand = options.dev ? 'node bin' : 'underpost';
632
+ const baseClusterCommand = options.dev ? ' --dev' : '';
633
+ let clusterFlag = options.k3s ? ' --k3s' : options.kubeadm ? ' --kubeadm' : '';
634
+ shellExec(`${baseCommand} cluster${baseClusterCommand}${clusterFlag} --mongodb`);
635
+ }
636
+ return;
807
637
  }
808
- return;
809
- }
810
638
 
811
- // Track processed repositories to avoid duplicate Git operations
812
- const processedRepos = new Set();
813
- // Track processed host+path combinations to avoid duplicates
814
- const processedHostPaths = new Set();
639
+ // Track processed repositories to avoid duplicate Git operations
640
+ const processedRepos = new Set();
641
+ // Track processed host+path combinations to avoid duplicates
642
+ const processedHostPaths = new Set();
815
643
 
816
- for (const _deployId of deployList.split(',')) {
817
- const deployId = _deployId.trim();
818
- if (!deployId) continue;
644
+ for (const _deployId of deployList.split(',')) {
645
+ const deployId = _deployId.trim();
646
+ if (!deployId) continue;
819
647
 
820
- logger.info('Processing deployment', { deployId });
648
+ logger.info('Processing deployment', { deployId });
821
649
 
822
- /** @type {Object.<string, Object.<string, DatabaseConfig>>} */
823
- const dbs = {};
824
- const repoName = `engine-${deployId.includes('dd-') ? deployId.split('dd-')[1] : deployId}-cron-backups`;
650
+ /** @type {Object.<string, Object.<string, DatabaseConfig>>} */
651
+ const dbs = {};
652
+ const repoName = `engine-${deployId.includes('dd-') ? deployId.split('dd-')[1] : deployId}-cron-backups`;
825
653
 
826
- // Load server configuration
827
- const confServerPath = `./engine-private/conf/${deployId}/conf.server.json`;
828
- if (!fs.existsSync(confServerPath)) {
829
- logger.error('Configuration file not found', { path: confServerPath });
830
- continue;
831
- }
654
+ // Load server configuration
655
+ const confServerPath = `./engine-private/conf/${deployId}/conf.server.json`;
656
+ if (!fs.existsSync(confServerPath)) {
657
+ logger.error('Configuration file not found', { path: confServerPath });
658
+ continue;
659
+ }
832
660
 
833
- const confServer = loadConfServerJson(confServerPath, { resolve: true });
834
-
835
- // Build database configuration map
836
- for (const host of Object.keys(confServer)) {
837
- for (const path of Object.keys(confServer[host])) {
838
- const { db } = confServer[host][path];
839
- if (db) {
840
- const { provider, name, user, password } = db;
841
- if (!dbs[provider]) dbs[provider] = {};
842
-
843
- if (!(name in dbs[provider])) {
844
- dbs[provider][name] = {
845
- user,
846
- password,
847
- hostFolder: host + path.replaceAll('/', '-'),
848
- host,
849
- path,
850
- };
661
+ const confServer = loadConfServerJson(confServerPath, { resolve: true });
662
+
663
+ // Build database configuration map
664
+ for (const host of Object.keys(confServer)) {
665
+ for (const path of Object.keys(confServer[host])) {
666
+ const { db } = confServer[host][path];
667
+ if (db) {
668
+ const { provider, name, user, password } = db;
669
+ if (!dbs[provider]) dbs[provider] = {};
670
+
671
+ if (!(name in dbs[provider])) {
672
+ dbs[provider][name] = {
673
+ user,
674
+ password,
675
+ hostFolder: host + path.replaceAll('/', '-'),
676
+ host,
677
+ path,
678
+ };
679
+ }
851
680
  }
852
681
  }
853
682
  }
854
- }
855
-
856
- // Handle Git operations - execute only once per repository
857
- if (!processedRepos.has(repoName)) {
858
- logger.info('Processing Git operations for repository', { repoName, deployId });
859
- if (options.git === true) {
860
- Underpost.db._manageGitRepo({ repoName, operation: 'clone', forceClone: options.forceClone });
861
- Underpost.db._manageGitRepo({ repoName, operation: 'pull' });
862
- }
863
683
 
864
- if (options.macroRollbackExport) {
865
- // Only clone if not already done by git option above
866
- if (options.git !== true) {
867
- Underpost.db._manageGitRepo({ repoName, operation: 'clone', forceClone: options.forceClone });
868
- Underpost.db._manageGitRepo({ repoName, operation: 'pull' });
684
+ // Handle Git operations - execute only once per repository
685
+ if (!processedRepos.has(repoName)) {
686
+ logger.info('Processing Git operations for repository', { repoName, deployId });
687
+ if (options.git === true) {
688
+ Underpost.repo.manageBackupRepo({ repoName, operation: 'clone', forceClone: options.forceClone });
689
+ Underpost.repo.manageBackupRepo({ repoName, operation: 'pull' });
869
690
  }
870
691
 
871
- const nCommits = parseInt(options.macroRollbackExport);
872
- const repoPath = `../${repoName}`;
873
- const username = process.env.GITHUB_USERNAME;
874
-
875
- if (fs.existsSync(repoPath) && username) {
876
- logger.info('Executing macro rollback export', { repoName, nCommits });
877
- shellExec(`cd ${repoPath} && underpost cmt . reset ${nCommits}`);
878
- shellExec(`cd ${repoPath} && git reset`);
879
- shellExec(`cd ${repoPath} && git checkout .`);
880
- shellExec(`cd ${repoPath} && git clean -f -d`);
881
- shellExec(`cd ${repoPath} && underpost push . ${username}/${repoName} -f`);
882
- } else {
883
- if (!username) logger.error('GITHUB_USERNAME environment variable not set');
884
- logger.warn('Repository not found for macro rollback', { repoPath });
885
- }
886
- }
692
+ if (options.macroRollbackExport) {
693
+ // Only clone if not already done by git option above
694
+ if (options.git !== true) {
695
+ Underpost.repo.manageBackupRepo({ repoName, operation: 'clone', forceClone: options.forceClone });
696
+ Underpost.repo.manageBackupRepo({ repoName, operation: 'pull' });
697
+ }
887
698
 
888
- processedRepos.add(repoName);
889
- logger.info('Repository marked as processed', { repoName });
890
- } else {
891
- logger.info('Skipping Git operations for already processed repository', { repoName, deployId });
892
- }
699
+ const nCommits = parseInt(options.macroRollbackExport);
700
+ const repoPath = `../${repoName}`;
701
+ const username = process.env.GITHUB_USERNAME;
702
+
703
+ if (fs.existsSync(repoPath) && username) {
704
+ logger.info('Executing macro rollback export', { repoName, nCommits });
705
+ shellExec(`cd ${repoPath} && underpost cmt . reset ${nCommits}`);
706
+ shellExec(`cd ${repoPath} && git reset`);
707
+ shellExec(`cd ${repoPath} && git checkout .`);
708
+ shellExec(`cd ${repoPath} && git clean -f -d`);
709
+ shellExec(`cd ${repoPath} && underpost push . ${username}/${repoName} -f`);
710
+ } else {
711
+ if (!username) logger.error('GITHUB_USERNAME environment variable not set');
712
+ logger.warn('Repository not found for macro rollback', { repoPath });
713
+ }
714
+ }
893
715
 
894
- // Process each database provider
895
- for (const provider of Object.keys(dbs)) {
896
- for (const dbName of Object.keys(dbs[provider])) {
897
- const { hostFolder, user, password, host, path } = dbs[provider][dbName];
716
+ processedRepos.add(repoName);
717
+ logger.info('Repository marked as processed', { repoName });
718
+ } else {
719
+ logger.info('Skipping Git operations for already processed repository', { repoName, deployId });
720
+ }
898
721
 
899
- // Create unique identifier for host+path combination
900
- const hostPathKey = `${deployId}:${host}:${path}`;
722
+ // Process each database provider
723
+ for (const provider of Object.keys(dbs)) {
724
+ for (const dbName of Object.keys(dbs[provider])) {
725
+ const { hostFolder, user, password, host, path } = dbs[provider][dbName];
901
726
 
902
- // Skip if this host+path combination was already processed
903
- if (processedHostPaths.has(hostPathKey)) {
904
- logger.info('Skipping already processed host/path', { dbName, host, path, deployId });
905
- continue;
906
- }
727
+ // Create unique identifier for host+path combination
728
+ const hostPathKey = `${deployId}:${host}:${path}`;
907
729
 
908
- // Filter by hosts and paths if specified
909
- if (
910
- (options.hosts &&
911
- !options.hosts
912
- .split(',')
913
- .map((h) => h.trim())
914
- .includes(host)) ||
915
- (options.paths &&
916
- !options.paths
917
- .split(',')
918
- .map((p) => p.trim())
919
- .includes(path))
920
- ) {
921
- logger.info('Skipping database due to host/path filter', { dbName, host, path });
922
- continue;
923
- }
730
+ // Skip if this host+path combination was already processed
731
+ if (processedHostPaths.has(hostPathKey)) {
732
+ logger.info('Skipping already processed host/path', { dbName, host, path, deployId });
733
+ continue;
734
+ }
924
735
 
925
- if (!hostFolder) {
926
- logger.warn('No hostFolder defined for database', { dbName, provider });
927
- continue;
928
- }
736
+ // Filter by hosts and paths if specified
737
+ if (
738
+ (options.hosts &&
739
+ !options.hosts
740
+ .split(',')
741
+ .map((h) => h.trim())
742
+ .includes(host)) ||
743
+ (options.paths &&
744
+ !options.paths
745
+ .split(',')
746
+ .map((p) => p.trim())
747
+ .includes(path))
748
+ ) {
749
+ logger.info('Skipping database due to host/path filter', { dbName, host, path });
750
+ continue;
751
+ }
929
752
 
930
- logger.info('Processing database', { hostFolder, provider, dbName, deployId });
753
+ if (!hostFolder) {
754
+ logger.warn('No hostFolder defined for database', { dbName, provider });
755
+ continue;
756
+ }
931
757
 
932
- const latestBackupTimestamp = Underpost.db._getLatestBackupTimestamp(`../${repoName}/${hostFolder}`);
758
+ logger.info('Processing database', { hostFolder, provider, dbName, deployId });
933
759
 
934
- dbs[provider][dbName].currentBackupTimestamp = latestBackupTimestamp;
760
+ const latestBackupTimestamp = Underpost.db._getLatestBackupTimestamp(`../${repoName}/${hostFolder}`);
935
761
 
936
- const currentTimestamp = latestBackupTimestamp || newBackupTimestamp;
937
- const sqlContainerPath = `/home/${dbName}.sql`;
938
- const fromPartsPath = `../${repoName}/${hostFolder}/${currentTimestamp}/${dbName}-parths.json`;
939
- const toSqlPath = `../${repoName}/${hostFolder}/${currentTimestamp}/${dbName}.sql`;
940
- const toNewSqlPath = `../${repoName}/${hostFolder}/${newBackupTimestamp}/${dbName}.sql`;
941
- const toBsonPath = `../${repoName}/${hostFolder}/${currentTimestamp}/${dbName}`;
942
- const toNewBsonPath = `../${repoName}/${hostFolder}/${newBackupTimestamp}/${dbName}`;
762
+ dbs[provider][dbName].currentBackupTimestamp = latestBackupTimestamp;
943
763
 
944
- // Merge split SQL files if needed for import
945
- if (options.import === true && fs.existsSync(fromPartsPath) && !fs.existsSync(toSqlPath)) {
946
- const names = JSON.parse(fs.readFileSync(fromPartsPath, 'utf8')).map((_path) => {
947
- return `../${repoName}/${hostFolder}/${currentTimestamp}/${_path.split('/').pop()}`;
948
- });
949
- logger.info('Merging backup parts', { fromPartsPath, toSqlPath, parts: names.length });
950
- await mergeFile(names, toSqlPath);
951
- }
764
+ const currentTimestamp = latestBackupTimestamp || newBackupTimestamp;
765
+ const sqlContainerPath = `/home/${dbName}.sql`;
766
+ const fromPartsPath = `../${repoName}/${hostFolder}/${currentTimestamp}/${dbName}-parths.json`;
767
+ const toSqlPath = `../${repoName}/${hostFolder}/${currentTimestamp}/${dbName}.sql`;
768
+ const toNewSqlPath = `../${repoName}/${hostFolder}/${newBackupTimestamp}/${dbName}.sql`;
769
+ const toBsonPath = `../${repoName}/${hostFolder}/${currentTimestamp}/${dbName}`;
770
+ const toNewBsonPath = `../${repoName}/${hostFolder}/${newBackupTimestamp}/${dbName}`;
952
771
 
953
- // Get target pods based on provider and options
954
- let targetPods = [];
955
- const podCriteria = {
956
- podNames: options.podName,
957
- namespace,
958
- deployId: provider === 'mariadb' ? 'mariadb' : 'mongo',
959
- };
772
+ // Merge split SQL files if needed for import
773
+ if (options.import === true && fs.existsSync(fromPartsPath) && !fs.existsSync(toSqlPath)) {
774
+ const names = JSON.parse(fs.readFileSync(fromPartsPath, 'utf8')).map((_path) => {
775
+ return `../${repoName}/${hostFolder}/${currentTimestamp}/${_path.split('/').pop()}`;
776
+ });
777
+ logger.info('Merging backup parts', { fromPartsPath, toSqlPath, parts: names.length });
778
+ await mergeFile(names, toSqlPath);
779
+ }
960
780
 
961
- targetPods = Underpost.db._getFilteredPods(podCriteria);
781
+ // Get target pods based on provider and options
782
+ let targetPods = [];
783
+ const podCriteria = {
784
+ podNames: options.podName,
785
+ namespace,
786
+ deployId: provider === 'mariadb' ? 'mariadb' : 'mongo',
787
+ };
962
788
 
963
- // Fallback to default if no custom pods specified
964
- if (targetPods.length === 0 && !options.podName) {
965
- const defaultPods = Underpost.deploy.get(provider === 'mariadb' ? 'mariadb' : 'mongo', 'pods', namespace);
966
- console.log('defaultPods', defaultPods);
967
- targetPods = defaultPods;
968
- }
789
+ targetPods = Underpost.kubectl.getFilteredPods(podCriteria);
790
+
791
+ // Fallback to default if no custom pods specified
792
+ if (targetPods.length === 0 && !options.podName) {
793
+ const defaultPods = Underpost.kubectl.get(
794
+ provider === 'mariadb' ? 'mariadb' : 'mongo',
795
+ 'pods',
796
+ namespace,
797
+ );
798
+ console.log('defaultPods', defaultPods);
799
+ targetPods = defaultPods;
800
+ }
969
801
 
970
- if (targetPods.length === 0) {
971
- logger.warn('No pods found matching criteria', { provider, criteria: podCriteria });
972
- continue;
973
- }
802
+ if (targetPods.length === 0) {
803
+ logger.warn('No pods found matching criteria', { provider, criteria: podCriteria });
804
+ continue;
805
+ }
974
806
 
975
- // Handle primary pod detection for MongoDB
976
- let podsToProcess = [];
977
- if (provider === 'mongoose' && !options.allPods) {
978
- // For MongoDB, always use primary pod unless allPods is true
979
- if (!targetPods || targetPods.length === 0) {
980
- logger.warn('No MongoDB pods available to check for primary');
981
- podsToProcess = [];
982
- } else {
983
- const firstPod = targetPods[0].NAME;
984
- const primaryPodName = Underpost.db.getMongoPrimaryPodName({ namespace, podName: firstPod });
985
-
986
- if (primaryPodName) {
987
- const primaryPod = targetPods.find((p) => p.NAME === primaryPodName);
988
- if (primaryPod) {
989
- podsToProcess = [primaryPod];
990
- logger.info('Using MongoDB primary pod', { primaryPod: primaryPodName });
807
+ // Handle primary pod detection for MongoDB
808
+ let podsToProcess = [];
809
+ if (provider === 'mongoose' && !options.allPods) {
810
+ // For MongoDB, always use primary pod unless allPods is true
811
+ if (!targetPods || targetPods.length === 0) {
812
+ logger.warn('No MongoDB pods available to check for primary');
813
+ podsToProcess = [];
814
+ } else {
815
+ const firstPod = targetPods[0].NAME;
816
+ const primaryPodName = Underpost.db.getMongoPrimaryPodName({ namespace, podName: firstPod });
817
+
818
+ if (primaryPodName) {
819
+ const primaryPod = targetPods.find((p) => p.NAME === primaryPodName);
820
+ if (primaryPod) {
821
+ podsToProcess = [primaryPod];
822
+ logger.info('Using MongoDB primary pod', { primaryPod: primaryPodName });
823
+ } else {
824
+ logger.warn('Primary pod not in filtered list, using first pod', { primaryPodName });
825
+ podsToProcess = [targetPods[0]];
826
+ }
991
827
  } else {
992
- logger.warn('Primary pod not in filtered list, using first pod', { primaryPodName });
828
+ logger.warn('Could not detect primary pod, using first pod');
993
829
  podsToProcess = [targetPods[0]];
994
830
  }
995
- } else {
996
- logger.warn('Could not detect primary pod, using first pod');
997
- podsToProcess = [targetPods[0]];
998
831
  }
832
+ } else {
833
+ // For MariaDB or when allPods is true, limit to first pod unless allPods is true
834
+ podsToProcess = options.allPods === true ? targetPods : [targetPods[0]];
999
835
  }
1000
- } else {
1001
- // For MariaDB or when allPods is true, limit to first pod unless allPods is true
1002
- podsToProcess = options.allPods === true ? targetPods : [targetPods[0]];
1003
- }
1004
836
 
1005
- logger.info(`Processing ${podsToProcess.length} pod(s) for ${provider}`, {
1006
- dbName,
1007
- pods: podsToProcess.map((p) => p.NAME),
1008
- });
837
+ logger.info(`Processing ${podsToProcess.length} pod(s) for ${provider}`, {
838
+ dbName,
839
+ pods: podsToProcess.map((p) => p.NAME),
840
+ });
1009
841
 
1010
- // Process each pod
1011
- for (const pod of podsToProcess) {
1012
- logger.info('Processing pod', { podName: pod.NAME, node: pod.NODE, status: pod.STATUS });
1013
-
1014
- switch (provider) {
1015
- case 'mariadb': {
1016
- if (options.stats === true) {
1017
- const stats = Underpost.db._getMariaDBStats({
1018
- podName: pod.NAME,
1019
- namespace,
1020
- dbName,
1021
- user,
1022
- password,
1023
- });
1024
- if (stats) {
1025
- Underpost.db._displayStats({ provider, dbName, stats });
842
+ // Process each pod
843
+ for (const pod of podsToProcess) {
844
+ logger.info('Processing pod', { podName: pod.NAME, node: pod.NODE, status: pod.STATUS });
845
+
846
+ switch (provider) {
847
+ case 'mariadb': {
848
+ if (options.stats === true) {
849
+ const stats = Underpost.db._getMariaDBStats({
850
+ podName: pod.NAME,
851
+ namespace,
852
+ dbName,
853
+ user,
854
+ password,
855
+ });
856
+ if (stats) {
857
+ Underpost.db._displayStats({ provider, dbName, stats });
858
+ }
1026
859
  }
1027
- }
1028
860
 
1029
- if (options.import === true) {
1030
- Underpost.db._importMariaDB({
1031
- pod,
1032
- namespace,
1033
- dbName,
1034
- user,
1035
- password,
1036
- sqlPath: toSqlPath,
1037
- });
1038
- }
861
+ if (options.import === true) {
862
+ Underpost.db._importMariaDB({
863
+ pod,
864
+ namespace,
865
+ dbName,
866
+ user,
867
+ password,
868
+ sqlPath: toSqlPath,
869
+ });
870
+ }
1039
871
 
1040
- if (options.export === true) {
1041
- const outputPath = options.outPath || toNewSqlPath;
1042
- await Underpost.db._exportMariaDB({
1043
- pod,
1044
- namespace,
1045
- dbName,
1046
- user,
1047
- password,
1048
- outputPath,
1049
- });
872
+ if (options.export === true) {
873
+ const outputPath = options.outPath || toNewSqlPath;
874
+ await Underpost.db._exportMariaDB({
875
+ pod,
876
+ namespace,
877
+ dbName,
878
+ user,
879
+ password,
880
+ outputPath,
881
+ });
882
+ }
883
+ break;
1050
884
  }
1051
- break;
1052
- }
1053
885
 
1054
- case 'mongoose': {
1055
- if (options.stats === true) {
1056
- const stats = Underpost.db._getMongoStats({
1057
- podName: pod.NAME,
1058
- namespace,
1059
- dbName,
1060
- });
1061
- if (stats) {
1062
- Underpost.db._displayStats({ provider, dbName, stats });
886
+ case 'mongoose': {
887
+ if (options.stats === true) {
888
+ const stats = Underpost.db._getMongoStats({
889
+ podName: pod.NAME,
890
+ namespace,
891
+ dbName,
892
+ });
893
+ if (stats) {
894
+ Underpost.db._displayStats({ provider, dbName, stats });
895
+ }
1063
896
  }
1064
- }
1065
897
 
1066
- if (options.import === true) {
1067
- const bsonPath = options.outPath || toBsonPath;
1068
- Underpost.db._importMongoDB({
1069
- pod,
1070
- namespace,
1071
- dbName,
1072
- bsonPath,
1073
- drop: options.drop,
1074
- preserveUUID: options.preserveUUID,
1075
- });
1076
- }
898
+ if (options.import === true) {
899
+ const bsonPath = options.outPath || toBsonPath;
900
+ Underpost.db._importMongoDB({
901
+ pod,
902
+ namespace,
903
+ dbName,
904
+ bsonPath,
905
+ drop: options.drop,
906
+ preserveUUID: options.preserveUUID,
907
+ });
908
+ }
1077
909
 
1078
- if (options.export === true) {
1079
- const outputPath = options.outPath || toNewBsonPath;
1080
- Underpost.db._exportMongoDB({
1081
- pod,
1082
- namespace,
1083
- dbName,
1084
- outputPath,
1085
- collections: options.collections,
1086
- });
910
+ if (options.export === true) {
911
+ const outputPath = options.outPath || toNewBsonPath;
912
+ Underpost.db._exportMongoDB({
913
+ pod,
914
+ namespace,
915
+ dbName,
916
+ outputPath,
917
+ collections: options.collections,
918
+ });
919
+ }
920
+ break;
1087
921
  }
1088
- break;
1089
- }
1090
922
 
1091
- default:
1092
- logger.warn('Unsupported database provider', { provider });
1093
- break;
923
+ default:
924
+ logger.warn('Unsupported database provider', { provider });
925
+ break;
926
+ }
1094
927
  }
928
+
929
+ // Mark this host+path combination as processed
930
+ processedHostPaths.add(hostPathKey);
1095
931
  }
932
+ }
1096
933
 
1097
- // Mark this host+path combination as processed
1098
- processedHostPaths.add(hostPathKey);
934
+ // Commit and push to Git if enabled - execute only once per repository
935
+ if (options.export === true && options.git === true && !processedRepos.has(`${repoName}-committed`)) {
936
+ const commitMessage = `${new Date(newBackupTimestamp).toLocaleDateString()} ${new Date(
937
+ newBackupTimestamp,
938
+ ).toLocaleTimeString()}`;
939
+ Underpost.repo.manageBackupRepo({ repoName, operation: 'commit', message: commitMessage });
940
+ Underpost.repo.manageBackupRepo({ repoName, operation: 'push' });
941
+ processedRepos.add(`${repoName}-committed`);
1099
942
  }
1100
943
  }
1101
944
 
1102
- // Commit and push to Git if enabled - execute only once per repository
1103
- if (options.export === true && options.git === true && !processedRepos.has(`${repoName}-committed`)) {
1104
- const commitMessage = `${new Date(newBackupTimestamp).toLocaleDateString()} ${new Date(
1105
- newBackupTimestamp,
1106
- ).toLocaleTimeString()}`;
1107
- Underpost.db._manageGitRepo({ repoName, operation: 'commit', message: commitMessage });
1108
- Underpost.db._manageGitRepo({ repoName, operation: 'push' });
1109
- processedRepos.add(`${repoName}-committed`);
1110
- }
945
+ logger.info('Database operation completed successfully');
946
+ } catch (error) {
947
+ logger.error('Database operation failed', { error: error.message });
948
+ throw error;
1111
949
  }
1112
-
1113
- logger.info('Database operation completed successfully');
1114
950
  },
1115
951
 
1116
952
  /**
@@ -1122,6 +958,8 @@ class UnderpostDB {
1122
958
  * @param {string} [deployId=process.env.DEFAULT_DEPLOY_ID] - The deployment ID.
1123
959
  * @param {string} [host=process.env.DEFAULT_DEPLOY_HOST] - The host identifier.
1124
960
  * @param {string} [path=process.env.DEFAULT_DEPLOY_PATH] - The path identifier.
961
+ * @param {object} [options] - Options.
962
+ * @param {boolean} [options.dev=false] - Development mode flag.
1125
963
  * @return {Promise<void>} Resolves when metadata creation is complete.
1126
964
  * @throws {Error} If database configuration is invalid or connection fails.
1127
965
  */
@@ -1129,161 +967,181 @@ class UnderpostDB {
1129
967
  deployId = process.env.DEFAULT_DEPLOY_ID,
1130
968
  host = process.env.DEFAULT_DEPLOY_HOST,
1131
969
  path = process.env.DEFAULT_DEPLOY_PATH,
970
+ options = { dev: false },
1132
971
  ) {
1133
- loadCronDeployEnv();
1134
- deployId = deployId ? deployId : process.env.DEFAULT_DEPLOY_ID;
1135
- host = host ? host : process.env.DEFAULT_DEPLOY_HOST;
1136
- path = path ? path : process.env.DEFAULT_DEPLOY_PATH;
972
+ try {
973
+ loadCronDeployEnv();
974
+ deployId = deployId ? deployId : process.env.DEFAULT_DEPLOY_ID;
975
+ host = host ? host : process.env.DEFAULT_DEPLOY_HOST;
976
+ path = path ? path : process.env.DEFAULT_DEPLOY_PATH;
1137
977
 
1138
- logger.info('Creating cluster metadata', { deployId, host, path });
978
+ logger.info('Creating cluster metadata', { deployId, host, path });
1139
979
 
1140
- const env = 'production';
1141
- const deployListPath = './engine-private/deploy/dd.router';
980
+ const env = 'production';
981
+ const deployListPath = './engine-private/deploy/dd.router';
1142
982
 
1143
- if (!fs.existsSync(deployListPath)) {
1144
- logger.error('Deploy router file not found', { path: deployListPath });
1145
- throw new Error(`Deploy router file not found: ${deployListPath}`);
1146
- }
983
+ if (!fs.existsSync(deployListPath)) {
984
+ logger.error('Deploy router file not found', { path: deployListPath });
985
+ throw new Error(`Deploy router file not found: ${deployListPath}`);
986
+ }
1147
987
 
1148
- const deployList = fs.readFileSync(deployListPath, 'utf8').split(',');
988
+ const deployList = fs.readFileSync(deployListPath, 'utf8').split(',');
1149
989
 
1150
- const confServerPath = `./engine-private/conf/${deployId}/conf.server.json`;
1151
- if (!fs.existsSync(confServerPath)) {
1152
- logger.error('Server configuration not found', { path: confServerPath });
1153
- throw new Error(`Server configuration not found: ${confServerPath}`);
1154
- }
990
+ const confServerPath = `./engine-private/conf/${deployId}/conf.server.json`;
991
+ if (!fs.existsSync(confServerPath)) {
992
+ logger.error('Server configuration not found', { path: confServerPath });
993
+ throw new Error(`Server configuration not found: ${confServerPath}`);
994
+ }
1155
995
 
1156
- const { db } = loadConfServerJson(confServerPath, { resolve: true })[host][path];
996
+ const { db } = loadConfServerJson(confServerPath, { resolve: true })[host][path];
1157
997
 
1158
- try {
1159
- await DataBaseProvider.load({ apis: ['instance', 'cron'], host, path, db });
998
+ const maxRetries = 5;
999
+ const retryDelay = 3000;
1000
+ for (let attempt = 1; attempt <= maxRetries; attempt++) {
1001
+ try {
1002
+ await DataBaseProvider.load({ apis: ['instance', 'cron'], host, path, db });
1003
+ break;
1004
+ } catch (err) {
1005
+ if (attempt === maxRetries) {
1006
+ logger.error('Failed to connect to database after retries', { attempts: maxRetries, error: err.message });
1007
+ throw err;
1008
+ }
1009
+ logger.warn('Database connection failed, retrying...', { attempt, maxRetries, error: err.message });
1010
+ await timer(retryDelay);
1011
+ }
1012
+ }
1160
1013
 
1161
- /** @type {import('../api/instance/instance.model.js').InstanceModel} */
1162
- const Instance = DataBaseProvider.instance[`${host}${path}`].mongoose.models.Instance;
1014
+ try {
1015
+ /** @type {import('../api/instance/instance.model.js').InstanceModel} */
1016
+ const Instance = DataBaseProvider.instance[`${host}${path}`].mongoose.models.Instance;
1163
1017
 
1164
- await Instance.deleteMany();
1165
- logger.info('Cleared existing instance metadata');
1018
+ await Instance.deleteMany();
1019
+ logger.info('Cleared existing instance metadata');
1166
1020
 
1167
- for (const _deployId of deployList) {
1168
- const deployId = _deployId.trim();
1169
- if (!deployId) continue;
1021
+ for (const _deployId of deployList) {
1022
+ const deployId = _deployId.trim();
1023
+ if (!deployId) continue;
1170
1024
 
1171
- logger.info('Processing deployment for metadata', { deployId });
1025
+ logger.info('Processing deployment for metadata', { deployId });
1172
1026
 
1173
- const confServerPath = `./engine-private/conf/${deployId}/conf.server.json`;
1174
- if (!fs.existsSync(confServerPath)) {
1175
- logger.warn('Configuration not found for deployment', { deployId, path: confServerPath });
1176
- continue;
1177
- }
1027
+ const confServerPath = `./engine-private/conf/${deployId}/conf.server.json`;
1028
+ if (!fs.existsSync(confServerPath)) {
1029
+ logger.warn('Configuration not found for deployment', { deployId, path: confServerPath });
1030
+ continue;
1031
+ }
1178
1032
 
1179
- const confServer = loadReplicas(deployId, loadConfServerJson(confServerPath, { resolve: true }));
1180
- const router = await Underpost.deploy.routerFactory(deployId, env);
1181
- const pathPortAssignmentData = await pathPortAssignmentFactory(deployId, router, confServer);
1033
+ const confServer = loadReplicas(deployId, loadConfServerJson(confServerPath, { resolve: true }));
1034
+ const router = await Underpost.deploy.routerFactory(deployId, env);
1035
+ const pathPortAssignmentData = await pathPortAssignmentFactory(deployId, router, confServer);
1182
1036
 
1183
- for (const host of Object.keys(confServer)) {
1184
- for (const { path, port } of pathPortAssignmentData[host]) {
1185
- if (!confServer[host][path]) continue;
1037
+ for (const host of Object.keys(confServer)) {
1038
+ for (const { path, port } of pathPortAssignmentData[host]) {
1039
+ if (!confServer[host][path]) continue;
1186
1040
 
1187
- const { client, runtime, apis, peer } = confServer[host][path];
1041
+ const { client, runtime, apis, peer } = confServer[host][path];
1188
1042
 
1189
- // Save main instance
1190
- {
1191
- const body = {
1192
- deployId,
1193
- host,
1194
- path,
1195
- port,
1196
- client,
1197
- runtime,
1198
- apis,
1199
- };
1043
+ // Save main instance
1044
+ {
1045
+ const body = {
1046
+ deployId,
1047
+ host,
1048
+ path,
1049
+ port,
1050
+ client,
1051
+ runtime,
1052
+ apis,
1053
+ };
1054
+
1055
+ logger.info('Saving instance metadata', body);
1056
+ await new Instance(body).save();
1057
+ }
1200
1058
 
1201
- logger.info('Saving instance metadata', body);
1202
- await new Instance(body).save();
1059
+ // Save peer instance if exists
1060
+ if (peer) {
1061
+ const body = {
1062
+ deployId,
1063
+ host,
1064
+ path: path === '/' ? '/peer' : `${path}/peer`,
1065
+ port: port + 1,
1066
+ runtime: 'nodejs',
1067
+ };
1068
+
1069
+ logger.info('Saving peer instance metadata', body);
1070
+ await new Instance(body).save();
1071
+ }
1203
1072
  }
1073
+ }
1204
1074
 
1205
- // Save peer instance if exists
1206
- if (peer) {
1075
+ // Process additional instances
1076
+ const confInstancesPath = `./engine-private/conf/${deployId}/conf.instances.json`;
1077
+ if (fs.existsSync(confInstancesPath)) {
1078
+ const confInstances = JSON.parse(fs.readFileSync(confInstancesPath, 'utf8'));
1079
+ for (const instance of confInstances) {
1080
+ const { id, host, path, fromPort, metadata } = instance;
1081
+ const { runtime } = metadata;
1207
1082
  const body = {
1208
1083
  deployId,
1209
1084
  host,
1210
- path: path === '/' ? '/peer' : `${path}/peer`,
1211
- port: port + 1,
1212
- runtime: 'nodejs',
1085
+ path,
1086
+ port: fromPort,
1087
+ client: id,
1088
+ runtime,
1213
1089
  };
1214
-
1215
- logger.info('Saving peer instance metadata', body);
1090
+ logger.info('Saving additional instance metadata', body);
1216
1091
  await new Instance(body).save();
1217
1092
  }
1218
1093
  }
1219
1094
  }
1220
-
1221
- // Process additional instances
1222
- const confInstancesPath = `./engine-private/conf/${deployId}/conf.instances.json`;
1223
- if (fs.existsSync(confInstancesPath)) {
1224
- const confInstances = JSON.parse(fs.readFileSync(confInstancesPath, 'utf8'));
1225
- for (const instance of confInstances) {
1226
- const { id, host, path, fromPort, metadata } = instance;
1227
- const { runtime } = metadata;
1228
- const body = {
1229
- deployId,
1230
- host,
1231
- path,
1232
- port: fromPort,
1233
- client: id,
1234
- runtime,
1235
- };
1236
- logger.info('Saving additional instance metadata', body);
1237
- await new Instance(body).save();
1238
- }
1239
- }
1095
+ } catch (error) {
1096
+ logger.error('Failed to create instance metadata', { error: error.message });
1097
+ throw error;
1240
1098
  }
1241
- } catch (error) {
1242
- logger.error('Failed to create instance metadata', { error: error.message });
1243
- throw error;
1244
- }
1245
1099
 
1246
- try {
1247
- const cronDeployPath = './engine-private/deploy/dd.cron';
1248
- if (!fs.existsSync(cronDeployPath)) {
1249
- logger.warn('Cron deploy file not found', { path: cronDeployPath });
1250
- return;
1251
- }
1100
+ try {
1101
+ const cronDeployPath = './engine-private/deploy/dd.cron';
1102
+ if (!fs.existsSync(cronDeployPath)) {
1103
+ logger.warn('Cron deploy file not found', { path: cronDeployPath });
1104
+ return;
1105
+ }
1252
1106
 
1253
- const cronDeployId = fs.readFileSync(cronDeployPath, 'utf8').trim();
1254
- const confCronPath = `./engine-private/conf/${cronDeployId}/conf.cron.json`;
1107
+ const cronDeployId = fs.readFileSync(cronDeployPath, 'utf8').trim();
1108
+ const confCronPath = `./engine-private/conf/${cronDeployId}/conf.cron.json`;
1255
1109
 
1256
- if (!fs.existsSync(confCronPath)) {
1257
- logger.warn('Cron configuration not found', { path: confCronPath });
1258
- return;
1259
- }
1110
+ if (!fs.existsSync(confCronPath)) {
1111
+ logger.warn('Cron configuration not found', { path: confCronPath });
1112
+ return;
1113
+ }
1260
1114
 
1261
- const confCron = JSON.parse(fs.readFileSync(confCronPath, 'utf8'));
1115
+ const confCron = JSON.parse(fs.readFileSync(confCronPath, 'utf8'));
1262
1116
 
1263
- await DataBaseProvider.load({ apis: ['cron'], host, path, db });
1117
+ await DataBaseProvider.load({ apis: ['cron'], host, path, db });
1264
1118
 
1265
- /** @type {import('../api/cron/cron.model.js').CronModel} */
1266
- const Cron = DataBaseProvider.instance[`${host}${path}`].mongoose.models.Cron;
1119
+ /** @type {import('../api/cron/cron.model.js').CronModel} */
1120
+ const Cron = DataBaseProvider.instance[`${host}${path}`].mongoose.models.Cron;
1267
1121
 
1268
- await Cron.deleteMany();
1269
- logger.info('Cleared existing cron metadata');
1122
+ await Cron.deleteMany();
1123
+ logger.info('Cleared existing cron metadata');
1270
1124
 
1271
- for (const jobId of Object.keys(confCron.jobs)) {
1272
- const body = {
1273
- jobId,
1274
- deployId: Underpost.cron.getRelatedDeployIdList(jobId),
1275
- expression: confCron.jobs[jobId].expression,
1276
- enabled: confCron.jobs[jobId].enabled,
1277
- };
1278
- logger.info('Saving cron metadata', body);
1279
- await new Cron(body).save();
1125
+ for (const jobId of Object.keys(confCron.jobs)) {
1126
+ const body = {
1127
+ jobId,
1128
+ deployId: Underpost.cron.getRelatedDeployIdList(jobId),
1129
+ expression: confCron.jobs[jobId].expression,
1130
+ enabled: confCron.jobs[jobId].enabled,
1131
+ };
1132
+ logger.info('Saving cron metadata', body);
1133
+ await new Cron(body).save();
1134
+ }
1135
+ } catch (error) {
1136
+ logger.error('Failed to create cron metadata', { error: error.message });
1280
1137
  }
1138
+
1139
+ await DataBaseProvider.instance[`${host}${path}`].mongoose.close();
1140
+ logger.info('Cluster metadata creation completed');
1281
1141
  } catch (error) {
1282
- logger.error('Failed to create cron metadata', { error: error.message });
1142
+ logger.error('Cluster metadata creation failed', { error: error.message });
1143
+ throw error;
1283
1144
  }
1284
-
1285
- await DataBaseProvider.instance[`${host}${path}`].mongoose.close();
1286
- logger.info('Cluster metadata creation completed');
1287
1145
  },
1288
1146
 
1289
1147
  /**
@@ -1297,6 +1155,7 @@ class UnderpostDB {
1297
1155
  * @param {string} [options.hosts=''] - Comma-separated list of hosts to filter.
1298
1156
  * @param {string} [options.paths=''] - Comma-separated list of paths to filter.
1299
1157
  * @param {boolean} [options.dryRun=false] - If true, only reports what would be deleted.
1158
+ * @param {boolean} [options.dev=false] - Development mode flag.
1300
1159
  * @return {Promise<void>} Resolves when clean operation is complete.
1301
1160
  */
1302
1161
  async cleanFsCollection(
@@ -1305,203 +1164,220 @@ class UnderpostDB {
1305
1164
  hosts: '',
1306
1165
  paths: '',
1307
1166
  dryRun: false,
1167
+ dev: false,
1308
1168
  },
1309
1169
  ) {
1310
- loadCronDeployEnv();
1311
- if (deployList === 'dd') deployList = fs.readFileSync(`./engine-private/deploy/dd.router`, 'utf8');
1312
-
1313
- logger.info('Starting File collection cleanup', { deployList, options });
1314
-
1315
- // Load file.ref.json to know which models reference File
1316
- const fileRefPath = './src/api/file/file.ref.json';
1317
- if (!fs.existsSync(fileRefPath)) {
1318
- logger.error('file.ref.json not found', { path: fileRefPath });
1319
- return;
1320
- }
1170
+ const firstDeployId = deployList !== 'dd' ? deployList.split(',')[0].trim() : '';
1171
+ try {
1172
+ loadCronDeployEnv();
1173
+ if (deployList === 'dd') deployList = fs.readFileSync(`./engine-private/deploy/dd.router`, 'utf8');
1321
1174
 
1322
- const fileRefData = JSON.parse(fs.readFileSync(fileRefPath, 'utf8'));
1323
- logger.info('Loaded file reference configuration', { apis: fileRefData.length });
1175
+ logger.info('Starting File collection cleanup', { deployList, options });
1324
1176
 
1325
- // Filter hosts and paths if specified
1326
- const filterHosts = options.hosts ? options.hosts.split(',').map((h) => h.trim()) : [];
1327
- const filterPaths = options.paths ? options.paths.split(',').map((p) => p.trim()) : [];
1177
+ // Load file.ref.json to know which models reference File
1178
+ const fileRefPath = './src/api/file/file.ref.json';
1179
+ if (!fs.existsSync(fileRefPath)) {
1180
+ logger.error('file.ref.json not found', { path: fileRefPath });
1181
+ return;
1182
+ }
1328
1183
 
1329
- // Track all connections to close them at the end
1330
- const connectionsToClose = [];
1184
+ const fileRefData = JSON.parse(fs.readFileSync(fileRefPath, 'utf8'));
1185
+ logger.info('Loaded file reference configuration', { apis: fileRefData.length });
1331
1186
 
1332
- for (const _deployId of deployList.split(',')) {
1333
- const deployId = _deployId.trim();
1334
- if (!deployId) continue;
1187
+ // Filter hosts and paths if specified
1188
+ const filterHosts = options.hosts ? options.hosts.split(',').map((h) => h.trim()) : [];
1189
+ const filterPaths = options.paths ? options.paths.split(',').map((p) => p.trim()) : [];
1335
1190
 
1336
- logger.info('Processing deployment for File cleanup', { deployId });
1191
+ // Track all connections to close them at the end
1192
+ const connectionsToClose = [];
1337
1193
 
1338
- // Load server configuration
1339
- const confServerPath = `./engine-private/conf/${deployId}/conf.server.json`;
1340
- if (!fs.existsSync(confServerPath)) {
1341
- logger.error('Configuration file not found', { path: confServerPath });
1342
- continue;
1343
- }
1194
+ for (const _deployId of deployList.split(',')) {
1195
+ const deployId = _deployId.trim();
1196
+ if (!deployId) continue;
1344
1197
 
1345
- const confServer = loadConfServerJson(confServerPath, { resolve: true });
1198
+ logger.info('Processing deployment for File cleanup', { deployId });
1346
1199
 
1347
- // Process each host+path combination
1348
- for (const host of Object.keys(confServer)) {
1349
- if (filterHosts.length > 0 && !filterHosts.includes(host)) continue;
1200
+ // Load server configuration
1201
+ const confServerPath = `./engine-private/conf/${deployId}/conf.server.json`;
1202
+ if (!fs.existsSync(confServerPath)) {
1203
+ logger.error('Configuration file not found', { path: confServerPath });
1204
+ continue;
1205
+ }
1350
1206
 
1351
- for (const path of Object.keys(confServer[host])) {
1352
- if (filterPaths.length > 0 && !filterPaths.includes(path)) continue;
1207
+ const confServer = loadConfServerJson(confServerPath, { resolve: true });
1353
1208
 
1354
- const { db, apis } = confServer[host][path];
1355
- if (!db || !apis) continue;
1209
+ // Process each host+path combination
1210
+ for (const host of Object.keys(confServer)) {
1211
+ if (filterHosts.length > 0 && !filterHosts.includes(host)) continue;
1356
1212
 
1357
- // Check if 'file' api is in the apis list
1358
- if (!apis.includes('file')) {
1359
- logger.info('Skipping - no file api in configuration', { host, path });
1360
- continue;
1361
- }
1213
+ for (const path of Object.keys(confServer[host])) {
1214
+ if (filterPaths.length > 0 && !filterPaths.includes(path)) continue;
1362
1215
 
1363
- // logger.info('Processing host+path with file api', { host, path, db: db.name });
1216
+ const { db, apis } = confServer[host][path];
1217
+ if (!db || !apis) continue;
1364
1218
 
1365
- try {
1366
- // Connect to database
1367
- const dbProvider = await DataBaseProvider.load({ apis, host, path, db });
1368
- if (!dbProvider || !dbProvider.models) {
1369
- logger.error('Failed to load database provider', { host, path });
1219
+ // Check if 'file' api is in the apis list
1220
+ if (!apis.includes('file')) {
1221
+ logger.info('Skipping - no file api in configuration', { host, path });
1370
1222
  continue;
1371
1223
  }
1372
1224
 
1373
- const { models } = dbProvider;
1225
+ // logger.info('Processing host+path with file api', { host, path, db: db.name });
1226
+
1227
+ try {
1228
+ // Connect to database with retry
1229
+ let dbProvider;
1230
+ for (let attempt = 1; attempt <= 3; attempt++) {
1231
+ try {
1232
+ dbProvider = await DataBaseProvider.load({ apis, host, path, db });
1233
+ break;
1234
+ } catch (err) {
1235
+ if (attempt === 3) throw err;
1236
+ logger.warn('Database connection failed, retrying...', { attempt, host, path, error: err.message });
1237
+ await timer(3000);
1238
+ }
1239
+ }
1240
+ if (!dbProvider || !dbProvider.models) {
1241
+ logger.error('Failed to load database provider', { host, path });
1242
+ continue;
1243
+ }
1374
1244
 
1375
- // Track this connection for cleanup
1376
- connectionsToClose.push({ host, path, dbProvider });
1245
+ const { models } = dbProvider;
1377
1246
 
1378
- // Check if File model exists
1379
- if (!models.File) {
1380
- logger.warn('File model not loaded', { host, path });
1381
- continue;
1382
- }
1247
+ // Track this connection for cleanup
1248
+ connectionsToClose.push({ host, path, dbProvider });
1383
1249
 
1384
- // Get all File documents
1385
- const allFiles = await models.File.find({}, '_id').lean();
1386
- logger.info('Found File documents', { count: allFiles.length, host, path });
1250
+ // Check if File model exists
1251
+ if (!models.File) {
1252
+ logger.warn('File model not loaded', { host, path });
1253
+ continue;
1254
+ }
1387
1255
 
1388
- if (allFiles.length === 0) continue;
1256
+ // Get all File documents
1257
+ const allFiles = await models.File.find({}, '_id').lean();
1258
+ logger.info('Found File documents', { count: allFiles.length, host, path });
1389
1259
 
1390
- // Track which File IDs are referenced
1391
- const referencedFileIds = new Set();
1260
+ if (allFiles.length === 0) continue;
1392
1261
 
1393
- // Check each API from file.ref.json
1394
- for (const refConfig of fileRefData) {
1395
- const { api, model: modelFields } = refConfig;
1262
+ // Track which File IDs are referenced
1263
+ const referencedFileIds = new Set();
1396
1264
 
1397
- // Check if this API is loaded in current context
1398
- const modelName = api
1399
- .split('-')
1400
- .map((w) => w.charAt(0).toUpperCase() + w.slice(1))
1401
- .join('');
1402
- const Model = models[modelName];
1265
+ // Check each API from file.ref.json
1266
+ for (const refConfig of fileRefData) {
1267
+ const { api, model: modelFields } = refConfig;
1403
1268
 
1404
- if (!Model) {
1405
- logger.debug('Model not loaded in current context', { api, modelName, host, path });
1406
- continue;
1407
- }
1269
+ // Check if this API is loaded in current context
1270
+ const modelName = api
1271
+ .split('-')
1272
+ .map((w) => w.charAt(0).toUpperCase() + w.slice(1))
1273
+ .join('');
1274
+ const Model = models[modelName];
1408
1275
 
1409
- logger.info('Checking references in model', { api, modelName });
1276
+ if (!Model) {
1277
+ logger.debug('Model not loaded in current context', { api, modelName, host, path });
1278
+ continue;
1279
+ }
1410
1280
 
1411
- // Helper function to recursively check field references
1412
- const checkFieldReferences = async (fieldPath, fieldConfig) => {
1413
- for (const [fieldName, fieldValue] of Object.entries(fieldConfig)) {
1414
- const currentPath = fieldPath ? `${fieldPath}.${fieldName}` : fieldName;
1281
+ logger.info('Checking references in model', { api, modelName });
1415
1282
 
1416
- if (fieldValue === true) {
1417
- // This is a File reference field
1418
- const query = {};
1419
- query[currentPath] = { $exists: true, $ne: null };
1283
+ // Helper function to recursively check field references
1284
+ const checkFieldReferences = async (fieldPath, fieldConfig) => {
1285
+ for (const [fieldName, fieldValue] of Object.entries(fieldConfig)) {
1286
+ const currentPath = fieldPath ? `${fieldPath}.${fieldName}` : fieldName;
1420
1287
 
1421
- const docs = await Model.find(query, currentPath).lean();
1288
+ if (fieldValue === true) {
1289
+ // This is a File reference field
1290
+ const query = {};
1291
+ query[currentPath] = { $exists: true, $ne: null };
1422
1292
 
1423
- for (const doc of docs) {
1424
- // Navigate to the nested field
1425
- const parts = currentPath.split('.');
1426
- let value = doc;
1427
- for (const part of parts) {
1428
- value = value?.[part];
1429
- }
1293
+ const docs = await Model.find(query, currentPath).lean();
1430
1294
 
1431
- if (value) {
1432
- if (Array.isArray(value)) {
1433
- value.forEach((id) => id && referencedFileIds.add(id.toString()));
1434
- } else {
1435
- referencedFileIds.add(value.toString());
1295
+ for (const doc of docs) {
1296
+ // Navigate to the nested field
1297
+ const parts = currentPath.split('.');
1298
+ let value = doc;
1299
+ for (const part of parts) {
1300
+ value = value?.[part];
1301
+ }
1302
+
1303
+ if (value) {
1304
+ if (Array.isArray(value)) {
1305
+ value.forEach((id) => id && referencedFileIds.add(id.toString()));
1306
+ } else {
1307
+ referencedFileIds.add(value.toString());
1308
+ }
1436
1309
  }
1437
1310
  }
1438
- }
1439
1311
 
1440
- logger.info('Found references', {
1441
- model: modelName,
1442
- field: currentPath,
1443
- count: docs.length,
1444
- });
1445
- } else if (typeof fieldValue === 'object') {
1446
- // Nested object, recurse
1447
- await checkFieldReferences(currentPath, fieldValue);
1312
+ logger.info('Found references', {
1313
+ model: modelName,
1314
+ field: currentPath,
1315
+ count: docs.length,
1316
+ });
1317
+ } else if (typeof fieldValue === 'object') {
1318
+ // Nested object, recurse
1319
+ await checkFieldReferences(currentPath, fieldValue);
1320
+ }
1448
1321
  }
1449
- }
1450
- };
1451
-
1452
- await checkFieldReferences('', modelFields);
1453
- }
1322
+ };
1454
1323
 
1455
- logger.info('Total referenced File IDs', { count: referencedFileIds.size, host, path });
1324
+ await checkFieldReferences('', modelFields);
1325
+ }
1456
1326
 
1457
- // Find orphaned files
1458
- const orphanedFiles = allFiles.filter((file) => !referencedFileIds.has(file._id.toString()));
1327
+ logger.info('Total referenced File IDs', { count: referencedFileIds.size, host, path });
1459
1328
 
1460
- if (orphanedFiles.length === 0) {
1461
- logger.info('No orphaned files found', { host, path });
1462
- } else {
1463
- logger.info('Found orphaned files', { count: orphanedFiles.length, host, path });
1329
+ // Find orphaned files
1330
+ const orphanedFiles = allFiles.filter((file) => !referencedFileIds.has(file._id.toString()));
1464
1331
 
1465
- if (options.dryRun) {
1466
- logger.info('Dry run - would delete files', {
1467
- count: orphanedFiles.length,
1468
- ids: orphanedFiles.map((f) => f._id.toString()),
1469
- });
1332
+ if (orphanedFiles.length === 0) {
1333
+ logger.info('No orphaned files found', { host, path });
1470
1334
  } else {
1471
- const orphanedIds = orphanedFiles.map((f) => f._id);
1472
- const deleteResult = await models.File.deleteMany({ _id: { $in: orphanedIds } });
1473
- logger.info('Deleted orphaned files', {
1474
- deletedCount: deleteResult.deletedCount,
1475
- host,
1476
- path,
1477
- });
1335
+ logger.info('Found orphaned files', { count: orphanedFiles.length, host, path });
1336
+
1337
+ if (options.dryRun) {
1338
+ logger.info('Dry run - would delete files', {
1339
+ count: orphanedFiles.length,
1340
+ ids: orphanedFiles.map((f) => f._id.toString()),
1341
+ });
1342
+ } else {
1343
+ const orphanedIds = orphanedFiles.map((f) => f._id);
1344
+ const deleteResult = await models.File.deleteMany({ _id: { $in: orphanedIds } });
1345
+ logger.info('Deleted orphaned files', {
1346
+ deletedCount: deleteResult.deletedCount,
1347
+ host,
1348
+ path,
1349
+ });
1350
+ }
1478
1351
  }
1352
+ } catch (error) {
1353
+ logger.error('Error processing host+path', {
1354
+ host,
1355
+ path,
1356
+ error: error.message,
1357
+ });
1479
1358
  }
1480
- } catch (error) {
1481
- logger.error('Error processing host+path', {
1482
- host,
1483
- path,
1484
- error: error.message,
1485
- });
1486
1359
  }
1487
1360
  }
1488
1361
  }
1489
- }
1490
1362
 
1491
- // Close all connections
1492
- logger.info('Closing all database connections', { count: connectionsToClose.length });
1493
- for (const { host, path, dbProvider } of connectionsToClose) {
1494
- try {
1495
- if (dbProvider && dbProvider.close) {
1496
- await dbProvider.close();
1497
- logger.info('Connection closed', { host, path });
1363
+ // Close all connections
1364
+ logger.info('Closing all database connections', { count: connectionsToClose.length });
1365
+ for (const { host, path, dbProvider } of connectionsToClose) {
1366
+ try {
1367
+ if (dbProvider && dbProvider.close) {
1368
+ await dbProvider.close();
1369
+ logger.info('Connection closed', { host, path });
1370
+ }
1371
+ } catch (error) {
1372
+ logger.error('Error closing connection', { host, path, error: error.message });
1498
1373
  }
1499
- } catch (error) {
1500
- logger.error('Error closing connection', { host, path, error: error.message });
1501
1374
  }
1502
- }
1503
1375
 
1504
- logger.info('File collection cleanup completed');
1376
+ logger.info('File collection cleanup completed');
1377
+ } catch (error) {
1378
+ logger.error('File collection cleanup failed', { error: error.message });
1379
+ throw error;
1380
+ }
1505
1381
  },
1506
1382
 
1507
1383
  /**
@@ -1520,6 +1396,7 @@ class UnderpostDB {
1520
1396
  * @param {boolean} [options.export=false] - Export metadata to backup.
1521
1397
  * @param {boolean} [options.instances=false] - Process instances collection.
1522
1398
  * @param {boolean} [options.crons=false] - Process crons collection.
1399
+ * @param {boolean} [options.dev=false] - Development mode flag.
1523
1400
  * @return {Promise<void>} Resolves when backup operation is complete.
1524
1401
  */
1525
1402
  async clusterMetadataBackupCallback(
@@ -1533,70 +1410,76 @@ class UnderpostDB {
1533
1410
  export: false,
1534
1411
  instances: false,
1535
1412
  crons: false,
1413
+ dev: false,
1536
1414
  },
1537
1415
  ) {
1538
- loadCronDeployEnv();
1539
- deployId = deployId ? deployId : process.env.DEFAULT_DEPLOY_ID;
1540
- host = host ? host : process.env.DEFAULT_DEPLOY_HOST;
1541
- path = path ? path : process.env.DEFAULT_DEPLOY_PATH;
1542
-
1543
- logger.info('Starting cluster metadata backup operation', {
1544
- deployId,
1545
- host,
1546
- path,
1547
- options,
1548
- });
1549
-
1550
- if (options.generate === true) {
1551
- logger.info('Generating cluster metadata');
1552
- await Underpost.db.clusterMetadataFactory(deployId, host, path);
1553
- }
1416
+ try {
1417
+ loadCronDeployEnv();
1418
+ deployId = deployId ? deployId : process.env.DEFAULT_DEPLOY_ID;
1419
+ host = host ? host : process.env.DEFAULT_DEPLOY_HOST;
1420
+ path = path ? path : process.env.DEFAULT_DEPLOY_PATH;
1421
+
1422
+ logger.info('Starting cluster metadata backup operation', {
1423
+ deployId,
1424
+ host,
1425
+ path,
1426
+ options,
1427
+ });
1554
1428
 
1555
- if (options.instances === true) {
1556
- const outputPath = './engine-private/instances';
1557
- if (!fs.existsSync(outputPath)) {
1558
- fs.mkdirSync(outputPath, { recursive: true });
1429
+ if (options.generate === true) {
1430
+ logger.info('Generating cluster metadata');
1431
+ await Underpost.db.clusterMetadataFactory(deployId, host, path);
1559
1432
  }
1560
- const collection = 'instances';
1561
1433
 
1562
- if (options.export === true) {
1563
- logger.info('Exporting instances collection', { outputPath });
1564
- shellExec(
1565
- `node bin db --export --primary-pod --collections ${collection} --out-path ${outputPath} --hosts ${host} --paths '${path}' ${deployId}`,
1566
- );
1567
- }
1434
+ if (options.instances === true) {
1435
+ const outputPath = './engine-private/instances';
1436
+ if (!fs.existsSync(outputPath)) {
1437
+ fs.mkdirSync(outputPath, { recursive: true });
1438
+ }
1439
+ const collection = 'instances';
1568
1440
 
1569
- if (options.import === true) {
1570
- logger.info('Importing instances collection', { outputPath });
1571
- shellExec(
1572
- `node bin db --import --primary-pod --drop --preserveUUID --out-path ${outputPath} --hosts ${host} --paths '${path}' ${deployId}`,
1573
- );
1574
- }
1575
- }
1441
+ if (options.export === true) {
1442
+ logger.info('Exporting instances collection', { outputPath });
1443
+ shellExec(
1444
+ `node bin db --export --primary-pod --collections ${collection} --out-path ${outputPath} --hosts ${host} --paths '${path}' ${deployId}`,
1445
+ );
1446
+ }
1576
1447
 
1577
- if (options.crons === true) {
1578
- const outputPath = './engine-private/crons';
1579
- if (!fs.existsSync(outputPath)) {
1580
- fs.mkdirSync(outputPath, { recursive: true });
1448
+ if (options.import === true) {
1449
+ logger.info('Importing instances collection', { outputPath });
1450
+ shellExec(
1451
+ `node bin db --import --primary-pod --drop --preserveUUID --out-path ${outputPath} --hosts ${host} --paths '${path}' ${deployId}`,
1452
+ );
1453
+ }
1581
1454
  }
1582
- const collection = 'crons';
1583
1455
 
1584
- if (options.export === true) {
1585
- logger.info('Exporting crons collection', { outputPath });
1586
- shellExec(
1587
- `node bin db --export --primary-pod --collections ${collection} --out-path ${outputPath} --hosts ${host} --paths '${path}' ${deployId}`,
1588
- );
1589
- }
1456
+ if (options.crons === true) {
1457
+ const outputPath = './engine-private/crons';
1458
+ if (!fs.existsSync(outputPath)) {
1459
+ fs.mkdirSync(outputPath, { recursive: true });
1460
+ }
1461
+ const collection = 'crons';
1462
+
1463
+ if (options.export === true) {
1464
+ logger.info('Exporting crons collection', { outputPath });
1465
+ shellExec(
1466
+ `node bin db --export --primary-pod --collections ${collection} --out-path ${outputPath} --hosts ${host} --paths '${path}' ${deployId}`,
1467
+ );
1468
+ }
1590
1469
 
1591
- if (options.import === true) {
1592
- logger.info('Importing crons collection', { outputPath });
1593
- shellExec(
1594
- `node bin db --import --primary-pod --drop --preserveUUID --out-path ${outputPath} --hosts ${host} --paths '${path}' ${deployId}`,
1595
- );
1470
+ if (options.import === true) {
1471
+ logger.info('Importing crons collection', { outputPath });
1472
+ shellExec(
1473
+ `node bin db --import --primary-pod --drop --preserveUUID --out-path ${outputPath} --hosts ${host} --paths '${path}' ${deployId}`,
1474
+ );
1475
+ }
1596
1476
  }
1597
- }
1598
1477
 
1599
- logger.info('Cluster metadata backup operation completed');
1478
+ logger.info('Cluster metadata backup operation completed');
1479
+ } catch (error) {
1480
+ logger.error('Cluster metadata backup operation failed', { error: error.message });
1481
+ throw error;
1482
+ }
1600
1483
  },
1601
1484
  };
1602
1485
  }