cyberia 3.0.3 → 3.2.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/{.env.production → .env.example} +20 -4
- package/.github/workflows/engine-cyberia.cd.yml +43 -10
- package/.github/workflows/engine-cyberia.ci.yml +48 -26
- package/.github/workflows/ghpkg.ci.yml +5 -5
- package/.github/workflows/gitlab.ci.yml +1 -1
- package/.github/workflows/hardhat.ci.yml +82 -0
- package/.github/workflows/npmpkg.ci.yml +60 -14
- package/.github/workflows/publish.ci.yml +26 -7
- package/.github/workflows/publish.cyberia.ci.yml +5 -5
- package/.github/workflows/pwa-microservices-template-page.cd.yml +6 -7
- package/.github/workflows/pwa-microservices-template-test.ci.yml +4 -4
- package/.github/workflows/release.cd.yml +14 -8
- package/.vscode/extensions.json +9 -8
- package/.vscode/settings.json +3 -2
- package/CHANGELOG.md +643 -1
- package/CLI-HELP.md +132 -57
- package/Dockerfile +4 -2
- package/README.md +347 -22
- package/WHITE-PAPER.md +1540 -0
- package/bin/build.js +21 -12
- package/bin/cyberia.js +2640 -106
- package/bin/deploy.js +258 -372
- package/bin/file.js +5 -1
- package/bin/index.js +2640 -106
- package/bin/vs.js +3 -3
- package/conf.js +169 -105
- package/deployment.yaml +236 -20
- package/hardhat/.env.example +31 -0
- package/hardhat/README.md +531 -0
- package/hardhat/WHITE-PAPER.md +1540 -0
- package/hardhat/contracts/ObjectLayerToken.sol +391 -0
- package/hardhat/deployments/.gitkeep +0 -0
- package/hardhat/deployments/hardhat-ObjectLayerToken.json +11 -0
- package/hardhat/hardhat.config.js +136 -0
- package/hardhat/ignition/modules/ObjectLayerToken.js +21 -0
- package/hardhat/networks/besu-object-layer.network.json +138 -0
- package/hardhat/package-lock.json +4323 -0
- package/hardhat/package.json +36 -0
- package/hardhat/scripts/deployObjectLayerToken.js +98 -0
- package/hardhat/test/ObjectLayerToken.js +592 -0
- package/hardhat/types/ethers-contracts/ObjectLayerToken.ts +690 -0
- package/hardhat/types/ethers-contracts/common.ts +92 -0
- package/hardhat/types/ethers-contracts/factories/ObjectLayerToken__factory.ts +1055 -0
- package/hardhat/types/ethers-contracts/factories/index.ts +4 -0
- package/hardhat/types/ethers-contracts/hardhat.d.ts +47 -0
- package/hardhat/types/ethers-contracts/index.ts +6 -0
- package/jsdoc.dd-cyberia.json +68 -0
- package/jsdoc.json +65 -49
- package/manifests/cronjobs/dd-cron/dd-cron-backup.yaml +5 -4
- package/manifests/cronjobs/dd-cron/dd-cron-dns.yaml +5 -4
- package/manifests/deployment/dd-cyberia-development/deployment.yaml +562 -0
- package/manifests/deployment/dd-cyberia-development/proxy.yaml +297 -0
- package/manifests/deployment/dd-cyberia-development/pv-pvc.yaml +132 -0
- package/manifests/deployment/dd-default-development/deployment.yaml +2 -2
- package/manifests/deployment/dd-test-development/deployment.yaml +88 -74
- package/manifests/deployment/dd-test-development/proxy.yaml +13 -4
- package/manifests/deployment/playwright/deployment.yaml +1 -1
- package/manifests/pv-pvc-dd.yaml +1 -1
- package/nodemon.json +1 -1
- package/package.json +60 -48
- package/proxy.yaml +118 -10
- package/pv-pvc.yaml +132 -0
- package/scripts/k3s-node-setup.sh +1 -1
- package/scripts/ports-ls.sh +2 -0
- package/scripts/rhel-grpc-setup.sh +56 -0
- package/src/api/atlas-sprite-sheet/atlas-sprite-sheet.controller.js +47 -1
- package/src/api/atlas-sprite-sheet/atlas-sprite-sheet.model.js +17 -2
- package/src/api/atlas-sprite-sheet/atlas-sprite-sheet.router.js +5 -0
- package/src/api/atlas-sprite-sheet/atlas-sprite-sheet.service.js +80 -7
- package/src/api/cyberia-dialogue/cyberia-dialogue.controller.js +93 -0
- package/src/api/cyberia-dialogue/cyberia-dialogue.model.js +36 -0
- package/src/api/cyberia-dialogue/cyberia-dialogue.router.js +29 -0
- package/src/api/cyberia-dialogue/cyberia-dialogue.service.js +51 -0
- package/src/api/cyberia-entity/cyberia-entity.controller.js +74 -0
- package/src/api/cyberia-entity/cyberia-entity.model.js +24 -0
- package/src/api/cyberia-entity/cyberia-entity.router.js +27 -0
- package/src/api/cyberia-entity/cyberia-entity.service.js +42 -0
- package/src/api/cyberia-instance/cyberia-fallback-world.js +368 -0
- package/src/api/cyberia-instance/cyberia-instance.controller.js +92 -0
- package/src/api/cyberia-instance/cyberia-instance.model.js +84 -0
- package/src/api/cyberia-instance/cyberia-instance.router.js +63 -0
- package/src/api/cyberia-instance/cyberia-instance.service.js +191 -0
- package/src/api/cyberia-instance/cyberia-portal-connector.js +486 -0
- package/src/api/cyberia-instance-conf/cyberia-instance-conf.controller.js +74 -0
- package/src/api/cyberia-instance-conf/cyberia-instance-conf.defaults.js +413 -0
- package/src/api/cyberia-instance-conf/cyberia-instance-conf.model.js +228 -0
- package/src/api/cyberia-instance-conf/cyberia-instance-conf.router.js +27 -0
- package/src/api/cyberia-instance-conf/cyberia-instance-conf.service.js +42 -0
- package/src/api/cyberia-map/cyberia-map.controller.js +79 -0
- package/src/api/cyberia-map/cyberia-map.model.js +30 -0
- package/src/api/cyberia-map/cyberia-map.router.js +40 -0
- package/src/api/cyberia-map/cyberia-map.service.js +74 -0
- package/src/api/document/document.service.js +1 -1
- package/src/api/file/file.controller.js +3 -1
- package/src/api/file/file.ref.json +18 -0
- package/src/api/file/file.service.js +28 -5
- package/src/api/ipfs/ipfs.controller.js +4 -25
- package/src/api/ipfs/ipfs.model.js +43 -34
- package/src/api/ipfs/ipfs.router.js +8 -13
- package/src/api/ipfs/ipfs.service.js +56 -104
- package/src/api/object-layer/README.md +347 -22
- package/src/api/object-layer/object-layer.controller.js +6 -2
- package/src/api/object-layer/object-layer.model.js +12 -8
- package/src/api/object-layer/object-layer.router.js +698 -42
- package/src/api/object-layer/object-layer.service.js +119 -37
- package/src/api/object-layer-render-frames/object-layer-render-frames.model.js +1 -2
- package/src/api/user/user.router.js +10 -5
- package/src/api/user/user.service.js +15 -14
- package/src/cli/baremetal.js +6 -10
- package/src/cli/cloud-init.js +0 -3
- package/src/cli/cluster.js +7 -7
- package/src/cli/db.js +723 -857
- package/src/cli/deploy.js +215 -105
- package/src/cli/env.js +34 -5
- package/src/cli/fs.js +5 -4
- package/src/cli/image.js +0 -3
- package/src/cli/index.js +83 -15
- package/src/cli/kubectl.js +211 -0
- package/src/cli/monitor.js +5 -6
- package/src/cli/release.js +284 -0
- package/src/cli/repository.js +708 -62
- package/src/cli/run.js +371 -151
- package/src/cli/secrets.js +73 -2
- package/src/cli/ssh.js +1 -1
- package/src/cli/test.js +3 -3
- package/src/client/Cryptokoyn.index.js +3 -4
- package/src/client/CyberiaPortal.index.js +3 -4
- package/src/client/Default.index.js +3 -4
- package/src/client/Itemledger.index.js +4 -963
- package/src/client/Underpost.index.js +3 -4
- package/src/client/components/core/AgGrid.js +20 -5
- package/src/client/components/core/Alert.js +2 -2
- package/src/client/components/core/AppStore.js +69 -0
- package/src/client/components/core/CalendarCore.js +2 -2
- package/src/client/components/core/Content.js +22 -3
- package/src/client/components/core/Docs.js +30 -6
- package/src/client/components/core/DropDown.js +137 -17
- package/src/client/components/core/FileExplorer.js +71 -4
- package/src/client/components/core/Input.js +1 -1
- package/src/client/components/core/Keyboard.js +2 -2
- package/src/client/components/core/LogIn.js +2 -2
- package/src/client/components/core/LogOut.js +2 -2
- package/src/client/components/core/Modal.js +20 -7
- package/src/client/components/core/Panel.js +0 -1
- package/src/client/components/core/PanelForm.js +19 -19
- package/src/client/components/core/RichText.js +1 -2
- package/src/client/components/core/SocketIo.js +82 -29
- package/src/client/components/core/SocketIoHandler.js +75 -0
- package/src/client/components/core/Stream.js +143 -95
- package/src/client/components/core/Webhook.js +40 -7
- package/src/client/components/cryptokoyn/AppStoreCryptokoyn.js +5 -0
- package/src/client/components/cryptokoyn/LogInCryptokoyn.js +3 -3
- package/src/client/components/cryptokoyn/LogOutCryptokoyn.js +2 -2
- package/src/client/components/cryptokoyn/MenuCryptokoyn.js +3 -3
- package/src/client/components/cryptokoyn/SocketIoCryptokoyn.js +3 -51
- package/src/client/components/cyberia/InstanceEngineCyberia.js +700 -0
- package/src/client/components/cyberia/MapEngineCyberia.js +1359 -2
- package/src/client/components/cyberia/ObjectLayerEngineModal.js +17 -6
- package/src/client/components/cyberia/ObjectLayerEngineViewer.js +92 -54
- package/src/client/components/cyberia-portal/AppStoreCyberiaPortal.js +5 -0
- package/src/client/components/cyberia-portal/CommonCyberiaPortal.js +217 -30
- package/src/client/components/cyberia-portal/CssCyberiaPortal.js +44 -2
- package/src/client/components/cyberia-portal/LogInCyberiaPortal.js +3 -4
- package/src/client/components/cyberia-portal/LogOutCyberiaPortal.js +2 -2
- package/src/client/components/cyberia-portal/MenuCyberiaPortal.js +104 -9
- package/src/client/components/cyberia-portal/RoutesCyberiaPortal.js +5 -0
- package/src/client/components/cyberia-portal/SocketIoCyberiaPortal.js +3 -49
- package/src/client/components/cyberia-portal/TranslateCyberiaPortal.js +4 -0
- package/src/client/components/default/AppStoreDefault.js +5 -0
- package/src/client/components/default/LogInDefault.js +3 -3
- package/src/client/components/default/LogOutDefault.js +2 -2
- package/src/client/components/default/MenuDefault.js +5 -5
- package/src/client/components/default/SocketIoDefault.js +3 -51
- package/src/client/components/itemledger/AppStoreItemledger.js +5 -0
- package/src/client/components/itemledger/LogInItemledger.js +3 -3
- package/src/client/components/itemledger/LogOutItemledger.js +2 -2
- package/src/client/components/itemledger/MenuItemledger.js +3 -3
- package/src/client/components/itemledger/SocketIoItemledger.js +3 -51
- package/src/client/components/underpost/AppStoreUnderpost.js +5 -0
- package/src/client/components/underpost/CssUnderpost.js +59 -0
- package/src/client/components/underpost/LogInUnderpost.js +6 -3
- package/src/client/components/underpost/LogOutUnderpost.js +4 -2
- package/src/client/components/underpost/MenuUnderpost.js +104 -18
- package/src/client/components/underpost/RoutesUnderpost.js +2 -0
- package/src/client/components/underpost/SocketIoUnderpost.js +3 -51
- package/src/client/public/cryptokoyn/assets/logo/base-icon.png +0 -0
- package/src/client/public/cryptokoyn/browserconfig.xml +12 -0
- package/src/client/public/cryptokoyn/microdata.json +85 -0
- package/src/client/public/cryptokoyn/site.webmanifest +57 -0
- package/src/client/public/cryptokoyn/sitemap +3 -3
- package/src/client/public/default/sitemap +3 -3
- package/src/client/public/itemledger/browserconfig.xml +2 -2
- package/src/client/public/itemledger/manifest.webmanifest +4 -4
- package/src/client/public/itemledger/microdata.json +71 -0
- package/src/client/public/itemledger/sitemap +3 -3
- package/src/client/public/itemledger/yandex-browser-manifest.json +2 -2
- package/src/client/public/test/sitemap +3 -3
- package/src/client/services/core/core.service.js +20 -8
- package/src/client/services/cyberia-dialogue/cyberia-dialogue.service.js +105 -0
- package/src/client/services/cyberia-entity/cyberia-entity.management.js +57 -0
- package/src/client/services/cyberia-entity/cyberia-entity.service.js +105 -0
- package/src/client/services/cyberia-instance/cyberia-instance.management.js +194 -0
- package/src/client/services/cyberia-instance/cyberia-instance.service.js +122 -0
- package/src/client/services/cyberia-instance-conf/cyberia-instance-conf.service.js +105 -0
- package/src/client/services/cyberia-map/cyberia-map.management.js +193 -0
- package/src/client/services/cyberia-map/cyberia-map.service.js +126 -0
- package/src/client/services/instance/instance.management.js +2 -2
- package/src/client/services/ipfs/ipfs.service.js +3 -23
- package/src/client/services/object-layer/object-layer.management.js +3 -3
- package/src/client/services/object-layer/object-layer.service.js +21 -0
- package/src/client/services/user/user.management.js +2 -2
- package/src/client/ssr/body/404.js +15 -11
- package/src/client/ssr/body/500.js +15 -11
- package/src/client/ssr/body/SwaggerDarkMode.js +285 -0
- package/src/client/ssr/head/PwaItemledger.js +60 -0
- package/src/client/ssr/offline/NoNetworkConnection.js +11 -10
- package/src/client/ssr/pages/CyberiaServerMetrics.js +1 -1
- package/src/client/ssr/pages/Test.js +11 -10
- package/src/client.build.js +0 -3
- package/src/client.dev.js +0 -3
- package/src/db/DataBaseProvider.js +17 -2
- package/src/db/mariadb/MariaDB.js +14 -9
- package/src/db/mongo/MongooseDB.js +17 -1
- package/src/grpc/cyberia/OFF_CHAIN_ECONOMY.md +305 -0
- package/src/grpc/cyberia/README.md +326 -0
- package/src/grpc/cyberia/grpc-server.js +530 -0
- package/src/index.js +24 -1
- package/src/proxy.js +0 -3
- package/src/runtime/express/Dockerfile +4 -0
- package/src/runtime/express/Express.js +33 -10
- package/src/runtime/lampp/Dockerfile +13 -2
- package/src/runtime/lampp/Lampp.js +33 -17
- package/src/runtime/wp/Dockerfile +68 -0
- package/src/runtime/wp/Wp.js +639 -0
- package/src/server/auth.js +36 -15
- package/src/server/backup.js +39 -12
- package/src/server/besu-genesis-generator.js +1630 -0
- package/src/server/client-build-docs.js +133 -17
- package/src/server/client-build-live.js +9 -18
- package/src/server/client-build.js +229 -101
- package/src/server/client-dev-server.js +14 -13
- package/src/server/client-formatted.js +109 -57
- package/src/server/conf.js +391 -164
- package/src/server/cron.js +27 -24
- package/src/server/dns.js +29 -12
- package/src/server/downloader.js +0 -2
- package/src/server/ipfs-client.js +24 -1
- package/src/server/logger.js +27 -9
- package/src/server/object-layer.js +217 -103
- package/src/server/peer.js +8 -2
- package/src/server/process.js +1 -50
- package/src/server/proxy.js +4 -8
- package/src/server/runtime.js +30 -9
- package/src/server/semantic-layer-generator-floor.js +359 -0
- package/src/server/semantic-layer-generator-skin.js +1294 -0
- package/src/server/semantic-layer-generator.js +116 -555
- package/src/server/ssr.js +0 -3
- package/src/server/start.js +19 -12
- package/src/server/tls.js +0 -2
- package/src/server.js +0 -4
- package/src/ws/IoInterface.js +1 -10
- package/src/ws/IoServer.js +14 -33
- package/src/ws/core/channels/core.ws.chat.js +65 -20
- package/src/ws/core/channels/core.ws.mailer.js +113 -32
- package/src/ws/core/channels/core.ws.stream.js +90 -31
- package/src/ws/core/core.ws.connection.js +12 -33
- package/src/ws/core/core.ws.emit.js +10 -26
- package/src/ws/core/core.ws.server.js +25 -58
- package/src/ws/default/channels/default.ws.main.js +53 -12
- package/src/ws/default/default.ws.connection.js +26 -13
- package/src/ws/default/default.ws.server.js +30 -12
- package/.env.development +0 -43
- package/.env.test +0 -43
- package/hardhat/contracts/CryptoKoyn.sol +0 -59
- package/hardhat/contracts/ItemLedger.sol +0 -73
- package/hardhat/contracts/Lock.sol +0 -34
- package/hardhat/hardhat.config.cjs +0 -45
- package/hardhat/ignition/modules/Lock.js +0 -18
- package/hardhat/networks/cryptokoyn-itemledger.network.json +0 -29
- package/hardhat/scripts/deployCryptokoyn.cjs +0 -25
- package/hardhat/scripts/deployItemledger.cjs +0 -25
- package/hardhat/test/Lock.js +0 -126
- package/hardhat/white-paper.md +0 -581
- package/src/client/components/cryptokoyn/CommonCryptokoyn.js +0 -29
- package/src/client/components/cryptokoyn/ElementsCryptokoyn.js +0 -38
- package/src/client/components/cyberia-portal/ElementsCyberiaPortal.js +0 -38
- package/src/client/components/default/ElementsDefault.js +0 -38
- package/src/client/components/itemledger/CommonItemledger.js +0 -29
- package/src/client/components/itemledger/ElementsItemledger.js +0 -38
- package/src/client/components/underpost/CommonUnderpost.js +0 -29
- package/src/client/components/underpost/ElementsUnderpost.js +0 -38
- package/src/ws/core/management/core.ws.chat.js +0 -8
- package/src/ws/core/management/core.ws.mailer.js +0 -16
- package/src/ws/core/management/core.ws.stream.js +0 -8
- package/src/ws/default/management/default.ws.main.js +0 -8
- package/white-paper.md +0 -581
package/bin/index.js
CHANGED
|
@@ -18,14 +18,17 @@ import { Command } from 'commander';
|
|
|
18
18
|
import fs from 'fs-extra';
|
|
19
19
|
import { shellExec } from '../src/server/process.js';
|
|
20
20
|
import { loggerFactory } from '../src/server/logger.js';
|
|
21
|
+
import { generateBesuManifests, deployBesu, removeBesu } from '../src/server/besu-genesis-generator.js';
|
|
21
22
|
import { DataBaseProvider } from '../src/db/DataBaseProvider.js';
|
|
23
|
+
import { loadConfServerJson } from '../src/server/conf.js';
|
|
22
24
|
import {
|
|
23
25
|
ObjectLayerEngine,
|
|
26
|
+
resolveCanonicalCid,
|
|
24
27
|
pngDirectoryIteratorByObjectLayerType,
|
|
25
28
|
getKeyFramesDirectionsFromNumberFolderDirection,
|
|
26
29
|
buildImgFromTile,
|
|
27
|
-
itemTypes,
|
|
28
30
|
} from '../src/server/object-layer.js';
|
|
31
|
+
import { ITEM_TYPES as itemTypes } from '../src/api/cyberia-instance-conf/cyberia-instance-conf.defaults.js';
|
|
29
32
|
import { AtlasSpriteSheetGenerator } from '../src/server/atlas-sprite-sheet-generator.js';
|
|
30
33
|
import {
|
|
31
34
|
generateFrame,
|
|
@@ -39,6 +42,47 @@ import { program as underpostProgram } from '../src/cli/index.js';
|
|
|
39
42
|
import crypto from 'crypto';
|
|
40
43
|
import nodePath from 'path';
|
|
41
44
|
import Underpost from '../src/index.js';
|
|
45
|
+
import {
|
|
46
|
+
DefaultCyberiaItems,
|
|
47
|
+
DefaultSkillConfig,
|
|
48
|
+
DefaultCyberiaDialogues,
|
|
49
|
+
} from '../src/client/components/cyberia-portal/CommonCyberiaPortal.js';
|
|
50
|
+
|
|
51
|
+
/**
|
|
52
|
+
* Connect to the project MongoDB instance using the standard env / conf layout.
|
|
53
|
+
*
|
|
54
|
+
* @async
|
|
55
|
+
* @function connectDbForChain
|
|
56
|
+
* @param {Object} params
|
|
57
|
+
* @param {string} params.envPath – path to .env file.
|
|
58
|
+
* @param {string} [params.mongoHost] – optional mongo host override.
|
|
59
|
+
* @returns {Promise<{ ObjectLayer: import('mongoose').Model, host: string, path: string }>}
|
|
60
|
+
* @memberof CyberiaCLI
|
|
61
|
+
*/
|
|
62
|
+
async function connectDbForChain({ envPath, mongoHost }) {
|
|
63
|
+
const deployId = process.env.DEFAULT_DEPLOY_ID;
|
|
64
|
+
const host = process.env.DEFAULT_DEPLOY_HOST;
|
|
65
|
+
const path = process.env.DEFAULT_DEPLOY_PATH;
|
|
66
|
+
|
|
67
|
+
const confServerPath = `./engine-private/conf/${deployId}/conf.server.json`;
|
|
68
|
+
if (!fs.existsSync(confServerPath)) {
|
|
69
|
+
throw new Error(`Server config not found: ${confServerPath}. Ensure DEFAULT_DEPLOY_ID is set.`);
|
|
70
|
+
}
|
|
71
|
+
const confServer = loadConfServerJson(confServerPath, { resolve: true });
|
|
72
|
+
const { db } = confServer[host][path];
|
|
73
|
+
|
|
74
|
+
db.host = mongoHost ? mongoHost : db.host.replace('127.0.0.1', 'mongodb-0.mongodb-service');
|
|
75
|
+
|
|
76
|
+
await DataBaseProvider.load({
|
|
77
|
+
apis: ['object-layer'],
|
|
78
|
+
host,
|
|
79
|
+
path,
|
|
80
|
+
db,
|
|
81
|
+
});
|
|
82
|
+
|
|
83
|
+
const ObjectLayer = DataBaseProvider.instance[`${host}${path}`].mongoose.models.ObjectLayer;
|
|
84
|
+
return { ObjectLayer, host, path };
|
|
85
|
+
}
|
|
42
86
|
|
|
43
87
|
/** @type {Function} */
|
|
44
88
|
const logger = loggerFactory(import.meta);
|
|
@@ -64,7 +108,11 @@ try {
|
|
|
64
108
|
'Convert object layers to atlas sprite sheets, specify dimension (default: auto-calculated based on frame count)',
|
|
65
109
|
)
|
|
66
110
|
.option('--show-atlas-sprite-sheet', 'Show consolidated atlas sprite sheet PNG for given item-id')
|
|
67
|
-
.option(
|
|
111
|
+
.option(
|
|
112
|
+
'--import',
|
|
113
|
+
'Import specific item-id(s) passed as comma-separated command argument (e.g. ol hatchet,sword --import)',
|
|
114
|
+
)
|
|
115
|
+
.option('--import-types [object-layer-type]', 'Batch import by object layer type e.g. skin,floors or all')
|
|
68
116
|
.option('--show-frame [direction-frame]', 'View object layer frame for given item-id e.g. 08_0 (default: 08_0)')
|
|
69
117
|
.option('--generate', 'Generate procedural object layers from semantic item-id (e.g. floor-desert)')
|
|
70
118
|
.option('--count <count>', 'Shape element count multiplier for --generate (default: 3)', parseFloat)
|
|
@@ -76,6 +124,9 @@ try {
|
|
|
76
124
|
.option('--mongo-host <mongo-host>', 'Mongo host override')
|
|
77
125
|
.option('--storage-file-path <storage-file-path>', 'Storage file path override')
|
|
78
126
|
.option('--drop', 'Drop existing data before importing')
|
|
127
|
+
.option('--client-public', 'When used with --drop, also remove static asset folders for dropped items')
|
|
128
|
+
.option('--git-clean', 'When used with --drop, run underpost clean on the cyberia asset directory')
|
|
129
|
+
.option('--dev', 'Force development environment (loads .env.development for IPFS localhost, etc.)')
|
|
79
130
|
.action(
|
|
80
131
|
/**
|
|
81
132
|
* Main action handler for the `ol` command.
|
|
@@ -83,7 +134,8 @@ try {
|
|
|
83
134
|
*
|
|
84
135
|
* @param {string|undefined} itemId - Optional item ID argument.
|
|
85
136
|
* @param {Object} options - Command options parsed by Commander.
|
|
86
|
-
* @param {boolean
|
|
137
|
+
* @param {boolean} options.import - Import specific item-id(s) from the command argument (comma-separated).
|
|
138
|
+
* @param {boolean|string} options.importTypes - Object layer types to batch import (e.g., 'all', 'skin,floor') or `false`.
|
|
87
139
|
* @param {boolean|string} options.showFrame - Direction-frame string (e.g., '08_0') or `true` for default.
|
|
88
140
|
* @param {string} options.envPath - Path to the `.env` file.
|
|
89
141
|
* @param {string} options.mongoHost - MongoDB host override.
|
|
@@ -91,6 +143,9 @@ try {
|
|
|
91
143
|
* @param {boolean|string} options.toAtlasSpriteSheet - Atlas dimension or `true` for auto-calc.
|
|
92
144
|
* @param {boolean} options.showAtlasSpriteSheet - Whether to display the atlas sprite sheet.
|
|
93
145
|
* @param {boolean} options.drop - Whether to drop existing data before importing.
|
|
146
|
+
* @param {boolean} options.clientPublic - Also remove static asset folders when dropping.
|
|
147
|
+
* @param {boolean} options.gitClean - Run underpost clean on the cyberia asset directory when dropping.
|
|
148
|
+
* @param {boolean} options.dev - Force development environment.
|
|
94
149
|
* @param {boolean} options.generate - Whether to run procedural generation for the item-id.
|
|
95
150
|
* @param {number} options.count - Shape element count multiplier for generation.
|
|
96
151
|
* @param {string} options.seed - Deterministic seed string for generation.
|
|
@@ -104,12 +159,17 @@ try {
|
|
|
104
159
|
itemId,
|
|
105
160
|
options = {
|
|
106
161
|
import: false,
|
|
162
|
+
importTypes: false,
|
|
107
163
|
showFrame: '',
|
|
108
164
|
envPath: '',
|
|
109
165
|
mongoHost: '',
|
|
110
166
|
storageFilePath: '',
|
|
111
167
|
toAtlasSpriteSheet: '',
|
|
112
168
|
showAtlasSpriteSheet: false,
|
|
169
|
+
drop: false,
|
|
170
|
+
clientPublic: false,
|
|
171
|
+
gitClean: false,
|
|
172
|
+
dev: false,
|
|
113
173
|
generate: false,
|
|
114
174
|
count: 3,
|
|
115
175
|
seed: '',
|
|
@@ -121,6 +181,14 @@ try {
|
|
|
121
181
|
if (!options.envPath) options.envPath = `./.env`;
|
|
122
182
|
if (fs.existsSync(options.envPath)) dotenv.config({ path: options.envPath, override: true });
|
|
123
183
|
|
|
184
|
+
// --dev: force development environment (IPFS localhost, etc.)
|
|
185
|
+
if (options.dev && process.env.DEFAULT_DEPLOY_ID) {
|
|
186
|
+
const deployDevEnvPath = `./engine-private/conf/${process.env.DEFAULT_DEPLOY_ID}/.env.development`;
|
|
187
|
+
if (fs.existsSync(deployDevEnvPath)) {
|
|
188
|
+
dotenv.config({ path: deployDevEnvPath, override: true });
|
|
189
|
+
}
|
|
190
|
+
}
|
|
191
|
+
|
|
124
192
|
/** @type {string} */
|
|
125
193
|
const deployId = process.env.DEFAULT_DEPLOY_ID;
|
|
126
194
|
/** @type {string} */
|
|
@@ -129,10 +197,14 @@ try {
|
|
|
129
197
|
const path = process.env.DEFAULT_DEPLOY_PATH;
|
|
130
198
|
|
|
131
199
|
const confServerPath = `./engine-private/conf/${deployId}/conf.server.json`;
|
|
132
|
-
const confServer =
|
|
200
|
+
const confServer = loadConfServerJson(confServerPath, { resolve: true });
|
|
133
201
|
const { db } = confServer[host][path];
|
|
134
202
|
|
|
135
|
-
db.host = options.mongoHost
|
|
203
|
+
db.host = options.mongoHost
|
|
204
|
+
? options.mongoHost
|
|
205
|
+
: options.dev
|
|
206
|
+
? db.host
|
|
207
|
+
: db.host.replace('127.0.0.1', 'mongodb-0.mongodb-service');
|
|
136
208
|
|
|
137
209
|
logger.info('env', {
|
|
138
210
|
env: options.envPath,
|
|
@@ -158,23 +230,519 @@ try {
|
|
|
158
230
|
const AtlasSpriteSheet = DataBaseProvider.instance[`${host}${path}`].mongoose.models.AtlasSpriteSheet;
|
|
159
231
|
/** @type {import('mongoose').Model} */
|
|
160
232
|
const File = DataBaseProvider.instance[`${host}${path}`].mongoose.models.File;
|
|
233
|
+
/** @type {import('mongoose').Model} */
|
|
234
|
+
const Ipfs = DataBaseProvider.instance[`${host}${path}`].mongoose.models.Ipfs;
|
|
161
235
|
|
|
162
236
|
if (options.drop) {
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
237
|
+
// Parse comma-separated item IDs for targeted drop; if none provided, drop everything
|
|
238
|
+
const dropItemIds = itemId
|
|
239
|
+
? itemId
|
|
240
|
+
.split(',')
|
|
241
|
+
.map((id) => id.trim())
|
|
242
|
+
.filter(Boolean)
|
|
243
|
+
: null;
|
|
244
|
+
const isTargetedDrop = dropItemIds && dropItemIds.length > 0;
|
|
245
|
+
|
|
246
|
+
if (isTargetedDrop) {
|
|
247
|
+
logger.info(`Targeted drop for item(s): ${dropItemIds.join(', ')}`);
|
|
248
|
+
} else {
|
|
249
|
+
logger.info('Dropping ALL object layer data');
|
|
250
|
+
}
|
|
251
|
+
|
|
252
|
+
// Build query filter: targeted or all
|
|
253
|
+
const olFilter = isTargetedDrop ? { 'data.item.id': { $in: dropItemIds } } : {};
|
|
254
|
+
const atlasFilter = isTargetedDrop ? { 'metadata.itemKey': { $in: dropItemIds } } : {};
|
|
255
|
+
|
|
256
|
+
// Collect data before deletion
|
|
257
|
+
const olDocs = await ObjectLayer.find(olFilter, {
|
|
258
|
+
cid: 1,
|
|
259
|
+
'data.item.id': 1,
|
|
260
|
+
'data.item.type': 1,
|
|
261
|
+
'data.render': 1,
|
|
262
|
+
objectLayerRenderFramesId: 1,
|
|
263
|
+
atlasSpriteSheetId: 1,
|
|
264
|
+
}).lean();
|
|
265
|
+
const atlasDocs = await AtlasSpriteSheet.find(atlasFilter, { fileId: 1, cid: 1 }).lean();
|
|
266
|
+
|
|
267
|
+
const cidsToUnpin = new Set();
|
|
268
|
+
const itemIdsToClean = new Set();
|
|
269
|
+
const renderFrameIds = [];
|
|
270
|
+
const atlasIds = [];
|
|
271
|
+
|
|
272
|
+
for (const doc of olDocs) {
|
|
273
|
+
if (doc.cid) cidsToUnpin.add(doc.cid);
|
|
274
|
+
if (doc.data?.render?.cid) cidsToUnpin.add(doc.data.render.cid);
|
|
275
|
+
if (doc.data?.render?.metadataCid) cidsToUnpin.add(doc.data.render.metadataCid);
|
|
276
|
+
if (doc.data?.item?.id) itemIdsToClean.add(doc.data.item.id);
|
|
277
|
+
if (doc.objectLayerRenderFramesId) renderFrameIds.push(doc.objectLayerRenderFramesId);
|
|
278
|
+
if (doc.atlasSpriteSheetId) atlasIds.push(doc.atlasSpriteSheetId);
|
|
279
|
+
}
|
|
280
|
+
|
|
281
|
+
const atlasFileIds = atlasDocs.map((a) => a.fileId).filter(Boolean);
|
|
282
|
+
for (const atlas of atlasDocs) {
|
|
283
|
+
if (atlas.cid) cidsToUnpin.add(atlas.cid);
|
|
284
|
+
}
|
|
285
|
+
|
|
286
|
+
const olCount = olDocs.length;
|
|
287
|
+
const atlasCount = atlasDocs.length;
|
|
288
|
+
|
|
289
|
+
// Delete targeted documents
|
|
290
|
+
if (isTargetedDrop) {
|
|
291
|
+
const olIds = olDocs.map((d) => d._id);
|
|
292
|
+
if (olIds.length > 0) await ObjectLayer.deleteMany({ _id: { $in: olIds } });
|
|
293
|
+
if (renderFrameIds.length > 0) await ObjectLayerRenderFrames.deleteMany({ _id: { $in: renderFrameIds } });
|
|
294
|
+
if (atlasIds.length > 0) await AtlasSpriteSheet.deleteMany({ _id: { $in: atlasIds } });
|
|
295
|
+
} else {
|
|
296
|
+
await ObjectLayer.deleteMany();
|
|
297
|
+
await ObjectLayerRenderFrames.deleteMany();
|
|
298
|
+
await AtlasSpriteSheet.deleteMany();
|
|
299
|
+
}
|
|
300
|
+
|
|
301
|
+
const rfCount = renderFrameIds.length;
|
|
302
|
+
|
|
303
|
+
// Remove only the File documents that were referenced by atlas sprite sheets
|
|
304
|
+
let fileCount = 0;
|
|
305
|
+
if (atlasFileIds.length > 0) {
|
|
306
|
+
const result = await File.deleteMany({ _id: { $in: atlasFileIds } });
|
|
307
|
+
fileCount = result.deletedCount || 0;
|
|
308
|
+
}
|
|
309
|
+
|
|
310
|
+
// Delete IPFS pin registry records for all collected CIDs
|
|
311
|
+
if (cidsToUnpin.size > 0) {
|
|
312
|
+
const ipfsResult = await Ipfs.deleteMany({ cid: { $in: [...cidsToUnpin] } });
|
|
313
|
+
logger.info(`Dropped ${ipfsResult.deletedCount} Ipfs pin record(s)`);
|
|
314
|
+
}
|
|
315
|
+
|
|
316
|
+
// Unpin CIDs from IPFS Cluster + Kubo and remove MFS directories
|
|
317
|
+
let unpinCount = 0;
|
|
318
|
+
let mfsCount = 0;
|
|
319
|
+
for (const cid of cidsToUnpin) {
|
|
320
|
+
const ok = await IpfsClient.unpinCid(cid);
|
|
321
|
+
if (ok) unpinCount++;
|
|
322
|
+
}
|
|
323
|
+
for (const itemKey of itemIdsToClean) {
|
|
324
|
+
const ok = await IpfsClient.removeMfsPath(`/object-layer/${itemKey}`);
|
|
325
|
+
if (ok) mfsCount++;
|
|
326
|
+
}
|
|
327
|
+
|
|
328
|
+
logger.info(
|
|
329
|
+
`Dropped: ${olCount} ObjectLayer, ${rfCount} RenderFrames, ${atlasCount} AtlasSpriteSheet, ${fileCount} File (atlas)`,
|
|
330
|
+
);
|
|
331
|
+
logger.info(
|
|
332
|
+
`IPFS cleanup: ${unpinCount}/${cidsToUnpin.size} CIDs unpinned, ${mfsCount}/${itemIdsToClean.size} MFS paths removed`,
|
|
333
|
+
);
|
|
334
|
+
if (options.gitClean) {
|
|
335
|
+
shellExec(`cd src/client/public/cyberia && underpost run clean .`);
|
|
336
|
+
logger.info('Asset directory cleaned');
|
|
337
|
+
}
|
|
338
|
+
|
|
339
|
+
// --client-public: remove static asset folders for dropped items
|
|
340
|
+
if (options.clientPublic) {
|
|
341
|
+
const srcBase = './src/client/public/cyberia/assets';
|
|
342
|
+
const publicBase = `./public/${host}${path}/assets`;
|
|
343
|
+
let removedCount = 0;
|
|
344
|
+
for (const doc of olDocs) {
|
|
345
|
+
const docItemId = doc.data?.item?.id;
|
|
346
|
+
const docItemType = doc.data?.item?.type;
|
|
347
|
+
if (!docItemId || !docItemType) continue;
|
|
348
|
+
for (const base of [srcBase, publicBase]) {
|
|
349
|
+
const folder = `${base}/${docItemType}/${docItemId}`;
|
|
350
|
+
if (fs.existsSync(folder)) {
|
|
351
|
+
fs.removeSync(folder);
|
|
352
|
+
removedCount++;
|
|
353
|
+
logger.info(`Removed static folder: ${folder}`);
|
|
354
|
+
}
|
|
355
|
+
}
|
|
356
|
+
}
|
|
357
|
+
logger.info(`Static asset cleanup: ${removedCount} folder(s) removed`);
|
|
358
|
+
}
|
|
166
359
|
}
|
|
167
360
|
|
|
168
361
|
/** @type {Object|null} */
|
|
169
362
|
const storage = options.storageFilePath ? JSON.parse(fs.readFileSync(options.storageFilePath, 'utf8')) : null;
|
|
170
363
|
|
|
171
|
-
// ── Handle --import
|
|
364
|
+
// ── Handle --import (specific item-id(s)) ─────────────────────
|
|
172
365
|
if (options.import) {
|
|
366
|
+
if (!itemId) {
|
|
367
|
+
logger.error('item-id is required for --import (comma-separated item IDs, e.g. ol hatchet,sword --import)');
|
|
368
|
+
process.exit(1);
|
|
369
|
+
}
|
|
370
|
+
|
|
371
|
+
const itemIds = itemId
|
|
372
|
+
.split(',')
|
|
373
|
+
.map((id) => id.trim())
|
|
374
|
+
.filter(Boolean);
|
|
375
|
+
logger.info(`Importing specific item(s): ${itemIds.join(', ')}`);
|
|
376
|
+
|
|
377
|
+
for (const currentItemId of itemIds) {
|
|
378
|
+
// Search across all asset type directories to find which type contains this item-id
|
|
379
|
+
let foundType = null;
|
|
380
|
+
let foundFolder = null;
|
|
381
|
+
for (const type of Object.keys(itemTypes)) {
|
|
382
|
+
const candidateFolder = `./src/client/public/cyberia/assets/${type}/${currentItemId}`;
|
|
383
|
+
if (fs.existsSync(candidateFolder) && fs.statSync(candidateFolder).isDirectory()) {
|
|
384
|
+
foundType = type;
|
|
385
|
+
foundFolder = candidateFolder;
|
|
386
|
+
break;
|
|
387
|
+
}
|
|
388
|
+
}
|
|
389
|
+
|
|
390
|
+
if (!foundType) {
|
|
391
|
+
logger.error(
|
|
392
|
+
`Item-id '${currentItemId}' not found in any asset type directory (${Object.keys(itemTypes).join(', ')})`,
|
|
393
|
+
);
|
|
394
|
+
continue;
|
|
395
|
+
}
|
|
396
|
+
|
|
397
|
+
logger.info(`Found item '${currentItemId}' in type '${foundType}' at ${foundFolder}`);
|
|
398
|
+
|
|
399
|
+
const { objectLayerRenderFramesData, objectLayerData } =
|
|
400
|
+
await ObjectLayerEngine.buildObjectLayerDataFromDirectory({
|
|
401
|
+
folder: foundFolder,
|
|
402
|
+
objectLayerType: foundType,
|
|
403
|
+
objectLayerId: currentItemId,
|
|
404
|
+
});
|
|
405
|
+
|
|
406
|
+
// Write processed frames back to disk so WebP matches atlas
|
|
407
|
+
const srcBasePath = './src/client/public/cyberia/';
|
|
408
|
+
const publicBasePath = `./public/${host}${path}`;
|
|
409
|
+
await ObjectLayerEngine.writeStaticFrameAssets({
|
|
410
|
+
basePaths: [srcBasePath, publicBasePath],
|
|
411
|
+
itemType: foundType,
|
|
412
|
+
itemId: currentItemId,
|
|
413
|
+
objectLayerRenderFramesData,
|
|
414
|
+
objectLayerData,
|
|
415
|
+
cellPixelDim: 20,
|
|
416
|
+
});
|
|
417
|
+
|
|
418
|
+
// Check if an ObjectLayer with the same item.id already exists (upsert by item ID)
|
|
419
|
+
const existingOL = await ObjectLayer.findOne({ 'data.item.id': currentItemId });
|
|
420
|
+
let objectLayer;
|
|
421
|
+
|
|
422
|
+
if (existingOL) {
|
|
423
|
+
// ── Cut-over consistency: stage everything in memory before touching the live document ──
|
|
424
|
+
logger.info(`ObjectLayer '${currentItemId}' already exists (${existingOL._id}), staging update...`);
|
|
425
|
+
|
|
426
|
+
// 1. Prepare staging data entirely in memory (no DB writes yet)
|
|
427
|
+
const stagingData = JSON.parse(JSON.stringify(objectLayerData.data));
|
|
428
|
+
if (!stagingData.render) stagingData.render = {};
|
|
429
|
+
stagingData.render.cid = '';
|
|
430
|
+
stagingData.render.metadataCid = '';
|
|
431
|
+
|
|
432
|
+
// 2. Generate atlas, pin to IPFS, compute SHA-256 — all in memory
|
|
433
|
+
let cutoverReady = false;
|
|
434
|
+
let stagingFileDoc = null;
|
|
435
|
+
let stagingAtlasDoc = null;
|
|
436
|
+
let stagingCid = '';
|
|
437
|
+
let stagingSha256 = '';
|
|
438
|
+
try {
|
|
439
|
+
const itemKey = currentItemId;
|
|
440
|
+
|
|
441
|
+
// Generate atlas from in-memory render frames data (plain object, no DB doc needed)
|
|
442
|
+
const { buffer, metadata } = await AtlasSpriteSheetGenerator.generateAtlas(
|
|
443
|
+
objectLayerRenderFramesData,
|
|
444
|
+
itemKey,
|
|
445
|
+
20,
|
|
446
|
+
);
|
|
447
|
+
|
|
448
|
+
stagingFileDoc = await new File({
|
|
449
|
+
name: `${itemKey}-atlas.png`,
|
|
450
|
+
data: buffer,
|
|
451
|
+
size: buffer.length,
|
|
452
|
+
mimetype: 'image/png',
|
|
453
|
+
md5: crypto.createHash('md5').update(buffer).digest('hex'),
|
|
454
|
+
}).save();
|
|
455
|
+
|
|
456
|
+
let importItemCid = '';
|
|
457
|
+
let importItemMetadataCid = '';
|
|
458
|
+
try {
|
|
459
|
+
const ipfsResult = await IpfsClient.addBufferToIpfs(
|
|
460
|
+
buffer,
|
|
461
|
+
`${itemKey}_atlas_sprite_sheet.png`,
|
|
462
|
+
`/object-layer/${itemKey}/${itemKey}_atlas_sprite_sheet.png`,
|
|
463
|
+
);
|
|
464
|
+
if (ipfsResult) {
|
|
465
|
+
importItemCid = ipfsResult.cid;
|
|
466
|
+
logger.info(`[staging] Atlas pinned to IPFS – CID: ${importItemCid}`);
|
|
467
|
+
try {
|
|
468
|
+
await createPinRecord({
|
|
469
|
+
cid: importItemCid,
|
|
470
|
+
resourceType: 'atlas-sprite-sheet',
|
|
471
|
+
mfsPath: `/object-layer/${itemKey}/${itemKey}_atlas_sprite_sheet.png`,
|
|
472
|
+
options: { host, path },
|
|
473
|
+
});
|
|
474
|
+
} catch (prErr) {
|
|
475
|
+
logger.warn('[staging] Failed to create atlas pin record:', prErr.message);
|
|
476
|
+
}
|
|
477
|
+
}
|
|
478
|
+
} catch (ipfsError) {
|
|
479
|
+
logger.warn('[staging] Failed to add atlas to IPFS:', ipfsError.message);
|
|
480
|
+
}
|
|
481
|
+
|
|
482
|
+
try {
|
|
483
|
+
const metadataIpfsResult = await IpfsClient.addJsonToIpfs(
|
|
484
|
+
metadata,
|
|
485
|
+
`${itemKey}_atlas_sprite_sheet_metadata.json`,
|
|
486
|
+
`/object-layer/${itemKey}/${itemKey}_atlas_sprite_sheet_metadata.json`,
|
|
487
|
+
);
|
|
488
|
+
if (metadataIpfsResult) {
|
|
489
|
+
importItemMetadataCid = metadataIpfsResult.cid;
|
|
490
|
+
logger.info(`[staging] Atlas metadata pinned to IPFS – CID: ${importItemMetadataCid}`);
|
|
491
|
+
try {
|
|
492
|
+
await createPinRecord({
|
|
493
|
+
cid: importItemMetadataCid,
|
|
494
|
+
resourceType: 'atlas-metadata',
|
|
495
|
+
mfsPath: `/object-layer/${itemKey}/${itemKey}_atlas_sprite_sheet_metadata.json`,
|
|
496
|
+
options: { host, path },
|
|
497
|
+
});
|
|
498
|
+
} catch (prErr) {
|
|
499
|
+
logger.warn('[staging] Failed to create atlas-metadata pin record:', prErr.message);
|
|
500
|
+
}
|
|
501
|
+
}
|
|
502
|
+
} catch (ipfsError) {
|
|
503
|
+
logger.warn('[staging] Failed to add atlas metadata to IPFS:', ipfsError.message);
|
|
504
|
+
}
|
|
505
|
+
|
|
506
|
+
// Persist atlas doc (or update existing one for this itemKey)
|
|
507
|
+
stagingAtlasDoc = await AtlasSpriteSheet.findOne({ 'metadata.itemKey': itemKey });
|
|
508
|
+
if (stagingAtlasDoc) {
|
|
509
|
+
if (stagingAtlasDoc.fileId) await File.findByIdAndDelete(stagingAtlasDoc.fileId);
|
|
510
|
+
stagingAtlasDoc.fileId = stagingFileDoc._id;
|
|
511
|
+
stagingAtlasDoc.cid = importItemCid;
|
|
512
|
+
stagingAtlasDoc.metadata = metadata;
|
|
513
|
+
await stagingAtlasDoc.save();
|
|
514
|
+
} else {
|
|
515
|
+
stagingAtlasDoc = await new AtlasSpriteSheet({
|
|
516
|
+
fileId: stagingFileDoc._id,
|
|
517
|
+
cid: importItemCid,
|
|
518
|
+
metadata,
|
|
519
|
+
}).save();
|
|
520
|
+
}
|
|
521
|
+
|
|
522
|
+
// Finalize staging data in memory with render CIDs
|
|
523
|
+
stagingData.render.cid = importItemCid;
|
|
524
|
+
stagingData.render.metadataCid = importItemMetadataCid;
|
|
525
|
+
|
|
526
|
+
// Pin data JSON to IPFS (compute final SHA-256 in memory)
|
|
527
|
+
stagingSha256 = ObjectLayerEngine.computeSha256(stagingData);
|
|
528
|
+
try {
|
|
529
|
+
const ipfsDataResult = await IpfsClient.addJsonToIpfs(
|
|
530
|
+
stagingData,
|
|
531
|
+
`${itemKey}_data.json`,
|
|
532
|
+
`/object-layer/${itemKey}/${itemKey}_data.json`,
|
|
533
|
+
);
|
|
534
|
+
if (ipfsDataResult) {
|
|
535
|
+
stagingCid = ipfsDataResult.cid;
|
|
536
|
+
logger.info(`[staging] Data JSON pinned to IPFS – CID: ${stagingCid}`);
|
|
537
|
+
try {
|
|
538
|
+
await createPinRecord({
|
|
539
|
+
cid: stagingCid,
|
|
540
|
+
resourceType: 'object-layer-data',
|
|
541
|
+
mfsPath: `/object-layer/${itemKey}/${itemKey}_data.json`,
|
|
542
|
+
options: { host, path },
|
|
543
|
+
});
|
|
544
|
+
} catch (prErr) {
|
|
545
|
+
logger.warn('[staging] Failed to create data pin record:', prErr.message);
|
|
546
|
+
}
|
|
547
|
+
}
|
|
548
|
+
} catch (ipfsError) {
|
|
549
|
+
logger.warn('[staging] Failed to pin data JSON to IPFS:', ipfsError.message);
|
|
550
|
+
}
|
|
551
|
+
|
|
552
|
+
cutoverReady = true;
|
|
553
|
+
logger.info(`[staging] Item '${itemKey}' fully staged in memory, ready for cut-over`);
|
|
554
|
+
} catch (atlasError) {
|
|
555
|
+
logger.error(`[staging] Failed for ${currentItemId}, live document untouched:`, atlasError);
|
|
556
|
+
}
|
|
557
|
+
|
|
558
|
+
// 3. Atomic cut-over: create new RenderFrames, swap live ObjectLayer in a single update
|
|
559
|
+
if (cutoverReady) {
|
|
560
|
+
const oldRenderFramesId = existingOL.objectLayerRenderFramesId;
|
|
561
|
+
|
|
562
|
+
// Create the new RenderFrames doc (only now touches DB)
|
|
563
|
+
const newRenderFrames = await ObjectLayerRenderFrames.create(objectLayerRenderFramesData);
|
|
564
|
+
|
|
565
|
+
// Single atomic update of the live document
|
|
566
|
+
await ObjectLayer.findByIdAndUpdate(existingOL._id, {
|
|
567
|
+
data: stagingData,
|
|
568
|
+
sha256: stagingSha256,
|
|
569
|
+
cid: stagingCid,
|
|
570
|
+
objectLayerRenderFramesId: newRenderFrames._id,
|
|
571
|
+
atlasSpriteSheetId: stagingAtlasDoc._id,
|
|
572
|
+
});
|
|
573
|
+
|
|
574
|
+
// Clean up old render frames
|
|
575
|
+
if (oldRenderFramesId) {
|
|
576
|
+
await ObjectLayerRenderFrames.findByIdAndDelete(oldRenderFramesId);
|
|
577
|
+
}
|
|
578
|
+
|
|
579
|
+
logger.info(`[cut-over] Live document ${existingOL._id} updated atomically`);
|
|
580
|
+
} else {
|
|
581
|
+
// Rollback: only File/AtlasSpriteSheet were written, clean those up
|
|
582
|
+
if (stagingFileDoc) await File.findByIdAndDelete(stagingFileDoc._id);
|
|
583
|
+
logger.warn(`[cut-over] Staging rolled back for ${currentItemId}, live document preserved`);
|
|
584
|
+
}
|
|
585
|
+
|
|
586
|
+
objectLayer = await ObjectLayer.findById(existingOL._id);
|
|
587
|
+
} else {
|
|
588
|
+
// ── New item: stage everything before creating (same cut-over pattern) ──
|
|
589
|
+
logger.info(`ObjectLayer '${currentItemId}' is new, staging creation...`);
|
|
590
|
+
|
|
591
|
+
const itemKey = currentItemId;
|
|
592
|
+
const stagingData = JSON.parse(JSON.stringify(objectLayerData.data));
|
|
593
|
+
if (!stagingData.render) stagingData.render = {};
|
|
594
|
+
stagingData.render.cid = '';
|
|
595
|
+
stagingData.render.metadataCid = '';
|
|
596
|
+
|
|
597
|
+
let cutoverReady = false;
|
|
598
|
+
let stagingFileDoc = null;
|
|
599
|
+
let stagingAtlasDoc = null;
|
|
600
|
+
let stagingCid = '';
|
|
601
|
+
let stagingSha256 = '';
|
|
602
|
+
try {
|
|
603
|
+
const { buffer, metadata } = await AtlasSpriteSheetGenerator.generateAtlas(
|
|
604
|
+
objectLayerRenderFramesData,
|
|
605
|
+
itemKey,
|
|
606
|
+
20,
|
|
607
|
+
);
|
|
608
|
+
|
|
609
|
+
stagingFileDoc = await new File({
|
|
610
|
+
name: `${itemKey}-atlas.png`,
|
|
611
|
+
data: buffer,
|
|
612
|
+
size: buffer.length,
|
|
613
|
+
mimetype: 'image/png',
|
|
614
|
+
md5: crypto.createHash('md5').update(buffer).digest('hex'),
|
|
615
|
+
}).save();
|
|
616
|
+
|
|
617
|
+
let importItemCid = '';
|
|
618
|
+
let importItemMetadataCid = '';
|
|
619
|
+
try {
|
|
620
|
+
const ipfsResult = await IpfsClient.addBufferToIpfs(
|
|
621
|
+
buffer,
|
|
622
|
+
`${itemKey}_atlas_sprite_sheet.png`,
|
|
623
|
+
`/object-layer/${itemKey}/${itemKey}_atlas_sprite_sheet.png`,
|
|
624
|
+
);
|
|
625
|
+
if (ipfsResult) {
|
|
626
|
+
importItemCid = ipfsResult.cid;
|
|
627
|
+
logger.info(`[staging] Atlas pinned to IPFS – CID: ${importItemCid}`);
|
|
628
|
+
try {
|
|
629
|
+
await createPinRecord({
|
|
630
|
+
cid: importItemCid,
|
|
631
|
+
resourceType: 'atlas-sprite-sheet',
|
|
632
|
+
mfsPath: `/object-layer/${itemKey}/${itemKey}_atlas_sprite_sheet.png`,
|
|
633
|
+
options: { host, path },
|
|
634
|
+
});
|
|
635
|
+
} catch (prErr) {
|
|
636
|
+
logger.warn('[staging] Failed to create atlas pin record:', prErr.message);
|
|
637
|
+
}
|
|
638
|
+
}
|
|
639
|
+
} catch (ipfsError) {
|
|
640
|
+
logger.warn('[staging] Failed to add atlas to IPFS:', ipfsError.message);
|
|
641
|
+
}
|
|
642
|
+
|
|
643
|
+
try {
|
|
644
|
+
const metadataIpfsResult = await IpfsClient.addJsonToIpfs(
|
|
645
|
+
metadata,
|
|
646
|
+
`${itemKey}_atlas_sprite_sheet_metadata.json`,
|
|
647
|
+
`/object-layer/${itemKey}/${itemKey}_atlas_sprite_sheet_metadata.json`,
|
|
648
|
+
);
|
|
649
|
+
if (metadataIpfsResult) {
|
|
650
|
+
importItemMetadataCid = metadataIpfsResult.cid;
|
|
651
|
+
logger.info(`[staging] Atlas metadata pinned to IPFS – CID: ${importItemMetadataCid}`);
|
|
652
|
+
try {
|
|
653
|
+
await createPinRecord({
|
|
654
|
+
cid: importItemMetadataCid,
|
|
655
|
+
resourceType: 'atlas-metadata',
|
|
656
|
+
mfsPath: `/object-layer/${itemKey}/${itemKey}_atlas_sprite_sheet_metadata.json`,
|
|
657
|
+
options: { host, path },
|
|
658
|
+
});
|
|
659
|
+
} catch (prErr) {
|
|
660
|
+
logger.warn('[staging] Failed to create atlas-metadata pin record:', prErr.message);
|
|
661
|
+
}
|
|
662
|
+
}
|
|
663
|
+
} catch (ipfsError) {
|
|
664
|
+
logger.warn('[staging] Failed to add atlas metadata to IPFS:', ipfsError.message);
|
|
665
|
+
}
|
|
666
|
+
|
|
667
|
+
stagingAtlasDoc = await AtlasSpriteSheet.findOne({ 'metadata.itemKey': itemKey });
|
|
668
|
+
if (stagingAtlasDoc) {
|
|
669
|
+
if (stagingAtlasDoc.fileId) await File.findByIdAndDelete(stagingAtlasDoc.fileId);
|
|
670
|
+
stagingAtlasDoc.fileId = stagingFileDoc._id;
|
|
671
|
+
stagingAtlasDoc.cid = importItemCid;
|
|
672
|
+
stagingAtlasDoc.metadata = metadata;
|
|
673
|
+
await stagingAtlasDoc.save();
|
|
674
|
+
} else {
|
|
675
|
+
stagingAtlasDoc = await new AtlasSpriteSheet({
|
|
676
|
+
fileId: stagingFileDoc._id,
|
|
677
|
+
cid: importItemCid,
|
|
678
|
+
metadata,
|
|
679
|
+
}).save();
|
|
680
|
+
}
|
|
681
|
+
|
|
682
|
+
stagingData.render.cid = importItemCid;
|
|
683
|
+
stagingData.render.metadataCid = importItemMetadataCid;
|
|
684
|
+
|
|
685
|
+
stagingSha256 = ObjectLayerEngine.computeSha256(stagingData);
|
|
686
|
+
try {
|
|
687
|
+
const ipfsDataResult = await IpfsClient.addJsonToIpfs(
|
|
688
|
+
stagingData,
|
|
689
|
+
`${itemKey}_data.json`,
|
|
690
|
+
`/object-layer/${itemKey}/${itemKey}_data.json`,
|
|
691
|
+
);
|
|
692
|
+
if (ipfsDataResult) {
|
|
693
|
+
stagingCid = ipfsDataResult.cid;
|
|
694
|
+
logger.info(`[staging] Data JSON pinned to IPFS – CID: ${stagingCid}`);
|
|
695
|
+
try {
|
|
696
|
+
await createPinRecord({
|
|
697
|
+
cid: stagingCid,
|
|
698
|
+
resourceType: 'object-layer-data',
|
|
699
|
+
mfsPath: `/object-layer/${itemKey}/${itemKey}_data.json`,
|
|
700
|
+
options: { host, path },
|
|
701
|
+
});
|
|
702
|
+
} catch (prErr) {
|
|
703
|
+
logger.warn('[staging] Failed to create data pin record:', prErr.message);
|
|
704
|
+
}
|
|
705
|
+
}
|
|
706
|
+
} catch (ipfsError) {
|
|
707
|
+
logger.warn('[staging] Failed to pin data JSON to IPFS:', ipfsError.message);
|
|
708
|
+
}
|
|
709
|
+
|
|
710
|
+
cutoverReady = true;
|
|
711
|
+
logger.info(`[staging] Item '${itemKey}' fully staged in memory, ready for creation`);
|
|
712
|
+
} catch (atlasError) {
|
|
713
|
+
logger.error(`[staging] Failed for ${currentItemId}, no document created:`, atlasError);
|
|
714
|
+
}
|
|
715
|
+
|
|
716
|
+
if (cutoverReady) {
|
|
717
|
+
const newRenderFrames = await ObjectLayerRenderFrames.create(objectLayerRenderFramesData);
|
|
718
|
+
objectLayer = await ObjectLayer.create({
|
|
719
|
+
data: stagingData,
|
|
720
|
+
sha256: stagingSha256,
|
|
721
|
+
cid: stagingCid,
|
|
722
|
+
objectLayerRenderFramesId: newRenderFrames._id,
|
|
723
|
+
atlasSpriteSheetId: stagingAtlasDoc._id,
|
|
724
|
+
});
|
|
725
|
+
logger.info(`[cut-over] New ObjectLayer ${objectLayer._id} created with all CIDs populated`);
|
|
726
|
+
} else {
|
|
727
|
+
if (stagingFileDoc) await File.findByIdAndDelete(stagingFileDoc._id);
|
|
728
|
+
logger.warn(`[cut-over] Staging failed for ${currentItemId}, no ObjectLayer created`);
|
|
729
|
+
continue;
|
|
730
|
+
}
|
|
731
|
+
}
|
|
732
|
+
|
|
733
|
+
// Reload final state to include CID and render updates
|
|
734
|
+
const finalObjectLayer = await ObjectLayer.findById(objectLayer._id).populate('objectLayerRenderFramesId');
|
|
735
|
+
console.log(finalObjectLayer.toObject());
|
|
736
|
+
}
|
|
737
|
+
}
|
|
738
|
+
|
|
739
|
+
// ── Handle --import-types (batch by type) ────────────────────────
|
|
740
|
+
if (options.importTypes) {
|
|
173
741
|
/** @type {boolean} */
|
|
174
|
-
const isImportAll = options.
|
|
742
|
+
const isImportAll = options.importTypes === 'all';
|
|
175
743
|
|
|
176
744
|
/** @type {string[]} */
|
|
177
|
-
const argItemTypes = isImportAll ? Object.keys(itemTypes) : options.
|
|
745
|
+
const argItemTypes = isImportAll ? Object.keys(itemTypes) : options.importTypes.split(',');
|
|
178
746
|
|
|
179
747
|
/**
|
|
180
748
|
* Accumulated object layer data keyed by objectLayerId.
|
|
@@ -182,6 +750,19 @@ try {
|
|
|
182
750
|
*/
|
|
183
751
|
const objectLayers = {};
|
|
184
752
|
|
|
753
|
+
// When importing all types, pre-fetch existing item IDs so we can skip them entirely
|
|
754
|
+
/** @type {Set<string>} */
|
|
755
|
+
const existingItemIds = new Set();
|
|
756
|
+
if (isImportAll) {
|
|
757
|
+
const existingDocs = await ObjectLayer.find({}, { 'data.item.id': 1 }).lean();
|
|
758
|
+
for (const doc of existingDocs) {
|
|
759
|
+
if (doc.data?.item?.id) existingItemIds.add(doc.data.item.id);
|
|
760
|
+
}
|
|
761
|
+
if (existingItemIds.size > 0) {
|
|
762
|
+
logger.info(`Skipping ${existingItemIds.size} existing item(s): ${[...existingItemIds].join(', ')}`);
|
|
763
|
+
}
|
|
764
|
+
}
|
|
765
|
+
|
|
185
766
|
for (const argItemType of argItemTypes) {
|
|
186
767
|
await pngDirectoryIteratorByObjectLayerType(
|
|
187
768
|
argItemType,
|
|
@@ -192,6 +773,9 @@ try {
|
|
|
192
773
|
)
|
|
193
774
|
return;
|
|
194
775
|
|
|
776
|
+
// Skip items that already exist in the database (bulk import only)
|
|
777
|
+
if (isImportAll && existingItemIds.has(objectLayerId)) return;
|
|
778
|
+
|
|
195
779
|
console.log(framePath, { objectLayerType, objectLayerId, direction, frame });
|
|
196
780
|
|
|
197
781
|
// On first encounter of an objectLayerId, build its data from the asset directory
|
|
@@ -204,6 +788,18 @@ try {
|
|
|
204
788
|
objectLayerId,
|
|
205
789
|
});
|
|
206
790
|
|
|
791
|
+
// Write processed frames back to disk so WebP matches atlas
|
|
792
|
+
const srcBasePath = './src/client/public/cyberia/';
|
|
793
|
+
const publicBasePath = `./public/${host}${path}`;
|
|
794
|
+
await ObjectLayerEngine.writeStaticFrameAssets({
|
|
795
|
+
basePaths: [srcBasePath, publicBasePath],
|
|
796
|
+
itemType: objectLayerType,
|
|
797
|
+
itemId: objectLayerId,
|
|
798
|
+
objectLayerRenderFramesData,
|
|
799
|
+
objectLayerData,
|
|
800
|
+
cellPixelDim: 20,
|
|
801
|
+
});
|
|
802
|
+
|
|
207
803
|
objectLayers[objectLayerId] = {
|
|
208
804
|
...objectLayerData,
|
|
209
805
|
objectLayerRenderFramesData,
|
|
@@ -222,116 +818,375 @@ try {
|
|
|
222
818
|
const shouldGenerateAtlas = !isImportAll;
|
|
223
819
|
|
|
224
820
|
if (shouldGenerateAtlas) {
|
|
225
|
-
//
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
|
|
231
|
-
|
|
232
|
-
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
}
|
|
236
|
-
|
|
821
|
+
// Check if an ObjectLayer with the same item.id already exists (upsert by item ID)
|
|
822
|
+
const existingOL = await ObjectLayer.findOne({ 'data.item.id': objectLayerId });
|
|
823
|
+
let objectLayer;
|
|
824
|
+
|
|
825
|
+
if (existingOL) {
|
|
826
|
+
// ── Cut-over consistency: stage everything in memory before touching the live document ──
|
|
827
|
+
logger.info(`ObjectLayer '${objectLayerId}' already exists (${existingOL._id}), staging update...`);
|
|
828
|
+
|
|
829
|
+
// 1. Prepare staging data entirely in memory (no DB writes yet)
|
|
830
|
+
const stagingData = JSON.parse(JSON.stringify(entry.data));
|
|
831
|
+
if (!stagingData.render) stagingData.render = {};
|
|
832
|
+
stagingData.render.cid = '';
|
|
833
|
+
stagingData.render.metadataCid = '';
|
|
834
|
+
|
|
835
|
+
// 2. Generate atlas, pin to IPFS, compute SHA-256 — all in memory
|
|
836
|
+
let cutoverReady = false;
|
|
837
|
+
let stagingFileDoc = null;
|
|
838
|
+
let stagingAtlasDoc = null;
|
|
839
|
+
let stagingCid = '';
|
|
840
|
+
let stagingSha256 = '';
|
|
841
|
+
try {
|
|
842
|
+
const itemKey = objectLayerId;
|
|
237
843
|
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
|
|
242
|
-
|
|
243
|
-
|
|
844
|
+
// Generate atlas from in-memory render frames data (plain object, no DB doc needed)
|
|
845
|
+
const { buffer, metadata } = await AtlasSpriteSheetGenerator.generateAtlas(
|
|
846
|
+
entry.objectLayerRenderFramesData,
|
|
847
|
+
itemKey,
|
|
848
|
+
20,
|
|
849
|
+
);
|
|
244
850
|
|
|
245
|
-
|
|
246
|
-
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
|
|
851
|
+
stagingFileDoc = await new File({
|
|
852
|
+
name: `${itemKey}-atlas.png`,
|
|
853
|
+
data: buffer,
|
|
854
|
+
size: buffer.length,
|
|
855
|
+
mimetype: 'image/png',
|
|
856
|
+
md5: crypto.createHash('md5').update(buffer).digest('hex'),
|
|
857
|
+
}).save();
|
|
250
858
|
|
|
251
|
-
|
|
252
|
-
|
|
253
|
-
|
|
254
|
-
|
|
255
|
-
|
|
256
|
-
|
|
257
|
-
|
|
859
|
+
let importAtlasCid = '';
|
|
860
|
+
let importAtlasMetadataCid = '';
|
|
861
|
+
try {
|
|
862
|
+
const ipfsResult = await IpfsClient.addBufferToIpfs(
|
|
863
|
+
buffer,
|
|
864
|
+
`${itemKey}_atlas_sprite_sheet.png`,
|
|
865
|
+
`/object-layer/${itemKey}/${itemKey}_atlas_sprite_sheet.png`,
|
|
866
|
+
);
|
|
867
|
+
if (ipfsResult) {
|
|
868
|
+
importAtlasCid = ipfsResult.cid;
|
|
869
|
+
logger.info(`[staging] Atlas pinned to IPFS – CID: ${importAtlasCid}`);
|
|
870
|
+
try {
|
|
871
|
+
await createPinRecord({
|
|
872
|
+
cid: importAtlasCid,
|
|
873
|
+
resourceType: 'atlas-sprite-sheet',
|
|
874
|
+
mfsPath: `/object-layer/${itemKey}/${itemKey}_atlas_sprite_sheet.png`,
|
|
875
|
+
options: { host, path },
|
|
876
|
+
});
|
|
877
|
+
} catch (prErr) {
|
|
878
|
+
logger.warn('[staging] Failed to create atlas pin record:', prErr.message);
|
|
879
|
+
}
|
|
880
|
+
}
|
|
881
|
+
} catch (ipfsError) {
|
|
882
|
+
logger.warn('[staging] Failed to add atlas to IPFS:', ipfsError.message);
|
|
883
|
+
}
|
|
258
884
|
|
|
259
|
-
|
|
260
|
-
|
|
261
|
-
|
|
262
|
-
|
|
263
|
-
|
|
264
|
-
|
|
265
|
-
|
|
266
|
-
|
|
267
|
-
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
885
|
+
try {
|
|
886
|
+
const metadataIpfsResult = await IpfsClient.addJsonToIpfs(
|
|
887
|
+
metadata,
|
|
888
|
+
`${itemKey}_atlas_sprite_sheet_metadata.json`,
|
|
889
|
+
`/object-layer/${itemKey}/${itemKey}_atlas_sprite_sheet_metadata.json`,
|
|
890
|
+
);
|
|
891
|
+
if (metadataIpfsResult) {
|
|
892
|
+
importAtlasMetadataCid = metadataIpfsResult.cid;
|
|
893
|
+
logger.info(`[staging] Atlas metadata pinned to IPFS – CID: ${importAtlasMetadataCid}`);
|
|
894
|
+
try {
|
|
895
|
+
await createPinRecord({
|
|
896
|
+
cid: importAtlasMetadataCid,
|
|
897
|
+
resourceType: 'atlas-metadata',
|
|
898
|
+
mfsPath: `/object-layer/${itemKey}/${itemKey}_atlas_sprite_sheet_metadata.json`,
|
|
899
|
+
options: { host, path },
|
|
900
|
+
});
|
|
901
|
+
} catch (prErr) {
|
|
902
|
+
logger.warn('[staging] Failed to create atlas-metadata pin record:', prErr.message);
|
|
903
|
+
}
|
|
904
|
+
}
|
|
905
|
+
} catch (ipfsError) {
|
|
906
|
+
logger.warn('[staging] Failed to add atlas metadata to IPFS:', ipfsError.message);
|
|
271
907
|
}
|
|
272
|
-
|
|
273
|
-
|
|
908
|
+
|
|
909
|
+
stagingAtlasDoc = await AtlasSpriteSheet.findOne({ 'metadata.itemKey': itemKey });
|
|
910
|
+
if (stagingAtlasDoc) {
|
|
911
|
+
if (stagingAtlasDoc.fileId) await File.findByIdAndDelete(stagingAtlasDoc.fileId);
|
|
912
|
+
stagingAtlasDoc.fileId = stagingFileDoc._id;
|
|
913
|
+
stagingAtlasDoc.cid = importAtlasCid;
|
|
914
|
+
stagingAtlasDoc.metadata = metadata;
|
|
915
|
+
await stagingAtlasDoc.save();
|
|
916
|
+
} else {
|
|
917
|
+
stagingAtlasDoc = await new AtlasSpriteSheet({
|
|
918
|
+
fileId: stagingFileDoc._id,
|
|
919
|
+
cid: importAtlasCid,
|
|
920
|
+
metadata,
|
|
921
|
+
}).save();
|
|
922
|
+
}
|
|
923
|
+
|
|
924
|
+
// Finalize staging data in memory with render CIDs
|
|
925
|
+
stagingData.render.cid = importAtlasCid;
|
|
926
|
+
stagingData.render.metadataCid = importAtlasMetadataCid;
|
|
927
|
+
|
|
928
|
+
// Pin data JSON to IPFS (compute final SHA-256 in memory)
|
|
929
|
+
stagingSha256 = ObjectLayerEngine.computeSha256(stagingData);
|
|
930
|
+
try {
|
|
931
|
+
const ipfsDataResult = await IpfsClient.addJsonToIpfs(
|
|
932
|
+
stagingData,
|
|
933
|
+
`${itemKey}_data.json`,
|
|
934
|
+
`/object-layer/${itemKey}/${itemKey}_data.json`,
|
|
935
|
+
);
|
|
936
|
+
if (ipfsDataResult) {
|
|
937
|
+
stagingCid = ipfsDataResult.cid;
|
|
938
|
+
logger.info(`[staging] Data JSON pinned to IPFS – CID: ${stagingCid}`);
|
|
939
|
+
try {
|
|
940
|
+
await createPinRecord({
|
|
941
|
+
cid: stagingCid,
|
|
942
|
+
resourceType: 'object-layer-data',
|
|
943
|
+
mfsPath: `/object-layer/${itemKey}/${itemKey}_data.json`,
|
|
944
|
+
options: { host, path },
|
|
945
|
+
});
|
|
946
|
+
} catch (prErr) {
|
|
947
|
+
logger.warn('[staging] Failed to create data pin record:', prErr.message);
|
|
948
|
+
}
|
|
949
|
+
}
|
|
950
|
+
} catch (ipfsError) {
|
|
951
|
+
logger.warn('[staging] Failed to pin data JSON to IPFS:', ipfsError.message);
|
|
952
|
+
}
|
|
953
|
+
|
|
954
|
+
cutoverReady = true;
|
|
955
|
+
logger.info(`[staging] Item '${itemKey}' fully staged in memory, ready for cut-over`);
|
|
956
|
+
} catch (atlasError) {
|
|
957
|
+
logger.error(`[staging] Failed for ${objectLayerId}, live document untouched:`, atlasError);
|
|
274
958
|
}
|
|
275
959
|
|
|
276
|
-
//
|
|
277
|
-
|
|
278
|
-
const
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
|
|
960
|
+
// 3. Atomic cut-over: create new RenderFrames, swap live ObjectLayer in a single update
|
|
961
|
+
if (cutoverReady) {
|
|
962
|
+
const oldRenderFramesId = existingOL.objectLayerRenderFramesId;
|
|
963
|
+
const newRenderFrames = await ObjectLayerRenderFrames.create(entry.objectLayerRenderFramesData);
|
|
964
|
+
|
|
965
|
+
await ObjectLayer.findByIdAndUpdate(existingOL._id, {
|
|
966
|
+
data: stagingData,
|
|
967
|
+
sha256: stagingSha256,
|
|
968
|
+
cid: stagingCid,
|
|
969
|
+
objectLayerRenderFramesId: newRenderFrames._id,
|
|
970
|
+
atlasSpriteSheetId: stagingAtlasDoc._id,
|
|
971
|
+
});
|
|
972
|
+
|
|
973
|
+
if (oldRenderFramesId) {
|
|
974
|
+
await ObjectLayerRenderFrames.findByIdAndDelete(oldRenderFramesId);
|
|
286
975
|
}
|
|
287
|
-
|
|
288
|
-
|
|
976
|
+
logger.info(`[cut-over] Live document ${existingOL._id} updated atomically`);
|
|
977
|
+
} else {
|
|
978
|
+
if (stagingFileDoc) await File.findByIdAndDelete(stagingFileDoc._id);
|
|
979
|
+
logger.warn(`[cut-over] Staging rolled back for ${objectLayerId}, live document preserved`);
|
|
289
980
|
}
|
|
290
981
|
|
|
291
|
-
|
|
982
|
+
objectLayer = await ObjectLayer.findById(existingOL._id);
|
|
983
|
+
} else {
|
|
984
|
+
// ── New item: stage everything before creating (same cut-over pattern) ──
|
|
985
|
+
logger.info(`ObjectLayer '${objectLayerId}' is new, staging creation...`);
|
|
986
|
+
|
|
987
|
+
const itemKey = objectLayerId;
|
|
988
|
+
const stagingData = JSON.parse(JSON.stringify(entry.data));
|
|
989
|
+
if (!stagingData.render) stagingData.render = {};
|
|
990
|
+
stagingData.render.cid = '';
|
|
991
|
+
stagingData.render.metadataCid = '';
|
|
992
|
+
|
|
993
|
+
let cutoverReady = false;
|
|
994
|
+
let stagingFileDoc = null;
|
|
995
|
+
let stagingAtlasDoc = null;
|
|
996
|
+
let stagingCid = '';
|
|
997
|
+
let stagingSha256 = '';
|
|
998
|
+
try {
|
|
999
|
+
const { buffer, metadata } = await AtlasSpriteSheetGenerator.generateAtlas(
|
|
1000
|
+
entry.objectLayerRenderFramesData,
|
|
1001
|
+
itemKey,
|
|
1002
|
+
20,
|
|
1003
|
+
);
|
|
292
1004
|
|
|
293
|
-
|
|
294
|
-
|
|
295
|
-
|
|
296
|
-
|
|
297
|
-
|
|
298
|
-
|
|
299
|
-
} else {
|
|
300
|
-
atlasDoc = await new AtlasSpriteSheet({
|
|
301
|
-
fileId: fileDoc._id,
|
|
302
|
-
cid: importAtlasCid,
|
|
303
|
-
metadata,
|
|
1005
|
+
stagingFileDoc = await new File({
|
|
1006
|
+
name: `${itemKey}-atlas.png`,
|
|
1007
|
+
data: buffer,
|
|
1008
|
+
size: buffer.length,
|
|
1009
|
+
mimetype: 'image/png',
|
|
1010
|
+
md5: crypto.createHash('md5').update(buffer).digest('hex'),
|
|
304
1011
|
}).save();
|
|
305
|
-
logger.info(`Created new AtlasSpriteSheet document: ${atlasDoc._id}`);
|
|
306
|
-
}
|
|
307
1012
|
|
|
308
|
-
|
|
309
|
-
|
|
310
|
-
|
|
311
|
-
|
|
312
|
-
|
|
313
|
-
|
|
1013
|
+
let importAtlasCid = '';
|
|
1014
|
+
let importAtlasMetadataCid = '';
|
|
1015
|
+
try {
|
|
1016
|
+
const ipfsResult = await IpfsClient.addBufferToIpfs(
|
|
1017
|
+
buffer,
|
|
1018
|
+
`${itemKey}_atlas_sprite_sheet.png`,
|
|
1019
|
+
`/object-layer/${itemKey}/${itemKey}_atlas_sprite_sheet.png`,
|
|
1020
|
+
);
|
|
1021
|
+
if (ipfsResult) {
|
|
1022
|
+
importAtlasCid = ipfsResult.cid;
|
|
1023
|
+
logger.info(`[staging] Atlas pinned to IPFS – CID: ${importAtlasCid}`);
|
|
1024
|
+
try {
|
|
1025
|
+
await createPinRecord({
|
|
1026
|
+
cid: importAtlasCid,
|
|
1027
|
+
resourceType: 'atlas-sprite-sheet',
|
|
1028
|
+
mfsPath: `/object-layer/${itemKey}/${itemKey}_atlas_sprite_sheet.png`,
|
|
1029
|
+
options: { host, path },
|
|
1030
|
+
});
|
|
1031
|
+
} catch (prErr) {
|
|
1032
|
+
logger.warn('[staging] Failed to create atlas pin record:', prErr.message);
|
|
1033
|
+
}
|
|
1034
|
+
}
|
|
1035
|
+
} catch (ipfsError) {
|
|
1036
|
+
logger.warn('[staging] Failed to add atlas to IPFS:', ipfsError.message);
|
|
1037
|
+
}
|
|
314
1038
|
|
|
315
|
-
|
|
316
|
-
|
|
317
|
-
|
|
1039
|
+
try {
|
|
1040
|
+
const metadataIpfsResult = await IpfsClient.addJsonToIpfs(
|
|
1041
|
+
metadata,
|
|
1042
|
+
`${itemKey}_atlas_sprite_sheet_metadata.json`,
|
|
1043
|
+
`/object-layer/${itemKey}/${itemKey}_atlas_sprite_sheet_metadata.json`,
|
|
1044
|
+
);
|
|
1045
|
+
if (metadataIpfsResult) {
|
|
1046
|
+
importAtlasMetadataCid = metadataIpfsResult.cid;
|
|
1047
|
+
logger.info(`[staging] Atlas metadata pinned to IPFS – CID: ${importAtlasMetadataCid}`);
|
|
1048
|
+
try {
|
|
1049
|
+
await createPinRecord({
|
|
1050
|
+
cid: importAtlasMetadataCid,
|
|
1051
|
+
resourceType: 'atlas-metadata',
|
|
1052
|
+
mfsPath: `/object-layer/${itemKey}/${itemKey}_atlas_sprite_sheet_metadata.json`,
|
|
1053
|
+
options: { host, path },
|
|
1054
|
+
});
|
|
1055
|
+
} catch (prErr) {
|
|
1056
|
+
logger.warn('[staging] Failed to create atlas-metadata pin record:', prErr.message);
|
|
1057
|
+
}
|
|
1058
|
+
}
|
|
1059
|
+
} catch (ipfsError) {
|
|
1060
|
+
logger.warn('[staging] Failed to add atlas metadata to IPFS:', ipfsError.message);
|
|
1061
|
+
}
|
|
1062
|
+
|
|
1063
|
+
stagingAtlasDoc = await AtlasSpriteSheet.findOne({ 'metadata.itemKey': itemKey });
|
|
1064
|
+
if (stagingAtlasDoc) {
|
|
1065
|
+
if (stagingAtlasDoc.fileId) await File.findByIdAndDelete(stagingAtlasDoc.fileId);
|
|
1066
|
+
stagingAtlasDoc.fileId = stagingFileDoc._id;
|
|
1067
|
+
stagingAtlasDoc.cid = importAtlasCid;
|
|
1068
|
+
stagingAtlasDoc.metadata = metadata;
|
|
1069
|
+
await stagingAtlasDoc.save();
|
|
1070
|
+
} else {
|
|
1071
|
+
stagingAtlasDoc = await new AtlasSpriteSheet({
|
|
1072
|
+
fileId: stagingFileDoc._id,
|
|
1073
|
+
cid: importAtlasCid,
|
|
1074
|
+
metadata,
|
|
1075
|
+
}).save();
|
|
1076
|
+
}
|
|
1077
|
+
|
|
1078
|
+
stagingData.render.cid = importAtlasCid;
|
|
1079
|
+
stagingData.render.metadataCid = importAtlasMetadataCid;
|
|
1080
|
+
|
|
1081
|
+
stagingSha256 = ObjectLayerEngine.computeSha256(stagingData);
|
|
1082
|
+
try {
|
|
1083
|
+
const ipfsDataResult = await IpfsClient.addJsonToIpfs(
|
|
1084
|
+
stagingData,
|
|
1085
|
+
`${itemKey}_data.json`,
|
|
1086
|
+
`/object-layer/${itemKey}/${itemKey}_data.json`,
|
|
1087
|
+
);
|
|
1088
|
+
if (ipfsDataResult) {
|
|
1089
|
+
stagingCid = ipfsDataResult.cid;
|
|
1090
|
+
logger.info(`[staging] Data JSON pinned to IPFS – CID: ${stagingCid}`);
|
|
1091
|
+
try {
|
|
1092
|
+
await createPinRecord({
|
|
1093
|
+
cid: stagingCid,
|
|
1094
|
+
resourceType: 'object-layer-data',
|
|
1095
|
+
mfsPath: `/object-layer/${itemKey}/${itemKey}_data.json`,
|
|
1096
|
+
options: { host, path },
|
|
1097
|
+
});
|
|
1098
|
+
} catch (prErr) {
|
|
1099
|
+
logger.warn('[staging] Failed to create data pin record:', prErr.message);
|
|
1100
|
+
}
|
|
1101
|
+
}
|
|
1102
|
+
} catch (ipfsError) {
|
|
1103
|
+
logger.warn('[staging] Failed to pin data JSON to IPFS:', ipfsError.message);
|
|
1104
|
+
}
|
|
1105
|
+
|
|
1106
|
+
cutoverReady = true;
|
|
1107
|
+
logger.info(`[staging] Item '${itemKey}' fully staged in memory, ready for creation`);
|
|
1108
|
+
} catch (atlasError) {
|
|
1109
|
+
logger.error(`[staging] Failed for ${objectLayerId}, no document created:`, atlasError);
|
|
1110
|
+
}
|
|
1111
|
+
|
|
1112
|
+
if (cutoverReady) {
|
|
1113
|
+
const newRenderFrames = await ObjectLayerRenderFrames.create(entry.objectLayerRenderFramesData);
|
|
1114
|
+
objectLayer = await ObjectLayer.create({
|
|
1115
|
+
data: stagingData,
|
|
1116
|
+
sha256: stagingSha256,
|
|
1117
|
+
cid: stagingCid,
|
|
1118
|
+
objectLayerRenderFramesId: newRenderFrames._id,
|
|
1119
|
+
atlasSpriteSheetId: stagingAtlasDoc._id,
|
|
1120
|
+
});
|
|
1121
|
+
logger.info(`[cut-over] New ObjectLayer ${objectLayer._id} created with all CIDs populated`);
|
|
1122
|
+
} else {
|
|
1123
|
+
if (stagingFileDoc) await File.findByIdAndDelete(stagingFileDoc._id);
|
|
1124
|
+
logger.warn(`[cut-over] Staging failed for ${objectLayerId}, no ObjectLayer created`);
|
|
1125
|
+
continue;
|
|
1126
|
+
}
|
|
318
1127
|
}
|
|
319
1128
|
|
|
320
|
-
|
|
1129
|
+
// Reload final state to include CID and render updates
|
|
1130
|
+
const finalObjectLayer = await ObjectLayer.findById((objectLayer._id || objectLayer).toString()).populate(
|
|
1131
|
+
'objectLayerRenderFramesId',
|
|
1132
|
+
);
|
|
1133
|
+
console.log(finalObjectLayer.toObject());
|
|
321
1134
|
} else {
|
|
1135
|
+
// --import all: skip items that already exist in the database
|
|
1136
|
+
if (existingItemIds.has(objectLayerId)) continue;
|
|
1137
|
+
|
|
322
1138
|
// --import all: create documents without atlas generation
|
|
323
|
-
const
|
|
324
|
-
|
|
325
|
-
|
|
326
|
-
|
|
327
|
-
|
|
328
|
-
|
|
329
|
-
|
|
330
|
-
|
|
331
|
-
|
|
1139
|
+
const existingOL = await ObjectLayer.findOne({ 'data.item.id': objectLayerId });
|
|
1140
|
+
let objectLayer;
|
|
1141
|
+
|
|
1142
|
+
if (existingOL) {
|
|
1143
|
+
logger.info(
|
|
1144
|
+
`ObjectLayer '${objectLayerId}' already exists (${existingOL._id}), staging update (atlas skipped)...`,
|
|
1145
|
+
);
|
|
1146
|
+
|
|
1147
|
+
// ── In-memory staging (no atlas) ──────────────────────
|
|
1148
|
+
const stagingData = JSON.parse(JSON.stringify(entry.data));
|
|
1149
|
+
if (!stagingData.render) stagingData.render = {};
|
|
1150
|
+
stagingData.render.cid = '';
|
|
1151
|
+
stagingData.render.metadataCid = '';
|
|
1152
|
+
const stagingSha256 = ObjectLayerEngine.computeSha256(stagingData);
|
|
1153
|
+
|
|
1154
|
+
// Atomic cut-over: create new RenderFrames, swap live doc, delete old
|
|
1155
|
+
const newRenderFrames = await ObjectLayerRenderFrames.create(entry.objectLayerRenderFramesData);
|
|
1156
|
+
const oldRenderFramesId = existingOL.objectLayerRenderFramesId;
|
|
1157
|
+
|
|
1158
|
+
await ObjectLayer.findByIdAndUpdate(existingOL._id, {
|
|
1159
|
+
data: stagingData,
|
|
1160
|
+
sha256: stagingSha256,
|
|
1161
|
+
objectLayerRenderFramesId: newRenderFrames._id,
|
|
1162
|
+
});
|
|
1163
|
+
|
|
1164
|
+
if (oldRenderFramesId) {
|
|
1165
|
+
await ObjectLayerRenderFrames.findByIdAndDelete(oldRenderFramesId);
|
|
1166
|
+
}
|
|
332
1167
|
|
|
333
|
-
|
|
334
|
-
|
|
1168
|
+
objectLayer = await ObjectLayer.findById(existingOL._id);
|
|
1169
|
+
logger.info(`[cut-over] Live document ${existingOL._id} updated atomically (atlas skipped)`);
|
|
1170
|
+
} else {
|
|
1171
|
+
// New item: create with sha256 populated (no atlas for bulk import)
|
|
1172
|
+
const stagingData = JSON.parse(JSON.stringify(entry.data));
|
|
1173
|
+
if (!stagingData.render) stagingData.render = {};
|
|
1174
|
+
stagingData.render.cid = '';
|
|
1175
|
+
stagingData.render.metadataCid = '';
|
|
1176
|
+
const stagingSha256 = ObjectLayerEngine.computeSha256(stagingData);
|
|
1177
|
+
|
|
1178
|
+
const newRenderFrames = await ObjectLayerRenderFrames.create(entry.objectLayerRenderFramesData);
|
|
1179
|
+
objectLayer = await ObjectLayer.create({
|
|
1180
|
+
data: stagingData,
|
|
1181
|
+
sha256: stagingSha256,
|
|
1182
|
+
objectLayerRenderFramesId: newRenderFrames._id,
|
|
1183
|
+
});
|
|
1184
|
+
}
|
|
1185
|
+
|
|
1186
|
+
logger.info(
|
|
1187
|
+
`ObjectLayer ${existingOL ? 'updated' : 'created'} (atlas skipped for bulk import): ${objectLayerId}`,
|
|
1188
|
+
);
|
|
1189
|
+
console.log(objectLayer.toObject ? objectLayer.toObject() : objectLayer);
|
|
335
1190
|
}
|
|
336
1191
|
}
|
|
337
1192
|
}
|
|
@@ -384,7 +1239,9 @@ try {
|
|
|
384
1239
|
|
|
385
1240
|
if (frameIndexNum >= frames.length) {
|
|
386
1241
|
logger.error(
|
|
387
|
-
`Frame index ${frameIndexNum} out of range. Available frames: 0-${
|
|
1242
|
+
`Frame index ${frameIndexNum} out of range. Available frames: 0-${
|
|
1243
|
+
frames.length - 1
|
|
1244
|
+
} for direction ${objectLayerFrameDirection}`,
|
|
388
1245
|
);
|
|
389
1246
|
process.exit(1);
|
|
390
1247
|
}
|
|
@@ -486,6 +1343,15 @@ try {
|
|
|
486
1343
|
if (ipfsResult) {
|
|
487
1344
|
toAtlasCid = ipfsResult.cid;
|
|
488
1345
|
logger.info(`Atlas sprite sheet pinned to IPFS – CID: ${toAtlasCid}`);
|
|
1346
|
+
try {
|
|
1347
|
+
await createPinRecord({
|
|
1348
|
+
cid: toAtlasCid,
|
|
1349
|
+
resourceType: 'atlas-sprite-sheet',
|
|
1350
|
+
mfsPath: `/object-layer/${itemKey}/${itemKey}_atlas_sprite_sheet.png`,
|
|
1351
|
+
});
|
|
1352
|
+
} catch (e) {
|
|
1353
|
+
logger.warn('Failed to create pin record for atlas sprite sheet:', e.message);
|
|
1354
|
+
}
|
|
489
1355
|
}
|
|
490
1356
|
} catch (ipfsError) {
|
|
491
1357
|
logger.warn('Failed to add atlas sprite sheet to IPFS:', ipfsError.message);
|
|
@@ -501,6 +1367,15 @@ try {
|
|
|
501
1367
|
if (metadataIpfsResult) {
|
|
502
1368
|
toAtlasMetadataCid = metadataIpfsResult.cid;
|
|
503
1369
|
logger.info(`Atlas metadata pinned to IPFS – CID: ${toAtlasMetadataCid}`);
|
|
1370
|
+
try {
|
|
1371
|
+
await createPinRecord({
|
|
1372
|
+
cid: toAtlasMetadataCid,
|
|
1373
|
+
resourceType: 'atlas-metadata',
|
|
1374
|
+
mfsPath: `/object-layer/${itemKey}/${itemKey}_atlas_sprite_sheet_metadata.json`,
|
|
1375
|
+
});
|
|
1376
|
+
} catch (e) {
|
|
1377
|
+
logger.warn('Failed to create pin record for atlas metadata:', e.message);
|
|
1378
|
+
}
|
|
504
1379
|
}
|
|
505
1380
|
} catch (ipfsError) {
|
|
506
1381
|
logger.warn('Failed to add atlas metadata to IPFS:', ipfsError.message);
|
|
@@ -510,7 +1385,8 @@ try {
|
|
|
510
1385
|
let atlasDoc = await AtlasSpriteSheet.findOne({ 'metadata.itemKey': itemKey });
|
|
511
1386
|
|
|
512
1387
|
if (atlasDoc) {
|
|
513
|
-
// Update existing
|
|
1388
|
+
// Update existing – remove old File to prevent orphans
|
|
1389
|
+
if (atlasDoc.fileId) await File.findByIdAndDelete(atlasDoc.fileId);
|
|
514
1390
|
atlasDoc.fileId = fileDoc._id;
|
|
515
1391
|
atlasDoc.cid = toAtlasCid;
|
|
516
1392
|
atlasDoc.metadata = metadata;
|
|
@@ -534,6 +1410,13 @@ try {
|
|
|
534
1410
|
objectLayer.markModified('data.render');
|
|
535
1411
|
await objectLayer.save();
|
|
536
1412
|
|
|
1413
|
+
// Compute final SHA-256 and pin object layer data JSON to IPFS
|
|
1414
|
+
await ObjectLayerEngine.computeAndSaveFinalSha256({
|
|
1415
|
+
objectLayer,
|
|
1416
|
+
ipfsClient: IpfsClient,
|
|
1417
|
+
createPinRecord,
|
|
1418
|
+
});
|
|
1419
|
+
|
|
537
1420
|
logger.info(`Atlas sprite sheet completed for item: ${itemKey}`);
|
|
538
1421
|
}
|
|
539
1422
|
|
|
@@ -704,6 +1587,16 @@ try {
|
|
|
704
1587
|
if (ipfsResult) {
|
|
705
1588
|
atlasCid = ipfsResult.cid;
|
|
706
1589
|
logger.info(`Atlas sprite sheet pinned to IPFS – CID: ${atlasCid}`);
|
|
1590
|
+
try {
|
|
1591
|
+
await createPinRecord({
|
|
1592
|
+
cid: atlasCid,
|
|
1593
|
+
resourceType: 'atlas-sprite-sheet',
|
|
1594
|
+
mfsPath: `/object-layer/${atlasItemKey}/${atlasItemKey}_atlas_sprite_sheet.png`,
|
|
1595
|
+
options: { host, path },
|
|
1596
|
+
});
|
|
1597
|
+
} catch (e) {
|
|
1598
|
+
logger.warn('Failed to create pin record for atlas sprite sheet:', e.message);
|
|
1599
|
+
}
|
|
707
1600
|
}
|
|
708
1601
|
} catch (ipfsError) {
|
|
709
1602
|
logger.warn('Failed to add atlas sprite sheet to IPFS:', ipfsError.message);
|
|
@@ -719,6 +1612,16 @@ try {
|
|
|
719
1612
|
if (metadataIpfsResult) {
|
|
720
1613
|
atlasMetadataCid = metadataIpfsResult.cid;
|
|
721
1614
|
logger.info(`Atlas metadata pinned to IPFS – CID: ${atlasMetadataCid}`);
|
|
1615
|
+
try {
|
|
1616
|
+
await createPinRecord({
|
|
1617
|
+
cid: atlasMetadataCid,
|
|
1618
|
+
resourceType: 'atlas-metadata',
|
|
1619
|
+
mfsPath: `/object-layer/${atlasItemKey}/${atlasItemKey}_atlas_sprite_sheet_metadata.json`,
|
|
1620
|
+
options: { host, path },
|
|
1621
|
+
});
|
|
1622
|
+
} catch (e) {
|
|
1623
|
+
logger.warn('Failed to create pin record for atlas metadata:', e.message);
|
|
1624
|
+
}
|
|
722
1625
|
}
|
|
723
1626
|
} catch (ipfsError) {
|
|
724
1627
|
logger.warn('Failed to add atlas metadata to IPFS:', ipfsError.message);
|
|
@@ -727,6 +1630,7 @@ try {
|
|
|
727
1630
|
// Upsert AtlasSpriteSheet document (with CID)
|
|
728
1631
|
let atlasDoc = await AtlasSpriteSheet.findOne({ 'metadata.itemKey': atlasItemKey });
|
|
729
1632
|
if (atlasDoc) {
|
|
1633
|
+
if (atlasDoc.fileId) await File.findByIdAndDelete(atlasDoc.fileId);
|
|
730
1634
|
atlasDoc.fileId = fileDoc._id;
|
|
731
1635
|
atlasDoc.cid = atlasCid;
|
|
732
1636
|
atlasDoc.metadata = metadata;
|
|
@@ -770,7 +1674,6 @@ try {
|
|
|
770
1674
|
objectLayer: finalObjectLayer,
|
|
771
1675
|
ipfsClient: IpfsClient,
|
|
772
1676
|
createPinRecord,
|
|
773
|
-
userId: undefined, // CLI context has no authenticated user
|
|
774
1677
|
options: { host, path },
|
|
775
1678
|
});
|
|
776
1679
|
logger.info(`Final SHA-256: ${finalized.sha256}`);
|
|
@@ -797,7 +1700,1638 @@ try {
|
|
|
797
1700
|
)
|
|
798
1701
|
.description('Object layer management');
|
|
799
1702
|
|
|
800
|
-
|
|
1703
|
+
// ── instance: Cyberia instance backup / restore ─────────────────────────
|
|
1704
|
+
program
|
|
1705
|
+
.command('instance [instance-code]')
|
|
1706
|
+
.option('--export [path]', 'Export instance and related documents to a backup directory')
|
|
1707
|
+
.option('--import [path]', 'Import instance and related documents from a backup directory (preserveUUID, upsert)')
|
|
1708
|
+
.option('--drop', 'Drop existing instance, maps and object layers before importing')
|
|
1709
|
+
.option('--env-path <env-path>', 'Env path e.g. ./engine-private/conf/dd-cyberia/.env.development')
|
|
1710
|
+
.option('--mongo-host <mongo-host>', 'Mongo host override')
|
|
1711
|
+
.option('--dev', 'Force development environment')
|
|
1712
|
+
.description('Export/import a Cyberia instance with all related maps, entities and object layers')
|
|
1713
|
+
.action(async (instanceCode, options = {}) => {
|
|
1714
|
+
if (!instanceCode) {
|
|
1715
|
+
logger.error('instance-code argument is required');
|
|
1716
|
+
process.exit(1);
|
|
1717
|
+
}
|
|
1718
|
+
|
|
1719
|
+
if (!options.envPath) options.envPath = `./.env`;
|
|
1720
|
+
if (fs.existsSync(options.envPath)) dotenv.config({ path: options.envPath, override: true });
|
|
1721
|
+
|
|
1722
|
+
if (options.dev && process.env.DEFAULT_DEPLOY_ID) {
|
|
1723
|
+
const deployDevEnvPath = `./engine-private/conf/${process.env.DEFAULT_DEPLOY_ID}/.env.development`;
|
|
1724
|
+
if (fs.existsSync(deployDevEnvPath)) {
|
|
1725
|
+
dotenv.config({ path: deployDevEnvPath, override: true });
|
|
1726
|
+
}
|
|
1727
|
+
}
|
|
1728
|
+
|
|
1729
|
+
const deployId = process.env.DEFAULT_DEPLOY_ID;
|
|
1730
|
+
const host = process.env.DEFAULT_DEPLOY_HOST;
|
|
1731
|
+
const path = process.env.DEFAULT_DEPLOY_PATH;
|
|
1732
|
+
|
|
1733
|
+
const confServerPath = `./engine-private/conf/${deployId}/conf.server.json`;
|
|
1734
|
+
if (!fs.existsSync(confServerPath)) {
|
|
1735
|
+
logger.error(`Server config not found: ${confServerPath}`);
|
|
1736
|
+
process.exit(1);
|
|
1737
|
+
}
|
|
1738
|
+
const confServer = loadConfServerJson(confServerPath, { resolve: true });
|
|
1739
|
+
const { db } = confServer[host][path];
|
|
1740
|
+
|
|
1741
|
+
db.host = options.mongoHost
|
|
1742
|
+
? options.mongoHost
|
|
1743
|
+
: options.dev
|
|
1744
|
+
? db.host
|
|
1745
|
+
: db.host.replace('127.0.0.1', 'mongodb-0.mongodb-service');
|
|
1746
|
+
|
|
1747
|
+
logger.info('instance env', { env: options.envPath, deployId, host, path, db });
|
|
1748
|
+
|
|
1749
|
+
await DataBaseProvider.load({
|
|
1750
|
+
apis: [
|
|
1751
|
+
'cyberia-instance',
|
|
1752
|
+
'cyberia-map',
|
|
1753
|
+
'cyberia-entity',
|
|
1754
|
+
'object-layer',
|
|
1755
|
+
'object-layer-render-frames',
|
|
1756
|
+
'atlas-sprite-sheet',
|
|
1757
|
+
'file',
|
|
1758
|
+
'ipfs',
|
|
1759
|
+
],
|
|
1760
|
+
host,
|
|
1761
|
+
path,
|
|
1762
|
+
db,
|
|
1763
|
+
});
|
|
1764
|
+
|
|
1765
|
+
const dbModels = DataBaseProvider.instance[`${host}${path}`].mongoose.models;
|
|
1766
|
+
const CyberiaInstance = dbModels.CyberiaInstance;
|
|
1767
|
+
const CyberiaMap = dbModels.CyberiaMap;
|
|
1768
|
+
const ObjectLayer = dbModels.ObjectLayer;
|
|
1769
|
+
const ObjectLayerRenderFrames = dbModels.ObjectLayerRenderFrames;
|
|
1770
|
+
const AtlasSpriteSheet = dbModels.AtlasSpriteSheet;
|
|
1771
|
+
const File = dbModels.File;
|
|
1772
|
+
const Ipfs = dbModels.Ipfs;
|
|
1773
|
+
|
|
1774
|
+
// ── EXPORT ──────────────────────────────────────────────────────
|
|
1775
|
+
if (options.export !== undefined) {
|
|
1776
|
+
const instance = await CyberiaInstance.findOne({ code: instanceCode }).lean();
|
|
1777
|
+
if (!instance) {
|
|
1778
|
+
logger.error(`CyberiaInstance with code "${instanceCode}" not found`);
|
|
1779
|
+
await DataBaseProvider.instance[`${host}${path}`].mongoose.close();
|
|
1780
|
+
process.exit(1);
|
|
1781
|
+
}
|
|
1782
|
+
|
|
1783
|
+
const backupDir =
|
|
1784
|
+
typeof options.export === 'string' && options.export
|
|
1785
|
+
? options.export
|
|
1786
|
+
: `./engine-private/cyberia-instances/${instanceCode}`;
|
|
1787
|
+
|
|
1788
|
+
fs.ensureDirSync(backupDir);
|
|
1789
|
+
logger.info('Exporting instance', { code: instanceCode, backupDir });
|
|
1790
|
+
|
|
1791
|
+
fs.ensureDirSync(`${backupDir}/files`);
|
|
1792
|
+
|
|
1793
|
+
// Helper: export a File document to the files/ directory
|
|
1794
|
+
const exportFileDoc = async (fileId, fileKey) => {
|
|
1795
|
+
if (!fileId) return;
|
|
1796
|
+
const file = await File.findById(fileId).lean();
|
|
1797
|
+
if (!file) return;
|
|
1798
|
+
const fileExport = { ...file };
|
|
1799
|
+
// Handle both Node.js Buffer and BSON Binary types from .lean()
|
|
1800
|
+
if (fileExport.data) {
|
|
1801
|
+
const buf = Buffer.isBuffer(fileExport.data)
|
|
1802
|
+
? fileExport.data
|
|
1803
|
+
: Buffer.from(fileExport.data.buffer || fileExport.data);
|
|
1804
|
+
fileExport.data = { $base64: buf.toString('base64') };
|
|
1805
|
+
}
|
|
1806
|
+
fs.writeJsonSync(`${backupDir}/files/${fileKey}.json`, fileExport, { spaces: 2 });
|
|
1807
|
+
};
|
|
1808
|
+
|
|
1809
|
+
// 1. Save instance document + thumbnail
|
|
1810
|
+
fs.writeJsonSync(`${backupDir}/cyberia-instance.json`, instance, { spaces: 2 });
|
|
1811
|
+
if (instance.thumbnail) {
|
|
1812
|
+
await exportFileDoc(instance.thumbnail, `thumb-instance-${instanceCode}`);
|
|
1813
|
+
}
|
|
1814
|
+
logger.info('Exported CyberiaInstance', { code: instanceCode });
|
|
1815
|
+
|
|
1816
|
+
// 2. Collect all map codes (instance maps + portal targets)
|
|
1817
|
+
const mapCodes = new Set(instance.cyberiaMapCodes || []);
|
|
1818
|
+
for (const portal of instance.portals || []) {
|
|
1819
|
+
if (portal.sourceMapCode) mapCodes.add(portal.sourceMapCode);
|
|
1820
|
+
if (portal.targetMapCode) mapCodes.add(portal.targetMapCode);
|
|
1821
|
+
}
|
|
1822
|
+
|
|
1823
|
+
// 3. Export maps + thumbnails
|
|
1824
|
+
const maps = await CyberiaMap.find({ code: { $in: [...mapCodes] } }).lean();
|
|
1825
|
+
fs.ensureDirSync(`${backupDir}/maps`);
|
|
1826
|
+
for (const map of maps) {
|
|
1827
|
+
fs.writeJsonSync(`${backupDir}/maps/${map.code}.json`, map, { spaces: 2 });
|
|
1828
|
+
if (map.thumbnail) {
|
|
1829
|
+
await exportFileDoc(map.thumbnail, `thumb-map-${map.code}`);
|
|
1830
|
+
}
|
|
1831
|
+
}
|
|
1832
|
+
logger.info(`Exported ${maps.length} CyberiaMap document(s)`, { codes: maps.map((m) => m.code) });
|
|
1833
|
+
|
|
1834
|
+
// 4. Collect all objectLayerItemIds from map entities
|
|
1835
|
+
const objectLayerItemIds = new Set();
|
|
1836
|
+
for (const map of maps) {
|
|
1837
|
+
for (const entity of map.entities || []) {
|
|
1838
|
+
for (const itemId of entity.objectLayerItemIds || []) {
|
|
1839
|
+
objectLayerItemIds.add(itemId);
|
|
1840
|
+
}
|
|
1841
|
+
}
|
|
1842
|
+
}
|
|
1843
|
+
|
|
1844
|
+
// 5. Export object layers with related render frames, atlas, files, and IPFS records
|
|
1845
|
+
if (objectLayerItemIds.size > 0) {
|
|
1846
|
+
const objectLayers = await ObjectLayer.find({
|
|
1847
|
+
'data.item.id': { $in: [...objectLayerItemIds] },
|
|
1848
|
+
}).lean();
|
|
1849
|
+
|
|
1850
|
+
fs.ensureDirSync(`${backupDir}/object-layers`);
|
|
1851
|
+
fs.ensureDirSync(`${backupDir}/render-frames`);
|
|
1852
|
+
fs.ensureDirSync(`${backupDir}/atlas-sprite-sheets`);
|
|
1853
|
+
fs.ensureDirSync(`${backupDir}/ipfs`);
|
|
1854
|
+
|
|
1855
|
+
const allCids = new Set();
|
|
1856
|
+
|
|
1857
|
+
for (const ol of objectLayers) {
|
|
1858
|
+
const fileName = ol.data?.item?.id || ol._id.toString();
|
|
1859
|
+
fs.writeJsonSync(`${backupDir}/object-layers/${fileName}.json`, ol, { spaces: 2 });
|
|
1860
|
+
|
|
1861
|
+
// Export ObjectLayerRenderFrames
|
|
1862
|
+
if (ol.objectLayerRenderFramesId) {
|
|
1863
|
+
const rf = await ObjectLayerRenderFrames.findById(ol.objectLayerRenderFramesId).lean();
|
|
1864
|
+
if (rf) {
|
|
1865
|
+
fs.writeJsonSync(`${backupDir}/render-frames/${fileName}.json`, rf, { spaces: 2 });
|
|
1866
|
+
}
|
|
1867
|
+
}
|
|
1868
|
+
|
|
1869
|
+
// Export AtlasSpriteSheet + its File
|
|
1870
|
+
if (ol.atlasSpriteSheetId) {
|
|
1871
|
+
const atlas = await AtlasSpriteSheet.findById(ol.atlasSpriteSheetId).lean();
|
|
1872
|
+
if (atlas) {
|
|
1873
|
+
fs.writeJsonSync(`${backupDir}/atlas-sprite-sheets/${fileName}.json`, atlas, { spaces: 2 });
|
|
1874
|
+
if (atlas.fileId) {
|
|
1875
|
+
await exportFileDoc(atlas.fileId, `atlas-${fileName}`);
|
|
1876
|
+
}
|
|
1877
|
+
if (atlas.cid) allCids.add(atlas.cid);
|
|
1878
|
+
}
|
|
1879
|
+
}
|
|
1880
|
+
|
|
1881
|
+
// Collect CIDs for IPFS pin records
|
|
1882
|
+
if (ol.cid) allCids.add(ol.cid);
|
|
1883
|
+
if (ol.data?.render?.cid) allCids.add(ol.data.render.cid);
|
|
1884
|
+
if (ol.data?.render?.metadataCid) allCids.add(ol.data.render.metadataCid);
|
|
1885
|
+
}
|
|
1886
|
+
|
|
1887
|
+
// Export IPFS pin records for all collected CIDs
|
|
1888
|
+
if (allCids.size > 0) {
|
|
1889
|
+
const ipfsDocs = await Ipfs.find({ cid: { $in: [...allCids] } }).lean();
|
|
1890
|
+
if (ipfsDocs.length > 0) {
|
|
1891
|
+
fs.writeJsonSync(`${backupDir}/ipfs/pins.json`, ipfsDocs, { spaces: 2 });
|
|
1892
|
+
logger.info(`Exported ${ipfsDocs.length} Ipfs pin record(s)`);
|
|
1893
|
+
}
|
|
1894
|
+
}
|
|
1895
|
+
|
|
1896
|
+
logger.info(`Exported ${objectLayers.length} ObjectLayer document(s)`, {
|
|
1897
|
+
itemIds: [...objectLayerItemIds],
|
|
1898
|
+
});
|
|
1899
|
+
} else {
|
|
1900
|
+
logger.info('No ObjectLayer references found in map entities');
|
|
1901
|
+
}
|
|
1902
|
+
|
|
1903
|
+
logger.info('Instance export completed', { backupDir });
|
|
1904
|
+
}
|
|
1905
|
+
|
|
1906
|
+
// ── IMPORT ──────────────────────────────────────────────────────
|
|
1907
|
+
if (options.import !== undefined) {
|
|
1908
|
+
const backupDir =
|
|
1909
|
+
typeof options.import === 'string' && options.import
|
|
1910
|
+
? options.import
|
|
1911
|
+
: `./engine-private/cyberia-instances/${instanceCode}`;
|
|
1912
|
+
|
|
1913
|
+
if (!fs.existsSync(backupDir)) {
|
|
1914
|
+
logger.error(`Backup directory not found: ${backupDir}`);
|
|
1915
|
+
await DataBaseProvider.instance[`${host}${path}`].mongoose.close();
|
|
1916
|
+
process.exit(1);
|
|
1917
|
+
}
|
|
1918
|
+
|
|
1919
|
+
logger.info('Importing instance', { code: instanceCode, backupDir });
|
|
1920
|
+
|
|
1921
|
+
// 0. Drop existing documents if --drop is set
|
|
1922
|
+
if (options.drop) {
|
|
1923
|
+
const existingInstance = await CyberiaInstance.findOne({ code: instanceCode }).lean();
|
|
1924
|
+
if (existingInstance) {
|
|
1925
|
+
const dropMapCodes = new Set(existingInstance.cyberiaMapCodes || []);
|
|
1926
|
+
for (const portal of existingInstance.portals || []) {
|
|
1927
|
+
if (portal.sourceMapCode) dropMapCodes.add(portal.sourceMapCode);
|
|
1928
|
+
if (portal.targetMapCode) dropMapCodes.add(portal.targetMapCode);
|
|
1929
|
+
}
|
|
1930
|
+
|
|
1931
|
+
// Collect thumbnail File IDs to drop
|
|
1932
|
+
const thumbFileIds = [];
|
|
1933
|
+
if (existingInstance.thumbnail) thumbFileIds.push(existingInstance.thumbnail);
|
|
1934
|
+
|
|
1935
|
+
// Query other instances/maps for shared thumbnail exclusion
|
|
1936
|
+
const otherInstances = await CyberiaInstance.find({ code: { $ne: instanceCode } }, { thumbnail: 1 }).lean();
|
|
1937
|
+
|
|
1938
|
+
if (dropMapCodes.size > 0) {
|
|
1939
|
+
const dropMaps = await CyberiaMap.find({ code: { $in: [...dropMapCodes] } }).lean();
|
|
1940
|
+
const dropOlItemIds = new Set();
|
|
1941
|
+
for (const map of dropMaps) {
|
|
1942
|
+
if (map.thumbnail) thumbFileIds.push(map.thumbnail);
|
|
1943
|
+
for (const entity of map.entities || []) {
|
|
1944
|
+
for (const itemId of entity.objectLayerItemIds || []) {
|
|
1945
|
+
dropOlItemIds.add(itemId);
|
|
1946
|
+
}
|
|
1947
|
+
}
|
|
1948
|
+
}
|
|
1949
|
+
|
|
1950
|
+
// Exclude OL item IDs referenced by maps outside this instance
|
|
1951
|
+
const otherMaps = await CyberiaMap.find(
|
|
1952
|
+
{ code: { $nin: [...dropMapCodes] } },
|
|
1953
|
+
{ 'entities.objectLayerItemIds': 1, thumbnail: 1 },
|
|
1954
|
+
).lean();
|
|
1955
|
+
const sharedOlItemIds = new Set();
|
|
1956
|
+
for (const m of otherMaps) {
|
|
1957
|
+
for (const entity of m.entities || []) {
|
|
1958
|
+
for (const itemId of entity.objectLayerItemIds || []) {
|
|
1959
|
+
if (dropOlItemIds.has(itemId)) sharedOlItemIds.add(itemId);
|
|
1960
|
+
}
|
|
1961
|
+
}
|
|
1962
|
+
}
|
|
1963
|
+
for (const shared of sharedOlItemIds) dropOlItemIds.delete(shared);
|
|
1964
|
+
if (sharedOlItemIds.size > 0) {
|
|
1965
|
+
logger.info(`Preserved ${sharedOlItemIds.size} ObjectLayer(s) shared with other maps`);
|
|
1966
|
+
}
|
|
1967
|
+
|
|
1968
|
+
// Exclude thumbnail File IDs referenced by other instances or maps
|
|
1969
|
+
const otherMapThumbs = otherMaps.map((m) => m.thumbnail?.toString()).filter(Boolean);
|
|
1970
|
+
const otherInstThumbs = otherInstances.map((i) => i.thumbnail?.toString()).filter(Boolean);
|
|
1971
|
+
const sharedThumbIds = new Set([...otherMapThumbs, ...otherInstThumbs]);
|
|
1972
|
+
for (let i = thumbFileIds.length - 1; i >= 0; i--) {
|
|
1973
|
+
if (sharedThumbIds.has(thumbFileIds[i].toString())) thumbFileIds.splice(i, 1);
|
|
1974
|
+
}
|
|
1975
|
+
|
|
1976
|
+
if (dropOlItemIds.size > 0) {
|
|
1977
|
+
// Gather ObjectLayers to collect related doc IDs and CIDs
|
|
1978
|
+
const olDocs = await ObjectLayer.find(
|
|
1979
|
+
{ 'data.item.id': { $in: [...dropOlItemIds] } },
|
|
1980
|
+
{
|
|
1981
|
+
cid: 1,
|
|
1982
|
+
'data.item.id': 1,
|
|
1983
|
+
'data.render': 1,
|
|
1984
|
+
objectLayerRenderFramesId: 1,
|
|
1985
|
+
atlasSpriteSheetId: 1,
|
|
1986
|
+
},
|
|
1987
|
+
).lean();
|
|
1988
|
+
|
|
1989
|
+
const cidsToUnpin = new Set();
|
|
1990
|
+
const renderFrameIds = [];
|
|
1991
|
+
const atlasIds = [];
|
|
1992
|
+
const itemKeysToClean = new Set();
|
|
1993
|
+
|
|
1994
|
+
for (const doc of olDocs) {
|
|
1995
|
+
if (doc.cid) cidsToUnpin.add(doc.cid);
|
|
1996
|
+
if (doc.data?.render?.cid) cidsToUnpin.add(doc.data.render.cid);
|
|
1997
|
+
if (doc.data?.render?.metadataCid) cidsToUnpin.add(doc.data.render.metadataCid);
|
|
1998
|
+
if (doc.data?.item?.id) itemKeysToClean.add(doc.data.item.id);
|
|
1999
|
+
if (doc.objectLayerRenderFramesId) renderFrameIds.push(doc.objectLayerRenderFramesId);
|
|
2000
|
+
if (doc.atlasSpriteSheetId) atlasIds.push(doc.atlasSpriteSheetId);
|
|
2001
|
+
}
|
|
2002
|
+
|
|
2003
|
+
// Delete AtlasSpriteSheet + referenced File docs
|
|
2004
|
+
if (atlasIds.length > 0) {
|
|
2005
|
+
const atlasDocs = await AtlasSpriteSheet.find(
|
|
2006
|
+
{ _id: { $in: atlasIds } },
|
|
2007
|
+
{ fileId: 1, cid: 1 },
|
|
2008
|
+
).lean();
|
|
2009
|
+
const atlasFileIds = atlasDocs.map((a) => a.fileId).filter(Boolean);
|
|
2010
|
+
for (const atlas of atlasDocs) {
|
|
2011
|
+
if (atlas.cid) cidsToUnpin.add(atlas.cid);
|
|
2012
|
+
}
|
|
2013
|
+
if (atlasFileIds.length > 0) {
|
|
2014
|
+
const fileResult = await File.deleteMany({ _id: { $in: atlasFileIds } });
|
|
2015
|
+
logger.info(`Dropped ${fileResult.deletedCount} File document(s) (atlas)`);
|
|
2016
|
+
}
|
|
2017
|
+
const atlasResult = await AtlasSpriteSheet.deleteMany({ _id: { $in: atlasIds } });
|
|
2018
|
+
logger.info(`Dropped ${atlasResult.deletedCount} AtlasSpriteSheet document(s)`);
|
|
2019
|
+
}
|
|
2020
|
+
|
|
2021
|
+
// Delete RenderFrames
|
|
2022
|
+
if (renderFrameIds.length > 0) {
|
|
2023
|
+
const rfResult = await ObjectLayerRenderFrames.deleteMany({ _id: { $in: renderFrameIds } });
|
|
2024
|
+
logger.info(`Dropped ${rfResult.deletedCount} ObjectLayerRenderFrames document(s)`);
|
|
2025
|
+
}
|
|
2026
|
+
|
|
2027
|
+
// Delete IPFS pin records
|
|
2028
|
+
if (cidsToUnpin.size > 0) {
|
|
2029
|
+
const ipfsResult = await Ipfs.deleteMany({ cid: { $in: [...cidsToUnpin] } });
|
|
2030
|
+
logger.info(`Dropped ${ipfsResult.deletedCount} Ipfs pin record(s)`);
|
|
2031
|
+
}
|
|
2032
|
+
|
|
2033
|
+
// Unpin CIDs from IPFS Kubo + Cluster and remove MFS paths
|
|
2034
|
+
let unpinCount = 0;
|
|
2035
|
+
for (const cid of cidsToUnpin) {
|
|
2036
|
+
const ok = await IpfsClient.unpinCid(cid);
|
|
2037
|
+
if (ok) unpinCount++;
|
|
2038
|
+
}
|
|
2039
|
+
let mfsCount = 0;
|
|
2040
|
+
for (const itemKey of itemKeysToClean) {
|
|
2041
|
+
const ok = await IpfsClient.removeMfsPath(`/object-layer/${itemKey}`);
|
|
2042
|
+
if (ok) mfsCount++;
|
|
2043
|
+
}
|
|
2044
|
+
logger.info(
|
|
2045
|
+
`IPFS cleanup: ${unpinCount}/${cidsToUnpin.size} CIDs unpinned, ${mfsCount}/${itemKeysToClean.size} MFS paths removed`,
|
|
2046
|
+
);
|
|
2047
|
+
|
|
2048
|
+
const olResult = await ObjectLayer.deleteMany({ 'data.item.id': { $in: [...dropOlItemIds] } });
|
|
2049
|
+
logger.info(`Dropped ${olResult.deletedCount} ObjectLayer document(s)`);
|
|
2050
|
+
}
|
|
2051
|
+
|
|
2052
|
+
const mapResult = await CyberiaMap.deleteMany({ code: { $in: [...dropMapCodes] } });
|
|
2053
|
+
logger.info(`Dropped ${mapResult.deletedCount} CyberiaMap document(s)`);
|
|
2054
|
+
}
|
|
2055
|
+
|
|
2056
|
+
// Drop thumbnail File documents (instance + maps), excluding shared ones
|
|
2057
|
+
if (thumbFileIds.length > 0) {
|
|
2058
|
+
const thumbResult = await File.deleteMany({ _id: { $in: thumbFileIds } });
|
|
2059
|
+
logger.info(`Dropped ${thumbResult.deletedCount} File document(s) (thumbnails)`);
|
|
2060
|
+
}
|
|
2061
|
+
|
|
2062
|
+
await CyberiaInstance.deleteOne({ code: instanceCode });
|
|
2063
|
+
logger.info('Dropped CyberiaInstance', { code: instanceCode });
|
|
2064
|
+
} else {
|
|
2065
|
+
logger.info('No existing instance to drop', { code: instanceCode });
|
|
2066
|
+
}
|
|
2067
|
+
}
|
|
2068
|
+
|
|
2069
|
+
// 1. Import File documents first (atlas PNG + thumbnail dependencies)
|
|
2070
|
+
const filesDir = `${backupDir}/files`;
|
|
2071
|
+
if (fs.existsSync(filesDir)) {
|
|
2072
|
+
const fileFiles = fs.readdirSync(filesDir).filter((f) => f.endsWith('.json'));
|
|
2073
|
+
let fileCount = 0;
|
|
2074
|
+
for (const f of fileFiles) {
|
|
2075
|
+
const fileData = fs.readJsonSync(`${filesDir}/${f}`);
|
|
2076
|
+
// Restore base64-encoded Buffer (handle both $base64 and { type: 'Buffer', data: [...] })
|
|
2077
|
+
if (fileData.data) {
|
|
2078
|
+
if (fileData.data.$base64) {
|
|
2079
|
+
fileData.data = Buffer.from(fileData.data.$base64, 'base64');
|
|
2080
|
+
} else if (fileData.data.type === 'Buffer' && Array.isArray(fileData.data.data)) {
|
|
2081
|
+
fileData.data = Buffer.from(fileData.data.data);
|
|
2082
|
+
}
|
|
2083
|
+
}
|
|
2084
|
+
// preserveUUID: delete any existing doc with this _id then create with exact _id
|
|
2085
|
+
await File.deleteOne({ _id: fileData._id });
|
|
2086
|
+
await File.create(fileData);
|
|
2087
|
+
fileCount++;
|
|
2088
|
+
}
|
|
2089
|
+
logger.info(`Imported ${fileCount} File document(s)`);
|
|
2090
|
+
}
|
|
2091
|
+
|
|
2092
|
+
// 2. Import ObjectLayerRenderFrames
|
|
2093
|
+
const rfDir = `${backupDir}/render-frames`;
|
|
2094
|
+
if (fs.existsSync(rfDir)) {
|
|
2095
|
+
const rfFiles = fs.readdirSync(rfDir).filter((f) => f.endsWith('.json'));
|
|
2096
|
+
let rfCount = 0;
|
|
2097
|
+
for (const f of rfFiles) {
|
|
2098
|
+
const rfData = fs.readJsonSync(`${rfDir}/${f}`);
|
|
2099
|
+
if (rfData._id) {
|
|
2100
|
+
await ObjectLayerRenderFrames.deleteOne({ _id: rfData._id });
|
|
2101
|
+
await ObjectLayerRenderFrames.create(rfData);
|
|
2102
|
+
rfCount++;
|
|
2103
|
+
}
|
|
2104
|
+
}
|
|
2105
|
+
logger.info(`Imported ${rfCount} ObjectLayerRenderFrames document(s)`);
|
|
2106
|
+
}
|
|
2107
|
+
|
|
2108
|
+
// 3. Import AtlasSpriteSheet
|
|
2109
|
+
const atlasDir = `${backupDir}/atlas-sprite-sheets`;
|
|
2110
|
+
if (fs.existsSync(atlasDir)) {
|
|
2111
|
+
const atlasFiles = fs.readdirSync(atlasDir).filter((f) => f.endsWith('.json'));
|
|
2112
|
+
let atlasCount = 0;
|
|
2113
|
+
for (const f of atlasFiles) {
|
|
2114
|
+
const atlasData = fs.readJsonSync(`${atlasDir}/${f}`);
|
|
2115
|
+
await AtlasSpriteSheet.deleteOne({ _id: atlasData._id });
|
|
2116
|
+
await AtlasSpriteSheet.create(atlasData);
|
|
2117
|
+
atlasCount++;
|
|
2118
|
+
}
|
|
2119
|
+
logger.info(`Imported ${atlasCount} AtlasSpriteSheet document(s)`);
|
|
2120
|
+
}
|
|
2121
|
+
|
|
2122
|
+
// 4. Import object layers
|
|
2123
|
+
const olDir = `${backupDir}/object-layers`;
|
|
2124
|
+
if (fs.existsSync(olDir)) {
|
|
2125
|
+
const olFiles = fs.readdirSync(olDir).filter((f) => f.endsWith('.json'));
|
|
2126
|
+
let olCount = 0;
|
|
2127
|
+
for (const file of olFiles) {
|
|
2128
|
+
const olData = fs.readJsonSync(`${olDir}/${file}`);
|
|
2129
|
+
await ObjectLayer.deleteOne({ _id: olData._id });
|
|
2130
|
+
await ObjectLayer.create(olData);
|
|
2131
|
+
olCount++;
|
|
2132
|
+
}
|
|
2133
|
+
logger.info(`Imported ${olCount} ObjectLayer document(s)`);
|
|
2134
|
+
}
|
|
2135
|
+
|
|
2136
|
+
// 5. Import IPFS pin records and re-pin CIDs
|
|
2137
|
+
const ipfsFile = `${backupDir}/ipfs/pins.json`;
|
|
2138
|
+
if (fs.existsSync(ipfsFile)) {
|
|
2139
|
+
const ipfsDocs = fs.readJsonSync(ipfsFile);
|
|
2140
|
+
let ipfsCount = 0;
|
|
2141
|
+
const pinnedCids = new Set();
|
|
2142
|
+
for (const doc of ipfsDocs) {
|
|
2143
|
+
await Ipfs.deleteOne({ _id: doc._id });
|
|
2144
|
+
await Ipfs.create(doc);
|
|
2145
|
+
ipfsCount++;
|
|
2146
|
+
if (doc.cid) pinnedCids.add(doc.cid);
|
|
2147
|
+
}
|
|
2148
|
+
logger.info(`Imported ${ipfsCount} Ipfs pin record(s)`);
|
|
2149
|
+
|
|
2150
|
+
// Re-pin CIDs to IPFS Kubo + Cluster
|
|
2151
|
+
let repinCount = 0;
|
|
2152
|
+
for (const cid of pinnedCids) {
|
|
2153
|
+
const ok = await IpfsClient.pinCid(cid);
|
|
2154
|
+
if (ok) repinCount++;
|
|
2155
|
+
}
|
|
2156
|
+
logger.info(`IPFS re-pin: ${repinCount}/${pinnedCids.size} CIDs pinned`);
|
|
2157
|
+
}
|
|
2158
|
+
|
|
2159
|
+
// 6. Import maps (preserveUUID: delete by code then create with exact _id)
|
|
2160
|
+
const mapsDir = `${backupDir}/maps`;
|
|
2161
|
+
if (fs.existsSync(mapsDir)) {
|
|
2162
|
+
const mapFiles = fs.readdirSync(mapsDir).filter((f) => f.endsWith('.json'));
|
|
2163
|
+
let mapCount = 0;
|
|
2164
|
+
for (const file of mapFiles) {
|
|
2165
|
+
const mapData = fs.readJsonSync(`${mapsDir}/${file}`);
|
|
2166
|
+
// Remove any existing map with this code (may have different _id)
|
|
2167
|
+
await CyberiaMap.deleteOne({ code: mapData.code });
|
|
2168
|
+
// Also remove if an old doc with this _id exists
|
|
2169
|
+
await CyberiaMap.deleteOne({ _id: mapData._id });
|
|
2170
|
+
await CyberiaMap.create(mapData);
|
|
2171
|
+
mapCount++;
|
|
2172
|
+
}
|
|
2173
|
+
logger.info(`Imported ${mapCount} CyberiaMap document(s)`);
|
|
2174
|
+
}
|
|
2175
|
+
|
|
2176
|
+
// 7. Import instance (preserveUUID: delete by code then create with exact _id)
|
|
2177
|
+
const instancePath = `${backupDir}/cyberia-instance.json`;
|
|
2178
|
+
if (fs.existsSync(instancePath)) {
|
|
2179
|
+
const instanceData = fs.readJsonSync(instancePath);
|
|
2180
|
+
await CyberiaInstance.deleteOne({ code: instanceCode });
|
|
2181
|
+
await CyberiaInstance.deleteOne({ _id: instanceData._id });
|
|
2182
|
+
await CyberiaInstance.create(instanceData);
|
|
2183
|
+
logger.info('Imported CyberiaInstance', { code: instanceCode });
|
|
2184
|
+
} else {
|
|
2185
|
+
logger.warn(`Instance file not found: ${instancePath}`);
|
|
2186
|
+
}
|
|
2187
|
+
|
|
2188
|
+
logger.info('Instance import completed', { backupDir });
|
|
2189
|
+
}
|
|
2190
|
+
|
|
2191
|
+
// ── DROP (standalone) ───────────────────────────────────────────
|
|
2192
|
+
if (options.drop && options.import === undefined) {
|
|
2193
|
+
const existingInstance = await CyberiaInstance.findOne({ code: instanceCode }).lean();
|
|
2194
|
+
if (existingInstance) {
|
|
2195
|
+
const dropMapCodes = new Set(existingInstance.cyberiaMapCodes || []);
|
|
2196
|
+
for (const portal of existingInstance.portals || []) {
|
|
2197
|
+
if (portal.sourceMapCode) dropMapCodes.add(portal.sourceMapCode);
|
|
2198
|
+
if (portal.targetMapCode) dropMapCodes.add(portal.targetMapCode);
|
|
2199
|
+
}
|
|
2200
|
+
|
|
2201
|
+
// Collect thumbnail File IDs to drop
|
|
2202
|
+
const thumbFileIds = [];
|
|
2203
|
+
if (existingInstance.thumbnail) thumbFileIds.push(existingInstance.thumbnail);
|
|
2204
|
+
|
|
2205
|
+
// Query other instances for shared thumbnail exclusion
|
|
2206
|
+
const otherInstances = await CyberiaInstance.find({ code: { $ne: instanceCode } }, { thumbnail: 1 }).lean();
|
|
2207
|
+
|
|
2208
|
+
if (dropMapCodes.size > 0) {
|
|
2209
|
+
const dropMaps = await CyberiaMap.find({ code: { $in: [...dropMapCodes] } }).lean();
|
|
2210
|
+
const dropOlItemIds = new Set();
|
|
2211
|
+
for (const map of dropMaps) {
|
|
2212
|
+
if (map.thumbnail) thumbFileIds.push(map.thumbnail);
|
|
2213
|
+
for (const entity of map.entities || []) {
|
|
2214
|
+
for (const itemId of entity.objectLayerItemIds || []) {
|
|
2215
|
+
dropOlItemIds.add(itemId);
|
|
2216
|
+
}
|
|
2217
|
+
}
|
|
2218
|
+
}
|
|
2219
|
+
|
|
2220
|
+
// Exclude OL item IDs referenced by maps outside this instance
|
|
2221
|
+
const otherMaps = await CyberiaMap.find(
|
|
2222
|
+
{ code: { $nin: [...dropMapCodes] } },
|
|
2223
|
+
{ 'entities.objectLayerItemIds': 1, thumbnail: 1 },
|
|
2224
|
+
).lean();
|
|
2225
|
+
const sharedOlItemIds = new Set();
|
|
2226
|
+
for (const m of otherMaps) {
|
|
2227
|
+
for (const entity of m.entities || []) {
|
|
2228
|
+
for (const itemId of entity.objectLayerItemIds || []) {
|
|
2229
|
+
if (dropOlItemIds.has(itemId)) sharedOlItemIds.add(itemId);
|
|
2230
|
+
}
|
|
2231
|
+
}
|
|
2232
|
+
}
|
|
2233
|
+
for (const shared of sharedOlItemIds) dropOlItemIds.delete(shared);
|
|
2234
|
+
if (sharedOlItemIds.size > 0) {
|
|
2235
|
+
logger.info(`Preserved ${sharedOlItemIds.size} ObjectLayer(s) shared with other maps`);
|
|
2236
|
+
}
|
|
2237
|
+
|
|
2238
|
+
// Exclude thumbnail File IDs referenced by other instances or maps
|
|
2239
|
+
const otherMapThumbs = otherMaps.map((m) => m.thumbnail?.toString()).filter(Boolean);
|
|
2240
|
+
const otherInstThumbs = otherInstances.map((i) => i.thumbnail?.toString()).filter(Boolean);
|
|
2241
|
+
const sharedThumbIds = new Set([...otherMapThumbs, ...otherInstThumbs]);
|
|
2242
|
+
for (let i = thumbFileIds.length - 1; i >= 0; i--) {
|
|
2243
|
+
if (sharedThumbIds.has(thumbFileIds[i].toString())) thumbFileIds.splice(i, 1);
|
|
2244
|
+
}
|
|
2245
|
+
|
|
2246
|
+
if (dropOlItemIds.size > 0) {
|
|
2247
|
+
const olDocs = await ObjectLayer.find(
|
|
2248
|
+
{ 'data.item.id': { $in: [...dropOlItemIds] } },
|
|
2249
|
+
{
|
|
2250
|
+
cid: 1,
|
|
2251
|
+
'data.item.id': 1,
|
|
2252
|
+
'data.render': 1,
|
|
2253
|
+
objectLayerRenderFramesId: 1,
|
|
2254
|
+
atlasSpriteSheetId: 1,
|
|
2255
|
+
},
|
|
2256
|
+
).lean();
|
|
2257
|
+
|
|
2258
|
+
const cidsToUnpin = new Set();
|
|
2259
|
+
const renderFrameIds = [];
|
|
2260
|
+
const atlasIds = [];
|
|
2261
|
+
const itemKeysToClean = new Set();
|
|
2262
|
+
|
|
2263
|
+
for (const doc of olDocs) {
|
|
2264
|
+
if (doc.cid) cidsToUnpin.add(doc.cid);
|
|
2265
|
+
if (doc.data?.render?.cid) cidsToUnpin.add(doc.data.render.cid);
|
|
2266
|
+
if (doc.data?.render?.metadataCid) cidsToUnpin.add(doc.data.render.metadataCid);
|
|
2267
|
+
if (doc.data?.item?.id) itemKeysToClean.add(doc.data.item.id);
|
|
2268
|
+
if (doc.objectLayerRenderFramesId) renderFrameIds.push(doc.objectLayerRenderFramesId);
|
|
2269
|
+
if (doc.atlasSpriteSheetId) atlasIds.push(doc.atlasSpriteSheetId);
|
|
2270
|
+
}
|
|
2271
|
+
|
|
2272
|
+
if (atlasIds.length > 0) {
|
|
2273
|
+
const atlasDocs = await AtlasSpriteSheet.find({ _id: { $in: atlasIds } }, { fileId: 1, cid: 1 }).lean();
|
|
2274
|
+
const atlasFileIds = atlasDocs.map((a) => a.fileId).filter(Boolean);
|
|
2275
|
+
for (const atlas of atlasDocs) {
|
|
2276
|
+
if (atlas.cid) cidsToUnpin.add(atlas.cid);
|
|
2277
|
+
}
|
|
2278
|
+
if (atlasFileIds.length > 0) {
|
|
2279
|
+
const fileResult = await File.deleteMany({ _id: { $in: atlasFileIds } });
|
|
2280
|
+
logger.info(`Dropped ${fileResult.deletedCount} File document(s) (atlas)`);
|
|
2281
|
+
}
|
|
2282
|
+
const atlasResult = await AtlasSpriteSheet.deleteMany({ _id: { $in: atlasIds } });
|
|
2283
|
+
logger.info(`Dropped ${atlasResult.deletedCount} AtlasSpriteSheet document(s)`);
|
|
2284
|
+
}
|
|
2285
|
+
|
|
2286
|
+
if (renderFrameIds.length > 0) {
|
|
2287
|
+
const rfResult = await ObjectLayerRenderFrames.deleteMany({ _id: { $in: renderFrameIds } });
|
|
2288
|
+
logger.info(`Dropped ${rfResult.deletedCount} ObjectLayerRenderFrames document(s)`);
|
|
2289
|
+
}
|
|
2290
|
+
|
|
2291
|
+
if (cidsToUnpin.size > 0) {
|
|
2292
|
+
const ipfsResult = await Ipfs.deleteMany({ cid: { $in: [...cidsToUnpin] } });
|
|
2293
|
+
logger.info(`Dropped ${ipfsResult.deletedCount} Ipfs pin record(s)`);
|
|
2294
|
+
}
|
|
2295
|
+
|
|
2296
|
+
let unpinCount = 0;
|
|
2297
|
+
for (const cid of cidsToUnpin) {
|
|
2298
|
+
const ok = await IpfsClient.unpinCid(cid);
|
|
2299
|
+
if (ok) unpinCount++;
|
|
2300
|
+
}
|
|
2301
|
+
let mfsCount = 0;
|
|
2302
|
+
for (const itemKey of itemKeysToClean) {
|
|
2303
|
+
const ok = await IpfsClient.removeMfsPath(`/object-layer/${itemKey}`);
|
|
2304
|
+
if (ok) mfsCount++;
|
|
2305
|
+
}
|
|
2306
|
+
logger.info(
|
|
2307
|
+
`IPFS cleanup: ${unpinCount}/${cidsToUnpin.size} CIDs unpinned, ${mfsCount}/${itemKeysToClean.size} MFS paths removed`,
|
|
2308
|
+
);
|
|
2309
|
+
|
|
2310
|
+
const olResult = await ObjectLayer.deleteMany({ 'data.item.id': { $in: [...dropOlItemIds] } });
|
|
2311
|
+
logger.info(`Dropped ${olResult.deletedCount} ObjectLayer document(s)`);
|
|
2312
|
+
}
|
|
2313
|
+
|
|
2314
|
+
const mapResult = await CyberiaMap.deleteMany({ code: { $in: [...dropMapCodes] } });
|
|
2315
|
+
logger.info(`Dropped ${mapResult.deletedCount} CyberiaMap document(s)`);
|
|
2316
|
+
}
|
|
2317
|
+
|
|
2318
|
+
// Drop thumbnail File documents (instance + maps), excluding shared ones
|
|
2319
|
+
if (thumbFileIds.length > 0) {
|
|
2320
|
+
const thumbResult = await File.deleteMany({ _id: { $in: thumbFileIds } });
|
|
2321
|
+
logger.info(`Dropped ${thumbResult.deletedCount} File document(s) (thumbnails)`);
|
|
2322
|
+
}
|
|
2323
|
+
|
|
2324
|
+
await CyberiaInstance.deleteOne({ code: instanceCode });
|
|
2325
|
+
logger.info('Dropped CyberiaInstance', { code: instanceCode });
|
|
2326
|
+
} else {
|
|
2327
|
+
logger.info('No existing instance to drop', { code: instanceCode });
|
|
2328
|
+
}
|
|
2329
|
+
}
|
|
2330
|
+
|
|
2331
|
+
if (options.export === undefined && options.import === undefined && !options.drop) {
|
|
2332
|
+
logger.error('Specify --export, --import, or --drop flag');
|
|
2333
|
+
}
|
|
2334
|
+
|
|
2335
|
+
await DataBaseProvider.instance[`${host}${path}`].mongoose.close();
|
|
2336
|
+
});
|
|
2337
|
+
|
|
2338
|
+
// ── chain: Hyperledger Besu / ERC-1155 lifecycle commands ────────────────
|
|
2339
|
+
// Parent command that groups every Hyperledger Besu / ERC-1155 lifecycle subcommand.
const chain = program.command('chain');
chain.description('Hyperledger Besu chain & ERC-1155 ObjectLayerToken lifecycle');
|
|
2340
|
+
|
|
2341
|
+
chain
  .command('deploy')
  .description(
    'Deploy Besu IBFT2 network to kubeadm Kubernetes cluster.\n' +
      'Dynamically generates fresh validator keys, genesis, extraData, enode URLs,\n' +
      'and all K8s manifests in manifests/besu/ before applying via kustomize.\n' +
      'Each invocation creates a unique chain identity (new keys, new extraData).',
  )
  .option('--pull-image', 'Pull Besu container images into containerd before deployment')
  .option('--validators <count>', 'Number of IBFT2 validators (default: 4)', '4')
  .option('--chain-id <chainId>', 'Chain ID for the network (default: 777771)', '777771')
  .option('--block-period <seconds>', 'IBFT2 block period in seconds (default: 5)', '5')
  .option('--epoch-length <length>', 'IBFT2 epoch length (default: 30000)', '30000')
  .option('--coinbase-address <address>', 'Coinbase deployer address (auto-detected from engine-private if omitted)')
  .option('--besu-image <image>', 'Besu container image', 'hyperledger/besu:24.12.1')
  .option('--curl-image <image>', 'Curl init container image', 'curlimages/curl:8.11.1')
  .option('--node-port-rpc <port>', 'NodePort for external JSON-RPC access', '30545')
  .option('--node-port-ws <port>', 'NodePort for external WebSocket access', '30546')
  .option('--namespace <ns>', 'Kubernetes namespace for Besu resources', 'besu')
  .option('--skip-generate', 'Skip manifest generation and use existing manifests/besu/ as-is')
  .option('--skip-wait', 'Skip waiting for validators to reach Running state')
  .action(async (options) => {
    // Commander hands every <value> option over as a string; normalize once here.
    const toInt = (value) => parseInt(value, 10);
    const deployConfig = {
      pullImage: Boolean(options.pullImage),
      validators: toInt(options.validators),
      chainId: toInt(options.chainId),
      blockPeriodSeconds: toInt(options.blockPeriod),
      epochLength: toInt(options.epochLength),
      coinbaseAddress: options.coinbaseAddress || '',
      besuImage: options.besuImage,
      curlImage: options.curlImage,
      nodePortRpc: toInt(options.nodePortRpc),
      nodePortWs: toInt(options.nodePortWs),
      namespace: options.namespace,
      skipGenerate: Boolean(options.skipGenerate),
      skipWait: Boolean(options.skipWait),
      manifestsPath: './manifests/besu',
      networkConfigDir: './hardhat/networks',
      privateKeysDir: './engine-private/eth-networks/besu/validators',
    };
    const result = await deployBesu(deployConfig);
    // A falsy result signals failure, but the CLI only exits non-zero when this
    // run was also responsible for generating the manifests.
    if (!result && !options.skipGenerate) process.exit(1);
  });
|
|
2385
|
+
|
|
2386
|
+
chain
  .command('remove')
  .description('Remove Besu IBFT2 network from kubeadm Kubernetes cluster')
  .option('--namespace <ns>', 'Kubernetes namespace for Besu resources', 'besu')
  .option('--clean-keys', 'Also remove generated validator keys from engine-private/')
  .option('--clean-manifests', 'Also remove the generated manifests/besu/ directory')
  .action(async (options) => {
    // Await the teardown so any rejection surfaces in this action instead of
    // becoming an unhandled promise rejection. (If removeBesu happens to be
    // synchronous, awaiting its return value is a harmless no-op.)
    await removeBesu({
      namespace: options.namespace,
      cleanKeys: !!options.cleanKeys,
      cleanManifests: !!options.cleanManifests,
      manifestsPath: './manifests/besu',
      privateKeysDir: './engine-private/eth-networks/besu/validators',
    });
  });
|
|
2401
|
+
|
|
2402
|
+
// Generate Besu IBFT2 manifests (keys, genesis, extraData, K8s YAML) without
// touching the cluster. The sibling "chain deploy --skip-generate" consumes
// the output directory produced here.
chain
  .command('generate-manifests')
  .description(
    'Generate fresh Besu IBFT2 K8s manifests without deploying.\n' +
      'Creates new validator keys, genesis, extraData, and all manifest files\n' +
      'in manifests/besu/. Use "cyberia chain deploy --skip-generate" to apply them later.',
  )
  .option('--validators <count>', 'Number of IBFT2 validators (default: 4)', '4')
  .option('--chain-id <chainId>', 'Chain ID for the network (default: 777771)', '777771')
  .option('--block-period <seconds>', 'IBFT2 block period in seconds (default: 5)', '5')
  .option('--epoch-length <length>', 'IBFT2 epoch length (default: 30000)', '30000')
  .option('--coinbase-address <address>', 'Coinbase deployer address (auto-detected from engine-private if omitted)')
  .option('--besu-image <image>', 'Besu container image', 'hyperledger/besu:24.12.1')
  .option('--curl-image <image>', 'Curl init container image', 'curlimages/curl:8.11.1')
  .option('--node-port-rpc <port>', 'NodePort for external JSON-RPC access', '30545')
  .option('--node-port-ws <port>', 'NodePort for external WebSocket access', '30546')
  .option('--namespace <ns>', 'Kubernetes namespace for Besu resources', 'besu')
  .option('--output-dir <dir>', 'Output directory for manifests', './manifests/besu')
  .action(async (options) => {
    try {
      // Commander delivers option values as strings; numeric fields are
      // converted here before handing the config to the generator.
      const result = await generateBesuManifests({
        outputDir: options.outputDir,
        networkConfigDir: './hardhat/networks',
        validatorCount: parseInt(options.validators, 10),
        namespace: options.namespace,
        chainId: parseInt(options.chainId, 10),
        blockPeriodSeconds: parseInt(options.blockPeriod, 10),
        epochLength: parseInt(options.epochLength, 10),
        requestTimeoutSeconds: 10,
        coinbaseAddress: options.coinbaseAddress || '',
        besuImage: options.besuImage,
        curlImage: options.curlImage,
        nodePortRpc: parseInt(options.nodePortRpc, 10),
        nodePortWs: parseInt(options.nodePortWs, 10),
        savePrivateKeys: true,
        privateKeysDir: './engine-private/eth-networks/besu/validators',
      });
      logger.info('');
      logger.info('Manifests generated successfully. To deploy:');
      logger.info(' cyberia chain deploy --skip-generate');
      logger.info('');
      // Summarize each generated validator identity (address + truncated pubkey).
      logger.info('Validator summary:');
      for (const v of result.validators) {
        logger.info(` Validator ${v.index}: address=${v.address} pubkey=${v.publicKey.slice(0, 16)}...`);
      }
    } catch (err) {
      // Generation failure is fatal for this command; signal via exit code.
      logger.error(`Manifest generation failed: ${err.message}`);
      process.exit(1);
    }
  });
|
|
2452
|
+
|
|
2453
|
+
chain
  .command('deploy-contract')
  .description('Deploy ObjectLayerToken (ERC-1155) contract to a Besu network via Hardhat')
  .option('--network <network>', 'Hardhat network name (besu-k8s for kubeadm cluster)', 'besu-k8s')
  .action(async (options) => {
    // Fall back to the default network name even if the option arrives empty.
    const targetNetwork = options.network || 'besu-k8s';
    logger.info(`Deploying ObjectLayerToken to network: ${targetNetwork}`);
    // Delegate the actual deployment to the Hardhat script inside ./hardhat.
    shellExec(`cd hardhat && npx hardhat run scripts/deployObjectLayerToken.js --network ${targetNetwork}`);
    logger.info('Contract deployment complete. Check hardhat/deployments/ for the artifact.');
  });
|
|
2463
|
+
|
|
2464
|
+
chain
  .command('compile')
  .description('Compile Solidity contracts via Hardhat')
  .action(async () => {
    // Compilation is fully delegated to the Hardhat toolchain in ./hardhat.
    const compileCommand = 'cd hardhat && npx hardhat compile';
    logger.info('Compiling contracts...');
    shellExec(compileCommand);
    logger.info('Compilation complete.');
  });
|
|
2472
|
+
|
|
2473
|
+
chain
  .command('test')
  .description('Run Hardhat tests for ObjectLayerToken')
  .action(async () => {
    // Runs only the ObjectLayerToken suite, not the whole hardhat test tree.
    const testCommand = 'cd hardhat && npx hardhat test test/ObjectLayerToken.js';
    logger.info('Running ObjectLayerToken tests...');
    shellExec(testCommand);
  });
|
|
2480
|
+
|
|
2481
|
+
chain
  .command('register')
  .description(
    'Register an Object Layer item on-chain via the deployed ObjectLayerToken contract.\n' +
      'When --from-db is set the canonical CID is resolved from MongoDB (fast-json-stable-stringify of objectLayer.data).\n' +
      'This guarantees the on-chain metadataCid always matches the content-addressed IPFS payload.',
  )
  .requiredOption('--item-id <itemId>', 'Human-readable item identifier (e.g. "hatchet")')
  .option('--metadata-cid <cid>', 'IPFS metadata CID for the item (ignored when --from-db is set)', '')
  .option('--from-db', 'Resolve the canonical CID from the ObjectLayer MongoDB document (recommended)')
  .option('--supply <supply>', 'Initial token supply (1 = non-fungible, >1 = semi-fungible)', '1')
  .option('--network <network>', 'Hardhat network name', 'besu-k8s')
  .option('--env-path <envPath>', 'Env path', './.env')
  .option('--mongo-host <mongoHost>', 'MongoDB host override (used with --from-db)')
  .action(async (options) => {
    if (fs.existsSync(options.envPath)) dotenv.config({ path: options.envPath, override: true });

    // ── Validate inputs that get spliced into generated JS source ──────────
    // The item id, supply, and CID below are interpolated verbatim into a
    // temporary Hardhat script, so reject anything that could escape the
    // quoted literals (code injection via CLI arguments).
    if (!/^[A-Za-z0-9_-]+$/.test(options.itemId)) {
      logger.error(`Invalid --item-id "${options.itemId}": only letters, digits, "_" and "-" are allowed.`);
      process.exit(1);
    }
    const supply = Number.parseInt(options.supply, 10);
    if (!Number.isInteger(supply) || supply < 1) {
      logger.error(`Invalid --supply "${options.supply}": must be a positive integer.`);
      process.exit(1);
    }

    const deploymentsDir = './hardhat/deployments';
    const artifactPath = `${deploymentsDir}/${options.network}-ObjectLayerToken.json`;
    if (!fs.existsSync(artifactPath)) {
      logger.error(`Deployment artifact not found: ${artifactPath}. Run "cyberia chain deploy-contract" first.`);
      process.exit(1);
    }
    const deployment = JSON.parse(fs.readFileSync(artifactPath, 'utf8'));
    const contractAddress = deployment.address;

    // ── Resolve canonical CID ───────────────────────────────────────
    let canonicalCid = options.metadataCid || '';

    if (options.fromDb) {
      try {
        const { ObjectLayer, host, path } = await connectDbForChain({
          envPath: options.envPath,
          mongoHost: options.mongoHost,
        });
        const resolved = await resolveCanonicalCid({
          itemId: options.itemId,
          ObjectLayer,
          ipfsClient: IpfsClient,
          options: { host, path },
        });

        // A mismatching explicit CID is overridden, not honored, so the
        // on-chain record always points at the content-addressed payload.
        if (options.metadataCid && options.metadataCid !== resolved.cid) {
          logger.warn(
            `Provided --metadata-cid "${options.metadataCid}" differs from canonical CID "${resolved.cid}" (source: ${resolved.source}).`,
          );
          logger.warn('Using the canonical CID to ensure on-chain integrity.');
        }

        canonicalCid = resolved.cid;
        logger.info(`Canonical CID resolved (${resolved.source}): ${canonicalCid}`);
        logger.info(` SHA-256: ${resolved.sha256}`);

        // Close the DB connection after resolving
        await DataBaseProvider.instance[`${host}${path}`].mongoose.close();
      } catch (dbErr) {
        logger.error(`Failed to resolve canonical CID from database: ${dbErr.message}`);
        process.exit(1);
      }
    } else if (!canonicalCid) {
      logger.warn(
        'No --metadata-cid provided and --from-db not set. The on-chain metadataCid will be empty.\n' +
          'Consider using --from-db to automatically resolve the canonical CID from the database.',
      );
    }

    // CIDs are plain alphanumeric strings (base32/base58); anything else would
    // break out of the single-quoted literal in the generated script.
    if (!/^[A-Za-z0-9]*$/.test(canonicalCid)) {
      logger.error(`Resolved metadata CID "${canonicalCid}" contains unexpected characters; aborting.`);
      process.exit(1);
    }

    logger.info(`Registering Object Layer item "${options.itemId}" on contract ${contractAddress}`);
    logger.info(` Metadata CID: ${canonicalCid || '(none)'}`);
    logger.info(` Supply: ${options.supply}`);

    // Use a Hardhat script via inline JS to call registerObjectLayer
    const registerScript = `
import hre from 'hardhat';
const { ethers } = await hre.network.connect();
async function main() {
  const [deployer] = await ethers.getSigners();
  const token = await ethers.getContractAt('ObjectLayerToken', '${contractAddress}');
  const tx = await token.registerObjectLayer(
    deployer.address,
    '${options.itemId}',
    '${canonicalCid}',
    ${supply},
    '0x'
  );
  const receipt = await tx.wait();
  const tokenId = await token.computeTokenId('${options.itemId}');
  console.log('Registered tokenId:', tokenId.toString());
  console.log('Tx hash:', receipt.hash);
}
main().then(() => process.exit(0)).catch(e => { console.error(e); process.exit(1); });
`;
    const tmpScript = './hardhat/scripts/_cli_register_tmp.js';
    fs.writeFileSync(tmpScript, registerScript, 'utf8');
    try {
      shellExec(`cd hardhat && npx hardhat run scripts/_cli_register_tmp.js --network ${options.network}`);
    } finally {
      // Always delete the temp script, even when the hardhat run throws.
      fs.removeSync(tmpScript);
    }
  });
|
|
2580
|
+
|
|
2581
|
+
chain
  .command('mint')
  .description('Mint additional tokens for an existing token ID')
  .requiredOption('--token-id <tokenId>', 'ERC-1155 token ID (uint256)')
  .requiredOption('--to <address>', 'Recipient address')
  .requiredOption('--amount <amount>', 'Amount to mint')
  .option('--network <network>', 'Hardhat network name', 'besu-k8s')
  .option('--env-path <envPath>', 'Env path', './.env')
  .action(async (options) => {
    if (fs.existsSync(options.envPath)) dotenv.config({ path: options.envPath, override: true });

    // ── Validate inputs that get spliced into generated JS source ──────────
    // --to, --token-id and --amount are interpolated verbatim into a temporary
    // Hardhat script; reject anything that is not a well-formed address or a
    // plain non-negative integer (prevents code injection via CLI arguments).
    if (!/^0x[0-9a-fA-F]{40}$/.test(options.to)) {
      logger.error(`Invalid --to "${options.to}": expected a 0x-prefixed 20-byte hex address.`);
      process.exit(1);
    }
    if (!/^\d+$/.test(options.tokenId)) {
      logger.error(`Invalid --token-id "${options.tokenId}": must be a non-negative integer.`);
      process.exit(1);
    }
    if (!/^\d+$/.test(options.amount)) {
      logger.error(`Invalid --amount "${options.amount}": must be a non-negative integer.`);
      process.exit(1);
    }

    const deploymentsDir = './hardhat/deployments';
    const artifactPath = `${deploymentsDir}/${options.network}-ObjectLayerToken.json`;
    if (!fs.existsSync(artifactPath)) {
      logger.error(`Deployment artifact not found: ${artifactPath}. Run "cyberia chain deploy-contract" first.`);
      process.exit(1);
    }
    const deployment = JSON.parse(fs.readFileSync(artifactPath, 'utf8'));
    const contractAddress = deployment.address;

    logger.info(`Minting ${options.amount} of token ID ${options.tokenId} to ${options.to}`);

    const mintScript = `
import hre from 'hardhat';
const { ethers } = await hre.network.connect();
async function main() {
  const token = await ethers.getContractAt('ObjectLayerToken', '${contractAddress}');
  const tx = await token.mint('${options.to}', ${options.tokenId}, ${options.amount}, '0x');
  const receipt = await tx.wait();
  console.log('Mint tx hash:', receipt.hash);
  const balance = await token.balanceOf('${options.to}', ${options.tokenId});
  console.log('New balance:', balance.toString());
}
main().then(() => process.exit(0)).catch(e => { console.error(e); process.exit(1); });
`;
    const tmpScript = './hardhat/scripts/_cli_mint_tmp.js';
    fs.writeFileSync(tmpScript, mintScript, 'utf8');
    try {
      shellExec(`cd hardhat && npx hardhat run scripts/_cli_mint_tmp.js --network ${options.network}`);
    } finally {
      // Always delete the temp script, even when the hardhat run throws.
      fs.removeSync(tmpScript);
    }
  });
|
|
2624
|
+
|
|
2625
|
+
// Report Besu node connectivity plus (when a deployment artifact exists)
// ObjectLayerToken contract state, by generating and running a throwaway
// Hardhat script inside ./hardhat.
chain
  .command('status')
  .description('Query Besu chain and ObjectLayerToken contract status')
  .option('--network <network>', 'Hardhat network name', 'besu-k8s')
  .option('--env-path <envPath>', 'Env path', './.env')
  .action(async (options) => {
    if (fs.existsSync(options.envPath)) dotenv.config({ path: options.envPath, override: true });

    const deploymentsDir = './hardhat/deployments';
    const artifactPath = `${deploymentsDir}/${options.network}-ObjectLayerToken.json`;

    logger.info('── Besu Chain Status ──');

    // Check node connectivity
    // The contract section is conditionally templated in only when the
    // deployment artifact exists on disk at CLI run time; the artifact path is
    // resolved to an absolute path because the script executes from ./hardhat.
    const statusScript = `
import hre from 'hardhat';
import { readFileSync } from 'fs';
const { ethers } = await hre.network.connect();
async function main() {
  const provider = ethers.provider;
  const network = await provider.getNetwork();
  const blockNumber = await provider.getBlockNumber();
  const [deployer] = await ethers.getSigners();
  const balance = await provider.getBalance(deployer.address);
  console.log('Network:', JSON.stringify({
    name: network.name,
    chainId: network.chainId.toString(),
    blockNumber,
    deployerAddress: deployer.address,
    deployerBalance: ethers.formatEther(balance) + ' ETH'
  }, null, 2));

  ${
    fs.existsSync(artifactPath)
      ? `
  const deployment = JSON.parse(readFileSync('${nodePath.resolve(artifactPath)}', 'utf8'));
  try {
    const token = await ethers.getContractAt('ObjectLayerToken', deployment.address);
    const cryptokoynSupply = await token['totalSupply(uint256)'](0);
    const deployerCKY = await token.balanceOf(deployer.address, 0);
    const isPaused = false; // pausable check would need try-catch
    console.log('Contract:', JSON.stringify({
      address: deployment.address,
      cryptokoynTotalSupply: ethers.formatEther(cryptokoynSupply) + ' CKY',
      deployerCryptokoynBalance: ethers.formatEther(deployerCKY) + ' CKY',
    }, null, 2));
  } catch (e) {
    console.log('Contract not accessible:', e.message);
  }
`
      : `console.log('No deployment artifact found for network ${options.network}.');`
  }
}
main().then(() => process.exit(0)).catch(e => { console.error(e); process.exit(1); });
`;
    const tmpScript = './hardhat/scripts/_cli_status_tmp.js';
    fs.writeFileSync(tmpScript, statusScript, 'utf8');
    try {
      shellExec(`cd hardhat && npx hardhat run scripts/_cli_status_tmp.js --network ${options.network}`);
    } finally {
      // Always delete the temp script, even when the hardhat run throws.
      fs.removeSync(tmpScript);
    }
  });
|
|
2688
|
+
|
|
2689
|
+
chain
  .command('pause')
  .description('Pause all token transfers on the ObjectLayerToken contract (emergency governance)')
  .option('--network <network>', 'Hardhat network name', 'besu-k8s')
  .action(async (options) => {
    // The deployed contract address comes from the per-network artifact file.
    const deploymentsDir = './hardhat/deployments';
    const artifactPath = `${deploymentsDir}/${options.network}-ObjectLayerToken.json`;
    const artifactExists = fs.existsSync(artifactPath);
    if (!artifactExists) {
      logger.error(`Deployment artifact not found: ${artifactPath}`);
      process.exit(1);
    }
    const artifactJson = fs.readFileSync(artifactPath, 'utf8');
    const deployment = JSON.parse(artifactJson);

    // Throwaway Hardhat script that calls pause() on the deployed contract.
    const pauseScript = `
import hre from 'hardhat';
const { ethers } = await hre.network.connect();
async function main() {
  const token = await ethers.getContractAt('ObjectLayerToken', '${deployment.address}');
  const tx = await token.pause();
  await tx.wait();
  console.log('Contract PAUSED. All transfers are frozen.');
}
main().then(() => process.exit(0)).catch(e => { console.error(e); process.exit(1); });
`;
    const tmpScript = './hardhat/scripts/_cli_pause_tmp.js';
    fs.writeFileSync(tmpScript, pauseScript, 'utf8');
    try {
      shellExec(`cd hardhat && npx hardhat run scripts/_cli_pause_tmp.js --network ${options.network}`);
    } finally {
      // Clean up the temp script regardless of the run outcome.
      fs.removeSync(tmpScript);
    }
  });
|
|
2721
|
+
|
|
2722
|
+
chain
  .command('unpause')
  .description('Unpause token transfers on the ObjectLayerToken contract')
  .option('--network <network>', 'Hardhat network name', 'besu-k8s')
  .action(async (options) => {
    // The deployed contract address comes from the per-network artifact file.
    const deploymentsDir = './hardhat/deployments';
    const artifactPath = `${deploymentsDir}/${options.network}-ObjectLayerToken.json`;
    const artifactExists = fs.existsSync(artifactPath);
    if (!artifactExists) {
      logger.error(`Deployment artifact not found: ${artifactPath}`);
      process.exit(1);
    }
    const artifactJson = fs.readFileSync(artifactPath, 'utf8');
    const deployment = JSON.parse(artifactJson);

    // Throwaway Hardhat script that calls unpause() on the deployed contract.
    const unpauseScript = `
import hre from 'hardhat';
const { ethers } = await hre.network.connect();
async function main() {
  const token = await ethers.getContractAt('ObjectLayerToken', '${deployment.address}');
  const tx = await token.unpause();
  await tx.wait();
  console.log('Contract UNPAUSED. Transfers resumed.');
}
main().then(() => process.exit(0)).catch(e => { console.error(e); process.exit(1); });
`;
    const tmpScript = './hardhat/scripts/_cli_unpause_tmp.js';
    fs.writeFileSync(tmpScript, unpauseScript, 'utf8');
    try {
      shellExec(`cd hardhat && npx hardhat run scripts/_cli_unpause_tmp.js --network ${options.network}`);
    } finally {
      // Clean up the temp script regardless of the run outcome.
      fs.removeSync(tmpScript);
    }
  });
|
|
2754
|
+
|
|
2755
|
+
// ── key-gen: Generate Ethereum secp256k1 key pair ───────────────────────
|
|
2756
|
+
chain
  .command('key-gen')
  .description('Generate a new Ethereum secp256k1 key pair for player identity or deployer accounts')
  .option(
    '--save',
    'Persist key files to default paths (private → ./engine-private/, public → ./hardhat/deployments/)',
  )
  .option('--private-path <path>', 'Custom path for the private key JSON file (overrides default)')
  .option('--public-path <path>', 'Custom path for the public key JSON file (overrides default)')
  .action(async (options) => {
    // ethers is loaded lazily so the CLI starts fast for unrelated commands.
    const { ethers } = await import('ethers');
    const wallet = ethers.Wallet.createRandom();
    const addressLower = wallet.address.toLowerCase();

    // Private material (key + mnemonic) and public material are kept in
    // separate payloads so they can be persisted to different locations.
    const privateData = {
      address: wallet.address,
      privateKey: wallet.privateKey,
      mnemonic: wallet.mnemonic?.phrase ?? null,
    };
    const publicData = {
      address: wallet.address,
      publicKey: wallet.publicKey,
    };

    logger.info('── New Ethereum Key Pair ──');
    logger.info(` Address : ${wallet.address}`);
    logger.info(` Private Key: ${wallet.privateKey}`);
    logger.info(` Public Key : ${wallet.publicKey}`);
    if (privateData.mnemonic) logger.info(` Mnemonic : ${privateData.mnemonic}`);

    // Passing a custom path implies the caller wants the keys persisted,
    // even without an explicit --save flag.
    const shouldSave = options.save || options.privatePath || options.publicPath;
    if (!shouldSave) return;

    const privatePath = options.privatePath || `./engine-private/eth-networks/besu/${addressLower}.key.json`;
    const publicPath = options.publicPath || `./hardhat/deployments/${addressLower}.pub.json`;

    fs.ensureDirSync(nodePath.dirname(privatePath));
    fs.writeJsonSync(privatePath, privateData, { spaces: 2 });
    logger.info(` Private key saved to: ${privatePath}`);
    logger.warn(' ⚠ Keep this file secure! Anyone with the private key controls this address.');

    fs.ensureDirSync(nodePath.dirname(publicPath));
    fs.writeJsonSync(publicPath, publicData, { spaces: 2 });
    logger.info(` Public key saved to : ${publicPath}`);
  });
|
|
2806
|
+
|
|
2807
|
+
// ── set-coinbase: Set the Besu deployer (coinbase) private key ──────────
// Resolves a private key from --private-key or a key-gen .key.json file,
// normalises and validates it, then writes it to the coinbase file consumed
// by hardhat.config.js for Besu deployments.
chain
  .command('set-coinbase')
  .description(
    'Set the coinbase deployer private key used by hardhat.config.js for Besu network deployments.\n' +
      'Accepts either a raw hex private key via --private-key, or a .key.json file generated by "cyberia chain key-gen --save" via --from-file.',
  )
  .option('--private-key <hex>', 'Raw hex private key (with or without 0x prefix)')
  .option(
    '--from-file <path>',
    'Path to a .key.json file (e.g. ./engine-private/eth-networks/besu/<address>.key.json)',
  )
  .option(
    '--coinbase-path <path>',
    'Custom output path for the coinbase file',
    './engine-private/eth-networks/besu/coinbase',
  )
  .action(async (options) => {
    let privateKey;
    // Address recorded in the key file (if any) — cross-checked against the
    // address derived from the key during validation below.
    let expectedAddress = null;

    if (options.fromFile) {
      if (!fs.existsSync(options.fromFile)) {
        logger.error(`Key file not found: ${options.fromFile}`);
        process.exit(1);
      }
      try {
        const keyData = fs.readJsonSync(options.fromFile);
        if (!keyData.privateKey) {
          logger.error(`Key file does not contain a "privateKey" field: ${options.fromFile}`);
          process.exit(1);
        }
        privateKey = keyData.privateKey;
        expectedAddress = keyData.address || null;
        logger.info(`Read private key for address ${keyData.address || '(unknown)'} from ${options.fromFile}`);
      } catch (e) {
        logger.error(`Failed to parse key file: ${e.message}`);
        process.exit(1);
      }
    } else if (options.privateKey) {
      privateKey = options.privateKey;
    } else {
      logger.error('Provide either --private-key <hex> or --from-file <path>.');
      process.exit(1);
    }

    // Normalise: ensure 0x prefix
    privateKey = privateKey.trim();
    if (!privateKey.startsWith('0x')) privateKey = `0x${privateKey}`;

    // Validate the key by deriving the address
    try {
      const { ethers } = await import('ethers');
      const wallet = new ethers.Wallet(privateKey);
      logger.info(` Derived address: ${wallet.address}`);
      // FIX: previously the "address" field in the key file was displayed but never
      // verified — a corrupted or mismatched key file was silently accepted.
      if (expectedAddress && wallet.address.toLowerCase() !== expectedAddress.toLowerCase()) {
        logger.warn(
          ` ⚠ Derived address ${wallet.address} does not match the "address" field (${expectedAddress}) in ${options.fromFile}.`,
        );
      }
    } catch (e) {
      logger.error(`Invalid private key: ${e.message}`);
      process.exit(1);
    }

    // Write the coinbase file
    const coinbasePath = options.coinbasePath;
    fs.ensureDirSync(nodePath.dirname(coinbasePath));
    fs.writeFileSync(coinbasePath, privateKey, 'utf8');
    logger.info(`Coinbase private key written to: ${coinbasePath}`);
    logger.warn('⚠ Keep this file secure! Anyone with the private key controls the deployer address.');
    logger.info('hardhat.config.js will read this file automatically for Besu network deployments.');
  });
|
|
2873
|
+
|
|
2874
|
+
// ── balance: Query token balance for an address ─────────────────────────
// Builds a one-off hardhat script (as a string), writes it to a temp file,
// runs it with `npx hardhat run --network <net>`, and deletes it afterwards.
// The deployed contract address comes from the deployment artifact that
// "cyberia chain deploy-contract" produces.
chain
  .command('balance')
  .description('Query ERC-1155 token balance for an address (CKY fungible, semi-fungible, or non-fungible)')
  .requiredOption('--address <address>', 'Ethereum address to query')
  .option('--token-id <tokenId>', 'ERC-1155 token ID (default: 0 = CKY)', '0')
  .option('--network <network>', 'Hardhat network name', 'besu-k8s')
  .option('--env-path <envPath>', 'Env path', './.env')
  .action(async (options) => {
    // Load environment variables from --env-path when the file exists.
    if (fs.existsSync(options.envPath)) dotenv.config({ path: options.envPath, override: true });

    // Only the `.address` field of the per-network deployment artifact is used here.
    const deploymentsDir = './hardhat/deployments';
    const artifactPath = `${deploymentsDir}/${options.network}-ObjectLayerToken.json`;
    if (!fs.existsSync(artifactPath)) {
      logger.error(`Deployment artifact not found: ${artifactPath}. Run "cyberia chain deploy-contract" first.`);
      process.exit(1);
    }
    const deployment = JSON.parse(fs.readFileSync(artifactPath, 'utf8'));
    const contractAddress = deployment.address;

    // Generated hardhat script. Option values are interpolated directly into the
    // source below. NOTE(review): they are not escaped or validated here — a
    // malformed --address/--token-id yields broken (or unintended) generated JS;
    // confirm these inputs are trusted. The `formattedBalance` expression relies
    // on `===` binding tighter than `||`: token id 0 is formatted as ether-scale
    // CKY, everything else as raw units.
    const balanceScript = `
import hre from 'hardhat';
const { ethers } = await hre.network.connect();
async function main() {
const token = await ethers.getContractAt('ObjectLayerToken', '${contractAddress}');
const balance = await token.balanceOf('${options.address}', ${options.tokenId});
const itemId = await token.getItemId(${options.tokenId});
const metadataCid = await token.getMetadataCID(${options.tokenId});
let totalSupply;
try { totalSupply = await token['totalSupply(uint256)'](${options.tokenId}); } catch (_) { totalSupply = 'N/A'; }
console.log(JSON.stringify({
address: '${options.address}',
tokenId: '${options.tokenId}',
itemId: itemId || '(unregistered)',
balance: balance.toString(),
formattedBalance: ${options.tokenId} === '0' || ${options.tokenId} === 0 ? ethers.formatEther(balance) + ' CKY' : balance.toString() + ' units',
totalSupply: totalSupply.toString(),
metadataCid: metadataCid || '(none)',
}, null, 2));
}
main().then(() => process.exit(0)).catch(e => { console.error(e); process.exit(1); });
`;
    const tmpScript = './hardhat/scripts/_cli_balance_tmp.js';
    fs.writeFileSync(tmpScript, balanceScript, 'utf8');
    try {
      shellExec(`cd hardhat && npx hardhat run scripts/_cli_balance_tmp.js --network ${options.network}`);
    } finally {
      // Always remove the temp script, even if the hardhat run fails.
      fs.removeSync(tmpScript);
    }
  });
|
|
2924
|
+
|
|
2925
|
+
// ── transfer: Transfer ERC-1155 tokens between addresses ────────────────
// Same temp-script pattern as `balance`: generate a hardhat script, run it
// against --network, delete it in a finally block.
chain
  .command('transfer')
  .description('Transfer ERC-1155 tokens (CKY, semi-fungible resources, or non-fungible items)')
  .requiredOption('--from <address>', 'Sender address (must be the deployer/owner for relayed transfers)')
  .requiredOption('--to <address>', 'Recipient address')
  .requiredOption('--token-id <tokenId>', 'ERC-1155 token ID (0 = CKY)')
  .requiredOption('--amount <amount>', 'Amount to transfer')
  .option('--network <network>', 'Hardhat network name', 'besu-k8s')
  .option('--env-path <envPath>', 'Env path', './.env')
  .action(async (options) => {
    // Load environment variables from --env-path when the file exists.
    if (fs.existsSync(options.envPath)) dotenv.config({ path: options.envPath, override: true });

    // Contract address from the per-network deployment artifact.
    const deploymentsDir = './hardhat/deployments';
    const artifactPath = `${deploymentsDir}/${options.network}-ObjectLayerToken.json`;
    if (!fs.existsSync(artifactPath)) {
      logger.error(`Deployment artifact not found: ${artifactPath}. Run "cyberia chain deploy-contract" first.`);
      process.exit(1);
    }
    const deployment = JSON.parse(fs.readFileSync(artifactPath, 'utf8'));
    const contractAddress = deployment.address;

    logger.info(
      `Transferring ${options.amount} of token ID ${options.tokenId} from ${options.from} to ${options.to}`,
    );

    // Generated hardhat script. NOTE(review): `signer` is destructured but never
    // referenced afterwards — presumably the default signer authorizes
    // safeTransferFrom implicitly; confirm against hardhat/ethers signer semantics.
    // Option values are interpolated unescaped into the generated JS (see `balance`).
    const transferScript = `
import hre from 'hardhat';
const { ethers } = await hre.network.connect();
async function main() {
const [signer] = await ethers.getSigners();
const token = await ethers.getContractAt('ObjectLayerToken', '${contractAddress}');
const tx = await token.safeTransferFrom(
'${options.from}',
'${options.to}',
${options.tokenId},
${options.amount},
'0x'
);
const receipt = await tx.wait();
console.log('Transfer tx hash:', receipt.hash);
const senderBal = await token.balanceOf('${options.from}', ${options.tokenId});
const recipientBal = await token.balanceOf('${options.to}', ${options.tokenId});
console.log('Sender balance:', senderBal.toString());
console.log('Recipient balance:', recipientBal.toString());
}
main().then(() => process.exit(0)).catch(e => { console.error(e); process.exit(1); });
`;
    const tmpScript = './hardhat/scripts/_cli_transfer_tmp.js';
    fs.writeFileSync(tmpScript, transferScript, 'utf8');
    try {
      shellExec(`cd hardhat && npx hardhat run scripts/_cli_transfer_tmp.js --network ${options.network}`);
    } finally {
      // Always remove the temp script, even if the hardhat run fails.
      fs.removeSync(tmpScript);
    }
  });
|
|
2981
|
+
|
|
2982
|
+
// ── burn: Burn ERC-1155 tokens ──────────────────────────────────────────
// Generates and runs a temp hardhat script that calls `burn` on the deployed
// ObjectLayerToken, then reports remaining balance and post-burn total supply.
chain
  .command('burn')
  .description(
    'Burn ERC-1155 tokens (CKY to reduce supply, semi-fungible for crafting cost, non-fungible to destroy)',
  )
  .requiredOption('--address <address>', 'Address holding the tokens to burn')
  .requiredOption('--token-id <tokenId>', 'ERC-1155 token ID (0 = CKY)')
  .requiredOption('--amount <amount>', 'Amount to burn')
  .option('--network <network>', 'Hardhat network name', 'besu-k8s')
  .option('--env-path <envPath>', 'Env path', './.env')
  .action(async (options) => {
    // Load environment variables from --env-path when the file exists.
    if (fs.existsSync(options.envPath)) dotenv.config({ path: options.envPath, override: true });

    // Contract address from the per-network deployment artifact.
    const deploymentsDir = './hardhat/deployments';
    const artifactPath = `${deploymentsDir}/${options.network}-ObjectLayerToken.json`;
    if (!fs.existsSync(artifactPath)) {
      logger.error(`Deployment artifact not found: ${artifactPath}. Run "cyberia chain deploy-contract" first.`);
      process.exit(1);
    }
    const deployment = JSON.parse(fs.readFileSync(artifactPath, 'utf8'));
    const contractAddress = deployment.address;

    logger.info(`Burning ${options.amount} of token ID ${options.tokenId} from ${options.address}`);

    // Generated hardhat script; option values interpolated unescaped (see `balance`).
    // totalSupply(uint256) may not exist on every build, hence the try/catch fallback.
    const burnScript = `
import hre from 'hardhat';
const { ethers } = await hre.network.connect();
async function main() {
const token = await ethers.getContractAt('ObjectLayerToken', '${contractAddress}');
const tx = await token.burn('${options.address}', ${options.tokenId}, ${options.amount});
const receipt = await tx.wait();
console.log('Burn tx hash:', receipt.hash);
const remaining = await token.balanceOf('${options.address}', ${options.tokenId});
console.log('Remaining balance:', remaining.toString());
let totalSupply;
try { totalSupply = await token['totalSupply(uint256)'](${options.tokenId}); } catch (_) { totalSupply = 'N/A'; }
console.log('Total supply after burn:', totalSupply.toString());
}
main().then(() => process.exit(0)).catch(e => { console.error(e); process.exit(1); });
`;
    const tmpScript = './hardhat/scripts/_cli_burn_tmp.js';
    fs.writeFileSync(tmpScript, burnScript, 'utf8');
    try {
      shellExec(`cd hardhat && npx hardhat run scripts/_cli_burn_tmp.js --network ${options.network}`);
    } finally {
      // Always remove the temp script, even if the hardhat run fails.
      fs.removeSync(tmpScript);
    }
  });
|
|
3031
|
+
|
|
3032
|
+
// ── batch-register: Register multiple Object Layer items in one tx ──────
// Parses the --items JSON, optionally replaces each item's cid with the
// canonical CID resolved from MongoDB (--from-db), then generates and runs a
// temp hardhat script that calls batchRegisterObjectLayers once for all items.
chain
  .command('batch-register')
  .description(
    'Batch-register multiple Object Layer items on-chain in a single transaction.\n' +
      'When --from-db is set, the canonical CID for every item is resolved from MongoDB\n' +
      '(fast-json-stable-stringify of objectLayer.data), overriding any "cid" values in the JSON input.',
  )
  .requiredOption('--items <json>', 'JSON array of items: [{"itemId":"wood","cid":"bafk...","supply":500000}, ...]')
  .option('--from-db', 'Resolve canonical CIDs from the ObjectLayer MongoDB documents (recommended)')
  .option('--network <network>', 'Hardhat network name', 'besu-k8s')
  .option('--env-path <envPath>', 'Env path', './.env')
  .option('--mongo-host <mongoHost>', 'MongoDB host override (used with --from-db)')
  .action(async (options) => {
    // Load environment variables from --env-path when the file exists.
    if (fs.existsSync(options.envPath)) dotenv.config({ path: options.envPath, override: true });

    // Validate the --items payload up front: must parse as a non-empty JSON array.
    let items;
    try {
      items = JSON.parse(options.items);
      if (!Array.isArray(items) || items.length === 0) throw new Error('Must be a non-empty array');
    } catch (e) {
      logger.error(`Invalid --items JSON: ${e.message}`);
      process.exit(1);
    }

    // Contract address from the per-network deployment artifact.
    const deploymentsDir = './hardhat/deployments';
    const artifactPath = `${deploymentsDir}/${options.network}-ObjectLayerToken.json`;
    if (!fs.existsSync(artifactPath)) {
      logger.error(`Deployment artifact not found: ${artifactPath}. Run "cyberia chain deploy-contract" first.`);
      process.exit(1);
    }
    const deployment = JSON.parse(fs.readFileSync(artifactPath, 'utf8'));
    const contractAddress = deployment.address;

    // ── Resolve canonical CIDs when --from-db is set ────────────────
    // Canonical CIDs always win: a user-supplied cid that disagrees is
    // overwritten (with a warning). Any single resolution failure aborts
    // the whole run before anything is sent on-chain.
    if (options.fromDb) {
      let ObjectLayer, host, path;
      try {
        ({ ObjectLayer, host, path } = await connectDbForChain({
          envPath: options.envPath,
          mongoHost: options.mongoHost,
        }));
      } catch (dbErr) {
        logger.error(`Failed to connect to database: ${dbErr.message}`);
        process.exit(1);
      }

      for (const item of items) {
        try {
          const resolved = await resolveCanonicalCid({
            itemId: item.itemId,
            ObjectLayer,
            ipfsClient: IpfsClient,
            options: { host, path },
          });

          if (item.cid && item.cid !== resolved.cid) {
            logger.warn(
              `Item "${item.itemId}": provided cid "${item.cid}" differs from canonical "${resolved.cid}" (${resolved.source}). Using canonical.`,
            );
          }

          item.cid = resolved.cid;
          logger.info(` "${item.itemId}" canonical CID (${resolved.source}): ${resolved.cid}`);
        } catch (resolveErr) {
          logger.error(`Failed to resolve canonical CID for "${item.itemId}": ${resolveErr.message}`);
          process.exit(1);
        }
      }

      // Best-effort connection cleanup; close failures are deliberately ignored.
      try {
        await DataBaseProvider.instance[`${host}${path}`].mongoose.close();
      } catch (_) {
        /* ignore close errors */
      }
    }

    // Columnar arrays for the batch contract call; missing cid → '', missing supply → 1.
    const itemIds = items.map((i) => i.itemId);
    const cids = items.map((i) => i.cid || '');
    const supplies = items.map((i) => i.supply || 1);

    logger.info(`Batch-registering ${items.length} items on contract ${contractAddress}`);
    for (const item of items) {
      logger.info(` - ${item.itemId} (supply: ${item.supply || 1}, cid: ${item.cid || '(none)'})`);
    }

    // Generated hardhat script. Arrays are embedded via JSON.stringify, so they
    // are safely serialized; the contract address comes from the artifact above.
    const batchScript = `
import hre from 'hardhat';
const { ethers } = await hre.network.connect();
async function main() {
const [deployer] = await ethers.getSigners();
const token = await ethers.getContractAt('ObjectLayerToken', '${contractAddress}');
const itemIds = ${JSON.stringify(itemIds)};
const cids = ${JSON.stringify(cids)};
const supplies = ${JSON.stringify(supplies)};
const tx = await token.batchRegisterObjectLayers(
deployer.address,
itemIds,
cids,
supplies,
'0x'
);
const receipt = await tx.wait();
console.log('Batch register tx hash:', receipt.hash);
for (const id of itemIds) {
const tokenId = await token.computeTokenId(id);
const balance = await token.balanceOf(deployer.address, tokenId);
console.log(' ' + id + ' -> tokenId:', tokenId.toString(), ' balance:', balance.toString());
}
}
main().then(() => process.exit(0)).catch(e => { console.error(e); process.exit(1); });
`;
    const tmpScript = './hardhat/scripts/_cli_batch_register_tmp.js';
    fs.writeFileSync(tmpScript, batchScript, 'utf8');
    try {
      shellExec(`cd hardhat && npx hardhat run scripts/_cli_batch_register_tmp.js --network ${options.network}`);
    } finally {
      // Always remove the temp script, even if the hardhat run fails.
      fs.removeSync(tmpScript);
    }
  });
|
|
3152
|
+
|
|
3153
|
+
// Workflow runner: groups maintenance scripts under `cyberia run-workflow <name>`.
const runner = program.command('run-workflow').description('Run a Cyberia script from the "scripts" directory');

runner
  .command('import-default-items')
  .option('--dev', 'Force development environment (loads .env.development for IPFS localhost, etc.)')
  .description('Import default Object Layer items, skill config, and dialogues into MongoDB')
  .action(async (options) => {
    const devFlag = options.dev ? ' --dev' : '';
    // Comma-separated list of the default item ids (same output as interpolating
    // the array directly into a template literal).
    const defaultItemIds = DefaultCyberiaItems.map(({ item }) => item.id).join(',');
    // Import items first, then run the two seed workflows via nested CLI calls.
    shellExec(`node bin/cyberia ol ${defaultItemIds} --import${devFlag}`);
    shellExec(`node bin/cyberia run-workflow seed-skill-config${devFlag}`);
    shellExec(`node bin/cyberia run-workflow seed-dialogues${devFlag}`);
  });
|
|
3165
|
+
|
|
3166
|
+
runner
  .command('seed-skill-config')
  .option('--instance-code <code>', 'CyberiaInstance code to update (default: $INSTANCE_CODE env or "default")')
  .option('--env-path <env-path>', 'Env path e.g. ./engine-private/conf/dd-cyberia/.env.development')
  .option('--mongo-host <mongo-host>', 'Mongo host override')
  .option('--dev', 'Force development environment')
  .description('Upsert default skillConfig entries into a CyberiaInstance document')
  .action(async (options) => {
    // Env loading happens in two layers: the base --env-path first, then (in
    // --dev mode) the deploy-specific .env.development, which overrides it.
    if (!options.envPath) options.envPath = `./.env`;
    if (fs.existsSync(options.envPath)) dotenv.config({ path: options.envPath, override: true });

    if (options.dev && process.env.DEFAULT_DEPLOY_ID) {
      const devEnvPath = `./engine-private/conf/${process.env.DEFAULT_DEPLOY_ID}/.env.development`;
      if (fs.existsSync(devEnvPath)) dotenv.config({ path: devEnvPath, override: true });
    }

    // Deploy coordinates come from env; instance code from flag > env > 'default'.
    const deployId = process.env.DEFAULT_DEPLOY_ID;
    const host = process.env.DEFAULT_DEPLOY_HOST;
    const path = process.env.DEFAULT_DEPLOY_PATH;
    const instanceCode = options.instanceCode || process.env.INSTANCE_CODE || 'default';

    const confServerPath = `./engine-private/conf/${deployId}/conf.server.json`;
    if (!fs.existsSync(confServerPath)) {
      logger.error(`Server config not found: ${confServerPath}`);
      process.exit(1);
    }
    const confServer = loadConfServerJson(confServerPath, { resolve: true });
    const { db } = confServer[host][path];

    // Host resolution: explicit --mongo-host wins; in --dev keep the configured
    // host; otherwise rewrite loopback to the in-cluster mongodb service name.
    db.host = options.mongoHost
      ? options.mongoHost
      : options.dev
      ? db.host
      : db.host.replace('127.0.0.1', 'mongodb-0.mongodb-service');

    logger.info('seed-skill-config', { instanceCode, deployId, host, path, db });

    await DataBaseProvider.load({ apis: ['cyberia-instance', 'cyberia-instance-conf'], host, path, db });

    const CyberiaInstance = DataBaseProvider.instance[`${host}${path}`].mongoose.models.CyberiaInstance;
    const CyberiaInstanceConf = DataBaseProvider.instance[`${host}${path}`].mongoose.models.CyberiaInstanceConf;

    const instance = await CyberiaInstance.findOne({ code: instanceCode }).lean();

    // A missing instance is not fatal — the conf is still seeded below and can
    // be linked later once the instance is imported.
    if (!instance) {
      logger.info(
        `CyberiaInstance "${instanceCode}" not found — seeding skillConfig into conf using fallback defaults. ` +
          `To link to a live instance, create or import it with: node bin/cyberia instance ${instanceCode} --import`,
      );
    }

    // Always upsert the conf with DefaultSkillConfig — idempotent regardless of instance existence.
    const conf = await CyberiaInstanceConf.findOneAndUpdate(
      { instanceCode },
      { $set: { skillConfig: DefaultSkillConfig } },
      { upsert: true, returnDocument: 'after' },
    );

    // If a live instance exists, ensure its conf ref is linked.
    if (instance && (!instance.conf || String(instance.conf) !== String(conf._id))) {
      await CyberiaInstance.findByIdAndUpdate(instance._id, { conf: conf._id });
    }

    logger.info(
      `skillConfig seeded for instance "${instanceCode}" (${DefaultSkillConfig.length} entries)`,
      DefaultSkillConfig.map((e) => `${e.triggerItemId} → [${e.logicEventIds.join(', ')}]`),
    );

    // Close the pooled connection so the CLI process can exit cleanly.
    await DataBaseProvider.instance[`${host}${path}`].mongoose.close();
  });
|
|
3236
|
+
|
|
3237
|
+
runner
  .command('seed-dialogues')
  .option('--env-path <env-path>', 'Env path e.g. ./engine-private/conf/dd-cyberia/.env.development')
  .option('--mongo-host <mongo-host>', 'Mongo host override')
  .option('--dev', 'Force development environment')
  .description('Upsert DefaultCyberiaDialogues into the cyberia-dialogue collection (idempotent)')
  .action(async (options) => {
    // Same two-layer env loading as seed-skill-config: base --env-path, then
    // the deploy-specific .env.development override in --dev mode.
    if (!options.envPath) options.envPath = `./.env`;
    if (fs.existsSync(options.envPath)) dotenv.config({ path: options.envPath, override: true });

    if (options.dev && process.env.DEFAULT_DEPLOY_ID) {
      const devEnvPath = `./engine-private/conf/${process.env.DEFAULT_DEPLOY_ID}/.env.development`;
      if (fs.existsSync(devEnvPath)) dotenv.config({ path: devEnvPath, override: true });
    }

    const deployId = process.env.DEFAULT_DEPLOY_ID;
    const host = process.env.DEFAULT_DEPLOY_HOST;
    const path = process.env.DEFAULT_DEPLOY_PATH;

    const confServerPath = `./engine-private/conf/${deployId}/conf.server.json`;
    if (!fs.existsSync(confServerPath)) {
      logger.error(`Server config not found: ${confServerPath}`);
      process.exit(1);
    }
    const confServer = loadConfServerJson(confServerPath, { resolve: true });
    const { db } = confServer[host][path];

    // Host resolution: explicit --mongo-host wins; in --dev keep the configured
    // host; otherwise rewrite loopback to the in-cluster mongodb service name.
    db.host = options.mongoHost
      ? options.mongoHost
      : options.dev
      ? db.host
      : db.host.replace('127.0.0.1', 'mongodb-0.mongodb-service');

    logger.info('seed-dialogues', { deployId, host, path, db });

    await DataBaseProvider.load({ apis: ['cyberia-dialogue'], host, path, db });

    const CyberiaDialogue = DataBaseProvider.instance[`${host}${path}`].mongoose.models.CyberiaDialogue;

    // Upsert each dialogue record keyed by (itemId, order) — idempotent.
    let upserted = 0;
    for (const dlg of DefaultCyberiaDialogues) {
      await CyberiaDialogue.findOneAndUpdate(
        { itemId: dlg.itemId, order: dlg.order },
        { $set: { speaker: dlg.speaker, text: dlg.text, mood: dlg.mood } },
        { upsert: true },
      );
      upserted++;
    }

    logger.info(`seed-dialogues: ${upserted} dialogue records upserted`);

    // Close the pooled connection so the CLI process can exit cleanly.
    await DataBaseProvider.instance[`${host}${path}`].mongoose.close();
  });
|
|
3291
|
+
|
|
3292
|
+
runner
  .command('generate-semantic-examples')
  .option('--seed <seed>', 'Base seed string (each type gets a unique suffix appended)', 'example')
  // FIX: the help text previously claimed "default: 4" while the code falls back to 2.
  // FIX: Commander calls the coercion function as (value, previous); a bare `parseInt`
  // would receive `previous` in its radix slot, so parse explicitly in base 10.
  .option('--frame-count <frameCount>', 'Number of frames to generate per item (default: 2)', (value) =>
    Number.parseInt(value, 10),
  )
  .option('--env-path <env-path>', 'Env path e.g. ./engine-private/conf/dd-cyberia/.env.development')
  .option('--dev', 'Force development environment')
  .description('Generate one procedural example of every registered semantic prefix')
  .action(async (options) => {
    // Registered semantic prefixes; commented entries are known types currently disabled.
    const SEMANTIC_TYPES = [
      'floor-desert',
      'floor-grass',
      // 'floor-water',
      // 'floor-stone',
      // 'floor-lava',
      'skin-random',
      // 'skin-dark',
      // 'skin-light',
      // 'skin-vivid',
      // 'skin-natural',
      'skin-shaved',
    ];

    const baseSeed = options.seed || 'example';
    // `||` (not `??`) on purpose: a NaN from an unparsable --frame-count also falls back to 2.
    const frameCount = options.frameCount || 2;
    const envFlag = options.envPath ? ` --env-path ${options.envPath}` : '';
    const devFlag = options.dev ? ' --dev' : '';

    logger.info(
      `Generating ${SEMANTIC_TYPES.length} semantic examples (seed base: "${baseSeed}", frames: ${frameCount})`,
    );

    // One `ol --generate` CLI invocation per prefix, each with a prefix-unique seed.
    for (const prefix of SEMANTIC_TYPES) {
      const seed = `${baseSeed}-${prefix}`;
      const cmd = `node bin/cyberia ol ${prefix} --generate --seed ${seed} --frame-count ${frameCount}${envFlag}${devFlag}`;
      logger.info(` → ${cmd}`);
      shellExec(cmd);
    }

    logger.info('All semantic examples generated.');
  });
|
|
3332
|
+
|
|
3333
|
+
if (underpostProgram.commands.find((c) => c._name == process.argv[2]))
|
|
3334
|
+
throw new Error('Trigger underpost passthrough');
|
|
801
3335
|
|
|
802
3336
|
program.parse();
|
|
803
3337
|
} catch (error) {
|