@geekmidas/cli 0.13.0 → 0.15.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{bundler-DskIqW2t.mjs → bundler-D7cM_FWw.mjs} +34 -10
- package/dist/bundler-D7cM_FWw.mjs.map +1 -0
- package/dist/{bundler-B1qy9b-j.cjs → bundler-Nuew7Xcn.cjs} +33 -9
- package/dist/bundler-Nuew7Xcn.cjs.map +1 -0
- package/dist/config.d.cts +1 -1
- package/dist/config.d.mts +1 -1
- package/dist/dokploy-api-B7KxOQr3.cjs +3 -0
- package/dist/dokploy-api-C7F9VykY.cjs +317 -0
- package/dist/dokploy-api-C7F9VykY.cjs.map +1 -0
- package/dist/dokploy-api-CaETb2L6.mjs +305 -0
- package/dist/dokploy-api-CaETb2L6.mjs.map +1 -0
- package/dist/dokploy-api-DHvfmWbi.mjs +3 -0
- package/dist/{encryption-Dyf_r1h-.cjs → encryption-D7Efcdi9.cjs} +1 -1
- package/dist/{encryption-Dyf_r1h-.cjs.map → encryption-D7Efcdi9.cjs.map} +1 -1
- package/dist/{encryption-C8H-38Yy.mjs → encryption-h4Nb6W-M.mjs} +1 -1
- package/dist/{encryption-C8H-38Yy.mjs.map → encryption-h4Nb6W-M.mjs.map} +1 -1
- package/dist/index.cjs +1508 -1073
- package/dist/index.cjs.map +1 -1
- package/dist/index.mjs +1508 -1073
- package/dist/index.mjs.map +1 -1
- package/dist/{openapi-Bt_1FDpT.cjs → openapi-C89hhkZC.cjs} +3 -3
- package/dist/{openapi-Bt_1FDpT.cjs.map → openapi-C89hhkZC.cjs.map} +1 -1
- package/dist/{openapi-BfFlOBCG.mjs → openapi-CZVcfxk-.mjs} +3 -3
- package/dist/{openapi-BfFlOBCG.mjs.map → openapi-CZVcfxk-.mjs.map} +1 -1
- package/dist/{openapi-react-query-B6XTeGqS.mjs → openapi-react-query-CM2_qlW9.mjs} +1 -1
- package/dist/{openapi-react-query-B6XTeGqS.mjs.map → openapi-react-query-CM2_qlW9.mjs.map} +1 -1
- package/dist/{openapi-react-query-B-sNWHFU.cjs → openapi-react-query-iKjfLzff.cjs} +1 -1
- package/dist/{openapi-react-query-B-sNWHFU.cjs.map → openapi-react-query-iKjfLzff.cjs.map} +1 -1
- package/dist/openapi-react-query.cjs +1 -1
- package/dist/openapi-react-query.mjs +1 -1
- package/dist/openapi.cjs +1 -1
- package/dist/openapi.d.cts +1 -1
- package/dist/openapi.d.mts +1 -1
- package/dist/openapi.mjs +1 -1
- package/dist/{storage-kSxTjkNb.mjs → storage-BaOP55oq.mjs} +16 -2
- package/dist/storage-BaOP55oq.mjs.map +1 -0
- package/dist/{storage-Bj1E26lU.cjs → storage-Bn3K9Ccu.cjs} +21 -1
- package/dist/storage-Bn3K9Ccu.cjs.map +1 -0
- package/dist/storage-UfyTn7Zm.cjs +7 -0
- package/dist/storage-nkGIjeXt.mjs +3 -0
- package/dist/{types-BhkZc-vm.d.cts → types-BgaMXsUa.d.cts} +3 -1
- package/dist/{types-BR0M2v_c.d.mts.map → types-BgaMXsUa.d.cts.map} +1 -1
- package/dist/{types-BR0M2v_c.d.mts → types-iFk5ms7y.d.mts} +3 -1
- package/dist/{types-BhkZc-vm.d.cts.map → types-iFk5ms7y.d.mts.map} +1 -1
- package/package.json +4 -4
- package/src/auth/__tests__/credentials.spec.ts +127 -0
- package/src/auth/__tests__/index.spec.ts +69 -0
- package/src/auth/credentials.ts +33 -0
- package/src/auth/index.ts +57 -50
- package/src/build/__tests__/bundler.spec.ts +5 -4
- package/src/build/__tests__/endpoint-analyzer.spec.ts +623 -0
- package/src/build/__tests__/handler-templates.spec.ts +272 -0
- package/src/build/bundler.ts +61 -8
- package/src/build/index.ts +21 -0
- package/src/build/types.ts +6 -0
- package/src/deploy/__tests__/docker.spec.ts +44 -6
- package/src/deploy/__tests__/dokploy-api.spec.ts +698 -0
- package/src/deploy/__tests__/dokploy.spec.ts +196 -6
- package/src/deploy/__tests__/index.spec.ts +401 -0
- package/src/deploy/__tests__/init.spec.ts +147 -16
- package/src/deploy/docker.ts +109 -5
- package/src/deploy/dokploy-api.ts +581 -0
- package/src/deploy/dokploy.ts +66 -93
- package/src/deploy/index.ts +630 -32
- package/src/deploy/init.ts +192 -249
- package/src/deploy/types.ts +24 -2
- package/src/dev/__tests__/index.spec.ts +95 -0
- package/src/docker/__tests__/templates.spec.ts +144 -0
- package/src/docker/index.ts +96 -6
- package/src/docker/templates.ts +114 -27
- package/src/generators/EndpointGenerator.ts +2 -2
- package/src/index.ts +34 -13
- package/src/secrets/storage.ts +15 -0
- package/src/types.ts +2 -0
- package/dist/bundler-B1qy9b-j.cjs.map +0 -1
- package/dist/bundler-DskIqW2t.mjs.map +0 -1
- package/dist/storage-BOOpAF8N.cjs +0 -5
- package/dist/storage-Bj1E26lU.cjs.map +0 -1
- package/dist/storage-kSxTjkNb.mjs.map +0 -1
- package/dist/storage-tgZSUnKl.mjs +0 -3
package/dist/index.cjs
CHANGED

@@ -1,9 +1,10 @@
 #!/usr/bin/env -S npx tsx
 const require_chunk = require('./chunk-CUT6urMc.cjs');
 const require_config = require('./config-AmInkU7k.cjs');
-const require_openapi = require('./openapi-
-const
-const
+const require_openapi = require('./openapi-C89hhkZC.cjs');
+const require_dokploy_api = require('./dokploy-api-C7F9VykY.cjs');
+const require_openapi_react_query = require('./openapi-react-query-iKjfLzff.cjs');
+const require_storage = require('./storage-Bn3K9Ccu.cjs');
 const node_fs = require_chunk.__toESM(require("node:fs"));
 const node_path = require_chunk.__toESM(require("node:path"));
 const commander = require_chunk.__toESM(require("commander"));
@@ -24,7 +25,7 @@ const node_crypto = require_chunk.__toESM(require("node:crypto"));
 
 //#region package.json
 var name = "@geekmidas/cli";
-var version = "0.
+var version = "0.15.0";
 var description = "CLI tools for building Lambda handlers, server applications, and generating OpenAPI specs";
 var private$1 = false;
 var type = "module";
@@ -170,7 +171,8 @@ async function getDokployCredentials(options) {
 if (!credentials.dokploy) return null;
 return {
 token: credentials.dokploy.token,
-endpoint: credentials.dokploy.endpoint
+endpoint: credentials.dokploy.endpoint,
+registryId: credentials.dokploy.registryId
 };
 }
 /**
@@ -193,6 +195,22 @@ async function getDokployToken(options) {
 if (stored) return stored.token;
 return null;
 }
+/**
+* Store Dokploy registry ID
+*/
+async function storeDokployRegistryId(registryId, options) {
+const credentials = await readCredentials(options);
+if (!credentials.dokploy) throw new Error("Dokploy credentials not found. Run \"gkm login --service dokploy\" first.");
+credentials.dokploy.registryId = registryId;
+await writeCredentials(credentials, options);
+}
+/**
+* Get Dokploy registry ID from stored credentials
+*/
+async function getDokployRegistryId(options) {
+const stored = await getDokployCredentials(options);
+return stored?.registryId ?? void 0;
+}
 
 //#endregion
 //#region src/auth/index.ts
@@ -201,52 +219,61 @@ const logger$9 = console;
 * Validate Dokploy token by making a test API call
 */
 async function validateDokployToken(endpoint, token) {
-
-
-
-
-
-
-}
-});
-return response.ok;
-} catch {
-return false;
-}
+const { DokployApi: DokployApi$1 } = await Promise.resolve().then(() => require("./dokploy-api-B7KxOQr3.cjs"));
+const api = new DokployApi$1({
+baseUrl: endpoint,
+token
+});
+return api.validateToken();
 }
 /**
 * Prompt for input (handles both TTY and non-TTY)
 */
-async function prompt(message, hidden = false) {
+async function prompt$1(message, hidden = false) {
 if (!process.stdin.isTTY) throw new Error("Interactive input required. Please provide --token option.");
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-}
-
-
-
-
-
-
-
+if (hidden) {
+process.stdout.write(message);
+return new Promise((resolve$1, reject) => {
+let value = "";
+const cleanup = () => {
+process.stdin.setRawMode(false);
+process.stdin.pause();
+process.stdin.removeListener("data", onData);
+process.stdin.removeListener("error", onError);
+};
+const onError = (err) => {
+cleanup();
+reject(err);
+};
+const onData = (char) => {
+const c = char.toString();
+if (c === "\n" || c === "\r") {
+cleanup();
+process.stdout.write("\n");
+resolve$1(value);
+} else if (c === "") {
+cleanup();
+process.stdout.write("\n");
+process.exit(1);
+} else if (c === "" || c === "\b") {
+if (value.length > 0) value = value.slice(0, -1);
+} else value += c;
+};
+process.stdin.setRawMode(true);
+process.stdin.resume();
+process.stdin.on("data", onData);
+process.stdin.on("error", onError);
+});
+} else {
+const rl = node_readline_promises.createInterface({
+input: node_process.stdin,
+output: node_process.stdout
+});
+try {
+return await rl.question(message);
+} finally {
+rl.close();
+}
 }
 }
 /**
@@ -257,7 +284,7 @@ async function loginCommand(options) {
 if (service === "dokploy") {
 logger$9.log("\n🔐 Logging in to Dokploy...\n");
 let endpoint = providedEndpoint;
-if (!endpoint) endpoint = await prompt("Dokploy URL (e.g., https://dokploy.example.com): ");
+if (!endpoint) endpoint = await prompt$1("Dokploy URL (e.g., https://dokploy.example.com): ");
 endpoint = endpoint.replace(/\/$/, "");
 try {
 new URL(endpoint);
@@ -268,7 +295,7 @@ async function loginCommand(options) {
 let token = providedToken;
 if (!token) {
 logger$9.log(`\nGenerate a token at: ${endpoint}/settings/profile\n`);
-token = await prompt("API Token: ", true);
+token = await prompt$1("API Token: ", true);
 }
 if (!token) {
 logger$9.error("Token is required");
@@ -1057,9 +1084,9 @@ var DevServer = class {
 }
 async createServerEntry() {
 const { writeFile: writeFile$8 } = await import("node:fs/promises");
-const { relative: relative$
+const { relative: relative$6, dirname: dirname$6 } = await import("node:path");
 const serverPath = (0, node_path.join)(process.cwd(), ".gkm", this.provider, "server.ts");
-const relativeAppPath = relative$
+const relativeAppPath = relative$6(dirname$6(serverPath), (0, node_path.join)(dirname$6(serverPath), "app.js"));
 const serveCode = this.runtime === "bun" ? `Bun.serve({
 port,
 fetch: app.fetch,
@@ -1189,6 +1216,16 @@ async function buildCommand(options) {
 if (studio) logger$6.log(`🗄️ Studio enabled at ${studio.path}`);
 const hooks = normalizeHooksConfig(config.hooks);
 if (hooks) logger$6.log(`🪝 Server hooks enabled`);
+const services = config.docker?.compose?.services;
+const dockerServices = services ? Array.isArray(services) ? {
+postgres: services.includes("postgres"),
+redis: services.includes("redis"),
+rabbitmq: services.includes("rabbitmq")
+} : {
+postgres: Boolean(services.postgres),
+redis: Boolean(services.redis),
+rabbitmq: Boolean(services.rabbitmq)
+} : void 0;
 const buildContext = {
 envParserPath,
 envParserImportPattern,
@@ -1197,7 +1234,8 @@
 telescope,
 studio,
 hooks,
-production
+production,
+dockerServices
 };
 const endpointGenerator = new require_openapi.EndpointGenerator();
 const functionGenerator = new FunctionGenerator();
@@ -1255,13 +1293,14 @@ async function buildForProvider(provider, context, rootOutputDir, endpointGenera
 let masterKey;
 if (context.production?.bundle && !skipBundle) {
 logger$6.log(`\n📦 Bundling production server...`);
-const { bundleServer } = await Promise.resolve().then(() => require("./bundler-
+const { bundleServer } = await Promise.resolve().then(() => require("./bundler-Nuew7Xcn.cjs"));
 const allConstructs = [
 ...endpoints.map((e) => e.construct),
 ...functions.map((f) => f.construct),
 ...crons.map((c) => c.construct),
 ...subscribers.map((s) => s.construct)
 ];
+const dockerServices = context.dockerServices;
 const bundleResult = await bundleServer({
 entryPoint: (0, node_path.join)(outputDir, "server.ts"),
 outputDir: (0, node_path.join)(outputDir, "dist"),
@@ -1269,7 +1308,8 @@
 sourcemap: false,
 external: context.production.external,
 stage,
-constructs: allConstructs
+constructs: allConstructs,
+dockerServices
 });
 masterKey = bundleResult.masterKey;
 logger$6.log(`✅ Bundle complete: .gkm/server/dist/server.mjs`);
@@ -1284,366 +1324,1008 @@ async function buildForProvider(provider, context, rootOutputDir, endpointGenera
|
|
|
1284
1324
|
}
|
|
1285
1325
|
|
|
1286
1326
|
//#endregion
|
|
1287
|
-
//#region src/
|
|
1288
|
-
|
|
1289
|
-
|
|
1290
|
-
|
|
1291
|
-
|
|
1292
|
-
|
|
1293
|
-
|
|
1294
|
-
|
|
1327
|
+
//#region src/docker/compose.ts
|
|
1328
|
+
/** Default Docker images for services */
|
|
1329
|
+
const DEFAULT_SERVICE_IMAGES = {
|
|
1330
|
+
postgres: "postgres",
|
|
1331
|
+
redis: "redis",
|
|
1332
|
+
rabbitmq: "rabbitmq"
|
|
1333
|
+
};
|
|
1334
|
+
/** Default Docker image versions for services */
|
|
1335
|
+
const DEFAULT_SERVICE_VERSIONS = {
|
|
1336
|
+
postgres: "16-alpine",
|
|
1337
|
+
redis: "7-alpine",
|
|
1338
|
+
rabbitmq: "3-management-alpine"
|
|
1339
|
+
};
|
|
1340
|
+
/** Get the default full image reference for a service */
|
|
1341
|
+
function getDefaultImage(serviceName) {
|
|
1342
|
+
return `${DEFAULT_SERVICE_IMAGES[serviceName]}:${DEFAULT_SERVICE_VERSIONS[serviceName]}`;
|
|
1295
1343
|
}
|
|
1296
|
-
/**
|
|
1297
|
-
|
|
1298
|
-
*/
|
|
1299
|
-
|
|
1300
|
-
|
|
1301
|
-
|
|
1302
|
-
|
|
1303
|
-
|
|
1304
|
-
|
|
1305
|
-
|
|
1306
|
-
|
|
1307
|
-
|
|
1344
|
+
/** Normalize services config to a consistent format - returns Map of service name to full image reference */
|
|
1345
|
+
function normalizeServices(services) {
|
|
1346
|
+
const result = /* @__PURE__ */ new Map();
|
|
1347
|
+
if (Array.isArray(services)) for (const name$1 of services) result.set(name$1, getDefaultImage(name$1));
|
|
1348
|
+
else for (const [name$1, config] of Object.entries(services)) {
|
|
1349
|
+
const serviceName = name$1;
|
|
1350
|
+
if (config === true) result.set(serviceName, getDefaultImage(serviceName));
|
|
1351
|
+
else if (config && typeof config === "object") {
|
|
1352
|
+
const serviceConfig = config;
|
|
1353
|
+
if (serviceConfig.image) result.set(serviceName, serviceConfig.image);
|
|
1354
|
+
else {
|
|
1355
|
+
const version$1 = serviceConfig.version ?? DEFAULT_SERVICE_VERSIONS[serviceName];
|
|
1356
|
+
result.set(serviceName, `${DEFAULT_SERVICE_IMAGES[serviceName]}:${version$1}`);
|
|
1308
1357
|
}
|
|
1309
|
-
}
|
|
1310
|
-
logger$5.log(`✅ Image built: ${imageRef}`);
|
|
1311
|
-
} catch (error) {
|
|
1312
|
-
throw new Error(`Failed to build Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
|
|
1358
|
+
}
|
|
1313
1359
|
}
|
|
1360
|
+
return result;
|
|
1314
1361
|
}
|
|
1315
1362
|
/**
|
|
1316
|
-
*
|
|
1363
|
+
* Generate docker-compose.yml for production deployment
|
|
1317
1364
|
*/
|
|
1318
|
-
|
|
1319
|
-
|
|
1320
|
-
|
|
1321
|
-
|
|
1322
|
-
|
|
1323
|
-
|
|
1324
|
-
|
|
1325
|
-
|
|
1326
|
-
|
|
1327
|
-
|
|
1365
|
+
function generateDockerCompose(options) {
|
|
1366
|
+
const { imageName, registry, port, healthCheckPath, services } = options;
|
|
1367
|
+
const serviceMap = normalizeServices(services);
|
|
1368
|
+
const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
|
|
1369
|
+
let yaml = `version: '3.8'
|
|
1370
|
+
|
|
1371
|
+
services:
|
|
1372
|
+
api:
|
|
1373
|
+
build:
|
|
1374
|
+
context: ../..
|
|
1375
|
+
dockerfile: .gkm/docker/Dockerfile
|
|
1376
|
+
image: ${imageRef}\${IMAGE_NAME:-${imageName}}:\${TAG:-latest}
|
|
1377
|
+
container_name: ${imageName}
|
|
1378
|
+
restart: unless-stopped
|
|
1379
|
+
ports:
|
|
1380
|
+
- "\${PORT:-${port}}:${port}"
|
|
1381
|
+
environment:
|
|
1382
|
+
- NODE_ENV=production
|
|
1383
|
+
`;
|
|
1384
|
+
if (serviceMap.has("postgres")) yaml += ` - DATABASE_URL=\${DATABASE_URL:-postgresql://postgres:postgres@postgres:5432/app}
|
|
1385
|
+
`;
|
|
1386
|
+
if (serviceMap.has("redis")) yaml += ` - REDIS_URL=\${REDIS_URL:-redis://redis:6379}
|
|
1387
|
+
`;
|
|
1388
|
+
if (serviceMap.has("rabbitmq")) yaml += ` - RABBITMQ_URL=\${RABBITMQ_URL:-amqp://rabbitmq:5672}
|
|
1389
|
+
`;
|
|
1390
|
+
yaml += ` healthcheck:
|
|
1391
|
+
test: ["CMD", "wget", "-q", "--spider", "http://localhost:${port}${healthCheckPath}"]
|
|
1392
|
+
interval: 30s
|
|
1393
|
+
timeout: 3s
|
|
1394
|
+
retries: 3
|
|
1395
|
+
`;
|
|
1396
|
+
if (serviceMap.size > 0) {
|
|
1397
|
+
yaml += ` depends_on:
|
|
1398
|
+
`;
|
|
1399
|
+
for (const serviceName of serviceMap.keys()) yaml += ` ${serviceName}:
|
|
1400
|
+
condition: service_healthy
|
|
1401
|
+
`;
|
|
1328
1402
|
}
|
|
1403
|
+
yaml += ` networks:
|
|
1404
|
+
- app-network
|
|
1405
|
+
`;
|
|
1406
|
+
const postgresImage = serviceMap.get("postgres");
|
|
1407
|
+
if (postgresImage) yaml += `
|
|
1408
|
+
postgres:
|
|
1409
|
+
image: ${postgresImage}
|
|
1410
|
+
container_name: postgres
|
|
1411
|
+
restart: unless-stopped
|
|
1412
|
+
environment:
|
|
1413
|
+
POSTGRES_USER: \${POSTGRES_USER:-postgres}
|
|
1414
|
+
POSTGRES_PASSWORD: \${POSTGRES_PASSWORD:-postgres}
|
|
1415
|
+
POSTGRES_DB: \${POSTGRES_DB:-app}
|
|
1416
|
+
volumes:
|
|
1417
|
+
- postgres_data:/var/lib/postgresql/data
|
|
1418
|
+
healthcheck:
|
|
1419
|
+
test: ["CMD-SHELL", "pg_isready -U postgres"]
|
|
1420
|
+
interval: 5s
|
|
1421
|
+
timeout: 5s
|
|
1422
|
+
retries: 5
|
|
1423
|
+
networks:
|
|
1424
|
+
- app-network
|
|
1425
|
+
`;
|
|
1426
|
+
const redisImage = serviceMap.get("redis");
|
|
1427
|
+
if (redisImage) yaml += `
|
|
1428
|
+
redis:
|
|
1429
|
+
image: ${redisImage}
|
|
1430
|
+
container_name: redis
|
|
1431
|
+
restart: unless-stopped
|
|
1432
|
+
volumes:
|
|
1433
|
+
- redis_data:/data
|
|
1434
|
+
healthcheck:
|
|
1435
|
+
test: ["CMD", "redis-cli", "ping"]
|
|
1436
|
+
interval: 5s
|
|
1437
|
+
timeout: 5s
|
|
1438
|
+
retries: 5
|
|
1439
|
+
networks:
|
|
1440
|
+
- app-network
|
|
1441
|
+
`;
|
|
1442
|
+
const rabbitmqImage = serviceMap.get("rabbitmq");
|
|
1443
|
+
if (rabbitmqImage) yaml += `
|
|
1444
|
+
rabbitmq:
|
|
1445
|
+
image: ${rabbitmqImage}
|
|
1446
|
+
container_name: rabbitmq
|
|
1447
|
+
restart: unless-stopped
|
|
1448
|
+
environment:
|
|
1449
|
+
RABBITMQ_DEFAULT_USER: \${RABBITMQ_USER:-guest}
|
|
1450
|
+
RABBITMQ_DEFAULT_PASS: \${RABBITMQ_PASSWORD:-guest}
|
|
1451
|
+
ports:
|
|
1452
|
+
- "15672:15672" # Management UI
|
|
1453
|
+
volumes:
|
|
1454
|
+
- rabbitmq_data:/var/lib/rabbitmq
|
|
1455
|
+
healthcheck:
|
|
1456
|
+
test: ["CMD", "rabbitmq-diagnostics", "-q", "ping"]
|
|
1457
|
+
interval: 10s
|
|
1458
|
+
timeout: 5s
|
|
1459
|
+
retries: 5
|
|
1460
|
+
networks:
|
|
1461
|
+
- app-network
|
|
1462
|
+
`;
|
|
1463
|
+
yaml += `
|
|
1464
|
+
volumes:
|
|
1465
|
+
`;
|
|
1466
|
+
if (serviceMap.has("postgres")) yaml += ` postgres_data:
|
|
1467
|
+
`;
|
|
1468
|
+
if (serviceMap.has("redis")) yaml += ` redis_data:
|
|
1469
|
+
`;
|
|
1470
|
+
if (serviceMap.has("rabbitmq")) yaml += ` rabbitmq_data:
|
|
1471
|
+
`;
|
|
1472
|
+
yaml += `
|
|
1473
|
+
networks:
|
|
1474
|
+
app-network:
|
|
1475
|
+
driver: bridge
|
|
1476
|
+
`;
|
|
1477
|
+
return yaml;
|
|
1329
1478
|
}
|
|
1330
1479
|
/**
|
|
1331
|
-
*
|
|
1480
|
+
* Generate a minimal docker-compose.yml for API only
|
|
1332
1481
|
*/
|
|
1333
|
-
|
|
1334
|
-
const {
|
|
1335
|
-
const
|
|
1336
|
-
|
|
1337
|
-
|
|
1338
|
-
|
|
1339
|
-
|
|
1340
|
-
|
|
1341
|
-
|
|
1342
|
-
|
|
1343
|
-
|
|
1344
|
-
|
|
1345
|
-
|
|
1346
|
-
|
|
1347
|
-
|
|
1348
|
-
|
|
1349
|
-
|
|
1350
|
-
|
|
1351
|
-
|
|
1352
|
-
|
|
1353
|
-
|
|
1482
|
+
function generateMinimalDockerCompose(options) {
|
|
1483
|
+
const { imageName, registry, port, healthCheckPath } = options;
|
|
1484
|
+
const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
|
|
1485
|
+
return `version: '3.8'
|
|
1486
|
+
|
|
1487
|
+
services:
|
|
1488
|
+
api:
|
|
1489
|
+
build:
|
|
1490
|
+
context: ../..
|
|
1491
|
+
dockerfile: .gkm/docker/Dockerfile
|
|
1492
|
+
image: ${imageRef}\${IMAGE_NAME:-${imageName}}:\${TAG:-latest}
|
|
1493
|
+
container_name: ${imageName}
|
|
1494
|
+
restart: unless-stopped
|
|
1495
|
+
ports:
|
|
1496
|
+
- "\${PORT:-${port}}:${port}"
|
|
1497
|
+
environment:
|
|
1498
|
+
- NODE_ENV=production
|
|
1499
|
+
healthcheck:
|
|
1500
|
+
test: ["CMD", "wget", "-q", "--spider", "http://localhost:${port}${healthCheckPath}"]
|
|
1501
|
+
interval: 30s
|
|
1502
|
+
timeout: 3s
|
|
1503
|
+
retries: 3
|
|
1504
|
+
networks:
|
|
1505
|
+
- app-network
|
|
1506
|
+
|
|
1507
|
+
networks:
|
|
1508
|
+
app-network:
|
|
1509
|
+
driver: bridge
|
|
1510
|
+
`;
|
|
1354
1511
|
}
|
|
1512
|
+
|
|
1513
|
+
//#endregion
|
|
1514
|
+
//#region src/docker/templates.ts
|
|
1515
|
+
const LOCKFILES = [
|
|
1516
|
+
["pnpm-lock.yaml", "pnpm"],
|
|
1517
|
+
["bun.lockb", "bun"],
|
|
1518
|
+
["yarn.lock", "yarn"],
|
|
1519
|
+
["package-lock.json", "npm"]
|
|
1520
|
+
];
|
|
1355
1521
|
/**
|
|
1356
|
-
*
|
|
1522
|
+
* Detect package manager from lockfiles
|
|
1523
|
+
* Walks up the directory tree to find lockfile (for monorepos)
|
|
1357
1524
|
*/
|
|
1358
|
-
function
|
|
1359
|
-
|
|
1360
|
-
|
|
1361
|
-
|
|
1362
|
-
|
|
1525
|
+
function detectPackageManager$1(cwd = process.cwd()) {
|
|
1526
|
+
let dir = cwd;
|
|
1527
|
+
const root = (0, node_path.parse)(dir).root;
|
|
1528
|
+
while (dir !== root) {
|
|
1529
|
+
for (const [lockfile, pm] of LOCKFILES) if ((0, node_fs.existsSync)((0, node_path.join)(dir, lockfile))) return pm;
|
|
1530
|
+
dir = (0, node_path.dirname)(dir);
|
|
1531
|
+
}
|
|
1532
|
+
for (const [lockfile, pm] of LOCKFILES) if ((0, node_fs.existsSync)((0, node_path.join)(root, lockfile))) return pm;
|
|
1533
|
+
return "pnpm";
|
|
1363
1534
|
}
|
|
1364
|
-
|
|
1365
|
-
//#endregion
|
|
1366
|
-
//#region src/deploy/dokploy.ts
|
|
1367
|
-
const logger$4 = console;
|
|
1368
1535
|
/**
|
|
1369
|
-
*
|
|
1536
|
+
* Find the lockfile path by walking up the directory tree
|
|
1537
|
+
* Returns the full path to the lockfile, or null if not found
|
|
1370
1538
|
*/
|
|
1371
|
-
|
|
1372
|
-
|
|
1373
|
-
|
|
1374
|
-
|
|
1539
|
+
function findLockfilePath(cwd = process.cwd()) {
|
|
1540
|
+
let dir = cwd;
|
|
1541
|
+
const root = (0, node_path.parse)(dir).root;
|
|
1542
|
+
while (dir !== root) {
|
|
1543
|
+
for (const [lockfile] of LOCKFILES) {
|
|
1544
|
+
const lockfilePath = (0, node_path.join)(dir, lockfile);
|
|
1545
|
+
if ((0, node_fs.existsSync)(lockfilePath)) return lockfilePath;
|
|
1546
|
+
}
|
|
1547
|
+
dir = (0, node_path.dirname)(dir);
|
|
1548
|
+
}
|
|
1549
|
+
for (const [lockfile] of LOCKFILES) {
|
|
1550
|
+
const lockfilePath = (0, node_path.join)(root, lockfile);
|
|
1551
|
+
if ((0, node_fs.existsSync)(lockfilePath)) return lockfilePath;
|
|
1552
|
+
}
|
|
1553
|
+
return null;
|
|
1375
1554
|
}
|
|
1376
1555
|
/**
|
|
1377
|
-
*
|
|
1556
|
+
* Check if we're in a monorepo (lockfile is in a parent directory)
|
|
1378
1557
|
*/
|
|
1379
|
-
|
|
1380
|
-
const
|
|
1381
|
-
|
|
1382
|
-
|
|
1383
|
-
|
|
1384
|
-
"Content-Type": "application/json",
|
|
1385
|
-
Authorization: `Bearer ${token}`
|
|
1386
|
-
},
|
|
1387
|
-
body: JSON.stringify(body)
|
|
1388
|
-
});
|
|
1389
|
-
if (!response.ok) {
|
|
1390
|
-
let errorMessage = `Dokploy API error: ${response.status} ${response.statusText}`;
|
|
1391
|
-
try {
|
|
1392
|
-
const errorBody = await response.json();
|
|
1393
|
-
if (errorBody.message) errorMessage = `Dokploy API error: ${errorBody.message}`;
|
|
1394
|
-
if (errorBody.issues?.length) errorMessage += `\n Issues: ${errorBody.issues.map((i) => i.message).join(", ")}`;
|
|
1395
|
-
} catch {}
|
|
1396
|
-
throw new Error(errorMessage);
|
|
1397
|
-
}
|
|
1398
|
-
return response.json();
|
|
1558
|
+
function isMonorepo(cwd = process.cwd()) {
|
|
1559
|
+
const lockfilePath = findLockfilePath(cwd);
|
|
1560
|
+
if (!lockfilePath) return false;
|
|
1561
|
+
const lockfileDir = (0, node_path.dirname)(lockfilePath);
|
|
1562
|
+
return lockfileDir !== cwd;
|
|
1399
1563
|
}
|
|
1400
1564
|
/**
|
|
1401
|
-
*
|
|
1565
|
+
* Check if turbo.json exists (walks up directory tree)
|
|
1402
1566
|
*/
|
|
1403
|
-
|
|
1404
|
-
|
|
1405
|
-
const
|
|
1406
|
-
|
|
1407
|
-
|
|
1408
|
-
|
|
1409
|
-
}
|
|
1410
|
-
|
|
1567
|
+
function hasTurboConfig(cwd = process.cwd()) {
|
|
1568
|
+
let dir = cwd;
|
|
1569
|
+
const root = (0, node_path.parse)(dir).root;
|
|
1570
|
+
while (dir !== root) {
|
|
1571
|
+
if ((0, node_fs.existsSync)((0, node_path.join)(dir, "turbo.json"))) return true;
|
|
1572
|
+
dir = (0, node_path.dirname)(dir);
|
|
1573
|
+
}
|
|
1574
|
+
return (0, node_fs.existsSync)((0, node_path.join)(root, "turbo.json"));
|
|
1411
1575
|
}
|
|
1412
1576
|
/**
|
|
1413
|
-
*
|
|
1577
|
+
* Get install command for turbo builds (without frozen lockfile)
|
|
1578
|
+
* Turbo prune creates a subset that may not perfectly match the lockfile
|
|
1414
1579
|
*/
|
|
1415
|
-
|
|
1416
|
-
|
|
1417
|
-
|
|
1418
|
-
|
|
1580
|
+
function getTurboInstallCmd(pm) {
|
|
1581
|
+
const commands = {
|
|
1582
|
+
pnpm: "pnpm install",
|
|
1583
|
+
npm: "npm install",
|
|
1584
|
+
yarn: "yarn install",
|
|
1585
|
+
bun: "bun install"
|
|
1586
|
+
};
|
|
1587
|
+
return commands[pm];
|
|
1419
1588
|
}
|
|
1420
1589
|
/**
|
|
1421
|
-
*
|
|
1590
|
+
* Get package manager specific commands and paths
|
|
1422
1591
|
*/
|
|
1423
|
-
|
|
1424
|
-
const
|
|
1425
|
-
|
|
1426
|
-
|
|
1427
|
-
|
|
1428
|
-
|
|
1429
|
-
|
|
1430
|
-
|
|
1431
|
-
|
|
1432
|
-
|
|
1433
|
-
|
|
1434
|
-
|
|
1435
|
-
|
|
1436
|
-
|
|
1437
|
-
|
|
1438
|
-
|
|
1439
|
-
|
|
1440
|
-
|
|
1441
|
-
|
|
1442
|
-
|
|
1443
|
-
|
|
1444
|
-
|
|
1592
|
+
function getPmConfig(pm) {
|
|
1593
|
+
const configs = {
|
|
1594
|
+
pnpm: {
|
|
1595
|
+
install: "corepack enable && corepack prepare pnpm@latest --activate",
|
|
1596
|
+
lockfile: "pnpm-lock.yaml",
|
|
1597
|
+
fetch: "pnpm fetch",
|
|
1598
|
+
installCmd: "pnpm install --frozen-lockfile --offline",
|
|
1599
|
+
cacheTarget: "/root/.local/share/pnpm/store",
|
|
1600
|
+
cacheId: "pnpm",
|
|
1601
|
+
run: "pnpm",
|
|
1602
|
+
dlx: "pnpm dlx",
|
|
1603
|
+
addGlobal: "pnpm add -g"
|
|
1604
|
+
},
|
|
1605
|
+
npm: {
|
|
1606
|
+
install: "",
|
|
1607
|
+
lockfile: "package-lock.json",
|
|
1608
|
+
fetch: "",
|
|
1609
|
+
installCmd: "npm ci",
|
|
1610
|
+
cacheTarget: "/root/.npm",
|
|
1611
|
+
cacheId: "npm",
|
|
1612
|
+
run: "npm run",
|
|
1613
|
+
dlx: "npx",
|
|
1614
|
+
addGlobal: "npm install -g"
|
|
1615
|
+
},
|
|
1616
|
+
yarn: {
|
|
1617
|
+
install: "corepack enable && corepack prepare yarn@stable --activate",
|
|
1618
|
+
lockfile: "yarn.lock",
|
|
1619
|
+
fetch: "",
|
|
1620
|
+
installCmd: "yarn install --frozen-lockfile",
|
|
1621
|
+
cacheTarget: "/root/.yarn/cache",
|
|
1622
|
+
cacheId: "yarn",
|
|
1623
|
+
run: "yarn",
|
|
1624
|
+
dlx: "yarn dlx",
|
|
1625
|
+
addGlobal: "yarn global add"
|
|
1626
|
+
},
|
|
1627
|
+
bun: {
|
|
1628
|
+
install: "npm install -g bun",
|
|
1629
|
+
lockfile: "bun.lockb",
|
|
1630
|
+
fetch: "",
|
|
1631
|
+
installCmd: "bun install --frozen-lockfile",
|
|
1632
|
+
cacheTarget: "/root/.bun/install/cache",
|
|
1633
|
+
cacheId: "bun",
|
|
1634
|
+
run: "bun run",
|
|
1635
|
+
dlx: "bunx",
|
|
1636
|
+
addGlobal: "bun add -g"
|
|
1637
|
+
}
|
|
1445
1638
|
};
|
|
1639
|
+
return configs[pm];
|
|
1446
1640
|
}
|
|
1447
1641
|
/**
|
|
1448
|
-
*
|
|
1642
|
+
* Generate a multi-stage Dockerfile for building from source
|
|
1643
|
+
* Optimized for build speed with:
|
|
1644
|
+
* - BuildKit cache mounts for package manager store
|
|
1645
|
+
* - pnpm fetch for better layer caching (when using pnpm)
|
|
1646
|
+
* - Optional turbo prune for monorepos
|
|
1449
1647
|
*/
|
|
1450
|
-
function
|
|
1451
|
-
|
|
1452
|
-
|
|
1453
|
-
|
|
1454
|
-
"
|
|
1455
|
-
|
|
1456
|
-
|
|
1457
|
-
const
|
|
1458
|
-
|
|
1459
|
-
|
|
1460
|
-
|
|
1461
|
-
endpoint: 'https://dokploy.example.com',
|
|
1462
|
-
projectId: 'proj_xxx',
|
|
1463
|
-
applicationId: 'app_xxx',
|
|
1464
|
-
},
|
|
1465
|
-
}`);
|
|
1466
|
-
return true;
|
|
1467
|
-
}
|
|
1648
|
+
function generateMultiStageDockerfile(options) {
|
|
1649
|
+
const { baseImage, port, healthCheckPath, turbo, turboPackage, packageManager } = options;
|
|
1650
|
+
if (turbo) return generateTurboDockerfile({
|
|
1651
|
+
...options,
|
|
1652
|
+
turboPackage: turboPackage ?? "api"
|
|
1653
|
+
});
|
|
1654
|
+
const pm = getPmConfig(packageManager);
|
|
1655
|
+
const installPm = pm.install ? `\n# Install ${packageManager}\nRUN ${pm.install}\n` : "";
|
|
1656
|
+
const hasFetch = packageManager === "pnpm";
|
|
1657
|
+
const depsStage = hasFetch ? `# Copy lockfile first for better caching
|
|
1658
|
+
COPY ${pm.lockfile} ./
|
|
1468
1659
|
|
|
1469
|
-
|
|
1470
|
-
|
|
1471
|
-
|
|
1660
|
+
# Fetch dependencies (downloads to virtual store, cached separately)
|
|
1661
|
+
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
|
|
1662
|
+
${pm.fetch}
|
|
1663
|
+
|
|
1664
|
+
# Copy package.json after fetch
|
|
1665
|
+
COPY package.json ./
|
|
1666
|
+
|
|
1667
|
+
# Install from cache (fast - no network needed)
|
|
1668
|
+
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
|
|
1669
|
+
${pm.installCmd}` : `# Copy package files
|
|
1670
|
+
COPY package.json ${pm.lockfile} ./
|
|
1671
|
+
|
|
1672
|
+
# Install dependencies with cache
|
|
1673
|
+
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
|
|
1674
|
+
${pm.installCmd}`;
|
|
1675
|
+
return `# syntax=docker/dockerfile:1
|
|
1676
|
+
# Stage 1: Dependencies
|
|
1677
|
+
FROM ${baseImage} AS deps
|
|
1678
|
+
|
|
1679
|
+
WORKDIR /app
|
|
1680
|
+
${installPm}
|
|
1681
|
+
${depsStage}
|
|
1682
|
+
|
|
1683
|
+
# Stage 2: Build
|
|
1684
|
+
FROM deps AS builder
|
|
1685
|
+
|
|
1686
|
+
WORKDIR /app
|
|
1687
|
+
|
|
1688
|
+
# Copy source (deps already installed)
|
|
1689
|
+
COPY . .
|
|
1690
|
+
|
|
1691
|
+
# Build production server using CLI from npm
|
|
1692
|
+
RUN ${pm.dlx} @geekmidas/cli build --provider server --production
|
|
1693
|
+
|
|
1694
|
+
# Stage 3: Production
|
|
1695
|
+
FROM ${baseImage} AS runner
|
|
1696
|
+
|
|
1697
|
+
WORKDIR /app
|
|
1698
|
+
|
|
1699
|
+
# Install tini for proper signal handling as PID 1
|
|
1700
|
+
RUN apk add --no-cache tini
|
|
1701
|
+
|
|
1702
|
+
# Create non-root user
|
|
1703
|
+
RUN addgroup --system --gid 1001 nodejs && \\
|
|
1704
|
+
adduser --system --uid 1001 hono
|
|
1705
|
+
|
|
1706
|
+
# Copy bundled server
|
|
1707
|
+
COPY --from=builder --chown=hono:nodejs /app/.gkm/server/dist/server.mjs ./
|
|
1708
|
+
|
|
1709
|
+
# Environment
|
|
1710
|
+
ENV NODE_ENV=production
|
|
1711
|
+
ENV PORT=${port}
|
|
1712
|
+
|
|
1713
|
+
# Health check
|
|
1714
|
+
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \\
|
|
1715
|
+
CMD wget -q --spider http://localhost:${port}${healthCheckPath} || exit 1
|
|
1716
|
+
|
|
1717
|
+
# Switch to non-root user
|
|
1718
|
+
USER hono
|
|
1719
|
+
|
|
1720
|
+
EXPOSE ${port}
|
|
1721
|
+
|
|
1722
|
+
# Use tini as entrypoint to handle PID 1 responsibilities
|
|
1723
|
+
ENTRYPOINT ["/sbin/tini", "--"]
|
|
1724
|
+
CMD ["node", "server.mjs"]
|
|
1725
|
+
`;
|
|
1726
|
+
}
|
|
1472
1727
|
/**
|
|
1473
|
-
* Generate
|
|
1728
|
+
* Generate a Dockerfile optimized for Turbo monorepos
|
|
1729
|
+
* Uses turbo prune to create minimal Docker context
|
|
1474
1730
|
*/
|
|
1475
|
-
function
|
|
1476
|
-
const
|
|
1477
|
-
|
|
1731
|
+
function generateTurboDockerfile(options) {
|
|
1732
|
+
const { baseImage, port, healthCheckPath, turboPackage, packageManager } = options;
|
|
1733
|
+
const pm = getPmConfig(packageManager);
|
|
1734
|
+
const installPm = pm.install ? `RUN ${pm.install}` : "";
|
|
1735
|
+
const turboInstallCmd = getTurboInstallCmd(packageManager);
|
|
1736
|
+
const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
|
|
1737
|
+
return `# syntax=docker/dockerfile:1
|
|
1738
|
+
# Stage 1: Prune monorepo
|
|
1739
|
+
FROM ${baseImage} AS pruner
|
|
1740
|
+
|
|
1741
|
+
WORKDIR /app
|
|
1742
|
+
|
|
1743
|
+
${installPm}
|
|
1744
|
+
|
|
1745
|
+
COPY . .
|
|
1746
|
+
|
|
1747
|
+
# Prune to only include necessary packages
|
|
1748
|
+
RUN ${turboCmd} prune ${turboPackage} --docker
|
|
1749
|
+
|
|
1750
|
+
# Stage 2: Install dependencies
|
|
1751
|
+
FROM ${baseImage} AS deps
|
|
1752
|
+
|
|
1753
|
+
WORKDIR /app
|
|
1754
|
+
|
|
1755
|
+
${installPm}
|
|
1756
|
+
|
|
1757
|
+
# Copy pruned lockfile and package.jsons
|
|
1758
|
+
COPY --from=pruner /app/out/${pm.lockfile} ./
|
|
1759
|
+
COPY --from=pruner /app/out/json/ ./
|
|
1760
|
+
|
|
1761
|
+
# Install dependencies (no frozen-lockfile since turbo prune creates a subset)
|
|
1762
|
+
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
|
|
1763
|
+
${turboInstallCmd}
|
|
1764
|
+
|
|
1765
|
+
# Stage 3: Build
|
|
1766
|
+
FROM deps AS builder
|
|
1767
|
+
|
|
1768
|
+
WORKDIR /app
|
|
1769
|
+
|
|
1770
|
+
# Copy pruned source
|
|
1771
|
+
COPY --from=pruner /app/out/full/ ./
|
|
1772
|
+
|
|
1773
|
+
# Build production server using CLI from npm
|
|
1774
|
+
RUN ${pm.dlx} @geekmidas/cli build --provider server --production
|
|
1775
|
+
|
|
1776
|
+
# Stage 4: Production
|
|
1777
|
+
FROM ${baseImage} AS runner
|
|
1778
|
+
|
|
1779
|
+
WORKDIR /app
|
|
1780
|
+
|
|
1781
|
+
RUN apk add --no-cache tini
|
|
1782
|
+
|
|
1783
|
+
RUN addgroup --system --gid 1001 nodejs && \\
|
|
1784
|
+
adduser --system --uid 1001 hono
|
|
1785
|
+
|
|
1786
|
+
COPY --from=builder --chown=hono:nodejs /app/.gkm/server/dist/server.mjs ./
|
|
1787
|
+
|
|
1788
|
+
ENV NODE_ENV=production
|
|
1789
|
+
ENV PORT=${port}
|
|
1790
|
+
|
|
1791
|
+
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \\
|
|
1792
|
+
CMD wget -q --spider http://localhost:${port}${healthCheckPath} || exit 1
|
|
1793
|
+
|
|
1794
|
+
USER hono
|
|
1795
|
+
|
|
1796
|
+
EXPOSE ${port}
|
|
1797
|
+
|
|
1798
|
+
ENTRYPOINT ["/sbin/tini", "--"]
|
|
1799
|
+
CMD ["node", "server.mjs"]
|
|
1800
|
+
`;
|
|
1478
1801
|
}
|
|
1479
1802
|
/**
|
|
1480
|
-
*
|
|
1803
|
+
* Generate a slim Dockerfile for pre-built bundles
|
|
1481
1804
|
*/
|
|
1482
|
-
|
|
1483
|
-
const {
|
|
1484
|
-
|
|
1485
|
-
|
|
1486
|
-
const config = await require_config.loadConfig();
|
|
1487
|
-
const imageTag = tag ?? generateTag(stage);
|
|
1488
|
-
logger$3.log(` Tag: ${imageTag}`);
|
|
1489
|
-
let masterKey;
|
|
1490
|
-
if (!skipBuild) {
|
|
1491
|
-
logger$3.log(`\n📦 Building for production...`);
|
|
1492
|
-
const buildResult = await buildCommand({
|
|
1493
|
-
provider: "server",
|
|
1494
|
-
production: true,
|
|
1495
|
-
stage
|
|
1496
|
-
});
|
|
1497
|
-
masterKey = buildResult.masterKey;
|
|
1498
|
-
} else logger$3.log(`\n⏭️ Skipping build (--skip-build)`);
|
|
1499
|
-
const dockerConfig = resolveDockerConfig$1(config);
|
|
1500
|
-
const imageName = dockerConfig.imageName ?? "app";
|
|
1501
|
-
const registry = dockerConfig.registry;
|
|
1502
|
-
const imageRef = registry ? `${registry}/${imageName}:${imageTag}` : `${imageName}:${imageTag}`;
|
|
1503
|
-
let result;
|
|
1504
|
-
switch (provider) {
|
|
1505
|
-
case "docker": {
|
|
1506
|
-
result = await deployDocker({
|
|
1507
|
-
stage,
|
|
1508
|
-
tag: imageTag,
|
|
1509
|
-
skipPush,
|
|
1510
|
-
masterKey,
|
|
1511
|
-
config: dockerConfig
|
|
1512
|
-
});
|
|
1513
|
-
break;
|
|
1514
|
-
}
|
|
1515
|
-
case "dokploy": {
|
|
1516
|
-
const dokployConfigRaw = config.providers?.dokploy;
|
|
1517
|
-
if (typeof dokployConfigRaw === "boolean" || !dokployConfigRaw) throw new Error("Dokploy provider requires configuration.\nConfigure in gkm.config.ts:\n providers: {\n dokploy: {\n endpoint: 'https://dokploy.example.com',\n projectId: 'proj_xxx',\n applicationId: 'app_xxx',\n },\n }");
|
|
1518
|
-
validateDokployConfig(dokployConfigRaw);
|
|
1519
|
-
const dokployConfig = dokployConfigRaw;
|
|
1520
|
-
await deployDocker({
|
|
1521
|
-
stage,
|
|
1522
|
-
tag: imageTag,
|
|
1523
|
-
skipPush: false,
|
|
1524
|
-
masterKey,
|
|
1525
|
-
config: {
|
|
1526
|
-
registry: dokployConfig.registry ?? dockerConfig.registry,
|
|
1527
|
-
imageName: dockerConfig.imageName
|
|
1528
|
-
}
|
|
1529
|
-
});
|
|
1530
|
-
result = await deployDokploy({
|
|
1531
|
-
stage,
|
|
1532
|
-
tag: imageTag,
|
|
1533
|
-
imageRef,
|
|
1534
|
-
masterKey,
|
|
1535
|
-
config: dokployConfig
|
|
1536
|
-
});
|
|
1537
|
-
break;
|
|
1538
|
-
}
|
|
1539
|
-
case "aws-lambda": {
|
|
1540
|
-
logger$3.log("\n⚠️ AWS Lambda deployment is not yet implemented.");
|
|
1541
|
-
logger$3.log(" Use SST or AWS CDK for Lambda deployments.");
|
|
1542
|
-
result = {
|
|
1543
|
-
imageRef,
|
|
1544
|
-
masterKey
|
|
1545
|
-
};
|
|
1546
|
-
break;
|
|
1547
|
-
}
|
|
1548
|
-
default: throw new Error(`Unknown deploy provider: ${provider}\nSupported providers: docker, dokploy, aws-lambda`);
|
|
1549
|
-
}
|
|
1550
|
-
logger$3.log("\n✅ Deployment complete!");
|
|
1551
|
-
return result;
|
|
1552
|
-
}
|
|
1805
|
+
function generateSlimDockerfile(options) {
|
|
1806
|
+
const { baseImage, port, healthCheckPath } = options;
|
|
1807
|
+
return `# Slim Dockerfile for pre-built production bundle
|
|
1808
|
+
FROM ${baseImage}
|
|
1553
1809
|
|
|
1554
|
-
|
|
1555
|
-
|
|
1556
|
-
|
|
1810
|
+
WORKDIR /app
|
|
1811
|
+
|
|
1812
|
+
# Install tini for proper signal handling as PID 1
|
|
1813
|
+
# Handles SIGTERM propagation and zombie process reaping
|
|
1814
|
+
RUN apk add --no-cache tini
|
|
1815
|
+
|
|
1816
|
+
# Create non-root user
|
|
1817
|
+
RUN addgroup --system --gid 1001 nodejs && \\
|
|
1818
|
+
adduser --system --uid 1001 hono
|
|
1819
|
+
|
|
1820
|
+
# Copy pre-built bundle
|
|
1821
|
+
COPY .gkm/server/dist/server.mjs ./
|
|
1822
|
+
|
|
1823
|
+
# Environment
|
|
1824
|
+
ENV NODE_ENV=production
|
|
1825
|
+
ENV PORT=${port}
|
|
1826
|
+
|
|
1827
|
+
# Health check
|
|
1828
|
+
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \\
|
|
1829
|
+
CMD wget -q --spider http://localhost:${port}${healthCheckPath} || exit 1
|
|
1830
|
+
|
|
1831
|
+
# Switch to non-root user
|
|
1832
|
+
USER hono
|
|
1833
|
+
|
|
1834
|
+
EXPOSE ${port}
|
|
1835
|
+
|
|
1836
|
+
# Use tini as entrypoint to handle PID 1 responsibilities
|
|
1837
|
+
ENTRYPOINT ["/sbin/tini", "--"]
|
|
1838
|
+
CMD ["node", "server.mjs"]
|
|
1839
|
+
`;
|
|
1840
|
+
}
|
|
1557
1841
|
/**
|
|
1558
|
-
*
|
|
1842
|
+
* Generate .dockerignore file
|
|
1559
1843
|
*/
|
|
1560
|
-
|
|
1561
|
-
|
|
1562
|
-
|
|
1563
|
-
|
|
1844
|
+
function generateDockerignore() {
|
|
1845
|
+
return `# Dependencies
|
|
1846
|
+
node_modules
|
|
1847
|
+
.pnpm-store
|
|
1848
|
+
|
|
1849
|
+
# Build output (except what we need)
|
|
1850
|
+
.gkm/aws*
|
|
1851
|
+
.gkm/server/*.ts
|
|
1852
|
+
!.gkm/server/dist
|
|
1853
|
+
|
|
1854
|
+
# IDE and editor
|
|
1855
|
+
.idea
|
|
1856
|
+
.vscode
|
|
1857
|
+
*.swp
|
|
1858
|
+
*.swo
|
|
1859
|
+
|
|
1860
|
+
# Git
|
|
1861
|
+
.git
|
|
1862
|
+
.gitignore
|
|
1863
|
+
|
|
1864
|
+
# Logs
|
|
1865
|
+
*.log
|
|
1866
|
+
npm-debug.log*
|
|
1867
|
+
pnpm-debug.log*
|
|
1868
|
+
|
|
1869
|
+
# Test files
|
|
1870
|
+
**/*.test.ts
|
|
1871
|
+
**/*.spec.ts
|
|
1872
|
+
**/__tests__
|
|
1873
|
+
coverage
|
|
1874
|
+
|
|
1875
|
+
# Documentation
|
|
1876
|
+
docs
|
|
1877
|
+
*.md
|
|
1878
|
+
!README.md
|
|
1879
|
+
|
|
1880
|
+
# Environment files (handle secrets separately)
|
|
1881
|
+
.env
|
|
1882
|
+
.env.*
|
|
1883
|
+
!.env.example
|
|
1884
|
+
|
|
1885
|
+
# Docker files (don't copy recursively)
|
|
1886
|
+
Dockerfile*
|
|
1887
|
+
docker-compose*
|
|
1888
|
+
.dockerignore
|
|
1889
|
+
`;
|
|
1564
1890
|
}
|
|
1565
1891
|
/**
|
|
1566
|
-
*
|
|
1892
|
+
* Generate docker-entrypoint.sh for custom startup logic
|
|
1567
1893
|
*/
|
|
1568
|
-
|
|
1569
|
-
|
|
1570
|
-
|
|
1571
|
-
|
|
1572
|
-
|
|
1573
|
-
|
|
1574
|
-
|
|
1575
|
-
|
|
1576
|
-
|
|
1577
|
-
|
|
1578
|
-
|
|
1579
|
-
|
|
1580
|
-
|
|
1581
|
-
|
|
1582
|
-
if (errorBody.message) errorMessage = `Dokploy API error: ${errorBody.message}`;
|
|
1583
|
-
} catch {}
|
|
1584
|
-
throw new Error(errorMessage);
|
|
1585
|
-
}
|
|
1586
|
-
const text = await response.text();
|
|
1587
|
-
if (!text) return {};
|
|
1588
|
-
return JSON.parse(text);
|
|
1894
|
+
function generateDockerEntrypoint() {
|
|
1895
|
+
return `#!/bin/sh
|
|
1896
|
+
set -e
|
|
1897
|
+
|
|
1898
|
+
# Run any custom startup scripts here
|
|
1899
|
+
# Example: wait for database
|
|
1900
|
+
# until nc -z $DB_HOST $DB_PORT; do
|
|
1901
|
+
# echo "Waiting for database..."
|
|
1902
|
+
# sleep 1
|
|
1903
|
+
# done
|
|
1904
|
+
|
|
1905
|
+
# Execute the main command
|
|
1906
|
+
exec "$@"
|
|
1907
|
+
`;
|
|
1589
1908
|
}
|
|
1590
1909
|
/**
|
|
1591
|
-
*
|
|
1910
|
+
* Resolve Docker configuration from GkmConfig with defaults
|
|
1592
1911
|
*/
|
|
1593
|
-
|
|
1594
|
-
|
|
1912
|
+
function resolveDockerConfig$1(config) {
|
|
1913
|
+
const docker = config.docker ?? {};
|
|
1914
|
+
let defaultImageName = "api";
|
|
1915
|
+
try {
|
|
1916
|
+
const pkg = require(`${process.cwd()}/package.json`);
|
|
1917
|
+
if (pkg.name) defaultImageName = pkg.name.replace(/^@[^/]+\//, "");
|
|
1918
|
+
} catch {}
|
|
1919
|
+
return {
|
|
1920
|
+
registry: docker.registry ?? "",
|
|
1921
|
+
imageName: docker.imageName ?? defaultImageName,
|
|
1922
|
+
baseImage: docker.baseImage ?? "node:22-alpine",
|
|
1923
|
+
port: docker.port ?? 3e3,
|
|
1924
|
+
compose: docker.compose
|
|
1925
|
+
};
|
|
1595
1926
|
}
|
|
1927
|
+
|
|
1928
|
+
//#endregion
|
|
1929
|
+
//#region src/docker/index.ts
|
|
1930
|
+
const logger$5 = console;
|
|
1596
1931
|
/**
|
|
1597
|
-
*
|
|
1932
|
+
* Docker command implementation
|
|
1933
|
+
* Generates Dockerfile, docker-compose.yml, and related files
|
|
1934
|
+
*
|
|
1935
|
+
* Default: Multi-stage Dockerfile that builds from source inside Docker
|
|
1936
|
+
* --slim: Slim Dockerfile that copies pre-built bundle (requires prior build)
|
|
1598
1937
|
*/
|
|
1599
|
-
async function
|
|
1600
|
-
|
|
1601
|
-
|
|
1602
|
-
|
|
1603
|
-
|
|
1938
|
+
async function dockerCommand(options) {
|
|
1939
|
+
const config = await require_config.loadConfig();
|
|
1940
|
+
const dockerConfig = resolveDockerConfig$1(config);
|
|
1941
|
+
const serverConfig = typeof config.providers?.server === "object" ? config.providers.server : void 0;
|
|
1942
|
+
const healthCheckPath = serverConfig?.production?.healthCheck ?? "/health";
|
|
1943
|
+
const useSlim = options.slim === true;
|
|
1944
|
+
if (useSlim) {
|
|
1945
|
+
const distDir = (0, node_path.join)(process.cwd(), ".gkm", "server", "dist");
|
|
1946
|
+
const hasBuild = (0, node_fs.existsSync)((0, node_path.join)(distDir, "server.mjs"));
|
|
1947
|
+
if (!hasBuild) throw new Error("Slim Dockerfile requires a pre-built bundle. Run `gkm build --provider server --production` first, or omit --slim to use multi-stage build.");
|
|
1948
|
+
}
|
|
1949
|
+
const dockerDir = (0, node_path.join)(process.cwd(), ".gkm", "docker");
|
|
1950
|
+
await (0, node_fs_promises.mkdir)(dockerDir, { recursive: true });
|
|
1951
|
+
const packageManager = detectPackageManager$1();
|
|
1952
|
+
const inMonorepo = isMonorepo();
|
|
1953
|
+
const hasTurbo = hasTurboConfig();
|
|
1954
|
+
let useTurbo = options.turbo ?? false;
|
|
1955
|
+
if (inMonorepo && !useSlim) if (hasTurbo) {
|
|
1956
|
+
useTurbo = true;
|
|
1957
|
+
logger$5.log(" Detected monorepo with turbo.json - using turbo prune");
|
|
1958
|
+
} else throw new Error("Monorepo detected but turbo.json not found.\n\nDocker builds in monorepos require Turborepo for proper dependency isolation.\n\nTo fix this:\n 1. Install turbo: pnpm add -Dw turbo\n 2. Create turbo.json in your monorepo root\n 3. Run this command again\n\nSee: https://turbo.build/repo/docs/guides/tools/docker");
|
|
1959
|
+
let turboPackage = options.turboPackage ?? dockerConfig.imageName;
|
|
1960
|
+
if (useTurbo && !options.turboPackage) try {
|
|
1961
|
+
const pkg = require(`${process.cwd()}/package.json`);
|
|
1962
|
+
if (pkg.name) {
|
|
1963
|
+
turboPackage = pkg.name;
|
|
1964
|
+
logger$5.log(` Turbo package: ${turboPackage}`);
|
|
1965
|
+
}
|
|
1966
|
+
} catch {}
|
|
1967
|
+
const templateOptions = {
|
|
1968
|
+
imageName: dockerConfig.imageName,
|
|
1969
|
+
baseImage: dockerConfig.baseImage,
|
|
1970
|
+
port: dockerConfig.port,
|
|
1971
|
+
healthCheckPath,
|
|
1972
|
+
prebuilt: useSlim,
|
|
1973
|
+
turbo: useTurbo,
|
|
1974
|
+
turboPackage,
|
|
1975
|
+
packageManager
|
|
1976
|
+
};
|
|
1977
|
+
const dockerfile = useSlim ? generateSlimDockerfile(templateOptions) : generateMultiStageDockerfile(templateOptions);
|
|
1978
|
+
const dockerMode = useSlim ? "slim" : useTurbo ? "turbo" : "multi-stage";
|
|
1979
|
+
const dockerfilePath = (0, node_path.join)(dockerDir, "Dockerfile");
|
|
1980
|
+
await (0, node_fs_promises.writeFile)(dockerfilePath, dockerfile);
|
|
1981
|
+
logger$5.log(`Generated: .gkm/docker/Dockerfile (${dockerMode}, ${packageManager})`);
|
|
1982
|
+
const composeOptions = {
|
|
1983
|
+
imageName: dockerConfig.imageName,
|
|
1984
|
+
registry: options.registry ?? dockerConfig.registry,
|
|
1985
|
+
port: dockerConfig.port,
|
|
1986
|
+
healthCheckPath,
|
|
1987
|
+
services: dockerConfig.compose?.services ?? {}
|
|
1988
|
+
};
|
|
1989
|
+
const hasServices = Array.isArray(composeOptions.services) ? composeOptions.services.length > 0 : Object.keys(composeOptions.services).length > 0;
|
|
1990
|
+
const dockerCompose = hasServices ? generateDockerCompose(composeOptions) : generateMinimalDockerCompose(composeOptions);
|
|
1991
|
+
const composePath = (0, node_path.join)(dockerDir, "docker-compose.yml");
|
|
1992
|
+
await (0, node_fs_promises.writeFile)(composePath, dockerCompose);
|
|
1993
|
+
logger$5.log("Generated: .gkm/docker/docker-compose.yml");
|
|
1994
|
+
const dockerignore = generateDockerignore();
|
|
1995
|
+
const dockerignorePath = (0, node_path.join)(process.cwd(), ".dockerignore");
|
|
1996
|
+
await (0, node_fs_promises.writeFile)(dockerignorePath, dockerignore);
|
|
1997
|
+
logger$5.log("Generated: .dockerignore (project root)");
|
|
1998
|
+
const entrypoint = generateDockerEntrypoint();
|
|
1999
|
+
const entrypointPath = (0, node_path.join)(dockerDir, "docker-entrypoint.sh");
|
|
2000
|
+
await (0, node_fs_promises.writeFile)(entrypointPath, entrypoint);
|
|
2001
|
+
logger$5.log("Generated: .gkm/docker/docker-entrypoint.sh");
|
|
2002
|
+
const result = {
|
|
2003
|
+
dockerfile: dockerfilePath,
|
|
2004
|
+
dockerCompose: composePath,
|
|
2005
|
+
dockerignore: dockerignorePath,
|
|
2006
|
+
entrypoint: entrypointPath
|
|
2007
|
+
};
|
|
2008
|
+
if (options.build) await buildDockerImage(dockerConfig.imageName, options);
|
|
2009
|
+
if (options.push) await pushDockerImage(dockerConfig.imageName, options);
|
|
2010
|
+
return result;
|
|
1604
2011
|
}
|
|
1605
2012
|
/**
|
|
1606
|
-
*
|
|
2013
|
+
* Ensure lockfile exists in the build context
|
|
2014
|
+
* For monorepos, copies from workspace root if needed
|
|
2015
|
+
* Returns cleanup function if file was copied
|
|
1607
2016
|
*/
|
|
1608
|
-
|
|
1609
|
-
|
|
2017
|
+
function ensureLockfile(cwd) {
|
|
2018
|
+
const lockfilePath = findLockfilePath(cwd);
|
|
2019
|
+
if (!lockfilePath) {
|
|
2020
|
+
logger$5.warn("\n⚠️ No lockfile found. Docker build may fail or use stale dependencies.");
|
|
2021
|
+
return null;
|
|
2022
|
+
}
|
|
2023
|
+
const lockfileName = (0, node_path.basename)(lockfilePath);
|
|
2024
|
+
const localLockfile = (0, node_path.join)(cwd, lockfileName);
|
|
2025
|
+
if (lockfilePath === localLockfile) return null;
|
|
2026
|
+
logger$5.log(` Copying ${lockfileName} from monorepo root...`);
|
|
2027
|
+
(0, node_fs.copyFileSync)(lockfilePath, localLockfile);
|
|
2028
|
+
return () => {
|
|
2029
|
+
try {
|
|
2030
|
+
(0, node_fs.unlinkSync)(localLockfile);
|
|
2031
|
+
} catch {}
|
|
2032
|
+
};
|
|
1610
2033
|
}
|
|
1611
2034
|
/**
|
|
1612
|
-
*
|
|
2035
|
+
* Build Docker image
|
|
2036
|
+
* Uses BuildKit for cache mount support
|
|
1613
2037
|
*/
|
|
1614
|
-
async function
|
|
1615
|
-
const
|
|
1616
|
-
|
|
1617
|
-
const
|
|
1618
|
-
|
|
1619
|
-
|
|
1620
|
-
|
|
1621
|
-
|
|
1622
|
-
|
|
1623
|
-
|
|
2038
|
+
async function buildDockerImage(imageName, options) {
|
|
2039
|
+
const tag = options.tag ?? "latest";
|
|
2040
|
+
const registry = options.registry;
|
|
2041
|
+
const fullImageName = registry ? `${registry}/${imageName}:${tag}` : `${imageName}:${tag}`;
|
|
2042
|
+
logger$5.log(`\n🐳 Building Docker image: ${fullImageName}`);
|
|
2043
|
+
const cwd = process.cwd();
|
|
2044
|
+
const cleanup = ensureLockfile(cwd);
|
|
2045
|
+
try {
|
|
2046
|
+
(0, node_child_process.execSync)(`DOCKER_BUILDKIT=1 docker build -f .gkm/docker/Dockerfile -t ${fullImageName} .`, {
|
|
2047
|
+
cwd,
|
|
2048
|
+
stdio: "inherit",
|
|
2049
|
+
env: {
|
|
2050
|
+
...process.env,
|
|
2051
|
+
DOCKER_BUILDKIT: "1"
|
|
2052
|
+
}
|
|
1624
2053
|
});
|
|
1625
|
-
|
|
2054
|
+
logger$5.log(`✅ Docker image built: ${fullImageName}`);
|
|
2055
|
+
} catch (error) {
|
|
2056
|
+
throw new Error(`Failed to build Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
|
|
2057
|
+
} finally {
|
|
2058
|
+
cleanup?.();
|
|
1626
2059
|
}
|
|
1627
|
-
return dokployRequest("POST", "application.create", baseUrl, token, {
|
|
1628
|
-
name: name$1,
|
|
1629
|
-
projectId,
|
|
1630
|
-
environmentId
|
|
1631
|
-
});
|
|
1632
2060
|
}
|
|
1633
2061
|
/**
|
|
1634
|
-
*
|
|
2062
|
+
* Push Docker image to registry
|
|
1635
2063
|
*/
|
|
1636
|
-
async function
|
|
1637
|
-
|
|
1638
|
-
|
|
1639
|
-
|
|
2064
|
+
async function pushDockerImage(imageName, options) {
|
|
2065
|
+
const tag = options.tag ?? "latest";
|
|
2066
|
+
const registry = options.registry;
|
|
2067
|
+
if (!registry) throw new Error("Registry is required to push Docker image. Use --registry or configure docker.registry in gkm.config.ts");
|
|
2068
|
+
const fullImageName = `${registry}/${imageName}:${tag}`;
|
|
2069
|
+
logger$5.log(`\n🚀 Pushing Docker image: ${fullImageName}`);
|
|
2070
|
+
try {
|
|
2071
|
+
(0, node_child_process.execSync)(`docker push ${fullImageName}`, {
|
|
2072
|
+
cwd: process.cwd(),
|
|
2073
|
+
stdio: "inherit"
|
|
2074
|
+
});
|
|
2075
|
+
logger$5.log(`✅ Docker image pushed: ${fullImageName}`);
|
|
2076
|
+
} catch (error) {
|
|
2077
|
+
throw new Error(`Failed to push Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
|
|
2078
|
+
}
|
|
2079
|
+
}
|
|
2080
|
+
|
|
2081
|
+
//#endregion
|
|
2082
|
+
//#region src/deploy/docker.ts
|
|
2083
|
+
/**
|
|
2084
|
+
* Get app name from package.json in the current working directory
|
|
2085
|
+
* Used for Dokploy app/project naming
|
|
2086
|
+
*/
|
|
2087
|
+
function getAppNameFromCwd() {
|
|
2088
|
+
const packageJsonPath = (0, node_path.join)(process.cwd(), "package.json");
|
|
2089
|
+
if (!(0, node_fs.existsSync)(packageJsonPath)) return void 0;
|
|
2090
|
+
try {
|
|
2091
|
+
Bundled CLI output, condensed (all lines added; new Docker deploy helpers):
• tail of the cwd package.json lookup: parse package.json and return pkg.name with any "@scope/" prefix stripped, otherwise undefined
• getAppNameFromPackageJson() — "Get app name from package.json adjacent to the lockfile (project root)", "Used for Docker image naming": resolves the lockfile via findLockfilePath(cwd), reads package.json in its directory, returns the unscoped name or undefined
• const logger$4 = console;
• getImageRef(registry, imageName, tag) — returns `${registry}/${imageName}:${tag}`, or `${imageName}:${tag}` when no registry is set
• buildImage(imageRef) — generates the Dockerfile via dockerCommand({}) (turbo-prune variant when isMonorepo(cwd)), then runs `DOCKER_BUILDKIT=1 docker build --platform linux/amd64 -f <dockerfilePath> -t <imageRef> .`; in a monorepo the build runs from the monorepo root (the lockfile directory) with the Dockerfile path rewritten relative to it; failures rethrow as "Failed to build Docker image: …"
• pushImage(imageRef) — runs `docker push <imageRef>`; failures rethrow as "Failed to push Docker image: …"
• deployDocker({ stage, tag, skipPush, masterKey, config }) — builds the image, pushes it unless skipPush (warning "No registry configured. Use --skip-push or configure docker.registry in gkm.config.ts" when no registry is set), logs the image and stage details and, when a masterKey is present, a `docker run -e GKM_MASTER_KEY=… <imageRef>` example; returns { imageRef, masterKey }
• resolveDockerConfig(config) — projectName from the root package.json (fallback "app"), appName from the cwd package.json (fallback projectName), imageName from config.docker?.imageName (fallback appName), registry from config.docker?.registry; returns { registry, imageName, projectName, appName }
• //#endregion
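For orientation, a minimal sketch of how these helpers compose an image reference from the gkm config; the `docker.registry` / `docker.imageName` field names come from the code above, while the surrounding config shape and the sample values are assumptions.

```ts
// Sketch only — mirrors getImageRef/resolveDockerConfig above; sample values are hypothetical.
interface GkmDockerSettings {
  docker?: {
    registry?: string;  // e.g. "registry.example.com"
    imageName?: string; // falls back to the package name, then "app"
  };
}

function imageRef(cfg: GkmDockerSettings, tag: string): string {
  const name = cfg.docker?.imageName ?? "app";
  const registry = cfg.docker?.registry;
  return registry ? `${registry}/${name}:${tag}` : `${name}:${tag}`;
}

// imageRef({ docker: { registry: "registry.example.com", imageName: "api" } }, "production-2024-05-01T12-30-45")
// -> "registry.example.com/api:production-2024-05-01T12-30-45"
```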
+ //#region src/deploy/dokploy.ts — added (condensed):
• const logger$3 = console;
• getApiToken$1() — "Get the Dokploy API token from stored credentials or environment": returns getDokployToken(), otherwise throws 'Dokploy credentials not found. Run "gkm login --service dokploy" to authenticate, or set DOKPLOY_API_TOKEN.'
• createApi$1(endpoint) — "Create a Dokploy API client": new require_dokploy_api.DokployApi({ baseUrl: endpoint, token })
• deployDokploy({ stage, imageRef, masterKey, config }) — "Deploy to Dokploy": logs the endpoint and applicationId; resolves registry options from config.registryId, else the stored getDokployRegistryId(), else config.registryCredentials ({ username, password, registryUrl }), else the DOCKER_REGISTRY_USERNAME / DOCKER_REGISTRY_PASSWORD / DOCKER_REGISTRY_URL environment variables (the URL falling back to config.registry); calls api.saveDockerProvider(config.applicationId, imageRef, registryOptions); when a masterKey is present writes GKM_MASTER_KEY through api.saveApplicationEnv as newline-joined KEY=value pairs; triggers api.deployApplication(config.applicationId); logs the deployment details and returns { imageRef, masterKey, url: `${config.endpoint}/project/${config.projectId}` }
• //#endregion
(One stray JSDoc line from the previous bundle is removed in this region.)
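A small sketch of the env-string serialization used before api.saveApplicationEnv, assuming the same newline-joined KEY=value format shown above; the extra variable name is purely illustrative.

```ts
// Mirrors the serialization in deployDokploy; DATABASE_URL here is an illustrative
// placeholder, not something this particular code path sets.
const envVars: Record<string, string> = {
  GKM_MASTER_KEY: "<master key>",
  DATABASE_URL: "<connection string>",
};

const envString = Object.entries(envVars)
  .map(([key, value]) => `${key}=${value}`)
  .join("\n");
// -> "GKM_MASTER_KEY=<master key>\nDATABASE_URL=<connection string>"
```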
+ //#region src/deploy/init.ts
+ const logger$2 = console;
+ /**
+ * Get the Dokploy API token from stored credentials or environment
+ */
+ async function getApiToken() {
+ const token = await getDokployToken();
+ if (!token) throw new Error("Dokploy credentials not found.\nRun \"gkm login --service dokploy\" to authenticate, or set DOKPLOY_API_TOKEN.");
+ return token;
+ }
+ /**
+ * Get Dokploy endpoint from options or stored credentials
  */
- async function
- …
+ async function getEndpoint(providedEndpoint) {
+ if (providedEndpoint) return providedEndpoint;
+ const stored = await getDokployCredentials();
+ if (stored) return stored.endpoint;
+ throw new Error("Dokploy endpoint not specified.\nEither run \"gkm login --service dokploy\" first, or provide --endpoint.");
+ }
+ /**
+ * Create a Dokploy API client
+ */
+ async function createApi(endpoint) {
+ const token = await getApiToken();
+ return new require_dokploy_api.DokployApi({
+ baseUrl: endpoint,
+ token
+ });
  }
  /**
  * Update gkm.config.ts with Dokploy configuration
@@ -1666,25 +2348,18 @@ async function updateConfig(config, cwd = process.cwd()) {
  logger$2.log("\n Dokploy config already exists in gkm.config.ts");
  logger$2.log(" Updating with new values...");
  }
- …
- …
- endpoint: '${config.endpoint}',
- projectId: '${config.projectId}',
- applicationId: '${config.applicationId}',
- }`);
- else newContent = content.replace(/providers:\s*\{/, `providers: {
- dokploy: {
+ const registryLine = config.registryId ? `\n\t\t\tregistryId: '${config.registryId}',` : "";
+ const dokployConfigStr = `dokploy: {
  endpoint: '${config.endpoint}',
  projectId: '${config.projectId}',
- applicationId: '${config.applicationId}'
- }
+ applicationId: '${config.applicationId}',${registryLine}
+ }`;
+ let newContent;
+ if (content.includes("providers:")) if (content.includes("dokploy:")) newContent = content.replace(/dokploy:\s*\{[^}]*\}/s, dokployConfigStr);
+ else newContent = content.replace(/providers:\s*\{/, `providers: {\n\t\t${dokployConfigStr},`);
  else newContent = content.replace(/}\s*\)\s*;?\s*$/, `
  providers: {
- …
- endpoint: '${config.endpoint}',
- projectId: '${config.projectId}',
- applicationId: '${config.applicationId}',
- },
+ ${dokployConfigStr},
  },
  });`);
  await (0, node_fs_promises.writeFile)(configPath, newContent);
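For reference, the providers.dokploy block this rewrite writes into gkm.config.ts looks roughly like the following; the identifier values are placeholders and the surrounding export/defineConfig wrapper is assumed rather than shown in the diff.

```ts
// Placeholder IDs; the field names and layout follow the dokployConfigStr template above.
// In the real gkm.config.ts this block sits under the exported config's `providers` key.
const providers = {
  dokploy: {
    endpoint: 'https://dokploy.example.com',
    projectId: '<project-id>',
    applicationId: '<application-id>',
    registryId: '<registry-id>', // written only when a registry is configured
  },
};
```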
@@ -1695,42 +2370,46 @@ async function deployInitCommand(options) {
deployInitCommand still reads { projectName, appName, projectId: existingProjectId, registryId } from its options; this hunk (condensed) replaces the inline HTTP helpers with the shared Dokploy client.
Removed: the inline endpoint/credential resolution (getDokployCredentials fallback, the "Dokploy endpoint not specified…" error, `const token = await getApiToken();`) and the old createApplication / registry calls.
Added:
• `const endpoint = await getEndpoint(options.endpoint);` and `const api = await createApi(endpoint);`
• project lookup through the client: `const projects = await api.listProjects();`, and when no project matches, `const result = await api.createProject(projectName); projectId = result.project.projectId;`
• environment resolution: `const project = await api.getProject(projectId);`, then `project.environments?.[0]` or `api.createEnvironment(projectId, "production")`
• application creation: `const application = await api.createApplication(appName, projectId, environmentId);`
• registry wiring: `await api.updateApplication(application.applicationId, { registryId });` when a registryId is supplied, otherwise `const registries = await api.listRegistries();` before printing the available registries
@@ -1754,705 +2433,448 @@ async function deployInitCommand(options) {
This hunk (condensed) drops the old bundled Docker-generation code in favour of the Dokploy API client.
Removed:
• the old deployListCommand(options), which resolved the endpoint inline and printed projects and registries via getProjects(endpoint, token) / getRegistries(endpoint, token) ("Add a registry in Dokploy: Settings > Docker Registry")
• //#region src/docker/compose.ts: DEFAULT_SERVICE_IMAGES / DEFAULT_SERVICE_VERSIONS (postgres:16-alpine, redis:7-alpine, rabbitmq:3-management-alpine), getDefaultImage(), normalizeServices(), generateDockerCompose(options) (an api service plus optional postgres/redis/rabbitmq services with healthchecks, named volumes and an app-network bridge) and generateMinimalDockerCompose(options)
• //#region src/docker/templates.ts: detectPackageManager$1() (lockfile walk up the directory tree), getPmConfig() (pnpm/npm/yarn/bun install, fetch, cache-mount and global-add settings), generateMultiStageDockerfile() (BuildKit cache mounts, pnpm fetch, tini, non-root "hono" user, wget healthcheck), generateTurboDockerfile() (turbo prune <package> --docker), generateSlimDockerfile() (pre-built .gkm/server/dist/server.mjs), generateDockerignore(), generateDockerEntrypoint(), and the start of the old Docker config resolver (several removed lines survive only as fragments in this rendering)
Added: the JSDoc header "List available Dokploy resources" for the rewritten deployListCommand that follows.
Added (condensed): the rewritten deployListCommand(options) — resolves the endpoint with getEndpoint(options.endpoint) and the client with createApi(endpoint); for resource === "projects" it prints api.listProjects() with descriptions ("No projects found" when empty); for resource === "registries" it prints api.listRegistries(), suggests 'Run "gkm registry:setup" to configure a registry' when none exist, and marks the registry matching the stored getDokployRegistryId() as "(default)" alongside its URL, username and optional image prefix. The region is closed with //#endregion, and the old `//#region src/…` marker it replaces is removed.
+ //#region src/deploy/index.ts
  const logger$1 = console;
  /**
- *
- * Generates Dockerfile, docker-compose.yml, and related files
- *
- * Default: Multi-stage Dockerfile that builds from source inside Docker
- * --slim: Slim Dockerfile that copies pre-built bundle (requires prior build)
+ * Prompt for input
  */
- async function
- … (further removed lines elided in this rendering)
Added (condensed): async prompt(message, hidden = false) — throws "Interactive input required. Please configure manually." when stdin is not a TTY; with hidden = true it reads stdin in raw mode character by character without echoing (Enter resolves the collected value, Ctrl-C exits, Backspace trims the last character — the control characters themselves are lost in this rendering); otherwise it opens node_readline_promises.createInterface({ input: stdin, output: stdout }), returns rl.question(message) and closes the interface in a finally block. It is followed by the JSDoc for the next helper: "Provision docker compose services in Dokploy" (@internal, exported for testing).
Added (condensed): async provisionServices(api, projectId, environmentId, appName, services, existingUrls) — logs its inputs and returns undefined when no services or no environmentId are given. For services.postgres (skipped when existingUrls?.DATABASE_URL is already set) it creates `${appName}-db` via api.createPostgres(name, projectId, environmentId, { databasePassword: randomBytes(16).toString("hex") }), deploys it with api.deployPostgres(), and records DATABASE_HOST / DATABASE_PORT (5432) / DATABASE_NAME / DATABASE_USER / DATABASE_PASSWORD plus DATABASE_URL. For services.redis (skipped when REDIS_URL is set) it creates `${appName}-cache` via api.createRedis(...), deploys it with api.deployRedis(), and records REDIS_HOST / REDIS_PORT (6379), the optional REDIS_PASSWORD and REDIS_URL. "already exists" / "duplicate" errors are tolerated; other failures are logged as warnings. Returns the collected URLs, or undefined when nothing was provisioned. It is followed by the JSDoc for the next helper: "Ensure Dokploy is fully configured, recovering/creating resources as needed".
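The connection strings recorded above follow the usual postgres/redis URL shapes, with the Dokploy service appName acting as the host; a sketch with placeholder credentials (the real password is the random hex string generated during provisioning):

```ts
// Placeholder values; the field names (appName, databaseUser, databasePassword, databaseName)
// match the objects returned by the Dokploy API calls above.
const postgres = { appName: "my-api-db", databaseUser: "postgres", databasePassword: "<hex>", databaseName: "postgres" };
const DATABASE_URL =
  `postgresql://${postgres.databaseUser}:${postgres.databasePassword}@${postgres.appName}:5432/${postgres.databaseName}`;

const redis: { appName: string; databasePassword?: string } = { appName: "my-api-cache", databasePassword: "<hex>" };
const auth = redis.databasePassword ? `:${redis.databasePassword}@` : "";
const REDIS_URL = `redis://${auth}${redis.appName}:6379`;
```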
Added (condensed): async ensureDokploySetup(config, dockerConfig, stage, services) — the recovery/bootstrap flow used by deploy:
• reads the existing stage secrets (readStageSecrets from ./storage-UfyTn7Zm.cjs) to learn any DATABASE_URL / REDIS_URL already configured
• when no stored credentials exist, prompts for the Dokploy URL (trailing slash stripped, validated with new URL()) and an API token ("Generate a token at: <endpoint>/settings/profile"), validates it with validateDokployToken and persists it with storeDokployCredentials
• builds a DokployApi client; when gkm.config.ts already holds providers.dokploy with projectId and applicationId it verifies the project (api.getProject), finds or creates an environment named after the stage (api.createEnvironment), provisions services and returns the existing config together with any stored registryId — falling through to recovery ("Project not found, will recover...") when the lookup fails
• otherwise it finds or creates the project (api.listProjects / api.createProject using dockerConfig.projectName), finds or creates the stage environment, and reuses the configured applicationId or creates the application with api.createApplication(appName, project.projectId, environmentId)
• verifies the stored registry (api.getRegistry, clearing a stale id with storeDokployRegistryId("")); with none stored it either creates one from dockerConfig.registry after prompting for username and password (api.createRegistry) or lets the user pick from api.listRegistries() (with an option to create a new one), persisting the choice
• writes the resulting { endpoint, projectId, applicationId, registryId } block back via updateConfig(), logs the summary, provisions services, and returns { config, serviceUrls }
Removed (condensed): the tail of the old Docker file-generation command (writing .gkm/docker/docker-entrypoint.sh, returning { dockerfile, dockerCompose, dockerignore, entrypoint } and the optional buildDockerImage / pushDockerImage calls) and the old image-build helper (`DOCKER_BUILDKIT=1 docker build -f .gkm/docker/Dockerfile -t <image> .` with its "Uses BuildKit for cache mount support" JSDoc), several of whose lines survive only as fragments in this rendering.
Added: the JSDoc header "Generate image tag from stage and timestamp" for the helper that follows.
+ function generateTag(stage) {
+ const timestamp = (/* @__PURE__ */ new Date()).toISOString().replace(/[:.]/g, "-").slice(0, 19);
+ return `${stage}-${timestamp}`;
  }
  /**
- *
+ * Main deploy command
  */
- async function
- const tag = options
- …
- const
- … (remaining removed lines elided in this rendering)
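The tag format above is the stage name plus a second-precision ISO timestamp with ':' and '.' replaced by '-'; a standalone sketch of the same logic:

```ts
// Mirrors generateTag from the bundled code above.
function generateTag(stage: string): string {
  const timestamp = new Date().toISOString().replace(/[:.]/g, "-").slice(0, 19);
  return `${stage}-${timestamp}`;
}

// generateTag("production") -> e.g. "production-2024-05-01T12-30-45"
```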
async function deployCommand(options) {
|
|
2763
|
+
const { provider, stage, tag, skipPush, skipBuild } = options;
|
|
2764
|
+
logger$1.log(`\n🚀 Deploying to ${provider}...`);
|
|
2765
|
+
logger$1.log(` Stage: ${stage}`);
|
|
2766
|
+
const config = await require_config.loadConfig();
|
|
2767
|
+
const imageTag = tag ?? generateTag(stage);
|
|
2768
|
+
logger$1.log(` Tag: ${imageTag}`);
|
|
2769
|
+
const dockerConfig = resolveDockerConfig(config);
|
|
2770
|
+
const imageName = dockerConfig.imageName;
|
|
2771
|
+
const registry = dockerConfig.registry;
|
|
2772
|
+
const imageRef = registry ? `${registry}/${imageName}:${imageTag}` : `${imageName}:${imageTag}`;
|
|
2773
|
+
let dokployConfig;
|
|
2774
|
+
let finalRegistry = registry;
|
|
2775
|
+
if (provider === "dokploy") {
|
|
2776
|
+
const composeServices = config.docker?.compose?.services;
|
|
2777
|
+
logger$1.log(`\n🔍 Docker compose config: ${JSON.stringify(config.docker?.compose)}`);
|
|
2778
|
+
const dockerServices = composeServices ? Array.isArray(composeServices) ? {
|
|
2779
|
+
postgres: composeServices.includes("postgres"),
|
|
2780
|
+
redis: composeServices.includes("redis"),
|
|
2781
|
+
rabbitmq: composeServices.includes("rabbitmq")
|
|
2782
|
+
} : {
|
|
2783
|
+
postgres: Boolean(composeServices.postgres),
|
|
2784
|
+
redis: Boolean(composeServices.redis),
|
|
2785
|
+
rabbitmq: Boolean(composeServices.rabbitmq)
|
|
2786
|
+
} : void 0;
|
|
2787
|
+
const setupResult = await ensureDokploySetup(config, dockerConfig, stage, dockerServices);
|
|
2788
|
+
dokployConfig = setupResult.config;
|
|
2789
|
+
finalRegistry = dokployConfig.registry ?? dockerConfig.registry;
|
|
2790
|
+
if (setupResult.serviceUrls) {
|
|
2791
|
+
const { readStageSecrets: readStageSecrets$1, writeStageSecrets: writeStageSecrets$1, initStageSecrets } = await Promise.resolve().then(() => require("./storage-UfyTn7Zm.cjs"));
|
|
2792
|
+
let secrets = await readStageSecrets$1(stage);
|
|
2793
|
+
if (!secrets) {
|
|
2794
|
+
logger$1.log(` Creating secrets file for stage "${stage}"...`);
|
|
2795
|
+
secrets = initStageSecrets(stage);
|
|
2796
|
+
}
|
|
2797
|
+
let updated = false;
|
|
2798
|
+
const urlFields = [
|
|
2799
|
+
"DATABASE_URL",
|
|
2800
|
+
"REDIS_URL",
|
|
2801
|
+
"RABBITMQ_URL"
|
|
2802
|
+
];
|
|
2803
|
+
for (const [key, value] of Object.entries(setupResult.serviceUrls)) {
|
|
2804
|
+
if (!value) continue;
|
|
2805
|
+
if (urlFields.includes(key)) {
|
|
2806
|
+
const urlKey = key;
|
|
2807
|
+
if (!secrets.urls[urlKey]) {
|
|
2808
|
+
secrets.urls[urlKey] = value;
|
|
2809
|
+
logger$1.log(` Saved ${key} to secrets.urls`);
|
|
2810
|
+
updated = true;
|
|
2811
|
+
}
|
|
2812
|
+
} else if (!secrets.custom[key]) {
|
|
2813
|
+
secrets.custom[key] = value;
|
|
2814
|
+
logger$1.log(` Saved ${key} to secrets.custom`);
|
|
2815
|
+
updated = true;
|
|
2816
|
+
}
|
|
2817
|
+
}
|
|
2818
|
+
if (updated) await writeStageSecrets$1(secrets);
|
|
2819
|
+
}
|
|
2820
|
+
}
|
|
2821
|
+
let masterKey;
|
|
2822
|
+
if (!skipBuild) {
|
|
2823
|
+
logger$1.log(`\n📦 Building for production...`);
|
|
2824
|
+
const buildResult = await buildCommand({
|
|
2825
|
+
provider: "server",
|
|
2826
|
+
production: true,
|
|
2827
|
+
stage
|
|
2451
2828
|
});
|
|
2452
|
-
|
|
2453
|
-
}
|
|
2454
|
-
|
|
2829
|
+
masterKey = buildResult.masterKey;
|
|
2830
|
+
} else logger$1.log(`\n⏭️ Skipping build (--skip-build)`);
|
|
2831
|
+
let result;
|
|
2832
|
+
switch (provider) {
|
|
2833
|
+
case "docker": {
|
|
2834
|
+
result = await deployDocker({
|
|
2835
|
+
stage,
|
|
2836
|
+
tag: imageTag,
|
|
2837
|
+
skipPush,
|
|
2838
|
+
masterKey,
|
|
2839
|
+
config: dockerConfig
|
|
2840
|
+
});
|
|
2841
|
+
break;
|
|
2842
|
+
}
|
|
2843
|
+
case "dokploy": {
|
|
2844
|
+
if (!dokployConfig) throw new Error("Dokploy config not initialized");
|
|
2845
|
+
const finalImageRef = finalRegistry ? `${finalRegistry}/${imageName}:${imageTag}` : `${imageName}:${imageTag}`;
|
|
2846
|
+
await deployDocker({
|
|
2847
|
+
stage,
|
|
2848
|
+
tag: imageTag,
|
|
2849
|
+
skipPush: false,
|
|
2850
|
+
masterKey,
|
|
2851
|
+
config: {
|
|
2852
|
+
registry: finalRegistry,
|
|
2853
|
+
imageName: dockerConfig.imageName
|
|
2854
|
+
}
|
|
2855
|
+
});
|
|
2856
|
+
result = await deployDokploy({
|
|
2857
|
+
stage,
|
|
2858
|
+
tag: imageTag,
|
|
2859
|
+
imageRef: finalImageRef,
|
|
2860
|
+
masterKey,
|
|
2861
|
+
config: dokployConfig
|
|
2862
|
+
});
|
|
2863
|
+
break;
|
|
2864
|
+
}
|
|
2865
|
+
case "aws-lambda": {
|
|
2866
|
+
logger$1.log("\n⚠️ AWS Lambda deployment is not yet implemented.");
|
|
2867
|
+
logger$1.log(" Use SST or AWS CDK for Lambda deployments.");
|
|
2868
|
+
result = {
|
|
2869
|
+
imageRef,
|
|
2870
|
+
masterKey
|
|
2871
|
+
};
|
|
2872
|
+
break;
|
|
2873
|
+
}
|
|
2874
|
+
default: throw new Error(`Unknown deploy provider: ${provider}\nSupported providers: docker, dokploy, aws-lambda`);
|
|
2455
2875
|
}
|
|
2876
|
+
logger$1.log("\n✅ Deployment complete!");
|
|
2877
|
+
return result;
|
|
2456
2878
|
}
|
|
2457
2879
|
|
|
2458
2880
|
//#endregion
|
|
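The hunk above removes the BuildKit-based `docker build` body that previously lived here and replaces it with a timestamp-based default tag helper plus the provider-dispatching `deployCommand`. A minimal sketch of the new tag and image-reference logic, restated as standalone TypeScript for readability (the example timestamp and the type annotations are illustrative, not part of the bundled output):

```ts
// Sketch: default image tag used by deployCommand when no explicit tag is supplied.
function generateTag(stage: string): string {
  // "2026-03-01T09:15:30.123Z" -> "2026-03-01T09-15-30" (colons/dots replaced, cut to 19 chars)
  const timestamp = new Date().toISOString().replace(/[:.]/g, "-").slice(0, 19);
  return `${stage}-${timestamp}`;
}

// e.g. generateTag("prod") -> "prod-2026-03-01T09-15-30"
// deployCommand then derives the image reference from it:
//   registry ? `${registry}/${imageName}:${imageTag}` : `${imageName}:${imageTag}`
```

For the dokploy provider, `config.docker?.compose?.services` may be either an array of service names or an object of boolean flags; both shapes are normalized to `{ postgres, redis, rabbitmq }` before `ensureDokploySetup` is called, and any service URLs it returns are persisted into the stage secrets file.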
@@ -4083,11 +4505,11 @@ async function initCommand(projectName, options = {}) {
4083 4505 |   };
4084 4506 |   const targetDir = (0, node_path.join)(cwd, name$1);
4085 4507 |   const template = getTemplate(templateOptions.template);
4086      | -  const isMonorepo = templateOptions.monorepo;
     4508 | +  const isMonorepo$1 = templateOptions.monorepo;
4087 4509 |   const apiPath = templateOptions.apiPath;
4088 4510 |   await (0, node_fs_promises.mkdir)(targetDir, { recursive: true });
4089      | -  const appDir = isMonorepo ? (0, node_path.join)(targetDir, apiPath) : targetDir;
4090      | -  if (isMonorepo) await (0, node_fs_promises.mkdir)(appDir, { recursive: true });
     4511 | +  const appDir = isMonorepo$1 ? (0, node_path.join)(targetDir, apiPath) : targetDir;
     4512 | +  if (isMonorepo$1) await (0, node_fs_promises.mkdir)(appDir, { recursive: true });
4091 4513 |   const appFiles = [
4092 4514 |     ...generatePackageJson(templateOptions, template),
4093 4515 |     ...generateConfigFiles(templateOptions, template),

@@ -4103,7 +4525,7 @@ async function initCommand(projectName, options = {}) {
4103 4525 |   }
4104 4526 |   for (const { path, content } of appFiles) {
4105 4527 |     const fullPath = (0, node_path.join)(appDir, path);
4106      | -    const _displayPath = isMonorepo ? `${apiPath}/${path}` : path;
     4528 | +    const _displayPath = isMonorepo$1 ? `${apiPath}/${path}` : path;
4107 4529 |     await (0, node_fs_promises.mkdir)((0, node_path.dirname)(fullPath), { recursive: true });
4108 4530 |     await (0, node_fs_promises.writeFile)(fullPath, content);
4109 4531 |   }
@@ -4436,7 +4858,8 @@ program.command("init").description("Scaffold a new project").argument("[name]",
4436 4858 |     const globalOptions = program.opts();
4437 4859 |     if (globalOptions.cwd) process.chdir(globalOptions.cwd);
4438 4860 |     await initCommand(name$1, options);
4439      | -  } catch (
     4861 | +  } catch (error) {
     4862 | +    console.error(error instanceof Error ? error.message : "Command failed");
4440 4863 |     process.exit(1);
4441 4864 |   }
4442 4865 | });
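The hunk above, and each command hunk that follows (build, dev, openapi, generate:react-query, docker, prepack, the secrets:* commands, and deploy), applies the same fix: the action's `catch` block now prints the error message before exiting. A minimal sketch of the shared shape, where `runCommand` is a hypothetical stand-in for the per-command implementation:

```ts
// Sketch of the error-handling pattern added to every CLI action in this release.
async function action(runCommand: () => Promise<void>): Promise<void> {
  try {
    await runCommand();
  } catch (error) {
    // deploy falls back to "Deploy failed"; the other commands use "Command failed".
    console.error(error instanceof Error ? error.message : "Command failed");
    process.exit(1);
  }
}
```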
@@ -4468,7 +4891,8 @@ program.command("build").description("Build handlers from endpoints, functions,
4468 4891 |       skipBundle: options.skipBundle || false,
4469 4892 |       stage: options.stage
4470 4893 |     });
4471      | -  } catch (
     4894 | +  } catch (error) {
     4895 | +    console.error(error instanceof Error ? error.message : "Command failed");
4472 4896 |     process.exit(1);
4473 4897 |   }
4474 4898 | });

@@ -4481,7 +4905,8 @@ program.command("dev").description("Start development server with automatic relo
4481 4905 |       portExplicit: !!options.port,
4482 4906 |       enableOpenApi: options.enableOpenapi ?? true
4483 4907 |     });
4484      | -  } catch (
     4908 | +  } catch (error) {
     4909 | +    console.error(error instanceof Error ? error.message : "Command failed");
4485 4910 |     process.exit(1);
4486 4911 |   }
4487 4912 | });

@@ -4505,7 +4930,8 @@ program.command("openapi").description("Generate OpenAPI specification from endp
4505 4930 |     const globalOptions = program.opts();
4506 4931 |     if (globalOptions.cwd) process.chdir(globalOptions.cwd);
4507 4932 |     await require_openapi.openapiCommand({});
4508      | -  } catch (
     4933 | +  } catch (error) {
     4934 | +    console.error(error instanceof Error ? error.message : "Command failed");
4509 4935 |     process.exit(1);
4510 4936 |   }
4511 4937 | });

@@ -4514,7 +4940,8 @@ program.command("generate:react-query").description("Generate React Query hooks
4514 4940 |     const globalOptions = program.opts();
4515 4941 |     if (globalOptions.cwd) process.chdir(globalOptions.cwd);
4516 4942 |     await require_openapi_react_query.generateReactQueryCommand(options);
4517      | -  } catch (
     4943 | +  } catch (error) {
     4944 | +    console.error(error instanceof Error ? error.message : "Command failed");
4518 4945 |     process.exit(1);
4519 4946 |   }
4520 4947 | });

@@ -4523,7 +4950,8 @@ program.command("docker").description("Generate Docker deployment files").option
4523 4950 |     const globalOptions = program.opts();
4524 4951 |     if (globalOptions.cwd) process.chdir(globalOptions.cwd);
4525 4952 |     await dockerCommand(options);
4526      | -  } catch (
     4953 | +  } catch (error) {
     4954 | +    console.error(error instanceof Error ? error.message : "Command failed");
4527 4955 |     process.exit(1);
4528 4956 |   }
4529 4957 | });

@@ -4551,7 +4979,8 @@ program.command("prepack").description("Generate Docker files for production dep
4551 4979 |       const registry = options.registry;
4552 4980 |       const _imageRef = registry ? `${registry}/api:${tag}` : `api:${tag}`;
4553 4981 |     }
4554      | -  } catch (
     4982 | +  } catch (error) {
     4983 | +    console.error(error instanceof Error ? error.message : "Command failed");
4555 4984 |     process.exit(1);
4556 4985 |   }
4557 4986 | });

@@ -4560,7 +4989,8 @@ program.command("secrets:init").description("Initialize secrets for a deployment
4560 4989 |     const globalOptions = program.opts();
4561 4990 |     if (globalOptions.cwd) process.chdir(globalOptions.cwd);
4562 4991 |     await secretsInitCommand(options);
4563      | -  } catch (
     4992 | +  } catch (error) {
     4993 | +    console.error(error instanceof Error ? error.message : "Command failed");
4564 4994 |     process.exit(1);
4565 4995 |   }
4566 4996 | });

@@ -4569,7 +4999,8 @@ program.command("secrets:set").description("Set a custom secret for a stage").ar
4569 4999 |     const globalOptions = program.opts();
4570 5000 |     if (globalOptions.cwd) process.chdir(globalOptions.cwd);
4571 5001 |     await secretsSetCommand(key, value, options);
4572      | -  } catch (
     5002 | +  } catch (error) {
     5003 | +    console.error(error instanceof Error ? error.message : "Command failed");
4573 5004 |     process.exit(1);
4574 5005 |   }
4575 5006 | });

@@ -4578,7 +5009,8 @@ program.command("secrets:show").description("Show secrets for a stage").required
4578 5009 |     const globalOptions = program.opts();
4579 5010 |     if (globalOptions.cwd) process.chdir(globalOptions.cwd);
4580 5011 |     await secretsShowCommand(options);
4581      | -  } catch (
     5012 | +  } catch (error) {
     5013 | +    console.error(error instanceof Error ? error.message : "Command failed");
4582 5014 |     process.exit(1);
4583 5015 |   }
4584 5016 | });

@@ -4587,7 +5019,8 @@ program.command("secrets:rotate").description("Rotate service passwords").requir
4587 5019 |     const globalOptions = program.opts();
4588 5020 |     if (globalOptions.cwd) process.chdir(globalOptions.cwd);
4589 5021 |     await secretsRotateCommand(options);
4590      | -  } catch (
     5022 | +  } catch (error) {
     5023 | +    console.error(error instanceof Error ? error.message : "Command failed");
4591 5024 |     process.exit(1);
4592 5025 |   }
4593 5026 | });

@@ -4596,7 +5029,8 @@ program.command("secrets:import").description("Import secrets from a JSON file")
4596 5029 |     const globalOptions = program.opts();
4597 5030 |     if (globalOptions.cwd) process.chdir(globalOptions.cwd);
4598 5031 |     await secretsImportCommand(file, options);
4599      | -  } catch (
     5032 | +  } catch (error) {
     5033 | +    console.error(error instanceof Error ? error.message : "Command failed");
4600 5034 |     process.exit(1);
4601 5035 |   }
4602 5036 | });

@@ -4620,7 +5054,8 @@ program.command("deploy").description("Deploy application to a provider").requir
4620 5054 |       skipPush: options.skipPush,
4621 5055 |       skipBuild: options.skipBuild
4622 5056 |     });
4623      | -  } catch (
     5057 | +  } catch (error) {
     5058 | +    console.error(error instanceof Error ? error.message : "Deploy failed");
4624 5059 |     process.exit(1);
4625 5060 |   }
4626 5061 | });
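Taken together with the deployCommand hunk earlier in this diff, the deploy wiring passes a small options object through to `deployCommand`. A sketch of its shape, inferred from the destructuring and the provider switch (the interface name is illustrative, not an exported type):

```ts
// Inferred shape of the object deployCommand destructures.
interface DeployCommandOptions {
  provider: "docker" | "dokploy" | "aws-lambda"; // any other value throws "Unknown deploy provider"
  stage: string;
  tag?: string;        // defaults to generateTag(stage)
  skipPush?: boolean;
  skipBuild?: boolean;
}
```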