@geekmidas/cli 0.13.0 → 0.15.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/{bundler-DskIqW2t.mjs → bundler-D7cM_FWw.mjs} +34 -10
- package/dist/bundler-D7cM_FWw.mjs.map +1 -0
- package/dist/{bundler-B1qy9b-j.cjs → bundler-Nuew7Xcn.cjs} +33 -9
- package/dist/bundler-Nuew7Xcn.cjs.map +1 -0
- package/dist/config.d.cts +1 -1
- package/dist/config.d.mts +1 -1
- package/dist/dokploy-api-B7KxOQr3.cjs +3 -0
- package/dist/dokploy-api-C7F9VykY.cjs +317 -0
- package/dist/dokploy-api-C7F9VykY.cjs.map +1 -0
- package/dist/dokploy-api-CaETb2L6.mjs +305 -0
- package/dist/dokploy-api-CaETb2L6.mjs.map +1 -0
- package/dist/dokploy-api-DHvfmWbi.mjs +3 -0
- package/dist/{encryption-Dyf_r1h-.cjs → encryption-D7Efcdi9.cjs} +1 -1
- package/dist/{encryption-Dyf_r1h-.cjs.map → encryption-D7Efcdi9.cjs.map} +1 -1
- package/dist/{encryption-C8H-38Yy.mjs → encryption-h4Nb6W-M.mjs} +1 -1
- package/dist/{encryption-C8H-38Yy.mjs.map → encryption-h4Nb6W-M.mjs.map} +1 -1
- package/dist/index.cjs +1508 -1073
- package/dist/index.cjs.map +1 -1
- package/dist/index.mjs +1508 -1073
- package/dist/index.mjs.map +1 -1
- package/dist/{openapi-Bt_1FDpT.cjs → openapi-C89hhkZC.cjs} +3 -3
- package/dist/{openapi-Bt_1FDpT.cjs.map → openapi-C89hhkZC.cjs.map} +1 -1
- package/dist/{openapi-BfFlOBCG.mjs → openapi-CZVcfxk-.mjs} +3 -3
- package/dist/{openapi-BfFlOBCG.mjs.map → openapi-CZVcfxk-.mjs.map} +1 -1
- package/dist/{openapi-react-query-B6XTeGqS.mjs → openapi-react-query-CM2_qlW9.mjs} +1 -1
- package/dist/{openapi-react-query-B6XTeGqS.mjs.map → openapi-react-query-CM2_qlW9.mjs.map} +1 -1
- package/dist/{openapi-react-query-B-sNWHFU.cjs → openapi-react-query-iKjfLzff.cjs} +1 -1
- package/dist/{openapi-react-query-B-sNWHFU.cjs.map → openapi-react-query-iKjfLzff.cjs.map} +1 -1
- package/dist/openapi-react-query.cjs +1 -1
- package/dist/openapi-react-query.mjs +1 -1
- package/dist/openapi.cjs +1 -1
- package/dist/openapi.d.cts +1 -1
- package/dist/openapi.d.mts +1 -1
- package/dist/openapi.mjs +1 -1
- package/dist/{storage-kSxTjkNb.mjs → storage-BaOP55oq.mjs} +16 -2
- package/dist/storage-BaOP55oq.mjs.map +1 -0
- package/dist/{storage-Bj1E26lU.cjs → storage-Bn3K9Ccu.cjs} +21 -1
- package/dist/storage-Bn3K9Ccu.cjs.map +1 -0
- package/dist/storage-UfyTn7Zm.cjs +7 -0
- package/dist/storage-nkGIjeXt.mjs +3 -0
- package/dist/{types-BhkZc-vm.d.cts → types-BgaMXsUa.d.cts} +3 -1
- package/dist/{types-BR0M2v_c.d.mts.map → types-BgaMXsUa.d.cts.map} +1 -1
- package/dist/{types-BR0M2v_c.d.mts → types-iFk5ms7y.d.mts} +3 -1
- package/dist/{types-BhkZc-vm.d.cts.map → types-iFk5ms7y.d.mts.map} +1 -1
- package/package.json +4 -4
- package/src/auth/__tests__/credentials.spec.ts +127 -0
- package/src/auth/__tests__/index.spec.ts +69 -0
- package/src/auth/credentials.ts +33 -0
- package/src/auth/index.ts +57 -50
- package/src/build/__tests__/bundler.spec.ts +5 -4
- package/src/build/__tests__/endpoint-analyzer.spec.ts +623 -0
- package/src/build/__tests__/handler-templates.spec.ts +272 -0
- package/src/build/bundler.ts +61 -8
- package/src/build/index.ts +21 -0
- package/src/build/types.ts +6 -0
- package/src/deploy/__tests__/docker.spec.ts +44 -6
- package/src/deploy/__tests__/dokploy-api.spec.ts +698 -0
- package/src/deploy/__tests__/dokploy.spec.ts +196 -6
- package/src/deploy/__tests__/index.spec.ts +401 -0
- package/src/deploy/__tests__/init.spec.ts +147 -16
- package/src/deploy/docker.ts +109 -5
- package/src/deploy/dokploy-api.ts +581 -0
- package/src/deploy/dokploy.ts +66 -93
- package/src/deploy/index.ts +630 -32
- package/src/deploy/init.ts +192 -249
- package/src/deploy/types.ts +24 -2
- package/src/dev/__tests__/index.spec.ts +95 -0
- package/src/docker/__tests__/templates.spec.ts +144 -0
- package/src/docker/index.ts +96 -6
- package/src/docker/templates.ts +114 -27
- package/src/generators/EndpointGenerator.ts +2 -2
- package/src/index.ts +34 -13
- package/src/secrets/storage.ts +15 -0
- package/src/types.ts +2 -0
- package/dist/bundler-B1qy9b-j.cjs.map +0 -1
- package/dist/bundler-DskIqW2t.mjs.map +0 -1
- package/dist/storage-BOOpAF8N.cjs +0 -5
- package/dist/storage-Bj1E26lU.cjs.map +0 -1
- package/dist/storage-kSxTjkNb.mjs.map +0 -1
- package/dist/storage-tgZSUnKl.mjs +0 -3
package/dist/index.mjs
CHANGED
|
@@ -1,11 +1,12 @@
|
|
|
1
1
|
#!/usr/bin/env -S npx tsx
|
|
2
2
|
import { loadConfig, parseModuleConfig } from "./config-DYULeEv8.mjs";
|
|
3
|
-
import { ConstructGenerator, EndpointGenerator, OPENAPI_OUTPUT_PATH, generateOpenApi, openapiCommand, resolveOpenApiConfig } from "./openapi-
|
|
4
|
-
import {
|
|
5
|
-
import {
|
|
3
|
+
import { ConstructGenerator, EndpointGenerator, OPENAPI_OUTPUT_PATH, generateOpenApi, openapiCommand, resolveOpenApiConfig } from "./openapi-CZVcfxk-.mjs";
|
|
4
|
+
import { DokployApi } from "./dokploy-api-CaETb2L6.mjs";
|
|
5
|
+
import { generateReactQueryCommand } from "./openapi-react-query-CM2_qlW9.mjs";
|
|
6
|
+
import { maskPassword, readStageSecrets, secretsExist, setCustomSecret, writeStageSecrets } from "./storage-BaOP55oq.mjs";
|
|
6
7
|
import { createRequire } from "node:module";
|
|
7
|
-
import { existsSync, mkdirSync } from "node:fs";
|
|
8
|
-
import { dirname, join, parse, relative, resolve } from "node:path";
|
|
8
|
+
import { copyFileSync, existsSync, mkdirSync, readFileSync, unlinkSync } from "node:fs";
|
|
9
|
+
import { basename, dirname, join, parse, relative, resolve } from "node:path";
|
|
9
10
|
import { Command } from "commander";
|
|
10
11
|
import { stdin, stdout } from "node:process";
|
|
11
12
|
import * as readline from "node:readline/promises";
|
|
@@ -28,7 +29,7 @@ var __require = /* @__PURE__ */ createRequire(import.meta.url);
|
|
|
28
29
|
//#endregion
|
|
29
30
|
//#region package.json
|
|
30
31
|
var name = "@geekmidas/cli";
|
|
31
|
-
var version = "0.
|
|
32
|
+
var version = "0.15.0";
|
|
32
33
|
var description = "CLI tools for building Lambda handlers, server applications, and generating OpenAPI specs";
|
|
33
34
|
var private$1 = false;
|
|
34
35
|
var type = "module";
|
|
@@ -174,7 +175,8 @@ async function getDokployCredentials(options) {
|
|
|
174
175
|
if (!credentials.dokploy) return null;
|
|
175
176
|
return {
|
|
176
177
|
token: credentials.dokploy.token,
|
|
177
|
-
endpoint: credentials.dokploy.endpoint
|
|
178
|
+
endpoint: credentials.dokploy.endpoint,
|
|
179
|
+
registryId: credentials.dokploy.registryId
|
|
178
180
|
};
|
|
179
181
|
}
|
|
180
182
|
/**
|
|
@@ -197,6 +199,22 @@ async function getDokployToken(options) {
|
|
|
197
199
|
if (stored) return stored.token;
|
|
198
200
|
return null;
|
|
199
201
|
}
|
|
202
|
+
/**
|
|
203
|
+
* Store Dokploy registry ID
|
|
204
|
+
*/
|
|
205
|
+
async function storeDokployRegistryId(registryId, options) {
|
|
206
|
+
const credentials = await readCredentials(options);
|
|
207
|
+
if (!credentials.dokploy) throw new Error("Dokploy credentials not found. Run \"gkm login --service dokploy\" first.");
|
|
208
|
+
credentials.dokploy.registryId = registryId;
|
|
209
|
+
await writeCredentials(credentials, options);
|
|
210
|
+
}
|
|
211
|
+
/**
|
|
212
|
+
* Get Dokploy registry ID from stored credentials
|
|
213
|
+
*/
|
|
214
|
+
async function getDokployRegistryId(options) {
|
|
215
|
+
const stored = await getDokployCredentials(options);
|
|
216
|
+
return stored?.registryId ?? void 0;
|
|
217
|
+
}
|
|
200
218
|
|
|
201
219
|
//#endregion
|
|
202
220
|
//#region src/auth/index.ts
|
|
@@ -205,52 +223,61 @@ const logger$9 = console;
|
|
|
205
223
|
* Validate Dokploy token by making a test API call
|
|
206
224
|
*/
|
|
207
225
|
async function validateDokployToken(endpoint, token) {
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
214
|
-
}
|
|
215
|
-
});
|
|
216
|
-
return response.ok;
|
|
217
|
-
} catch {
|
|
218
|
-
return false;
|
|
219
|
-
}
|
|
226
|
+
const { DokployApi: DokployApi$1 } = await import("./dokploy-api-DHvfmWbi.mjs");
|
|
227
|
+
const api = new DokployApi$1({
|
|
228
|
+
baseUrl: endpoint,
|
|
229
|
+
token
|
|
230
|
+
});
|
|
231
|
+
return api.validateToken();
|
|
220
232
|
}
|
|
221
233
|
/**
|
|
222
234
|
* Prompt for input (handles both TTY and non-TTY)
|
|
223
235
|
*/
|
|
224
|
-
async function prompt(message, hidden = false) {
|
|
236
|
+
async function prompt$1(message, hidden = false) {
|
|
225
237
|
if (!process.stdin.isTTY) throw new Error("Interactive input required. Please provide --token option.");
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
|
|
231
|
-
|
|
232
|
-
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
|
|
242
|
-
|
|
243
|
-
|
|
244
|
-
|
|
245
|
-
|
|
246
|
-
}
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
|
|
250
|
-
|
|
251
|
-
|
|
252
|
-
|
|
253
|
-
|
|
238
|
+
if (hidden) {
|
|
239
|
+
process.stdout.write(message);
|
|
240
|
+
return new Promise((resolve$1, reject) => {
|
|
241
|
+
let value = "";
|
|
242
|
+
const cleanup = () => {
|
|
243
|
+
process.stdin.setRawMode(false);
|
|
244
|
+
process.stdin.pause();
|
|
245
|
+
process.stdin.removeListener("data", onData);
|
|
246
|
+
process.stdin.removeListener("error", onError);
|
|
247
|
+
};
|
|
248
|
+
const onError = (err) => {
|
|
249
|
+
cleanup();
|
|
250
|
+
reject(err);
|
|
251
|
+
};
|
|
252
|
+
const onData = (char) => {
|
|
253
|
+
const c = char.toString();
|
|
254
|
+
if (c === "\n" || c === "\r") {
|
|
255
|
+
cleanup();
|
|
256
|
+
process.stdout.write("\n");
|
|
257
|
+
resolve$1(value);
|
|
258
|
+
} else if (c === "") {
|
|
259
|
+
cleanup();
|
|
260
|
+
process.stdout.write("\n");
|
|
261
|
+
process.exit(1);
|
|
262
|
+
} else if (c === "" || c === "\b") {
|
|
263
|
+
if (value.length > 0) value = value.slice(0, -1);
|
|
264
|
+
} else value += c;
|
|
265
|
+
};
|
|
266
|
+
process.stdin.setRawMode(true);
|
|
267
|
+
process.stdin.resume();
|
|
268
|
+
process.stdin.on("data", onData);
|
|
269
|
+
process.stdin.on("error", onError);
|
|
270
|
+
});
|
|
271
|
+
} else {
|
|
272
|
+
const rl = readline.createInterface({
|
|
273
|
+
input: stdin,
|
|
274
|
+
output: stdout
|
|
275
|
+
});
|
|
276
|
+
try {
|
|
277
|
+
return await rl.question(message);
|
|
278
|
+
} finally {
|
|
279
|
+
rl.close();
|
|
280
|
+
}
|
|
254
281
|
}
|
|
255
282
|
}
|
|
256
283
|
/**
|
|
@@ -261,7 +288,7 @@ async function loginCommand(options) {
|
|
|
261
288
|
if (service === "dokploy") {
|
|
262
289
|
logger$9.log("\n🔐 Logging in to Dokploy...\n");
|
|
263
290
|
let endpoint = providedEndpoint;
|
|
264
|
-
if (!endpoint) endpoint = await prompt("Dokploy URL (e.g., https://dokploy.example.com): ");
|
|
291
|
+
if (!endpoint) endpoint = await prompt$1("Dokploy URL (e.g., https://dokploy.example.com): ");
|
|
265
292
|
endpoint = endpoint.replace(/\/$/, "");
|
|
266
293
|
try {
|
|
267
294
|
new URL(endpoint);
|
|
@@ -272,7 +299,7 @@ async function loginCommand(options) {
|
|
|
272
299
|
let token = providedToken;
|
|
273
300
|
if (!token) {
|
|
274
301
|
logger$9.log(`\nGenerate a token at: ${endpoint}/settings/profile\n`);
|
|
275
|
-
token = await prompt("API Token: ", true);
|
|
302
|
+
token = await prompt$1("API Token: ", true);
|
|
276
303
|
}
|
|
277
304
|
if (!token) {
|
|
278
305
|
logger$9.error("Token is required");
|
|
@@ -1193,6 +1220,16 @@ async function buildCommand(options) {
|
|
|
1193
1220
|
if (studio) logger$6.log(`🗄️ Studio enabled at ${studio.path}`);
|
|
1194
1221
|
const hooks = normalizeHooksConfig(config$1.hooks);
|
|
1195
1222
|
if (hooks) logger$6.log(`🪝 Server hooks enabled`);
|
|
1223
|
+
const services = config$1.docker?.compose?.services;
|
|
1224
|
+
const dockerServices = services ? Array.isArray(services) ? {
|
|
1225
|
+
postgres: services.includes("postgres"),
|
|
1226
|
+
redis: services.includes("redis"),
|
|
1227
|
+
rabbitmq: services.includes("rabbitmq")
|
|
1228
|
+
} : {
|
|
1229
|
+
postgres: Boolean(services.postgres),
|
|
1230
|
+
redis: Boolean(services.redis),
|
|
1231
|
+
rabbitmq: Boolean(services.rabbitmq)
|
|
1232
|
+
} : void 0;
|
|
1196
1233
|
const buildContext = {
|
|
1197
1234
|
envParserPath,
|
|
1198
1235
|
envParserImportPattern,
|
|
@@ -1201,7 +1238,8 @@ async function buildCommand(options) {
|
|
|
1201
1238
|
telescope,
|
|
1202
1239
|
studio,
|
|
1203
1240
|
hooks,
|
|
1204
|
-
production
|
|
1241
|
+
production,
|
|
1242
|
+
dockerServices
|
|
1205
1243
|
};
|
|
1206
1244
|
const endpointGenerator = new EndpointGenerator();
|
|
1207
1245
|
const functionGenerator = new FunctionGenerator();
|
|
@@ -1259,13 +1297,14 @@ async function buildForProvider(provider, context, rootOutputDir, endpointGenera
|
|
|
1259
1297
|
let masterKey;
|
|
1260
1298
|
if (context.production?.bundle && !skipBundle) {
|
|
1261
1299
|
logger$6.log(`\n📦 Bundling production server...`);
|
|
1262
|
-
const { bundleServer } = await import("./bundler-
|
|
1300
|
+
const { bundleServer } = await import("./bundler-D7cM_FWw.mjs");
|
|
1263
1301
|
const allConstructs = [
|
|
1264
1302
|
...endpoints.map((e) => e.construct),
|
|
1265
1303
|
...functions.map((f) => f.construct),
|
|
1266
1304
|
...crons.map((c) => c.construct),
|
|
1267
1305
|
...subscribers.map((s) => s.construct)
|
|
1268
1306
|
];
|
|
1307
|
+
const dockerServices = context.dockerServices;
|
|
1269
1308
|
const bundleResult = await bundleServer({
|
|
1270
1309
|
entryPoint: join(outputDir, "server.ts"),
|
|
1271
1310
|
outputDir: join(outputDir, "dist"),
|
|
@@ -1273,7 +1312,8 @@ async function buildForProvider(provider, context, rootOutputDir, endpointGenera
|
|
|
1273
1312
|
sourcemap: false,
|
|
1274
1313
|
external: context.production.external,
|
|
1275
1314
|
stage,
|
|
1276
|
-
constructs: allConstructs
|
|
1315
|
+
constructs: allConstructs,
|
|
1316
|
+
dockerServices
|
|
1277
1317
|
});
|
|
1278
1318
|
masterKey = bundleResult.masterKey;
|
|
1279
1319
|
logger$6.log(`✅ Bundle complete: .gkm/server/dist/server.mjs`);
|
|
@@ -1288,366 +1328,1008 @@ async function buildForProvider(provider, context, rootOutputDir, endpointGenera
|
|
|
1288
1328
|
}
|
|
1289
1329
|
|
|
1290
1330
|
//#endregion
|
|
1291
|
-
//#region src/
|
|
1292
|
-
|
|
1293
|
-
|
|
1294
|
-
|
|
1295
|
-
|
|
1296
|
-
|
|
1297
|
-
|
|
1298
|
-
|
|
1331
|
+
//#region src/docker/compose.ts
|
|
1332
|
+
/** Default Docker images for services */
|
|
1333
|
+
const DEFAULT_SERVICE_IMAGES = {
|
|
1334
|
+
postgres: "postgres",
|
|
1335
|
+
redis: "redis",
|
|
1336
|
+
rabbitmq: "rabbitmq"
|
|
1337
|
+
};
|
|
1338
|
+
/** Default Docker image versions for services */
|
|
1339
|
+
const DEFAULT_SERVICE_VERSIONS = {
|
|
1340
|
+
postgres: "16-alpine",
|
|
1341
|
+
redis: "7-alpine",
|
|
1342
|
+
rabbitmq: "3-management-alpine"
|
|
1343
|
+
};
|
|
1344
|
+
/** Get the default full image reference for a service */
|
|
1345
|
+
function getDefaultImage(serviceName) {
|
|
1346
|
+
return `${DEFAULT_SERVICE_IMAGES[serviceName]}:${DEFAULT_SERVICE_VERSIONS[serviceName]}`;
|
|
1299
1347
|
}
|
|
1300
|
-
/**
|
|
1301
|
-
|
|
1302
|
-
*/
|
|
1303
|
-
|
|
1304
|
-
|
|
1305
|
-
|
|
1306
|
-
|
|
1307
|
-
|
|
1308
|
-
|
|
1309
|
-
|
|
1310
|
-
|
|
1311
|
-
|
|
1348
|
+
/** Normalize services config to a consistent format - returns Map of service name to full image reference */
|
|
1349
|
+
function normalizeServices(services) {
|
|
1350
|
+
const result = /* @__PURE__ */ new Map();
|
|
1351
|
+
if (Array.isArray(services)) for (const name$1 of services) result.set(name$1, getDefaultImage(name$1));
|
|
1352
|
+
else for (const [name$1, config$1] of Object.entries(services)) {
|
|
1353
|
+
const serviceName = name$1;
|
|
1354
|
+
if (config$1 === true) result.set(serviceName, getDefaultImage(serviceName));
|
|
1355
|
+
else if (config$1 && typeof config$1 === "object") {
|
|
1356
|
+
const serviceConfig = config$1;
|
|
1357
|
+
if (serviceConfig.image) result.set(serviceName, serviceConfig.image);
|
|
1358
|
+
else {
|
|
1359
|
+
const version$1 = serviceConfig.version ?? DEFAULT_SERVICE_VERSIONS[serviceName];
|
|
1360
|
+
result.set(serviceName, `${DEFAULT_SERVICE_IMAGES[serviceName]}:${version$1}`);
|
|
1312
1361
|
}
|
|
1313
|
-
}
|
|
1314
|
-
logger$5.log(`✅ Image built: ${imageRef}`);
|
|
1315
|
-
} catch (error) {
|
|
1316
|
-
throw new Error(`Failed to build Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
|
|
1362
|
+
}
|
|
1317
1363
|
}
|
|
1364
|
+
return result;
|
|
1318
1365
|
}
|
|
1319
1366
|
/**
|
|
1320
|
-
*
|
|
1367
|
+
* Generate docker-compose.yml for production deployment
|
|
1321
1368
|
*/
|
|
1322
|
-
|
|
1323
|
-
|
|
1324
|
-
|
|
1325
|
-
|
|
1326
|
-
|
|
1327
|
-
|
|
1328
|
-
|
|
1329
|
-
|
|
1330
|
-
|
|
1331
|
-
|
|
1369
|
+
function generateDockerCompose(options) {
|
|
1370
|
+
const { imageName, registry, port, healthCheckPath, services } = options;
|
|
1371
|
+
const serviceMap = normalizeServices(services);
|
|
1372
|
+
const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
|
|
1373
|
+
let yaml = `version: '3.8'
|
|
1374
|
+
|
|
1375
|
+
services:
|
|
1376
|
+
api:
|
|
1377
|
+
build:
|
|
1378
|
+
context: ../..
|
|
1379
|
+
dockerfile: .gkm/docker/Dockerfile
|
|
1380
|
+
image: ${imageRef}\${IMAGE_NAME:-${imageName}}:\${TAG:-latest}
|
|
1381
|
+
container_name: ${imageName}
|
|
1382
|
+
restart: unless-stopped
|
|
1383
|
+
ports:
|
|
1384
|
+
- "\${PORT:-${port}}:${port}"
|
|
1385
|
+
environment:
|
|
1386
|
+
- NODE_ENV=production
|
|
1387
|
+
`;
|
|
1388
|
+
if (serviceMap.has("postgres")) yaml += ` - DATABASE_URL=\${DATABASE_URL:-postgresql://postgres:postgres@postgres:5432/app}
|
|
1389
|
+
`;
|
|
1390
|
+
if (serviceMap.has("redis")) yaml += ` - REDIS_URL=\${REDIS_URL:-redis://redis:6379}
|
|
1391
|
+
`;
|
|
1392
|
+
if (serviceMap.has("rabbitmq")) yaml += ` - RABBITMQ_URL=\${RABBITMQ_URL:-amqp://rabbitmq:5672}
|
|
1393
|
+
`;
|
|
1394
|
+
yaml += ` healthcheck:
|
|
1395
|
+
test: ["CMD", "wget", "-q", "--spider", "http://localhost:${port}${healthCheckPath}"]
|
|
1396
|
+
interval: 30s
|
|
1397
|
+
timeout: 3s
|
|
1398
|
+
retries: 3
|
|
1399
|
+
`;
|
|
1400
|
+
if (serviceMap.size > 0) {
|
|
1401
|
+
yaml += ` depends_on:
|
|
1402
|
+
`;
|
|
1403
|
+
for (const serviceName of serviceMap.keys()) yaml += ` ${serviceName}:
|
|
1404
|
+
condition: service_healthy
|
|
1405
|
+
`;
|
|
1332
1406
|
}
|
|
1407
|
+
yaml += ` networks:
|
|
1408
|
+
- app-network
|
|
1409
|
+
`;
|
|
1410
|
+
const postgresImage = serviceMap.get("postgres");
|
|
1411
|
+
if (postgresImage) yaml += `
|
|
1412
|
+
postgres:
|
|
1413
|
+
image: ${postgresImage}
|
|
1414
|
+
container_name: postgres
|
|
1415
|
+
restart: unless-stopped
|
|
1416
|
+
environment:
|
|
1417
|
+
POSTGRES_USER: \${POSTGRES_USER:-postgres}
|
|
1418
|
+
POSTGRES_PASSWORD: \${POSTGRES_PASSWORD:-postgres}
|
|
1419
|
+
POSTGRES_DB: \${POSTGRES_DB:-app}
|
|
1420
|
+
volumes:
|
|
1421
|
+
- postgres_data:/var/lib/postgresql/data
|
|
1422
|
+
healthcheck:
|
|
1423
|
+
test: ["CMD-SHELL", "pg_isready -U postgres"]
|
|
1424
|
+
interval: 5s
|
|
1425
|
+
timeout: 5s
|
|
1426
|
+
retries: 5
|
|
1427
|
+
networks:
|
|
1428
|
+
- app-network
|
|
1429
|
+
`;
|
|
1430
|
+
const redisImage = serviceMap.get("redis");
|
|
1431
|
+
if (redisImage) yaml += `
|
|
1432
|
+
redis:
|
|
1433
|
+
image: ${redisImage}
|
|
1434
|
+
container_name: redis
|
|
1435
|
+
restart: unless-stopped
|
|
1436
|
+
volumes:
|
|
1437
|
+
- redis_data:/data
|
|
1438
|
+
healthcheck:
|
|
1439
|
+
test: ["CMD", "redis-cli", "ping"]
|
|
1440
|
+
interval: 5s
|
|
1441
|
+
timeout: 5s
|
|
1442
|
+
retries: 5
|
|
1443
|
+
networks:
|
|
1444
|
+
- app-network
|
|
1445
|
+
`;
|
|
1446
|
+
const rabbitmqImage = serviceMap.get("rabbitmq");
|
|
1447
|
+
if (rabbitmqImage) yaml += `
|
|
1448
|
+
rabbitmq:
|
|
1449
|
+
image: ${rabbitmqImage}
|
|
1450
|
+
container_name: rabbitmq
|
|
1451
|
+
restart: unless-stopped
|
|
1452
|
+
environment:
|
|
1453
|
+
RABBITMQ_DEFAULT_USER: \${RABBITMQ_USER:-guest}
|
|
1454
|
+
RABBITMQ_DEFAULT_PASS: \${RABBITMQ_PASSWORD:-guest}
|
|
1455
|
+
ports:
|
|
1456
|
+
- "15672:15672" # Management UI
|
|
1457
|
+
volumes:
|
|
1458
|
+
- rabbitmq_data:/var/lib/rabbitmq
|
|
1459
|
+
healthcheck:
|
|
1460
|
+
test: ["CMD", "rabbitmq-diagnostics", "-q", "ping"]
|
|
1461
|
+
interval: 10s
|
|
1462
|
+
timeout: 5s
|
|
1463
|
+
retries: 5
|
|
1464
|
+
networks:
|
|
1465
|
+
- app-network
|
|
1466
|
+
`;
|
|
1467
|
+
yaml += `
|
|
1468
|
+
volumes:
|
|
1469
|
+
`;
|
|
1470
|
+
if (serviceMap.has("postgres")) yaml += ` postgres_data:
|
|
1471
|
+
`;
|
|
1472
|
+
if (serviceMap.has("redis")) yaml += ` redis_data:
|
|
1473
|
+
`;
|
|
1474
|
+
if (serviceMap.has("rabbitmq")) yaml += ` rabbitmq_data:
|
|
1475
|
+
`;
|
|
1476
|
+
yaml += `
|
|
1477
|
+
networks:
|
|
1478
|
+
app-network:
|
|
1479
|
+
driver: bridge
|
|
1480
|
+
`;
|
|
1481
|
+
return yaml;
|
|
1333
1482
|
}
|
|
1334
1483
|
/**
|
|
1335
|
-
*
|
|
1484
|
+
* Generate a minimal docker-compose.yml for API only
|
|
1336
1485
|
*/
|
|
1337
|
-
|
|
1338
|
-
const {
|
|
1339
|
-
const
|
|
1340
|
-
|
|
1341
|
-
|
|
1342
|
-
|
|
1343
|
-
|
|
1344
|
-
|
|
1345
|
-
|
|
1346
|
-
|
|
1347
|
-
|
|
1348
|
-
|
|
1349
|
-
|
|
1350
|
-
|
|
1351
|
-
|
|
1352
|
-
|
|
1353
|
-
|
|
1354
|
-
|
|
1355
|
-
|
|
1356
|
-
|
|
1357
|
-
|
|
1486
|
+
function generateMinimalDockerCompose(options) {
|
|
1487
|
+
const { imageName, registry, port, healthCheckPath } = options;
|
|
1488
|
+
const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
|
|
1489
|
+
return `version: '3.8'
|
|
1490
|
+
|
|
1491
|
+
services:
|
|
1492
|
+
api:
|
|
1493
|
+
build:
|
|
1494
|
+
context: ../..
|
|
1495
|
+
dockerfile: .gkm/docker/Dockerfile
|
|
1496
|
+
image: ${imageRef}\${IMAGE_NAME:-${imageName}}:\${TAG:-latest}
|
|
1497
|
+
container_name: ${imageName}
|
|
1498
|
+
restart: unless-stopped
|
|
1499
|
+
ports:
|
|
1500
|
+
- "\${PORT:-${port}}:${port}"
|
|
1501
|
+
environment:
|
|
1502
|
+
- NODE_ENV=production
|
|
1503
|
+
healthcheck:
|
|
1504
|
+
test: ["CMD", "wget", "-q", "--spider", "http://localhost:${port}${healthCheckPath}"]
|
|
1505
|
+
interval: 30s
|
|
1506
|
+
timeout: 3s
|
|
1507
|
+
retries: 3
|
|
1508
|
+
networks:
|
|
1509
|
+
- app-network
|
|
1510
|
+
|
|
1511
|
+
networks:
|
|
1512
|
+
app-network:
|
|
1513
|
+
driver: bridge
|
|
1514
|
+
`;
|
|
1358
1515
|
}
|
|
1516
|
+
|
|
1517
|
+
//#endregion
|
|
1518
|
+
//#region src/docker/templates.ts
|
|
1519
|
+
const LOCKFILES = [
|
|
1520
|
+
["pnpm-lock.yaml", "pnpm"],
|
|
1521
|
+
["bun.lockb", "bun"],
|
|
1522
|
+
["yarn.lock", "yarn"],
|
|
1523
|
+
["package-lock.json", "npm"]
|
|
1524
|
+
];
|
|
1359
1525
|
/**
|
|
1360
|
-
*
|
|
1526
|
+
* Detect package manager from lockfiles
|
|
1527
|
+
* Walks up the directory tree to find lockfile (for monorepos)
|
|
1361
1528
|
*/
|
|
1362
|
-
function
|
|
1363
|
-
|
|
1364
|
-
|
|
1365
|
-
|
|
1366
|
-
|
|
1529
|
+
function detectPackageManager$1(cwd = process.cwd()) {
|
|
1530
|
+
let dir = cwd;
|
|
1531
|
+
const root = parse(dir).root;
|
|
1532
|
+
while (dir !== root) {
|
|
1533
|
+
for (const [lockfile, pm] of LOCKFILES) if (existsSync(join(dir, lockfile))) return pm;
|
|
1534
|
+
dir = dirname(dir);
|
|
1535
|
+
}
|
|
1536
|
+
for (const [lockfile, pm] of LOCKFILES) if (existsSync(join(root, lockfile))) return pm;
|
|
1537
|
+
return "pnpm";
|
|
1367
1538
|
}
|
|
1368
|
-
|
|
1369
|
-
//#endregion
|
|
1370
|
-
//#region src/deploy/dokploy.ts
|
|
1371
|
-
const logger$4 = console;
|
|
1372
1539
|
/**
|
|
1373
|
-
*
|
|
1540
|
+
* Find the lockfile path by walking up the directory tree
|
|
1541
|
+
* Returns the full path to the lockfile, or null if not found
|
|
1374
1542
|
*/
|
|
1375
|
-
|
|
1376
|
-
|
|
1377
|
-
|
|
1378
|
-
|
|
1543
|
+
function findLockfilePath(cwd = process.cwd()) {
|
|
1544
|
+
let dir = cwd;
|
|
1545
|
+
const root = parse(dir).root;
|
|
1546
|
+
while (dir !== root) {
|
|
1547
|
+
for (const [lockfile] of LOCKFILES) {
|
|
1548
|
+
const lockfilePath = join(dir, lockfile);
|
|
1549
|
+
if (existsSync(lockfilePath)) return lockfilePath;
|
|
1550
|
+
}
|
|
1551
|
+
dir = dirname(dir);
|
|
1552
|
+
}
|
|
1553
|
+
for (const [lockfile] of LOCKFILES) {
|
|
1554
|
+
const lockfilePath = join(root, lockfile);
|
|
1555
|
+
if (existsSync(lockfilePath)) return lockfilePath;
|
|
1556
|
+
}
|
|
1557
|
+
return null;
|
|
1379
1558
|
}
|
|
1380
1559
|
/**
|
|
1381
|
-
*
|
|
1560
|
+
* Check if we're in a monorepo (lockfile is in a parent directory)
|
|
1382
1561
|
*/
|
|
1383
|
-
|
|
1384
|
-
const
|
|
1385
|
-
|
|
1386
|
-
|
|
1387
|
-
|
|
1388
|
-
"Content-Type": "application/json",
|
|
1389
|
-
Authorization: `Bearer ${token}`
|
|
1390
|
-
},
|
|
1391
|
-
body: JSON.stringify(body)
|
|
1392
|
-
});
|
|
1393
|
-
if (!response.ok) {
|
|
1394
|
-
let errorMessage = `Dokploy API error: ${response.status} ${response.statusText}`;
|
|
1395
|
-
try {
|
|
1396
|
-
const errorBody = await response.json();
|
|
1397
|
-
if (errorBody.message) errorMessage = `Dokploy API error: ${errorBody.message}`;
|
|
1398
|
-
if (errorBody.issues?.length) errorMessage += `\n Issues: ${errorBody.issues.map((i) => i.message).join(", ")}`;
|
|
1399
|
-
} catch {}
|
|
1400
|
-
throw new Error(errorMessage);
|
|
1401
|
-
}
|
|
1402
|
-
return response.json();
|
|
1562
|
+
function isMonorepo(cwd = process.cwd()) {
|
|
1563
|
+
const lockfilePath = findLockfilePath(cwd);
|
|
1564
|
+
if (!lockfilePath) return false;
|
|
1565
|
+
const lockfileDir = dirname(lockfilePath);
|
|
1566
|
+
return lockfileDir !== cwd;
|
|
1403
1567
|
}
|
|
1404
1568
|
/**
|
|
1405
|
-
*
|
|
1569
|
+
* Check if turbo.json exists (walks up directory tree)
|
|
1406
1570
|
*/
|
|
1407
|
-
|
|
1408
|
-
|
|
1409
|
-
const
|
|
1410
|
-
|
|
1411
|
-
|
|
1412
|
-
|
|
1413
|
-
}
|
|
1414
|
-
|
|
1571
|
+
function hasTurboConfig(cwd = process.cwd()) {
|
|
1572
|
+
let dir = cwd;
|
|
1573
|
+
const root = parse(dir).root;
|
|
1574
|
+
while (dir !== root) {
|
|
1575
|
+
if (existsSync(join(dir, "turbo.json"))) return true;
|
|
1576
|
+
dir = dirname(dir);
|
|
1577
|
+
}
|
|
1578
|
+
return existsSync(join(root, "turbo.json"));
|
|
1415
1579
|
}
|
|
1416
1580
|
/**
|
|
1417
|
-
*
|
|
1581
|
+
* Get install command for turbo builds (without frozen lockfile)
|
|
1582
|
+
* Turbo prune creates a subset that may not perfectly match the lockfile
|
|
1418
1583
|
*/
|
|
1419
|
-
|
|
1420
|
-
|
|
1421
|
-
|
|
1422
|
-
|
|
1584
|
+
function getTurboInstallCmd(pm) {
|
|
1585
|
+
const commands = {
|
|
1586
|
+
pnpm: "pnpm install",
|
|
1587
|
+
npm: "npm install",
|
|
1588
|
+
yarn: "yarn install",
|
|
1589
|
+
bun: "bun install"
|
|
1590
|
+
};
|
|
1591
|
+
return commands[pm];
|
|
1423
1592
|
}
|
|
1424
1593
|
/**
|
|
1425
|
-
*
|
|
1594
|
+
* Get package manager specific commands and paths
|
|
1426
1595
|
*/
|
|
1427
|
-
|
|
1428
|
-
const
|
|
1429
|
-
|
|
1430
|
-
|
|
1431
|
-
|
|
1432
|
-
|
|
1433
|
-
|
|
1434
|
-
|
|
1435
|
-
|
|
1436
|
-
|
|
1437
|
-
|
|
1438
|
-
|
|
1439
|
-
|
|
1440
|
-
|
|
1441
|
-
|
|
1442
|
-
|
|
1443
|
-
|
|
1444
|
-
|
|
1445
|
-
|
|
1446
|
-
|
|
1447
|
-
|
|
1448
|
-
|
|
1596
|
+
function getPmConfig(pm) {
|
|
1597
|
+
const configs = {
|
|
1598
|
+
pnpm: {
|
|
1599
|
+
install: "corepack enable && corepack prepare pnpm@latest --activate",
|
|
1600
|
+
lockfile: "pnpm-lock.yaml",
|
|
1601
|
+
fetch: "pnpm fetch",
|
|
1602
|
+
installCmd: "pnpm install --frozen-lockfile --offline",
|
|
1603
|
+
cacheTarget: "/root/.local/share/pnpm/store",
|
|
1604
|
+
cacheId: "pnpm",
|
|
1605
|
+
run: "pnpm",
|
|
1606
|
+
dlx: "pnpm dlx",
|
|
1607
|
+
addGlobal: "pnpm add -g"
|
|
1608
|
+
},
|
|
1609
|
+
npm: {
|
|
1610
|
+
install: "",
|
|
1611
|
+
lockfile: "package-lock.json",
|
|
1612
|
+
fetch: "",
|
|
1613
|
+
installCmd: "npm ci",
|
|
1614
|
+
cacheTarget: "/root/.npm",
|
|
1615
|
+
cacheId: "npm",
|
|
1616
|
+
run: "npm run",
|
|
1617
|
+
dlx: "npx",
|
|
1618
|
+
addGlobal: "npm install -g"
|
|
1619
|
+
},
|
|
1620
|
+
yarn: {
|
|
1621
|
+
install: "corepack enable && corepack prepare yarn@stable --activate",
|
|
1622
|
+
lockfile: "yarn.lock",
|
|
1623
|
+
fetch: "",
|
|
1624
|
+
installCmd: "yarn install --frozen-lockfile",
|
|
1625
|
+
cacheTarget: "/root/.yarn/cache",
|
|
1626
|
+
cacheId: "yarn",
|
|
1627
|
+
run: "yarn",
|
|
1628
|
+
dlx: "yarn dlx",
|
|
1629
|
+
addGlobal: "yarn global add"
|
|
1630
|
+
},
|
|
1631
|
+
bun: {
|
|
1632
|
+
install: "npm install -g bun",
|
|
1633
|
+
lockfile: "bun.lockb",
|
|
1634
|
+
fetch: "",
|
|
1635
|
+
installCmd: "bun install --frozen-lockfile",
|
|
1636
|
+
cacheTarget: "/root/.bun/install/cache",
|
|
1637
|
+
cacheId: "bun",
|
|
1638
|
+
run: "bun run",
|
|
1639
|
+
dlx: "bunx",
|
|
1640
|
+
addGlobal: "bun add -g"
|
|
1641
|
+
}
|
|
1449
1642
|
};
|
|
1643
|
+
return configs[pm];
|
|
1450
1644
|
}
|
|
1451
1645
|
/**
|
|
1452
|
-
*
|
|
1646
|
+
* Generate a multi-stage Dockerfile for building from source
|
|
1647
|
+
* Optimized for build speed with:
|
|
1648
|
+
* - BuildKit cache mounts for package manager store
|
|
1649
|
+
* - pnpm fetch for better layer caching (when using pnpm)
|
|
1650
|
+
* - Optional turbo prune for monorepos
|
|
1453
1651
|
*/
|
|
1454
|
-
function
|
|
1455
|
-
|
|
1456
|
-
|
|
1457
|
-
|
|
1458
|
-
"
|
|
1459
|
-
|
|
1460
|
-
|
|
1461
|
-
const
|
|
1462
|
-
|
|
1463
|
-
|
|
1464
|
-
|
|
1465
|
-
endpoint: 'https://dokploy.example.com',
|
|
1466
|
-
projectId: 'proj_xxx',
|
|
1467
|
-
applicationId: 'app_xxx',
|
|
1468
|
-
},
|
|
1469
|
-
}`);
|
|
1470
|
-
return true;
|
|
1471
|
-
}
|
|
1652
|
+
function generateMultiStageDockerfile(options) {
|
|
1653
|
+
const { baseImage, port, healthCheckPath, turbo, turboPackage, packageManager } = options;
|
|
1654
|
+
if (turbo) return generateTurboDockerfile({
|
|
1655
|
+
...options,
|
|
1656
|
+
turboPackage: turboPackage ?? "api"
|
|
1657
|
+
});
|
|
1658
|
+
const pm = getPmConfig(packageManager);
|
|
1659
|
+
const installPm = pm.install ? `\n# Install ${packageManager}\nRUN ${pm.install}\n` : "";
|
|
1660
|
+
const hasFetch = packageManager === "pnpm";
|
|
1661
|
+
const depsStage = hasFetch ? `# Copy lockfile first for better caching
|
|
1662
|
+
COPY ${pm.lockfile} ./
|
|
1472
1663
|
|
|
1473
|
-
|
|
1474
|
-
|
|
1475
|
-
|
|
1664
|
+
# Fetch dependencies (downloads to virtual store, cached separately)
|
|
1665
|
+
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
|
|
1666
|
+
${pm.fetch}
|
|
1667
|
+
|
|
1668
|
+
# Copy package.json after fetch
|
|
1669
|
+
COPY package.json ./
|
|
1670
|
+
|
|
1671
|
+
# Install from cache (fast - no network needed)
|
|
1672
|
+
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
|
|
1673
|
+
${pm.installCmd}` : `# Copy package files
|
|
1674
|
+
COPY package.json ${pm.lockfile} ./
|
|
1675
|
+
|
|
1676
|
+
# Install dependencies with cache
|
|
1677
|
+
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
|
|
1678
|
+
${pm.installCmd}`;
|
|
1679
|
+
return `# syntax=docker/dockerfile:1
|
|
1680
|
+
# Stage 1: Dependencies
|
|
1681
|
+
FROM ${baseImage} AS deps
|
|
1682
|
+
|
|
1683
|
+
WORKDIR /app
|
|
1684
|
+
${installPm}
|
|
1685
|
+
${depsStage}
|
|
1686
|
+
|
|
1687
|
+
# Stage 2: Build
|
|
1688
|
+
FROM deps AS builder
|
|
1689
|
+
|
|
1690
|
+
WORKDIR /app
|
|
1691
|
+
|
|
1692
|
+
# Copy source (deps already installed)
|
|
1693
|
+
COPY . .
|
|
1694
|
+
|
|
1695
|
+
# Build production server using CLI from npm
|
|
1696
|
+
RUN ${pm.dlx} @geekmidas/cli build --provider server --production
|
|
1697
|
+
|
|
1698
|
+
# Stage 3: Production
|
|
1699
|
+
FROM ${baseImage} AS runner
|
|
1700
|
+
|
|
1701
|
+
WORKDIR /app
|
|
1702
|
+
|
|
1703
|
+
# Install tini for proper signal handling as PID 1
|
|
1704
|
+
RUN apk add --no-cache tini
|
|
1705
|
+
|
|
1706
|
+
# Create non-root user
|
|
1707
|
+
RUN addgroup --system --gid 1001 nodejs && \\
|
|
1708
|
+
adduser --system --uid 1001 hono
|
|
1709
|
+
|
|
1710
|
+
# Copy bundled server
|
|
1711
|
+
COPY --from=builder --chown=hono:nodejs /app/.gkm/server/dist/server.mjs ./
|
|
1712
|
+
|
|
1713
|
+
# Environment
|
|
1714
|
+
ENV NODE_ENV=production
|
|
1715
|
+
ENV PORT=${port}
|
|
1716
|
+
|
|
1717
|
+
# Health check
|
|
1718
|
+
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \\
|
|
1719
|
+
CMD wget -q --spider http://localhost:${port}${healthCheckPath} || exit 1
|
|
1720
|
+
|
|
1721
|
+
# Switch to non-root user
|
|
1722
|
+
USER hono
|
|
1723
|
+
|
|
1724
|
+
EXPOSE ${port}
|
|
1725
|
+
|
|
1726
|
+
# Use tini as entrypoint to handle PID 1 responsibilities
|
|
1727
|
+
ENTRYPOINT ["/sbin/tini", "--"]
|
|
1728
|
+
CMD ["node", "server.mjs"]
|
|
1729
|
+
`;
|
|
1730
|
+
}
|
|
1476
1731
|
/**
|
|
1477
|
-
* Generate
|
|
1732
|
+
* Generate a Dockerfile optimized for Turbo monorepos
|
|
1733
|
+
* Uses turbo prune to create minimal Docker context
|
|
1478
1734
|
*/
|
|
1479
|
-
function
|
|
1480
|
-
const
|
|
1481
|
-
|
|
1735
|
+
function generateTurboDockerfile(options) {
|
|
1736
|
+
const { baseImage, port, healthCheckPath, turboPackage, packageManager } = options;
|
|
1737
|
+
const pm = getPmConfig(packageManager);
|
|
1738
|
+
const installPm = pm.install ? `RUN ${pm.install}` : "";
|
|
1739
|
+
const turboInstallCmd = getTurboInstallCmd(packageManager);
|
|
1740
|
+
const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
|
|
1741
|
+
return `# syntax=docker/dockerfile:1
|
|
1742
|
+
# Stage 1: Prune monorepo
|
|
1743
|
+
FROM ${baseImage} AS pruner
|
|
1744
|
+
|
|
1745
|
+
WORKDIR /app
|
|
1746
|
+
|
|
1747
|
+
${installPm}
|
|
1748
|
+
|
|
1749
|
+
COPY . .
|
|
1750
|
+
|
|
1751
|
+
# Prune to only include necessary packages
|
|
1752
|
+
RUN ${turboCmd} prune ${turboPackage} --docker
|
|
1753
|
+
|
|
1754
|
+
# Stage 2: Install dependencies
|
|
1755
|
+
FROM ${baseImage} AS deps
|
|
1756
|
+
|
|
1757
|
+
WORKDIR /app
|
|
1758
|
+
|
|
1759
|
+
${installPm}
|
|
1760
|
+
|
|
1761
|
+
# Copy pruned lockfile and package.jsons
|
|
1762
|
+
COPY --from=pruner /app/out/${pm.lockfile} ./
|
|
1763
|
+
COPY --from=pruner /app/out/json/ ./
|
|
1764
|
+
|
|
1765
|
+
# Install dependencies (no frozen-lockfile since turbo prune creates a subset)
|
|
1766
|
+
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
|
|
1767
|
+
${turboInstallCmd}
|
|
1768
|
+
|
|
1769
|
+
# Stage 3: Build
|
|
1770
|
+
FROM deps AS builder
|
|
1771
|
+
|
|
1772
|
+
WORKDIR /app
|
|
1773
|
+
|
|
1774
|
+
# Copy pruned source
|
|
1775
|
+
COPY --from=pruner /app/out/full/ ./
|
|
1776
|
+
|
|
1777
|
+
# Build production server using CLI from npm
|
|
1778
|
+
RUN ${pm.dlx} @geekmidas/cli build --provider server --production
|
|
1779
|
+
|
|
1780
|
+
# Stage 4: Production
|
|
1781
|
+
FROM ${baseImage} AS runner
|
|
1782
|
+
|
|
1783
|
+
WORKDIR /app
|
|
1784
|
+
|
|
1785
|
+
RUN apk add --no-cache tini
|
|
1786
|
+
|
|
1787
|
+
RUN addgroup --system --gid 1001 nodejs && \\
|
|
1788
|
+
adduser --system --uid 1001 hono
|
|
1789
|
+
|
|
1790
|
+
COPY --from=builder --chown=hono:nodejs /app/.gkm/server/dist/server.mjs ./
|
|
1791
|
+
|
|
1792
|
+
ENV NODE_ENV=production
|
|
1793
|
+
ENV PORT=${port}
|
|
1794
|
+
|
|
1795
|
+
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \\
|
|
1796
|
+
CMD wget -q --spider http://localhost:${port}${healthCheckPath} || exit 1
|
|
1797
|
+
|
|
1798
|
+
USER hono
|
|
1799
|
+
|
|
1800
|
+
EXPOSE ${port}
|
|
1801
|
+
|
|
1802
|
+
ENTRYPOINT ["/sbin/tini", "--"]
|
|
1803
|
+
CMD ["node", "server.mjs"]
|
|
1804
|
+
`;
|
|
1482
1805
|
}
|
|
1483
1806
|
/**
|
|
1484
|
-
*
|
|
1807
|
+
* Generate a slim Dockerfile for pre-built bundles
|
|
1485
1808
|
*/
|
|
1486
|
-
|
|
1487
|
-
const {
|
|
1488
|
-
|
|
1489
|
-
|
|
1490
|
-
const config$1 = await loadConfig();
|
|
1491
|
-
const imageTag = tag ?? generateTag(stage);
|
|
1492
|
-
logger$3.log(` Tag: ${imageTag}`);
|
|
1493
|
-
let masterKey;
|
|
1494
|
-
if (!skipBuild) {
|
|
1495
|
-
logger$3.log(`\n📦 Building for production...`);
|
|
1496
|
-
const buildResult = await buildCommand({
|
|
1497
|
-
provider: "server",
|
|
1498
|
-
production: true,
|
|
1499
|
-
stage
|
|
1500
|
-
});
|
|
1501
|
-
masterKey = buildResult.masterKey;
|
|
1502
|
-
} else logger$3.log(`\n⏭️ Skipping build (--skip-build)`);
|
|
1503
|
-
const dockerConfig = resolveDockerConfig$1(config$1);
|
|
1504
|
-
const imageName = dockerConfig.imageName ?? "app";
|
|
1505
|
-
const registry = dockerConfig.registry;
|
|
1506
|
-
const imageRef = registry ? `${registry}/${imageName}:${imageTag}` : `${imageName}:${imageTag}`;
|
|
1507
|
-
let result;
|
|
1508
|
-
switch (provider) {
|
|
1509
|
-
case "docker": {
|
|
1510
|
-
result = await deployDocker({
|
|
1511
|
-
stage,
|
|
1512
|
-
tag: imageTag,
|
|
1513
|
-
skipPush,
|
|
1514
|
-
masterKey,
|
|
1515
|
-
config: dockerConfig
|
|
1516
|
-
});
|
|
1517
|
-
break;
|
|
1518
|
-
}
|
|
1519
|
-
case "dokploy": {
|
|
1520
|
-
const dokployConfigRaw = config$1.providers?.dokploy;
|
|
1521
|
-
if (typeof dokployConfigRaw === "boolean" || !dokployConfigRaw) throw new Error("Dokploy provider requires configuration.\nConfigure in gkm.config.ts:\n providers: {\n dokploy: {\n endpoint: 'https://dokploy.example.com',\n projectId: 'proj_xxx',\n applicationId: 'app_xxx',\n },\n }");
|
|
1522
|
-
validateDokployConfig(dokployConfigRaw);
|
|
1523
|
-
const dokployConfig = dokployConfigRaw;
|
|
1524
|
-
await deployDocker({
|
|
1525
|
-
stage,
|
|
1526
|
-
tag: imageTag,
|
|
1527
|
-
skipPush: false,
|
|
1528
|
-
masterKey,
|
|
1529
|
-
config: {
|
|
1530
|
-
registry: dokployConfig.registry ?? dockerConfig.registry,
|
|
1531
|
-
imageName: dockerConfig.imageName
|
|
1532
|
-
}
|
|
1533
|
-
});
|
|
1534
|
-
result = await deployDokploy({
|
|
1535
|
-
stage,
|
|
1536
|
-
tag: imageTag,
|
|
1537
|
-
imageRef,
|
|
1538
|
-
masterKey,
|
|
1539
|
-
config: dokployConfig
|
|
1540
|
-
});
|
|
1541
|
-
break;
|
|
1542
|
-
}
|
|
1543
|
-
case "aws-lambda": {
|
|
1544
|
-
logger$3.log("\n⚠️ AWS Lambda deployment is not yet implemented.");
|
|
1545
|
-
logger$3.log(" Use SST or AWS CDK for Lambda deployments.");
|
|
1546
|
-
result = {
|
|
1547
|
-
imageRef,
|
|
1548
|
-
masterKey
|
|
1549
|
-
};
|
|
1550
|
-
break;
|
|
1551
|
-
}
|
|
1552
|
-
default: throw new Error(`Unknown deploy provider: ${provider}\nSupported providers: docker, dokploy, aws-lambda`);
|
|
1553
|
-
}
|
|
1554
|
-
logger$3.log("\n✅ Deployment complete!");
|
|
1555
|
-
return result;
|
|
1556
|
-
}
|
|
1809
|
+
function generateSlimDockerfile(options) {
|
|
1810
|
+
const { baseImage, port, healthCheckPath } = options;
|
|
1811
|
+
return `# Slim Dockerfile for pre-built production bundle
|
|
1812
|
+
FROM ${baseImage}
|
|
1557
1813
|
|
|
1558
|
-
|
|
1559
|
-
|
|
1560
|
-
|
|
1814
|
+
WORKDIR /app
|
|
1815
|
+
|
|
1816
|
+
# Install tini for proper signal handling as PID 1
|
|
1817
|
+
# Handles SIGTERM propagation and zombie process reaping
|
|
1818
|
+
RUN apk add --no-cache tini
|
|
1819
|
+
|
|
1820
|
+
# Create non-root user
|
|
1821
|
+
RUN addgroup --system --gid 1001 nodejs && \\
|
|
1822
|
+
adduser --system --uid 1001 hono
|
|
1823
|
+
|
|
1824
|
+
# Copy pre-built bundle
|
|
1825
|
+
COPY .gkm/server/dist/server.mjs ./
|
|
1826
|
+
|
|
1827
|
+
# Environment
|
|
1828
|
+
ENV NODE_ENV=production
|
|
1829
|
+
ENV PORT=${port}
|
|
1830
|
+
|
|
1831
|
+
# Health check
|
|
1832
|
+
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \\
|
|
1833
|
+
CMD wget -q --spider http://localhost:${port}${healthCheckPath} || exit 1
|
|
1834
|
+
|
|
1835
|
+
# Switch to non-root user
|
|
1836
|
+
USER hono
|
|
1837
|
+
|
|
1838
|
+
EXPOSE ${port}
|
|
1839
|
+
|
|
1840
|
+
# Use tini as entrypoint to handle PID 1 responsibilities
|
|
1841
|
+
ENTRYPOINT ["/sbin/tini", "--"]
|
|
1842
|
+
CMD ["node", "server.mjs"]
|
|
1843
|
+
`;
|
|
1844
|
+
}
|
|
1561
1845
|
/**
|
|
1562
|
-
*
|
|
1846
|
+
* Generate .dockerignore file
|
|
1563
1847
|
*/
|
|
1564
|
-
|
|
1565
|
-
|
|
1566
|
-
|
|
1567
|
-
|
|
1848
|
+
function generateDockerignore() {
|
|
1849
|
+
return `# Dependencies
|
|
1850
|
+
node_modules
|
|
1851
|
+
.pnpm-store
|
|
1852
|
+
|
|
1853
|
+
# Build output (except what we need)
|
|
1854
|
+
.gkm/aws*
|
|
1855
|
+
.gkm/server/*.ts
|
|
1856
|
+
!.gkm/server/dist
|
|
1857
|
+
|
|
1858
|
+
# IDE and editor
|
|
1859
|
+
.idea
|
|
1860
|
+
.vscode
|
|
1861
|
+
*.swp
|
|
1862
|
+
*.swo
|
|
1863
|
+
|
|
1864
|
+
# Git
|
|
1865
|
+
.git
|
|
1866
|
+
.gitignore
|
|
1867
|
+
|
|
1868
|
+
# Logs
|
|
1869
|
+
*.log
|
|
1870
|
+
npm-debug.log*
|
|
1871
|
+
pnpm-debug.log*
|
|
1872
|
+
|
|
1873
|
+
# Test files
|
|
1874
|
+
**/*.test.ts
|
|
1875
|
+
**/*.spec.ts
|
|
1876
|
+
**/__tests__
|
|
1877
|
+
coverage
|
|
1878
|
+
|
|
1879
|
+
# Documentation
|
|
1880
|
+
docs
|
|
1881
|
+
*.md
|
|
1882
|
+
!README.md
|
|
1883
|
+
|
|
1884
|
+
# Environment files (handle secrets separately)
|
|
1885
|
+
.env
|
|
1886
|
+
.env.*
|
|
1887
|
+
!.env.example
|
|
1888
|
+
|
|
1889
|
+
# Docker files (don't copy recursively)
|
|
1890
|
+
Dockerfile*
|
|
1891
|
+
docker-compose*
|
|
1892
|
+
.dockerignore
|
|
1893
|
+
`;
|
|
1568
1894
|
}
|
|
1569
1895
|
/**
|
|
1570
|
-
*
|
|
1896
|
+
* Generate docker-entrypoint.sh for custom startup logic
|
|
1571
1897
|
*/
|
|
1572
|
-
|
|
1573
|
-
|
|
1574
|
-
|
|
1575
|
-
|
|
1576
|
-
|
|
1577
|
-
|
|
1578
|
-
|
|
1579
|
-
|
|
1580
|
-
|
|
1581
|
-
|
|
1582
|
-
|
|
1583
|
-
|
|
1584
|
-
|
|
1585
|
-
|
|
1586
|
-
if (errorBody.message) errorMessage = `Dokploy API error: ${errorBody.message}`;
|
|
1587
|
-
} catch {}
|
|
1588
|
-
throw new Error(errorMessage);
|
|
1589
|
-
}
|
|
1590
|
-
const text = await response.text();
|
|
1591
|
-
if (!text) return {};
|
|
1592
|
-
return JSON.parse(text);
|
|
1898
|
+
function generateDockerEntrypoint() {
|
|
1899
|
+
return `#!/bin/sh
|
|
1900
|
+
set -e
|
|
1901
|
+
|
|
1902
|
+
# Run any custom startup scripts here
|
|
1903
|
+
# Example: wait for database
|
|
1904
|
+
# until nc -z $DB_HOST $DB_PORT; do
|
|
1905
|
+
# echo "Waiting for database..."
|
|
1906
|
+
# sleep 1
|
|
1907
|
+
# done
|
|
1908
|
+
|
|
1909
|
+
# Execute the main command
|
|
1910
|
+
exec "$@"
|
|
1911
|
+
`;
|
|
1593
1912
|
}
|
|
1594
1913
|
/**
|
|
1595
|
-
*
|
|
1914
|
+
* Resolve Docker configuration from GkmConfig with defaults
|
|
1596
1915
|
*/
|
|
1597
|
-
|
|
1598
|
-
|
|
1916
|
+
function resolveDockerConfig$1(config$1) {
|
|
1917
|
+
const docker = config$1.docker ?? {};
|
|
1918
|
+
let defaultImageName = "api";
|
|
1919
|
+
try {
|
|
1920
|
+
const pkg = __require(`${process.cwd()}/package.json`);
|
|
1921
|
+
if (pkg.name) defaultImageName = pkg.name.replace(/^@[^/]+\//, "");
|
|
1922
|
+
} catch {}
|
|
1923
|
+
return {
|
|
1924
|
+
registry: docker.registry ?? "",
|
|
1925
|
+
imageName: docker.imageName ?? defaultImageName,
|
|
1926
|
+
baseImage: docker.baseImage ?? "node:22-alpine",
|
|
1927
|
+
port: docker.port ?? 3e3,
|
|
1928
|
+
compose: docker.compose
|
|
1929
|
+
};
|
|
1599
1930
|
}
|
|
1931
|
+
|
|
1932
|
+
//#endregion
|
|
1933
|
+
//#region src/docker/index.ts
|
|
1934
|
+
const logger$5 = console;
|
|
1600
1935
|
/**
|
|
1601
|
-
*
|
|
1936
|
+
* Docker command implementation
|
|
1937
|
+
* Generates Dockerfile, docker-compose.yml, and related files
|
|
1938
|
+
*
|
|
1939
|
+
* Default: Multi-stage Dockerfile that builds from source inside Docker
|
|
1940
|
+
* --slim: Slim Dockerfile that copies pre-built bundle (requires prior build)
|
|
1602
1941
|
*/
|
|
1603
|
-
async function
|
|
1604
|
-
|
|
1605
|
-
|
|
1606
|
-
|
|
1607
|
-
|
|
1942
|
+
async function dockerCommand(options) {
|
|
1943
|
+
const config$1 = await loadConfig();
|
|
1944
|
+
const dockerConfig = resolveDockerConfig$1(config$1);
|
|
1945
|
+
const serverConfig = typeof config$1.providers?.server === "object" ? config$1.providers.server : void 0;
|
|
1946
|
+
const healthCheckPath = serverConfig?.production?.healthCheck ?? "/health";
|
|
1947
|
+
const useSlim = options.slim === true;
|
|
1948
|
+
if (useSlim) {
|
|
1949
|
+
const distDir = join(process.cwd(), ".gkm", "server", "dist");
|
|
1950
|
+
const hasBuild = existsSync(join(distDir, "server.mjs"));
|
|
1951
|
+
if (!hasBuild) throw new Error("Slim Dockerfile requires a pre-built bundle. Run `gkm build --provider server --production` first, or omit --slim to use multi-stage build.");
|
|
1952
|
+
}
|
|
1953
|
+
const dockerDir = join(process.cwd(), ".gkm", "docker");
|
|
1954
|
+
await mkdir(dockerDir, { recursive: true });
|
|
1955
|
+
const packageManager = detectPackageManager$1();
|
|
1956
|
+
const inMonorepo = isMonorepo();
|
|
1957
|
+
const hasTurbo = hasTurboConfig();
|
|
1958
|
+
let useTurbo = options.turbo ?? false;
|
|
1959
|
+
if (inMonorepo && !useSlim) if (hasTurbo) {
|
|
1960
|
+
useTurbo = true;
|
|
1961
|
+
logger$5.log(" Detected monorepo with turbo.json - using turbo prune");
|
|
1962
|
+
} else throw new Error("Monorepo detected but turbo.json not found.\n\nDocker builds in monorepos require Turborepo for proper dependency isolation.\n\nTo fix this:\n 1. Install turbo: pnpm add -Dw turbo\n 2. Create turbo.json in your monorepo root\n 3. Run this command again\n\nSee: https://turbo.build/repo/docs/guides/tools/docker");
|
|
1963
|
+
let turboPackage = options.turboPackage ?? dockerConfig.imageName;
|
|
1964
|
+
if (useTurbo && !options.turboPackage) try {
|
|
1965
|
+
const pkg = __require(`${process.cwd()}/package.json`);
|
|
1966
|
+
if (pkg.name) {
|
|
1967
|
+
turboPackage = pkg.name;
|
|
1968
|
+
logger$5.log(` Turbo package: ${turboPackage}`);
|
|
1969
|
+
}
|
|
1970
|
+
} catch {}
|
|
1971
|
+
const templateOptions = {
|
|
1972
|
+
imageName: dockerConfig.imageName,
|
|
1973
|
+
baseImage: dockerConfig.baseImage,
|
|
1974
|
+
port: dockerConfig.port,
|
|
1975
|
+
healthCheckPath,
|
|
1976
|
+
prebuilt: useSlim,
|
|
1977
|
+
turbo: useTurbo,
|
|
1978
|
+
turboPackage,
|
|
1979
|
+
packageManager
|
|
1980
|
+
};
|
|
1981
|
+
const dockerfile = useSlim ? generateSlimDockerfile(templateOptions) : generateMultiStageDockerfile(templateOptions);
|
|
1982
|
+
const dockerMode = useSlim ? "slim" : useTurbo ? "turbo" : "multi-stage";
|
|
1983
|
+
const dockerfilePath = join(dockerDir, "Dockerfile");
|
|
1984
|
+
await writeFile(dockerfilePath, dockerfile);
|
|
1985
|
+
logger$5.log(`Generated: .gkm/docker/Dockerfile (${dockerMode}, ${packageManager})`);
|
|
1986
|
+
const composeOptions = {
|
|
1987
|
+
imageName: dockerConfig.imageName,
|
|
1988
|
+
registry: options.registry ?? dockerConfig.registry,
|
|
1989
|
+
port: dockerConfig.port,
|
|
1990
|
+
healthCheckPath,
|
|
1991
|
+
services: dockerConfig.compose?.services ?? {}
|
|
1992
|
+
};
|
|
1993
|
+
const hasServices = Array.isArray(composeOptions.services) ? composeOptions.services.length > 0 : Object.keys(composeOptions.services).length > 0;
|
|
1994
|
+
const dockerCompose = hasServices ? generateDockerCompose(composeOptions) : generateMinimalDockerCompose(composeOptions);
|
|
1995
|
+
const composePath = join(dockerDir, "docker-compose.yml");
|
|
1996
|
+
await writeFile(composePath, dockerCompose);
|
|
1997
|
+
logger$5.log("Generated: .gkm/docker/docker-compose.yml");
|
|
1998
|
+
const dockerignore = generateDockerignore();
|
|
1999
|
+
const dockerignorePath = join(process.cwd(), ".dockerignore");
|
|
2000
|
+
await writeFile(dockerignorePath, dockerignore);
|
|
2001
|
+
logger$5.log("Generated: .dockerignore (project root)");
|
|
2002
|
+
const entrypoint = generateDockerEntrypoint();
|
|
2003
|
+
const entrypointPath = join(dockerDir, "docker-entrypoint.sh");
|
|
2004
|
+
await writeFile(entrypointPath, entrypoint);
|
|
2005
|
+
logger$5.log("Generated: .gkm/docker/docker-entrypoint.sh");
|
|
2006
|
+
const result = {
|
|
2007
|
+
dockerfile: dockerfilePath,
|
|
2008
|
+
dockerCompose: composePath,
|
|
2009
|
+
dockerignore: dockerignorePath,
|
|
2010
|
+
entrypoint: entrypointPath
|
|
2011
|
+
};
|
|
2012
|
+
if (options.build) await buildDockerImage(dockerConfig.imageName, options);
|
|
2013
|
+
if (options.push) await pushDockerImage(dockerConfig.imageName, options);
|
|
2014
|
+
return result;
|
|
1608
2015
|
}
|
|
1609
2016
|
/**
|
|
1610
|
-
*
|
|
2017
|
+
* Ensure lockfile exists in the build context
|
|
2018
|
+
* For monorepos, copies from workspace root if needed
|
|
2019
|
+
* Returns cleanup function if file was copied
|
|
1611
2020
|
*/
|
|
1612
|
-
|
|
1613
|
-
|
|
2021
|
+
function ensureLockfile(cwd) {
|
|
2022
|
+
const lockfilePath = findLockfilePath(cwd);
|
|
2023
|
+
if (!lockfilePath) {
|
|
2024
|
+
logger$5.warn("\n⚠️ No lockfile found. Docker build may fail or use stale dependencies.");
|
|
2025
|
+
return null;
|
|
2026
|
+
}
|
|
2027
|
+
const lockfileName = basename(lockfilePath);
|
|
2028
|
+
const localLockfile = join(cwd, lockfileName);
|
|
2029
|
+
if (lockfilePath === localLockfile) return null;
|
|
2030
|
+
logger$5.log(` Copying ${lockfileName} from monorepo root...`);
|
|
2031
|
+
copyFileSync(lockfilePath, localLockfile);
|
|
2032
|
+
return () => {
|
|
2033
|
+
try {
|
|
2034
|
+
unlinkSync(localLockfile);
|
|
2035
|
+
} catch {}
|
|
2036
|
+
};
|
|
1614
2037
|
}
|
|
1615
2038
|
/**
|
|
1616
|
-
*
|
|
2039
|
+
* Build Docker image
|
|
2040
|
+
* Uses BuildKit for cache mount support
|
|
1617
2041
|
*/
|
|
1618
|
-
async function
|
|
1619
|
-
const
|
|
1620
|
-
|
|
1621
|
-
const
|
|
1622
|
-
|
|
1623
|
-
|
|
1624
|
-
|
|
1625
|
-
|
|
1626
|
-
|
|
1627
|
-
|
|
2042
|
+
async function buildDockerImage(imageName, options) {
|
|
2043
|
+
const tag = options.tag ?? "latest";
|
|
2044
|
+
const registry = options.registry;
|
|
2045
|
+
const fullImageName = registry ? `${registry}/${imageName}:${tag}` : `${imageName}:${tag}`;
|
|
2046
|
+
logger$5.log(`\n🐳 Building Docker image: ${fullImageName}`);
|
|
2047
|
+
const cwd = process.cwd();
|
|
2048
|
+
const cleanup = ensureLockfile(cwd);
|
|
2049
|
+
try {
|
|
2050
|
+
execSync(`DOCKER_BUILDKIT=1 docker build -f .gkm/docker/Dockerfile -t ${fullImageName} .`, {
|
|
2051
|
+
cwd,
|
|
2052
|
+
stdio: "inherit",
|
|
2053
|
+
env: {
|
|
2054
|
+
...process.env,
|
|
2055
|
+
DOCKER_BUILDKIT: "1"
|
|
2056
|
+
}
|
|
1628
2057
|
});
|
|
1629
|
-
|
|
2058
|
+
logger$5.log(`✅ Docker image built: ${fullImageName}`);
|
|
2059
|
+
} catch (error) {
|
|
2060
|
+
throw new Error(`Failed to build Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
|
|
2061
|
+
} finally {
|
|
2062
|
+
cleanup?.();
|
|
1630
2063
|
}
|
|
1631
|
-
return dokployRequest("POST", "application.create", baseUrl, token, {
|
|
1632
|
-
name: name$1,
|
|
1633
|
-
projectId,
|
|
1634
|
-
environmentId
|
|
1635
|
-
});
|
|
1636
2064
|
}
|
|
1637
2065
|
/**
|
|
1638
|
-
*
|
|
2066
|
+
* Push Docker image to registry
|
|
1639
2067
|
*/
|
|
1640
|
-
async function
|
|
1641
|
-
|
|
1642
|
-
|
|
1643
|
-
|
|
2068
|
+
async function pushDockerImage(imageName, options) {
|
|
2069
|
+
const tag = options.tag ?? "latest";
|
|
2070
|
+
const registry = options.registry;
|
|
2071
|
+
if (!registry) throw new Error("Registry is required to push Docker image. Use --registry or configure docker.registry in gkm.config.ts");
|
|
2072
|
+
const fullImageName = `${registry}/${imageName}:${tag}`;
|
|
2073
|
+
logger$5.log(`\n🚀 Pushing Docker image: ${fullImageName}`);
|
|
2074
|
+
try {
|
|
2075
|
+
execSync(`docker push ${fullImageName}`, {
|
|
2076
|
+
cwd: process.cwd(),
|
|
2077
|
+
stdio: "inherit"
|
|
2078
|
+
});
|
|
2079
|
+
logger$5.log(`✅ Docker image pushed: ${fullImageName}`);
|
|
2080
|
+
} catch (error) {
|
|
2081
|
+
throw new Error(`Failed to push Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
|
|
2082
|
+
}
|
|
2083
|
+
}
|
|
2084
|
+
|
|
2085
|
+
//#endregion
|
|
2086
|
+
//#region src/deploy/docker.ts
|
|
2087
|
+
/**
|
|
2088
|
+
* Get app name from package.json in the current working directory
|
|
2089
|
+
* Used for Dokploy app/project naming
|
|
2090
|
+
*/
|
|
2091
|
+
function getAppNameFromCwd() {
|
|
2092
|
+
const packageJsonPath = join(process.cwd(), "package.json");
|
|
2093
|
+
if (!existsSync(packageJsonPath)) return void 0;
|
|
2094
|
+
try {
|
|
2095
|
+
const pkg = JSON.parse(readFileSync(packageJsonPath, "utf-8"));
|
|
2096
|
+
if (pkg.name) return pkg.name.replace(/^@[^/]+\//, "");
|
|
2097
|
+
} catch {}
|
|
2098
|
+
return void 0;
|
|
2099
|
+
}
|
|
2100
|
+
/**
|
|
2101
|
+
* Get app name from package.json adjacent to the lockfile (project root)
|
|
2102
|
+
* Used for Docker image naming
|
|
2103
|
+
*/
|
|
2104
|
+
function getAppNameFromPackageJson() {
|
|
2105
|
+
const cwd = process.cwd();
|
|
2106
|
+
const lockfilePath = findLockfilePath(cwd);
|
|
2107
|
+
if (!lockfilePath) return void 0;
|
|
2108
|
+
const projectRoot = dirname(lockfilePath);
|
|
2109
|
+
const packageJsonPath = join(projectRoot, "package.json");
|
|
2110
|
+
if (!existsSync(packageJsonPath)) return void 0;
|
|
2111
|
+
try {
|
|
2112
|
+
const pkg = JSON.parse(readFileSync(packageJsonPath, "utf-8"));
|
|
2113
|
+
if (pkg.name) return pkg.name.replace(/^@[^/]+\//, "");
|
|
2114
|
+
} catch {}
|
|
2115
|
+
return void 0;
|
|
2116
|
+
}
|
|
2117
|
+
+const logger$4 = console;
+/**
+* Get the full image reference
+*/
+function getImageRef(registry, imageName, tag) {
+if (registry) return `${registry}/${imageName}:${tag}`;
+return `${imageName}:${tag}`;
+}
+/**
+* Build Docker image
+*/
+async function buildImage(imageRef) {
+logger$4.log(`\n🔨 Building Docker image: ${imageRef}`);
+const cwd = process.cwd();
+const inMonorepo = isMonorepo(cwd);
+if (inMonorepo) logger$4.log(" Generating Dockerfile for monorepo (turbo prune)...");
+else logger$4.log(" Generating Dockerfile...");
+await dockerCommand({});
+let buildCwd = cwd;
+let dockerfilePath = ".gkm/docker/Dockerfile";
+if (inMonorepo) {
+const lockfilePath = findLockfilePath(cwd);
+if (lockfilePath) {
+const monorepoRoot = dirname(lockfilePath);
+const appRelPath = relative(monorepoRoot, cwd);
+dockerfilePath = join(appRelPath, ".gkm/docker/Dockerfile");
+buildCwd = monorepoRoot;
+logger$4.log(` Building from monorepo root: ${monorepoRoot}`);
+}
+}
+try {
+execSync(`DOCKER_BUILDKIT=1 docker build --platform linux/amd64 -f ${dockerfilePath} -t ${imageRef} .`, {
+cwd: buildCwd,
+stdio: "inherit",
+env: {
+...process.env,
+DOCKER_BUILDKIT: "1"
+}
+});
+logger$4.log(`✅ Image built: ${imageRef}`);
+} catch (error) {
+throw new Error(`Failed to build Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
+}
+}
+/**
+* Push Docker image to registry
+*/
+async function pushImage(imageRef) {
+logger$4.log(`\n☁️ Pushing image: ${imageRef}`);
+try {
+execSync(`docker push ${imageRef}`, {
+cwd: process.cwd(),
+stdio: "inherit"
+});
+logger$4.log(`✅ Image pushed: ${imageRef}`);
+} catch (error) {
+throw new Error(`Failed to push Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
+}
+}
+/**
+* Deploy using Docker (build and optionally push image)
+*/
+async function deployDocker(options) {
+const { stage, tag, skipPush, masterKey, config: config$1 } = options;
+const imageName = config$1.imageName;
+const imageRef = getImageRef(config$1.registry, imageName, tag);
+await buildImage(imageRef);
+if (!skipPush) if (!config$1.registry) logger$4.warn("\n⚠️ No registry configured. Use --skip-push or configure docker.registry in gkm.config.ts");
+else await pushImage(imageRef);
+logger$4.log("\n✅ Docker deployment ready!");
+logger$4.log(`\n📋 Deployment details:`);
+logger$4.log(` Image: ${imageRef}`);
+logger$4.log(` Stage: ${stage}`);
+if (masterKey) {
+logger$4.log(`\n🔐 Deploy with this environment variable:`);
+logger$4.log(` GKM_MASTER_KEY=${masterKey}`);
+logger$4.log("\n Example docker run:");
+logger$4.log(` docker run -e GKM_MASTER_KEY=${masterKey} ${imageRef}`);
+}
+return {
+imageRef,
+masterKey
+};
+}
+/**
+* Resolve Docker deploy config from gkm config
+* - imageName: from config, or cwd package.json, or 'app' (for Docker image)
+* - projectName: from root package.json, or 'app' (for Dokploy project)
+* - appName: from cwd package.json, or projectName (for Dokploy app within project)
+*/
+function resolveDockerConfig(config$1) {
+const projectName = getAppNameFromPackageJson() ?? "app";
+const appName = getAppNameFromCwd() ?? projectName;
+const imageName = config$1.docker?.imageName ?? appName;
+return {
+registry: config$1.docker?.registry,
+imageName,
+projectName,
+appName
+};
+}
+
+//#endregion
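
For reference, the image reference assembled by the new `getImageRef` helper is just `registry/imageName:tag`, with the registry prefix dropped when none is configured; a runnable sketch (the values here are hypothetical):

```js
// Mirrors getImageRef from the new docker.ts region above; values are hypothetical.
function getImageRef(registry, imageName, tag) {
	if (registry) return `${registry}/${imageName}:${tag}`;
	return `${imageName}:${tag}`;
}

console.log(getImageRef("ghcr.io/acme", "api", "production-2024-06-01T12-30-45"));
// -> ghcr.io/acme/api:production-2024-06-01T12-30-45
console.log(getImageRef(undefined, "api", "latest"));
// -> api:latest
```
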
+//#region src/deploy/dokploy.ts
+const logger$3 = console;
+/**
+* Get the Dokploy API token from stored credentials or environment
+*/
+async function getApiToken$1() {
+const token = await getDokployToken();
+if (!token) throw new Error("Dokploy credentials not found.\nRun \"gkm login --service dokploy\" to authenticate, or set DOKPLOY_API_TOKEN.");
+return token;
+}
+/**
+* Create a Dokploy API client
+*/
+async function createApi$1(endpoint) {
+const token = await getApiToken$1();
+return new DokployApi({
+baseUrl: endpoint,
+token
 });
 }
 /**
-*
+* Deploy to Dokploy
+*/
+async function deployDokploy(options) {
+const { stage, imageRef, masterKey, config: config$1 } = options;
+logger$3.log(`\n🎯 Deploying to Dokploy...`);
+logger$3.log(` Endpoint: ${config$1.endpoint}`);
+logger$3.log(` Application: ${config$1.applicationId}`);
+const api = await createApi$1(config$1.endpoint);
+logger$3.log(` Configuring Docker image: ${imageRef}`);
+const registryOptions = {};
+if (config$1.registryId) {
+registryOptions.registryId = config$1.registryId;
+logger$3.log(` Using Dokploy registry: ${config$1.registryId}`);
+} else {
+const storedRegistryId = await getDokployRegistryId();
+if (storedRegistryId) {
+registryOptions.registryId = storedRegistryId;
+logger$3.log(` Using stored Dokploy registry: ${storedRegistryId}`);
+} else if (config$1.registryCredentials) {
+registryOptions.username = config$1.registryCredentials.username;
+registryOptions.password = config$1.registryCredentials.password;
+registryOptions.registryUrl = config$1.registryCredentials.registryUrl;
+logger$3.log(` Using registry credentials for: ${config$1.registryCredentials.registryUrl}`);
+} else {
+const username = process.env.DOCKER_REGISTRY_USERNAME;
+const password = process.env.DOCKER_REGISTRY_PASSWORD;
+const registryUrl = process.env.DOCKER_REGISTRY_URL || config$1.registry;
+if (username && password && registryUrl) {
+registryOptions.username = username;
+registryOptions.password = password;
+registryOptions.registryUrl = registryUrl;
+logger$3.log(` Using registry credentials from environment`);
+}
+}
+}
+await api.saveDockerProvider(config$1.applicationId, imageRef, registryOptions);
+logger$3.log(" ✓ Docker provider configured");
+const envVars = {};
+if (masterKey) envVars.GKM_MASTER_KEY = masterKey;
+if (Object.keys(envVars).length > 0) {
+logger$3.log(" Updating environment variables...");
+const envString = Object.entries(envVars).map(([key, value]) => `${key}=${value}`).join("\n");
+await api.saveApplicationEnv(config$1.applicationId, envString);
+logger$3.log(" ✓ Environment variables updated");
+}
+logger$3.log(" Triggering deployment...");
+await api.deployApplication(config$1.applicationId);
+logger$3.log(" ✓ Deployment triggered");
+logger$3.log("\n✅ Dokploy deployment initiated!");
+logger$3.log(`\n📋 Deployment details:`);
+logger$3.log(` Image: ${imageRef}`);
+logger$3.log(` Stage: ${stage}`);
+logger$3.log(` Application ID: ${config$1.applicationId}`);
+if (masterKey) logger$3.log(`\n🔐 GKM_MASTER_KEY has been set in Dokploy environment`);
+const deploymentUrl = `${config$1.endpoint}/project/${config$1.projectId}`;
+logger$3.log(`\n🔗 View deployment: ${deploymentUrl}`);
+return {
+imageRef,
+masterKey,
+url: deploymentUrl
+};
+}
+
+//#endregion
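
Stripped of logging and registry fallbacks, the new `deployDokploy` reduces to three `DokployApi` calls; a condensed sketch, assuming a valid token (all ids and values here are hypothetical):

```js
// Condensed sketch of the deployDokploy sequence above (ids/token hypothetical).
const api = new DokployApi({ baseUrl: "https://dokploy.example.com", token: "dok_xxx" });
const applicationId = "app_456";
const imageRef = "ghcr.io/acme/api:production-2024-06-01T12-30-45";

// 1. Point the application at the freshly pushed image (registryId is optional).
await api.saveDockerProvider(applicationId, imageRef, { registryId: "reg_789" });
// 2. Save env vars such as GKM_MASTER_KEY as newline-separated KEY=value pairs.
await api.saveApplicationEnv(applicationId, "GKM_MASTER_KEY=example-master-key");
// 3. Trigger the deployment.
await api.deployApplication(applicationId);
```
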
+//#region src/deploy/init.ts
+const logger$2 = console;
+/**
+* Get the Dokploy API token from stored credentials or environment
+*/
+async function getApiToken() {
+const token = await getDokployToken();
+if (!token) throw new Error("Dokploy credentials not found.\nRun \"gkm login --service dokploy\" to authenticate, or set DOKPLOY_API_TOKEN.");
+return token;
+}
+/**
+* Get Dokploy endpoint from options or stored credentials
 */
-async function
-
+async function getEndpoint(providedEndpoint) {
+if (providedEndpoint) return providedEndpoint;
+const stored = await getDokployCredentials();
+if (stored) return stored.endpoint;
+throw new Error("Dokploy endpoint not specified.\nEither run \"gkm login --service dokploy\" first, or provide --endpoint.");
+}
+/**
+* Create a Dokploy API client
+*/
+async function createApi(endpoint) {
+const token = await getApiToken();
+return new DokployApi({
+baseUrl: endpoint,
+token
+});
 }
 /**
 * Update gkm.config.ts with Dokploy configuration
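
The shared helpers give the init and list commands one resolution order: an explicit `--endpoint` flag wins, otherwise the endpoint stored by `gkm login --service dokploy` is used. A usage sketch, assuming the helpers above are in scope:

```js
// Hypothetical usage of the shared helpers defined above.
const endpoint = await getEndpoint(undefined); // no flag -> stored credentials
const api = await createApi(endpoint);         // DokployApi with the stored token
console.log((await api.listProjects()).map((p) => p.name));
```
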
@@ -1670,25 +2352,18 @@ async function updateConfig(config$1, cwd = process.cwd()) {
 logger$2.log("\n Dokploy config already exists in gkm.config.ts");
 logger$2.log(" Updating with new values...");
 }
-
-
-endpoint: '${config$1.endpoint}',
-projectId: '${config$1.projectId}',
-applicationId: '${config$1.applicationId}',
-}`);
-else newContent = content.replace(/providers:\s*\{/, `providers: {
-dokploy: {
+const registryLine = config$1.registryId ? `\n\t\t\tregistryId: '${config$1.registryId}',` : "";
+const dokployConfigStr = `dokploy: {
 endpoint: '${config$1.endpoint}',
 projectId: '${config$1.projectId}',
-applicationId: '${config$1.applicationId}'
-}
+applicationId: '${config$1.applicationId}',${registryLine}
+}`;
+let newContent;
+if (content.includes("providers:")) if (content.includes("dokploy:")) newContent = content.replace(/dokploy:\s*\{[^}]*\}/s, dokployConfigStr);
+else newContent = content.replace(/providers:\s*\{/, `providers: {\n\t\t${dokployConfigStr},`);
 else newContent = content.replace(/}\s*\)\s*;?\s*$/, `
 providers: {
-
-endpoint: '${config$1.endpoint}',
-projectId: '${config$1.projectId}',
-applicationId: '${config$1.applicationId}',
-},
+${dokployConfigStr},
 },
 });`);
 await writeFile(configPath, newContent);
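
The rewritten `updateConfig` splices a single `dokployConfigStr` into whichever spot matches, instead of three hand-built blocks; the in-place replacement case behaves like this sketch (the config text and ids are hypothetical, the regex is the one from the bundle):

```js
// Replacing an existing dokploy block in gkm.config.ts (hypothetical content).
const content = `export default defineConfig({
	providers: {
		dokploy: {
			endpoint: 'https://old.example.com',
			projectId: 'old-project',
			applicationId: 'old-app',
		},
	},
});`;
const dokployConfigStr = `dokploy: {
			endpoint: 'https://dokploy.example.com',
			projectId: 'proj_123',
			applicationId: 'app_456',
		}`;
console.log(content.replace(/dokploy:\s*\{[^}]*\}/s, dokployConfigStr));
```

Note that `[^}]*` stops at the first closing brace, so this only works while the dokploy block itself contains no nested objects.
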
@@ -1699,42 +2374,46 @@ async function updateConfig(config$1, cwd = process.cwd()) {
 */
 async function deployInitCommand(options) {
 const { projectName, appName, projectId: existingProjectId, registryId } = options;
-
-
-const stored = await getDokployCredentials();
-if (stored) endpoint = stored.endpoint;
-else throw new Error("Dokploy endpoint not specified.\nEither run \"gkm login --service dokploy\" first, or provide --endpoint.");
-}
+const endpoint = await getEndpoint(options.endpoint);
+const api = await createApi(endpoint);
 logger$2.log(`\n🚀 Initializing Dokploy deployment...`);
 logger$2.log(` Endpoint: ${endpoint}`);
-const token = await getApiToken();
 let projectId;
 if (existingProjectId) {
 projectId = existingProjectId;
 logger$2.log(`\n📁 Using existing project: ${projectId}`);
 } else {
 logger$2.log(`\n📁 Looking for project: ${projectName}`);
-const projects = await
+const projects = await api.listProjects();
 const existingProject = projects.find((p) => p.name.toLowerCase() === projectName.toLowerCase());
 if (existingProject) {
 projectId = existingProject.projectId;
 logger$2.log(` Found existing project: ${projectId}`);
 } else {
 logger$2.log(` Creating new project...`);
-const
-projectId = project.projectId;
+const result = await api.createProject(projectName);
+projectId = result.project.projectId;
 logger$2.log(` ✓ Created project: ${projectId}`);
 }
 }
+const project = await api.getProject(projectId);
+let environmentId;
+const firstEnv = project.environments?.[0];
+if (firstEnv) environmentId = firstEnv.environmentId;
+else {
+logger$2.log(` Creating production environment...`);
+const env = await api.createEnvironment(projectId, "production");
+environmentId = env.environmentId;
+}
 logger$2.log(`\n📦 Creating application: ${appName}`);
-const application = await createApplication(
+const application = await api.createApplication(appName, projectId, environmentId);
 logger$2.log(` ✓ Created application: ${application.applicationId}`);
 if (registryId) {
 logger$2.log(`\n🔧 Configuring registry: ${registryId}`);
-await
+await api.updateApplication(application.applicationId, { registryId });
 logger$2.log(` ✓ Registry configured`);
 } else try {
-const registries = await
+const registries = await api.listRegistries();
 if (registries.length > 0) {
 logger$2.log(`\n📋 Available registries:`);
 for (const reg of registries) logger$2.log(` - ${reg.registryName}: ${reg.registryUrl} (${reg.registryId})`);
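
`deployInitCommand` now routes every remote call through the `DokployApi` client; the project lookup is the usual find-or-create pattern:

```js
// The find-or-create pattern used by deployInitCommand above (method names
// from the bundle; behavior sketched, not the exact implementation).
async function findOrCreateProject(api, projectName) {
	const projects = await api.listProjects();
	const existing = projects.find((p) => p.name.toLowerCase() === projectName.toLowerCase());
	if (existing) return existing.projectId;
	const result = await api.createProject(projectName);
	return result.project.projectId;
}
```

The same shape repeats for environments: reuse the project's first environment if one exists, otherwise create a "production" environment before creating the application.
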
@@ -1758,705 +2437,448 @@ async function deployInitCommand(options) {
 return config$1;
 }
 /**
-* List available Dokploy resources
-*/
-async function deployListCommand(options) {
-let endpoint = options.endpoint;
-if (!endpoint) {
-const stored = await getDokployCredentials();
-if (stored) endpoint = stored.endpoint;
-else throw new Error("Dokploy endpoint not specified.\nEither run \"gkm login --service dokploy\" first, or provide --endpoint.");
-}
-const { resource } = options;
-const token = await getApiToken();
-if (resource === "projects") {
-logger$2.log(`\n📁 Projects in ${endpoint}:`);
-const projects = await getProjects(endpoint, token);
-if (projects.length === 0) {
-logger$2.log(" No projects found");
-return;
-}
-for (const project of projects) {
-logger$2.log(`\n ${project.name} (${project.projectId})`);
-if (project.description) logger$2.log(` ${project.description}`);
-}
-} else if (resource === "registries") {
-logger$2.log(`\n🐳 Registries in ${endpoint}:`);
-const registries = await getRegistries(endpoint, token);
-if (registries.length === 0) {
-logger$2.log(" No registries configured");
-logger$2.log(" Add a registry in Dokploy: Settings > Docker Registry");
-return;
-}
-for (const registry of registries) {
-logger$2.log(`\n ${registry.registryName} (${registry.registryId})`);
-logger$2.log(` URL: ${registry.registryUrl}`);
-logger$2.log(` Username: ${registry.username}`);
-if (registry.imagePrefix) logger$2.log(` Prefix: ${registry.imagePrefix}`);
-}
-}
-}
-
-//#endregion
-//#region src/docker/compose.ts
-/** Default Docker images for services */
-const DEFAULT_SERVICE_IMAGES = {
-postgres: "postgres",
-redis: "redis",
-rabbitmq: "rabbitmq"
-};
-/** Default Docker image versions for services */
-const DEFAULT_SERVICE_VERSIONS = {
-postgres: "16-alpine",
-redis: "7-alpine",
-rabbitmq: "3-management-alpine"
-};
-/** Get the default full image reference for a service */
-function getDefaultImage(serviceName) {
-return `${DEFAULT_SERVICE_IMAGES[serviceName]}:${DEFAULT_SERVICE_VERSIONS[serviceName]}`;
-}
-/** Normalize services config to a consistent format - returns Map of service name to full image reference */
-function normalizeServices(services) {
-const result = /* @__PURE__ */ new Map();
-if (Array.isArray(services)) for (const name$1 of services) result.set(name$1, getDefaultImage(name$1));
-else for (const [name$1, config$1] of Object.entries(services)) {
-const serviceName = name$1;
-if (config$1 === true) result.set(serviceName, getDefaultImage(serviceName));
-else if (config$1 && typeof config$1 === "object") {
-const serviceConfig = config$1;
-if (serviceConfig.image) result.set(serviceName, serviceConfig.image);
-else {
-const version$1 = serviceConfig.version ?? DEFAULT_SERVICE_VERSIONS[serviceName];
-result.set(serviceName, `${DEFAULT_SERVICE_IMAGES[serviceName]}:${version$1}`);
-}
-}
-}
-return result;
-}
-/**
-* Generate docker-compose.yml for production deployment
-*/
-function generateDockerCompose(options) {
-const { imageName, registry, port, healthCheckPath, services } = options;
-const serviceMap = normalizeServices(services);
-const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
-let yaml = `version: '3.8'
-
-services:
-api:
-build:
-context: ../..
-dockerfile: .gkm/docker/Dockerfile
-image: ${imageRef}\${IMAGE_NAME:-${imageName}}:\${TAG:-latest}
-container_name: ${imageName}
-restart: unless-stopped
-ports:
-- "\${PORT:-${port}}:${port}"
-environment:
-- NODE_ENV=production
-`;
-if (serviceMap.has("postgres")) yaml += ` - DATABASE_URL=\${DATABASE_URL:-postgresql://postgres:postgres@postgres:5432/app}
-`;
-if (serviceMap.has("redis")) yaml += ` - REDIS_URL=\${REDIS_URL:-redis://redis:6379}
-`;
-if (serviceMap.has("rabbitmq")) yaml += ` - RABBITMQ_URL=\${RABBITMQ_URL:-amqp://rabbitmq:5672}
-`;
-yaml += ` healthcheck:
-test: ["CMD", "wget", "-q", "--spider", "http://localhost:${port}${healthCheckPath}"]
-interval: 30s
-timeout: 3s
-retries: 3
-`;
-if (serviceMap.size > 0) {
-yaml += ` depends_on:
-`;
-for (const serviceName of serviceMap.keys()) yaml += ` ${serviceName}:
-condition: service_healthy
-`;
-}
-yaml += ` networks:
-- app-network
-`;
-const postgresImage = serviceMap.get("postgres");
-if (postgresImage) yaml += `
-postgres:
-image: ${postgresImage}
-container_name: postgres
-restart: unless-stopped
-environment:
-POSTGRES_USER: \${POSTGRES_USER:-postgres}
-POSTGRES_PASSWORD: \${POSTGRES_PASSWORD:-postgres}
-POSTGRES_DB: \${POSTGRES_DB:-app}
-volumes:
-- postgres_data:/var/lib/postgresql/data
-healthcheck:
-test: ["CMD-SHELL", "pg_isready -U postgres"]
-interval: 5s
-timeout: 5s
-retries: 5
-networks:
-- app-network
-`;
-const redisImage = serviceMap.get("redis");
-if (redisImage) yaml += `
-redis:
-image: ${redisImage}
-container_name: redis
-restart: unless-stopped
-volumes:
-- redis_data:/data
-healthcheck:
-test: ["CMD", "redis-cli", "ping"]
-interval: 5s
-timeout: 5s
-retries: 5
-networks:
-- app-network
-`;
-const rabbitmqImage = serviceMap.get("rabbitmq");
-if (rabbitmqImage) yaml += `
-rabbitmq:
-image: ${rabbitmqImage}
-container_name: rabbitmq
-restart: unless-stopped
-environment:
-RABBITMQ_DEFAULT_USER: \${RABBITMQ_USER:-guest}
-RABBITMQ_DEFAULT_PASS: \${RABBITMQ_PASSWORD:-guest}
-ports:
-- "15672:15672" # Management UI
-volumes:
-- rabbitmq_data:/var/lib/rabbitmq
-healthcheck:
-test: ["CMD", "rabbitmq-diagnostics", "-q", "ping"]
-interval: 10s
-timeout: 5s
-retries: 5
-networks:
-- app-network
-`;
-yaml += `
-volumes:
-`;
-if (serviceMap.has("postgres")) yaml += ` postgres_data:
-`;
-if (serviceMap.has("redis")) yaml += ` redis_data:
-`;
-if (serviceMap.has("rabbitmq")) yaml += ` rabbitmq_data:
-`;
-yaml += `
-networks:
-app-network:
-driver: bridge
-`;
-return yaml;
-}
-/**
-* Generate a minimal docker-compose.yml for API only
-*/
-function generateMinimalDockerCompose(options) {
-const { imageName, registry, port, healthCheckPath } = options;
-const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
-return `version: '3.8'
-
-services:
-api:
-build:
-context: ../..
-dockerfile: .gkm/docker/Dockerfile
-image: ${imageRef}\${IMAGE_NAME:-${imageName}}:\${TAG:-latest}
-container_name: ${imageName}
-restart: unless-stopped
-ports:
-- "\${PORT:-${port}}:${port}"
-environment:
-- NODE_ENV=production
-healthcheck:
-test: ["CMD", "wget", "-q", "--spider", "http://localhost:${port}${healthCheckPath}"]
-interval: 30s
-timeout: 3s
-retries: 3
-networks:
-- app-network
-
-networks:
-app-network:
-driver: bridge
-`;
-}
-
-//#endregion
-//#region src/docker/templates.ts
-/**
-* Detect package manager from lockfiles
-* Walks up the directory tree to find lockfile (for monorepos)
-*/
-function detectPackageManager$1(cwd = process.cwd()) {
-const lockfiles = [
-["pnpm-lock.yaml", "pnpm"],
-["bun.lockb", "bun"],
-["yarn.lock", "yarn"],
-["package-lock.json", "npm"]
-];
-let dir = cwd;
-const root = parse(dir).root;
-while (dir !== root) {
-for (const [lockfile, pm] of lockfiles) if (existsSync(join(dir, lockfile))) return pm;
-dir = dirname(dir);
-}
-for (const [lockfile, pm] of lockfiles) if (existsSync(join(root, lockfile))) return pm;
-return "pnpm";
-}
-/**
-* Get package manager specific commands and paths
-*/
-function getPmConfig(pm) {
-const configs = {
-pnpm: {
-install: "corepack enable && corepack prepare pnpm@latest --activate",
-lockfile: "pnpm-lock.yaml",
-fetch: "pnpm fetch",
-installCmd: "pnpm install --frozen-lockfile --offline",
-cacheTarget: "/root/.local/share/pnpm/store",
-cacheId: "pnpm",
-run: "pnpm",
-addGlobal: "pnpm add -g"
-},
-npm: {
-install: "",
-lockfile: "package-lock.json",
-fetch: "",
-installCmd: "npm ci",
-cacheTarget: "/root/.npm",
-cacheId: "npm",
-run: "npm run",
-addGlobal: "npm install -g"
-},
-yarn: {
-install: "corepack enable && corepack prepare yarn@stable --activate",
-lockfile: "yarn.lock",
-fetch: "",
-installCmd: "yarn install --frozen-lockfile",
-cacheTarget: "/root/.yarn/cache",
-cacheId: "yarn",
-run: "yarn",
-addGlobal: "yarn global add"
-},
-bun: {
-install: "npm install -g bun",
-lockfile: "bun.lockb",
-fetch: "",
-installCmd: "bun install --frozen-lockfile",
-cacheTarget: "/root/.bun/install/cache",
-cacheId: "bun",
-run: "bun run",
-addGlobal: "bun add -g"
-}
-};
-return configs[pm];
-}
-/**
-* Generate a multi-stage Dockerfile for building from source
-* Optimized for build speed with:
-* - BuildKit cache mounts for package manager store
-* - pnpm fetch for better layer caching (when using pnpm)
-* - Optional turbo prune for monorepos
-*/
-function generateMultiStageDockerfile(options) {
-const { baseImage, port, healthCheckPath, turbo, turboPackage, packageManager } = options;
-if (turbo) return generateTurboDockerfile({
-...options,
-turboPackage: turboPackage ?? "api"
-});
-const pm = getPmConfig(packageManager);
-const installPm = pm.install ? `\n# Install ${packageManager}\nRUN ${pm.install}\n` : "";
-const hasFetch = packageManager === "pnpm";
-const depsStage = hasFetch ? `# Copy lockfile first for better caching
-COPY ${pm.lockfile} ./
-
-# Fetch dependencies (downloads to virtual store, cached separately)
-RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
-${pm.fetch}
-
-# Copy package.json after fetch
-COPY package.json ./
-
-# Install from cache (fast - no network needed)
-RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
-${pm.installCmd}` : `# Copy package files
-COPY package.json ${pm.lockfile} ./
-
-# Install dependencies with cache
-RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
-${pm.installCmd}`;
-return `# syntax=docker/dockerfile:1
-# Stage 1: Dependencies
-FROM ${baseImage} AS deps
-
-WORKDIR /app
-${installPm}
-${depsStage}
-
-# Stage 2: Build
-FROM deps AS builder
-
-WORKDIR /app
-
-# Copy source (deps already installed)
-COPY . .
-
-# Build production server
-RUN ${pm.run} gkm build --provider server --production
-
-# Stage 3: Production
-FROM ${baseImage} AS runner
-
-WORKDIR /app
-
-# Install tini for proper signal handling as PID 1
-RUN apk add --no-cache tini
-
-# Create non-root user
-RUN addgroup --system --gid 1001 nodejs && \\
-adduser --system --uid 1001 hono
-
-# Copy bundled server
-COPY --from=builder --chown=hono:nodejs /app/.gkm/server/dist/server.mjs ./
-
-# Environment
-ENV NODE_ENV=production
-ENV PORT=${port}
-
-# Health check
-HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \\
-CMD wget -q --spider http://localhost:${port}${healthCheckPath} || exit 1
-
-# Switch to non-root user
-USER hono
-
-EXPOSE ${port}
-
-# Use tini as entrypoint to handle PID 1 responsibilities
-ENTRYPOINT ["/sbin/tini", "--"]
-CMD ["node", "server.mjs"]
-`;
-}
-/**
-* Generate a Dockerfile optimized for Turbo monorepos
-* Uses turbo prune to create minimal Docker context
-*/
-function generateTurboDockerfile(options) {
-const { baseImage, port, healthCheckPath, turboPackage, packageManager } = options;
-const pm = getPmConfig(packageManager);
-const installPm = pm.install ? `RUN ${pm.install}` : "";
-const hasFetch = packageManager === "pnpm";
-const depsInstall = hasFetch ? `# Fetch and install from cache
-RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
-${pm.fetch}
-
-RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
-${pm.installCmd}` : `# Install dependencies with cache
-RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
-${pm.installCmd}`;
-return `# syntax=docker/dockerfile:1
-# Stage 1: Prune monorepo
-FROM ${baseImage} AS pruner
-
-WORKDIR /app
-
-${installPm}
-RUN ${pm.addGlobal} turbo
-
-COPY . .
-
-# Prune to only include necessary packages
-RUN turbo prune ${turboPackage} --docker
-
-# Stage 2: Install dependencies
-FROM ${baseImage} AS deps
-
-WORKDIR /app
-
-${installPm}
-
-# Copy pruned lockfile and package.jsons
-COPY --from=pruner /app/out/${pm.lockfile} ./
-COPY --from=pruner /app/out/json/ ./
-
-${depsInstall}
-
-# Stage 3: Build
-FROM deps AS builder
-
-WORKDIR /app
-
-# Copy pruned source
-COPY --from=pruner /app/out/full/ ./
-
-# Build production server
-RUN ${pm.run} gkm build --provider server --production
-
-# Stage 4: Production
-FROM ${baseImage} AS runner
-
-WORKDIR /app
-
-RUN apk add --no-cache tini
-
-RUN addgroup --system --gid 1001 nodejs && \\
-adduser --system --uid 1001 hono
-
-COPY --from=builder --chown=hono:nodejs /app/.gkm/server/dist/server.mjs ./
-
-ENV NODE_ENV=production
-ENV PORT=${port}
-
-HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \\
-CMD wget -q --spider http://localhost:${port}${healthCheckPath} || exit 1
-
-USER hono
-
-EXPOSE ${port}
-
-ENTRYPOINT ["/sbin/tini", "--"]
-CMD ["node", "server.mjs"]
-`;
-}
-/**
-* Generate a slim Dockerfile for pre-built bundles
-*/
-function generateSlimDockerfile(options) {
-const { baseImage, port, healthCheckPath } = options;
-return `# Slim Dockerfile for pre-built production bundle
-FROM ${baseImage}
-
-WORKDIR /app
-
-# Install tini for proper signal handling as PID 1
-# Handles SIGTERM propagation and zombie process reaping
-RUN apk add --no-cache tini
-
-# Create non-root user
-RUN addgroup --system --gid 1001 nodejs && \\
-adduser --system --uid 1001 hono
-
-# Copy pre-built bundle
-COPY .gkm/server/dist/server.mjs ./
-
-# Environment
-ENV NODE_ENV=production
-ENV PORT=${port}
-
-# Health check
-HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \\
-CMD wget -q --spider http://localhost:${port}${healthCheckPath} || exit 1
-
-# Switch to non-root user
-USER hono
-
-EXPOSE ${port}
-
-# Use tini as entrypoint to handle PID 1 responsibilities
-ENTRYPOINT ["/sbin/tini", "--"]
-CMD ["node", "server.mjs"]
-`;
-}
-/**
-* Generate .dockerignore file
-*/
-function generateDockerignore() {
-return `# Dependencies
-node_modules
-.pnpm-store
-
-# Build output (except what we need)
-.gkm/aws*
-.gkm/server/*.ts
-!.gkm/server/dist
-
-# IDE and editor
-.idea
-.vscode
-*.swp
-*.swo
-
-# Git
-.git
-.gitignore
-
-# Logs
-*.log
-npm-debug.log*
-pnpm-debug.log*
-
-# Test files
-**/*.test.ts
-**/*.spec.ts
-**/__tests__
-coverage
-
-# Documentation
-docs
-*.md
-!README.md
-
-# Environment files (handle secrets separately)
-.env
-.env.*
-!.env.example
-
-# Docker files (don't copy recursively)
-Dockerfile*
-docker-compose*
-.dockerignore
-`;
-}
-/**
-* Generate docker-entrypoint.sh for custom startup logic
-*/
-function generateDockerEntrypoint() {
-return `#!/bin/sh
-set -e
-
-# Run any custom startup scripts here
-# Example: wait for database
-# until nc -z $DB_HOST $DB_PORT; do
-# echo "Waiting for database..."
-# sleep 1
-# done
-
-# Execute the main command
-exec "$@"
-`;
-}
-/**
-* Resolve Docker configuration from GkmConfig with defaults
+* List available Dokploy resources
 */
-function
-const
-
-
-
-
-
-
-
-
-
-
-
-
+async function deployListCommand(options) {
+const endpoint = await getEndpoint(options.endpoint);
+const api = await createApi(endpoint);
+const { resource } = options;
+if (resource === "projects") {
+logger$2.log(`\n📁 Projects in ${endpoint}:`);
+const projects = await api.listProjects();
+if (projects.length === 0) {
+logger$2.log(" No projects found");
+return;
+}
+for (const project of projects) {
+logger$2.log(`\n ${project.name} (${project.projectId})`);
+if (project.description) logger$2.log(` ${project.description}`);
+}
+} else if (resource === "registries") {
+logger$2.log(`\n🐳 Registries in ${endpoint}:`);
+const registries = await api.listRegistries();
+if (registries.length === 0) {
+logger$2.log(" No registries configured");
+logger$2.log(" Run \"gkm registry:setup\" to configure a registry");
+return;
+}
+const storedRegistryId = await getDokployRegistryId();
+for (const registry of registries) {
+const isDefault = registry.registryId === storedRegistryId;
+const marker = isDefault ? " (default)" : "";
+logger$2.log(`\n ${registry.registryName}${marker} (${registry.registryId})`);
+logger$2.log(` URL: ${registry.registryUrl}`);
+logger$2.log(` Username: ${registry.username}`);
+if (registry.imagePrefix) logger$2.log(` Prefix: ${registry.imagePrefix}`);
+}
+}
 }
 
 //#endregion
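
The rewritten `deployListCommand` also flags whichever registry id is stored locally as the default; the marker logic in isolation (ids hypothetical):

```js
// The "(default)" marker logic from the new deployListCommand (ids hypothetical).
const storedRegistryId = "reg_789";
const registries = [
	{ registryId: "reg_123", registryName: "ghcr", registryUrl: "ghcr.io" },
	{ registryId: "reg_789", registryName: "dockerhub", registryUrl: "docker.io" },
];
for (const registry of registries) {
	const marker = registry.registryId === storedRegistryId ? " (default)" : "";
	console.log(`${registry.registryName}${marker} (${registry.registryId})`);
}
// -> ghcr (reg_123)
// -> dockerhub (default) (reg_789)
```
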
-//#region src/
+//#region src/deploy/index.ts
 const logger$1 = console;
 /**
-*
-* Generates Dockerfile, docker-compose.yml, and related files
-*
-* Default: Multi-stage Dockerfile that builds from source inside Docker
-* --slim: Slim Dockerfile that copies pre-built bundle (requires prior build)
+* Prompt for input
 */
-async function
-
-
-
-
-
-
-
-
-
+async function prompt(message, hidden = false) {
+if (!process.stdin.isTTY) throw new Error("Interactive input required. Please configure manually.");
+if (hidden) {
+process.stdout.write(message);
+return new Promise((resolve$1) => {
+let value = "";
+const onData = (char) => {
+const c = char.toString();
+if (c === "\n" || c === "\r") {
+process.stdin.setRawMode(false);
+process.stdin.pause();
+process.stdin.removeListener("data", onData);
+process.stdout.write("\n");
+resolve$1(value);
+} else if (c === "") {
+process.stdin.setRawMode(false);
+process.stdin.pause();
+process.stdout.write("\n");
+process.exit(1);
+} else if (c === "" || c === "\b") {
+if (value.length > 0) value = value.slice(0, -1);
+} else value += c;
+};
+process.stdin.setRawMode(true);
+process.stdin.resume();
+process.stdin.on("data", onData);
+});
 }
-const
-
-
-
-
-
-
-
-
-
-
+const rl = readline.createInterface({
+input: stdin,
+output: stdout
+});
+try {
+return await rl.question(message);
+} finally {
+rl.close();
+}
+}
+/**
+* Provision docker compose services in Dokploy
+* @internal Exported for testing
+*/
+async function provisionServices(api, projectId, environmentId, appName, services, existingUrls) {
+logger$1.log(`\n🔍 provisionServices called: services=${JSON.stringify(services)}, envId=${environmentId}`);
+if (!services || !environmentId) {
+logger$1.log(" Skipping: no services or no environmentId");
+return void 0;
+}
+const serviceUrls = {};
+if (services.postgres) if (existingUrls?.DATABASE_URL) logger$1.log("\n🐘 PostgreSQL: Already configured (skipping)");
+else {
+logger$1.log("\n🐘 Provisioning PostgreSQL...");
+const postgresName = `${appName}-db`;
+try {
+const { randomBytes: randomBytes$1 } = await import("node:crypto");
+const databasePassword = randomBytes$1(16).toString("hex");
+const postgres = await api.createPostgres(postgresName, projectId, environmentId, { databasePassword });
+logger$1.log(` ✓ Created PostgreSQL: ${postgres.postgresId}`);
+await api.deployPostgres(postgres.postgresId);
+logger$1.log(" ✓ PostgreSQL deployed");
+serviceUrls.DATABASE_HOST = postgres.appName;
+serviceUrls.DATABASE_PORT = "5432";
+serviceUrls.DATABASE_NAME = postgres.databaseName;
+serviceUrls.DATABASE_USER = postgres.databaseUser;
+serviceUrls.DATABASE_PASSWORD = postgres.databasePassword;
+serviceUrls.DATABASE_URL = `postgresql://${postgres.databaseUser}:${postgres.databasePassword}@${postgres.appName}:5432/${postgres.databaseName}`;
+logger$1.log(` ✓ Database credentials configured`);
+} catch (error) {
+const message = error instanceof Error ? error.message : "Unknown error";
+if (message.includes("already exists") || message.includes("duplicate")) logger$1.log(` ℹ PostgreSQL already exists`);
+else logger$1.log(` ⚠ Failed to provision PostgreSQL: ${message}`);
+}
+}
+if (services.redis) if (existingUrls?.REDIS_URL) logger$1.log("\n🔴 Redis: Already configured (skipping)");
+else {
+logger$1.log("\n🔴 Provisioning Redis...");
+const redisName = `${appName}-cache`;
+try {
+const { randomBytes: randomBytes$1 } = await import("node:crypto");
+const databasePassword = randomBytes$1(16).toString("hex");
+const redis = await api.createRedis(redisName, projectId, environmentId, { databasePassword });
+logger$1.log(` ✓ Created Redis: ${redis.redisId}`);
+await api.deployRedis(redis.redisId);
+logger$1.log(" ✓ Redis deployed");
+serviceUrls.REDIS_HOST = redis.appName;
+serviceUrls.REDIS_PORT = "6379";
+if (redis.databasePassword) serviceUrls.REDIS_PASSWORD = redis.databasePassword;
+const password = redis.databasePassword ? `:${redis.databasePassword}@` : "";
+serviceUrls.REDIS_URL = `redis://${password}${redis.appName}:6379`;
+logger$1.log(` ✓ Redis credentials configured`);
+} catch (error) {
+const message = error instanceof Error ? error.message : "Unknown error";
+if (message.includes("already exists") || message.includes("duplicate")) logger$1.log(` ℹ Redis already exists`);
+else logger$1.log(` ⚠ Failed to provision Redis: ${message}`);
+}
+}
+return Object.keys(serviceUrls).length > 0 ? serviceUrls : void 0;
+}
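
When it provisions PostgreSQL, `provisionServices` generates a random password and derives the connection URL from the created service's fields; in isolation (the concrete values here are hypothetical):

```js
// How provisionServices assembles DATABASE_URL (field names from the bundle;
// the values are hypothetical).
const postgres = {
	appName: "myapp-db", // Dokploy service hostname on the internal network
	databaseUser: "postgres",
	databasePassword: "3f9c0a1b2d4e5f60",
	databaseName: "myapp",
};
const DATABASE_URL = `postgresql://${postgres.databaseUser}:${postgres.databasePassword}@${postgres.appName}:5432/${postgres.databaseName}`;
console.log(DATABASE_URL);
// -> postgresql://postgres:3f9c0a1b2d4e5f60@myapp-db:5432/myapp
```
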
+/**
+* Ensure Dokploy is fully configured, recovering/creating resources as needed
+*/
+async function ensureDokploySetup(config$1, dockerConfig, stage, services) {
+logger$1.log("\n🔧 Checking Dokploy setup...");
+const { readStageSecrets: readStageSecrets$1 } = await import("./storage-nkGIjeXt.mjs");
+const existingSecrets = await readStageSecrets$1(stage);
+const existingUrls = {
+DATABASE_URL: existingSecrets?.urls?.DATABASE_URL,
+REDIS_URL: existingSecrets?.urls?.REDIS_URL
 };
-
-
-
-
-
-
-
-
-
-
-
+let creds = await getDokployCredentials();
+if (!creds) {
+logger$1.log("\n📋 Dokploy credentials not found. Let's set them up.");
+const endpoint = await prompt("Dokploy URL (e.g., https://dokploy.example.com): ");
+const normalizedEndpoint = endpoint.replace(/\/$/, "");
+try {
+new URL(normalizedEndpoint);
+} catch {
+throw new Error("Invalid URL format");
+}
+logger$1.log(`\nGenerate a token at: ${normalizedEndpoint}/settings/profile\n`);
+const token = await prompt("API Token: ", true);
+logger$1.log("\nValidating credentials...");
+const isValid = await validateDokployToken(normalizedEndpoint, token);
+if (!isValid) throw new Error("Invalid credentials. Please check your token.");
+await storeDokployCredentials(token, normalizedEndpoint);
+creds = {
+token,
+endpoint: normalizedEndpoint
+};
+logger$1.log("✓ Credentials saved");
+}
+const api = new DokployApi({
+baseUrl: creds.endpoint,
+token: creds.token
+});
+const existingConfig = config$1.providers?.dokploy;
+if (existingConfig && typeof existingConfig !== "boolean" && existingConfig.applicationId && existingConfig.projectId) {
+logger$1.log("✓ Dokploy config found in gkm.config.ts");
+try {
+const projectDetails = await api.getProject(existingConfig.projectId);
+logger$1.log("✓ Project verified");
+const storedRegistryId = existingConfig.registryId ?? await getDokployRegistryId();
+const environments = projectDetails.environments ?? [];
+let environment = environments.find((e) => e.name.toLowerCase() === stage.toLowerCase());
+if (!environment) {
+logger$1.log(` Creating "${stage}" environment...`);
+environment = await api.createEnvironment(existingConfig.projectId, stage);
+logger$1.log(` ✓ Created environment: ${environment.environmentId}`);
+}
+const environmentId$1 = environment.environmentId;
+logger$1.log(` Services config: ${JSON.stringify(services)}, envId: ${environmentId$1}`);
+const serviceUrls$1 = await provisionServices(api, existingConfig.projectId, environmentId$1, dockerConfig.appName, services, existingUrls);
+return {
+config: {
+endpoint: existingConfig.endpoint,
+projectId: existingConfig.projectId,
+applicationId: existingConfig.applicationId,
+registry: existingConfig.registry,
+registryId: storedRegistryId ?? void 0
+},
+serviceUrls: serviceUrls$1
+};
+} catch {
+logger$1.log("⚠ Project not found, will recover...");
+}
+}
+logger$1.log("\n📁 Looking for project...");
+const projectName = dockerConfig.projectName;
+const projects = await api.listProjects();
+let project = projects.find((p) => p.name.toLowerCase() === projectName.toLowerCase());
+let environmentId;
+if (project) {
+logger$1.log(` Found existing project: ${project.name} (${project.projectId})`);
+const projectDetails = await api.getProject(project.projectId);
+const environments = projectDetails.environments ?? [];
+const matchingEnv = environments.find((e) => e.name.toLowerCase() === stage.toLowerCase());
+if (matchingEnv) {
+environmentId = matchingEnv.environmentId;
+logger$1.log(` Using environment: ${matchingEnv.name}`);
+} else {
+logger$1.log(` Creating "${stage}" environment...`);
+const env = await api.createEnvironment(project.projectId, stage);
+environmentId = env.environmentId;
+logger$1.log(` ✓ Created environment: ${stage}`);
+}
+} else {
+logger$1.log(` Creating project: ${projectName}`);
+const result = await api.createProject(projectName);
+project = result.project;
+if (result.environment.name.toLowerCase() !== stage.toLowerCase()) {
+logger$1.log(` Creating "${stage}" environment...`);
+const env = await api.createEnvironment(project.projectId, stage);
+environmentId = env.environmentId;
+} else environmentId = result.environment.environmentId;
+logger$1.log(` ✓ Created project: ${project.projectId}`);
+logger$1.log(` ✓ Using environment: ${stage}`);
+}
+logger$1.log("\n📦 Looking for application...");
+const appName = dockerConfig.appName;
+let applicationId;
+if (existingConfig && typeof existingConfig !== "boolean" && existingConfig.applicationId) {
+applicationId = existingConfig.applicationId;
+logger$1.log(` Using application from config: ${applicationId}`);
+} else {
+logger$1.log(` Creating application: ${appName}`);
+const app = await api.createApplication(appName, project.projectId, environmentId);
+applicationId = app.applicationId;
+logger$1.log(` ✓ Created application: ${applicationId}`);
+}
+logger$1.log("\n🐳 Checking registry...");
+let registryId = await getDokployRegistryId();
+if (registryId) try {
+const registry = await api.getRegistry(registryId);
+logger$1.log(` Using registry: ${registry.registryName}`);
+} catch {
+logger$1.log(" ⚠ Stored registry not found, clearing...");
+registryId = void 0;
+await storeDokployRegistryId("");
+}
+if (!registryId) {
+const registries = await api.listRegistries();
+if (registries.length === 0) if (dockerConfig.registry) {
+logger$1.log(" No registries found in Dokploy. Let's create one.");
+logger$1.log(` Registry URL: ${dockerConfig.registry}`);
+const username = await prompt("Registry username: ");
+const password = await prompt("Registry password/token: ", true);
+const registry = await api.createRegistry("Default Registry", dockerConfig.registry, username, password);
+registryId = registry.registryId;
+await storeDokployRegistryId(registryId);
+logger$1.log(` ✓ Registry created: ${registryId}`);
+} else logger$1.log(" ⚠ No registry configured. Set docker.registry in gkm.config.ts");
+else {
+logger$1.log(" Available registries:");
+registries.forEach((reg, i) => {
+logger$1.log(` ${i + 1}. ${reg.registryName} (${reg.registryUrl})`);
+});
+if (dockerConfig.registry) logger$1.log(` ${registries.length + 1}. Create new registry`);
+const maxOption = dockerConfig.registry ? registries.length + 1 : registries.length;
+const selection = await prompt(` Select registry (1-${maxOption}): `);
+const index = parseInt(selection, 10) - 1;
+if (index >= 0 && index < registries.length) {
+registryId = registries[index].registryId;
+await storeDokployRegistryId(registryId);
+logger$1.log(` ✓ Selected: ${registries[index].registryName}`);
+} else if (dockerConfig.registry && index === registries.length) {
+logger$1.log(`\n Creating new registry...`);
+logger$1.log(` Registry URL: ${dockerConfig.registry}`);
+const username = await prompt(" Registry username: ");
+const password = await prompt(" Registry password/token: ", true);
+const registry = await api.createRegistry(dockerConfig.registry.replace(/^https?:\/\//, ""), dockerConfig.registry, username, password);
+registryId = registry.registryId;
+await storeDokployRegistryId(registryId);
+logger$1.log(` ✓ Registry created: ${registryId}`);
+} else logger$1.log(" ⚠ Invalid selection, skipping registry setup");
+}
+}
+const dokployConfig = {
+endpoint: creds.endpoint,
+projectId: project.projectId,
+applicationId,
+registryId: registryId ?? void 0
 };
-
-
-
-
-logger$1.log(
-const
-
-
-
-const entrypoint = generateDockerEntrypoint();
-const entrypointPath = join(dockerDir, "docker-entrypoint.sh");
-await writeFile(entrypointPath, entrypoint);
-logger$1.log("Generated: .gkm/docker/docker-entrypoint.sh");
-const result = {
-dockerfile: dockerfilePath,
-dockerCompose: composePath,
-dockerignore: dockerignorePath,
-entrypoint: entrypointPath
+await updateConfig(dokployConfig);
+logger$1.log("\n✅ Dokploy setup complete!");
+logger$1.log(` Project: ${project.projectId}`);
+logger$1.log(` Application: ${applicationId}`);
+if (registryId) logger$1.log(` Registry: ${registryId}`);
+const serviceUrls = await provisionServices(api, project.projectId, environmentId, dockerConfig.appName, services, existingUrls);
+return {
+config: dokployConfig,
+serviceUrls
 };
-if (options.build) await buildDockerImage(dockerConfig.imageName, options);
-if (options.push) await pushDockerImage(dockerConfig.imageName, options);
-return result;
 }
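
After a successful run, `ensureDokploySetup` persists its result through `updateConfig`, so the provider block in gkm.config.ts ends up with this shape (the surrounding `defineConfig` wrapper and all ids are hypothetical):

```js
// Hypothetical gkm.config.ts excerpt after ensureDokploySetup + updateConfig.
export default defineConfig({
	providers: {
		dokploy: {
			endpoint: 'https://dokploy.example.com',
			projectId: 'proj_123',
			applicationId: 'app_456',
			registryId: 'reg_789',
		},
	},
});
```
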
 /**
-*
-* Uses BuildKit for cache mount support
+* Generate image tag from stage and timestamp
 */
-
-const
-
-const fullImageName = registry ? `${registry}/${imageName}:${tag}` : `${imageName}:${tag}`;
-logger$1.log(`\n🐳 Building Docker image: ${fullImageName}`);
-try {
-execSync(`DOCKER_BUILDKIT=1 docker build -f .gkm/docker/Dockerfile -t ${fullImageName} .`, {
-cwd: process.cwd(),
-stdio: "inherit",
-env: {
-...process.env,
-DOCKER_BUILDKIT: "1"
-}
-});
-logger$1.log(`✅ Docker image built: ${fullImageName}`);
-} catch (error) {
-throw new Error(`Failed to build Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
-}
+function generateTag(stage) {
+const timestamp = (/* @__PURE__ */ new Date()).toISOString().replace(/[:.]/g, "-").slice(0, 19);
+return `${stage}-${timestamp}`;
 }
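
`generateTag` is deterministic for a fixed clock: the ISO timestamp with `:` and `.` replaced by `-`, truncated to seconds, prefixed by the stage:

```js
// generateTag, traced for a fixed date.
const iso = new Date("2024-06-01T12:30:45.000Z").toISOString(); // "2024-06-01T12:30:45.000Z"
const timestamp = iso.replace(/[:.]/g, "-").slice(0, 19);       // "2024-06-01T12-30-45"
console.log(`production-${timestamp}`);                         // "production-2024-06-01T12-30-45"
```
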
 /**
-*
+* Main deploy command
 */
-async function
-const tag = options
-
-
-const
-
-
-
-
-
+async function deployCommand(options) {
+const { provider, stage, tag, skipPush, skipBuild } = options;
+logger$1.log(`\n🚀 Deploying to ${provider}...`);
+logger$1.log(` Stage: ${stage}`);
+const config$1 = await loadConfig();
+const imageTag = tag ?? generateTag(stage);
+logger$1.log(` Tag: ${imageTag}`);
+const dockerConfig = resolveDockerConfig(config$1);
+const imageName = dockerConfig.imageName;
+const registry = dockerConfig.registry;
+const imageRef = registry ? `${registry}/${imageName}:${imageTag}` : `${imageName}:${imageTag}`;
+let dokployConfig;
+let finalRegistry = registry;
+if (provider === "dokploy") {
+const composeServices = config$1.docker?.compose?.services;
+logger$1.log(`\n🔍 Docker compose config: ${JSON.stringify(config$1.docker?.compose)}`);
+const dockerServices = composeServices ? Array.isArray(composeServices) ? {
+postgres: composeServices.includes("postgres"),
+redis: composeServices.includes("redis"),
+rabbitmq: composeServices.includes("rabbitmq")
+} : {
+postgres: Boolean(composeServices.postgres),
+redis: Boolean(composeServices.redis),
+rabbitmq: Boolean(composeServices.rabbitmq)
+} : void 0;
+const setupResult = await ensureDokploySetup(config$1, dockerConfig, stage, dockerServices);
+dokployConfig = setupResult.config;
+finalRegistry = dokployConfig.registry ?? dockerConfig.registry;
+if (setupResult.serviceUrls) {
+const { readStageSecrets: readStageSecrets$1, writeStageSecrets: writeStageSecrets$1, initStageSecrets } = await import("./storage-nkGIjeXt.mjs");
+let secrets = await readStageSecrets$1(stage);
+if (!secrets) {
+logger$1.log(` Creating secrets file for stage "${stage}"...`);
+secrets = initStageSecrets(stage);
+}
+let updated = false;
+const urlFields = [
+"DATABASE_URL",
+"REDIS_URL",
+"RABBITMQ_URL"
+];
+for (const [key, value] of Object.entries(setupResult.serviceUrls)) {
+if (!value) continue;
+if (urlFields.includes(key)) {
+const urlKey = key;
+if (!secrets.urls[urlKey]) {
+secrets.urls[urlKey] = value;
+logger$1.log(` Saved ${key} to secrets.urls`);
+updated = true;
+}
+} else if (!secrets.custom[key]) {
+secrets.custom[key] = value;
+logger$1.log(` Saved ${key} to secrets.custom`);
+updated = true;
+}
+}
+if (updated) await writeStageSecrets$1(secrets);
+}
+}
+let masterKey;
+if (!skipBuild) {
+logger$1.log(`\n📦 Building for production...`);
+const buildResult = await buildCommand({
+provider: "server",
+production: true,
+stage
 });
-
-}
-
+masterKey = buildResult.masterKey;
+} else logger$1.log(`\n⏭️ Skipping build (--skip-build)`);
+let result;
+switch (provider) {
+case "docker": {
+result = await deployDocker({
+stage,
+tag: imageTag,
+skipPush,
+masterKey,
+config: dockerConfig
+});
+break;
+}
+case "dokploy": {
+if (!dokployConfig) throw new Error("Dokploy config not initialized");
+const finalImageRef = finalRegistry ? `${finalRegistry}/${imageName}:${imageTag}` : `${imageName}:${imageTag}`;
+await deployDocker({
+stage,
+tag: imageTag,
+skipPush: false,
+masterKey,
+config: {
+registry: finalRegistry,
+imageName: dockerConfig.imageName
+}
+});
+result = await deployDokploy({
+stage,
+tag: imageTag,
+imageRef: finalImageRef,
+masterKey,
+config: dokployConfig
+});
+break;
+}
+case "aws-lambda": {
+logger$1.log("\n⚠️ AWS Lambda deployment is not yet implemented.");
+logger$1.log(" Use SST or AWS CDK for Lambda deployments.");
+result = {
+imageRef,
+masterKey
+};
+break;
+}
+default: throw new Error(`Unknown deploy provider: ${provider}\nSupported providers: docker, dokploy, aws-lambda`);
 }
+logger$1.log("\n✅ Deployment complete!");
+return result;
 }
 
 //#endregion
|
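The hunk above adds a bundled `deployCommand` orchestrator: it resolves an image tag, optionally provisions Dokploy services and persists their connection URLs into stage secrets, optionally runs a production build, then dispatches on the provider. A minimal self-contained sketch of that dispatch shape — the type names and the tag stand-in below are illustrative assumptions inferred from the destructured options, not exports of @geekmidas/cli:

```ts
// Illustrative sketch only; names inferred from the hunk above.
type DeployProvider = "docker" | "dokploy" | "aws-lambda";

interface DeployOptions {
  provider: DeployProvider;
  stage: string;
  tag?: string;        // falls back to a generated tag when omitted
  skipPush?: boolean;  // honored by the "docker" path
  skipBuild?: boolean; // skips the production build step
}

function generateTagStub(stage: string): string {
  // Stand-in for the bundle's generateTag(stage); exact format unknown.
  return `${stage}-${Date.now()}`;
}

async function deploySketch({ provider, stage, tag }: DeployOptions): Promise<string> {
  const imageTag = tag ?? generateTagStub(stage);
  switch (provider) {
    case "docker": return `pushed image tagged ${imageTag}`;
    case "dokploy": return `deployed ${imageTag} via Dokploy`;
    case "aws-lambda": return "aws-lambda is not yet implemented";
    default: throw new Error(`Unknown deploy provider: ${provider}`);
  }
}
```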
@@ -4087,11 +4509,11 @@ async function initCommand(projectName, options = {}) {
   };
   const targetDir = join(cwd, name$1);
   const template = getTemplate(templateOptions.template);
-  const isMonorepo = templateOptions.monorepo;
+  const isMonorepo$1 = templateOptions.monorepo;
   const apiPath = templateOptions.apiPath;
   await mkdir(targetDir, { recursive: true });
-  const appDir = isMonorepo ? join(targetDir, apiPath) : targetDir;
-  if (isMonorepo) await mkdir(appDir, { recursive: true });
+  const appDir = isMonorepo$1 ? join(targetDir, apiPath) : targetDir;
+  if (isMonorepo$1) await mkdir(appDir, { recursive: true });
   const appFiles = [
     ...generatePackageJson(templateOptions, template),
     ...generateConfigFiles(templateOptions, template),
@@ -4107,7 +4529,7 @@ async function initCommand(projectName, options = {}) {
   }
   for (const { path, content } of appFiles) {
     const fullPath = join(appDir, path);
-    const _displayPath = isMonorepo ? `${apiPath}/${path}` : path;
+    const _displayPath = isMonorepo$1 ? `${apiPath}/${path}` : path;
     await mkdir(dirname(fullPath), { recursive: true });
     await writeFile(fullPath, content);
   }
@@ -4440,7 +4862,8 @@ program.command("init").description("Scaffold a new project").argument("[name]",
     const globalOptions = program.opts();
     if (globalOptions.cwd) process.chdir(globalOptions.cwd);
     await initCommand(name$1, options);
-  } catch (
+  } catch (error) {
+    console.error(error instanceof Error ? error.message : "Command failed");
     process.exit(1);
   }
 });
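From here to the end of the file, every command registration receives the same fix: the old catch line (shown truncated by the diff viewer as `} catch (`) becomes a block that logs the error message before exiting. A hypothetical helper capturing that repeated pattern — the bundle inlines it at each call site; this wrapper is not part of the package:

```ts
// Hypothetical wrapper (Node.js) for the catch-log-exit pattern repeated
// in every hunk below; `action` stands for any command implementation.
async function runCommand(
  action: () => Promise<void>,
  fallbackMessage = "Command failed", // the deploy command uses "Deploy failed"
): Promise<void> {
  try {
    await action();
  } catch (error) {
    console.error(error instanceof Error ? error.message : fallbackMessage);
    process.exit(1);
  }
}
```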
@@ -4472,7 +4895,8 @@ program.command("build").description("Build handlers from endpoints, functions,
       skipBundle: options.skipBundle || false,
       stage: options.stage
     });
-  } catch (
+  } catch (error) {
+    console.error(error instanceof Error ? error.message : "Command failed");
     process.exit(1);
   }
 });
@@ -4485,7 +4909,8 @@ program.command("dev").description("Start development server with automatic relo
       portExplicit: !!options.port,
       enableOpenApi: options.enableOpenapi ?? true
     });
-  } catch (
+  } catch (error) {
+    console.error(error instanceof Error ? error.message : "Command failed");
     process.exit(1);
   }
 });
@@ -4509,7 +4934,8 @@ program.command("openapi").description("Generate OpenAPI specification from endp
     const globalOptions = program.opts();
     if (globalOptions.cwd) process.chdir(globalOptions.cwd);
     await openapiCommand({});
-  } catch (
+  } catch (error) {
+    console.error(error instanceof Error ? error.message : "Command failed");
     process.exit(1);
   }
 });
@@ -4518,7 +4944,8 @@ program.command("generate:react-query").description("Generate React Query hooks
     const globalOptions = program.opts();
     if (globalOptions.cwd) process.chdir(globalOptions.cwd);
     await generateReactQueryCommand(options);
-  } catch (
+  } catch (error) {
+    console.error(error instanceof Error ? error.message : "Command failed");
     process.exit(1);
   }
 });
@@ -4527,7 +4954,8 @@ program.command("docker").description("Generate Docker deployment files").option
     const globalOptions = program.opts();
     if (globalOptions.cwd) process.chdir(globalOptions.cwd);
     await dockerCommand(options);
-  } catch (
+  } catch (error) {
+    console.error(error instanceof Error ? error.message : "Command failed");
     process.exit(1);
   }
 });
@@ -4555,7 +4983,8 @@ program.command("prepack").description("Generate Docker files for production dep
       const registry = options.registry;
       const _imageRef = registry ? `${registry}/api:${tag}` : `api:${tag}`;
     }
-  } catch (
+  } catch (error) {
+    console.error(error instanceof Error ? error.message : "Command failed");
     process.exit(1);
   }
 });
@@ -4564,7 +4993,8 @@ program.command("secrets:init").description("Initialize secrets for a deployment
     const globalOptions = program.opts();
     if (globalOptions.cwd) process.chdir(globalOptions.cwd);
     await secretsInitCommand(options);
-  } catch (
+  } catch (error) {
+    console.error(error instanceof Error ? error.message : "Command failed");
     process.exit(1);
   }
 });
@@ -4573,7 +5003,8 @@ program.command("secrets:set").description("Set a custom secret for a stage").ar
     const globalOptions = program.opts();
     if (globalOptions.cwd) process.chdir(globalOptions.cwd);
     await secretsSetCommand(key, value, options);
-  } catch (
+  } catch (error) {
+    console.error(error instanceof Error ? error.message : "Command failed");
     process.exit(1);
   }
 });
@@ -4582,7 +5013,8 @@ program.command("secrets:show").description("Show secrets for a stage").required
     const globalOptions = program.opts();
     if (globalOptions.cwd) process.chdir(globalOptions.cwd);
     await secretsShowCommand(options);
-  } catch (
+  } catch (error) {
+    console.error(error instanceof Error ? error.message : "Command failed");
     process.exit(1);
   }
 });
@@ -4591,7 +5023,8 @@ program.command("secrets:rotate").description("Rotate service passwords").requir
     const globalOptions = program.opts();
     if (globalOptions.cwd) process.chdir(globalOptions.cwd);
     await secretsRotateCommand(options);
-  } catch (
+  } catch (error) {
+    console.error(error instanceof Error ? error.message : "Command failed");
     process.exit(1);
   }
 });
@@ -4600,7 +5033,8 @@ program.command("secrets:import").description("Import secrets from a JSON file")
     const globalOptions = program.opts();
     if (globalOptions.cwd) process.chdir(globalOptions.cwd);
     await secretsImportCommand(file, options);
-  } catch (
+  } catch (error) {
+    console.error(error instanceof Error ? error.message : "Command failed");
     process.exit(1);
   }
 });
@@ -4624,7 +5058,8 @@ program.command("deploy").description("Deploy application to a provider").requir
       skipPush: options.skipPush,
       skipBuild: options.skipBuild
     });
-  } catch (
+  } catch (error) {
+    console.error(error instanceof Error ? error.message : "Deploy failed");
     process.exit(1);
   }
 });