@geekmidas/cli 0.12.0 → 0.14.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/bundler-BjholBlA.cjs +131 -0
- package/dist/bundler-BjholBlA.cjs.map +1 -0
- package/dist/bundler-DWctKN1z.mjs +130 -0
- package/dist/bundler-DWctKN1z.mjs.map +1 -0
- package/dist/config.d.cts +1 -1
- package/dist/config.d.mts +1 -1
- package/dist/dokploy-api-B7KxOQr3.cjs +3 -0
- package/dist/dokploy-api-C7F9VykY.cjs +317 -0
- package/dist/dokploy-api-C7F9VykY.cjs.map +1 -0
- package/dist/dokploy-api-CaETb2L6.mjs +305 -0
- package/dist/dokploy-api-CaETb2L6.mjs.map +1 -0
- package/dist/dokploy-api-DHvfmWbi.mjs +3 -0
- package/dist/{encryption-Dyf_r1h-.cjs → encryption-D7Efcdi9.cjs} +1 -1
- package/dist/{encryption-Dyf_r1h-.cjs.map → encryption-D7Efcdi9.cjs.map} +1 -1
- package/dist/{encryption-C8H-38Yy.mjs → encryption-h4Nb6W-M.mjs} +1 -1
- package/dist/{encryption-C8H-38Yy.mjs.map → encryption-h4Nb6W-M.mjs.map} +1 -1
- package/dist/index.cjs +1520 -1136
- package/dist/index.cjs.map +1 -1
- package/dist/index.mjs +1520 -1136
- package/dist/index.mjs.map +1 -1
- package/dist/{openapi-Bt_1FDpT.cjs → openapi-C89hhkZC.cjs} +3 -3
- package/dist/{openapi-Bt_1FDpT.cjs.map → openapi-C89hhkZC.cjs.map} +1 -1
- package/dist/{openapi-BfFlOBCG.mjs → openapi-CZVcfxk-.mjs} +3 -3
- package/dist/{openapi-BfFlOBCG.mjs.map → openapi-CZVcfxk-.mjs.map} +1 -1
- package/dist/{openapi-react-query-B6XTeGqS.mjs → openapi-react-query-CM2_qlW9.mjs} +1 -1
- package/dist/{openapi-react-query-B6XTeGqS.mjs.map → openapi-react-query-CM2_qlW9.mjs.map} +1 -1
- package/dist/{openapi-react-query-B-sNWHFU.cjs → openapi-react-query-iKjfLzff.cjs} +1 -1
- package/dist/{openapi-react-query-B-sNWHFU.cjs.map → openapi-react-query-iKjfLzff.cjs.map} +1 -1
- package/dist/openapi-react-query.cjs +1 -1
- package/dist/openapi-react-query.mjs +1 -1
- package/dist/openapi.cjs +1 -1
- package/dist/openapi.d.cts +1 -1
- package/dist/openapi.d.mts +1 -1
- package/dist/openapi.mjs +1 -1
- package/dist/{storage-C9PU_30f.mjs → storage-BaOP55oq.mjs} +48 -2
- package/dist/storage-BaOP55oq.mjs.map +1 -0
- package/dist/{storage-BXoJvmv2.cjs → storage-Bn3K9Ccu.cjs} +59 -1
- package/dist/storage-Bn3K9Ccu.cjs.map +1 -0
- package/dist/storage-UfyTn7Zm.cjs +7 -0
- package/dist/storage-nkGIjeXt.mjs +3 -0
- package/dist/{types-BR0M2v_c.d.mts → types-BgaMXsUa.d.cts} +3 -1
- package/dist/{types-BR0M2v_c.d.mts.map → types-BgaMXsUa.d.cts.map} +1 -1
- package/dist/{types-BhkZc-vm.d.cts → types-iFk5ms7y.d.mts} +3 -1
- package/dist/{types-BhkZc-vm.d.cts.map → types-iFk5ms7y.d.mts.map} +1 -1
- package/package.json +4 -4
- package/src/auth/__tests__/credentials.spec.ts +127 -0
- package/src/auth/__tests__/index.spec.ts +69 -0
- package/src/auth/credentials.ts +33 -0
- package/src/auth/index.ts +57 -50
- package/src/build/__tests__/bundler.spec.ts +444 -0
- package/src/build/__tests__/endpoint-analyzer.spec.ts +623 -0
- package/src/build/__tests__/handler-templates.spec.ts +272 -0
- package/src/build/bundler.ts +126 -8
- package/src/build/index.ts +31 -0
- package/src/build/types.ts +6 -0
- package/src/deploy/__tests__/dokploy-api.spec.ts +698 -0
- package/src/deploy/__tests__/dokploy.spec.ts +196 -6
- package/src/deploy/__tests__/index.spec.ts +339 -0
- package/src/deploy/__tests__/init.spec.ts +147 -16
- package/src/deploy/docker.ts +32 -3
- package/src/deploy/dokploy-api.ts +581 -0
- package/src/deploy/dokploy.ts +66 -93
- package/src/deploy/index.ts +587 -32
- package/src/deploy/init.ts +192 -249
- package/src/deploy/types.ts +19 -1
- package/src/dev/__tests__/index.spec.ts +95 -0
- package/src/docker/__tests__/templates.spec.ts +144 -0
- package/src/docker/index.ts +96 -6
- package/src/docker/templates.ts +114 -27
- package/src/generators/EndpointGenerator.ts +2 -2
- package/src/index.ts +34 -13
- package/src/secrets/__tests__/storage.spec.ts +208 -0
- package/src/secrets/storage.ts +73 -0
- package/src/types.ts +2 -0
- package/dist/bundler-DRXCw_YR.mjs +0 -70
- package/dist/bundler-DRXCw_YR.mjs.map +0 -1
- package/dist/bundler-WsEvH_b2.cjs +0 -71
- package/dist/bundler-WsEvH_b2.cjs.map +0 -1
- package/dist/storage-BUYQJgz7.cjs +0 -4
- package/dist/storage-BXoJvmv2.cjs.map +0 -1
- package/dist/storage-C9PU_30f.mjs.map +0 -1
- package/dist/storage-DLJAYxzJ.mjs +0 -3
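The new src/docker/compose.ts and resolveDockerConfig code in the index.mjs diff below reads Docker settings from gkm.config.ts: docker.registry, docker.imageName, docker.baseImage, docker.port, and docker.compose.services, where services may be an array of service names or an object whose values are true or { image, version }. A hypothetical TypeScript sketch of that shape (field names come from the diff; the export style and concrete values are assumptions):

// gkm.config.ts - illustrative sketch only; field names taken from the diff, values assumed
export default {
  docker: {
    registry: "registry.example.com", // assumed registry host
    imageName: "api",                 // diff defaults this to the package.json name
    baseImage: "node:22-alpine",      // default in resolveDockerConfig$1
    port: 3000,
    compose: {
      services: {
        postgres: { version: "16-alpine" },                  // version falls back to DEFAULT_SERVICE_VERSIONS
        redis: true,                                          // true selects the default image
        rabbitmq: { image: "rabbitmq:3-management-alpine" },  // explicit image wins over version
      },
      // or: services: ["postgres", "redis", "rabbitmq"]
    },
  },
};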
package/dist/index.mjs
CHANGED
@@ -1,11 +1,12 @@
 #!/usr/bin/env -S npx tsx
 import { loadConfig, parseModuleConfig } from "./config-DYULeEv8.mjs";
-import { ConstructGenerator, EndpointGenerator, OPENAPI_OUTPUT_PATH, generateOpenApi, openapiCommand, resolveOpenApiConfig } from "./openapi-
-import {
-import {
+import { ConstructGenerator, EndpointGenerator, OPENAPI_OUTPUT_PATH, generateOpenApi, openapiCommand, resolveOpenApiConfig } from "./openapi-CZVcfxk-.mjs";
+import { DokployApi } from "./dokploy-api-CaETb2L6.mjs";
+import { generateReactQueryCommand } from "./openapi-react-query-CM2_qlW9.mjs";
+import { maskPassword, readStageSecrets, secretsExist, setCustomSecret, writeStageSecrets } from "./storage-BaOP55oq.mjs";
 import { createRequire } from "node:module";
-import { existsSync, mkdirSync } from "node:fs";
-import { dirname, join, parse, relative, resolve } from "node:path";
+import { copyFileSync, existsSync, mkdirSync, unlinkSync } from "node:fs";
+import { basename, dirname, join, parse, relative, resolve } from "node:path";
 import { Command } from "commander";
 import { stdin, stdout } from "node:process";
 import * as readline from "node:readline/promises";
@@ -28,7 +29,7 @@ var __require = /* @__PURE__ */ createRequire(import.meta.url);
 //#endregion
 //#region package.json
 var name = "@geekmidas/cli";
-var version = "0.
+var version = "0.14.0";
 var description = "CLI tools for building Lambda handlers, server applications, and generating OpenAPI specs";
 var private$1 = false;
 var type = "module";
@@ -174,7 +175,8 @@ async function getDokployCredentials(options) {
 if (!credentials.dokploy) return null;
 return {
 token: credentials.dokploy.token,
-endpoint: credentials.dokploy.endpoint
+endpoint: credentials.dokploy.endpoint,
+registryId: credentials.dokploy.registryId
 };
 }
 /**
@@ -197,6 +199,22 @@ async function getDokployToken(options) {
 if (stored) return stored.token;
 return null;
 }
+/**
+* Store Dokploy registry ID
+*/
+async function storeDokployRegistryId(registryId, options) {
+const credentials = await readCredentials(options);
+if (!credentials.dokploy) throw new Error("Dokploy credentials not found. Run \"gkm login --service dokploy\" first.");
+credentials.dokploy.registryId = registryId;
+await writeCredentials(credentials, options);
+}
+/**
+* Get Dokploy registry ID from stored credentials
+*/
+async function getDokployRegistryId(options) {
+const stored = await getDokployCredentials(options);
+return stored?.registryId ?? void 0;
+}
 
 //#endregion
 //#region src/auth/index.ts
@@ -205,52 +223,61 @@ const logger$9 = console;
 * Validate Dokploy token by making a test API call
 */
 async function validateDokployToken(endpoint, token) {
-
-
-
-
-
-
-}
-});
-return response.ok;
-} catch {
-return false;
-}
+const { DokployApi: DokployApi$1 } = await import("./dokploy-api-DHvfmWbi.mjs");
+const api = new DokployApi$1({
+baseUrl: endpoint,
+token
+});
+return api.validateToken();
 }
 /**
 * Prompt for input (handles both TTY and non-TTY)
 */
-async function prompt(message, hidden = false) {
+async function prompt$1(message, hidden = false) {
 if (!process.stdin.isTTY) throw new Error("Interactive input required. Please provide --token option.");
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-}
-
-
-
-
-
-
+if (hidden) {
+process.stdout.write(message);
+return new Promise((resolve$1, reject) => {
+let value = "";
+const cleanup = () => {
+process.stdin.setRawMode(false);
+process.stdin.pause();
+process.stdin.removeListener("data", onData);
+process.stdin.removeListener("error", onError);
+};
+const onError = (err) => {
+cleanup();
+reject(err);
+};
+const onData = (char) => {
+const c = char.toString();
+if (c === "\n" || c === "\r") {
+cleanup();
+process.stdout.write("\n");
+resolve$1(value);
+} else if (c === "") {
+cleanup();
+process.stdout.write("\n");
+process.exit(1);
+} else if (c === "" || c === "\b") {
+if (value.length > 0) value = value.slice(0, -1);
+} else value += c;
+};
+process.stdin.setRawMode(true);
+process.stdin.resume();
+process.stdin.on("data", onData);
+process.stdin.on("error", onError);
+});
+} else {
+const rl = readline.createInterface({
+input: stdin,
+output: stdout
+});
+try {
+return await rl.question(message);
+} finally {
+rl.close();
+}
 }
 }
 /**
@@ -261,7 +288,7 @@ async function loginCommand(options) {
 if (service === "dokploy") {
 logger$9.log("\n🔐 Logging in to Dokploy...\n");
 let endpoint = providedEndpoint;
-if (!endpoint) endpoint = await prompt("Dokploy URL (e.g., https://dokploy.example.com): ");
+if (!endpoint) endpoint = await prompt$1("Dokploy URL (e.g., https://dokploy.example.com): ");
 endpoint = endpoint.replace(/\/$/, "");
 try {
 new URL(endpoint);
@@ -272,7 +299,7 @@ async function loginCommand(options) {
 let token = providedToken;
 if (!token) {
 logger$9.log(`\nGenerate a token at: ${endpoint}/settings/profile\n`);
-token = await prompt("API Token: ", true);
+token = await prompt$1("API Token: ", true);
 }
 if (!token) {
 logger$9.error("Token is required");
@@ -1193,6 +1220,16 @@ async function buildCommand(options) {
 if (studio) logger$6.log(`🗄️ Studio enabled at ${studio.path}`);
 const hooks = normalizeHooksConfig(config$1.hooks);
 if (hooks) logger$6.log(`🪝 Server hooks enabled`);
+const services = config$1.docker?.compose?.services;
+const dockerServices = services ? Array.isArray(services) ? {
+postgres: services.includes("postgres"),
+redis: services.includes("redis"),
+rabbitmq: services.includes("rabbitmq")
+} : {
+postgres: Boolean(services.postgres),
+redis: Boolean(services.redis),
+rabbitmq: Boolean(services.rabbitmq)
+} : void 0;
 const buildContext = {
 envParserPath,
 envParserImportPattern,
@@ -1201,7 +1238,8 @@ async function buildCommand(options) {
 telescope,
 studio,
 hooks,
-production
+production,
+dockerServices
 };
 const endpointGenerator = new EndpointGenerator();
 const functionGenerator = new FunctionGenerator();
@@ -1259,14 +1297,23 @@ async function buildForProvider(provider, context, rootOutputDir, endpointGenera
 let masterKey;
 if (context.production?.bundle && !skipBundle) {
 logger$6.log(`\n📦 Bundling production server...`);
-const { bundleServer } = await import("./bundler-
+const { bundleServer } = await import("./bundler-DWctKN1z.mjs");
+const allConstructs = [
+...endpoints.map((e) => e.construct),
+...functions.map((f) => f.construct),
+...crons.map((c) => c.construct),
+...subscribers.map((s) => s.construct)
+];
+const dockerServices = context.dockerServices;
 const bundleResult = await bundleServer({
 entryPoint: join(outputDir, "server.ts"),
 outputDir: join(outputDir, "dist"),
 minify: context.production.minify,
 sourcemap: false,
 external: context.production.external,
-stage
+stage,
+constructs: allConstructs,
+dockerServices
 });
 masterKey = bundleResult.masterKey;
 logger$6.log(`✅ Bundle complete: .gkm/server/dist/server.mjs`);
@@ -1281,1175 +1328,1499 @@ async function buildForProvider(provider, context, rootOutputDir, endpointGenera
|
|
|
1281
1328
|
}
|
|
1282
1329
|
|
|
1283
1330
|
//#endregion
|
|
1284
|
-
//#region src/
|
|
1285
|
-
|
|
1286
|
-
|
|
1287
|
-
|
|
1288
|
-
|
|
1289
|
-
|
|
1290
|
-
|
|
1291
|
-
|
|
1331
|
+
//#region src/docker/compose.ts
|
|
1332
|
+
/** Default Docker images for services */
|
|
1333
|
+
const DEFAULT_SERVICE_IMAGES = {
|
|
1334
|
+
postgres: "postgres",
|
|
1335
|
+
redis: "redis",
|
|
1336
|
+
rabbitmq: "rabbitmq"
|
|
1337
|
+
};
|
|
1338
|
+
/** Default Docker image versions for services */
|
|
1339
|
+
const DEFAULT_SERVICE_VERSIONS = {
|
|
1340
|
+
postgres: "16-alpine",
|
|
1341
|
+
redis: "7-alpine",
|
|
1342
|
+
rabbitmq: "3-management-alpine"
|
|
1343
|
+
};
|
|
1344
|
+
/** Get the default full image reference for a service */
|
|
1345
|
+
function getDefaultImage(serviceName) {
|
|
1346
|
+
return `${DEFAULT_SERVICE_IMAGES[serviceName]}:${DEFAULT_SERVICE_VERSIONS[serviceName]}`;
|
|
1292
1347
|
}
|
|
1293
|
-
/**
|
|
1294
|
-
|
|
1295
|
-
*/
|
|
1296
|
-
|
|
1297
|
-
|
|
1298
|
-
|
|
1299
|
-
|
|
1300
|
-
|
|
1301
|
-
|
|
1302
|
-
|
|
1303
|
-
|
|
1304
|
-
|
|
1348
|
+
/** Normalize services config to a consistent format - returns Map of service name to full image reference */
|
|
1349
|
+
function normalizeServices(services) {
|
|
1350
|
+
const result = /* @__PURE__ */ new Map();
|
|
1351
|
+
if (Array.isArray(services)) for (const name$1 of services) result.set(name$1, getDefaultImage(name$1));
|
|
1352
|
+
else for (const [name$1, config$1] of Object.entries(services)) {
|
|
1353
|
+
const serviceName = name$1;
|
|
1354
|
+
if (config$1 === true) result.set(serviceName, getDefaultImage(serviceName));
|
|
1355
|
+
else if (config$1 && typeof config$1 === "object") {
|
|
1356
|
+
const serviceConfig = config$1;
|
|
1357
|
+
if (serviceConfig.image) result.set(serviceName, serviceConfig.image);
|
|
1358
|
+
else {
|
|
1359
|
+
const version$1 = serviceConfig.version ?? DEFAULT_SERVICE_VERSIONS[serviceName];
|
|
1360
|
+
result.set(serviceName, `${DEFAULT_SERVICE_IMAGES[serviceName]}:${version$1}`);
|
|
1305
1361
|
}
|
|
1306
|
-
}
|
|
1307
|
-
logger$5.log(`✅ Image built: ${imageRef}`);
|
|
1308
|
-
} catch (error) {
|
|
1309
|
-
throw new Error(`Failed to build Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
|
|
1362
|
+
}
|
|
1310
1363
|
}
|
|
1364
|
+
return result;
|
|
1311
1365
|
}
|
|
1312
1366
|
/**
|
|
1313
|
-
*
|
|
1367
|
+
* Generate docker-compose.yml for production deployment
|
|
1314
1368
|
*/
|
|
1315
|
-
|
|
1316
|
-
|
|
1317
|
-
|
|
1318
|
-
|
|
1319
|
-
|
|
1320
|
-
|
|
1321
|
-
|
|
1322
|
-
|
|
1323
|
-
|
|
1324
|
-
|
|
1369
|
+
function generateDockerCompose(options) {
|
|
1370
|
+
const { imageName, registry, port, healthCheckPath, services } = options;
|
|
1371
|
+
const serviceMap = normalizeServices(services);
|
|
1372
|
+
const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
|
|
1373
|
+
let yaml = `version: '3.8'
|
|
1374
|
+
|
|
1375
|
+
services:
|
|
1376
|
+
api:
|
|
1377
|
+
build:
|
|
1378
|
+
context: ../..
|
|
1379
|
+
dockerfile: .gkm/docker/Dockerfile
|
|
1380
|
+
image: ${imageRef}\${IMAGE_NAME:-${imageName}}:\${TAG:-latest}
|
|
1381
|
+
container_name: ${imageName}
|
|
1382
|
+
restart: unless-stopped
|
|
1383
|
+
ports:
|
|
1384
|
+
- "\${PORT:-${port}}:${port}"
|
|
1385
|
+
environment:
|
|
1386
|
+
- NODE_ENV=production
|
|
1387
|
+
`;
|
|
1388
|
+
if (serviceMap.has("postgres")) yaml += ` - DATABASE_URL=\${DATABASE_URL:-postgresql://postgres:postgres@postgres:5432/app}
|
|
1389
|
+
`;
|
|
1390
|
+
if (serviceMap.has("redis")) yaml += ` - REDIS_URL=\${REDIS_URL:-redis://redis:6379}
|
|
1391
|
+
`;
|
|
1392
|
+
if (serviceMap.has("rabbitmq")) yaml += ` - RABBITMQ_URL=\${RABBITMQ_URL:-amqp://rabbitmq:5672}
|
|
1393
|
+
`;
|
|
1394
|
+
yaml += ` healthcheck:
|
|
1395
|
+
test: ["CMD", "wget", "-q", "--spider", "http://localhost:${port}${healthCheckPath}"]
|
|
1396
|
+
interval: 30s
|
|
1397
|
+
timeout: 3s
|
|
1398
|
+
retries: 3
|
|
1399
|
+
`;
|
|
1400
|
+
if (serviceMap.size > 0) {
|
|
1401
|
+
yaml += ` depends_on:
|
|
1402
|
+
`;
|
|
1403
|
+
for (const serviceName of serviceMap.keys()) yaml += ` ${serviceName}:
|
|
1404
|
+
condition: service_healthy
|
|
1405
|
+
`;
|
|
1325
1406
|
}
|
|
1407
|
+
yaml += ` networks:
|
|
1408
|
+
- app-network
|
|
1409
|
+
`;
|
|
1410
|
+
const postgresImage = serviceMap.get("postgres");
|
|
1411
|
+
if (postgresImage) yaml += `
|
|
1412
|
+
postgres:
|
|
1413
|
+
image: ${postgresImage}
|
|
1414
|
+
container_name: postgres
|
|
1415
|
+
restart: unless-stopped
|
|
1416
|
+
environment:
|
|
1417
|
+
POSTGRES_USER: \${POSTGRES_USER:-postgres}
|
|
1418
|
+
POSTGRES_PASSWORD: \${POSTGRES_PASSWORD:-postgres}
|
|
1419
|
+
POSTGRES_DB: \${POSTGRES_DB:-app}
|
|
1420
|
+
volumes:
|
|
1421
|
+
- postgres_data:/var/lib/postgresql/data
|
|
1422
|
+
healthcheck:
|
|
1423
|
+
test: ["CMD-SHELL", "pg_isready -U postgres"]
|
|
1424
|
+
interval: 5s
|
|
1425
|
+
timeout: 5s
|
|
1426
|
+
retries: 5
|
|
1427
|
+
networks:
|
|
1428
|
+
- app-network
|
|
1429
|
+
`;
|
|
1430
|
+
const redisImage = serviceMap.get("redis");
|
|
1431
|
+
if (redisImage) yaml += `
|
|
1432
|
+
redis:
|
|
1433
|
+
image: ${redisImage}
|
|
1434
|
+
container_name: redis
|
|
1435
|
+
restart: unless-stopped
|
|
1436
|
+
volumes:
|
|
1437
|
+
- redis_data:/data
|
|
1438
|
+
healthcheck:
|
|
1439
|
+
test: ["CMD", "redis-cli", "ping"]
|
|
1440
|
+
interval: 5s
|
|
1441
|
+
timeout: 5s
|
|
1442
|
+
retries: 5
|
|
1443
|
+
networks:
|
|
1444
|
+
- app-network
|
|
1445
|
+
`;
|
|
1446
|
+
const rabbitmqImage = serviceMap.get("rabbitmq");
|
|
1447
|
+
if (rabbitmqImage) yaml += `
|
|
1448
|
+
rabbitmq:
|
|
1449
|
+
image: ${rabbitmqImage}
|
|
1450
|
+
container_name: rabbitmq
|
|
1451
|
+
restart: unless-stopped
|
|
1452
|
+
environment:
|
|
1453
|
+
RABBITMQ_DEFAULT_USER: \${RABBITMQ_USER:-guest}
|
|
1454
|
+
RABBITMQ_DEFAULT_PASS: \${RABBITMQ_PASSWORD:-guest}
|
|
1455
|
+
ports:
|
|
1456
|
+
- "15672:15672" # Management UI
|
|
1457
|
+
volumes:
|
|
1458
|
+
- rabbitmq_data:/var/lib/rabbitmq
|
|
1459
|
+
healthcheck:
|
|
1460
|
+
test: ["CMD", "rabbitmq-diagnostics", "-q", "ping"]
|
|
1461
|
+
interval: 10s
|
|
1462
|
+
timeout: 5s
|
|
1463
|
+
retries: 5
|
|
1464
|
+
networks:
|
|
1465
|
+
- app-network
|
|
1466
|
+
`;
|
|
1467
|
+
yaml += `
|
|
1468
|
+
volumes:
|
|
1469
|
+
`;
|
|
1470
|
+
if (serviceMap.has("postgres")) yaml += ` postgres_data:
|
|
1471
|
+
`;
|
|
1472
|
+
if (serviceMap.has("redis")) yaml += ` redis_data:
|
|
1473
|
+
`;
|
|
1474
|
+
if (serviceMap.has("rabbitmq")) yaml += ` rabbitmq_data:
|
|
1475
|
+
`;
|
|
1476
|
+
yaml += `
|
|
1477
|
+
networks:
|
|
1478
|
+
app-network:
|
|
1479
|
+
driver: bridge
|
|
1480
|
+
`;
|
|
1481
|
+
return yaml;
|
|
1326
1482
|
}
|
|
1327
1483
|
/**
|
|
1328
|
-
*
|
|
1484
|
+
* Generate a minimal docker-compose.yml for API only
|
|
1329
1485
|
*/
|
|
1330
|
-
|
|
1331
|
-
const {
|
|
1332
|
-
const
|
|
1333
|
-
|
|
1334
|
-
|
|
1335
|
-
|
|
1336
|
-
|
|
1337
|
-
|
|
1338
|
-
|
|
1339
|
-
|
|
1340
|
-
|
|
1341
|
-
|
|
1342
|
-
|
|
1343
|
-
|
|
1344
|
-
|
|
1345
|
-
|
|
1346
|
-
|
|
1347
|
-
|
|
1348
|
-
|
|
1349
|
-
|
|
1350
|
-
|
|
1486
|
+
function generateMinimalDockerCompose(options) {
|
|
1487
|
+
const { imageName, registry, port, healthCheckPath } = options;
|
|
1488
|
+
const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
|
|
1489
|
+
return `version: '3.8'
|
|
1490
|
+
|
|
1491
|
+
services:
|
|
1492
|
+
api:
|
|
1493
|
+
build:
|
|
1494
|
+
context: ../..
|
|
1495
|
+
dockerfile: .gkm/docker/Dockerfile
|
|
1496
|
+
image: ${imageRef}\${IMAGE_NAME:-${imageName}}:\${TAG:-latest}
|
|
1497
|
+
container_name: ${imageName}
|
|
1498
|
+
restart: unless-stopped
|
|
1499
|
+
ports:
|
|
1500
|
+
- "\${PORT:-${port}}:${port}"
|
|
1501
|
+
environment:
|
|
1502
|
+
- NODE_ENV=production
|
|
1503
|
+
healthcheck:
|
|
1504
|
+
test: ["CMD", "wget", "-q", "--spider", "http://localhost:${port}${healthCheckPath}"]
|
|
1505
|
+
interval: 30s
|
|
1506
|
+
timeout: 3s
|
|
1507
|
+
retries: 3
|
|
1508
|
+
networks:
|
|
1509
|
+
- app-network
|
|
1510
|
+
|
|
1511
|
+
networks:
|
|
1512
|
+
app-network:
|
|
1513
|
+
driver: bridge
|
|
1514
|
+
`;
|
|
1351
1515
|
}
|
|
1516
|
+
|
|
1517
|
+
//#endregion
|
|
1518
|
+
//#region src/docker/templates.ts
|
|
1519
|
+
const LOCKFILES = [
|
|
1520
|
+
["pnpm-lock.yaml", "pnpm"],
|
|
1521
|
+
["bun.lockb", "bun"],
|
|
1522
|
+
["yarn.lock", "yarn"],
|
|
1523
|
+
["package-lock.json", "npm"]
|
|
1524
|
+
];
|
|
1352
1525
|
/**
|
|
1353
|
-
*
|
|
1354
|
-
|
|
1355
|
-
function resolveDockerConfig$1(config$1) {
|
|
1356
|
-
return {
|
|
1357
|
-
registry: config$1.docker?.registry,
|
|
1358
|
-
imageName: config$1.docker?.imageName
|
|
1359
|
-
};
|
|
1360
|
-
}
|
|
1361
|
-
|
|
1362
|
-
//#endregion
|
|
1363
|
-
//#region src/deploy/dokploy.ts
|
|
1364
|
-
const logger$4 = console;
|
|
1365
|
-
/**
|
|
1366
|
-
* Get the Dokploy API token from stored credentials or environment
|
|
1367
|
-
*/
|
|
1368
|
-
async function getApiToken$1() {
|
|
1369
|
-
const token = await getDokployToken();
|
|
1370
|
-
if (!token) throw new Error("Dokploy credentials not found.\nRun \"gkm login --service dokploy\" to authenticate, or set DOKPLOY_API_TOKEN.");
|
|
1371
|
-
return token;
|
|
1372
|
-
}
|
|
1373
|
-
/**
|
|
1374
|
-
* Make a request to the Dokploy API
|
|
1526
|
+
* Detect package manager from lockfiles
|
|
1527
|
+
* Walks up the directory tree to find lockfile (for monorepos)
|
|
1375
1528
|
*/
|
|
1376
|
-
|
|
1377
|
-
|
|
1378
|
-
const
|
|
1379
|
-
|
|
1380
|
-
|
|
1381
|
-
|
|
1382
|
-
Authorization: `Bearer ${token}`
|
|
1383
|
-
},
|
|
1384
|
-
body: JSON.stringify(body)
|
|
1385
|
-
});
|
|
1386
|
-
if (!response.ok) {
|
|
1387
|
-
let errorMessage = `Dokploy API error: ${response.status} ${response.statusText}`;
|
|
1388
|
-
try {
|
|
1389
|
-
const errorBody = await response.json();
|
|
1390
|
-
if (errorBody.message) errorMessage = `Dokploy API error: ${errorBody.message}`;
|
|
1391
|
-
if (errorBody.issues?.length) errorMessage += `\n Issues: ${errorBody.issues.map((i) => i.message).join(", ")}`;
|
|
1392
|
-
} catch {}
|
|
1393
|
-
throw new Error(errorMessage);
|
|
1529
|
+
function detectPackageManager$1(cwd = process.cwd()) {
|
|
1530
|
+
let dir = cwd;
|
|
1531
|
+
const root = parse(dir).root;
|
|
1532
|
+
while (dir !== root) {
|
|
1533
|
+
for (const [lockfile, pm] of LOCKFILES) if (existsSync(join(dir, lockfile))) return pm;
|
|
1534
|
+
dir = dirname(dir);
|
|
1394
1535
|
}
|
|
1395
|
-
|
|
1396
|
-
|
|
1397
|
-
/**
|
|
1398
|
-
* Update application environment variables
|
|
1399
|
-
*/
|
|
1400
|
-
async function updateEnvironment(baseUrl, token, applicationId, envVars) {
|
|
1401
|
-
logger$4.log(" Updating environment variables...");
|
|
1402
|
-
const envString = Object.entries(envVars).map(([key, value]) => `${key}=${value}`).join("\n");
|
|
1403
|
-
await dokployRequest$1("application.update", baseUrl, token, {
|
|
1404
|
-
applicationId,
|
|
1405
|
-
env: envString
|
|
1406
|
-
});
|
|
1407
|
-
logger$4.log(" ✓ Environment variables updated");
|
|
1408
|
-
}
|
|
1409
|
-
/**
|
|
1410
|
-
* Trigger application deployment
|
|
1411
|
-
*/
|
|
1412
|
-
async function triggerDeploy(baseUrl, token, applicationId) {
|
|
1413
|
-
logger$4.log(" Triggering deployment...");
|
|
1414
|
-
await dokployRequest$1("application.deploy", baseUrl, token, { applicationId });
|
|
1415
|
-
logger$4.log(" ✓ Deployment triggered");
|
|
1416
|
-
}
|
|
1417
|
-
/**
|
|
1418
|
-
* Deploy to Dokploy
|
|
1419
|
-
*/
|
|
1420
|
-
async function deployDokploy(options) {
|
|
1421
|
-
const { stage, imageRef, masterKey, config: config$1 } = options;
|
|
1422
|
-
logger$4.log(`\n🎯 Deploying to Dokploy...`);
|
|
1423
|
-
logger$4.log(` Endpoint: ${config$1.endpoint}`);
|
|
1424
|
-
logger$4.log(` Application: ${config$1.applicationId}`);
|
|
1425
|
-
const token = await getApiToken$1();
|
|
1426
|
-
const envVars = {};
|
|
1427
|
-
if (masterKey) envVars.GKM_MASTER_KEY = masterKey;
|
|
1428
|
-
if (Object.keys(envVars).length > 0) await updateEnvironment(config$1.endpoint, token, config$1.applicationId, envVars);
|
|
1429
|
-
await triggerDeploy(config$1.endpoint, token, config$1.applicationId);
|
|
1430
|
-
logger$4.log("\n✅ Dokploy deployment initiated!");
|
|
1431
|
-
logger$4.log(`\n📋 Deployment details:`);
|
|
1432
|
-
logger$4.log(` Image: ${imageRef}`);
|
|
1433
|
-
logger$4.log(` Stage: ${stage}`);
|
|
1434
|
-
logger$4.log(` Application ID: ${config$1.applicationId}`);
|
|
1435
|
-
if (masterKey) logger$4.log(`\n🔐 GKM_MASTER_KEY has been set in Dokploy environment`);
|
|
1436
|
-
const deploymentUrl = `${config$1.endpoint}/project/${config$1.projectId}`;
|
|
1437
|
-
logger$4.log(`\n🔗 View deployment: ${deploymentUrl}`);
|
|
1438
|
-
return {
|
|
1439
|
-
imageRef,
|
|
1440
|
-
masterKey,
|
|
1441
|
-
url: deploymentUrl
|
|
1442
|
-
};
|
|
1443
|
-
}
|
|
1444
|
-
/**
|
|
1445
|
-
* Validate Dokploy configuration
|
|
1446
|
-
*/
|
|
1447
|
-
function validateDokployConfig(config$1) {
|
|
1448
|
-
if (!config$1) return false;
|
|
1449
|
-
const required = [
|
|
1450
|
-
"endpoint",
|
|
1451
|
-
"projectId",
|
|
1452
|
-
"applicationId"
|
|
1453
|
-
];
|
|
1454
|
-
const missing = required.filter((key) => !config$1[key]);
|
|
1455
|
-
if (missing.length > 0) throw new Error(`Missing Dokploy configuration: ${missing.join(", ")}\nConfigure in gkm.config.ts:
|
|
1456
|
-
providers: {
|
|
1457
|
-
dokploy: {
|
|
1458
|
-
endpoint: 'https://dokploy.example.com',
|
|
1459
|
-
projectId: 'proj_xxx',
|
|
1460
|
-
applicationId: 'app_xxx',
|
|
1461
|
-
},
|
|
1462
|
-
}`);
|
|
1463
|
-
return true;
|
|
1464
|
-
}
|
|
1465
|
-
|
|
1466
|
-
//#endregion
|
|
1467
|
-
//#region src/deploy/index.ts
|
|
1468
|
-
const logger$3 = console;
|
|
1469
|
-
/**
|
|
1470
|
-
* Generate image tag from stage and timestamp
|
|
1471
|
-
*/
|
|
1472
|
-
function generateTag(stage) {
|
|
1473
|
-
const timestamp = (/* @__PURE__ */ new Date()).toISOString().replace(/[:.]/g, "-").slice(0, 19);
|
|
1474
|
-
return `${stage}-${timestamp}`;
|
|
1536
|
+
for (const [lockfile, pm] of LOCKFILES) if (existsSync(join(root, lockfile))) return pm;
|
|
1537
|
+
return "pnpm";
|
|
1475
1538
|
}
|
|
1476
1539
|
/**
|
|
1477
|
-
*
|
|
1540
|
+
* Find the lockfile path by walking up the directory tree
|
|
1541
|
+
* Returns the full path to the lockfile, or null if not found
|
|
1478
1542
|
*/
|
|
1479
|
-
|
|
1480
|
-
|
|
1481
|
-
|
|
1482
|
-
|
|
1483
|
-
|
|
1484
|
-
|
|
1485
|
-
|
|
1486
|
-
let masterKey;
|
|
1487
|
-
if (!skipBuild) {
|
|
1488
|
-
logger$3.log(`\n📦 Building for production...`);
|
|
1489
|
-
const buildResult = await buildCommand({
|
|
1490
|
-
provider: "server",
|
|
1491
|
-
production: true,
|
|
1492
|
-
stage
|
|
1493
|
-
});
|
|
1494
|
-
masterKey = buildResult.masterKey;
|
|
1495
|
-
} else logger$3.log(`\n⏭️ Skipping build (--skip-build)`);
|
|
1496
|
-
const dockerConfig = resolveDockerConfig$1(config$1);
|
|
1497
|
-
const imageName = dockerConfig.imageName ?? "app";
|
|
1498
|
-
const registry = dockerConfig.registry;
|
|
1499
|
-
const imageRef = registry ? `${registry}/${imageName}:${imageTag}` : `${imageName}:${imageTag}`;
|
|
1500
|
-
let result;
|
|
1501
|
-
switch (provider) {
|
|
1502
|
-
case "docker": {
|
|
1503
|
-
result = await deployDocker({
|
|
1504
|
-
stage,
|
|
1505
|
-
tag: imageTag,
|
|
1506
|
-
skipPush,
|
|
1507
|
-
masterKey,
|
|
1508
|
-
config: dockerConfig
|
|
1509
|
-
});
|
|
1510
|
-
break;
|
|
1511
|
-
}
|
|
1512
|
-
case "dokploy": {
|
|
1513
|
-
const dokployConfigRaw = config$1.providers?.dokploy;
|
|
1514
|
-
if (typeof dokployConfigRaw === "boolean" || !dokployConfigRaw) throw new Error("Dokploy provider requires configuration.\nConfigure in gkm.config.ts:\n providers: {\n dokploy: {\n endpoint: 'https://dokploy.example.com',\n projectId: 'proj_xxx',\n applicationId: 'app_xxx',\n },\n }");
|
|
1515
|
-
validateDokployConfig(dokployConfigRaw);
|
|
1516
|
-
const dokployConfig = dokployConfigRaw;
|
|
1517
|
-
await deployDocker({
|
|
1518
|
-
stage,
|
|
1519
|
-
tag: imageTag,
|
|
1520
|
-
skipPush: false,
|
|
1521
|
-
masterKey,
|
|
1522
|
-
config: {
|
|
1523
|
-
registry: dokployConfig.registry ?? dockerConfig.registry,
|
|
1524
|
-
imageName: dockerConfig.imageName
|
|
1525
|
-
}
|
|
1526
|
-
});
|
|
1527
|
-
result = await deployDokploy({
|
|
1528
|
-
stage,
|
|
1529
|
-
tag: imageTag,
|
|
1530
|
-
imageRef,
|
|
1531
|
-
masterKey,
|
|
1532
|
-
config: dokployConfig
|
|
1533
|
-
});
|
|
1534
|
-
break;
|
|
1535
|
-
}
|
|
1536
|
-
case "aws-lambda": {
|
|
1537
|
-
logger$3.log("\n⚠️ AWS Lambda deployment is not yet implemented.");
|
|
1538
|
-
logger$3.log(" Use SST or AWS CDK for Lambda deployments.");
|
|
1539
|
-
result = {
|
|
1540
|
-
imageRef,
|
|
1541
|
-
masterKey
|
|
1542
|
-
};
|
|
1543
|
-
break;
|
|
1543
|
+
function findLockfilePath(cwd = process.cwd()) {
|
|
1544
|
+
let dir = cwd;
|
|
1545
|
+
const root = parse(dir).root;
|
|
1546
|
+
while (dir !== root) {
|
|
1547
|
+
for (const [lockfile] of LOCKFILES) {
|
|
1548
|
+
const lockfilePath = join(dir, lockfile);
|
|
1549
|
+
if (existsSync(lockfilePath)) return lockfilePath;
|
|
1544
1550
|
}
|
|
1545
|
-
|
|
1551
|
+
dir = dirname(dir);
|
|
1546
1552
|
}
|
|
1547
|
-
|
|
1548
|
-
|
|
1549
|
-
|
|
1550
|
-
|
|
1551
|
-
//#endregion
|
|
1552
|
-
//#region src/deploy/init.ts
|
|
1553
|
-
const logger$2 = console;
|
|
1554
|
-
/**
|
|
1555
|
-
* Get the Dokploy API token from stored credentials or environment
|
|
1556
|
-
*/
|
|
1557
|
-
async function getApiToken() {
|
|
1558
|
-
const token = await getDokployToken();
|
|
1559
|
-
if (!token) throw new Error("Dokploy credentials not found.\nRun \"gkm login --service dokploy\" to authenticate, or set DOKPLOY_API_TOKEN.");
|
|
1560
|
-
return token;
|
|
1561
|
-
}
|
|
1562
|
-
/**
|
|
1563
|
-
* Make a request to the Dokploy API
|
|
1564
|
-
*/
|
|
1565
|
-
async function dokployRequest(method, endpoint, baseUrl, token, body) {
|
|
1566
|
-
const url = `${baseUrl}/api/${endpoint}`;
|
|
1567
|
-
const response = await fetch(url, {
|
|
1568
|
-
method,
|
|
1569
|
-
headers: {
|
|
1570
|
-
"Content-Type": "application/json",
|
|
1571
|
-
Authorization: `Bearer ${token}`
|
|
1572
|
-
},
|
|
1573
|
-
body: body ? JSON.stringify(body) : void 0
|
|
1574
|
-
});
|
|
1575
|
-
if (!response.ok) {
|
|
1576
|
-
let errorMessage = `Dokploy API error: ${response.status} ${response.statusText}`;
|
|
1577
|
-
try {
|
|
1578
|
-
const errorBody = await response.json();
|
|
1579
|
-
if (errorBody.message) errorMessage = `Dokploy API error: ${errorBody.message}`;
|
|
1580
|
-
} catch {}
|
|
1581
|
-
throw new Error(errorMessage);
|
|
1553
|
+
for (const [lockfile] of LOCKFILES) {
|
|
1554
|
+
const lockfilePath = join(root, lockfile);
|
|
1555
|
+
if (existsSync(lockfilePath)) return lockfilePath;
|
|
1582
1556
|
}
|
|
1583
|
-
|
|
1584
|
-
if (!text) return {};
|
|
1585
|
-
return JSON.parse(text);
|
|
1586
|
-
}
|
|
1587
|
-
/**
|
|
1588
|
-
* Get all projects from Dokploy
|
|
1589
|
-
*/
|
|
1590
|
-
async function getProjects(baseUrl, token) {
|
|
1591
|
-
return dokployRequest("GET", "project.all", baseUrl, token);
|
|
1592
|
-
}
|
|
1593
|
-
/**
|
|
1594
|
-
* Create a new project in Dokploy
|
|
1595
|
-
*/
|
|
1596
|
-
async function createProject(baseUrl, token, name$1, description$1) {
|
|
1597
|
-
return dokployRequest("POST", "project.create", baseUrl, token, {
|
|
1598
|
-
name: name$1,
|
|
1599
|
-
description: description$1 || `Created by gkm CLI`
|
|
1600
|
-
});
|
|
1557
|
+
return null;
|
|
1601
1558
|
}
|
|
1602
1559
|
/**
|
|
1603
|
-
*
|
|
1560
|
+
* Check if we're in a monorepo (lockfile is in a parent directory)
|
|
1604
1561
|
*/
|
|
1605
|
-
|
|
1606
|
-
|
|
1562
|
+
function isMonorepo(cwd = process.cwd()) {
|
|
1563
|
+
const lockfilePath = findLockfilePath(cwd);
|
|
1564
|
+
if (!lockfilePath) return false;
|
|
1565
|
+
const lockfileDir = dirname(lockfilePath);
|
|
1566
|
+
return lockfileDir !== cwd;
|
|
1607
1567
|
}
|
|
1608
1568
|
/**
|
|
1609
|
-
*
|
|
1569
|
+
* Check if turbo.json exists (walks up directory tree)
|
|
1610
1570
|
*/
|
|
1611
|
-
|
|
1612
|
-
|
|
1613
|
-
|
|
1614
|
-
|
|
1615
|
-
|
|
1616
|
-
|
|
1617
|
-
const env = await dokployRequest("POST", "environment.create", baseUrl, token, {
|
|
1618
|
-
projectId,
|
|
1619
|
-
name: "production",
|
|
1620
|
-
description: "Production environment"
|
|
1621
|
-
});
|
|
1622
|
-
environmentId = env.environmentId;
|
|
1571
|
+
function hasTurboConfig(cwd = process.cwd()) {
|
|
1572
|
+
let dir = cwd;
|
|
1573
|
+
const root = parse(dir).root;
|
|
1574
|
+
while (dir !== root) {
|
|
1575
|
+
if (existsSync(join(dir, "turbo.json"))) return true;
|
|
1576
|
+
dir = dirname(dir);
|
|
1623
1577
|
}
|
|
1624
|
-
return
|
|
1625
|
-
name: name$1,
|
|
1626
|
-
projectId,
|
|
1627
|
-
environmentId
|
|
1628
|
-
});
|
|
1578
|
+
return existsSync(join(root, "turbo.json"));
|
|
1629
1579
|
}
|
|
1630
1580
|
/**
|
|
1631
|
-
*
|
|
1581
|
+
* Get install command for turbo builds (without frozen lockfile)
|
|
1582
|
+
* Turbo prune creates a subset that may not perfectly match the lockfile
|
|
1632
1583
|
*/
|
|
1633
|
-
|
|
1634
|
-
|
|
1635
|
-
|
|
1636
|
-
|
|
1637
|
-
|
|
1584
|
+
function getTurboInstallCmd(pm) {
|
|
1585
|
+
const commands = {
|
|
1586
|
+
pnpm: "pnpm install",
|
|
1587
|
+
npm: "npm install",
|
|
1588
|
+
yarn: "yarn install",
|
|
1589
|
+
bun: "bun install"
|
|
1590
|
+
};
|
|
1591
|
+
return commands[pm];
|
|
1638
1592
|
}
|
|
1639
1593
|
/**
|
|
1640
|
-
* Get
|
|
1594
|
+
* Get package manager specific commands and paths
|
|
1641
1595
|
*/
|
|
1642
|
-
|
|
1643
|
-
|
|
1596
|
+
function getPmConfig(pm) {
|
|
1597
|
+
const configs = {
|
|
1598
|
+
pnpm: {
|
|
1599
|
+
install: "corepack enable && corepack prepare pnpm@latest --activate",
|
|
1600
|
+
lockfile: "pnpm-lock.yaml",
|
|
1601
|
+
fetch: "pnpm fetch",
|
|
1602
|
+
installCmd: "pnpm install --frozen-lockfile --offline",
|
|
1603
|
+
cacheTarget: "/root/.local/share/pnpm/store",
|
|
1604
|
+
cacheId: "pnpm",
|
|
1605
|
+
run: "pnpm",
|
|
1606
|
+
dlx: "pnpm dlx",
|
|
1607
|
+
addGlobal: "pnpm add -g"
|
|
1608
|
+
},
|
|
1609
|
+
npm: {
|
|
1610
|
+
install: "",
|
|
1611
|
+
lockfile: "package-lock.json",
|
|
1612
|
+
fetch: "",
|
|
1613
|
+
installCmd: "npm ci",
|
|
1614
|
+
cacheTarget: "/root/.npm",
|
|
1615
|
+
cacheId: "npm",
|
|
1616
|
+
run: "npm run",
|
|
1617
|
+
dlx: "npx",
|
|
1618
|
+
addGlobal: "npm install -g"
|
|
1619
|
+
},
|
|
1620
|
+
yarn: {
|
|
1621
|
+
install: "corepack enable && corepack prepare yarn@stable --activate",
|
|
1622
|
+
lockfile: "yarn.lock",
|
|
1623
|
+
fetch: "",
|
|
1624
|
+
installCmd: "yarn install --frozen-lockfile",
|
|
1625
|
+
cacheTarget: "/root/.yarn/cache",
|
|
1626
|
+
cacheId: "yarn",
|
|
1627
|
+
run: "yarn",
|
|
1628
|
+
dlx: "yarn dlx",
|
|
1629
|
+
addGlobal: "yarn global add"
|
|
1630
|
+
},
|
|
1631
|
+
bun: {
|
|
1632
|
+
install: "npm install -g bun",
|
|
1633
|
+
lockfile: "bun.lockb",
|
|
1634
|
+
fetch: "",
|
|
1635
|
+
installCmd: "bun install --frozen-lockfile",
|
|
1636
|
+
cacheTarget: "/root/.bun/install/cache",
|
|
1637
|
+
cacheId: "bun",
|
|
1638
|
+
run: "bun run",
|
|
1639
|
+
dlx: "bunx",
|
|
1640
|
+
addGlobal: "bun add -g"
|
|
1641
|
+
}
|
|
1642
|
+
};
|
|
1643
|
+
return configs[pm];
|
|
1644
1644
|
}
|
|
1645
1645
|
/**
|
|
1646
|
-
*
|
|
1646
|
+
* Generate a multi-stage Dockerfile for building from source
|
|
1647
|
+
* Optimized for build speed with:
|
|
1648
|
+
* - BuildKit cache mounts for package manager store
|
|
1649
|
+
* - pnpm fetch for better layer caching (when using pnpm)
|
|
1650
|
+
* - Optional turbo prune for monorepos
|
|
1647
1651
|
*/
|
|
1648
|
-
|
|
1649
|
-
const
|
|
1650
|
-
if (
|
|
1651
|
-
|
|
1652
|
-
|
|
1653
|
-
|
|
1654
|
-
|
|
1655
|
-
|
|
1656
|
-
|
|
1657
|
-
|
|
1658
|
-
|
|
1659
|
-
|
|
1660
|
-
|
|
1661
|
-
|
|
1662
|
-
|
|
1663
|
-
|
|
1664
|
-
|
|
1665
|
-
|
|
1666
|
-
|
|
1667
|
-
|
|
1668
|
-
|
|
1669
|
-
|
|
1670
|
-
|
|
1671
|
-
|
|
1672
|
-
|
|
1673
|
-
|
|
1674
|
-
|
|
1675
|
-
|
|
1676
|
-
|
|
1677
|
-
|
|
1678
|
-
|
|
1679
|
-
|
|
1680
|
-
|
|
1681
|
-
|
|
1682
|
-
|
|
1683
|
-
|
|
1684
|
-
|
|
1685
|
-
|
|
1686
|
-
|
|
1687
|
-
|
|
1688
|
-
|
|
1652
|
+
function generateMultiStageDockerfile(options) {
|
|
1653
|
+
const { baseImage, port, healthCheckPath, turbo, turboPackage, packageManager } = options;
|
|
1654
|
+
if (turbo) return generateTurboDockerfile({
|
|
1655
|
+
...options,
|
|
1656
|
+
turboPackage: turboPackage ?? "api"
|
|
1657
|
+
});
|
|
1658
|
+
const pm = getPmConfig(packageManager);
|
|
1659
|
+
const installPm = pm.install ? `\n# Install ${packageManager}\nRUN ${pm.install}\n` : "";
|
|
1660
|
+
const hasFetch = packageManager === "pnpm";
|
|
1661
|
+
const depsStage = hasFetch ? `# Copy lockfile first for better caching
|
|
1662
|
+
COPY ${pm.lockfile} ./
|
|
1663
|
+
|
|
1664
|
+
# Fetch dependencies (downloads to virtual store, cached separately)
|
|
1665
|
+
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
|
|
1666
|
+
${pm.fetch}
|
|
1667
|
+
|
|
1668
|
+
# Copy package.json after fetch
|
|
1669
|
+
COPY package.json ./
|
|
1670
|
+
|
|
1671
|
+
# Install from cache (fast - no network needed)
|
|
1672
|
+
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
|
|
1673
|
+
${pm.installCmd}` : `# Copy package files
|
|
1674
|
+
COPY package.json ${pm.lockfile} ./
|
|
1675
|
+
|
|
1676
|
+
# Install dependencies with cache
|
|
1677
|
+
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
|
|
1678
|
+
${pm.installCmd}`;
|
|
1679
|
+
return `# syntax=docker/dockerfile:1
|
|
1680
|
+
# Stage 1: Dependencies
|
|
1681
|
+
FROM ${baseImage} AS deps
|
|
1682
|
+
|
|
1683
|
+
WORKDIR /app
|
|
1684
|
+
${installPm}
|
|
1685
|
+
${depsStage}
|
|
1686
|
+
|
|
1687
|
+
# Stage 2: Build
|
|
1688
|
+
FROM deps AS builder
|
|
1689
|
+
|
|
1690
|
+
WORKDIR /app
|
|
1691
|
+
|
|
1692
|
+
# Copy source (deps already installed)
|
|
1693
|
+
COPY . .
|
|
1694
|
+
|
|
1695
|
+
# Build production server using CLI from npm
|
|
1696
|
+
RUN ${pm.dlx} @geekmidas/cli build --provider server --production
|
|
1697
|
+
|
|
1698
|
+
# Stage 3: Production
|
|
1699
|
+
FROM ${baseImage} AS runner
|
|
1700
|
+
|
|
1701
|
+
WORKDIR /app
|
|
1702
|
+
|
|
1703
|
+
# Install tini for proper signal handling as PID 1
|
|
1704
|
+
RUN apk add --no-cache tini
|
|
1705
|
+
|
|
1706
|
+
# Create non-root user
|
|
1707
|
+
RUN addgroup --system --gid 1001 nodejs && \\
|
|
1708
|
+
adduser --system --uid 1001 hono
|
|
1709
|
+
|
|
1710
|
+
# Copy bundled server
|
|
1711
|
+
COPY --from=builder --chown=hono:nodejs /app/.gkm/server/dist/server.mjs ./
|
|
1712
|
+
|
|
1713
|
+
# Environment
|
|
1714
|
+
ENV NODE_ENV=production
|
|
1715
|
+
ENV PORT=${port}
|
|
1716
|
+
|
|
1717
|
+
# Health check
|
|
1718
|
+
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \\
|
|
1719
|
+
CMD wget -q --spider http://localhost:${port}${healthCheckPath} || exit 1
|
|
1720
|
+
|
|
1721
|
+
# Switch to non-root user
|
|
1722
|
+
USER hono
|
|
1723
|
+
|
|
1724
|
+
EXPOSE ${port}
|
|
1725
|
+
|
|
1726
|
+
# Use tini as entrypoint to handle PID 1 responsibilities
|
|
1727
|
+
ENTRYPOINT ["/sbin/tini", "--"]
|
|
1728
|
+
CMD ["node", "server.mjs"]
|
|
1729
|
+
`;
|
|
1689
1730
|
}
|
|
1690
1731
|
/**
|
|
1691
|
-
*
|
|
1732
|
+
* Generate a Dockerfile optimized for Turbo monorepos
|
|
1733
|
+
* Uses turbo prune to create minimal Docker context
|
|
1692
1734
|
*/
|
|
1693
|
-
|
|
1694
|
-
const {
|
|
1695
|
-
|
|
1696
|
-
|
|
1697
|
-
|
|
1698
|
-
|
|
1699
|
-
|
|
1700
|
-
|
|
1701
|
-
|
|
1702
|
-
|
|
1703
|
-
|
|
1704
|
-
|
|
1705
|
-
|
|
1706
|
-
|
|
1707
|
-
|
|
1708
|
-
|
|
1709
|
-
|
|
1710
|
-
|
|
1711
|
-
|
|
1712
|
-
|
|
1713
|
-
|
|
1714
|
-
|
|
1715
|
-
|
|
1716
|
-
|
|
1717
|
-
|
|
1718
|
-
|
|
1719
|
-
|
|
1720
|
-
|
|
1735
|
+
function generateTurboDockerfile(options) {
|
|
1736
|
+
const { baseImage, port, healthCheckPath, turboPackage, packageManager } = options;
|
|
1737
|
+
const pm = getPmConfig(packageManager);
|
|
1738
|
+
const installPm = pm.install ? `RUN ${pm.install}` : "";
|
|
1739
|
+
const turboInstallCmd = getTurboInstallCmd(packageManager);
|
|
1740
|
+
const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
|
|
1741
|
+
return `# syntax=docker/dockerfile:1
|
|
1742
|
+
# Stage 1: Prune monorepo
|
|
1743
|
+
FROM ${baseImage} AS pruner
|
|
1744
|
+
|
|
1745
|
+
WORKDIR /app
|
|
1746
|
+
|
|
1747
|
+
${installPm}
|
|
1748
|
+
|
|
1749
|
+
COPY . .
|
|
1750
|
+
|
|
1751
|
+
# Prune to only include necessary packages
|
|
1752
|
+
RUN ${turboCmd} prune ${turboPackage} --docker
|
|
1753
|
+
|
|
1754
|
+
# Stage 2: Install dependencies
|
|
1755
|
+
FROM ${baseImage} AS deps
|
|
1756
|
+
|
|
1757
|
+
WORKDIR /app
|
|
1758
|
+
|
|
1759
|
+
${installPm}
|
|
1760
|
+
|
|
1761
|
+
# Copy pruned lockfile and package.jsons
|
|
1762
|
+
COPY --from=pruner /app/out/${pm.lockfile} ./
|
|
1763
|
+
COPY --from=pruner /app/out/json/ ./
|
|
1764
|
+
|
|
1765
|
+
# Install dependencies (no frozen-lockfile since turbo prune creates a subset)
|
|
1766
|
+
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
|
|
1767
|
+
${turboInstallCmd}
|
|
1768
|
+
|
|
1769
|
+
# Stage 3: Build
|
|
1770
|
+
FROM deps AS builder
|
|
1771
|
+
|
|
1772
|
+
WORKDIR /app
|
|
1773
|
+
|
|
1774
|
+
# Copy pruned source
|
|
1775
|
+
COPY --from=pruner /app/out/full/ ./
|
|
1776
|
+
|
|
1777
|
+
# Build production server using CLI from npm
|
|
1778
|
+
RUN ${pm.dlx} @geekmidas/cli build --provider server --production
|
|
1779
|
+
|
|
1780
|
+
# Stage 4: Production
|
|
1781
|
+
FROM ${baseImage} AS runner
|
|
1782
|
+
|
|
1783
|
+
WORKDIR /app
|
|
1784
|
+
|
|
1785
|
+
RUN apk add --no-cache tini
|
|
1786
|
+
|
|
1787
|
+
RUN addgroup --system --gid 1001 nodejs && \\
|
|
1788
|
+
adduser --system --uid 1001 hono
|
|
1789
|
+
|
|
1790
|
+
COPY --from=builder --chown=hono:nodejs /app/.gkm/server/dist/server.mjs ./
|
|
1791
|
+
|
|
1792
|
+
ENV NODE_ENV=production
|
|
1793
|
+
ENV PORT=${port}
|
|
1794
|
+
|
|
1795
|
+
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \\
|
|
1796
|
+
CMD wget -q --spider http://localhost:${port}${healthCheckPath} || exit 1
|
|
1797
|
+
|
|
1798
|
+
USER hono
|
|
1799
|
+
|
|
1800
|
+
EXPOSE ${port}
|
|
1801
|
+
|
|
1802
|
+
ENTRYPOINT ["/sbin/tini", "--"]
|
|
1803
|
+
CMD ["node", "server.mjs"]
|
|
1804
|
+
`;
|
|
1805
|
+
}
|
|
1806
|
+
/**
|
|
1807
|
+
* Generate a slim Dockerfile for pre-built bundles
|
|
1808
|
+
*/
|
|
1809
|
+
function generateSlimDockerfile(options) {
|
|
1810
|
+
const { baseImage, port, healthCheckPath } = options;
|
|
1811
|
+
return `# Slim Dockerfile for pre-built production bundle
|
|
1812
|
+
FROM ${baseImage}
|
|
1813
|
+
|
|
1814
|
+
WORKDIR /app
|
|
1815
|
+
|
|
1816
|
+
# Install tini for proper signal handling as PID 1
|
|
1817
|
+
# Handles SIGTERM propagation and zombie process reaping
|
|
1818
|
+
RUN apk add --no-cache tini
|
|
1819
|
+
|
|
1820
|
+
# Create non-root user
|
|
1821
|
+
RUN addgroup --system --gid 1001 nodejs && \\
|
|
1822
|
+
adduser --system --uid 1001 hono
|
|
1823
|
+
|
|
1824
|
+
# Copy pre-built bundle
|
|
1825
|
+
COPY .gkm/server/dist/server.mjs ./
|
|
1826
|
+
|
|
1827
|
+
# Environment
|
|
1828
|
+
ENV NODE_ENV=production
|
|
1829
|
+
ENV PORT=${port}
|
|
1830
|
+
|
|
1831
|
+
# Health check
|
|
1832
|
+
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \\
|
|
1833
|
+
CMD wget -q --spider http://localhost:${port}${healthCheckPath} || exit 1
|
|
1834
|
+
|
|
1835
|
+
# Switch to non-root user
|
|
1836
|
+
USER hono
|
|
1837
|
+
|
|
1838
|
+
EXPOSE ${port}
|
|
1839
|
+
|
|
1840
|
+
# Use tini as entrypoint to handle PID 1 responsibilities
|
|
1841
|
+
ENTRYPOINT ["/sbin/tini", "--"]
|
|
1842
|
+
CMD ["node", "server.mjs"]
|
|
1843
|
+
`;
|
|
1844
|
+
}
|
|
1845
|
+
/**
|
|
1846
|
+
* Generate .dockerignore file
|
|
1847
|
+
*/
|
|
1848
|
+
function generateDockerignore() {
|
|
1849
|
+
return `# Dependencies
|
|
1850
|
+
node_modules
|
|
1851
|
+
.pnpm-store
|
|
1852
|
+
|
|
1853
|
+
# Build output (except what we need)
|
|
1854
|
+
.gkm/aws*
|
|
1855
|
+
.gkm/server/*.ts
|
|
1856
|
+
!.gkm/server/dist
|
|
1857
|
+
|
|
1858
|
+
# IDE and editor
|
|
1859
|
+
.idea
|
|
1860
|
+
.vscode
|
|
1861
|
+
*.swp
|
|
1862
|
+
*.swo
|
|
1863
|
+
|
|
1864
|
+
# Git
|
|
1865
|
+
.git
|
|
1866
|
+
.gitignore
|
|
1867
|
+
|
|
1868
|
+
# Logs
|
|
1869
|
+
*.log
|
|
1870
|
+
npm-debug.log*
|
|
1871
|
+
pnpm-debug.log*
|
|
1872
|
+
|
|
1873
|
+
# Test files
|
|
1874
|
+
**/*.test.ts
|
|
1875
|
+
**/*.spec.ts
|
|
1876
|
+
**/__tests__
|
|
1877
|
+
coverage
|
|
1878
|
+
|
|
1879
|
+
# Documentation
|
|
1880
|
+
docs
|
|
1881
|
+
*.md
|
|
1882
|
+
!README.md
|
|
1883
|
+
|
|
1884
|
+
# Environment files (handle secrets separately)
|
|
1885
|
+
.env
|
|
1886
|
+
.env.*
|
|
1887
|
+
!.env.example
|
|
1888
|
+
|
|
1889
|
+
# Docker files (don't copy recursively)
|
|
1890
|
+
Dockerfile*
|
|
1891
|
+
docker-compose*
|
|
1892
|
+
.dockerignore
|
|
1893
|
+
`;
|
|
1894
|
+
}
|
|
1895
|
+
/**
|
|
1896
|
+
* Generate docker-entrypoint.sh for custom startup logic
|
|
1897
|
+
*/
|
|
1898
|
+
function generateDockerEntrypoint() {
|
|
1899
|
+
return `#!/bin/sh
|
|
1900
|
+
set -e
|
|
1901
|
+
|
|
1902
|
+
# Run any custom startup scripts here
|
|
1903
|
+
# Example: wait for database
|
|
1904
|
+
# until nc -z $DB_HOST $DB_PORT; do
|
|
1905
|
+
# echo "Waiting for database..."
|
|
1906
|
+
# sleep 1
|
|
1907
|
+
# done
|
|
1908
|
+
|
|
1909
|
+
# Execute the main command
|
|
1910
|
+
exec "$@"
|
|
1911
|
+
`;
|
|
1912
|
+
}
|
|
1913
|
+
/**
|
|
1914
|
+
* Resolve Docker configuration from GkmConfig with defaults
|
|
1915
|
+
*/
|
|
1916
|
+
function resolveDockerConfig$1(config$1) {
|
|
1917
|
+
const docker = config$1.docker ?? {};
|
|
1918
|
+
let defaultImageName = "api";
|
|
1919
|
+
try {
|
|
1920
|
+
const pkg = __require(`${process.cwd()}/package.json`);
|
|
1921
|
+
if (pkg.name) defaultImageName = pkg.name.replace(/^@[^/]+\//, "");
|
|
1922
|
+
} catch {}
|
|
1923
|
+
return {
|
|
1924
|
+
registry: docker.registry ?? "",
|
|
1925
|
+
imageName: docker.imageName ?? defaultImageName,
|
|
1926
|
+
baseImage: docker.baseImage ?? "node:22-alpine",
|
|
1927
|
+
port: docker.port ?? 3e3,
|
|
1928
|
+
compose: docker.compose
|
|
1929
|
+
};
|
|
1930
|
+
}
|
|
1931
|
+
|
|
1932
|
+
//#endregion
|
|
1933
|
+
//#region src/docker/index.ts
|
|
1934
|
+
const logger$5 = console;
|
|
1935
|
+
/**
|
|
1936
|
+
* Docker command implementation
|
|
1937
|
+
* Generates Dockerfile, docker-compose.yml, and related files
|
|
1938
|
+
*
|
|
1939
|
+
* Default: Multi-stage Dockerfile that builds from source inside Docker
|
|
1940
|
+
* --slim: Slim Dockerfile that copies pre-built bundle (requires prior build)
|
|
1941
|
+
*/
|
|
1942
|
+
async function dockerCommand(options) {
|
|
1943
|
+
const config$1 = await loadConfig();
|
|
1944
|
+
const dockerConfig = resolveDockerConfig$1(config$1);
|
|
1945
|
+
const serverConfig = typeof config$1.providers?.server === "object" ? config$1.providers.server : void 0;
|
|
1946
|
+
const healthCheckPath = serverConfig?.production?.healthCheck ?? "/health";
|
|
1947
|
+
const useSlim = options.slim === true;
|
|
1948
|
+
if (useSlim) {
|
|
1949
|
+
const distDir = join(process.cwd(), ".gkm", "server", "dist");
|
|
1950
|
+
const hasBuild = existsSync(join(distDir, "server.mjs"));
|
|
1951
|
+
if (!hasBuild) throw new Error("Slim Dockerfile requires a pre-built bundle. Run `gkm build --provider server --production` first, or omit --slim to use multi-stage build.");
|
|
1721
1952
|
}
|
|
1722
|
-
|
|
1723
|
-
|
|
1724
|
-
|
|
1725
|
-
|
|
1726
|
-
|
|
1727
|
-
|
|
1728
|
-
|
|
1729
|
-
|
|
1730
|
-
|
|
1731
|
-
|
|
1732
|
-
|
|
1733
|
-
|
|
1734
|
-
|
|
1953
|
+
const dockerDir = join(process.cwd(), ".gkm", "docker");
|
|
1954
|
+
await mkdir(dockerDir, { recursive: true });
|
|
1955
|
+
const packageManager = detectPackageManager$1();
|
|
1956
|
+
const inMonorepo = isMonorepo();
|
|
1957
|
+
const hasTurbo = hasTurboConfig();
|
|
1958
|
+
let useTurbo = options.turbo ?? false;
|
|
1959
|
+
if (inMonorepo && !useSlim) if (hasTurbo) {
|
|
1960
|
+
useTurbo = true;
|
|
1961
|
+
logger$5.log(" Detected monorepo with turbo.json - using turbo prune");
|
|
1962
|
+
} else throw new Error("Monorepo detected but turbo.json not found.\n\nDocker builds in monorepos require Turborepo for proper dependency isolation.\n\nTo fix this:\n 1. Install turbo: pnpm add -Dw turbo\n 2. Create turbo.json in your monorepo root\n 3. Run this command again\n\nSee: https://turbo.build/repo/docs/guides/tools/docker");
|
|
1963
|
+
let turboPackage = options.turboPackage ?? dockerConfig.imageName;
|
|
1964
|
+
if (useTurbo && !options.turboPackage) try {
|
|
1965
|
+
const pkg = __require(`${process.cwd()}/package.json`);
|
|
1966
|
+
if (pkg.name) {
|
|
1967
|
+
turboPackage = pkg.name;
|
|
1968
|
+
logger$5.log(` Turbo package: ${turboPackage}`);
|
|
1735
1969
|
}
|
|
1736
1970
|
} catch {}
|
|
1737
|
-
const
|
|
1738
|
-
|
|
1739
|
-
|
|
1740
|
-
|
|
1971
|
+
const templateOptions = {
|
|
1972
|
+
imageName: dockerConfig.imageName,
|
|
1973
|
+
baseImage: dockerConfig.baseImage,
|
|
1974
|
+
port: dockerConfig.port,
|
|
1975
|
+
healthCheckPath,
|
|
1976
|
+
prebuilt: useSlim,
|
|
1977
|
+
turbo: useTurbo,
|
|
1978
|
+
turboPackage,
|
|
1979
|
+
packageManager
|
|
1741
1980
|
};
|
|
1742
|
-
|
|
1743
|
-
|
|
1744
|
-
|
|
1745
|
-
|
|
1746
|
-
logger$
|
|
1747
|
-
|
|
1748
|
-
|
|
1749
|
-
|
|
1750
|
-
|
|
1751
|
-
|
|
1981
|
+
const dockerfile = useSlim ? generateSlimDockerfile(templateOptions) : generateMultiStageDockerfile(templateOptions);
|
|
1982
|
+
const dockerMode = useSlim ? "slim" : useTurbo ? "turbo" : "multi-stage";
|
|
1983
|
+
const dockerfilePath = join(dockerDir, "Dockerfile");
|
|
1984
|
+
await writeFile(dockerfilePath, dockerfile);
|
|
1985
|
+
logger$5.log(`Generated: .gkm/docker/Dockerfile (${dockerMode}, ${packageManager})`);
|
|
1986
|
+
const composeOptions = {
|
|
1987
|
+
imageName: dockerConfig.imageName,
|
|
1988
|
+
registry: options.registry ?? dockerConfig.registry,
|
|
1989
|
+
port: dockerConfig.port,
|
|
1990
|
+
healthCheckPath,
|
|
1991
|
+
services: dockerConfig.compose?.services ?? {}
|
|
1992
|
+
};
|
|
1993
|
+
const hasServices = Array.isArray(composeOptions.services) ? composeOptions.services.length > 0 : Object.keys(composeOptions.services).length > 0;
|
|
1994
|
+
const dockerCompose = hasServices ? generateDockerCompose(composeOptions) : generateMinimalDockerCompose(composeOptions);
|
|
1995
|
+
const composePath = join(dockerDir, "docker-compose.yml");
|
|
1996
|
+
await writeFile(composePath, dockerCompose);
|
|
1997
|
+
logger$5.log("Generated: .gkm/docker/docker-compose.yml");
|
|
1998
|
+
const dockerignore = generateDockerignore();
|
|
1999
|
+
const dockerignorePath = join(process.cwd(), ".dockerignore");
|
|
2000
|
+
await writeFile(dockerignorePath, dockerignore);
|
|
2001
|
+
logger$5.log("Generated: .dockerignore (project root)");
|
|
2002
|
+
const entrypoint = generateDockerEntrypoint();
|
|
2003
|
+
const entrypointPath = join(dockerDir, "docker-entrypoint.sh");
|
|
2004
|
+
await writeFile(entrypointPath, entrypoint);
|
|
2005
|
+
logger$5.log("Generated: .gkm/docker/docker-entrypoint.sh");
|
|
2006
|
+
const result = {
|
|
2007
|
+
dockerfile: dockerfilePath,
|
|
2008
|
+
dockerCompose: composePath,
|
|
2009
|
+
dockerignore: dockerignorePath,
|
|
2010
|
+
entrypoint: entrypointPath
|
|
2011
|
+
};
|
|
2012
|
+
if (options.build) await buildDockerImage(dockerConfig.imageName, options);
|
|
2013
|
+
if (options.push) await pushDockerImage(dockerConfig.imageName, options);
|
|
2014
|
+
return result;
|
|
1752
2015
|
}
|
|
1753
2016
|
/**
|
|
1754
|
-
*
|
|
2017
|
+
* Ensure lockfile exists in the build context
|
|
2018
|
+
* For monorepos, copies from workspace root if needed
|
|
2019
|
+
* Returns cleanup function if file was copied
|
|
1755
2020
|
*/
|
|
1756
|
-
|
|
1757
|
-
|
|
1758
|
-
if (!
|
|
1759
|
-
|
|
1760
|
-
|
|
1761
|
-
else throw new Error("Dokploy endpoint not specified.\nEither run \"gkm login --service dokploy\" first, or provide --endpoint.");
|
|
1762
|
-
}
|
|
1763
|
-
const { resource } = options;
|
|
1764
|
-
const token = await getApiToken();
|
|
1765
|
-
if (resource === "projects") {
|
|
1766
|
-
logger$2.log(`\n📁 Projects in ${endpoint}:`);
|
|
1767
|
-
const projects = await getProjects(endpoint, token);
|
|
1768
|
-
if (projects.length === 0) {
|
|
1769
|
-
logger$2.log(" No projects found");
|
|
1770
|
-
return;
|
|
1771
|
-
}
|
|
1772
|
-
for (const project of projects) {
|
|
1773
|
-
logger$2.log(`\n ${project.name} (${project.projectId})`);
|
|
1774
|
-
if (project.description) logger$2.log(` ${project.description}`);
|
|
1775
|
-
}
|
|
1776
|
-
} else if (resource === "registries") {
|
|
1777
|
-
logger$2.log(`\n🐳 Registries in ${endpoint}:`);
|
|
1778
|
-
const registries = await getRegistries(endpoint, token);
|
|
1779
|
-
if (registries.length === 0) {
|
|
1780
|
-
logger$2.log(" No registries configured");
|
|
1781
|
-
logger$2.log(" Add a registry in Dokploy: Settings > Docker Registry");
|
|
1782
|
-
return;
|
|
1783
|
-
}
|
|
1784
|
-
for (const registry of registries) {
|
|
1785
|
-
logger$2.log(`\n ${registry.registryName} (${registry.registryId})`);
|
|
1786
|
-
logger$2.log(` URL: ${registry.registryUrl}`);
|
|
1787
|
-
logger$2.log(` Username: ${registry.username}`);
|
|
1788
|
-
if (registry.imagePrefix) logger$2.log(` Prefix: ${registry.imagePrefix}`);
|
|
1789
|
-
}
|
|
2021
|
+
function ensureLockfile(cwd) {
|
|
2022
|
+
const lockfilePath = findLockfilePath(cwd);
|
|
2023
|
+
if (!lockfilePath) {
|
|
2024
|
+
logger$5.warn("\n⚠️ No lockfile found. Docker build may fail or use stale dependencies.");
|
|
2025
|
+
return null;
|
|
1790
2026
|
}
|
|
2027
|
+
const lockfileName = basename(lockfilePath);
|
|
2028
|
+
const localLockfile = join(cwd, lockfileName);
|
|
2029
|
+
if (lockfilePath === localLockfile) return null;
|
|
2030
|
+
logger$5.log(` Copying ${lockfileName} from monorepo root...`);
|
|
2031
|
+
copyFileSync(lockfilePath, localLockfile);
|
|
2032
|
+
return () => {
|
|
2033
|
+
try {
|
|
2034
|
+
unlinkSync(localLockfile);
|
|
2035
|
+
} catch {}
|
|
2036
|
+
};
|
|
1791
2037
|
}
- };
- const
- }
- /** Normalize services config to a consistent format - returns Map of service name to full image reference */
- function normalizeServices(services) {
- const result = /* @__PURE__ */ new Map();
- if (Array.isArray(services)) for (const name$1 of services) result.set(name$1, getDefaultImage(name$1));
- else for (const [name$1, config$1] of Object.entries(services)) {
- const serviceName = name$1;
- if (config$1 === true) result.set(serviceName, getDefaultImage(serviceName));
- else if (config$1 && typeof config$1 === "object") {
- const serviceConfig = config$1;
- if (serviceConfig.image) result.set(serviceName, serviceConfig.image);
- else {
- const version$1 = serviceConfig.version ?? DEFAULT_SERVICE_VERSIONS[serviceName];
- result.set(serviceName, `${DEFAULT_SERVICE_IMAGES[serviceName]}:${version$1}`);
+ /**
+ * Build Docker image
+ * Uses BuildKit for cache mount support
+ */
+ async function buildDockerImage(imageName, options) {
+ const tag = options.tag ?? "latest";
+ const registry = options.registry;
+ const fullImageName = registry ? `${registry}/${imageName}:${tag}` : `${imageName}:${tag}`;
+ logger$5.log(`\n🐳 Building Docker image: ${fullImageName}`);
+ const cwd = process.cwd();
+ const cleanup = ensureLockfile(cwd);
+ try {
+ execSync(`DOCKER_BUILDKIT=1 docker build -f .gkm/docker/Dockerfile -t ${fullImageName} .`, {
+ cwd,
+ stdio: "inherit",
+ env: {
+ ...process.env,
+ DOCKER_BUILDKIT: "1"
  }
- }
+ });
+ logger$5.log(`✅ Docker image built: ${fullImageName}`);
+ } catch (error) {
+ throw new Error(`Failed to build Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
+ } finally {
+ cleanup?.();
  }
- return result;
  }
  /**
- *
+ * Push Docker image to registry
  */
- function
- const
- const
|
|
1846
|
-
ports:
|
|
1847
|
-
- "\${PORT:-${port}}:${port}"
|
|
1848
|
-
environment:
|
|
1849
|
-
- NODE_ENV=production
|
|
1850
|
-
`;
|
|
1851
|
-
if (serviceMap.has("postgres")) yaml += ` - DATABASE_URL=\${DATABASE_URL:-postgresql://postgres:postgres@postgres:5432/app}
|
|
1852
|
-
`;
|
|
1853
|
-
if (serviceMap.has("redis")) yaml += ` - REDIS_URL=\${REDIS_URL:-redis://redis:6379}
|
|
1854
|
-
`;
|
|
1855
|
-
if (serviceMap.has("rabbitmq")) yaml += ` - RABBITMQ_URL=\${RABBITMQ_URL:-amqp://rabbitmq:5672}
|
|
1856
|
-
`;
|
|
1857
|
-
yaml += ` healthcheck:
|
|
1858
|
-
test: ["CMD", "wget", "-q", "--spider", "http://localhost:${port}${healthCheckPath}"]
|
|
1859
|
-
interval: 30s
|
|
1860
|
-
timeout: 3s
|
|
1861
|
-
retries: 3
|
|
1862
|
-
`;
|
|
1863
|
-
if (serviceMap.size > 0) {
|
|
1864
|
-
yaml += ` depends_on:
|
|
1865
|
-
`;
|
|
1866
|
-
for (const serviceName of serviceMap.keys()) yaml += ` ${serviceName}:
|
|
1867
|
-
condition: service_healthy
|
|
1868
|
-
`;
|
|
2068
|
+
async function pushDockerImage(imageName, options) {
|
|
2069
|
+
const tag = options.tag ?? "latest";
|
|
2070
|
+
const registry = options.registry;
|
|
2071
|
+
if (!registry) throw new Error("Registry is required to push Docker image. Use --registry or configure docker.registry in gkm.config.ts");
|
|
2072
|
+
const fullImageName = `${registry}/${imageName}:${tag}`;
|
|
2073
|
+
logger$5.log(`\n🚀 Pushing Docker image: ${fullImageName}`);
|
|
2074
|
+
try {
|
|
2075
|
+
execSync(`docker push ${fullImageName}`, {
|
|
2076
|
+
cwd: process.cwd(),
|
|
2077
|
+
stdio: "inherit"
|
|
2078
|
+
});
|
|
2079
|
+
logger$5.log(`✅ Docker image pushed: ${fullImageName}`);
|
|
2080
|
+
} catch (error) {
|
|
2081
|
+
throw new Error(`Failed to push Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
|
|
1869
2082
|
}
|
|
1881
|
-
POSTGRES_PASSWORD: \${POSTGRES_PASSWORD:-postgres}
|
|
1882
|
-
POSTGRES_DB: \${POSTGRES_DB:-app}
|
|
1883
|
-
volumes:
|
|
1884
|
-
- postgres_data:/var/lib/postgresql/data
|
|
1885
|
-
healthcheck:
|
|
1886
|
-
test: ["CMD-SHELL", "pg_isready -U postgres"]
|
|
1887
|
-
interval: 5s
|
|
1888
|
-
timeout: 5s
|
|
1889
|
-
retries: 5
|
|
1890
|
-
networks:
|
|
1891
|
-
- app-network
|
|
1892
|
-
`;
|
|
1893
|
-
const redisImage = serviceMap.get("redis");
|
|
1894
|
-
if (redisImage) yaml += `
|
|
1895
|
-
redis:
|
|
1896
|
-
image: ${redisImage}
|
|
1897
|
-
container_name: redis
|
|
1898
|
-
restart: unless-stopped
|
|
1899
|
-
volumes:
|
|
1900
|
-
- redis_data:/data
|
|
1901
|
-
healthcheck:
|
|
1902
|
-
test: ["CMD", "redis-cli", "ping"]
|
|
1903
|
-
interval: 5s
|
|
1904
|
-
timeout: 5s
|
|
1905
|
-
retries: 5
|
|
1906
|
-
networks:
|
|
1907
|
-
- app-network
|
|
1908
|
-
`;
|
|
1909
|
-
const rabbitmqImage = serviceMap.get("rabbitmq");
|
|
1910
|
-
if (rabbitmqImage) yaml += `
|
|
1911
|
-
rabbitmq:
|
|
1912
|
-
image: ${rabbitmqImage}
|
|
1913
|
-
container_name: rabbitmq
|
|
1914
|
-
restart: unless-stopped
|
|
1915
|
-
environment:
|
|
1916
|
-
RABBITMQ_DEFAULT_USER: \${RABBITMQ_USER:-guest}
|
|
1917
|
-
RABBITMQ_DEFAULT_PASS: \${RABBITMQ_PASSWORD:-guest}
|
|
1918
|
-
ports:
|
|
1919
|
-
- "15672:15672" # Management UI
|
|
1920
|
-
volumes:
|
|
1921
|
-
- rabbitmq_data:/var/lib/rabbitmq
|
|
1922
|
-
healthcheck:
|
|
1923
|
-
test: ["CMD", "rabbitmq-diagnostics", "-q", "ping"]
|
|
1924
|
-
interval: 10s
|
|
1925
|
-
timeout: 5s
|
|
1926
|
-
retries: 5
|
|
1927
|
-
networks:
|
|
1928
|
-
- app-network
|
|
1929
|
-
`;
|
|
1930
|
-
yaml += `
|
|
1931
|
-
volumes:
|
|
1932
|
-
`;
|
|
1933
|
-
if (serviceMap.has("postgres")) yaml += ` postgres_data:
|
|
1934
|
-
`;
|
|
1935
|
-
if (serviceMap.has("redis")) yaml += ` redis_data:
|
|
1936
|
-
`;
|
|
1937
|
-
if (serviceMap.has("rabbitmq")) yaml += ` rabbitmq_data:
|
|
1938
|
-
`;
|
|
1939
|
-
yaml += `
|
|
1940
|
-
networks:
|
|
1941
|
-
app-network:
|
|
1942
|
-
driver: bridge
|
|
1943
|
-
`;
|
|
1944
|
-
return yaml;
|
|
2083
|
+
}
|
|
2084
|
+
|
|
2085
|
+
//#endregion
|
|
2086
|
+
//#region src/deploy/docker.ts
|
|
2087
|
+
const logger$4 = console;
|
|
2088
|
+
/**
|
|
2089
|
+
* Get the full image reference
|
|
2090
|
+
*/
|
|
2091
|
+
function getImageRef(registry, imageName, tag) {
|
|
2092
|
+
if (registry) return `${registry}/${imageName}:${tag}`;
|
|
2093
|
+
return `${imageName}:${tag}`;
|
|
1945
2094
|
}
|
|
1946
2095
|
/**
|
|
1947
|
-
*
|
|
2096
|
+
* Build Docker image
|
|
1948
2097
|
*/
|
|
1949
|
-
function
|
|
1950
|
-
|
|
1951
|
-
const
|
|
1952
|
-
|
|
1953
|
-
|
|
1954
|
-
|
|
1955
|
-
|
|
1956
|
-
|
|
1957
|
-
|
|
1958
|
-
|
|
1959
|
-
|
|
1960
|
-
|
|
1961
|
-
|
|
1962
|
-
|
|
1963
|
-
|
|
1964
|
-
|
|
1965
|
-
|
|
1966
|
-
|
|
1967
|
-
|
|
1968
|
-
|
|
1969
|
-
|
|
1970
|
-
|
|
1971
|
-
|
|
1972
|
-
|
|
1973
|
-
|
|
1974
|
-
|
|
1975
|
-
|
|
1976
|
-
|
|
1977
|
-
|
|
2098
|
+
async function buildImage(imageRef) {
|
|
2099
|
+
logger$4.log(`\n🔨 Building Docker image: ${imageRef}`);
|
|
2100
|
+
const cwd = process.cwd();
|
|
2101
|
+
const inMonorepo = isMonorepo(cwd);
|
|
2102
|
+
if (inMonorepo) logger$4.log(" Generating Dockerfile for monorepo (turbo prune)...");
|
|
2103
|
+
else logger$4.log(" Generating Dockerfile...");
|
|
2104
|
+
await dockerCommand({});
|
|
2105
|
+
let buildCwd = cwd;
|
|
2106
|
+
let dockerfilePath = ".gkm/docker/Dockerfile";
|
|
2107
|
+
if (inMonorepo) {
|
|
2108
|
+
const lockfilePath = findLockfilePath(cwd);
|
|
2109
|
+
if (lockfilePath) {
|
|
2110
|
+
const monorepoRoot = dirname(lockfilePath);
|
|
2111
|
+
const appRelPath = relative(monorepoRoot, cwd);
|
|
2112
|
+
dockerfilePath = join(appRelPath, ".gkm/docker/Dockerfile");
|
|
2113
|
+
buildCwd = monorepoRoot;
|
|
2114
|
+
logger$4.log(` Building from monorepo root: ${monorepoRoot}`);
|
|
2115
|
+
}
|
|
2116
|
+
}
|
|
2117
|
+
try {
|
|
2118
|
+
execSync(`DOCKER_BUILDKIT=1 docker build --platform linux/amd64 -f ${dockerfilePath} -t ${imageRef} .`, {
|
|
2119
|
+
cwd: buildCwd,
|
|
2120
|
+
stdio: "inherit",
|
|
2121
|
+
env: {
|
|
2122
|
+
...process.env,
|
|
2123
|
+
DOCKER_BUILDKIT: "1"
|
|
2124
|
+
}
|
|
2125
|
+
});
|
|
2126
|
+
logger$4.log(`✅ Image built: ${imageRef}`);
|
|
2127
|
+
} catch (error) {
|
|
2128
|
+
throw new Error(`Failed to build Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
|
|
2129
|
+
}
|
|
1978
2130
|
}
|
|
1979
|
-
|
|
1980
|
-
//#endregion
|
|
1981
|
-
//#region src/docker/templates.ts
|
|
1982
2131
|
/**
|
|
1983
|
-
*
|
|
1984
|
-
* Walks up the directory tree to find lockfile (for monorepos)
|
|
2132
|
+
* Push Docker image to registry
|
|
1985
2133
|
*/
|
|
1986
|
-
function
|
|
1987
|
-
|
|
1988
|
-
|
|
1989
|
-
|
|
1990
|
-
|
|
1991
|
-
|
|
1992
|
-
|
|
1993
|
-
|
|
1994
|
-
|
|
1995
|
-
|
|
1996
|
-
for (const [lockfile, pm] of lockfiles) if (existsSync(join(dir, lockfile))) return pm;
|
|
1997
|
-
dir = dirname(dir);
|
|
2134
|
+
async function pushImage(imageRef) {
|
|
2135
|
+
logger$4.log(`\n☁️ Pushing image: ${imageRef}`);
|
|
2136
|
+
try {
|
|
2137
|
+
execSync(`docker push ${imageRef}`, {
|
|
2138
|
+
cwd: process.cwd(),
|
|
2139
|
+
stdio: "inherit"
|
|
2140
|
+
});
|
|
2141
|
+
logger$4.log(`✅ Image pushed: ${imageRef}`);
|
|
2142
|
+
} catch (error) {
|
|
2143
|
+
throw new Error(`Failed to push Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
|
|
1998
2144
|
}
|
|
1999
|
-
for (const [lockfile, pm] of lockfiles) if (existsSync(join(root, lockfile))) return pm;
|
|
2000
|
-
return "pnpm";
|
|
2001
2145
|
}
|
|
2002
2146
|
/**
|
|
2003
|
-
*
|
|
2147
|
+
* Deploy using Docker (build and optionally push image)
|
|
2004
2148
|
*/
|
|
2005
|
-
function
|
|
2006
|
-
const
|
|
2007
|
-
|
|
2008
|
-
|
|
2009
|
-
|
|
2010
|
-
|
|
2011
|
-
|
|
2012
|
-
|
|
2013
|
-
|
|
2014
|
-
|
|
2015
|
-
|
|
2016
|
-
|
|
2017
|
-
|
|
2018
|
-
|
|
2019
|
-
|
|
2020
|
-
|
|
2021
|
-
|
|
2022
|
-
|
|
2023
|
-
|
|
2024
|
-
|
|
2025
|
-
addGlobal: "npm install -g"
|
|
2026
|
-
},
|
|
2027
|
-
yarn: {
|
|
2028
|
-
install: "corepack enable && corepack prepare yarn@stable --activate",
|
|
2029
|
-
lockfile: "yarn.lock",
|
|
2030
|
-
fetch: "",
|
|
2031
|
-
installCmd: "yarn install --frozen-lockfile",
|
|
2032
|
-
cacheTarget: "/root/.yarn/cache",
|
|
2033
|
-
cacheId: "yarn",
|
|
2034
|
-
run: "yarn",
|
|
2035
|
-
addGlobal: "yarn global add"
|
|
2036
|
-
},
|
|
2037
|
-
bun: {
|
|
2038
|
-
install: "npm install -g bun",
|
|
2039
|
-
lockfile: "bun.lockb",
|
|
2040
|
-
fetch: "",
|
|
2041
|
-
installCmd: "bun install --frozen-lockfile",
|
|
2042
|
-
cacheTarget: "/root/.bun/install/cache",
|
|
2043
|
-
cacheId: "bun",
|
|
2044
|
-
run: "bun run",
|
|
2045
|
-
addGlobal: "bun add -g"
|
|
2046
|
-
}
|
|
2149
|
+
async function deployDocker(options) {
|
|
2150
|
+
const { stage, tag, skipPush, masterKey, config: config$1 } = options;
|
|
2151
|
+
const imageName = config$1.imageName ?? "app";
|
|
2152
|
+
const imageRef = getImageRef(config$1.registry, imageName, tag);
|
|
2153
|
+
await buildImage(imageRef);
|
|
2154
|
+
if (!skipPush) if (!config$1.registry) logger$4.warn("\n⚠️ No registry configured. Use --skip-push or configure docker.registry in gkm.config.ts");
|
|
2155
|
+
else await pushImage(imageRef);
|
|
2156
|
+
logger$4.log("\n✅ Docker deployment ready!");
|
|
2157
|
+
logger$4.log(`\n📋 Deployment details:`);
|
|
2158
|
+
logger$4.log(` Image: ${imageRef}`);
|
|
2159
|
+
logger$4.log(` Stage: ${stage}`);
|
|
2160
|
+
if (masterKey) {
|
|
2161
|
+
logger$4.log(`\n🔐 Deploy with this environment variable:`);
|
|
2162
|
+
logger$4.log(` GKM_MASTER_KEY=${masterKey}`);
|
|
2163
|
+
logger$4.log("\n Example docker run:");
|
|
2164
|
+
logger$4.log(` docker run -e GKM_MASTER_KEY=${masterKey} ${imageRef}`);
|
|
2165
|
+
}
|
|
2166
|
+
return {
|
|
2167
|
+
imageRef,
|
|
2168
|
+
masterKey
|
|
2047
2169
|
};
|
|
2048
|
-
return configs[pm];
|
|
2049
2170
|
}
|
|
2050
2171
|
/**
|
|
2051
|
-
*
|
|
2052
|
-
* Optimized for build speed with:
|
|
2053
|
-
* - BuildKit cache mounts for package manager store
|
|
2054
|
-
* - pnpm fetch for better layer caching (when using pnpm)
|
|
2055
|
-
* - Optional turbo prune for monorepos
|
|
2172
|
+
* Resolve Docker deploy config from gkm config
|
|
2056
2173
|
*/
|
|
2057
|
-
function
|
|
2058
|
-
|
|
2059
|
-
|
|
2060
|
-
|
|
2061
|
-
|
|
2062
|
-
|
|
2063
|
-
const pm = getPmConfig(packageManager);
|
|
2064
|
-
const installPm = pm.install ? `\n# Install ${packageManager}\nRUN ${pm.install}\n` : "";
|
|
2065
|
-
const hasFetch = packageManager === "pnpm";
|
|
2066
|
-
const depsStage = hasFetch ? `# Copy lockfile first for better caching
|
|
2067
|
-
COPY ${pm.lockfile} ./
|
|
2068
|
-
|
|
2069
|
-
# Fetch dependencies (downloads to virtual store, cached separately)
|
|
2070
|
-
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
|
|
2071
|
-
${pm.fetch}
|
|
2072
|
-
|
|
2073
|
-
# Copy package.json after fetch
|
|
2074
|
-
COPY package.json ./
|
|
2075
|
-
|
|
2076
|
-
# Install from cache (fast - no network needed)
|
|
2077
|
-
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
|
|
2078
|
-
${pm.installCmd}` : `# Copy package files
|
|
2079
|
-
COPY package.json ${pm.lockfile} ./
|
|
2080
|
-
|
|
2081
|
-
# Install dependencies with cache
|
|
2082
|
-
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
|
|
2083
|
-
${pm.installCmd}`;
|
|
2084
|
-
return `# syntax=docker/dockerfile:1
|
|
2085
|
-
# Stage 1: Dependencies
|
|
2086
|
-
FROM ${baseImage} AS deps
|
|
2087
|
-
|
|
2088
|
-
WORKDIR /app
|
|
2089
|
-
${installPm}
|
|
2090
|
-
${depsStage}
|
|
2091
|
-
|
|
2092
|
-
# Stage 2: Build
|
|
2093
|
-
FROM deps AS builder
|
|
2094
|
-
|
|
2095
|
-
WORKDIR /app
|
|
2096
|
-
|
|
2097
|
-
# Copy source (deps already installed)
|
|
2098
|
-
COPY . .
|
|
2099
|
-
|
|
2100
|
-
# Build production server
|
|
2101
|
-
RUN ${pm.run} gkm build --provider server --production
|
|
2102
|
-
|
|
2103
|
-
# Stage 3: Production
|
|
2104
|
-
FROM ${baseImage} AS runner
|
|
2105
|
-
|
|
2106
|
-
WORKDIR /app
|
|
2107
|
-
|
|
2108
|
-
# Install tini for proper signal handling as PID 1
|
|
2109
|
-
RUN apk add --no-cache tini
|
|
2110
|
-
|
|
2111
|
-
# Create non-root user
|
|
2112
|
-
RUN addgroup --system --gid 1001 nodejs && \\
|
|
2113
|
-
adduser --system --uid 1001 hono
|
|
2114
|
-
|
|
2115
|
-
# Copy bundled server
|
|
2116
|
-
COPY --from=builder --chown=hono:nodejs /app/.gkm/server/dist/server.mjs ./
|
|
2117
|
-
|
|
2118
|
-
# Environment
|
|
2119
|
-
ENV NODE_ENV=production
|
|
2120
|
-
ENV PORT=${port}
|
|
2121
|
-
|
|
2122
|
-
# Health check
|
|
2123
|
-
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \\
|
|
2124
|
-
CMD wget -q --spider http://localhost:${port}${healthCheckPath} || exit 1
|
|
2125
|
-
|
|
2126
|
-
# Switch to non-root user
|
|
2127
|
-
USER hono
|
|
2174
|
+
function resolveDockerConfig(config$1) {
|
|
2175
|
+
return {
|
|
2176
|
+
registry: config$1.docker?.registry,
|
|
2177
|
+
imageName: config$1.docker?.imageName
|
|
2178
|
+
};
|
|
2179
|
+
}
|
|
2128
2180
|
|
|
2129
|
-
|
|
2181
|
+
//#endregion
|
|
2182
|
+
//#region src/deploy/dokploy.ts
|
|
2183
|
+
const logger$3 = console;
|
|
2184
|
+
/**
|
|
2185
|
+
* Get the Dokploy API token from stored credentials or environment
|
|
2186
|
+
*/
|
|
2187
|
+
async function getApiToken$1() {
|
|
2188
|
+
const token = await getDokployToken();
|
|
2189
|
+
if (!token) throw new Error("Dokploy credentials not found.\nRun \"gkm login --service dokploy\" to authenticate, or set DOKPLOY_API_TOKEN.");
|
|
2190
|
+
return token;
|
|
2191
|
+
}
|
|
2192
|
+
/**
|
|
2193
|
+
* Create a Dokploy API client
|
|
2194
|
+
*/
|
|
2195
|
+
async function createApi$1(endpoint) {
|
|
2196
|
+
const token = await getApiToken$1();
|
|
2197
|
+
return new DokployApi({
|
|
2198
|
+
baseUrl: endpoint,
|
|
2199
|
+
token
|
|
2200
|
+
});
|
|
2201
|
+
}
|
|
2202
|
+
/**
|
|
2203
|
+
* Deploy to Dokploy
|
|
2204
|
+
*/
|
|
2205
|
+
async function deployDokploy(options) {
|
|
2206
|
+
const { stage, imageRef, masterKey, config: config$1 } = options;
|
|
2207
|
+
logger$3.log(`\n🎯 Deploying to Dokploy...`);
|
|
2208
|
+
logger$3.log(` Endpoint: ${config$1.endpoint}`);
|
|
2209
|
+
logger$3.log(` Application: ${config$1.applicationId}`);
|
|
2210
|
+
const api = await createApi$1(config$1.endpoint);
|
|
2211
|
+
logger$3.log(` Configuring Docker image: ${imageRef}`);
|
|
2212
|
+
const registryOptions = {};
|
|
2213
|
+
if (config$1.registryId) {
|
|
2214
|
+
registryOptions.registryId = config$1.registryId;
|
|
2215
|
+
logger$3.log(` Using Dokploy registry: ${config$1.registryId}`);
|
|
2216
|
+
} else {
|
|
2217
|
+
const storedRegistryId = await getDokployRegistryId();
|
|
2218
|
+
if (storedRegistryId) {
|
|
2219
|
+
registryOptions.registryId = storedRegistryId;
|
|
2220
|
+
logger$3.log(` Using stored Dokploy registry: ${storedRegistryId}`);
|
|
2221
|
+
} else if (config$1.registryCredentials) {
|
|
2222
|
+
registryOptions.username = config$1.registryCredentials.username;
|
|
2223
|
+
registryOptions.password = config$1.registryCredentials.password;
|
|
2224
|
+
registryOptions.registryUrl = config$1.registryCredentials.registryUrl;
|
|
2225
|
+
logger$3.log(` Using registry credentials for: ${config$1.registryCredentials.registryUrl}`);
|
|
2226
|
+
} else {
|
|
2227
|
+
const username = process.env.DOCKER_REGISTRY_USERNAME;
|
|
2228
|
+
const password = process.env.DOCKER_REGISTRY_PASSWORD;
|
|
2229
|
+
const registryUrl = process.env.DOCKER_REGISTRY_URL || config$1.registry;
|
|
2230
|
+
if (username && password && registryUrl) {
|
|
2231
|
+
registryOptions.username = username;
|
|
2232
|
+
registryOptions.password = password;
|
|
2233
|
+
registryOptions.registryUrl = registryUrl;
|
|
2234
|
+
logger$3.log(` Using registry credentials from environment`);
|
|
2235
|
+
}
|
|
2236
|
+
}
|
|
2237
|
+
}
|
|
2238
|
+
await api.saveDockerProvider(config$1.applicationId, imageRef, registryOptions);
|
|
2239
|
+
logger$3.log(" ✓ Docker provider configured");
|
|
2240
|
+
const envVars = {};
|
|
2241
|
+
if (masterKey) envVars.GKM_MASTER_KEY = masterKey;
|
|
2242
|
+
if (Object.keys(envVars).length > 0) {
|
|
2243
|
+
logger$3.log(" Updating environment variables...");
|
|
2244
|
+
const envString = Object.entries(envVars).map(([key, value]) => `${key}=${value}`).join("\n");
|
|
2245
|
+
await api.saveApplicationEnv(config$1.applicationId, envString);
|
|
2246
|
+
logger$3.log(" ✓ Environment variables updated");
|
|
2247
|
+
}
|
|
2248
|
+
logger$3.log(" Triggering deployment...");
|
|
2249
|
+
await api.deployApplication(config$1.applicationId);
|
|
2250
|
+
logger$3.log(" ✓ Deployment triggered");
|
|
2251
|
+
logger$3.log("\n✅ Dokploy deployment initiated!");
|
|
2252
|
+
logger$3.log(`\n📋 Deployment details:`);
|
|
2253
|
+
logger$3.log(` Image: ${imageRef}`);
|
|
2254
|
+
logger$3.log(` Stage: ${stage}`);
|
|
2255
|
+
logger$3.log(` Application ID: ${config$1.applicationId}`);
|
|
2256
|
+
if (masterKey) logger$3.log(`\n🔐 GKM_MASTER_KEY has been set in Dokploy environment`);
|
|
2257
|
+
const deploymentUrl = `${config$1.endpoint}/project/${config$1.projectId}`;
|
|
2258
|
+
logger$3.log(`\n🔗 View deployment: ${deploymentUrl}`);
|
|
2259
|
+
return {
|
|
2260
|
+
imageRef,
|
|
2261
|
+
masterKey,
|
|
2262
|
+
url: deploymentUrl
|
|
2263
|
+
};
|
|
2264
|
+
}
|
|
2130
2265
|
|
|
2131
|
-
|
|
2132
|
-
|
|
2133
|
-
|
|
2134
|
-
|
|
2266
|
+
//#endregion
|
|
2267
|
+
//#region src/deploy/init.ts
|
|
2268
|
+
const logger$2 = console;
|
|
2269
|
+
/**
|
|
2270
|
+
* Get the Dokploy API token from stored credentials or environment
|
|
2271
|
+
*/
|
|
2272
|
+
async function getApiToken() {
|
|
2273
|
+
const token = await getDokployToken();
|
|
2274
|
+
if (!token) throw new Error("Dokploy credentials not found.\nRun \"gkm login --service dokploy\" to authenticate, or set DOKPLOY_API_TOKEN.");
|
|
2275
|
+
return token;
|
|
2135
2276
|
}
|
|
2136
2277
|
/**
|
|
2137
|
-
*
|
|
2138
|
-
* Uses turbo prune to create minimal Docker context
|
|
2278
|
+
* Get Dokploy endpoint from options or stored credentials
|
|
2139
2279
|
*/
|
|
2140
|
-
function
|
|
2141
|
-
|
|
2142
|
-
const
|
|
2143
|
-
|
|
2144
|
-
|
|
2145
|
-
const depsInstall = hasFetch ? `# Fetch and install from cache
|
|
2146
|
-
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
|
|
2147
|
-
${pm.fetch}
|
|
2148
|
-
|
|
2149
|
-
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
|
|
2150
|
-
${pm.installCmd}` : `# Install dependencies with cache
|
|
2151
|
-
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
|
|
2152
|
-
${pm.installCmd}`;
|
|
2153
|
-
return `# syntax=docker/dockerfile:1
|
|
2154
|
-
# Stage 1: Prune monorepo
|
|
2155
|
-
FROM ${baseImage} AS pruner
|
|
2156
|
-
|
|
2157
|
-
WORKDIR /app
|
|
2158
|
-
|
|
2159
|
-
${installPm}
|
|
2160
|
-
RUN ${pm.addGlobal} turbo
|
|
2161
|
-
|
|
2162
|
-
COPY . .
|
|
2163
|
-
|
|
2164
|
-
# Prune to only include necessary packages
|
|
2165
|
-
RUN turbo prune ${turboPackage} --docker
|
|
2166
|
-
|
|
2167
|
-
# Stage 2: Install dependencies
|
|
2168
|
-
FROM ${baseImage} AS deps
|
|
2169
|
-
|
|
2170
|
-
WORKDIR /app
|
|
2171
|
-
|
|
2172
|
-
${installPm}
|
|
2173
|
-
|
|
2174
|
-
# Copy pruned lockfile and package.jsons
|
|
2175
|
-
COPY --from=pruner /app/out/${pm.lockfile} ./
|
|
2176
|
-
COPY --from=pruner /app/out/json/ ./
|
|
2177
|
-
|
|
2178
|
-
${depsInstall}
|
|
2179
|
-
|
|
2180
|
-
# Stage 3: Build
|
|
2181
|
-
FROM deps AS builder
|
|
2182
|
-
|
|
2183
|
-
WORKDIR /app
|
|
2184
|
-
|
|
2185
|
-
# Copy pruned source
|
|
2186
|
-
COPY --from=pruner /app/out/full/ ./
|
|
2187
|
-
|
|
2188
|
-
# Build production server
|
|
2189
|
-
RUN ${pm.run} gkm build --provider server --production
|
|
2190
|
-
|
|
2191
|
-
# Stage 4: Production
|
|
2192
|
-
FROM ${baseImage} AS runner
|
|
2193
|
-
|
|
2194
|
-
WORKDIR /app
|
|
2195
|
-
|
|
2196
|
-
RUN apk add --no-cache tini
|
|
2197
|
-
|
|
2198
|
-
RUN addgroup --system --gid 1001 nodejs && \\
|
|
2199
|
-
adduser --system --uid 1001 hono
|
|
2200
|
-
|
|
2201
|
-
COPY --from=builder --chown=hono:nodejs /app/.gkm/server/dist/server.mjs ./
|
|
2202
|
-
|
|
2203
|
-
ENV NODE_ENV=production
|
|
2204
|
-
ENV PORT=${port}
|
|
2205
|
-
|
|
2206
|
-
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \\
|
|
2207
|
-
CMD wget -q --spider http://localhost:${port}${healthCheckPath} || exit 1
|
|
2208
|
-
|
|
2209
|
-
USER hono
|
|
2210
|
-
|
|
2211
|
-
EXPOSE ${port}
|
|
2212
|
-
|
|
2213
|
-
ENTRYPOINT ["/sbin/tini", "--"]
|
|
2214
|
-
CMD ["node", "server.mjs"]
|
|
2215
|
-
`;
|
|
2280
|
+
async function getEndpoint(providedEndpoint) {
|
|
2281
|
+
if (providedEndpoint) return providedEndpoint;
|
|
2282
|
+
const stored = await getDokployCredentials();
|
|
2283
|
+
if (stored) return stored.endpoint;
|
|
2284
|
+
throw new Error("Dokploy endpoint not specified.\nEither run \"gkm login --service dokploy\" first, or provide --endpoint.");
|
|
2216
2285
|
}
|
|
2217
2286
|
/**
|
|
2218
|
-
*
|
|
2287
|
+
* Create a Dokploy API client
|
|
2219
2288
|
*/
|
|
2220
|
-
function
|
|
2221
|
-
const
|
|
2222
|
-
return
|
|
2223
|
-
|
|
2224
|
-
|
|
2225
|
-
|
|
2226
|
-
|
|
2227
|
-
# Install tini for proper signal handling as PID 1
|
|
2228
|
-
# Handles SIGTERM propagation and zombie process reaping
|
|
2229
|
-
RUN apk add --no-cache tini
|
|
2230
|
-
|
|
2231
|
-
# Create non-root user
|
|
2232
|
-
RUN addgroup --system --gid 1001 nodejs && \\
|
|
2233
|
-
adduser --system --uid 1001 hono
|
|
2234
|
-
|
|
2235
|
-
# Copy pre-built bundle
|
|
2236
|
-
COPY .gkm/server/dist/server.mjs ./
|
|
2237
|
-
|
|
2238
|
-
# Environment
|
|
2239
|
-
ENV NODE_ENV=production
|
|
2240
|
-
ENV PORT=${port}
|
|
2241
|
-
|
|
2242
|
-
# Health check
|
|
2243
|
-
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \\
|
|
2244
|
-
CMD wget -q --spider http://localhost:${port}${healthCheckPath} || exit 1
|
|
2245
|
-
|
|
2246
|
-
# Switch to non-root user
|
|
2247
|
-
USER hono
|
|
2248
|
-
|
|
2249
|
-
EXPOSE ${port}
|
|
2250
|
-
|
|
2251
|
-
# Use tini as entrypoint to handle PID 1 responsibilities
|
|
2252
|
-
ENTRYPOINT ["/sbin/tini", "--"]
|
|
2253
|
-
CMD ["node", "server.mjs"]
|
|
2254
|
-
`;
|
|
2289
|
+
async function createApi(endpoint) {
|
|
2290
|
+
const token = await getApiToken();
|
|
2291
|
+
return new DokployApi({
|
|
2292
|
+
baseUrl: endpoint,
|
|
2293
|
+
token
|
|
2294
|
+
});
|
|
2255
2295
|
}
|
|
2256
2296
|
/**
|
|
2257
|
-
*
|
|
2297
|
+
* Update gkm.config.ts with Dokploy configuration
|
|
2258
2298
|
*/
|
|
2259
|
-
function
|
|
2260
|
-
|
|
2261
|
-
|
|
2262
|
-
.
|
|
2263
|
-
|
|
2264
|
-
|
|
2265
|
-
.
|
|
2266
|
-
.
|
|
2267
|
-
|
|
2268
|
-
|
|
2269
|
-
|
|
2270
|
-
|
|
2271
|
-
|
|
2272
|
-
|
|
2273
|
-
|
|
2274
|
-
|
|
2275
|
-
|
|
2276
|
-
|
|
2277
|
-
.
|
|
2278
|
-
|
|
2279
|
-
|
|
2280
|
-
|
|
2281
|
-
|
|
2282
|
-
|
|
2283
|
-
|
|
2284
|
-
|
|
2285
|
-
|
|
2286
|
-
|
|
2287
|
-
|
|
2288
|
-
|
|
2289
|
-
|
|
2290
|
-
|
|
2291
|
-
|
|
2292
|
-
|
|
2293
|
-
!README.md
|
|
2294
|
-
|
|
2295
|
-
# Environment files (handle secrets separately)
|
|
2296
|
-
.env
|
|
2297
|
-
.env.*
|
|
2298
|
-
!.env.example
|
|
2299
|
-
|
|
2300
|
-
# Docker files (don't copy recursively)
|
|
2301
|
-
Dockerfile*
|
|
2302
|
-
docker-compose*
|
|
2303
|
-
.dockerignore
|
|
2304
|
-
`;
|
|
2299
|
+
async function updateConfig(config$1, cwd = process.cwd()) {
|
|
2300
|
+
const configPath = join(cwd, "gkm.config.ts");
|
|
2301
|
+
if (!existsSync(configPath)) {
|
|
2302
|
+
logger$2.warn("\n gkm.config.ts not found. Add this configuration manually:\n");
|
|
2303
|
+
logger$2.log(` providers: {`);
|
|
2304
|
+
logger$2.log(` dokploy: {`);
|
|
2305
|
+
logger$2.log(` endpoint: '${config$1.endpoint}',`);
|
|
2306
|
+
logger$2.log(` projectId: '${config$1.projectId}',`);
|
|
2307
|
+
logger$2.log(` applicationId: '${config$1.applicationId}',`);
|
|
2308
|
+
logger$2.log(` },`);
|
|
2309
|
+
logger$2.log(` },`);
|
|
2310
|
+
return;
|
|
2311
|
+
}
|
|
2312
|
+
const content = await readFile(configPath, "utf-8");
|
|
2313
|
+
if (content.includes("dokploy:") && content.includes("applicationId:")) {
|
|
2314
|
+
logger$2.log("\n Dokploy config already exists in gkm.config.ts");
|
|
2315
|
+
logger$2.log(" Updating with new values...");
|
|
2316
|
+
}
|
|
2317
|
+
const registryLine = config$1.registryId ? `\n\t\t\tregistryId: '${config$1.registryId}',` : "";
|
|
2318
|
+
const dokployConfigStr = `dokploy: {
|
|
2319
|
+
endpoint: '${config$1.endpoint}',
|
|
2320
|
+
projectId: '${config$1.projectId}',
|
|
2321
|
+
applicationId: '${config$1.applicationId}',${registryLine}
|
|
2322
|
+
}`;
|
|
2323
|
+
let newContent;
|
|
2324
|
+
if (content.includes("providers:")) if (content.includes("dokploy:")) newContent = content.replace(/dokploy:\s*\{[^}]*\}/s, dokployConfigStr);
|
|
2325
|
+
else newContent = content.replace(/providers:\s*\{/, `providers: {\n\t\t${dokployConfigStr},`);
|
|
2326
|
+
else newContent = content.replace(/}\s*\)\s*;?\s*$/, `
|
|
2327
|
+
providers: {
|
|
2328
|
+
${dokployConfigStr},
|
|
2329
|
+
},
|
|
2330
|
+
});`);
|
|
2331
|
+
await writeFile(configPath, newContent);
|
|
2332
|
+
logger$2.log("\n ✓ Updated gkm.config.ts with Dokploy configuration");
|
|
2305
2333
|
}
|
|
2306
2334
|
/**
|
|
2307
|
-
*
|
|
2335
|
+
* Initialize Dokploy deployment configuration
|
|
2308
2336
|
*/
|
|
2309
|
-
function
|
|
2310
|
-
|
|
2311
|
-
|
|
2312
|
-
|
|
2313
|
-
|
|
2314
|
-
|
|
2315
|
-
|
|
2316
|
-
|
|
2317
|
-
|
|
2318
|
-
|
|
2319
|
-
|
|
2320
|
-
|
|
2321
|
-
|
|
2322
|
-
|
|
2337
|
+
async function deployInitCommand(options) {
|
|
2338
|
+
const { projectName, appName, projectId: existingProjectId, registryId } = options;
|
|
2339
|
+
const endpoint = await getEndpoint(options.endpoint);
|
|
2340
|
+
const api = await createApi(endpoint);
|
|
2341
|
+
logger$2.log(`\n🚀 Initializing Dokploy deployment...`);
|
|
2342
|
+
logger$2.log(` Endpoint: ${endpoint}`);
|
|
2343
|
+
let projectId;
|
|
2344
|
+
if (existingProjectId) {
|
|
2345
|
+
projectId = existingProjectId;
|
|
2346
|
+
logger$2.log(`\n📁 Using existing project: ${projectId}`);
|
|
2347
|
+
} else {
|
|
2348
|
+
logger$2.log(`\n📁 Looking for project: ${projectName}`);
|
|
2349
|
+
const projects = await api.listProjects();
|
|
2350
|
+
const existingProject = projects.find((p) => p.name.toLowerCase() === projectName.toLowerCase());
|
|
2351
|
+
if (existingProject) {
|
|
2352
|
+
projectId = existingProject.projectId;
|
|
2353
|
+
logger$2.log(` Found existing project: ${projectId}`);
|
|
2354
|
+
} else {
|
|
2355
|
+
logger$2.log(` Creating new project...`);
|
|
2356
|
+
const result = await api.createProject(projectName);
|
|
2357
|
+
projectId = result.project.projectId;
|
|
2358
|
+
logger$2.log(` ✓ Created project: ${projectId}`);
|
|
2359
|
+
}
|
|
2360
|
+
}
|
|
2361
|
+
const project = await api.getProject(projectId);
|
|
2362
|
+
let environmentId;
|
|
2363
|
+
const firstEnv = project.environments?.[0];
|
|
2364
|
+
if (firstEnv) environmentId = firstEnv.environmentId;
|
|
2365
|
+
else {
|
|
2366
|
+
logger$2.log(` Creating production environment...`);
|
|
2367
|
+
const env = await api.createEnvironment(projectId, "production");
|
|
2368
|
+
environmentId = env.environmentId;
|
|
2369
|
+
}
|
|
2370
|
+
logger$2.log(`\n📦 Creating application: ${appName}`);
|
|
2371
|
+
const application = await api.createApplication(appName, projectId, environmentId);
|
|
2372
|
+
logger$2.log(` ✓ Created application: ${application.applicationId}`);
|
|
2373
|
+
if (registryId) {
|
|
2374
|
+
logger$2.log(`\n🔧 Configuring registry: ${registryId}`);
|
|
2375
|
+
await api.updateApplication(application.applicationId, { registryId });
|
|
2376
|
+
logger$2.log(` ✓ Registry configured`);
|
|
2377
|
+
} else try {
|
|
2378
|
+
const registries = await api.listRegistries();
|
|
2379
|
+
if (registries.length > 0) {
|
|
2380
|
+
logger$2.log(`\n📋 Available registries:`);
|
|
2381
|
+
for (const reg of registries) logger$2.log(` - ${reg.registryName}: ${reg.registryUrl} (${reg.registryId})`);
|
|
2382
|
+
logger$2.log(`\n To use a registry, run with --registry-id <id>`);
|
|
2383
|
+
}
|
|
2384
|
+
} catch {}
|
|
2385
|
+
const config$1 = {
|
|
2386
|
+
endpoint,
|
|
2387
|
+
projectId,
|
|
2388
|
+
applicationId: application.applicationId
|
|
2389
|
+
};
|
|
2390
|
+
await updateConfig(config$1);
|
|
2391
|
+
logger$2.log(`\n✅ Dokploy deployment initialized!`);
|
|
2392
|
+
logger$2.log(`\n📋 Configuration:`);
|
|
2393
|
+
logger$2.log(` Project ID: ${projectId}`);
|
|
2394
|
+
logger$2.log(` Application ID: ${application.applicationId}`);
|
|
2395
|
+
logger$2.log(`\n🔗 View in Dokploy: ${endpoint}/project/${projectId}`);
|
|
2396
|
+
logger$2.log(`\n📝 Next steps:`);
|
|
2397
|
+
logger$2.log(` 1. Initialize secrets: gkm secrets:init --stage production`);
|
|
2398
|
+
logger$2.log(` 2. Deploy: gkm deploy --provider dokploy --stage production`);
|
|
2399
|
+
return config$1;
|
|
2323
2400
|
}
|
|
2324
2401
|
/**
|
|
2325
|
-
*
|
|
2402
|
+
* List available Dokploy resources
|
|
2326
2403
|
*/
|
|
2327
|
-
function
|
|
2328
|
-
const
|
|
2329
|
-
|
|
2330
|
-
|
|
2331
|
-
|
|
2332
|
-
|
|
2333
|
-
|
|
2334
|
-
|
|
2335
|
-
|
|
2336
|
-
|
|
2337
|
-
|
|
2338
|
-
|
|
2339
|
-
|
|
2340
|
-
|
|
2404
|
+
async function deployListCommand(options) {
|
|
2405
|
+
const endpoint = await getEndpoint(options.endpoint);
|
|
2406
|
+
const api = await createApi(endpoint);
|
|
2407
|
+
const { resource } = options;
|
|
2408
|
+
if (resource === "projects") {
|
|
2409
|
+
logger$2.log(`\n📁 Projects in ${endpoint}:`);
|
|
2410
|
+
const projects = await api.listProjects();
|
|
2411
|
+
if (projects.length === 0) {
|
|
2412
|
+
logger$2.log(" No projects found");
|
|
2413
|
+
return;
|
|
2414
|
+
}
|
|
2415
|
+
for (const project of projects) {
|
|
2416
|
+
logger$2.log(`\n ${project.name} (${project.projectId})`);
|
|
2417
|
+
if (project.description) logger$2.log(` ${project.description}`);
|
|
2418
|
+
}
|
|
2419
|
+
} else if (resource === "registries") {
|
|
2420
|
+
logger$2.log(`\n🐳 Registries in ${endpoint}:`);
|
|
2421
|
+
const registries = await api.listRegistries();
|
|
2422
|
+
if (registries.length === 0) {
|
|
2423
|
+
logger$2.log(" No registries configured");
|
|
2424
|
+
logger$2.log(" Run \"gkm registry:setup\" to configure a registry");
|
|
2425
|
+
return;
|
|
2426
|
+
}
|
|
2427
|
+
const storedRegistryId = await getDokployRegistryId();
|
|
2428
|
+
for (const registry of registries) {
|
|
2429
|
+
const isDefault = registry.registryId === storedRegistryId;
|
|
2430
|
+
const marker = isDefault ? " (default)" : "";
|
|
2431
|
+
logger$2.log(`\n ${registry.registryName}${marker} (${registry.registryId})`);
|
|
2432
|
+
logger$2.log(` URL: ${registry.registryUrl}`);
|
|
2433
|
+
logger$2.log(` Username: ${registry.username}`);
|
|
2434
|
+
if (registry.imagePrefix) logger$2.log(` Prefix: ${registry.imagePrefix}`);
|
|
2435
|
+
}
|
|
2436
|
+
}
|
|
2341
2437
|
}
|
|
2342
2438
|
|
|
2343
2439
|
//#endregion
|
|
2344
|
-
//#region src/
|
|
2440
|
+
//#region src/deploy/index.ts
|
|
2345
2441
|
const logger$1 = console;
|
|
2346
2442
|
/**
|
|
2347
|
-
*
|
|
2348
|
-
* Generates Dockerfile, docker-compose.yml, and related files
|
|
2349
|
-
*
|
|
2350
|
-
* Default: Multi-stage Dockerfile that builds from source inside Docker
|
|
2351
|
-
* --slim: Slim Dockerfile that copies pre-built bundle (requires prior build)
|
|
2443
|
+
* Prompt for input
|
|
2352
2444
|
*/
|
|
2353
|
-
async function
|
|
2354
|
-
|
|
2355
|
-
|
|
2356
|
-
|
|
2357
|
-
|
|
2358
|
-
|
|
2359
|
-
|
|
2360
|
-
|
|
2361
|
-
|
|
2362
|
-
|
|
2445
|
+
async function prompt(message, hidden = false) {
|
|
2446
|
+
if (!process.stdin.isTTY) throw new Error("Interactive input required. Please configure manually.");
|
|
2447
|
+
if (hidden) {
|
|
2448
|
+
process.stdout.write(message);
|
|
2449
|
+
return new Promise((resolve$1) => {
|
|
2450
|
+
let value = "";
|
|
2451
|
+
const onData = (char) => {
|
|
2452
|
+
const c = char.toString();
|
|
2453
|
+
if (c === "\n" || c === "\r") {
|
|
2454
|
+
process.stdin.setRawMode(false);
|
|
2455
|
+
process.stdin.pause();
|
|
2456
|
+
process.stdin.removeListener("data", onData);
|
|
2457
|
+
process.stdout.write("\n");
|
|
2458
|
+
resolve$1(value);
|
|
2459
|
+
} else if (c === "") {
|
|
2460
|
+
process.stdin.setRawMode(false);
|
|
2461
|
+
process.stdin.pause();
|
|
2462
|
+
process.stdout.write("\n");
|
|
2463
|
+
process.exit(1);
|
|
2464
|
+
} else if (c === "" || c === "\b") {
|
|
2465
|
+
if (value.length > 0) value = value.slice(0, -1);
|
|
2466
|
+
} else value += c;
|
|
2467
|
+
};
|
|
2468
|
+
process.stdin.setRawMode(true);
|
|
2469
|
+
process.stdin.resume();
|
|
2470
|
+
process.stdin.on("data", onData);
|
|
2471
|
+
});
|
|
2363
2472
|
}
|
|
2364
|
-
const
|
|
2365
|
-
|
|
2366
|
-
|
|
2367
|
-
|
|
2368
|
-
|
|
2369
|
-
|
|
2370
|
-
|
|
2371
|
-
|
|
2372
|
-
|
|
2373
|
-
|
|
2374
|
-
|
|
2375
|
-
|
|
2473
|
+
const rl = readline.createInterface({
|
|
2474
|
+
input: stdin,
|
|
2475
|
+
output: stdout
|
|
2476
|
+
});
|
|
2477
|
+
try {
|
|
2478
|
+
return await rl.question(message);
|
|
2479
|
+
} finally {
|
|
2480
|
+
rl.close();
|
|
2481
|
+
}
|
|
2482
|
+
}
|
|
2483
|
+
/**
|
|
2484
|
+
* Provision docker compose services in Dokploy
|
|
2485
|
+
* @internal Exported for testing
|
|
2486
|
+
*/
|
|
2487
|
+
async function provisionServices(api, projectId, environmentId, appName, services, existingUrls) {
|
|
2488
|
+
logger$1.log(`\n🔍 provisionServices called: services=${JSON.stringify(services)}, envId=${environmentId}`);
|
|
2489
|
+
if (!services || !environmentId) {
|
|
2490
|
+
logger$1.log(" Skipping: no services or no environmentId");
|
|
2491
|
+
return void 0;
|
|
2492
|
+
}
|
|
2493
|
+
const serviceUrls = {};
|
|
2494
|
+
if (services.postgres) if (existingUrls?.DATABASE_URL) logger$1.log("\n🐘 PostgreSQL: Already configured (skipping)");
|
|
2495
|
+
else {
|
|
2496
|
+
logger$1.log("\n🐘 Provisioning PostgreSQL...");
|
|
2497
|
+
const postgresName = `${appName}-db`;
|
|
2498
|
+
try {
|
|
2499
|
+
const { randomBytes: randomBytes$1 } = await import("node:crypto");
|
|
2500
|
+
const databasePassword = randomBytes$1(16).toString("hex");
|
|
2501
|
+
const postgres = await api.createPostgres(postgresName, projectId, environmentId, { databasePassword });
|
|
2502
|
+
logger$1.log(` ✓ Created PostgreSQL: ${postgres.postgresId}`);
|
|
2503
|
+
await api.deployPostgres(postgres.postgresId);
|
|
2504
|
+
logger$1.log(" ✓ PostgreSQL deployed");
|
|
2505
|
+
serviceUrls.DATABASE_URL = `postgresql://${postgres.databaseUser}:${postgres.databasePassword}@${postgres.appName}:5432/${postgres.databaseName}`;
|
|
2506
|
+
logger$1.log(` ✓ DATABASE_URL configured`);
|
|
2507
|
+
} catch (error) {
|
|
2508
|
+
const message = error instanceof Error ? error.message : "Unknown error";
|
|
2509
|
+
if (message.includes("already exists") || message.includes("duplicate")) logger$1.log(` ℹ PostgreSQL already exists`);
|
|
2510
|
+
else logger$1.log(` ⚠ Failed to provision PostgreSQL: ${message}`);
|
|
2511
|
+
}
|
|
2512
|
+
}
|
|
2513
|
+
if (services.redis) if (existingUrls?.REDIS_URL) logger$1.log("\n🔴 Redis: Already configured (skipping)");
|
|
2514
|
+
else {
|
|
2515
|
+
logger$1.log("\n🔴 Provisioning Redis...");
|
|
2516
|
+
const redisName = `${appName}-cache`;
|
|
2517
|
+
try {
|
|
2518
|
+
const { randomBytes: randomBytes$1 } = await import("node:crypto");
|
|
2519
|
+
const databasePassword = randomBytes$1(16).toString("hex");
|
|
2520
|
+
const redis = await api.createRedis(redisName, projectId, environmentId, { databasePassword });
|
|
2521
|
+
logger$1.log(` ✓ Created Redis: ${redis.redisId}`);
|
|
2522
|
+
await api.deployRedis(redis.redisId);
|
|
2523
|
+
logger$1.log(" ✓ Redis deployed");
|
|
2524
|
+
const password = redis.databasePassword ? `:${redis.databasePassword}@` : "";
|
|
2525
|
+
serviceUrls.REDIS_URL = `redis://${password}${redis.appName}:6379`;
|
|
2526
|
+
logger$1.log(` ✓ REDIS_URL configured`);
|
|
2527
|
+
} catch (error) {
|
|
2528
|
+
const message = error instanceof Error ? error.message : "Unknown error";
|
|
2529
|
+
if (message.includes("already exists") || message.includes("duplicate")) logger$1.log(` ℹ Redis already exists`);
|
|
2530
|
+
else logger$1.log(` ⚠ Failed to provision Redis: ${message}`);
|
|
2531
|
+
}
|
|
2532
|
+
}
|
|
2533
|
+
return Object.keys(serviceUrls).length > 0 ? serviceUrls : void 0;
|
|
2534
|
+
}
|
|
2535
|
+
/**
|
|
2536
|
+
* Ensure Dokploy is fully configured, recovering/creating resources as needed
|
|
2537
|
+
*/
|
|
2538
|
+
async function ensureDokploySetup(config$1, dockerConfig, stage, services) {
|
|
2539
|
+
logger$1.log("\n🔧 Checking Dokploy setup...");
|
|
2540
|
+
const { readStageSecrets: readStageSecrets$1 } = await import("./storage-nkGIjeXt.mjs");
|
|
2541
|
+
const existingSecrets = await readStageSecrets$1(stage);
|
|
2542
|
+
const existingUrls = {
|
|
2543
|
+
DATABASE_URL: existingSecrets?.urls?.DATABASE_URL,
|
|
2544
|
+
REDIS_URL: existingSecrets?.urls?.REDIS_URL
|
|
2376
2545
|
};
|
|
2377
|
-
|
|
2378
|
-
|
|
2379
|
-
|
|
2380
|
-
|
|
2381
|
-
|
|
2382
|
-
|
|
2383
|
-
|
|
2384
|
-
|
|
2385
|
-
|
|
2386
|
-
|
|
2387
|
-
|
|
2546
|
+
let creds = await getDokployCredentials();
|
|
2547
|
+
if (!creds) {
|
|
2548
|
+
logger$1.log("\n📋 Dokploy credentials not found. Let's set them up.");
|
|
2549
|
+
const endpoint = await prompt("Dokploy URL (e.g., https://dokploy.example.com): ");
|
|
2550
|
+
const normalizedEndpoint = endpoint.replace(/\/$/, "");
|
|
2551
|
+
try {
|
|
2552
|
+
new URL(normalizedEndpoint);
|
|
2553
|
+
} catch {
|
|
2554
|
+
throw new Error("Invalid URL format");
|
|
2555
|
+
}
|
|
2556
|
+
logger$1.log(`\nGenerate a token at: ${normalizedEndpoint}/settings/profile\n`);
|
|
2557
|
+
const token = await prompt("API Token: ", true);
|
|
2558
|
+
logger$1.log("\nValidating credentials...");
|
|
2559
|
+
const isValid = await validateDokployToken(normalizedEndpoint, token);
|
|
2560
|
+
if (!isValid) throw new Error("Invalid credentials. Please check your token.");
|
|
2561
|
+
await storeDokployCredentials(token, normalizedEndpoint);
|
|
2562
|
+
creds = {
|
|
2563
|
+
token,
|
|
2564
|
+
endpoint: normalizedEndpoint
|
|
2565
|
+
};
|
|
2566
|
+
logger$1.log("✓ Credentials saved");
|
|
2567
|
+
}
|
|
2568
|
+
const api = new DokployApi({
|
|
2569
|
+
baseUrl: creds.endpoint,
|
|
2570
|
+
token: creds.token
|
|
2571
|
+
});
|
|
2572
|
+
const existingConfig = config$1.providers?.dokploy;
|
|
2573
|
+
if (existingConfig && typeof existingConfig !== "boolean" && existingConfig.applicationId && existingConfig.projectId) {
|
|
2574
|
+
logger$1.log("✓ Dokploy config found in gkm.config.ts");
|
|
2575
|
+
try {
|
|
2576
|
+
const projectDetails = await api.getProject(existingConfig.projectId);
|
|
2577
|
+
logger$1.log("✓ Project verified");
|
|
2578
|
+
const storedRegistryId = existingConfig.registryId ?? await getDokployRegistryId();
|
|
2579
|
+
const environments = projectDetails.environments ?? [];
|
|
2580
|
+
let environment = environments.find((e) => e.name.toLowerCase() === stage.toLowerCase());
|
|
2581
|
+
if (!environment) {
|
|
2582
|
+
logger$1.log(` Creating "${stage}" environment...`);
|
|
2583
|
+
environment = await api.createEnvironment(existingConfig.projectId, stage);
|
|
2584
|
+
logger$1.log(` ✓ Created environment: ${environment.environmentId}`);
|
|
2585
|
+
}
|
|
2586
|
+
const environmentId$1 = environment.environmentId;
|
|
2587
|
+
logger$1.log(` Services config: ${JSON.stringify(services)}, envId: ${environmentId$1}`);
|
|
2588
|
+
const serviceUrls$1 = await provisionServices(api, existingConfig.projectId, environmentId$1, dockerConfig.imageName || "app", services, existingUrls);
|
|
2589
|
+
return {
|
|
2590
|
+
config: {
|
|
2591
|
+
endpoint: existingConfig.endpoint,
|
|
2592
|
+
projectId: existingConfig.projectId,
|
|
2593
|
+
applicationId: existingConfig.applicationId,
|
|
2594
|
+
registry: existingConfig.registry,
|
|
2595
|
+
registryId: storedRegistryId ?? void 0
|
|
2596
|
+
},
|
|
2597
|
+
serviceUrls: serviceUrls$1
|
|
2598
|
+
};
|
|
2599
|
+
} catch {
|
|
2600
|
+
logger$1.log("⚠ Project not found, will recover...");
|
|
2601
|
+
}
|
|
2602
|
+
}
|
|
2603
|
+
logger$1.log("\n📁 Looking for project...");
|
|
2604
|
+
const projectName = dockerConfig.imageName || "app";
|
|
2605
|
+
const projects = await api.listProjects();
|
|
2606
|
+
let project = projects.find((p) => p.name.toLowerCase() === projectName.toLowerCase());
|
|
2607
|
+
let environmentId;
|
|
2608
|
+
if (project) {
|
|
2609
|
+
logger$1.log(` Found existing project: ${project.name} (${project.projectId})`);
|
|
2610
|
+
const projectDetails = await api.getProject(project.projectId);
|
|
2611
|
+
const environments = projectDetails.environments ?? [];
|
|
2612
|
+
const matchingEnv = environments.find((e) => e.name.toLowerCase() === stage.toLowerCase());
|
|
2613
|
+
if (matchingEnv) {
|
|
2614
|
+
environmentId = matchingEnv.environmentId;
|
|
2615
|
+
logger$1.log(` Using environment: ${matchingEnv.name}`);
|
|
2616
|
+
} else {
|
|
2617
|
+
logger$1.log(` Creating "${stage}" environment...`);
|
|
2618
|
+
const env = await api.createEnvironment(project.projectId, stage);
|
|
2619
|
+
environmentId = env.environmentId;
|
|
2620
|
+
logger$1.log(` ✓ Created environment: ${stage}`);
|
|
2621
|
+
}
|
|
2622
|
+
} else {
|
|
2623
|
+
logger$1.log(` Creating project: ${projectName}`);
|
|
2624
|
+
const result = await api.createProject(projectName);
|
|
2625
|
+
project = result.project;
|
|
2626
|
+
if (result.environment.name.toLowerCase() !== stage.toLowerCase()) {
|
|
2627
|
+
logger$1.log(` Creating "${stage}" environment...`);
|
|
2628
|
+
const env = await api.createEnvironment(project.projectId, stage);
|
|
2629
|
+
environmentId = env.environmentId;
|
|
2630
|
+
} else environmentId = result.environment.environmentId;
|
|
2631
|
+
logger$1.log(` ✓ Created project: ${project.projectId}`);
|
|
2632
|
+
logger$1.log(` ✓ Using environment: ${stage}`);
|
|
2633
|
+
}
|
|
2634
|
+
logger$1.log("\n📦 Looking for application...");
|
|
2635
|
+
const appName = dockerConfig.imageName || projectName;
|
|
2636
|
+
let applicationId;
|
|
2637
|
+
if (existingConfig && typeof existingConfig !== "boolean" && existingConfig.applicationId) {
|
|
2638
|
+
applicationId = existingConfig.applicationId;
|
|
2639
|
+
logger$1.log(` Using application from config: ${applicationId}`);
|
|
2640
|
+
} else {
|
|
2641
|
+
logger$1.log(` Creating application: ${appName}`);
|
|
2642
|
+
const app = await api.createApplication(appName, project.projectId, environmentId);
|
|
2643
|
+
applicationId = app.applicationId;
|
|
2644
|
+
logger$1.log(` ✓ Created application: ${applicationId}`);
|
|
2645
|
+
}
|
|
2646
|
+
logger$1.log("\n🐳 Checking registry...");
|
|
2647
|
+
let registryId = await getDokployRegistryId();
|
|
2648
|
+
if (registryId) try {
|
|
2649
|
+
const registry = await api.getRegistry(registryId);
|
|
2650
|
+
logger$1.log(` Using registry: ${registry.registryName}`);
|
|
2651
|
+
} catch {
|
|
2652
|
+
logger$1.log(" ⚠ Stored registry not found, clearing...");
|
|
2653
|
+
registryId = void 0;
|
|
2654
|
+
await storeDokployRegistryId("");
|
|
2655
|
+
}
|
|
2656
|
+
if (!registryId) {
|
|
2657
|
+
const registries = await api.listRegistries();
|
|
2658
|
+
if (registries.length === 0) if (dockerConfig.registry) {
|
|
2659
|
+
logger$1.log(" No registries found in Dokploy. Let's create one.");
|
|
2660
|
+
logger$1.log(` Registry URL: ${dockerConfig.registry}`);
|
|
2661
|
+
const username = await prompt("Registry username: ");
|
|
2662
|
+
const password = await prompt("Registry password/token: ", true);
|
|
2663
|
+
const registry = await api.createRegistry("Default Registry", dockerConfig.registry, username, password);
|
|
2664
|
+
registryId = registry.registryId;
|
|
2665
|
+
await storeDokployRegistryId(registryId);
|
|
2666
|
+
logger$1.log(` ✓ Registry created: ${registryId}`);
|
|
2667
|
+
} else logger$1.log(" ⚠ No registry configured. Set docker.registry in gkm.config.ts");
|
|
2668
|
+
else {
|
|
2669
|
+
logger$1.log(" Available registries:");
|
|
2670
|
+
registries.forEach((reg, i) => {
|
|
2671
|
+
logger$1.log(` ${i + 1}. ${reg.registryName} (${reg.registryUrl})`);
|
|
2672
|
+
});
|
|
2673
|
+
if (dockerConfig.registry) logger$1.log(` ${registries.length + 1}. Create new registry`);
|
|
2674
|
+
const maxOption = dockerConfig.registry ? registries.length + 1 : registries.length;
|
|
2675
|
+
const selection = await prompt(` Select registry (1-${maxOption}): `);
|
|
2676
|
+
const index = parseInt(selection, 10) - 1;
|
|
2677
|
+
if (index >= 0 && index < registries.length) {
|
|
2678
|
+
registryId = registries[index].registryId;
|
|
2679
|
+
await storeDokployRegistryId(registryId);
|
|
2680
|
+
logger$1.log(` ✓ Selected: ${registries[index].registryName}`);
|
|
2681
|
+
} else if (dockerConfig.registry && index === registries.length) {
|
|
2682
|
+
logger$1.log(`\n Creating new registry...`);
|
|
2683
|
+
logger$1.log(` Registry URL: ${dockerConfig.registry}`);
|
|
2684
|
+
const username = await prompt(" Registry username: ");
|
|
2685
|
+
const password = await prompt(" Registry password/token: ", true);
|
|
2686
|
+
const registry = await api.createRegistry(dockerConfig.registry.replace(/^https?:\/\//, ""), dockerConfig.registry, username, password);
|
|
2687
|
+
registryId = registry.registryId;
|
|
2688
|
+
await storeDokployRegistryId(registryId);
|
|
2689
|
+
logger$1.log(` ✓ Registry created: ${registryId}`);
|
|
2690
|
+
} else logger$1.log(" ⚠ Invalid selection, skipping registry setup");
|
|
2691
|
+
}
|
|
2692
|
+
}
|
|
2693
|
+
const dokployConfig = {
|
|
2694
|
+
endpoint: creds.endpoint,
|
|
2695
|
+
projectId: project.projectId,
|
|
2696
|
+
applicationId,
|
|
2697
|
+
registryId: registryId ?? void 0
|
|
2388
2698
|
};
|
|
2389
|
-
|
|
2390
|
-
|
|
2391
|
-
|
|
2392
|
-
|
|
2393
|
-
logger$1.log(
|
|
2394
|
-
const
|
|
2395
|
-
|
|
2396
|
-
|
|
2397
|
-
|
|
2398
|
-
const entrypoint = generateDockerEntrypoint();
|
|
2399
|
-
const entrypointPath = join(dockerDir, "docker-entrypoint.sh");
|
|
2400
|
-
await writeFile(entrypointPath, entrypoint);
|
|
2401
|
-
logger$1.log("Generated: .gkm/docker/docker-entrypoint.sh");
|
|
2402
|
-
const result = {
|
|
2403
|
-
dockerfile: dockerfilePath,
|
|
2404
|
-
dockerCompose: composePath,
|
|
2405
|
-
dockerignore: dockerignorePath,
|
|
2406
|
-
entrypoint: entrypointPath
|
|
2699
|
+
await updateConfig(dokployConfig);
|
|
2700
|
+
logger$1.log("\n✅ Dokploy setup complete!");
|
|
2701
|
+
logger$1.log(` Project: ${project.projectId}`);
|
|
2702
|
+
logger$1.log(` Application: ${applicationId}`);
|
|
2703
|
+
if (registryId) logger$1.log(` Registry: ${registryId}`);
|
|
2704
|
+
const serviceUrls = await provisionServices(api, project.projectId, environmentId, dockerConfig.imageName || "app", services, existingUrls);
|
|
2705
|
+
return {
|
|
2706
|
+
config: dokployConfig,
|
|
2707
|
+
serviceUrls
|
|
2407
2708
|
};
|
|
2408
|
-
if (options.build) await buildDockerImage(dockerConfig.imageName, options);
|
|
2409
|
-
if (options.push) await pushDockerImage(dockerConfig.imageName, options);
|
|
2410
|
-
return result;
|
|
2411
2709
|
}
|
|
2412
2710
|
/**
|
|
2413
|
-
*
|
|
2414
|
-
* Uses BuildKit for cache mount support
|
|
2711
|
+
* Generate image tag from stage and timestamp
|
|
2415
2712
|
*/
|
|
2416
|
-
|
|
2417
|
-
const
|
|
2418
|
-
|
|
2419
|
-
const fullImageName = registry ? `${registry}/${imageName}:${tag}` : `${imageName}:${tag}`;
|
|
2420
|
-
logger$1.log(`\n🐳 Building Docker image: ${fullImageName}`);
|
|
2421
|
-
try {
|
|
2422
|
-
execSync(`DOCKER_BUILDKIT=1 docker build -f .gkm/docker/Dockerfile -t ${fullImageName} .`, {
|
|
2423
|
-
cwd: process.cwd(),
|
|
2424
|
-
stdio: "inherit",
|
|
2425
|
-
env: {
|
|
2426
|
-
...process.env,
|
|
2427
|
-
DOCKER_BUILDKIT: "1"
|
|
2428
|
-
}
|
|
2429
|
-
});
|
|
2430
|
-
logger$1.log(`✅ Docker image built: ${fullImageName}`);
|
|
2431
|
-
} catch (error) {
|
|
2432
|
-
throw new Error(`Failed to build Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
|
|
2433
|
-
}
|
|
2713
|
+
function generateTag(stage) {
|
|
2714
|
+
const timestamp = (/* @__PURE__ */ new Date()).toISOString().replace(/[:.]/g, "-").slice(0, 19);
|
|
2715
|
+
return `${stage}-${timestamp}`;
|
|
2434
2716
|
}
|
|
2435
2717
|
/**
|
|
2436
|
-
*
|
|
2718
|
+
* Main deploy command
|
|
2437
2719
|
*/
|
|
2438
|
-
async function
|
|
2439
|
-
const tag = options
|
|
2440
|
-
|
|
2441
|
-
|
|
2442
|
-
const
|
|
2443
|
-
|
|
2444
|
-
|
|
2445
|
-
|
|
2446
|
-
|
|
2447
|
-
|
|
2720
|
+
async function deployCommand(options) {
|
|
2721
|
+
const { provider, stage, tag, skipPush, skipBuild } = options;
|
|
2722
|
+
logger$1.log(`\n🚀 Deploying to ${provider}...`);
|
|
2723
|
+
logger$1.log(` Stage: ${stage}`);
|
|
2724
|
+
const config$1 = await loadConfig();
|
|
2725
|
+
const imageTag = tag ?? generateTag(stage);
|
|
2726
|
+
logger$1.log(` Tag: ${imageTag}`);
|
|
2727
|
+
const dockerConfig = resolveDockerConfig(config$1);
|
|
2728
|
+
const imageName = dockerConfig.imageName ?? "app";
|
|
2729
|
+
const registry = dockerConfig.registry;
|
|
2730
|
+
const imageRef = registry ? `${registry}/${imageName}:${imageTag}` : `${imageName}:${imageTag}`;
|
|
2731
|
+
let dokployConfig;
|
|
2732
|
+
let finalRegistry = registry;
|
|
2733
|
+
if (provider === "dokploy") {
|
|
2734
|
+
const composeServices = config$1.docker?.compose?.services;
|
|
2735
|
+
logger$1.log(`\n🔍 Docker compose config: ${JSON.stringify(config$1.docker?.compose)}`);
|
|
2736
|
+
const dockerServices = composeServices ? Array.isArray(composeServices) ? {
|
|
2737
|
+
postgres: composeServices.includes("postgres"),
|
|
2738
|
+
redis: composeServices.includes("redis"),
|
|
2739
|
+
rabbitmq: composeServices.includes("rabbitmq")
|
|
2740
|
+
} : {
|
|
2741
|
+
postgres: Boolean(composeServices.postgres),
|
|
2742
|
+
redis: Boolean(composeServices.redis),
|
|
2743
|
+
rabbitmq: Boolean(composeServices.rabbitmq)
|
|
2744
|
+
} : void 0;
|
|
2745
|
+
const setupResult = await ensureDokploySetup(config$1, dockerConfig, stage, dockerServices);
|
|
2746
|
+
dokployConfig = setupResult.config;
|
|
2747
|
+
finalRegistry = dokployConfig.registry ?? dockerConfig.registry;
|
|
2748
|
+
if (setupResult.serviceUrls) {
|
|
2749
|
+
const { readStageSecrets: readStageSecrets$1, writeStageSecrets: writeStageSecrets$1, initStageSecrets } = await import("./storage-nkGIjeXt.mjs");
|
|
2750
|
+
let secrets = await readStageSecrets$1(stage);
|
|
2751
|
+
if (!secrets) {
|
|
2752
|
+
logger$1.log(` Creating secrets file for stage "${stage}"...`);
|
|
2753
|
+
secrets = initStageSecrets(stage);
|
|
2754
|
+
}
|
|
2755
|
+
let updated = false;
|
|
2756
|
+
for (const [key, value] of Object.entries(setupResult.serviceUrls)) {
|
|
2757
|
+
const urlKey = key;
|
|
2758
|
+
if (value && !secrets.urls[urlKey] && !secrets.custom[key]) {
|
|
2759
|
+
secrets.urls[urlKey] = value;
|
|
2760
|
+
logger$1.log(` Saved ${key} to secrets`);
|
|
2761
|
+
updated = true;
|
|
2762
|
+
}
|
|
2763
|
+
}
|
|
2764
|
+
if (updated) await writeStageSecrets$1(secrets);
|
|
2765
|
+
}
|
|
2766
|
+
}
|
|
2767
|
+
let masterKey;
|
|
2768
|
+
if (!skipBuild) {
|
|
2769
|
+
logger$1.log(`\n📦 Building for production...`);
|
|
2770
|
+
const buildResult = await buildCommand({
|
|
2771
|
+
provider: "server",
|
|
2772
|
+
production: true,
|
|
2773
|
+
stage
|
|
2448
2774
|
});
|
|
2449
|
-
|
|
2450
|
-
}
|
|
2451
|
-
|
|
2775
|
+
masterKey = buildResult.masterKey;
|
|
2776
|
+
} else logger$1.log(`\n⏭️ Skipping build (--skip-build)`);
|
|
2777
|
+
let result;
|
|
2778
|
+
switch (provider) {
|
|
2779
|
+
case "docker": {
|
|
2780
|
+
result = await deployDocker({
|
|
2781
|
+
stage,
|
|
2782
|
+
tag: imageTag,
|
|
2783
|
+
skipPush,
|
|
2784
|
+
masterKey,
|
|
2785
|
+
config: dockerConfig
|
|
2786
|
+
});
|
|
2787
|
+
break;
|
|
2788
|
+
}
|
|
2789
|
+
case "dokploy": {
|
|
2790
|
+
if (!dokployConfig) throw new Error("Dokploy config not initialized");
|
|
2791
|
+
const finalImageRef = finalRegistry ? `${finalRegistry}/${imageName}:${imageTag}` : `${imageName}:${imageTag}`;
|
|
2792
|
+
await deployDocker({
|
|
2793
|
+
stage,
|
|
2794
|
+
tag: imageTag,
|
|
2795
|
+
skipPush: false,
|
|
2796
|
+
masterKey,
|
|
2797
|
+
config: {
|
|
2798
|
+
registry: finalRegistry,
|
|
2799
|
+
imageName: dockerConfig.imageName
|
|
2800
|
+
}
|
|
2801
|
+
});
|
|
2802
|
+
result = await deployDokploy({
|
|
2803
|
+
stage,
|
|
2804
|
+
tag: imageTag,
|
|
2805
|
+
imageRef: finalImageRef,
|
|
2806
|
+
masterKey,
|
|
2807
|
+
config: dokployConfig
|
|
2808
|
+
});
|
|
2809
|
+
break;
|
|
2810
|
+
}
|
|
2811
|
+
case "aws-lambda": {
|
|
2812
|
+
logger$1.log("\n⚠️ AWS Lambda deployment is not yet implemented.");
|
|
2813
|
+
logger$1.log(" Use SST or AWS CDK for Lambda deployments.");
|
|
2814
|
+
result = {
|
|
2815
|
+
imageRef,
|
|
2816
|
+
masterKey
|
|
2817
|
+
};
|
|
2818
|
+
break;
|
|
2819
|
+
}
|
|
2820
|
+
default: throw new Error(`Unknown deploy provider: ${provider}\nSupported providers: docker, dokploy, aws-lambda`);
|
|
2452
2821
|
}
|
|
2822
|
+
logger$1.log("\n✅ Deployment complete!");
|
|
2823
|
+
return result;
|
|
2453
2824
|
}
|
|
2454
2825
|
|
|
2455
2826
|
//#endregion
|
|
@@ -4080,11 +4451,11 @@ async function initCommand(projectName, options = {}) {
  };
  const targetDir = join(cwd, name$1);
  const template = getTemplate(templateOptions.template);
- const isMonorepo = templateOptions.monorepo;
+ const isMonorepo$1 = templateOptions.monorepo;
  const apiPath = templateOptions.apiPath;
  await mkdir(targetDir, { recursive: true });
- const appDir = isMonorepo ? join(targetDir, apiPath) : targetDir;
- if (isMonorepo) await mkdir(appDir, { recursive: true });
+ const appDir = isMonorepo$1 ? join(targetDir, apiPath) : targetDir;
+ if (isMonorepo$1) await mkdir(appDir, { recursive: true });
  const appFiles = [
  ...generatePackageJson(templateOptions, template),
  ...generateConfigFiles(templateOptions, template),
@@ -4100,7 +4471,7 @@ async function initCommand(projectName, options = {}) {
 }
 for (const { path, content } of appFiles) {
 const fullPath = join(appDir, path);
-const _displayPath = isMonorepo ? `${apiPath}/${path}` : path;
+const _displayPath = isMonorepo$1 ? `${apiPath}/${path}` : path;
 await mkdir(dirname(fullPath), { recursive: true });
 await writeFile(fullPath, content);
 }
@@ -4433,7 +4804,8 @@ program.command("init").description("Scaffold a new project").argument("[name]",
 const globalOptions = program.opts();
 if (globalOptions.cwd) process.chdir(globalOptions.cwd);
 await initCommand(name$1, options);
-} catch (
+} catch (error) {
+console.error(error instanceof Error ? error.message : "Command failed");
 process.exit(1);
 }
 });
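This hunk and the near-identical ones that follow make the same change to every command registration: the previous catch line (cut off at `} catch (` in this diff) is replaced by a handler that prints the error message, falling back to a generic string, before exiting with code 1; only the deploy command uses "Deploy failed" instead of "Command failed". A minimal, self-contained TypeScript sketch of that pattern, with runAction as a hypothetical stand-in for the real command implementations:

// Minimal sketch of the repeated pattern; runAction is a hypothetical stand-in.
async function runAction(): Promise<void> {
  throw new Error("something went wrong");
}

async function main(): Promise<void> {
  try {
    await runAction();
  } catch (error) {
    // Print a readable message, then exit non-zero, as each command action does.
    console.error(error instanceof Error ? error.message : "Command failed");
    process.exit(1);
  }
}

void main();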
@@ -4465,7 +4837,8 @@ program.command("build").description("Build handlers from endpoints, functions,
 skipBundle: options.skipBundle || false,
 stage: options.stage
 });
-} catch (
+} catch (error) {
+console.error(error instanceof Error ? error.message : "Command failed");
 process.exit(1);
 }
 });
@@ -4478,7 +4851,8 @@ program.command("dev").description("Start development server with automatic relo
 portExplicit: !!options.port,
 enableOpenApi: options.enableOpenapi ?? true
 });
-} catch (
+} catch (error) {
+console.error(error instanceof Error ? error.message : "Command failed");
 process.exit(1);
 }
 });
@@ -4502,7 +4876,8 @@ program.command("openapi").description("Generate OpenAPI specification from endp
 const globalOptions = program.opts();
 if (globalOptions.cwd) process.chdir(globalOptions.cwd);
 await openapiCommand({});
-} catch (
+} catch (error) {
+console.error(error instanceof Error ? error.message : "Command failed");
 process.exit(1);
 }
 });
@@ -4511,7 +4886,8 @@ program.command("generate:react-query").description("Generate React Query hooks
 const globalOptions = program.opts();
 if (globalOptions.cwd) process.chdir(globalOptions.cwd);
 await generateReactQueryCommand(options);
-} catch (
+} catch (error) {
+console.error(error instanceof Error ? error.message : "Command failed");
 process.exit(1);
 }
 });
@@ -4520,7 +4896,8 @@ program.command("docker").description("Generate Docker deployment files").option
 const globalOptions = program.opts();
 if (globalOptions.cwd) process.chdir(globalOptions.cwd);
 await dockerCommand(options);
-} catch (
+} catch (error) {
+console.error(error instanceof Error ? error.message : "Command failed");
 process.exit(1);
 }
 });
@@ -4548,7 +4925,8 @@ program.command("prepack").description("Generate Docker files for production dep
 const registry = options.registry;
 const _imageRef = registry ? `${registry}/api:${tag}` : `api:${tag}`;
 }
-} catch (
+} catch (error) {
+console.error(error instanceof Error ? error.message : "Command failed");
 process.exit(1);
 }
 });
@@ -4557,7 +4935,8 @@ program.command("secrets:init").description("Initialize secrets for a deployment
 const globalOptions = program.opts();
 if (globalOptions.cwd) process.chdir(globalOptions.cwd);
 await secretsInitCommand(options);
-} catch (
+} catch (error) {
+console.error(error instanceof Error ? error.message : "Command failed");
 process.exit(1);
 }
 });
@@ -4566,7 +4945,8 @@ program.command("secrets:set").description("Set a custom secret for a stage").ar
 const globalOptions = program.opts();
 if (globalOptions.cwd) process.chdir(globalOptions.cwd);
 await secretsSetCommand(key, value, options);
-} catch (
+} catch (error) {
+console.error(error instanceof Error ? error.message : "Command failed");
 process.exit(1);
 }
 });
@@ -4575,7 +4955,8 @@ program.command("secrets:show").description("Show secrets for a stage").required
 const globalOptions = program.opts();
 if (globalOptions.cwd) process.chdir(globalOptions.cwd);
 await secretsShowCommand(options);
-} catch (
+} catch (error) {
+console.error(error instanceof Error ? error.message : "Command failed");
 process.exit(1);
 }
 });
@@ -4584,7 +4965,8 @@ program.command("secrets:rotate").description("Rotate service passwords").requir
 const globalOptions = program.opts();
 if (globalOptions.cwd) process.chdir(globalOptions.cwd);
 await secretsRotateCommand(options);
-} catch (
+} catch (error) {
+console.error(error instanceof Error ? error.message : "Command failed");
 process.exit(1);
 }
 });
@@ -4593,7 +4975,8 @@ program.command("secrets:import").description("Import secrets from a JSON file")
 const globalOptions = program.opts();
 if (globalOptions.cwd) process.chdir(globalOptions.cwd);
 await secretsImportCommand(file, options);
-} catch (
+} catch (error) {
+console.error(error instanceof Error ? error.message : "Command failed");
 process.exit(1);
 }
 });
@@ -4617,7 +5000,8 @@ program.command("deploy").description("Deploy application to a provider").requir
 skipPush: options.skipPush,
 skipBuild: options.skipBuild
 });
-} catch (
+} catch (error) {
+console.error(error instanceof Error ? error.message : "Deploy failed");
 process.exit(1);
 }
 });