@geekmidas/cli 0.12.0 → 0.14.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/bundler-BjholBlA.cjs +131 -0
- package/dist/bundler-BjholBlA.cjs.map +1 -0
- package/dist/bundler-DWctKN1z.mjs +130 -0
- package/dist/bundler-DWctKN1z.mjs.map +1 -0
- package/dist/config.d.cts +1 -1
- package/dist/config.d.mts +1 -1
- package/dist/dokploy-api-B7KxOQr3.cjs +3 -0
- package/dist/dokploy-api-C7F9VykY.cjs +317 -0
- package/dist/dokploy-api-C7F9VykY.cjs.map +1 -0
- package/dist/dokploy-api-CaETb2L6.mjs +305 -0
- package/dist/dokploy-api-CaETb2L6.mjs.map +1 -0
- package/dist/dokploy-api-DHvfmWbi.mjs +3 -0
- package/dist/{encryption-Dyf_r1h-.cjs → encryption-D7Efcdi9.cjs} +1 -1
- package/dist/{encryption-Dyf_r1h-.cjs.map → encryption-D7Efcdi9.cjs.map} +1 -1
- package/dist/{encryption-C8H-38Yy.mjs → encryption-h4Nb6W-M.mjs} +1 -1
- package/dist/{encryption-C8H-38Yy.mjs.map → encryption-h4Nb6W-M.mjs.map} +1 -1
- package/dist/index.cjs +1520 -1136
- package/dist/index.cjs.map +1 -1
- package/dist/index.mjs +1520 -1136
- package/dist/index.mjs.map +1 -1
- package/dist/{openapi-Bt_1FDpT.cjs → openapi-C89hhkZC.cjs} +3 -3
- package/dist/{openapi-Bt_1FDpT.cjs.map → openapi-C89hhkZC.cjs.map} +1 -1
- package/dist/{openapi-BfFlOBCG.mjs → openapi-CZVcfxk-.mjs} +3 -3
- package/dist/{openapi-BfFlOBCG.mjs.map → openapi-CZVcfxk-.mjs.map} +1 -1
- package/dist/{openapi-react-query-B6XTeGqS.mjs → openapi-react-query-CM2_qlW9.mjs} +1 -1
- package/dist/{openapi-react-query-B6XTeGqS.mjs.map → openapi-react-query-CM2_qlW9.mjs.map} +1 -1
- package/dist/{openapi-react-query-B-sNWHFU.cjs → openapi-react-query-iKjfLzff.cjs} +1 -1
- package/dist/{openapi-react-query-B-sNWHFU.cjs.map → openapi-react-query-iKjfLzff.cjs.map} +1 -1
- package/dist/openapi-react-query.cjs +1 -1
- package/dist/openapi-react-query.mjs +1 -1
- package/dist/openapi.cjs +1 -1
- package/dist/openapi.d.cts +1 -1
- package/dist/openapi.d.mts +1 -1
- package/dist/openapi.mjs +1 -1
- package/dist/{storage-C9PU_30f.mjs → storage-BaOP55oq.mjs} +48 -2
- package/dist/storage-BaOP55oq.mjs.map +1 -0
- package/dist/{storage-BXoJvmv2.cjs → storage-Bn3K9Ccu.cjs} +59 -1
- package/dist/storage-Bn3K9Ccu.cjs.map +1 -0
- package/dist/storage-UfyTn7Zm.cjs +7 -0
- package/dist/storage-nkGIjeXt.mjs +3 -0
- package/dist/{types-BR0M2v_c.d.mts → types-BgaMXsUa.d.cts} +3 -1
- package/dist/{types-BR0M2v_c.d.mts.map → types-BgaMXsUa.d.cts.map} +1 -1
- package/dist/{types-BhkZc-vm.d.cts → types-iFk5ms7y.d.mts} +3 -1
- package/dist/{types-BhkZc-vm.d.cts.map → types-iFk5ms7y.d.mts.map} +1 -1
- package/package.json +4 -4
- package/src/auth/__tests__/credentials.spec.ts +127 -0
- package/src/auth/__tests__/index.spec.ts +69 -0
- package/src/auth/credentials.ts +33 -0
- package/src/auth/index.ts +57 -50
- package/src/build/__tests__/bundler.spec.ts +444 -0
- package/src/build/__tests__/endpoint-analyzer.spec.ts +623 -0
- package/src/build/__tests__/handler-templates.spec.ts +272 -0
- package/src/build/bundler.ts +126 -8
- package/src/build/index.ts +31 -0
- package/src/build/types.ts +6 -0
- package/src/deploy/__tests__/dokploy-api.spec.ts +698 -0
- package/src/deploy/__tests__/dokploy.spec.ts +196 -6
- package/src/deploy/__tests__/index.spec.ts +339 -0
- package/src/deploy/__tests__/init.spec.ts +147 -16
- package/src/deploy/docker.ts +32 -3
- package/src/deploy/dokploy-api.ts +581 -0
- package/src/deploy/dokploy.ts +66 -93
- package/src/deploy/index.ts +587 -32
- package/src/deploy/init.ts +192 -249
- package/src/deploy/types.ts +19 -1
- package/src/dev/__tests__/index.spec.ts +95 -0
- package/src/docker/__tests__/templates.spec.ts +144 -0
- package/src/docker/index.ts +96 -6
- package/src/docker/templates.ts +114 -27
- package/src/generators/EndpointGenerator.ts +2 -2
- package/src/index.ts +34 -13
- package/src/secrets/__tests__/storage.spec.ts +208 -0
- package/src/secrets/storage.ts +73 -0
- package/src/types.ts +2 -0
- package/dist/bundler-DRXCw_YR.mjs +0 -70
- package/dist/bundler-DRXCw_YR.mjs.map +0 -1
- package/dist/bundler-WsEvH_b2.cjs +0 -71
- package/dist/bundler-WsEvH_b2.cjs.map +0 -1
- package/dist/storage-BUYQJgz7.cjs +0 -4
- package/dist/storage-BXoJvmv2.cjs.map +0 -1
- package/dist/storage-C9PU_30f.mjs.map +0 -1
- package/dist/storage-DLJAYxzJ.mjs +0 -3
package/dist/index.cjs
CHANGED
|
@@ -1,9 +1,10 @@
|
|
|
1
1
|
#!/usr/bin/env -S npx tsx
|
|
2
2
|
const require_chunk = require('./chunk-CUT6urMc.cjs');
|
|
3
3
|
const require_config = require('./config-AmInkU7k.cjs');
|
|
4
|
-
const require_openapi = require('./openapi-
|
|
5
|
-
const
|
|
6
|
-
const
|
|
4
|
+
const require_openapi = require('./openapi-C89hhkZC.cjs');
|
|
5
|
+
const require_dokploy_api = require('./dokploy-api-C7F9VykY.cjs');
|
|
6
|
+
const require_openapi_react_query = require('./openapi-react-query-iKjfLzff.cjs');
|
|
7
|
+
const require_storage = require('./storage-Bn3K9Ccu.cjs');
|
|
7
8
|
const node_fs = require_chunk.__toESM(require("node:fs"));
|
|
8
9
|
const node_path = require_chunk.__toESM(require("node:path"));
|
|
9
10
|
const commander = require_chunk.__toESM(require("commander"));
|
|
@@ -24,7 +25,7 @@ const node_crypto = require_chunk.__toESM(require("node:crypto"));
|
|
|
24
25
|
|
|
25
26
|
//#region package.json
|
|
26
27
|
var name = "@geekmidas/cli";
|
|
27
|
-
var version = "0.
|
|
28
|
+
var version = "0.14.0";
|
|
28
29
|
var description = "CLI tools for building Lambda handlers, server applications, and generating OpenAPI specs";
|
|
29
30
|
var private$1 = false;
|
|
30
31
|
var type = "module";
|
|
@@ -170,7 +171,8 @@ async function getDokployCredentials(options) {
|
|
|
170
171
|
if (!credentials.dokploy) return null;
|
|
171
172
|
return {
|
|
172
173
|
token: credentials.dokploy.token,
|
|
173
|
-
endpoint: credentials.dokploy.endpoint
|
|
174
|
+
endpoint: credentials.dokploy.endpoint,
|
|
175
|
+
registryId: credentials.dokploy.registryId
|
|
174
176
|
};
|
|
175
177
|
}
|
|
176
178
|
/**
|
|
@@ -193,6 +195,22 @@ async function getDokployToken(options) {
|
|
|
193
195
|
if (stored) return stored.token;
|
|
194
196
|
return null;
|
|
195
197
|
}
|
|
198
|
+
/**
|
|
199
|
+
* Store Dokploy registry ID
|
|
200
|
+
*/
|
|
201
|
+
async function storeDokployRegistryId(registryId, options) {
|
|
202
|
+
const credentials = await readCredentials(options);
|
|
203
|
+
if (!credentials.dokploy) throw new Error("Dokploy credentials not found. Run \"gkm login --service dokploy\" first.");
|
|
204
|
+
credentials.dokploy.registryId = registryId;
|
|
205
|
+
await writeCredentials(credentials, options);
|
|
206
|
+
}
|
|
207
|
+
/**
|
|
208
|
+
* Get Dokploy registry ID from stored credentials
|
|
209
|
+
*/
|
|
210
|
+
async function getDokployRegistryId(options) {
|
|
211
|
+
const stored = await getDokployCredentials(options);
|
|
212
|
+
return stored?.registryId ?? void 0;
|
|
213
|
+
}
|
|
196
214
|
|
|
197
215
|
//#endregion
|
|
198
216
|
//#region src/auth/index.ts
|
|
@@ -201,52 +219,61 @@ const logger$9 = console;
|
|
|
201
219
|
* Validate Dokploy token by making a test API call
|
|
202
220
|
*/
|
|
203
221
|
async function validateDokployToken(endpoint, token) {
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
}
|
|
211
|
-
});
|
|
212
|
-
return response.ok;
|
|
213
|
-
} catch {
|
|
214
|
-
return false;
|
|
215
|
-
}
|
|
222
|
+
const { DokployApi: DokployApi$1 } = await Promise.resolve().then(() => require("./dokploy-api-B7KxOQr3.cjs"));
|
|
223
|
+
const api = new DokployApi$1({
|
|
224
|
+
baseUrl: endpoint,
|
|
225
|
+
token
|
|
226
|
+
});
|
|
227
|
+
return api.validateToken();
|
|
216
228
|
}
|
|
217
229
|
/**
|
|
218
230
|
* Prompt for input (handles both TTY and non-TTY)
|
|
219
231
|
*/
|
|
220
|
-
async function prompt(message, hidden = false) {
|
|
232
|
+
async function prompt$1(message, hidden = false) {
|
|
221
233
|
if (!process.stdin.isTTY) throw new Error("Interactive input required. Please provide --token option.");
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
|
|
230
|
-
|
|
231
|
-
|
|
232
|
-
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
|
|
242
|
-
}
|
|
243
|
-
|
|
244
|
-
|
|
245
|
-
|
|
246
|
-
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
|
|
234
|
+
if (hidden) {
|
|
235
|
+
process.stdout.write(message);
|
|
236
|
+
return new Promise((resolve$1, reject) => {
|
|
237
|
+
let value = "";
|
|
238
|
+
const cleanup = () => {
|
|
239
|
+
process.stdin.setRawMode(false);
|
|
240
|
+
process.stdin.pause();
|
|
241
|
+
process.stdin.removeListener("data", onData);
|
|
242
|
+
process.stdin.removeListener("error", onError);
|
|
243
|
+
};
|
|
244
|
+
const onError = (err) => {
|
|
245
|
+
cleanup();
|
|
246
|
+
reject(err);
|
|
247
|
+
};
|
|
248
|
+
const onData = (char) => {
|
|
249
|
+
const c = char.toString();
|
|
250
|
+
if (c === "\n" || c === "\r") {
|
|
251
|
+
cleanup();
|
|
252
|
+
process.stdout.write("\n");
|
|
253
|
+
resolve$1(value);
|
|
254
|
+
} else if (c === "") {
|
|
255
|
+
cleanup();
|
|
256
|
+
process.stdout.write("\n");
|
|
257
|
+
process.exit(1);
|
|
258
|
+
} else if (c === "" || c === "\b") {
|
|
259
|
+
if (value.length > 0) value = value.slice(0, -1);
|
|
260
|
+
} else value += c;
|
|
261
|
+
};
|
|
262
|
+
process.stdin.setRawMode(true);
|
|
263
|
+
process.stdin.resume();
|
|
264
|
+
process.stdin.on("data", onData);
|
|
265
|
+
process.stdin.on("error", onError);
|
|
266
|
+
});
|
|
267
|
+
} else {
|
|
268
|
+
const rl = node_readline_promises.createInterface({
|
|
269
|
+
input: node_process.stdin,
|
|
270
|
+
output: node_process.stdout
|
|
271
|
+
});
|
|
272
|
+
try {
|
|
273
|
+
return await rl.question(message);
|
|
274
|
+
} finally {
|
|
275
|
+
rl.close();
|
|
276
|
+
}
|
|
250
277
|
}
|
|
251
278
|
}
|
|
252
279
|
/**
|
|
@@ -257,7 +284,7 @@ async function loginCommand(options) {
|
|
|
257
284
|
if (service === "dokploy") {
|
|
258
285
|
logger$9.log("\n🔐 Logging in to Dokploy...\n");
|
|
259
286
|
let endpoint = providedEndpoint;
|
|
260
|
-
if (!endpoint) endpoint = await prompt("Dokploy URL (e.g., https://dokploy.example.com): ");
|
|
287
|
+
if (!endpoint) endpoint = await prompt$1("Dokploy URL (e.g., https://dokploy.example.com): ");
|
|
261
288
|
endpoint = endpoint.replace(/\/$/, "");
|
|
262
289
|
try {
|
|
263
290
|
new URL(endpoint);
|
|
@@ -268,7 +295,7 @@ async function loginCommand(options) {
|
|
|
268
295
|
let token = providedToken;
|
|
269
296
|
if (!token) {
|
|
270
297
|
logger$9.log(`\nGenerate a token at: ${endpoint}/settings/profile\n`);
|
|
271
|
-
token = await prompt("API Token: ", true);
|
|
298
|
+
token = await prompt$1("API Token: ", true);
|
|
272
299
|
}
|
|
273
300
|
if (!token) {
|
|
274
301
|
logger$9.error("Token is required");
|
|
@@ -1057,9 +1084,9 @@ var DevServer = class {
|
|
|
1057
1084
|
}
|
|
1058
1085
|
async createServerEntry() {
|
|
1059
1086
|
const { writeFile: writeFile$8 } = await import("node:fs/promises");
|
|
1060
|
-
const { relative: relative$
|
|
1087
|
+
const { relative: relative$6, dirname: dirname$6 } = await import("node:path");
|
|
1061
1088
|
const serverPath = (0, node_path.join)(process.cwd(), ".gkm", this.provider, "server.ts");
|
|
1062
|
-
const relativeAppPath = relative$
|
|
1089
|
+
const relativeAppPath = relative$6(dirname$6(serverPath), (0, node_path.join)(dirname$6(serverPath), "app.js"));
|
|
1063
1090
|
const serveCode = this.runtime === "bun" ? `Bun.serve({
|
|
1064
1091
|
port,
|
|
1065
1092
|
fetch: app.fetch,
|
|
@@ -1189,6 +1216,16 @@ async function buildCommand(options) {
|
|
|
1189
1216
|
if (studio) logger$6.log(`🗄️ Studio enabled at ${studio.path}`);
|
|
1190
1217
|
const hooks = normalizeHooksConfig(config.hooks);
|
|
1191
1218
|
if (hooks) logger$6.log(`🪝 Server hooks enabled`);
|
|
1219
|
+
const services = config.docker?.compose?.services;
|
|
1220
|
+
const dockerServices = services ? Array.isArray(services) ? {
|
|
1221
|
+
postgres: services.includes("postgres"),
|
|
1222
|
+
redis: services.includes("redis"),
|
|
1223
|
+
rabbitmq: services.includes("rabbitmq")
|
|
1224
|
+
} : {
|
|
1225
|
+
postgres: Boolean(services.postgres),
|
|
1226
|
+
redis: Boolean(services.redis),
|
|
1227
|
+
rabbitmq: Boolean(services.rabbitmq)
|
|
1228
|
+
} : void 0;
|
|
1192
1229
|
const buildContext = {
|
|
1193
1230
|
envParserPath,
|
|
1194
1231
|
envParserImportPattern,
|
|
@@ -1197,7 +1234,8 @@ async function buildCommand(options) {
|
|
|
1197
1234
|
telescope,
|
|
1198
1235
|
studio,
|
|
1199
1236
|
hooks,
|
|
1200
|
-
production
|
|
1237
|
+
production,
|
|
1238
|
+
dockerServices
|
|
1201
1239
|
};
|
|
1202
1240
|
const endpointGenerator = new require_openapi.EndpointGenerator();
|
|
1203
1241
|
const functionGenerator = new FunctionGenerator();
|
|
@@ -1255,14 +1293,23 @@ async function buildForProvider(provider, context, rootOutputDir, endpointGenera
|
|
|
1255
1293
|
let masterKey;
|
|
1256
1294
|
if (context.production?.bundle && !skipBundle) {
|
|
1257
1295
|
logger$6.log(`\n📦 Bundling production server...`);
|
|
1258
|
-
const { bundleServer } = await Promise.resolve().then(() => require("./bundler-
|
|
1296
|
+
const { bundleServer } = await Promise.resolve().then(() => require("./bundler-BjholBlA.cjs"));
|
|
1297
|
+
const allConstructs = [
|
|
1298
|
+
...endpoints.map((e) => e.construct),
|
|
1299
|
+
...functions.map((f) => f.construct),
|
|
1300
|
+
...crons.map((c) => c.construct),
|
|
1301
|
+
...subscribers.map((s) => s.construct)
|
|
1302
|
+
];
|
|
1303
|
+
const dockerServices = context.dockerServices;
|
|
1259
1304
|
const bundleResult = await bundleServer({
|
|
1260
1305
|
entryPoint: (0, node_path.join)(outputDir, "server.ts"),
|
|
1261
1306
|
outputDir: (0, node_path.join)(outputDir, "dist"),
|
|
1262
1307
|
minify: context.production.minify,
|
|
1263
1308
|
sourcemap: false,
|
|
1264
1309
|
external: context.production.external,
|
|
1265
|
-
stage
|
|
1310
|
+
stage,
|
|
1311
|
+
constructs: allConstructs,
|
|
1312
|
+
dockerServices
|
|
1266
1313
|
});
|
|
1267
1314
|
masterKey = bundleResult.masterKey;
|
|
1268
1315
|
logger$6.log(`✅ Bundle complete: .gkm/server/dist/server.mjs`);
|
|
@@ -1277,1175 +1324,1499 @@ async function buildForProvider(provider, context, rootOutputDir, endpointGenera
|
|
|
1277
1324
|
}
|
|
1278
1325
|
|
|
1279
1326
|
//#endregion
|
|
1280
|
-
//#region src/
|
|
1281
|
-
|
|
1282
|
-
|
|
1283
|
-
|
|
1284
|
-
|
|
1285
|
-
|
|
1286
|
-
|
|
1287
|
-
|
|
1327
|
+
//#region src/docker/compose.ts
|
|
1328
|
+
/** Default Docker images for services */
|
|
1329
|
+
const DEFAULT_SERVICE_IMAGES = {
|
|
1330
|
+
postgres: "postgres",
|
|
1331
|
+
redis: "redis",
|
|
1332
|
+
rabbitmq: "rabbitmq"
|
|
1333
|
+
};
|
|
1334
|
+
/** Default Docker image versions for services */
|
|
1335
|
+
const DEFAULT_SERVICE_VERSIONS = {
|
|
1336
|
+
postgres: "16-alpine",
|
|
1337
|
+
redis: "7-alpine",
|
|
1338
|
+
rabbitmq: "3-management-alpine"
|
|
1339
|
+
};
|
|
1340
|
+
/** Get the default full image reference for a service */
|
|
1341
|
+
function getDefaultImage(serviceName) {
|
|
1342
|
+
return `${DEFAULT_SERVICE_IMAGES[serviceName]}:${DEFAULT_SERVICE_VERSIONS[serviceName]}`;
|
|
1288
1343
|
}
|
|
1289
|
-
/**
|
|
1290
|
-
|
|
1291
|
-
*/
|
|
1292
|
-
|
|
1293
|
-
|
|
1294
|
-
|
|
1295
|
-
|
|
1296
|
-
|
|
1297
|
-
|
|
1298
|
-
|
|
1299
|
-
|
|
1300
|
-
|
|
1344
|
+
/** Normalize services config to a consistent format - returns Map of service name to full image reference */
|
|
1345
|
+
function normalizeServices(services) {
|
|
1346
|
+
const result = /* @__PURE__ */ new Map();
|
|
1347
|
+
if (Array.isArray(services)) for (const name$1 of services) result.set(name$1, getDefaultImage(name$1));
|
|
1348
|
+
else for (const [name$1, config] of Object.entries(services)) {
|
|
1349
|
+
const serviceName = name$1;
|
|
1350
|
+
if (config === true) result.set(serviceName, getDefaultImage(serviceName));
|
|
1351
|
+
else if (config && typeof config === "object") {
|
|
1352
|
+
const serviceConfig = config;
|
|
1353
|
+
if (serviceConfig.image) result.set(serviceName, serviceConfig.image);
|
|
1354
|
+
else {
|
|
1355
|
+
const version$1 = serviceConfig.version ?? DEFAULT_SERVICE_VERSIONS[serviceName];
|
|
1356
|
+
result.set(serviceName, `${DEFAULT_SERVICE_IMAGES[serviceName]}:${version$1}`);
|
|
1301
1357
|
}
|
|
1302
|
-
}
|
|
1303
|
-
logger$5.log(`✅ Image built: ${imageRef}`);
|
|
1304
|
-
} catch (error) {
|
|
1305
|
-
throw new Error(`Failed to build Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
|
|
1358
|
+
}
|
|
1306
1359
|
}
|
|
1360
|
+
return result;
|
|
1307
1361
|
}
|
|
1308
1362
|
/**
|
|
1309
|
-
*
|
|
1363
|
+
* Generate docker-compose.yml for production deployment
|
|
1310
1364
|
*/
|
|
1311
|
-
|
|
1312
|
-
|
|
1313
|
-
|
|
1314
|
-
|
|
1315
|
-
|
|
1316
|
-
|
|
1317
|
-
|
|
1318
|
-
|
|
1319
|
-
|
|
1320
|
-
|
|
1365
|
+
function generateDockerCompose(options) {
|
|
1366
|
+
const { imageName, registry, port, healthCheckPath, services } = options;
|
|
1367
|
+
const serviceMap = normalizeServices(services);
|
|
1368
|
+
const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
|
|
1369
|
+
let yaml = `version: '3.8'
|
|
1370
|
+
|
|
1371
|
+
services:
|
|
1372
|
+
api:
|
|
1373
|
+
build:
|
|
1374
|
+
context: ../..
|
|
1375
|
+
dockerfile: .gkm/docker/Dockerfile
|
|
1376
|
+
image: ${imageRef}\${IMAGE_NAME:-${imageName}}:\${TAG:-latest}
|
|
1377
|
+
container_name: ${imageName}
|
|
1378
|
+
restart: unless-stopped
|
|
1379
|
+
ports:
|
|
1380
|
+
- "\${PORT:-${port}}:${port}"
|
|
1381
|
+
environment:
|
|
1382
|
+
- NODE_ENV=production
|
|
1383
|
+
`;
|
|
1384
|
+
if (serviceMap.has("postgres")) yaml += ` - DATABASE_URL=\${DATABASE_URL:-postgresql://postgres:postgres@postgres:5432/app}
|
|
1385
|
+
`;
|
|
1386
|
+
if (serviceMap.has("redis")) yaml += ` - REDIS_URL=\${REDIS_URL:-redis://redis:6379}
|
|
1387
|
+
`;
|
|
1388
|
+
if (serviceMap.has("rabbitmq")) yaml += ` - RABBITMQ_URL=\${RABBITMQ_URL:-amqp://rabbitmq:5672}
|
|
1389
|
+
`;
|
|
1390
|
+
yaml += ` healthcheck:
|
|
1391
|
+
test: ["CMD", "wget", "-q", "--spider", "http://localhost:${port}${healthCheckPath}"]
|
|
1392
|
+
interval: 30s
|
|
1393
|
+
timeout: 3s
|
|
1394
|
+
retries: 3
|
|
1395
|
+
`;
|
|
1396
|
+
if (serviceMap.size > 0) {
|
|
1397
|
+
yaml += ` depends_on:
|
|
1398
|
+
`;
|
|
1399
|
+
for (const serviceName of serviceMap.keys()) yaml += ` ${serviceName}:
|
|
1400
|
+
condition: service_healthy
|
|
1401
|
+
`;
|
|
1321
1402
|
}
|
|
1403
|
+
yaml += ` networks:
|
|
1404
|
+
- app-network
|
|
1405
|
+
`;
|
|
1406
|
+
const postgresImage = serviceMap.get("postgres");
|
|
1407
|
+
if (postgresImage) yaml += `
|
|
1408
|
+
postgres:
|
|
1409
|
+
image: ${postgresImage}
|
|
1410
|
+
container_name: postgres
|
|
1411
|
+
restart: unless-stopped
|
|
1412
|
+
environment:
|
|
1413
|
+
POSTGRES_USER: \${POSTGRES_USER:-postgres}
|
|
1414
|
+
POSTGRES_PASSWORD: \${POSTGRES_PASSWORD:-postgres}
|
|
1415
|
+
POSTGRES_DB: \${POSTGRES_DB:-app}
|
|
1416
|
+
volumes:
|
|
1417
|
+
- postgres_data:/var/lib/postgresql/data
|
|
1418
|
+
healthcheck:
|
|
1419
|
+
test: ["CMD-SHELL", "pg_isready -U postgres"]
|
|
1420
|
+
interval: 5s
|
|
1421
|
+
timeout: 5s
|
|
1422
|
+
retries: 5
|
|
1423
|
+
networks:
|
|
1424
|
+
- app-network
|
|
1425
|
+
`;
|
|
1426
|
+
const redisImage = serviceMap.get("redis");
|
|
1427
|
+
if (redisImage) yaml += `
|
|
1428
|
+
redis:
|
|
1429
|
+
image: ${redisImage}
|
|
1430
|
+
container_name: redis
|
|
1431
|
+
restart: unless-stopped
|
|
1432
|
+
volumes:
|
|
1433
|
+
- redis_data:/data
|
|
1434
|
+
healthcheck:
|
|
1435
|
+
test: ["CMD", "redis-cli", "ping"]
|
|
1436
|
+
interval: 5s
|
|
1437
|
+
timeout: 5s
|
|
1438
|
+
retries: 5
|
|
1439
|
+
networks:
|
|
1440
|
+
- app-network
|
|
1441
|
+
`;
|
|
1442
|
+
const rabbitmqImage = serviceMap.get("rabbitmq");
|
|
1443
|
+
if (rabbitmqImage) yaml += `
|
|
1444
|
+
rabbitmq:
|
|
1445
|
+
image: ${rabbitmqImage}
|
|
1446
|
+
container_name: rabbitmq
|
|
1447
|
+
restart: unless-stopped
|
|
1448
|
+
environment:
|
|
1449
|
+
RABBITMQ_DEFAULT_USER: \${RABBITMQ_USER:-guest}
|
|
1450
|
+
RABBITMQ_DEFAULT_PASS: \${RABBITMQ_PASSWORD:-guest}
|
|
1451
|
+
ports:
|
|
1452
|
+
- "15672:15672" # Management UI
|
|
1453
|
+
volumes:
|
|
1454
|
+
- rabbitmq_data:/var/lib/rabbitmq
|
|
1455
|
+
healthcheck:
|
|
1456
|
+
test: ["CMD", "rabbitmq-diagnostics", "-q", "ping"]
|
|
1457
|
+
interval: 10s
|
|
1458
|
+
timeout: 5s
|
|
1459
|
+
retries: 5
|
|
1460
|
+
networks:
|
|
1461
|
+
- app-network
|
|
1462
|
+
`;
|
|
1463
|
+
yaml += `
|
|
1464
|
+
volumes:
|
|
1465
|
+
`;
|
|
1466
|
+
if (serviceMap.has("postgres")) yaml += ` postgres_data:
|
|
1467
|
+
`;
|
|
1468
|
+
if (serviceMap.has("redis")) yaml += ` redis_data:
|
|
1469
|
+
`;
|
|
1470
|
+
if (serviceMap.has("rabbitmq")) yaml += ` rabbitmq_data:
|
|
1471
|
+
`;
|
|
1472
|
+
yaml += `
|
|
1473
|
+
networks:
|
|
1474
|
+
app-network:
|
|
1475
|
+
driver: bridge
|
|
1476
|
+
`;
|
|
1477
|
+
return yaml;
|
|
1322
1478
|
}
|
|
1323
1479
|
/**
|
|
1324
|
-
*
|
|
1480
|
+
* Generate a minimal docker-compose.yml for API only
|
|
1325
1481
|
*/
|
|
1326
|
-
|
|
1327
|
-
const {
|
|
1328
|
-
const
|
|
1329
|
-
|
|
1330
|
-
|
|
1331
|
-
|
|
1332
|
-
|
|
1333
|
-
|
|
1334
|
-
|
|
1335
|
-
|
|
1336
|
-
|
|
1337
|
-
|
|
1338
|
-
|
|
1339
|
-
|
|
1340
|
-
|
|
1341
|
-
|
|
1342
|
-
|
|
1343
|
-
|
|
1344
|
-
|
|
1345
|
-
|
|
1346
|
-
|
|
1482
|
+
function generateMinimalDockerCompose(options) {
|
|
1483
|
+
const { imageName, registry, port, healthCheckPath } = options;
|
|
1484
|
+
const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
|
|
1485
|
+
return `version: '3.8'
|
|
1486
|
+
|
|
1487
|
+
services:
|
|
1488
|
+
api:
|
|
1489
|
+
build:
|
|
1490
|
+
context: ../..
|
|
1491
|
+
dockerfile: .gkm/docker/Dockerfile
|
|
1492
|
+
image: ${imageRef}\${IMAGE_NAME:-${imageName}}:\${TAG:-latest}
|
|
1493
|
+
container_name: ${imageName}
|
|
1494
|
+
restart: unless-stopped
|
|
1495
|
+
ports:
|
|
1496
|
+
- "\${PORT:-${port}}:${port}"
|
|
1497
|
+
environment:
|
|
1498
|
+
- NODE_ENV=production
|
|
1499
|
+
healthcheck:
|
|
1500
|
+
test: ["CMD", "wget", "-q", "--spider", "http://localhost:${port}${healthCheckPath}"]
|
|
1501
|
+
interval: 30s
|
|
1502
|
+
timeout: 3s
|
|
1503
|
+
retries: 3
|
|
1504
|
+
networks:
|
|
1505
|
+
- app-network
|
|
1506
|
+
|
|
1507
|
+
networks:
|
|
1508
|
+
app-network:
|
|
1509
|
+
driver: bridge
|
|
1510
|
+
`;
|
|
1347
1511
|
}
|
|
1512
|
+
|
|
1513
|
+
//#endregion
|
|
1514
|
+
//#region src/docker/templates.ts
|
|
1515
|
+
const LOCKFILES = [
|
|
1516
|
+
["pnpm-lock.yaml", "pnpm"],
|
|
1517
|
+
["bun.lockb", "bun"],
|
|
1518
|
+
["yarn.lock", "yarn"],
|
|
1519
|
+
["package-lock.json", "npm"]
|
|
1520
|
+
];
|
|
1348
1521
|
/**
|
|
1349
|
-
*
|
|
1350
|
-
|
|
1351
|
-
function resolveDockerConfig$1(config) {
|
|
1352
|
-
return {
|
|
1353
|
-
registry: config.docker?.registry,
|
|
1354
|
-
imageName: config.docker?.imageName
|
|
1355
|
-
};
|
|
1356
|
-
}
|
|
1357
|
-
|
|
1358
|
-
//#endregion
|
|
1359
|
-
//#region src/deploy/dokploy.ts
|
|
1360
|
-
const logger$4 = console;
|
|
1361
|
-
/**
|
|
1362
|
-
* Get the Dokploy API token from stored credentials or environment
|
|
1363
|
-
*/
|
|
1364
|
-
async function getApiToken$1() {
|
|
1365
|
-
const token = await getDokployToken();
|
|
1366
|
-
if (!token) throw new Error("Dokploy credentials not found.\nRun \"gkm login --service dokploy\" to authenticate, or set DOKPLOY_API_TOKEN.");
|
|
1367
|
-
return token;
|
|
1368
|
-
}
|
|
1369
|
-
/**
|
|
1370
|
-
* Make a request to the Dokploy API
|
|
1522
|
+
* Detect package manager from lockfiles
|
|
1523
|
+
* Walks up the directory tree to find lockfile (for monorepos)
|
|
1371
1524
|
*/
|
|
1372
|
-
|
|
1373
|
-
|
|
1374
|
-
const
|
|
1375
|
-
|
|
1376
|
-
|
|
1377
|
-
|
|
1378
|
-
Authorization: `Bearer ${token}`
|
|
1379
|
-
},
|
|
1380
|
-
body: JSON.stringify(body)
|
|
1381
|
-
});
|
|
1382
|
-
if (!response.ok) {
|
|
1383
|
-
let errorMessage = `Dokploy API error: ${response.status} ${response.statusText}`;
|
|
1384
|
-
try {
|
|
1385
|
-
const errorBody = await response.json();
|
|
1386
|
-
if (errorBody.message) errorMessage = `Dokploy API error: ${errorBody.message}`;
|
|
1387
|
-
if (errorBody.issues?.length) errorMessage += `\n Issues: ${errorBody.issues.map((i) => i.message).join(", ")}`;
|
|
1388
|
-
} catch {}
|
|
1389
|
-
throw new Error(errorMessage);
|
|
1525
|
+
function detectPackageManager$1(cwd = process.cwd()) {
|
|
1526
|
+
let dir = cwd;
|
|
1527
|
+
const root = (0, node_path.parse)(dir).root;
|
|
1528
|
+
while (dir !== root) {
|
|
1529
|
+
for (const [lockfile, pm] of LOCKFILES) if ((0, node_fs.existsSync)((0, node_path.join)(dir, lockfile))) return pm;
|
|
1530
|
+
dir = (0, node_path.dirname)(dir);
|
|
1390
1531
|
}
|
|
1391
|
-
|
|
1392
|
-
|
|
1393
|
-
/**
|
|
1394
|
-
* Update application environment variables
|
|
1395
|
-
*/
|
|
1396
|
-
async function updateEnvironment(baseUrl, token, applicationId, envVars) {
|
|
1397
|
-
logger$4.log(" Updating environment variables...");
|
|
1398
|
-
const envString = Object.entries(envVars).map(([key, value]) => `${key}=${value}`).join("\n");
|
|
1399
|
-
await dokployRequest$1("application.update", baseUrl, token, {
|
|
1400
|
-
applicationId,
|
|
1401
|
-
env: envString
|
|
1402
|
-
});
|
|
1403
|
-
logger$4.log(" ✓ Environment variables updated");
|
|
1404
|
-
}
|
|
1405
|
-
/**
|
|
1406
|
-
* Trigger application deployment
|
|
1407
|
-
*/
|
|
1408
|
-
async function triggerDeploy(baseUrl, token, applicationId) {
|
|
1409
|
-
logger$4.log(" Triggering deployment...");
|
|
1410
|
-
await dokployRequest$1("application.deploy", baseUrl, token, { applicationId });
|
|
1411
|
-
logger$4.log(" ✓ Deployment triggered");
|
|
1412
|
-
}
|
|
1413
|
-
/**
|
|
1414
|
-
* Deploy to Dokploy
|
|
1415
|
-
*/
|
|
1416
|
-
async function deployDokploy(options) {
|
|
1417
|
-
const { stage, imageRef, masterKey, config } = options;
|
|
1418
|
-
logger$4.log(`\n🎯 Deploying to Dokploy...`);
|
|
1419
|
-
logger$4.log(` Endpoint: ${config.endpoint}`);
|
|
1420
|
-
logger$4.log(` Application: ${config.applicationId}`);
|
|
1421
|
-
const token = await getApiToken$1();
|
|
1422
|
-
const envVars = {};
|
|
1423
|
-
if (masterKey) envVars.GKM_MASTER_KEY = masterKey;
|
|
1424
|
-
if (Object.keys(envVars).length > 0) await updateEnvironment(config.endpoint, token, config.applicationId, envVars);
|
|
1425
|
-
await triggerDeploy(config.endpoint, token, config.applicationId);
|
|
1426
|
-
logger$4.log("\n✅ Dokploy deployment initiated!");
|
|
1427
|
-
logger$4.log(`\n📋 Deployment details:`);
|
|
1428
|
-
logger$4.log(` Image: ${imageRef}`);
|
|
1429
|
-
logger$4.log(` Stage: ${stage}`);
|
|
1430
|
-
logger$4.log(` Application ID: ${config.applicationId}`);
|
|
1431
|
-
if (masterKey) logger$4.log(`\n🔐 GKM_MASTER_KEY has been set in Dokploy environment`);
|
|
1432
|
-
const deploymentUrl = `${config.endpoint}/project/${config.projectId}`;
|
|
1433
|
-
logger$4.log(`\n🔗 View deployment: ${deploymentUrl}`);
|
|
1434
|
-
return {
|
|
1435
|
-
imageRef,
|
|
1436
|
-
masterKey,
|
|
1437
|
-
url: deploymentUrl
|
|
1438
|
-
};
|
|
1439
|
-
}
|
|
1440
|
-
/**
|
|
1441
|
-
* Validate Dokploy configuration
|
|
1442
|
-
*/
|
|
1443
|
-
function validateDokployConfig(config) {
|
|
1444
|
-
if (!config) return false;
|
|
1445
|
-
const required = [
|
|
1446
|
-
"endpoint",
|
|
1447
|
-
"projectId",
|
|
1448
|
-
"applicationId"
|
|
1449
|
-
];
|
|
1450
|
-
const missing = required.filter((key) => !config[key]);
|
|
1451
|
-
if (missing.length > 0) throw new Error(`Missing Dokploy configuration: ${missing.join(", ")}\nConfigure in gkm.config.ts:
|
|
1452
|
-
providers: {
|
|
1453
|
-
dokploy: {
|
|
1454
|
-
endpoint: 'https://dokploy.example.com',
|
|
1455
|
-
projectId: 'proj_xxx',
|
|
1456
|
-
applicationId: 'app_xxx',
|
|
1457
|
-
},
|
|
1458
|
-
}`);
|
|
1459
|
-
return true;
|
|
1460
|
-
}
|
|
1461
|
-
|
|
1462
|
-
//#endregion
|
|
1463
|
-
//#region src/deploy/index.ts
|
|
1464
|
-
const logger$3 = console;
|
|
1465
|
-
/**
|
|
1466
|
-
* Generate image tag from stage and timestamp
|
|
1467
|
-
*/
|
|
1468
|
-
function generateTag(stage) {
|
|
1469
|
-
const timestamp = (/* @__PURE__ */ new Date()).toISOString().replace(/[:.]/g, "-").slice(0, 19);
|
|
1470
|
-
return `${stage}-${timestamp}`;
|
|
1532
|
+
for (const [lockfile, pm] of LOCKFILES) if ((0, node_fs.existsSync)((0, node_path.join)(root, lockfile))) return pm;
|
|
1533
|
+
return "pnpm";
|
|
1471
1534
|
}
|
|
1472
1535
|
/**
|
|
1473
|
-
*
|
|
1536
|
+
* Find the lockfile path by walking up the directory tree
|
|
1537
|
+
* Returns the full path to the lockfile, or null if not found
|
|
1474
1538
|
*/
|
|
1475
|
-
|
|
1476
|
-
|
|
1477
|
-
|
|
1478
|
-
|
|
1479
|
-
|
|
1480
|
-
|
|
1481
|
-
|
|
1482
|
-
let masterKey;
|
|
1483
|
-
if (!skipBuild) {
|
|
1484
|
-
logger$3.log(`\n📦 Building for production...`);
|
|
1485
|
-
const buildResult = await buildCommand({
|
|
1486
|
-
provider: "server",
|
|
1487
|
-
production: true,
|
|
1488
|
-
stage
|
|
1489
|
-
});
|
|
1490
|
-
masterKey = buildResult.masterKey;
|
|
1491
|
-
} else logger$3.log(`\n⏭️ Skipping build (--skip-build)`);
|
|
1492
|
-
const dockerConfig = resolveDockerConfig$1(config);
|
|
1493
|
-
const imageName = dockerConfig.imageName ?? "app";
|
|
1494
|
-
const registry = dockerConfig.registry;
|
|
1495
|
-
const imageRef = registry ? `${registry}/${imageName}:${imageTag}` : `${imageName}:${imageTag}`;
|
|
1496
|
-
let result;
|
|
1497
|
-
switch (provider) {
|
|
1498
|
-
case "docker": {
|
|
1499
|
-
result = await deployDocker({
|
|
1500
|
-
stage,
|
|
1501
|
-
tag: imageTag,
|
|
1502
|
-
skipPush,
|
|
1503
|
-
masterKey,
|
|
1504
|
-
config: dockerConfig
|
|
1505
|
-
});
|
|
1506
|
-
break;
|
|
1507
|
-
}
|
|
1508
|
-
case "dokploy": {
|
|
1509
|
-
const dokployConfigRaw = config.providers?.dokploy;
|
|
1510
|
-
if (typeof dokployConfigRaw === "boolean" || !dokployConfigRaw) throw new Error("Dokploy provider requires configuration.\nConfigure in gkm.config.ts:\n providers: {\n dokploy: {\n endpoint: 'https://dokploy.example.com',\n projectId: 'proj_xxx',\n applicationId: 'app_xxx',\n },\n }");
|
|
1511
|
-
validateDokployConfig(dokployConfigRaw);
|
|
1512
|
-
const dokployConfig = dokployConfigRaw;
|
|
1513
|
-
await deployDocker({
|
|
1514
|
-
stage,
|
|
1515
|
-
tag: imageTag,
|
|
1516
|
-
skipPush: false,
|
|
1517
|
-
masterKey,
|
|
1518
|
-
config: {
|
|
1519
|
-
registry: dokployConfig.registry ?? dockerConfig.registry,
|
|
1520
|
-
imageName: dockerConfig.imageName
|
|
1521
|
-
}
|
|
1522
|
-
});
|
|
1523
|
-
result = await deployDokploy({
|
|
1524
|
-
stage,
|
|
1525
|
-
tag: imageTag,
|
|
1526
|
-
imageRef,
|
|
1527
|
-
masterKey,
|
|
1528
|
-
config: dokployConfig
|
|
1529
|
-
});
|
|
1530
|
-
break;
|
|
1531
|
-
}
|
|
1532
|
-
case "aws-lambda": {
|
|
1533
|
-
logger$3.log("\n⚠️ AWS Lambda deployment is not yet implemented.");
|
|
1534
|
-
logger$3.log(" Use SST or AWS CDK for Lambda deployments.");
|
|
1535
|
-
result = {
|
|
1536
|
-
imageRef,
|
|
1537
|
-
masterKey
|
|
1538
|
-
};
|
|
1539
|
-
break;
|
|
1539
|
+
/**
 * Locate the nearest package-manager lockfile, walking up from `cwd` to the
 * filesystem root. Lockfile candidates (and their priority order) come from
 * the module-level LOCKFILES table.
 *
 * @param {string} [cwd=process.cwd()] - Directory to start the search from.
 * @returns {string|null} Absolute path of the first lockfile found, or null
 *   when no ancestor directory (including the root) contains one.
 */
function findLockfilePath(cwd = process.cwd()) {
	const root = (0, node_path.parse)(cwd).root;
	let dir = cwd;
	// One loop that also visits the root: the original duplicated the
	// per-lockfile existence check in a second loop just for the root
	// directory; folding it in keeps a single copy of the check.
	for (;;) {
		for (const [lockfile] of LOCKFILES) {
			const lockfilePath = (0, node_path.join)(dir, lockfile);
			if ((0, node_fs.existsSync)(lockfilePath)) return lockfilePath;
		}
		if (dir === root) return null;
		dir = (0, node_path.dirname)(dir);
	}
}
|
|
1598
1555
|
/**
|
|
1599
|
-
*
|
|
1556
|
+
* Check if we're in a monorepo (lockfile is in a parent directory)
|
|
1600
1557
|
*/
|
|
1601
|
-
|
|
1602
|
-
|
|
1558
|
+
/**
 * Report whether `cwd` sits inside a monorepo: true when the nearest
 * lockfile lives in an ancestor directory rather than in `cwd` itself.
 *
 * @param {string} [cwd=process.cwd()] - Directory to test.
 * @returns {boolean} True when a lockfile exists above (not in) `cwd`.
 */
function isMonorepo(cwd = process.cwd()) {
	const lockfilePath = findLockfilePath(cwd);
	// No lockfile anywhere: treat as a plain, standalone project.
	if (lockfilePath === null) return false;
	return (0, node_path.dirname)(lockfilePath) !== cwd;
}
|
|
1604
1564
|
/**
|
|
1605
|
-
*
|
|
1565
|
+
* Check if turbo.json exists (walks up directory tree)
|
|
1606
1566
|
*/
|
|
1607
|
-
|
|
1608
|
-
|
|
1609
|
-
|
|
1610
|
-
|
|
1611
|
-
|
|
1612
|
-
|
|
1613
|
-
const env = await dokployRequest("POST", "environment.create", baseUrl, token, {
|
|
1614
|
-
projectId,
|
|
1615
|
-
name: "production",
|
|
1616
|
-
description: "Production environment"
|
|
1617
|
-
});
|
|
1618
|
-
environmentId = env.environmentId;
|
|
1567
|
+
/**
 * Check whether a turbo.json exists in `cwd` or any ancestor directory,
 * up to and including the filesystem root.
 *
 * @param {string} [cwd=process.cwd()] - Directory to start from.
 * @returns {boolean} True if a turbo.json is found anywhere up the tree.
 */
function hasTurboConfig(cwd = process.cwd()) {
	const root = (0, node_path.parse)(cwd).root;
	for (let dir = cwd; dir !== root; dir = (0, node_path.dirname)(dir)) {
		if ((0, node_fs.existsSync)((0, node_path.join)(dir, "turbo.json"))) return true;
	}
	// The loop stops before the root, so check it separately.
	return (0, node_fs.existsSync)((0, node_path.join)(root, "turbo.json"));
}
|
|
1626
1576
|
/**
|
|
1627
|
-
*
|
|
1577
|
+
* Get install command for turbo builds (without frozen lockfile)
|
|
1578
|
+
* Turbo prune creates a subset that may not perfectly match the lockfile
|
|
1628
1579
|
*/
|
|
1629
|
-
|
|
1630
|
-
|
|
1631
|
-
|
|
1632
|
-
|
|
1633
|
-
|
|
1580
|
+
/**
 * Install command used inside turbo-pruned Docker builds. Deliberately
 * omits --frozen-lockfile: `turbo prune` emits a workspace subset whose
 * lockfile may not match the pruned package set exactly.
 *
 * @param {"pnpm"|"npm"|"yarn"|"bun"} pm - Detected package manager.
 * @returns {string|undefined} The plain install command for `pm`.
 */
function getTurboInstallCmd(pm) {
	switch (pm) {
		case "pnpm":
			return "pnpm install";
		case "npm":
			return "npm install";
		case "yarn":
			return "yarn install";
		case "bun":
			return "bun install";
	}
}
|
|
1635
1589
|
/**
|
|
1636
|
-
* Get
|
|
1590
|
+
* Get package manager specific commands and paths
|
|
1637
1591
|
*/
|
|
1638
|
-
|
|
1639
|
-
|
|
1592
|
+
/**
 * Per-package-manager command/path table used when generating Dockerfiles.
 *
 * Fields:
 *  - install:     shell snippet that provisions the PM inside the image ("" = preinstalled)
 *  - lockfile:    lockfile name to copy into the build context
 *  - fetch:       pre-install fetch step ("" when the PM has none; only pnpm uses it)
 *  - installCmd:  reproducible install command
 *  - cacheTarget: BuildKit cache-mount target for the PM store
 *  - cacheId:     BuildKit cache-mount id
 *  - run:         script-runner prefix
 *  - dlx:         one-off package executor
 *  - addGlobal:   global-install command
 *
 * @param {"pnpm"|"npm"|"yarn"|"bun"} pm - Detected package manager.
 * @returns {object|undefined} Config for `pm`.
 */
function getPmConfig(pm) {
	const table = {
		pnpm: {
			install: "corepack enable && corepack prepare pnpm@latest --activate",
			lockfile: "pnpm-lock.yaml",
			fetch: "pnpm fetch",
			installCmd: "pnpm install --frozen-lockfile --offline",
			cacheTarget: "/root/.local/share/pnpm/store",
			cacheId: "pnpm",
			run: "pnpm",
			dlx: "pnpm dlx",
			addGlobal: "pnpm add -g"
		},
		npm: {
			install: "",
			lockfile: "package-lock.json",
			fetch: "",
			installCmd: "npm ci",
			cacheTarget: "/root/.npm",
			cacheId: "npm",
			run: "npm run",
			dlx: "npx",
			addGlobal: "npm install -g"
		},
		yarn: {
			install: "corepack enable && corepack prepare yarn@stable --activate",
			lockfile: "yarn.lock",
			fetch: "",
			installCmd: "yarn install --frozen-lockfile",
			cacheTarget: "/root/.yarn/cache",
			cacheId: "yarn",
			run: "yarn",
			dlx: "yarn dlx",
			addGlobal: "yarn global add"
		},
		bun: {
			install: "npm install -g bun",
			lockfile: "bun.lockb",
			fetch: "",
			installCmd: "bun install --frozen-lockfile",
			cacheTarget: "/root/.bun/install/cache",
			cacheId: "bun",
			run: "bun run",
			dlx: "bunx",
			addGlobal: "bun add -g"
		}
	};
	return table[pm];
}
|
|
1641
1641
|
/**
|
|
1642
|
-
*
|
|
1642
|
+
* Generate a multi-stage Dockerfile for building from source
|
|
1643
|
+
* Optimized for build speed with:
|
|
1644
|
+
* - BuildKit cache mounts for package manager store
|
|
1645
|
+
* - pnpm fetch for better layer caching (when using pnpm)
|
|
1646
|
+
* - Optional turbo prune for monorepos
|
|
1643
1647
|
*/
|
|
1644
|
-
|
|
1645
|
-
const
|
|
1646
|
-
if (
|
|
1647
|
-
|
|
1648
|
-
|
|
1649
|
-
|
|
1650
|
-
|
|
1651
|
-
|
|
1652
|
-
|
|
1653
|
-
|
|
1654
|
-
|
|
1655
|
-
|
|
1656
|
-
|
|
1657
|
-
|
|
1658
|
-
|
|
1659
|
-
|
|
1660
|
-
|
|
1661
|
-
|
|
1662
|
-
|
|
1663
|
-
|
|
1664
|
-
|
|
1665
|
-
|
|
1666
|
-
|
|
1667
|
-
|
|
1668
|
-
|
|
1669
|
-
|
|
1670
|
-
|
|
1671
|
-
|
|
1672
|
-
|
|
1673
|
-
|
|
1674
|
-
|
|
1675
|
-
|
|
1676
|
-
|
|
1677
|
-
|
|
1678
|
-
|
|
1679
|
-
|
|
1680
|
-
|
|
1681
|
-
|
|
1682
|
-
|
|
1683
|
-
|
|
1684
|
-
|
|
1648
|
+
/**
 * Generate a multi-stage Dockerfile that builds the server from source
 * inside Docker (deps -> builder -> runner on the same base image).
 * Delegates to generateTurboDockerfile when `turbo` is set.
 *
 * Speed optimizations visible below:
 *  - BuildKit cache mounts keyed by the package manager's store
 *  - `pnpm fetch` before COPY of package.json, so source edits don't bust
 *    the dependency layer (pnpm only)
 *
 * @param {object} options - { baseImage, port, healthCheckPath, turbo,
 *   turboPackage, packageManager, ... } (extra keys pass through to the
 *   turbo variant).
 * @returns {string} Complete Dockerfile text.
 */
function generateMultiStageDockerfile(options) {
	const { baseImage, port, healthCheckPath, turbo, turboPackage, packageManager } = options;
	// Turbo monorepo path: same options, defaulting the prune target to "api".
	if (turbo) return generateTurboDockerfile({
		...options,
		turboPackage: turboPackage ?? "api"
	});
	const pm = getPmConfig(packageManager);
	// npm ships with the base image, so pm.install may be empty.
	const installPm = pm.install ? `\n# Install ${packageManager}\nRUN ${pm.install}\n` : "";
	// Only pnpm supports a separate fetch step for layer caching.
	const hasFetch = packageManager === "pnpm";
	const depsStage = hasFetch ? `# Copy lockfile first for better caching
COPY ${pm.lockfile} ./

# Fetch dependencies (downloads to virtual store, cached separately)
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
${pm.fetch}

# Copy package.json after fetch
COPY package.json ./

# Install from cache (fast - no network needed)
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
${pm.installCmd}` : `# Copy package files
COPY package.json ${pm.lockfile} ./

# Install dependencies with cache
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
${pm.installCmd}`;
	// NOTE(review): continuation-line indentation inside these templates is
	// cosmetic to Docker; reconstructed here without extra indent — confirm
	// against the published bundle if byte-exact output matters.
	return `# syntax=docker/dockerfile:1
# Stage 1: Dependencies
FROM ${baseImage} AS deps

WORKDIR /app
${installPm}
${depsStage}

# Stage 2: Build
FROM deps AS builder

WORKDIR /app

# Copy source (deps already installed)
COPY . .

# Build production server using CLI from npm
RUN ${pm.dlx} @geekmidas/cli build --provider server --production

# Stage 3: Production
FROM ${baseImage} AS runner

WORKDIR /app

# Install tini for proper signal handling as PID 1
RUN apk add --no-cache tini

# Create non-root user
RUN addgroup --system --gid 1001 nodejs && \\
adduser --system --uid 1001 hono

# Copy bundled server
COPY --from=builder --chown=hono:nodejs /app/.gkm/server/dist/server.mjs ./

# Environment
ENV NODE_ENV=production
ENV PORT=${port}

# Health check
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \\
CMD wget -q --spider http://localhost:${port}${healthCheckPath} || exit 1

# Switch to non-root user
USER hono

EXPOSE ${port}

# Use tini as entrypoint to handle PID 1 responsibilities
ENTRYPOINT ["/sbin/tini", "--"]
CMD ["node", "server.mjs"]
`;
}
|
|
1686
1727
|
/**
|
|
1687
|
-
*
|
|
1728
|
+
* Generate a Dockerfile optimized for Turbo monorepos
|
|
1729
|
+
* Uses turbo prune to create minimal Docker context
|
|
1688
1730
|
*/
|
|
1689
|
-
|
|
1690
|
-
const {
|
|
1691
|
-
|
|
1692
|
-
|
|
1693
|
-
|
|
1694
|
-
|
|
1695
|
-
|
|
1696
|
-
|
|
1697
|
-
|
|
1698
|
-
|
|
1699
|
-
|
|
1700
|
-
|
|
1701
|
-
|
|
1702
|
-
|
|
1703
|
-
|
|
1704
|
-
|
|
1705
|
-
|
|
1706
|
-
|
|
1707
|
-
|
|
1708
|
-
|
|
1709
|
-
|
|
1710
|
-
|
|
1711
|
-
|
|
1712
|
-
|
|
1713
|
-
|
|
1714
|
-
|
|
1715
|
-
|
|
1716
|
-
|
|
1731
|
+
/**
 * Generate a Dockerfile for Turborepo monorepos. Uses `turbo prune --docker`
 * so only the target package and its workspace dependencies enter the build
 * context (pruner -> deps -> builder -> runner stages).
 *
 * @param {object} options - { baseImage, port, healthCheckPath,
 *   turboPackage, packageManager }.
 * @returns {string} Complete Dockerfile text.
 */
function generateTurboDockerfile(options) {
	const { baseImage, port, healthCheckPath, turboPackage, packageManager } = options;
	const pm = getPmConfig(packageManager);
	const installPm = pm.install ? `RUN ${pm.install}` : "";
	// Non-frozen install: the pruned lockfile is a subset (see getTurboInstallCmd).
	const turboInstallCmd = getTurboInstallCmd(packageManager);
	// turbo itself is executed ad hoc; only pnpm gets its native dlx form here.
	const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
	// NOTE(review): continuation-line indentation inside the template is
	// cosmetic to Docker; reconstructed without extra indent.
	return `# syntax=docker/dockerfile:1
# Stage 1: Prune monorepo
FROM ${baseImage} AS pruner

WORKDIR /app

${installPm}

COPY . .

# Prune to only include necessary packages
RUN ${turboCmd} prune ${turboPackage} --docker

# Stage 2: Install dependencies
FROM ${baseImage} AS deps

WORKDIR /app

${installPm}

# Copy pruned lockfile and package.jsons
COPY --from=pruner /app/out/${pm.lockfile} ./
COPY --from=pruner /app/out/json/ ./

# Install dependencies (no frozen-lockfile since turbo prune creates a subset)
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
${turboInstallCmd}

# Stage 3: Build
FROM deps AS builder

WORKDIR /app

# Copy pruned source
COPY --from=pruner /app/out/full/ ./

# Build production server using CLI from npm
RUN ${pm.dlx} @geekmidas/cli build --provider server --production

# Stage 4: Production
FROM ${baseImage} AS runner

WORKDIR /app

RUN apk add --no-cache tini

RUN addgroup --system --gid 1001 nodejs && \\
adduser --system --uid 1001 hono

COPY --from=builder --chown=hono:nodejs /app/.gkm/server/dist/server.mjs ./

ENV NODE_ENV=production
ENV PORT=${port}

HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \\
CMD wget -q --spider http://localhost:${port}${healthCheckPath} || exit 1

USER hono

EXPOSE ${port}

ENTRYPOINT ["/sbin/tini", "--"]
CMD ["node", "server.mjs"]
`;
}
|
|
1802
|
+
/**
|
|
1803
|
+
* Generate a slim Dockerfile for pre-built bundles
|
|
1804
|
+
*/
|
|
1805
|
+
/**
 * Generate a single-stage "slim" Dockerfile that only copies a bundle
 * already built on the host (.gkm/server/dist/server.mjs) — no in-image
 * install or build steps.
 *
 * @param {object} options - { baseImage, port, healthCheckPath }.
 * @returns {string} Complete Dockerfile text.
 */
function generateSlimDockerfile(options) {
	const { baseImage: image, port: appPort, healthCheckPath: healthPath } = options;
	const dockerfile = `# Slim Dockerfile for pre-built production bundle
FROM ${image}

WORKDIR /app

# Install tini for proper signal handling as PID 1
# Handles SIGTERM propagation and zombie process reaping
RUN apk add --no-cache tini

# Create non-root user
RUN addgroup --system --gid 1001 nodejs && \\
adduser --system --uid 1001 hono

# Copy pre-built bundle
COPY .gkm/server/dist/server.mjs ./

# Environment
ENV NODE_ENV=production
ENV PORT=${appPort}

# Health check
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \\
CMD wget -q --spider http://localhost:${appPort}${healthPath} || exit 1

# Switch to non-root user
USER hono

EXPOSE ${appPort}

# Use tini as entrypoint to handle PID 1 responsibilities
ENTRYPOINT ["/sbin/tini", "--"]
CMD ["node", "server.mjs"]
`;
	return dockerfile;
}
|
|
1841
|
+
/**
|
|
1842
|
+
* Generate .dockerignore file
|
|
1843
|
+
*/
|
|
1844
|
+
/**
 * Generate the .dockerignore contents written to the project root.
 * Excludes dependencies, build artifacts (except the bundled server output),
 * editor/VCS files, logs, tests, docs, env files, and Docker files themselves.
 *
 * @returns {string} The full .dockerignore file body (constant).
 */
function generateDockerignore() {
	return `# Dependencies
node_modules
.pnpm-store

# Build output (except what we need)
.gkm/aws*
.gkm/server/*.ts
!.gkm/server/dist

# IDE and editor
.idea
.vscode
*.swp
*.swo

# Git
.git
.gitignore

# Logs
*.log
npm-debug.log*
pnpm-debug.log*

# Test files
**/*.test.ts
**/*.spec.ts
**/__tests__
coverage

# Documentation
docs
*.md
!README.md

# Environment files (handle secrets separately)
.env
.env.*
!.env.example

# Docker files (don't copy recursively)
Dockerfile*
docker-compose*
.dockerignore
`;
}
|
|
1891
|
+
/**
|
|
1892
|
+
* Generate docker-entrypoint.sh for custom startup logic
|
|
1893
|
+
*/
|
|
1894
|
+
/**
 * Generate docker-entrypoint.sh: a stub that users can extend with custom
 * startup logic (e.g. waiting for a database) before exec'ing the CMD.
 *
 * @returns {string} POSIX sh entrypoint script (constant).
 */
function generateDockerEntrypoint() {
	return `#!/bin/sh
set -e

# Run any custom startup scripts here
# Example: wait for database
# until nc -z $DB_HOST $DB_PORT; do
#   echo "Waiting for database..."
#   sleep 1
# done

# Execute the main command
exec "$@"
`;
}
|
|
1909
|
+
/**
|
|
1910
|
+
* Resolve Docker configuration from GkmConfig with defaults
|
|
1911
|
+
*/
|
|
1912
|
+
/**
 * Resolve the effective Docker settings from a GkmConfig, filling defaults.
 * The default image name is derived from the nearest package.json name
 * (scope stripped), falling back to "api" when it cannot be read.
 *
 * @param {object} config - Loaded gkm configuration (may omit `docker`).
 * @returns {{registry: string, imageName: string, baseImage: string,
 *   port: number, compose: (object|undefined)}} Resolved config.
 */
function resolveDockerConfig$1(config) {
	const docker = config.docker ?? {};
	let inferredName = "api";
	try {
		// Best effort: any failure (missing/unreadable package.json) keeps "api".
		const pkg = require(`${process.cwd()}/package.json`);
		if (pkg.name) inferredName = pkg.name.replace(/^@[^/]+\//, "");
	} catch {}
	return {
		registry: docker.registry ?? "",
		imageName: docker.imageName ?? inferredName,
		baseImage: docker.baseImage ?? "node:22-alpine",
		port: docker.port ?? 3000,
		compose: docker.compose
	};
}
|
|
1927
|
+
|
|
1928
|
+
//#endregion
|
|
1929
|
+
//#region src/docker/index.ts
|
|
1930
|
+
const logger$5 = console;
|
|
1931
|
+
/**
|
|
1932
|
+
* Docker command implementation
|
|
1933
|
+
* Generates Dockerfile, docker-compose.yml, and related files
|
|
1934
|
+
*
|
|
1935
|
+
* Default: Multi-stage Dockerfile that builds from source inside Docker
|
|
1936
|
+
* --slim: Slim Dockerfile that copies pre-built bundle (requires prior build)
|
|
1937
|
+
*/
|
|
1938
|
+
/**
 * `gkm docker` command: generates Dockerfile, docker-compose.yml,
 * .dockerignore, and docker-entrypoint.sh under .gkm/docker (the
 * .dockerignore goes to the project root), optionally building/pushing
 * the image afterwards.
 *
 * Modes:
 *  - default: multi-stage build from source
 *  - --slim:  copy a pre-built bundle (requires .gkm/server/dist/server.mjs)
 *  - turbo:   forced on for monorepos with turbo.json; monorepos WITHOUT
 *             turbo.json are rejected outright
 *
 * @param {object} options - CLI flags: slim, turbo, turboPackage, registry,
 *   tag, build, push.
 * @returns {Promise<{dockerfile: string, dockerCompose: string,
 *   dockerignore: string, entrypoint: string}>} Paths of generated files.
 * @throws {Error} When --slim has no pre-built bundle, or a monorepo lacks
 *   turbo.json.
 */
async function dockerCommand(options) {
	const config = await require_config.loadConfig();
	const dockerConfig = resolveDockerConfig$1(config);
	// providers.server may be a boolean toggle; only object form carries settings.
	const serverConfig = typeof config.providers?.server === "object" ? config.providers.server : void 0;
	const healthCheckPath = serverConfig?.production?.healthCheck ?? "/health";
	const useSlim = options.slim === true;
	if (useSlim) {
		// Slim mode only copies an existing bundle — verify it was built.
		const distDir = (0, node_path.join)(process.cwd(), ".gkm", "server", "dist");
		const hasBuild = (0, node_fs.existsSync)((0, node_path.join)(distDir, "server.mjs"));
		if (!hasBuild) throw new Error("Slim Dockerfile requires a pre-built bundle. Run `gkm build --provider server --production` first, or omit --slim to use multi-stage build.");
	}
	const dockerDir = (0, node_path.join)(process.cwd(), ".gkm", "docker");
	await (0, node_fs_promises.mkdir)(dockerDir, { recursive: true });
	const packageManager = detectPackageManager$1();
	const inMonorepo = isMonorepo();
	const hasTurbo = hasTurboConfig();
	let useTurbo = options.turbo ?? false;
	// Monorepo source builds require turbo prune for dependency isolation.
	if (inMonorepo && !useSlim) if (hasTurbo) {
		useTurbo = true;
		logger$5.log(" Detected monorepo with turbo.json - using turbo prune");
	} else throw new Error("Monorepo detected but turbo.json not found.\n\nDocker builds in monorepos require Turborepo for proper dependency isolation.\n\nTo fix this:\n 1. Install turbo: pnpm add -Dw turbo\n 2. Create turbo.json in your monorepo root\n 3. Run this command again\n\nSee: https://turbo.build/repo/docs/guides/tools/docker");
	let turboPackage = options.turboPackage ?? dockerConfig.imageName;
	// Prefer the actual package.json name (incl. scope) as the prune target
	// when the user did not specify one; best effort only.
	if (useTurbo && !options.turboPackage) try {
		const pkg = require(`${process.cwd()}/package.json`);
		if (pkg.name) {
			turboPackage = pkg.name;
			logger$5.log(` Turbo package: ${turboPackage}`);
		}
	} catch {}
	const templateOptions = {
		imageName: dockerConfig.imageName,
		baseImage: dockerConfig.baseImage,
		port: dockerConfig.port,
		healthCheckPath,
		prebuilt: useSlim,
		turbo: useTurbo,
		turboPackage,
		packageManager
	};
	const dockerfile = useSlim ? generateSlimDockerfile(templateOptions) : generateMultiStageDockerfile(templateOptions);
	const dockerMode = useSlim ? "slim" : useTurbo ? "turbo" : "multi-stage";
	const dockerfilePath = (0, node_path.join)(dockerDir, "Dockerfile");
	await (0, node_fs_promises.writeFile)(dockerfilePath, dockerfile);
	logger$5.log(`Generated: .gkm/docker/Dockerfile (${dockerMode}, ${packageManager})`);
	const composeOptions = {
		imageName: dockerConfig.imageName,
		registry: options.registry ?? dockerConfig.registry,
		port: dockerConfig.port,
		healthCheckPath,
		services: dockerConfig.compose?.services ?? {}
	};
	// services may be configured as an array or a keyed object — count both.
	const hasServices = Array.isArray(composeOptions.services) ? composeOptions.services.length > 0 : Object.keys(composeOptions.services).length > 0;
	const dockerCompose = hasServices ? generateDockerCompose(composeOptions) : generateMinimalDockerCompose(composeOptions);
	const composePath = (0, node_path.join)(dockerDir, "docker-compose.yml");
	await (0, node_fs_promises.writeFile)(composePath, dockerCompose);
	logger$5.log("Generated: .gkm/docker/docker-compose.yml");
	const dockerignore = generateDockerignore();
	// .dockerignore must sit next to the build context root, not in .gkm/docker.
	const dockerignorePath = (0, node_path.join)(process.cwd(), ".dockerignore");
	await (0, node_fs_promises.writeFile)(dockerignorePath, dockerignore);
	logger$5.log("Generated: .dockerignore (project root)");
	const entrypoint = generateDockerEntrypoint();
	const entrypointPath = (0, node_path.join)(dockerDir, "docker-entrypoint.sh");
	await (0, node_fs_promises.writeFile)(entrypointPath, entrypoint);
	logger$5.log("Generated: .gkm/docker/docker-entrypoint.sh");
	const result = {
		dockerfile: dockerfilePath,
		dockerCompose: composePath,
		dockerignore: dockerignorePath,
		entrypoint: entrypointPath
	};
	if (options.build) await buildDockerImage(dockerConfig.imageName, options);
	if (options.push) await pushDockerImage(dockerConfig.imageName, options);
	return result;
}
|
|
1749
2012
|
/**
|
|
1750
|
-
*
|
|
2013
|
+
* Ensure lockfile exists in the build context
|
|
2014
|
+
* For monorepos, copies from workspace root if needed
|
|
2015
|
+
* Returns cleanup function if file was copied
|
|
1751
2016
|
*/
|
|
1752
|
-
|
|
1753
|
-
|
|
1754
|
-
if (!
|
|
1755
|
-
|
|
1756
|
-
|
|
1757
|
-
else throw new Error("Dokploy endpoint not specified.\nEither run \"gkm login --service dokploy\" first, or provide --endpoint.");
|
|
1758
|
-
}
|
|
1759
|
-
const { resource } = options;
|
|
1760
|
-
const token = await getApiToken();
|
|
1761
|
-
if (resource === "projects") {
|
|
1762
|
-
logger$2.log(`\n📁 Projects in ${endpoint}:`);
|
|
1763
|
-
const projects = await getProjects(endpoint, token);
|
|
1764
|
-
if (projects.length === 0) {
|
|
1765
|
-
logger$2.log(" No projects found");
|
|
1766
|
-
return;
|
|
1767
|
-
}
|
|
1768
|
-
for (const project of projects) {
|
|
1769
|
-
logger$2.log(`\n ${project.name} (${project.projectId})`);
|
|
1770
|
-
if (project.description) logger$2.log(` ${project.description}`);
|
|
1771
|
-
}
|
|
1772
|
-
} else if (resource === "registries") {
|
|
1773
|
-
logger$2.log(`\n🐳 Registries in ${endpoint}:`);
|
|
1774
|
-
const registries = await getRegistries(endpoint, token);
|
|
1775
|
-
if (registries.length === 0) {
|
|
1776
|
-
logger$2.log(" No registries configured");
|
|
1777
|
-
logger$2.log(" Add a registry in Dokploy: Settings > Docker Registry");
|
|
1778
|
-
return;
|
|
1779
|
-
}
|
|
1780
|
-
for (const registry of registries) {
|
|
1781
|
-
logger$2.log(`\n ${registry.registryName} (${registry.registryId})`);
|
|
1782
|
-
logger$2.log(` URL: ${registry.registryUrl}`);
|
|
1783
|
-
logger$2.log(` Username: ${registry.username}`);
|
|
1784
|
-
if (registry.imagePrefix) logger$2.log(` Prefix: ${registry.imagePrefix}`);
|
|
1785
|
-
}
|
|
2017
|
+
/**
 * Make sure a lockfile is present in the Docker build context at `cwd`.
 * In a monorepo the lockfile lives at the workspace root, so it is copied
 * down temporarily; the returned function removes that copy again.
 *
 * @param {string} cwd - Build context directory.
 * @returns {(() => void)|null} Cleanup callback when a copy was made,
 *   otherwise null (lockfile already local, or none exists at all).
 */
function ensureLockfile(cwd) {
	const lockfilePath = findLockfilePath(cwd);
	if (!lockfilePath) {
		// Best effort: warn and carry on rather than aborting the build.
		logger$5.warn("\n⚠️ No lockfile found. Docker build may fail or use stale dependencies.");
		return null;
	}
	const lockfileName = (0, node_path.basename)(lockfilePath);
	const localLockfile = (0, node_path.join)(cwd, lockfileName);
	// Already in the build context — nothing to copy, nothing to clean up.
	if (lockfilePath === localLockfile) return null;
	logger$5.log(` Copying ${lockfileName} from monorepo root...`);
	(0, node_fs.copyFileSync)(lockfilePath, localLockfile);
	return function cleanup() {
		try {
			(0, node_fs.unlinkSync)(localLockfile);
		} catch {}
	};
}
|
|
1788
|
-
|
|
1789
|
-
|
|
1790
|
-
|
|
1791
|
-
|
|
1792
|
-
|
|
1793
|
-
|
|
1794
|
-
|
|
1795
|
-
|
|
1796
|
-
};
|
|
1797
|
-
|
|
1798
|
-
const
|
|
1799
|
-
|
|
1800
|
-
|
|
1801
|
-
|
|
1802
|
-
|
|
1803
|
-
|
|
1804
|
-
|
|
1805
|
-
|
|
1806
|
-
}
|
|
1807
|
-
/** Normalize services config to a consistent format - returns Map of service name to full image reference */
|
|
1808
|
-
function normalizeServices(services) {
|
|
1809
|
-
const result = /* @__PURE__ */ new Map();
|
|
1810
|
-
if (Array.isArray(services)) for (const name$1 of services) result.set(name$1, getDefaultImage(name$1));
|
|
1811
|
-
else for (const [name$1, config] of Object.entries(services)) {
|
|
1812
|
-
const serviceName = name$1;
|
|
1813
|
-
if (config === true) result.set(serviceName, getDefaultImage(serviceName));
|
|
1814
|
-
else if (config && typeof config === "object") {
|
|
1815
|
-
const serviceConfig = config;
|
|
1816
|
-
if (serviceConfig.image) result.set(serviceName, serviceConfig.image);
|
|
1817
|
-
else {
|
|
1818
|
-
const version$1 = serviceConfig.version ?? DEFAULT_SERVICE_VERSIONS[serviceName];
|
|
1819
|
-
result.set(serviceName, `${DEFAULT_SERVICE_IMAGES[serviceName]}:${version$1}`);
|
|
2034
|
+
/**
|
|
2035
|
+
* Build Docker image
|
|
2036
|
+
* Uses BuildKit for cache mount support
|
|
2037
|
+
*/
|
|
2038
|
+
/**
 * Build the Docker image from the generated .gkm/docker/Dockerfile.
 * BuildKit is enabled (env var plus command prefix) because the generated
 * Dockerfiles rely on --mount=type=cache.
 *
 * A monorepo lockfile is temporarily copied into the context via
 * ensureLockfile and always removed afterwards.
 *
 * @param {string} imageName - Image name without registry/tag.
 * @param {object} options - { tag?, registry? } from the CLI.
 * @throws {Error} When `docker build` exits non-zero.
 */
async function buildDockerImage(imageName, options) {
	const imageTag = options.tag ?? "latest";
	const registry = options.registry;
	const fullImageName = registry ? `${registry}/${imageName}:${imageTag}` : `${imageName}:${imageTag}`;
	logger$5.log(`\n🐳 Building Docker image: ${fullImageName}`);
	const cwd = process.cwd();
	const cleanup = ensureLockfile(cwd);
	try {
		const buildCmd = `DOCKER_BUILDKIT=1 docker build -f .gkm/docker/Dockerfile -t ${fullImageName} .`;
		(0, node_child_process.execSync)(buildCmd, {
			cwd,
			stdio: "inherit",
			env: {
				...process.env,
				DOCKER_BUILDKIT: "1"
			}
		});
		logger$5.log(`✅ Docker image built: ${fullImageName}`);
	} catch (error) {
		const reason = error instanceof Error ? error.message : "Unknown error";
		throw new Error(`Failed to build Docker image: ${reason}`);
	} finally {
		// Remove the temporary lockfile copy even when the build fails.
		cleanup?.();
	}
}
|
|
1825
2061
|
/**
|
|
1826
|
-
*
|
|
2062
|
+
* Push Docker image to registry
|
|
1827
2063
|
*/
|
|
1828
|
-
function
|
|
1829
|
-
const
|
|
1830
|
-
const
|
|
1831
|
-
|
|
1832
|
-
|
|
1833
|
-
|
|
1834
|
-
|
|
1835
|
-
|
|
1836
|
-
|
|
1837
|
-
|
|
1838
|
-
|
|
1839
|
-
|
|
1840
|
-
|
|
1841
|
-
|
|
1842
|
-
ports:
|
|
1843
|
-
- "\${PORT:-${port}}:${port}"
|
|
1844
|
-
environment:
|
|
1845
|
-
- NODE_ENV=production
|
|
1846
|
-
`;
|
|
1847
|
-
if (serviceMap.has("postgres")) yaml += ` - DATABASE_URL=\${DATABASE_URL:-postgresql://postgres:postgres@postgres:5432/app}
|
|
1848
|
-
`;
|
|
1849
|
-
if (serviceMap.has("redis")) yaml += ` - REDIS_URL=\${REDIS_URL:-redis://redis:6379}
|
|
1850
|
-
`;
|
|
1851
|
-
if (serviceMap.has("rabbitmq")) yaml += ` - RABBITMQ_URL=\${RABBITMQ_URL:-amqp://rabbitmq:5672}
|
|
1852
|
-
`;
|
|
1853
|
-
yaml += ` healthcheck:
|
|
1854
|
-
test: ["CMD", "wget", "-q", "--spider", "http://localhost:${port}${healthCheckPath}"]
|
|
1855
|
-
interval: 30s
|
|
1856
|
-
timeout: 3s
|
|
1857
|
-
retries: 3
|
|
1858
|
-
`;
|
|
1859
|
-
if (serviceMap.size > 0) {
|
|
1860
|
-
yaml += ` depends_on:
|
|
1861
|
-
`;
|
|
1862
|
-
for (const serviceName of serviceMap.keys()) yaml += ` ${serviceName}:
|
|
1863
|
-
condition: service_healthy
|
|
1864
|
-
`;
|
|
2064
|
+
/**
 * Push a previously built Docker image to its registry.
 *
 * @param {string} imageName - Image name without registry/tag.
 * @param {object} options - { tag?, registry? } from the CLI.
 * @throws {Error} When no registry is configured, or `docker push`
 *   exits non-zero.
 */
async function pushDockerImage(imageName, options) {
	const imageTag = options.tag ?? "latest";
	const registry = options.registry;
	// A push target is mandatory — local-only images cannot be pushed.
	if (!registry) throw new Error("Registry is required to push Docker image. Use --registry or configure docker.registry in gkm.config.ts");
	const fullImageName = `${registry}/${imageName}:${imageTag}`;
	logger$5.log(`\n🚀 Pushing Docker image: ${fullImageName}`);
	try {
		(0, node_child_process.execSync)(`docker push ${fullImageName}`, {
			cwd: process.cwd(),
			stdio: "inherit"
		});
		logger$5.log(`✅ Docker image pushed: ${fullImageName}`);
	} catch (error) {
		const reason = error instanceof Error ? error.message : "Unknown error";
		throw new Error(`Failed to push Docker image: ${reason}`);
	}
}
|
|
2080
|
+
|
|
2081
|
+
//#endregion
|
|
2082
|
+
//#region src/deploy/docker.ts
|
|
2083
|
+
const logger$4 = console;
|
|
2084
|
+
/**
|
|
2085
|
+
* Get the full image reference
|
|
2086
|
+
*/
|
|
2087
|
+
function getImageRef(registry, imageName, tag) {
|
|
2088
|
+
if (registry) return `${registry}/${imageName}:${tag}`;
|
|
2089
|
+
return `${imageName}:${tag}`;
|
|
1941
2090
|
}
|
|
1942
2091
|
/**
|
|
1943
|
-
*
|
|
2092
|
+
* Build Docker image
|
|
1944
2093
|
*/
|
|
1945
|
-
function
|
|
1946
|
-
|
|
1947
|
-
const
|
|
1948
|
-
|
|
1949
|
-
|
|
1950
|
-
|
|
1951
|
-
|
|
1952
|
-
|
|
1953
|
-
|
|
1954
|
-
|
|
1955
|
-
|
|
1956
|
-
|
|
1957
|
-
|
|
1958
|
-
|
|
1959
|
-
|
|
1960
|
-
|
|
1961
|
-
|
|
1962
|
-
|
|
1963
|
-
|
|
1964
|
-
|
|
1965
|
-
|
|
1966
|
-
|
|
1967
|
-
|
|
1968
|
-
|
|
1969
|
-
|
|
1970
|
-
|
|
1971
|
-
|
|
1972
|
-
|
|
1973
|
-
|
|
2094
|
+
async function buildImage(imageRef) {
|
|
2095
|
+
logger$4.log(`\n🔨 Building Docker image: ${imageRef}`);
|
|
2096
|
+
const cwd = process.cwd();
|
|
2097
|
+
const inMonorepo = isMonorepo(cwd);
|
|
2098
|
+
if (inMonorepo) logger$4.log(" Generating Dockerfile for monorepo (turbo prune)...");
|
|
2099
|
+
else logger$4.log(" Generating Dockerfile...");
|
|
2100
|
+
await dockerCommand({});
|
|
2101
|
+
let buildCwd = cwd;
|
|
2102
|
+
let dockerfilePath = ".gkm/docker/Dockerfile";
|
|
2103
|
+
if (inMonorepo) {
|
|
2104
|
+
const lockfilePath = findLockfilePath(cwd);
|
|
2105
|
+
if (lockfilePath) {
|
|
2106
|
+
const monorepoRoot = (0, node_path.dirname)(lockfilePath);
|
|
2107
|
+
const appRelPath = (0, node_path.relative)(monorepoRoot, cwd);
|
|
2108
|
+
dockerfilePath = (0, node_path.join)(appRelPath, ".gkm/docker/Dockerfile");
|
|
2109
|
+
buildCwd = monorepoRoot;
|
|
2110
|
+
logger$4.log(` Building from monorepo root: ${monorepoRoot}`);
|
|
2111
|
+
}
|
|
2112
|
+
}
|
|
2113
|
+
try {
|
|
2114
|
+
(0, node_child_process.execSync)(`DOCKER_BUILDKIT=1 docker build --platform linux/amd64 -f ${dockerfilePath} -t ${imageRef} .`, {
|
|
2115
|
+
cwd: buildCwd,
|
|
2116
|
+
stdio: "inherit",
|
|
2117
|
+
env: {
|
|
2118
|
+
...process.env,
|
|
2119
|
+
DOCKER_BUILDKIT: "1"
|
|
2120
|
+
}
|
|
2121
|
+
});
|
|
2122
|
+
logger$4.log(`✅ Image built: ${imageRef}`);
|
|
2123
|
+
} catch (error) {
|
|
2124
|
+
throw new Error(`Failed to build Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
|
|
2125
|
+
}
|
|
1974
2126
|
}
|
|
1975
|
-
|
|
1976
|
-
//#endregion
|
|
1977
|
-
//#region src/docker/templates.ts
|
|
1978
2127
|
/**
|
|
1979
|
-
*
|
|
1980
|
-
* Walks up the directory tree to find lockfile (for monorepos)
|
|
2128
|
+
* Push Docker image to registry
|
|
1981
2129
|
*/
|
|
1982
|
-
function
|
|
1983
|
-
|
|
1984
|
-
|
|
1985
|
-
|
|
1986
|
-
|
|
1987
|
-
|
|
1988
|
-
|
|
1989
|
-
|
|
1990
|
-
|
|
1991
|
-
|
|
1992
|
-
for (const [lockfile, pm] of lockfiles) if ((0, node_fs.existsSync)((0, node_path.join)(dir, lockfile))) return pm;
|
|
1993
|
-
dir = (0, node_path.dirname)(dir);
|
|
2130
|
+
async function pushImage(imageRef) {
|
|
2131
|
+
logger$4.log(`\n☁️ Pushing image: ${imageRef}`);
|
|
2132
|
+
try {
|
|
2133
|
+
(0, node_child_process.execSync)(`docker push ${imageRef}`, {
|
|
2134
|
+
cwd: process.cwd(),
|
|
2135
|
+
stdio: "inherit"
|
|
2136
|
+
});
|
|
2137
|
+
logger$4.log(`✅ Image pushed: ${imageRef}`);
|
|
2138
|
+
} catch (error) {
|
|
2139
|
+
throw new Error(`Failed to push Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
|
|
1994
2140
|
}
|
|
1995
|
-
for (const [lockfile, pm] of lockfiles) if ((0, node_fs.existsSync)((0, node_path.join)(root, lockfile))) return pm;
|
|
1996
|
-
return "pnpm";
|
|
1997
2141
|
}
|
|
1998
2142
|
/**
|
|
1999
|
-
*
|
|
2143
|
+
* Deploy using Docker (build and optionally push image)
|
|
2000
2144
|
*/
|
|
2001
|
-
function
|
|
2002
|
-
const
|
|
2003
|
-
|
|
2004
|
-
|
|
2005
|
-
|
|
2006
|
-
|
|
2007
|
-
|
|
2008
|
-
|
|
2009
|
-
|
|
2010
|
-
|
|
2011
|
-
|
|
2012
|
-
|
|
2013
|
-
|
|
2014
|
-
|
|
2015
|
-
|
|
2016
|
-
|
|
2017
|
-
|
|
2018
|
-
|
|
2019
|
-
|
|
2020
|
-
|
|
2021
|
-
addGlobal: "npm install -g"
|
|
2022
|
-
},
|
|
2023
|
-
yarn: {
|
|
2024
|
-
install: "corepack enable && corepack prepare yarn@stable --activate",
|
|
2025
|
-
lockfile: "yarn.lock",
|
|
2026
|
-
fetch: "",
|
|
2027
|
-
installCmd: "yarn install --frozen-lockfile",
|
|
2028
|
-
cacheTarget: "/root/.yarn/cache",
|
|
2029
|
-
cacheId: "yarn",
|
|
2030
|
-
run: "yarn",
|
|
2031
|
-
addGlobal: "yarn global add"
|
|
2032
|
-
},
|
|
2033
|
-
bun: {
|
|
2034
|
-
install: "npm install -g bun",
|
|
2035
|
-
lockfile: "bun.lockb",
|
|
2036
|
-
fetch: "",
|
|
2037
|
-
installCmd: "bun install --frozen-lockfile",
|
|
2038
|
-
cacheTarget: "/root/.bun/install/cache",
|
|
2039
|
-
cacheId: "bun",
|
|
2040
|
-
run: "bun run",
|
|
2041
|
-
addGlobal: "bun add -g"
|
|
2042
|
-
}
|
|
2145
|
+
async function deployDocker(options) {
|
|
2146
|
+
const { stage, tag, skipPush, masterKey, config } = options;
|
|
2147
|
+
const imageName = config.imageName ?? "app";
|
|
2148
|
+
const imageRef = getImageRef(config.registry, imageName, tag);
|
|
2149
|
+
await buildImage(imageRef);
|
|
2150
|
+
if (!skipPush) if (!config.registry) logger$4.warn("\n⚠️ No registry configured. Use --skip-push or configure docker.registry in gkm.config.ts");
|
|
2151
|
+
else await pushImage(imageRef);
|
|
2152
|
+
logger$4.log("\n✅ Docker deployment ready!");
|
|
2153
|
+
logger$4.log(`\n📋 Deployment details:`);
|
|
2154
|
+
logger$4.log(` Image: ${imageRef}`);
|
|
2155
|
+
logger$4.log(` Stage: ${stage}`);
|
|
2156
|
+
if (masterKey) {
|
|
2157
|
+
logger$4.log(`\n🔐 Deploy with this environment variable:`);
|
|
2158
|
+
logger$4.log(` GKM_MASTER_KEY=${masterKey}`);
|
|
2159
|
+
logger$4.log("\n Example docker run:");
|
|
2160
|
+
logger$4.log(` docker run -e GKM_MASTER_KEY=${masterKey} ${imageRef}`);
|
|
2161
|
+
}
|
|
2162
|
+
return {
|
|
2163
|
+
imageRef,
|
|
2164
|
+
masterKey
|
|
2043
2165
|
};
|
|
2044
|
-
return configs[pm];
|
|
2045
2166
|
}
|
|
2046
2167
|
/**
|
|
2047
|
-
*
|
|
2048
|
-
* Optimized for build speed with:
|
|
2049
|
-
* - BuildKit cache mounts for package manager store
|
|
2050
|
-
* - pnpm fetch for better layer caching (when using pnpm)
|
|
2051
|
-
* - Optional turbo prune for monorepos
|
|
2168
|
+
* Resolve Docker deploy config from gkm config
|
|
2052
2169
|
*/
|
|
2053
|
-
function
|
|
2054
|
-
|
|
2055
|
-
|
|
2056
|
-
|
|
2057
|
-
|
|
2058
|
-
|
|
2059
|
-
const pm = getPmConfig(packageManager);
|
|
2060
|
-
const installPm = pm.install ? `\n# Install ${packageManager}\nRUN ${pm.install}\n` : "";
|
|
2061
|
-
const hasFetch = packageManager === "pnpm";
|
|
2062
|
-
const depsStage = hasFetch ? `# Copy lockfile first for better caching
|
|
2063
|
-
COPY ${pm.lockfile} ./
|
|
2064
|
-
|
|
2065
|
-
# Fetch dependencies (downloads to virtual store, cached separately)
|
|
2066
|
-
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
|
|
2067
|
-
${pm.fetch}
|
|
2068
|
-
|
|
2069
|
-
# Copy package.json after fetch
|
|
2070
|
-
COPY package.json ./
|
|
2071
|
-
|
|
2072
|
-
# Install from cache (fast - no network needed)
|
|
2073
|
-
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
|
|
2074
|
-
${pm.installCmd}` : `# Copy package files
|
|
2075
|
-
COPY package.json ${pm.lockfile} ./
|
|
2076
|
-
|
|
2077
|
-
# Install dependencies with cache
|
|
2078
|
-
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
|
|
2079
|
-
${pm.installCmd}`;
|
|
2080
|
-
return `# syntax=docker/dockerfile:1
|
|
2081
|
-
# Stage 1: Dependencies
|
|
2082
|
-
FROM ${baseImage} AS deps
|
|
2083
|
-
|
|
2084
|
-
WORKDIR /app
|
|
2085
|
-
${installPm}
|
|
2086
|
-
${depsStage}
|
|
2087
|
-
|
|
2088
|
-
# Stage 2: Build
|
|
2089
|
-
FROM deps AS builder
|
|
2090
|
-
|
|
2091
|
-
WORKDIR /app
|
|
2092
|
-
|
|
2093
|
-
# Copy source (deps already installed)
|
|
2094
|
-
COPY . .
|
|
2095
|
-
|
|
2096
|
-
# Build production server
|
|
2097
|
-
RUN ${pm.run} gkm build --provider server --production
|
|
2098
|
-
|
|
2099
|
-
# Stage 3: Production
|
|
2100
|
-
FROM ${baseImage} AS runner
|
|
2101
|
-
|
|
2102
|
-
WORKDIR /app
|
|
2103
|
-
|
|
2104
|
-
# Install tini for proper signal handling as PID 1
|
|
2105
|
-
RUN apk add --no-cache tini
|
|
2106
|
-
|
|
2107
|
-
# Create non-root user
|
|
2108
|
-
RUN addgroup --system --gid 1001 nodejs && \\
|
|
2109
|
-
adduser --system --uid 1001 hono
|
|
2110
|
-
|
|
2111
|
-
# Copy bundled server
|
|
2112
|
-
COPY --from=builder --chown=hono:nodejs /app/.gkm/server/dist/server.mjs ./
|
|
2113
|
-
|
|
2114
|
-
# Environment
|
|
2115
|
-
ENV NODE_ENV=production
|
|
2116
|
-
ENV PORT=${port}
|
|
2117
|
-
|
|
2118
|
-
# Health check
|
|
2119
|
-
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \\
|
|
2120
|
-
CMD wget -q --spider http://localhost:${port}${healthCheckPath} || exit 1
|
|
2121
|
-
|
|
2122
|
-
# Switch to non-root user
|
|
2123
|
-
USER hono
|
|
2170
|
+
function resolveDockerConfig(config) {
|
|
2171
|
+
return {
|
|
2172
|
+
registry: config.docker?.registry,
|
|
2173
|
+
imageName: config.docker?.imageName
|
|
2174
|
+
};
|
|
2175
|
+
}
|
|
2124
2176
|
|
|
2125
|
-
|
|
2177
|
+
//#endregion
|
|
2178
|
+
//#region src/deploy/dokploy.ts
|
|
2179
|
+
const logger$3 = console;
|
|
2180
|
+
/**
|
|
2181
|
+
* Get the Dokploy API token from stored credentials or environment
|
|
2182
|
+
*/
|
|
2183
|
+
async function getApiToken$1() {
|
|
2184
|
+
const token = await getDokployToken();
|
|
2185
|
+
if (!token) throw new Error("Dokploy credentials not found.\nRun \"gkm login --service dokploy\" to authenticate, or set DOKPLOY_API_TOKEN.");
|
|
2186
|
+
return token;
|
|
2187
|
+
}
|
|
2188
|
+
/**
|
|
2189
|
+
* Create a Dokploy API client
|
|
2190
|
+
*/
|
|
2191
|
+
async function createApi$1(endpoint) {
|
|
2192
|
+
const token = await getApiToken$1();
|
|
2193
|
+
return new require_dokploy_api.DokployApi({
|
|
2194
|
+
baseUrl: endpoint,
|
|
2195
|
+
token
|
|
2196
|
+
});
|
|
2197
|
+
}
|
|
2198
|
+
/**
|
|
2199
|
+
* Deploy to Dokploy
|
|
2200
|
+
*/
|
|
2201
|
+
async function deployDokploy(options) {
|
|
2202
|
+
const { stage, imageRef, masterKey, config } = options;
|
|
2203
|
+
logger$3.log(`\n🎯 Deploying to Dokploy...`);
|
|
2204
|
+
logger$3.log(` Endpoint: ${config.endpoint}`);
|
|
2205
|
+
logger$3.log(` Application: ${config.applicationId}`);
|
|
2206
|
+
const api = await createApi$1(config.endpoint);
|
|
2207
|
+
logger$3.log(` Configuring Docker image: ${imageRef}`);
|
|
2208
|
+
const registryOptions = {};
|
|
2209
|
+
if (config.registryId) {
|
|
2210
|
+
registryOptions.registryId = config.registryId;
|
|
2211
|
+
logger$3.log(` Using Dokploy registry: ${config.registryId}`);
|
|
2212
|
+
} else {
|
|
2213
|
+
const storedRegistryId = await getDokployRegistryId();
|
|
2214
|
+
if (storedRegistryId) {
|
|
2215
|
+
registryOptions.registryId = storedRegistryId;
|
|
2216
|
+
logger$3.log(` Using stored Dokploy registry: ${storedRegistryId}`);
|
|
2217
|
+
} else if (config.registryCredentials) {
|
|
2218
|
+
registryOptions.username = config.registryCredentials.username;
|
|
2219
|
+
registryOptions.password = config.registryCredentials.password;
|
|
2220
|
+
registryOptions.registryUrl = config.registryCredentials.registryUrl;
|
|
2221
|
+
logger$3.log(` Using registry credentials for: ${config.registryCredentials.registryUrl}`);
|
|
2222
|
+
} else {
|
|
2223
|
+
const username = process.env.DOCKER_REGISTRY_USERNAME;
|
|
2224
|
+
const password = process.env.DOCKER_REGISTRY_PASSWORD;
|
|
2225
|
+
const registryUrl = process.env.DOCKER_REGISTRY_URL || config.registry;
|
|
2226
|
+
if (username && password && registryUrl) {
|
|
2227
|
+
registryOptions.username = username;
|
|
2228
|
+
registryOptions.password = password;
|
|
2229
|
+
registryOptions.registryUrl = registryUrl;
|
|
2230
|
+
logger$3.log(` Using registry credentials from environment`);
|
|
2231
|
+
}
|
|
2232
|
+
}
|
|
2233
|
+
}
|
|
2234
|
+
await api.saveDockerProvider(config.applicationId, imageRef, registryOptions);
|
|
2235
|
+
logger$3.log(" ✓ Docker provider configured");
|
|
2236
|
+
const envVars = {};
|
|
2237
|
+
if (masterKey) envVars.GKM_MASTER_KEY = masterKey;
|
|
2238
|
+
if (Object.keys(envVars).length > 0) {
|
|
2239
|
+
logger$3.log(" Updating environment variables...");
|
|
2240
|
+
const envString = Object.entries(envVars).map(([key, value]) => `${key}=${value}`).join("\n");
|
|
2241
|
+
await api.saveApplicationEnv(config.applicationId, envString);
|
|
2242
|
+
logger$3.log(" ✓ Environment variables updated");
|
|
2243
|
+
}
|
|
2244
|
+
logger$3.log(" Triggering deployment...");
|
|
2245
|
+
await api.deployApplication(config.applicationId);
|
|
2246
|
+
logger$3.log(" ✓ Deployment triggered");
|
|
2247
|
+
logger$3.log("\n✅ Dokploy deployment initiated!");
|
|
2248
|
+
logger$3.log(`\n📋 Deployment details:`);
|
|
2249
|
+
logger$3.log(` Image: ${imageRef}`);
|
|
2250
|
+
logger$3.log(` Stage: ${stage}`);
|
|
2251
|
+
logger$3.log(` Application ID: ${config.applicationId}`);
|
|
2252
|
+
if (masterKey) logger$3.log(`\n🔐 GKM_MASTER_KEY has been set in Dokploy environment`);
|
|
2253
|
+
const deploymentUrl = `${config.endpoint}/project/${config.projectId}`;
|
|
2254
|
+
logger$3.log(`\n🔗 View deployment: ${deploymentUrl}`);
|
|
2255
|
+
return {
|
|
2256
|
+
imageRef,
|
|
2257
|
+
masterKey,
|
|
2258
|
+
url: deploymentUrl
|
|
2259
|
+
};
|
|
2260
|
+
}
|
|
2126
2261
|
|
|
2127
|
-
|
|
2128
|
-
|
|
2129
|
-
|
|
2130
|
-
|
|
2262
|
+
//#endregion
|
|
2263
|
+
//#region src/deploy/init.ts
|
|
2264
|
+
const logger$2 = console;
|
|
2265
|
+
/**
|
|
2266
|
+
* Get the Dokploy API token from stored credentials or environment
|
|
2267
|
+
*/
|
|
2268
|
+
async function getApiToken() {
|
|
2269
|
+
const token = await getDokployToken();
|
|
2270
|
+
if (!token) throw new Error("Dokploy credentials not found.\nRun \"gkm login --service dokploy\" to authenticate, or set DOKPLOY_API_TOKEN.");
|
|
2271
|
+
return token;
|
|
2131
2272
|
}
|
|
2132
2273
|
/**
|
|
2133
|
-
*
|
|
2134
|
-
* Uses turbo prune to create minimal Docker context
|
|
2274
|
+
* Get Dokploy endpoint from options or stored credentials
|
|
2135
2275
|
*/
|
|
2136
|
-
function
|
|
2137
|
-
|
|
2138
|
-
const
|
|
2139
|
-
|
|
2140
|
-
|
|
2141
|
-
const depsInstall = hasFetch ? `# Fetch and install from cache
|
|
2142
|
-
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
|
|
2143
|
-
${pm.fetch}
|
|
2144
|
-
|
|
2145
|
-
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
|
|
2146
|
-
${pm.installCmd}` : `# Install dependencies with cache
|
|
2147
|
-
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
|
|
2148
|
-
${pm.installCmd}`;
|
|
2149
|
-
return `# syntax=docker/dockerfile:1
|
|
2150
|
-
# Stage 1: Prune monorepo
|
|
2151
|
-
FROM ${baseImage} AS pruner
|
|
2152
|
-
|
|
2153
|
-
WORKDIR /app
|
|
2154
|
-
|
|
2155
|
-
${installPm}
|
|
2156
|
-
RUN ${pm.addGlobal} turbo
|
|
2157
|
-
|
|
2158
|
-
COPY . .
|
|
2159
|
-
|
|
2160
|
-
# Prune to only include necessary packages
|
|
2161
|
-
RUN turbo prune ${turboPackage} --docker
|
|
2162
|
-
|
|
2163
|
-
# Stage 2: Install dependencies
|
|
2164
|
-
FROM ${baseImage} AS deps
|
|
2165
|
-
|
|
2166
|
-
WORKDIR /app
|
|
2167
|
-
|
|
2168
|
-
${installPm}
|
|
2169
|
-
|
|
2170
|
-
# Copy pruned lockfile and package.jsons
|
|
2171
|
-
COPY --from=pruner /app/out/${pm.lockfile} ./
|
|
2172
|
-
COPY --from=pruner /app/out/json/ ./
|
|
2173
|
-
|
|
2174
|
-
${depsInstall}
|
|
2175
|
-
|
|
2176
|
-
# Stage 3: Build
|
|
2177
|
-
FROM deps AS builder
|
|
2178
|
-
|
|
2179
|
-
WORKDIR /app
|
|
2180
|
-
|
|
2181
|
-
# Copy pruned source
|
|
2182
|
-
COPY --from=pruner /app/out/full/ ./
|
|
2183
|
-
|
|
2184
|
-
# Build production server
|
|
2185
|
-
RUN ${pm.run} gkm build --provider server --production
|
|
2186
|
-
|
|
2187
|
-
# Stage 4: Production
|
|
2188
|
-
FROM ${baseImage} AS runner
|
|
2189
|
-
|
|
2190
|
-
WORKDIR /app
|
|
2191
|
-
|
|
2192
|
-
RUN apk add --no-cache tini
|
|
2193
|
-
|
|
2194
|
-
RUN addgroup --system --gid 1001 nodejs && \\
|
|
2195
|
-
adduser --system --uid 1001 hono
|
|
2196
|
-
|
|
2197
|
-
COPY --from=builder --chown=hono:nodejs /app/.gkm/server/dist/server.mjs ./
|
|
2198
|
-
|
|
2199
|
-
ENV NODE_ENV=production
|
|
2200
|
-
ENV PORT=${port}
|
|
2201
|
-
|
|
2202
|
-
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \\
|
|
2203
|
-
CMD wget -q --spider http://localhost:${port}${healthCheckPath} || exit 1
|
|
2204
|
-
|
|
2205
|
-
USER hono
|
|
2206
|
-
|
|
2207
|
-
EXPOSE ${port}
|
|
2208
|
-
|
|
2209
|
-
ENTRYPOINT ["/sbin/tini", "--"]
|
|
2210
|
-
CMD ["node", "server.mjs"]
|
|
2211
|
-
`;
|
|
2276
|
+
async function getEndpoint(providedEndpoint) {
|
|
2277
|
+
if (providedEndpoint) return providedEndpoint;
|
|
2278
|
+
const stored = await getDokployCredentials();
|
|
2279
|
+
if (stored) return stored.endpoint;
|
|
2280
|
+
throw new Error("Dokploy endpoint not specified.\nEither run \"gkm login --service dokploy\" first, or provide --endpoint.");
|
|
2212
2281
|
}
|
|
2213
2282
|
/**
|
|
2214
|
-
*
|
|
2283
|
+
* Create a Dokploy API client
|
|
2215
2284
|
*/
|
|
2216
|
-
function
|
|
2217
|
-
const
|
|
2218
|
-
return
|
|
2219
|
-
|
|
2220
|
-
|
|
2221
|
-
|
|
2222
|
-
|
|
2223
|
-
# Install tini for proper signal handling as PID 1
|
|
2224
|
-
# Handles SIGTERM propagation and zombie process reaping
|
|
2225
|
-
RUN apk add --no-cache tini
|
|
2226
|
-
|
|
2227
|
-
# Create non-root user
|
|
2228
|
-
RUN addgroup --system --gid 1001 nodejs && \\
|
|
2229
|
-
adduser --system --uid 1001 hono
|
|
2230
|
-
|
|
2231
|
-
# Copy pre-built bundle
|
|
2232
|
-
COPY .gkm/server/dist/server.mjs ./
|
|
2233
|
-
|
|
2234
|
-
# Environment
|
|
2235
|
-
ENV NODE_ENV=production
|
|
2236
|
-
ENV PORT=${port}
|
|
2237
|
-
|
|
2238
|
-
# Health check
|
|
2239
|
-
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \\
|
|
2240
|
-
CMD wget -q --spider http://localhost:${port}${healthCheckPath} || exit 1
|
|
2241
|
-
|
|
2242
|
-
# Switch to non-root user
|
|
2243
|
-
USER hono
|
|
2244
|
-
|
|
2245
|
-
EXPOSE ${port}
|
|
2246
|
-
|
|
2247
|
-
# Use tini as entrypoint to handle PID 1 responsibilities
|
|
2248
|
-
ENTRYPOINT ["/sbin/tini", "--"]
|
|
2249
|
-
CMD ["node", "server.mjs"]
|
|
2250
|
-
`;
|
|
2285
|
+
async function createApi(endpoint) {
|
|
2286
|
+
const token = await getApiToken();
|
|
2287
|
+
return new require_dokploy_api.DokployApi({
|
|
2288
|
+
baseUrl: endpoint,
|
|
2289
|
+
token
|
|
2290
|
+
});
|
|
2251
2291
|
}
|
|
2252
2292
|
/**
|
|
2253
|
-
*
|
|
2293
|
+
* Update gkm.config.ts with Dokploy configuration
|
|
2254
2294
|
*/
|
|
2255
|
-
function
|
|
2256
|
-
|
|
2257
|
-
|
|
2258
|
-
.
|
|
2259
|
-
|
|
2260
|
-
|
|
2261
|
-
.
|
|
2262
|
-
.
|
|
2263
|
-
|
|
2264
|
-
|
|
2265
|
-
|
|
2266
|
-
|
|
2267
|
-
|
|
2268
|
-
|
|
2269
|
-
|
|
2270
|
-
|
|
2271
|
-
|
|
2272
|
-
|
|
2273
|
-
.
|
|
2274
|
-
|
|
2275
|
-
|
|
2276
|
-
|
|
2277
|
-
|
|
2278
|
-
|
|
2279
|
-
|
|
2280
|
-
|
|
2281
|
-
|
|
2282
|
-
|
|
2283
|
-
|
|
2284
|
-
|
|
2285
|
-
|
|
2286
|
-
|
|
2287
|
-
|
|
2288
|
-
|
|
2289
|
-
!README.md
|
|
2290
|
-
|
|
2291
|
-
# Environment files (handle secrets separately)
|
|
2292
|
-
.env
|
|
2293
|
-
.env.*
|
|
2294
|
-
!.env.example
|
|
2295
|
-
|
|
2296
|
-
# Docker files (don't copy recursively)
|
|
2297
|
-
Dockerfile*
|
|
2298
|
-
docker-compose*
|
|
2299
|
-
.dockerignore
|
|
2300
|
-
`;
|
|
2295
|
+
async function updateConfig(config, cwd = process.cwd()) {
|
|
2296
|
+
const configPath = (0, node_path.join)(cwd, "gkm.config.ts");
|
|
2297
|
+
if (!(0, node_fs.existsSync)(configPath)) {
|
|
2298
|
+
logger$2.warn("\n gkm.config.ts not found. Add this configuration manually:\n");
|
|
2299
|
+
logger$2.log(` providers: {`);
|
|
2300
|
+
logger$2.log(` dokploy: {`);
|
|
2301
|
+
logger$2.log(` endpoint: '${config.endpoint}',`);
|
|
2302
|
+
logger$2.log(` projectId: '${config.projectId}',`);
|
|
2303
|
+
logger$2.log(` applicationId: '${config.applicationId}',`);
|
|
2304
|
+
logger$2.log(` },`);
|
|
2305
|
+
logger$2.log(` },`);
|
|
2306
|
+
return;
|
|
2307
|
+
}
|
|
2308
|
+
const content = await (0, node_fs_promises.readFile)(configPath, "utf-8");
|
|
2309
|
+
if (content.includes("dokploy:") && content.includes("applicationId:")) {
|
|
2310
|
+
logger$2.log("\n Dokploy config already exists in gkm.config.ts");
|
|
2311
|
+
logger$2.log(" Updating with new values...");
|
|
2312
|
+
}
|
|
2313
|
+
const registryLine = config.registryId ? `\n\t\t\tregistryId: '${config.registryId}',` : "";
|
|
2314
|
+
const dokployConfigStr = `dokploy: {
|
|
2315
|
+
endpoint: '${config.endpoint}',
|
|
2316
|
+
projectId: '${config.projectId}',
|
|
2317
|
+
applicationId: '${config.applicationId}',${registryLine}
|
|
2318
|
+
}`;
|
|
2319
|
+
let newContent;
|
|
2320
|
+
if (content.includes("providers:")) if (content.includes("dokploy:")) newContent = content.replace(/dokploy:\s*\{[^}]*\}/s, dokployConfigStr);
|
|
2321
|
+
else newContent = content.replace(/providers:\s*\{/, `providers: {\n\t\t${dokployConfigStr},`);
|
|
2322
|
+
else newContent = content.replace(/}\s*\)\s*;?\s*$/, `
|
|
2323
|
+
providers: {
|
|
2324
|
+
${dokployConfigStr},
|
|
2325
|
+
},
|
|
2326
|
+
});`);
|
|
2327
|
+
await (0, node_fs_promises.writeFile)(configPath, newContent);
|
|
2328
|
+
logger$2.log("\n ✓ Updated gkm.config.ts with Dokploy configuration");
|
|
2301
2329
|
}
|
|
2302
2330
|
/**
|
|
2303
|
-
*
|
|
2331
|
+
* Initialize Dokploy deployment configuration
|
|
2304
2332
|
*/
|
|
2305
|
-
function
|
|
2306
|
-
|
|
2307
|
-
|
|
2308
|
-
|
|
2309
|
-
|
|
2310
|
-
|
|
2311
|
-
|
|
2312
|
-
|
|
2313
|
-
|
|
2314
|
-
|
|
2315
|
-
|
|
2316
|
-
|
|
2317
|
-
|
|
2318
|
-
|
|
2333
|
+
async function deployInitCommand(options) {
|
|
2334
|
+
const { projectName, appName, projectId: existingProjectId, registryId } = options;
|
|
2335
|
+
const endpoint = await getEndpoint(options.endpoint);
|
|
2336
|
+
const api = await createApi(endpoint);
|
|
2337
|
+
logger$2.log(`\n🚀 Initializing Dokploy deployment...`);
|
|
2338
|
+
logger$2.log(` Endpoint: ${endpoint}`);
|
|
2339
|
+
let projectId;
|
|
2340
|
+
if (existingProjectId) {
|
|
2341
|
+
projectId = existingProjectId;
|
|
2342
|
+
logger$2.log(`\n📁 Using existing project: ${projectId}`);
|
|
2343
|
+
} else {
|
|
2344
|
+
logger$2.log(`\n📁 Looking for project: ${projectName}`);
|
|
2345
|
+
const projects = await api.listProjects();
|
|
2346
|
+
const existingProject = projects.find((p) => p.name.toLowerCase() === projectName.toLowerCase());
|
|
2347
|
+
if (existingProject) {
|
|
2348
|
+
projectId = existingProject.projectId;
|
|
2349
|
+
logger$2.log(` Found existing project: ${projectId}`);
|
|
2350
|
+
} else {
|
|
2351
|
+
logger$2.log(` Creating new project...`);
|
|
2352
|
+
const result = await api.createProject(projectName);
|
|
2353
|
+
projectId = result.project.projectId;
|
|
2354
|
+
logger$2.log(` ✓ Created project: ${projectId}`);
|
|
2355
|
+
}
|
|
2356
|
+
}
|
|
2357
|
+
const project = await api.getProject(projectId);
|
|
2358
|
+
let environmentId;
|
|
2359
|
+
const firstEnv = project.environments?.[0];
|
|
2360
|
+
if (firstEnv) environmentId = firstEnv.environmentId;
|
|
2361
|
+
else {
|
|
2362
|
+
logger$2.log(` Creating production environment...`);
|
|
2363
|
+
const env = await api.createEnvironment(projectId, "production");
|
|
2364
|
+
environmentId = env.environmentId;
|
|
2365
|
+
}
|
|
2366
|
+
logger$2.log(`\n📦 Creating application: ${appName}`);
|
|
2367
|
+
const application = await api.createApplication(appName, projectId, environmentId);
|
|
2368
|
+
logger$2.log(` ✓ Created application: ${application.applicationId}`);
|
|
2369
|
+
if (registryId) {
|
|
2370
|
+
logger$2.log(`\n🔧 Configuring registry: ${registryId}`);
|
|
2371
|
+
await api.updateApplication(application.applicationId, { registryId });
|
|
2372
|
+
logger$2.log(` ✓ Registry configured`);
|
|
2373
|
+
} else try {
|
|
2374
|
+
const registries = await api.listRegistries();
|
|
2375
|
+
if (registries.length > 0) {
|
|
2376
|
+
logger$2.log(`\n📋 Available registries:`);
|
|
2377
|
+
for (const reg of registries) logger$2.log(` - ${reg.registryName}: ${reg.registryUrl} (${reg.registryId})`);
|
|
2378
|
+
logger$2.log(`\n To use a registry, run with --registry-id <id>`);
|
|
2379
|
+
}
|
|
2380
|
+
} catch {}
|
|
2381
|
+
const config = {
|
|
2382
|
+
endpoint,
|
|
2383
|
+
projectId,
|
|
2384
|
+
applicationId: application.applicationId
|
|
2385
|
+
};
|
|
2386
|
+
await updateConfig(config);
|
|
2387
|
+
logger$2.log(`\n✅ Dokploy deployment initialized!`);
|
|
2388
|
+
logger$2.log(`\n📋 Configuration:`);
|
|
2389
|
+
logger$2.log(` Project ID: ${projectId}`);
|
|
2390
|
+
logger$2.log(` Application ID: ${application.applicationId}`);
|
|
2391
|
+
logger$2.log(`\n🔗 View in Dokploy: ${endpoint}/project/${projectId}`);
|
|
2392
|
+
logger$2.log(`\n📝 Next steps:`);
|
|
2393
|
+
logger$2.log(` 1. Initialize secrets: gkm secrets:init --stage production`);
|
|
2394
|
+
logger$2.log(` 2. Deploy: gkm deploy --provider dokploy --stage production`);
|
|
2395
|
+
return config;
|
|
2319
2396
|
}
|
|
2320
2397
|
/**
|
|
2321
|
-
*
|
|
2398
|
+
* List available Dokploy resources
|
|
2322
2399
|
*/
|
|
2323
|
-
function
|
|
2324
|
-
const
|
|
2325
|
-
|
|
2326
|
-
|
|
2327
|
-
|
|
2328
|
-
|
|
2329
|
-
|
|
2330
|
-
|
|
2331
|
-
|
|
2332
|
-
|
|
2333
|
-
|
|
2334
|
-
|
|
2335
|
-
|
|
2336
|
-
|
|
2400
|
+
async function deployListCommand(options) {
|
|
2401
|
+
const endpoint = await getEndpoint(options.endpoint);
|
|
2402
|
+
const api = await createApi(endpoint);
|
|
2403
|
+
const { resource } = options;
|
|
2404
|
+
if (resource === "projects") {
|
|
2405
|
+
logger$2.log(`\n📁 Projects in ${endpoint}:`);
|
|
2406
|
+
const projects = await api.listProjects();
|
|
2407
|
+
if (projects.length === 0) {
|
|
2408
|
+
logger$2.log(" No projects found");
|
|
2409
|
+
return;
|
|
2410
|
+
}
|
|
2411
|
+
for (const project of projects) {
|
|
2412
|
+
logger$2.log(`\n ${project.name} (${project.projectId})`);
|
|
2413
|
+
if (project.description) logger$2.log(` ${project.description}`);
|
|
2414
|
+
}
|
|
2415
|
+
} else if (resource === "registries") {
|
|
2416
|
+
logger$2.log(`\n🐳 Registries in ${endpoint}:`);
|
|
2417
|
+
const registries = await api.listRegistries();
|
|
2418
|
+
if (registries.length === 0) {
|
|
2419
|
+
logger$2.log(" No registries configured");
|
|
2420
|
+
logger$2.log(" Run \"gkm registry:setup\" to configure a registry");
|
|
2421
|
+
return;
|
|
2422
|
+
}
|
|
2423
|
+
const storedRegistryId = await getDokployRegistryId();
|
|
2424
|
+
for (const registry of registries) {
|
|
2425
|
+
const isDefault = registry.registryId === storedRegistryId;
|
|
2426
|
+
const marker = isDefault ? " (default)" : "";
|
|
2427
|
+
logger$2.log(`\n ${registry.registryName}${marker} (${registry.registryId})`);
|
|
2428
|
+
logger$2.log(` URL: ${registry.registryUrl}`);
|
|
2429
|
+
logger$2.log(` Username: ${registry.username}`);
|
|
2430
|
+
if (registry.imagePrefix) logger$2.log(` Prefix: ${registry.imagePrefix}`);
|
|
2431
|
+
}
|
|
2432
|
+
}
|
|
2337
2433
|
}
|
|
2338
2434
|
|
|
2339
2435
|
//#endregion
|
|
2340
|
-
//#region src/
|
|
2436
|
+
//#region src/deploy/index.ts
|
|
2341
2437
|
const logger$1 = console;
|
|
2342
2438
|
/**
|
|
2343
|
-
*
|
|
2344
|
-
* Generates Dockerfile, docker-compose.yml, and related files
|
|
2345
|
-
*
|
|
2346
|
-
* Default: Multi-stage Dockerfile that builds from source inside Docker
|
|
2347
|
-
* --slim: Slim Dockerfile that copies pre-built bundle (requires prior build)
|
|
2439
|
+
* Prompt for input
|
|
2348
2440
|
*/
|
|
2349
|
-
async function
|
|
2350
|
-
|
|
2351
|
-
|
|
2352
|
-
|
|
2353
|
-
|
|
2354
|
-
|
|
2355
|
-
|
|
2356
|
-
|
|
2357
|
-
|
|
2358
|
-
|
|
2441
|
+
async function prompt(message, hidden = false) {
|
|
2442
|
+
if (!process.stdin.isTTY) throw new Error("Interactive input required. Please configure manually.");
|
|
2443
|
+
if (hidden) {
|
|
2444
|
+
process.stdout.write(message);
|
|
2445
|
+
return new Promise((resolve$1) => {
|
|
2446
|
+
let value = "";
|
|
2447
|
+
const onData = (char) => {
|
|
2448
|
+
const c = char.toString();
|
|
2449
|
+
if (c === "\n" || c === "\r") {
|
|
2450
|
+
process.stdin.setRawMode(false);
|
|
2451
|
+
process.stdin.pause();
|
|
2452
|
+
process.stdin.removeListener("data", onData);
|
|
2453
|
+
process.stdout.write("\n");
|
|
2454
|
+
resolve$1(value);
|
|
2455
|
+
} else if (c === "") {
|
|
2456
|
+
process.stdin.setRawMode(false);
|
|
2457
|
+
process.stdin.pause();
|
|
2458
|
+
process.stdout.write("\n");
|
|
2459
|
+
process.exit(1);
|
|
2460
|
+
} else if (c === "" || c === "\b") {
|
|
2461
|
+
if (value.length > 0) value = value.slice(0, -1);
|
|
2462
|
+
} else value += c;
|
|
2463
|
+
};
|
|
2464
|
+
process.stdin.setRawMode(true);
|
|
2465
|
+
process.stdin.resume();
|
|
2466
|
+
process.stdin.on("data", onData);
|
|
2467
|
+
});
|
|
2359
2468
|
}
|
|
2360
|
-
const
|
|
2361
|
-
|
|
2362
|
-
|
|
2363
|
-
|
|
2364
|
-
|
|
2365
|
-
|
|
2366
|
-
|
|
2367
|
-
|
|
2368
|
-
|
|
2369
|
-
|
|
2370
|
-
|
|
2371
|
-
|
|
2469
|
+
const rl = node_readline_promises.createInterface({
|
|
2470
|
+
input: node_process.stdin,
|
|
2471
|
+
output: node_process.stdout
|
|
2472
|
+
});
|
|
2473
|
+
try {
|
|
2474
|
+
return await rl.question(message);
|
|
2475
|
+
} finally {
|
|
2476
|
+
rl.close();
|
|
2477
|
+
}
|
|
2478
|
+
}
|
|
2479
|
+
/**
|
|
2480
|
+
* Provision docker compose services in Dokploy
|
|
2481
|
+
* @internal Exported for testing
|
|
2482
|
+
*/
|
|
2483
|
+
async function provisionServices(api, projectId, environmentId, appName, services, existingUrls) {
|
|
2484
|
+
logger$1.log(`\n🔍 provisionServices called: services=${JSON.stringify(services)}, envId=${environmentId}`);
|
|
2485
|
+
if (!services || !environmentId) {
|
|
2486
|
+
logger$1.log(" Skipping: no services or no environmentId");
|
|
2487
|
+
return void 0;
|
|
2488
|
+
}
|
|
2489
|
+
const serviceUrls = {};
|
|
2490
|
+
if (services.postgres) if (existingUrls?.DATABASE_URL) logger$1.log("\n🐘 PostgreSQL: Already configured (skipping)");
|
|
2491
|
+
else {
|
|
2492
|
+
logger$1.log("\n🐘 Provisioning PostgreSQL...");
|
|
2493
|
+
const postgresName = `${appName}-db`;
|
|
2494
|
+
try {
|
|
2495
|
+
const { randomBytes: randomBytes$1 } = await import("node:crypto");
|
|
2496
|
+
const databasePassword = randomBytes$1(16).toString("hex");
|
|
2497
|
+
const postgres = await api.createPostgres(postgresName, projectId, environmentId, { databasePassword });
|
|
2498
|
+
logger$1.log(` ✓ Created PostgreSQL: ${postgres.postgresId}`);
|
|
2499
|
+
await api.deployPostgres(postgres.postgresId);
|
|
2500
|
+
logger$1.log(" ✓ PostgreSQL deployed");
|
|
2501
|
+
serviceUrls.DATABASE_URL = `postgresql://${postgres.databaseUser}:${postgres.databasePassword}@${postgres.appName}:5432/${postgres.databaseName}`;
|
|
2502
|
+
logger$1.log(` ✓ DATABASE_URL configured`);
|
|
2503
|
+
} catch (error) {
|
|
2504
|
+
const message = error instanceof Error ? error.message : "Unknown error";
|
|
2505
|
+
if (message.includes("already exists") || message.includes("duplicate")) logger$1.log(` ℹ PostgreSQL already exists`);
|
|
2506
|
+
else logger$1.log(` ⚠ Failed to provision PostgreSQL: ${message}`);
|
|
2507
|
+
}
|
|
2508
|
+
}
|
|
2509
|
+
if (services.redis) if (existingUrls?.REDIS_URL) logger$1.log("\n🔴 Redis: Already configured (skipping)");
|
|
2510
|
+
else {
|
|
2511
|
+
logger$1.log("\n🔴 Provisioning Redis...");
|
|
2512
|
+
const redisName = `${appName}-cache`;
|
|
2513
|
+
try {
|
|
2514
|
+
const { randomBytes: randomBytes$1 } = await import("node:crypto");
|
|
2515
|
+
const databasePassword = randomBytes$1(16).toString("hex");
|
|
2516
|
+
const redis = await api.createRedis(redisName, projectId, environmentId, { databasePassword });
|
|
2517
|
+
logger$1.log(` ✓ Created Redis: ${redis.redisId}`);
|
|
2518
|
+
await api.deployRedis(redis.redisId);
|
|
2519
|
+
logger$1.log(" ✓ Redis deployed");
|
|
2520
|
+
const password = redis.databasePassword ? `:${redis.databasePassword}@` : "";
|
|
2521
|
+
serviceUrls.REDIS_URL = `redis://${password}${redis.appName}:6379`;
|
|
2522
|
+
logger$1.log(` ✓ REDIS_URL configured`);
|
|
2523
|
+
} catch (error) {
|
|
2524
|
+
const message = error instanceof Error ? error.message : "Unknown error";
|
|
2525
|
+
if (message.includes("already exists") || message.includes("duplicate")) logger$1.log(` ℹ Redis already exists`);
|
|
2526
|
+
else logger$1.log(` ⚠ Failed to provision Redis: ${message}`);
|
|
2527
|
+
}
|
|
2528
|
+
}
|
|
2529
|
+
return Object.keys(serviceUrls).length > 0 ? serviceUrls : void 0;
|
|
2530
|
+
}
|
|
2531
|
+
/**
|
|
2532
|
+
* Ensure Dokploy is fully configured, recovering/creating resources as needed
|
|
2533
|
+
*/
|
|
2534
|
+
async function ensureDokploySetup(config, dockerConfig, stage, services) {
|
|
2535
|
+
logger$1.log("\n🔧 Checking Dokploy setup...");
|
|
2536
|
+
const { readStageSecrets: readStageSecrets$1 } = await Promise.resolve().then(() => require("./storage-UfyTn7Zm.cjs"));
|
|
2537
|
+
const existingSecrets = await readStageSecrets$1(stage);
|
|
2538
|
+
const existingUrls = {
|
|
2539
|
+
DATABASE_URL: existingSecrets?.urls?.DATABASE_URL,
|
|
2540
|
+
REDIS_URL: existingSecrets?.urls?.REDIS_URL
|
|
2372
2541
|
};
|
|
2373
|
-
|
|
2374
|
-
|
|
2375
|
-
|
|
2376
|
-
|
|
2377
|
-
|
|
2378
|
-
|
|
2379
|
-
|
|
2380
|
-
|
|
2381
|
-
|
|
2382
|
-
|
|
2383
|
-
|
|
2542
|
+
let creds = await getDokployCredentials();
|
|
2543
|
+
if (!creds) {
|
|
2544
|
+
logger$1.log("\n📋 Dokploy credentials not found. Let's set them up.");
|
|
2545
|
+
const endpoint = await prompt("Dokploy URL (e.g., https://dokploy.example.com): ");
|
|
2546
|
+
const normalizedEndpoint = endpoint.replace(/\/$/, "");
|
|
2547
|
+
try {
|
|
2548
|
+
new URL(normalizedEndpoint);
|
|
2549
|
+
} catch {
|
|
2550
|
+
throw new Error("Invalid URL format");
|
|
2551
|
+
}
|
|
2552
|
+
logger$1.log(`\nGenerate a token at: ${normalizedEndpoint}/settings/profile\n`);
|
|
2553
|
+
const token = await prompt("API Token: ", true);
|
|
2554
|
+
logger$1.log("\nValidating credentials...");
|
|
2555
|
+
const isValid = await validateDokployToken(normalizedEndpoint, token);
|
|
2556
|
+
if (!isValid) throw new Error("Invalid credentials. Please check your token.");
|
|
2557
|
+
await storeDokployCredentials(token, normalizedEndpoint);
|
|
2558
|
+
creds = {
|
|
2559
|
+
token,
|
|
2560
|
+
endpoint: normalizedEndpoint
|
|
2561
|
+
};
|
|
2562
|
+
logger$1.log("✓ Credentials saved");
|
|
2563
|
+
}
|
|
2564
|
+
const api = new require_dokploy_api.DokployApi({
|
|
2565
|
+
baseUrl: creds.endpoint,
|
|
2566
|
+
token: creds.token
|
|
2567
|
+
});
|
|
2568
|
+
const existingConfig = config.providers?.dokploy;
|
|
2569
|
+
if (existingConfig && typeof existingConfig !== "boolean" && existingConfig.applicationId && existingConfig.projectId) {
|
|
2570
|
+
logger$1.log("✓ Dokploy config found in gkm.config.ts");
|
|
2571
|
+
try {
|
|
2572
|
+
const projectDetails = await api.getProject(existingConfig.projectId);
|
|
2573
|
+
logger$1.log("✓ Project verified");
|
|
2574
|
+
const storedRegistryId = existingConfig.registryId ?? await getDokployRegistryId();
|
|
2575
|
+
const environments = projectDetails.environments ?? [];
|
|
2576
|
+
let environment = environments.find((e) => e.name.toLowerCase() === stage.toLowerCase());
|
|
2577
|
+
if (!environment) {
|
|
2578
|
+
logger$1.log(` Creating "${stage}" environment...`);
|
|
2579
|
+
environment = await api.createEnvironment(existingConfig.projectId, stage);
|
|
2580
|
+
logger$1.log(` ✓ Created environment: ${environment.environmentId}`);
|
|
2581
|
+
}
|
|
2582
|
+
const environmentId$1 = environment.environmentId;
|
|
2583
|
+
logger$1.log(` Services config: ${JSON.stringify(services)}, envId: ${environmentId$1}`);
|
|
2584
|
+
const serviceUrls$1 = await provisionServices(api, existingConfig.projectId, environmentId$1, dockerConfig.imageName || "app", services, existingUrls);
|
|
2585
|
+
return {
|
|
2586
|
+
config: {
|
|
2587
|
+
endpoint: existingConfig.endpoint,
|
|
2588
|
+
projectId: existingConfig.projectId,
|
|
2589
|
+
applicationId: existingConfig.applicationId,
|
|
2590
|
+
registry: existingConfig.registry,
|
|
2591
|
+
registryId: storedRegistryId ?? void 0
|
|
2592
|
+
},
|
|
2593
|
+
serviceUrls: serviceUrls$1
|
|
2594
|
+
};
|
|
2595
|
+
} catch {
|
|
2596
|
+
logger$1.log("⚠ Project not found, will recover...");
|
|
2597
|
+
}
|
|
2598
|
+
}
|
|
2599
|
+
logger$1.log("\n📁 Looking for project...");
|
|
2600
|
+
const projectName = dockerConfig.imageName || "app";
|
|
2601
|
+
const projects = await api.listProjects();
|
|
2602
|
+
let project = projects.find((p) => p.name.toLowerCase() === projectName.toLowerCase());
|
|
2603
|
+
let environmentId;
|
|
2604
|
+
if (project) {
|
|
2605
|
+
logger$1.log(` Found existing project: ${project.name} (${project.projectId})`);
|
|
2606
|
+
const projectDetails = await api.getProject(project.projectId);
|
|
2607
|
+
const environments = projectDetails.environments ?? [];
|
|
2608
|
+
const matchingEnv = environments.find((e) => e.name.toLowerCase() === stage.toLowerCase());
|
|
2609
|
+
if (matchingEnv) {
|
|
2610
|
+
environmentId = matchingEnv.environmentId;
|
|
2611
|
+
logger$1.log(` Using environment: ${matchingEnv.name}`);
|
|
2612
|
+
} else {
|
|
2613
|
+
logger$1.log(` Creating "${stage}" environment...`);
|
|
2614
|
+
const env = await api.createEnvironment(project.projectId, stage);
|
|
2615
|
+
environmentId = env.environmentId;
|
|
2616
|
+
logger$1.log(` ✓ Created environment: ${stage}`);
|
|
2617
|
+
}
|
|
2618
|
+
} else {
|
|
2619
|
+
logger$1.log(` Creating project: ${projectName}`);
|
|
2620
|
+
const result = await api.createProject(projectName);
|
|
2621
|
+
project = result.project;
|
|
2622
|
+
if (result.environment.name.toLowerCase() !== stage.toLowerCase()) {
|
|
2623
|
+
logger$1.log(` Creating "${stage}" environment...`);
|
|
2624
|
+
const env = await api.createEnvironment(project.projectId, stage);
|
|
2625
|
+
environmentId = env.environmentId;
|
|
2626
|
+
} else environmentId = result.environment.environmentId;
|
|
2627
|
+
logger$1.log(` ✓ Created project: ${project.projectId}`);
|
|
2628
|
+
logger$1.log(` ✓ Using environment: ${stage}`);
|
|
2629
|
+
}
|
|
2630
|
+
logger$1.log("\n📦 Looking for application...");
|
|
2631
|
+
const appName = dockerConfig.imageName || projectName;
|
|
2632
|
+
let applicationId;
|
|
2633
|
+
if (existingConfig && typeof existingConfig !== "boolean" && existingConfig.applicationId) {
|
|
2634
|
+
applicationId = existingConfig.applicationId;
|
|
2635
|
+
logger$1.log(` Using application from config: ${applicationId}`);
|
|
2636
|
+
} else {
|
|
2637
|
+
logger$1.log(` Creating application: ${appName}`);
|
|
2638
|
+
const app = await api.createApplication(appName, project.projectId, environmentId);
|
|
2639
|
+
applicationId = app.applicationId;
|
|
2640
|
+
logger$1.log(` ✓ Created application: ${applicationId}`);
|
|
2641
|
+
}
|
|
2642
|
+
logger$1.log("\n🐳 Checking registry...");
|
|
2643
|
+
let registryId = await getDokployRegistryId();
|
|
2644
|
+
if (registryId) try {
|
|
2645
|
+
const registry = await api.getRegistry(registryId);
|
|
2646
|
+
logger$1.log(` Using registry: ${registry.registryName}`);
|
|
2647
|
+
} catch {
|
|
2648
|
+
logger$1.log(" ⚠ Stored registry not found, clearing...");
|
|
2649
|
+
registryId = void 0;
|
|
2650
|
+
await storeDokployRegistryId("");
|
|
2651
|
+
}
|
|
2652
|
+
if (!registryId) {
|
|
2653
|
+
const registries = await api.listRegistries();
|
|
2654
|
+
if (registries.length === 0) if (dockerConfig.registry) {
|
|
2655
|
+
logger$1.log(" No registries found in Dokploy. Let's create one.");
|
|
2656
|
+
logger$1.log(` Registry URL: ${dockerConfig.registry}`);
|
|
2657
|
+
const username = await prompt("Registry username: ");
|
|
2658
|
+
const password = await prompt("Registry password/token: ", true);
|
|
2659
|
+
const registry = await api.createRegistry("Default Registry", dockerConfig.registry, username, password);
|
|
2660
|
+
registryId = registry.registryId;
|
|
2661
|
+
await storeDokployRegistryId(registryId);
|
|
2662
|
+
logger$1.log(` ✓ Registry created: ${registryId}`);
|
|
2663
|
+
} else logger$1.log(" ⚠ No registry configured. Set docker.registry in gkm.config.ts");
|
|
2664
|
+
else {
|
|
2665
|
+
logger$1.log(" Available registries:");
|
|
2666
|
+
registries.forEach((reg, i) => {
|
|
2667
|
+
logger$1.log(` ${i + 1}. ${reg.registryName} (${reg.registryUrl})`);
|
|
2668
|
+
});
|
|
2669
|
+
if (dockerConfig.registry) logger$1.log(` ${registries.length + 1}. Create new registry`);
|
|
2670
|
+
const maxOption = dockerConfig.registry ? registries.length + 1 : registries.length;
|
|
2671
|
+
const selection = await prompt(` Select registry (1-${maxOption}): `);
|
|
2672
|
+
const index = parseInt(selection, 10) - 1;
|
|
2673
|
+
if (index >= 0 && index < registries.length) {
|
|
2674
|
+
registryId = registries[index].registryId;
|
|
2675
|
+
await storeDokployRegistryId(registryId);
|
|
2676
|
+
logger$1.log(` ✓ Selected: ${registries[index].registryName}`);
|
|
2677
|
+
} else if (dockerConfig.registry && index === registries.length) {
|
|
2678
|
+
logger$1.log(`\n Creating new registry...`);
|
|
2679
|
+
logger$1.log(` Registry URL: ${dockerConfig.registry}`);
|
|
2680
|
+
const username = await prompt(" Registry username: ");
|
|
2681
|
+
const password = await prompt(" Registry password/token: ", true);
|
|
2682
|
+
const registry = await api.createRegistry(dockerConfig.registry.replace(/^https?:\/\//, ""), dockerConfig.registry, username, password);
|
|
2683
|
+
registryId = registry.registryId;
|
|
2684
|
+
await storeDokployRegistryId(registryId);
|
|
2685
|
+
logger$1.log(` ✓ Registry created: ${registryId}`);
|
|
2686
|
+
} else logger$1.log(" ⚠ Invalid selection, skipping registry setup");
|
|
2687
|
+
}
|
|
2688
|
+
}
|
|
2689
|
+
const dokployConfig = {
|
|
2690
|
+
endpoint: creds.endpoint,
|
|
2691
|
+
projectId: project.projectId,
|
|
2692
|
+
applicationId,
|
|
2693
|
+
registryId: registryId ?? void 0
|
|
2384
2694
|
};
|
|
2385
|
-
|
|
2386
|
-
|
|
2387
|
-
|
|
2388
|
-
|
|
2389
|
-
logger$1.log(
|
|
2390
|
-
const
|
|
2391
|
-
|
|
2392
|
-
|
|
2393
|
-
|
|
2394
|
-
const entrypoint = generateDockerEntrypoint();
|
|
2395
|
-
const entrypointPath = (0, node_path.join)(dockerDir, "docker-entrypoint.sh");
|
|
2396
|
-
await (0, node_fs_promises.writeFile)(entrypointPath, entrypoint);
|
|
2397
|
-
logger$1.log("Generated: .gkm/docker/docker-entrypoint.sh");
|
|
2398
|
-
const result = {
|
|
2399
|
-
dockerfile: dockerfilePath,
|
|
2400
|
-
dockerCompose: composePath,
|
|
2401
|
-
dockerignore: dockerignorePath,
|
|
2402
|
-
entrypoint: entrypointPath
|
|
2695
|
+
await updateConfig(dokployConfig);
|
|
2696
|
+
logger$1.log("\n✅ Dokploy setup complete!");
|
|
2697
|
+
logger$1.log(` Project: ${project.projectId}`);
|
|
2698
|
+
logger$1.log(` Application: ${applicationId}`);
|
|
2699
|
+
if (registryId) logger$1.log(` Registry: ${registryId}`);
|
|
2700
|
+
const serviceUrls = await provisionServices(api, project.projectId, environmentId, dockerConfig.imageName || "app", services, existingUrls);
|
|
2701
|
+
return {
|
|
2702
|
+
config: dokployConfig,
|
|
2703
|
+
serviceUrls
|
|
2403
2704
|
};
|
|
2404
|
-
if (options.build) await buildDockerImage(dockerConfig.imageName, options);
|
|
2405
|
-
if (options.push) await pushDockerImage(dockerConfig.imageName, options);
|
|
2406
|
-
return result;
|
|
2407
2705
|
}
|
|
2408
2706
|
/**
|
|
2409
|
-
*
|
|
2410
|
-
* Uses BuildKit for cache mount support
|
|
2707
|
+
* Generate image tag from stage and timestamp
|
|
2411
2708
|
*/
|
|
2412
|
-
|
|
2413
|
-
const
|
|
2414
|
-
|
|
2415
|
-
const fullImageName = registry ? `${registry}/${imageName}:${tag}` : `${imageName}:${tag}`;
|
|
2416
|
-
logger$1.log(`\n🐳 Building Docker image: ${fullImageName}`);
|
|
2417
|
-
try {
|
|
2418
|
-
(0, node_child_process.execSync)(`DOCKER_BUILDKIT=1 docker build -f .gkm/docker/Dockerfile -t ${fullImageName} .`, {
|
|
2419
|
-
cwd: process.cwd(),
|
|
2420
|
-
stdio: "inherit",
|
|
2421
|
-
env: {
|
|
2422
|
-
...process.env,
|
|
2423
|
-
DOCKER_BUILDKIT: "1"
|
|
2424
|
-
}
|
|
2425
|
-
});
|
|
2426
|
-
logger$1.log(`✅ Docker image built: ${fullImageName}`);
|
|
2427
|
-
} catch (error) {
|
|
2428
|
-
throw new Error(`Failed to build Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
|
|
2429
|
-
}
|
|
2709
|
+
function generateTag(stage) {
|
|
2710
|
+
const timestamp = (/* @__PURE__ */ new Date()).toISOString().replace(/[:.]/g, "-").slice(0, 19);
|
|
2711
|
+
return `${stage}-${timestamp}`;
|
|
2430
2712
|
}
|
|
2431
2713
|
/**
|
|
2432
|
-
*
|
|
2714
|
+
* Main deploy command
|
|
2433
2715
|
*/
|
|
2434
|
-
async function
|
|
2435
|
-
const tag = options
|
|
2436
|
-
|
|
2437
|
-
|
|
2438
|
-
const
|
|
2439
|
-
|
|
2440
|
-
|
|
2441
|
-
|
|
2442
|
-
|
|
2443
|
-
|
|
2716
|
+
async function deployCommand(options) {
|
|
2717
|
+
const { provider, stage, tag, skipPush, skipBuild } = options;
|
|
2718
|
+
logger$1.log(`\n🚀 Deploying to ${provider}...`);
|
|
2719
|
+
logger$1.log(` Stage: ${stage}`);
|
|
2720
|
+
const config = await require_config.loadConfig();
|
|
2721
|
+
const imageTag = tag ?? generateTag(stage);
|
|
2722
|
+
logger$1.log(` Tag: ${imageTag}`);
|
|
2723
|
+
const dockerConfig = resolveDockerConfig(config);
|
|
2724
|
+
const imageName = dockerConfig.imageName ?? "app";
|
|
2725
|
+
const registry = dockerConfig.registry;
|
|
2726
|
+
const imageRef = registry ? `${registry}/${imageName}:${imageTag}` : `${imageName}:${imageTag}`;
|
|
2727
|
+
let dokployConfig;
|
|
2728
|
+
let finalRegistry = registry;
|
|
2729
|
+
if (provider === "dokploy") {
|
|
2730
|
+
const composeServices = config.docker?.compose?.services;
|
|
2731
|
+
logger$1.log(`\n🔍 Docker compose config: ${JSON.stringify(config.docker?.compose)}`);
|
|
2732
|
+
const dockerServices = composeServices ? Array.isArray(composeServices) ? {
|
|
2733
|
+
postgres: composeServices.includes("postgres"),
|
|
2734
|
+
redis: composeServices.includes("redis"),
|
|
2735
|
+
rabbitmq: composeServices.includes("rabbitmq")
|
|
2736
|
+
} : {
|
|
2737
|
+
postgres: Boolean(composeServices.postgres),
|
|
2738
|
+
redis: Boolean(composeServices.redis),
|
|
2739
|
+
rabbitmq: Boolean(composeServices.rabbitmq)
|
|
2740
|
+
} : void 0;
|
|
2741
|
+
const setupResult = await ensureDokploySetup(config, dockerConfig, stage, dockerServices);
|
|
2742
|
+
dokployConfig = setupResult.config;
|
|
2743
|
+
finalRegistry = dokployConfig.registry ?? dockerConfig.registry;
|
|
2744
|
+
if (setupResult.serviceUrls) {
|
|
2745
|
+
const { readStageSecrets: readStageSecrets$1, writeStageSecrets: writeStageSecrets$1, initStageSecrets } = await Promise.resolve().then(() => require("./storage-UfyTn7Zm.cjs"));
|
|
2746
|
+
let secrets = await readStageSecrets$1(stage);
|
|
2747
|
+
if (!secrets) {
|
|
2748
|
+
logger$1.log(` Creating secrets file for stage "${stage}"...`);
|
|
2749
|
+
secrets = initStageSecrets(stage);
|
|
2750
|
+
}
|
|
2751
|
+
let updated = false;
|
|
2752
|
+
for (const [key, value] of Object.entries(setupResult.serviceUrls)) {
|
|
2753
|
+
const urlKey = key;
|
|
2754
|
+
if (value && !secrets.urls[urlKey] && !secrets.custom[key]) {
|
|
2755
|
+
secrets.urls[urlKey] = value;
|
|
2756
|
+
logger$1.log(` Saved ${key} to secrets`);
|
|
2757
|
+
updated = true;
|
|
2758
|
+
}
|
|
2759
|
+
}
|
|
2760
|
+
if (updated) await writeStageSecrets$1(secrets);
|
|
2761
|
+
}
|
|
2762
|
+
}
|
|
2763
|
+
let masterKey;
|
|
2764
|
+
if (!skipBuild) {
|
|
2765
|
+
logger$1.log(`\n📦 Building for production...`);
|
|
2766
|
+
const buildResult = await buildCommand({
|
|
2767
|
+
provider: "server",
|
|
2768
|
+
production: true,
|
|
2769
|
+
stage
|
|
2444
2770
|
});
|
|
2445
|
-
|
|
2446
|
-
}
|
|
2447
|
-
|
|
2771
|
+
masterKey = buildResult.masterKey;
|
|
2772
|
+
} else logger$1.log(`\n⏭️ Skipping build (--skip-build)`);
|
|
2773
|
+
let result;
|
|
2774
|
+
switch (provider) {
|
|
2775
|
+
case "docker": {
|
|
2776
|
+
result = await deployDocker({
|
|
2777
|
+
stage,
|
|
2778
|
+
tag: imageTag,
|
|
2779
|
+
skipPush,
|
|
2780
|
+
masterKey,
|
|
2781
|
+
config: dockerConfig
|
|
2782
|
+
});
|
|
2783
|
+
break;
|
|
2784
|
+
}
|
|
2785
|
+
case "dokploy": {
|
|
2786
|
+
if (!dokployConfig) throw new Error("Dokploy config not initialized");
|
|
2787
|
+
const finalImageRef = finalRegistry ? `${finalRegistry}/${imageName}:${imageTag}` : `${imageName}:${imageTag}`;
|
|
2788
|
+
await deployDocker({
|
|
2789
|
+
stage,
|
|
2790
|
+
tag: imageTag,
|
|
2791
|
+
skipPush: false,
|
|
2792
|
+
masterKey,
|
|
2793
|
+
config: {
|
|
2794
|
+
registry: finalRegistry,
|
|
2795
|
+
imageName: dockerConfig.imageName
|
|
2796
|
+
}
|
|
2797
|
+
});
|
|
2798
|
+
result = await deployDokploy({
|
|
2799
|
+
stage,
|
|
2800
|
+
tag: imageTag,
|
|
2801
|
+
imageRef: finalImageRef,
|
|
2802
|
+
masterKey,
|
|
2803
|
+
config: dokployConfig
|
|
2804
|
+
});
|
|
2805
|
+
break;
|
|
2806
|
+
}
|
|
2807
|
+
case "aws-lambda": {
|
|
2808
|
+
logger$1.log("\n⚠️ AWS Lambda deployment is not yet implemented.");
|
|
2809
|
+
logger$1.log(" Use SST or AWS CDK for Lambda deployments.");
|
|
2810
|
+
result = {
|
|
2811
|
+
imageRef,
|
|
2812
|
+
masterKey
|
|
2813
|
+
};
|
|
2814
|
+
break;
|
|
2815
|
+
}
|
|
2816
|
+
default: throw new Error(`Unknown deploy provider: ${provider}\nSupported providers: docker, dokploy, aws-lambda`);
|
|
2448
2817
|
}
|
|
2818
|
+
logger$1.log("\n✅ Deployment complete!");
|
|
2819
|
+
return result;
|
|
2449
2820
|
}
|
|
2450
2821
|
|
|
2451
2822
|
//#endregion
|
|
@@ -4076,11 +4447,11 @@ async function initCommand(projectName, options = {}) {
|
|
|
4076
4447
|
};
|
|
4077
4448
|
const targetDir = (0, node_path.join)(cwd, name$1);
|
|
4078
4449
|
const template = getTemplate(templateOptions.template);
|
|
4079
|
-
const isMonorepo = templateOptions.monorepo;
|
|
4450
|
+
const isMonorepo$1 = templateOptions.monorepo;
|
|
4080
4451
|
const apiPath = templateOptions.apiPath;
|
|
4081
4452
|
await (0, node_fs_promises.mkdir)(targetDir, { recursive: true });
|
|
4082
|
-
const appDir = isMonorepo ? (0, node_path.join)(targetDir, apiPath) : targetDir;
|
|
4083
|
-
if (isMonorepo) await (0, node_fs_promises.mkdir)(appDir, { recursive: true });
|
|
4453
|
+
const appDir = isMonorepo$1 ? (0, node_path.join)(targetDir, apiPath) : targetDir;
|
|
4454
|
+
if (isMonorepo$1) await (0, node_fs_promises.mkdir)(appDir, { recursive: true });
|
|
4084
4455
|
const appFiles = [
|
|
4085
4456
|
...generatePackageJson(templateOptions, template),
|
|
4086
4457
|
...generateConfigFiles(templateOptions, template),
|
|
@@ -4096,7 +4467,7 @@ async function initCommand(projectName, options = {}) {
|
|
|
4096
4467
|
}
|
|
4097
4468
|
for (const { path, content } of appFiles) {
|
|
4098
4469
|
const fullPath = (0, node_path.join)(appDir, path);
|
|
4099
|
-
const _displayPath = isMonorepo ? `${apiPath}/${path}` : path;
|
|
4470
|
+
const _displayPath = isMonorepo$1 ? `${apiPath}/${path}` : path;
|
|
4100
4471
|
await (0, node_fs_promises.mkdir)((0, node_path.dirname)(fullPath), { recursive: true });
|
|
4101
4472
|
await (0, node_fs_promises.writeFile)(fullPath, content);
|
|
4102
4473
|
}
|
|
@@ -4429,7 +4800,8 @@ program.command("init").description("Scaffold a new project").argument("[name]",
|
|
|
4429
4800
|
const globalOptions = program.opts();
|
|
4430
4801
|
if (globalOptions.cwd) process.chdir(globalOptions.cwd);
|
|
4431
4802
|
await initCommand(name$1, options);
|
|
4432
|
-
} catch (
|
|
4803
|
+
} catch (error) {
|
|
4804
|
+
console.error(error instanceof Error ? error.message : "Command failed");
|
|
4433
4805
|
process.exit(1);
|
|
4434
4806
|
}
|
|
4435
4807
|
});
|
|
@@ -4461,7 +4833,8 @@ program.command("build").description("Build handlers from endpoints, functions,
|
|
|
4461
4833
|
skipBundle: options.skipBundle || false,
|
|
4462
4834
|
stage: options.stage
|
|
4463
4835
|
});
|
|
4464
|
-
} catch (
|
|
4836
|
+
} catch (error) {
|
|
4837
|
+
console.error(error instanceof Error ? error.message : "Command failed");
|
|
4465
4838
|
process.exit(1);
|
|
4466
4839
|
}
|
|
4467
4840
|
});
|
|
@@ -4474,7 +4847,8 @@ program.command("dev").description("Start development server with automatic relo
|
|
|
4474
4847
|
portExplicit: !!options.port,
|
|
4475
4848
|
enableOpenApi: options.enableOpenapi ?? true
|
|
4476
4849
|
});
|
|
4477
|
-
} catch (
|
|
4850
|
+
} catch (error) {
|
|
4851
|
+
console.error(error instanceof Error ? error.message : "Command failed");
|
|
4478
4852
|
process.exit(1);
|
|
4479
4853
|
}
|
|
4480
4854
|
});
|
|
@@ -4498,7 +4872,8 @@ program.command("openapi").description("Generate OpenAPI specification from endp
|
|
|
4498
4872
|
const globalOptions = program.opts();
|
|
4499
4873
|
if (globalOptions.cwd) process.chdir(globalOptions.cwd);
|
|
4500
4874
|
await require_openapi.openapiCommand({});
|
|
4501
|
-
} catch (
|
|
4875
|
+
} catch (error) {
|
|
4876
|
+
console.error(error instanceof Error ? error.message : "Command failed");
|
|
4502
4877
|
process.exit(1);
|
|
4503
4878
|
}
|
|
4504
4879
|
});
|
|
@@ -4507,7 +4882,8 @@ program.command("generate:react-query").description("Generate React Query hooks
|
|
|
4507
4882
|
const globalOptions = program.opts();
|
|
4508
4883
|
if (globalOptions.cwd) process.chdir(globalOptions.cwd);
|
|
4509
4884
|
await require_openapi_react_query.generateReactQueryCommand(options);
|
|
4510
|
-
} catch (
|
|
4885
|
+
} catch (error) {
|
|
4886
|
+
console.error(error instanceof Error ? error.message : "Command failed");
|
|
4511
4887
|
process.exit(1);
|
|
4512
4888
|
}
|
|
4513
4889
|
});
|
|
@@ -4516,7 +4892,8 @@ program.command("docker").description("Generate Docker deployment files").option
|
|
|
4516
4892
|
const globalOptions = program.opts();
|
|
4517
4893
|
if (globalOptions.cwd) process.chdir(globalOptions.cwd);
|
|
4518
4894
|
await dockerCommand(options);
|
|
4519
|
-
} catch (
|
|
4895
|
+
} catch (error) {
|
|
4896
|
+
console.error(error instanceof Error ? error.message : "Command failed");
|
|
4520
4897
|
process.exit(1);
|
|
4521
4898
|
}
|
|
4522
4899
|
});
|
|
@@ -4544,7 +4921,8 @@ program.command("prepack").description("Generate Docker files for production dep
|
|
|
4544
4921
|
const registry = options.registry;
|
|
4545
4922
|
const _imageRef = registry ? `${registry}/api:${tag}` : `api:${tag}`;
|
|
4546
4923
|
}
|
|
4547
|
-
} catch (
|
|
4924
|
+
} catch (error) {
|
|
4925
|
+
console.error(error instanceof Error ? error.message : "Command failed");
|
|
4548
4926
|
process.exit(1);
|
|
4549
4927
|
}
|
|
4550
4928
|
});
|
|
@@ -4553,7 +4931,8 @@ program.command("secrets:init").description("Initialize secrets for a deployment
|
|
|
4553
4931
|
const globalOptions = program.opts();
|
|
4554
4932
|
if (globalOptions.cwd) process.chdir(globalOptions.cwd);
|
|
4555
4933
|
await secretsInitCommand(options);
|
|
4556
|
-
} catch (
|
|
4934
|
+
} catch (error) {
|
|
4935
|
+
console.error(error instanceof Error ? error.message : "Command failed");
|
|
4557
4936
|
process.exit(1);
|
|
4558
4937
|
}
|
|
4559
4938
|
});
|
|
@@ -4562,7 +4941,8 @@ program.command("secrets:set").description("Set a custom secret for a stage").ar
|
|
|
4562
4941
|
const globalOptions = program.opts();
|
|
4563
4942
|
if (globalOptions.cwd) process.chdir(globalOptions.cwd);
|
|
4564
4943
|
await secretsSetCommand(key, value, options);
|
|
4565
|
-
} catch (
|
|
4944
|
+
} catch (error) {
|
|
4945
|
+
console.error(error instanceof Error ? error.message : "Command failed");
|
|
4566
4946
|
process.exit(1);
|
|
4567
4947
|
}
|
|
4568
4948
|
});
|
|
@@ -4571,7 +4951,8 @@ program.command("secrets:show").description("Show secrets for a stage").required
|
|
|
4571
4951
|
const globalOptions = program.opts();
|
|
4572
4952
|
if (globalOptions.cwd) process.chdir(globalOptions.cwd);
|
|
4573
4953
|
await secretsShowCommand(options);
|
|
4574
|
-
} catch (
|
|
4954
|
+
} catch (error) {
|
|
4955
|
+
console.error(error instanceof Error ? error.message : "Command failed");
|
|
4575
4956
|
process.exit(1);
|
|
4576
4957
|
}
|
|
4577
4958
|
});
|
|
@@ -4580,7 +4961,8 @@ program.command("secrets:rotate").description("Rotate service passwords").requir
|
|
|
4580
4961
|
const globalOptions = program.opts();
|
|
4581
4962
|
if (globalOptions.cwd) process.chdir(globalOptions.cwd);
|
|
4582
4963
|
await secretsRotateCommand(options);
|
|
4583
|
-
} catch (
|
|
4964
|
+
} catch (error) {
|
|
4965
|
+
console.error(error instanceof Error ? error.message : "Command failed");
|
|
4584
4966
|
process.exit(1);
|
|
4585
4967
|
}
|
|
4586
4968
|
});
|
|
@@ -4589,7 +4971,8 @@ program.command("secrets:import").description("Import secrets from a JSON file")
|
|
|
4589
4971
|
const globalOptions = program.opts();
|
|
4590
4972
|
if (globalOptions.cwd) process.chdir(globalOptions.cwd);
|
|
4591
4973
|
await secretsImportCommand(file, options);
|
|
4592
|
-
} catch (
|
|
4974
|
+
} catch (error) {
|
|
4975
|
+
console.error(error instanceof Error ? error.message : "Command failed");
|
|
4593
4976
|
process.exit(1);
|
|
4594
4977
|
}
|
|
4595
4978
|
});
|
|
@@ -4613,7 +4996,8 @@ program.command("deploy").description("Deploy application to a provider").requir
|
|
|
4613
4996
|
skipPush: options.skipPush,
|
|
4614
4997
|
skipBuild: options.skipBuild
|
|
4615
4998
|
});
|
|
4616
|
-
} catch (
|
|
4999
|
+
} catch (error) {
|
|
5000
|
+
console.error(error instanceof Error ? error.message : "Deploy failed");
|
|
4617
5001
|
process.exit(1);
|
|
4618
5002
|
}
|
|
4619
5003
|
});
|