@geekmidas/cli 0.48.0 → 0.50.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/deploy/sniffer-envkit-patch.cjs +27 -0
- package/dist/deploy/sniffer-envkit-patch.cjs.map +1 -0
- package/dist/deploy/sniffer-envkit-patch.d.cts +46 -0
- package/dist/deploy/sniffer-envkit-patch.d.cts.map +1 -0
- package/dist/deploy/sniffer-envkit-patch.d.mts +46 -0
- package/dist/deploy/sniffer-envkit-patch.d.mts.map +1 -0
- package/dist/deploy/sniffer-envkit-patch.mjs +20 -0
- package/dist/deploy/sniffer-envkit-patch.mjs.map +1 -0
- package/dist/deploy/sniffer-hooks.cjs +25 -0
- package/dist/deploy/sniffer-hooks.cjs.map +1 -0
- package/dist/deploy/sniffer-hooks.d.cts +27 -0
- package/dist/deploy/sniffer-hooks.d.cts.map +1 -0
- package/dist/deploy/sniffer-hooks.d.mts +27 -0
- package/dist/deploy/sniffer-hooks.d.mts.map +1 -0
- package/dist/deploy/sniffer-hooks.mjs +24 -0
- package/dist/deploy/sniffer-hooks.mjs.map +1 -0
- package/dist/deploy/sniffer-loader.cjs +16 -0
- package/dist/deploy/sniffer-loader.cjs.map +1 -0
- package/dist/deploy/sniffer-loader.d.cts +1 -0
- package/dist/deploy/sniffer-loader.d.mts +1 -0
- package/dist/deploy/sniffer-loader.mjs +15 -0
- package/dist/deploy/sniffer-loader.mjs.map +1 -0
- package/dist/deploy/sniffer-worker.cjs +42 -0
- package/dist/deploy/sniffer-worker.cjs.map +1 -0
- package/dist/deploy/sniffer-worker.d.cts +9 -0
- package/dist/deploy/sniffer-worker.d.cts.map +1 -0
- package/dist/deploy/sniffer-worker.d.mts +9 -0
- package/dist/deploy/sniffer-worker.d.mts.map +1 -0
- package/dist/deploy/sniffer-worker.mjs +41 -0
- package/dist/deploy/sniffer-worker.mjs.map +1 -0
- package/dist/{dokploy-api-DvzIDxTj.mjs → dokploy-api-94KzmTVf.mjs} +4 -4
- package/dist/dokploy-api-94KzmTVf.mjs.map +1 -0
- package/dist/dokploy-api-CItuaWTq.mjs +3 -0
- package/dist/dokploy-api-DBNE8MDt.cjs +3 -0
- package/dist/{dokploy-api-BDLu0qWi.cjs → dokploy-api-YD8WCQfW.cjs} +4 -4
- package/dist/dokploy-api-YD8WCQfW.cjs.map +1 -0
- package/dist/index.cjs +2415 -1893
- package/dist/index.cjs.map +1 -1
- package/dist/index.mjs +2411 -1889
- package/dist/index.mjs.map +1 -1
- package/package.json +8 -6
- package/src/build/__tests__/handler-templates.spec.ts +947 -0
- package/src/deploy/__tests__/__fixtures__/entry-apps/async-entry.ts +24 -0
- package/src/deploy/__tests__/__fixtures__/entry-apps/nested-config-entry.ts +24 -0
- package/src/deploy/__tests__/__fixtures__/entry-apps/no-env-entry.ts +12 -0
- package/src/deploy/__tests__/__fixtures__/entry-apps/simple-entry.ts +14 -0
- package/src/deploy/__tests__/__fixtures__/entry-apps/throwing-entry.ts +16 -0
- package/src/deploy/__tests__/__fixtures__/env-parsers/non-function-export.ts +10 -0
- package/src/deploy/__tests__/__fixtures__/env-parsers/parseable-env-parser.ts +18 -0
- package/src/deploy/__tests__/__fixtures__/env-parsers/throwing-env-parser.ts +18 -0
- package/src/deploy/__tests__/__fixtures__/env-parsers/valid-env-parser.ts +16 -0
- package/src/deploy/__tests__/dns-verification.spec.ts +229 -0
- package/src/deploy/__tests__/dokploy-api.spec.ts +2 -3
- package/src/deploy/__tests__/domain.spec.ts +7 -3
- package/src/deploy/__tests__/env-resolver.spec.ts +469 -0
- package/src/deploy/__tests__/index.spec.ts +12 -12
- package/src/deploy/__tests__/secrets.spec.ts +4 -1
- package/src/deploy/__tests__/sniffer.spec.ts +326 -1
- package/src/deploy/__tests__/state.spec.ts +844 -0
- package/src/deploy/dns/hostinger-api.ts +4 -1
- package/src/deploy/dns/index.ts +113 -1
- package/src/deploy/docker.ts +1 -2
- package/src/deploy/dokploy-api.ts +18 -9
- package/src/deploy/domain.ts +5 -4
- package/src/deploy/env-resolver.ts +278 -0
- package/src/deploy/index.ts +525 -119
- package/src/deploy/secrets.ts +7 -2
- package/src/deploy/sniffer-envkit-patch.ts +59 -0
- package/src/deploy/sniffer-hooks.ts +57 -0
- package/src/deploy/sniffer-loader.ts +28 -0
- package/src/deploy/sniffer-worker.ts +74 -0
- package/src/deploy/sniffer.ts +170 -14
- package/src/deploy/state.ts +162 -1
- package/src/init/versions.ts +3 -3
- package/tsconfig.tsbuildinfo +1 -1
- package/tsdown.config.ts +5 -0
- package/dist/dokploy-api-BDLu0qWi.cjs.map +0 -1
- package/dist/dokploy-api-BN3V57z1.mjs +0 -3
- package/dist/dokploy-api-BdCKjFDA.cjs +0 -3
- package/dist/dokploy-api-DvzIDxTj.mjs.map +0 -1
package/dist/index.mjs
CHANGED

@@ -3,7 +3,7 @@ import { __require, getAppBuildOrder, getDependencyEnvVars, getDeployTargetError
 import { getAppNameFromCwd, loadAppConfig, loadConfig, loadWorkspaceConfig, parseModuleConfig } from "./config-C3LSBNSl.mjs";
 import { ConstructGenerator, EndpointGenerator, OPENAPI_OUTPUT_PATH, OpenApiTsGenerator, generateOpenApi, openapiCommand, resolveOpenApiConfig } from "./openapi-C3C-BzIZ.mjs";
 import { getKeyPath, maskPassword, readStageSecrets, secretsExist, setCustomSecret, toEmbeddableSecrets, writeStageSecrets } from "./storage-Dhst7BhI.mjs";
-import { DokployApi } from "./dokploy-api-DvzIDxTj.mjs";
+import { DokployApi } from "./dokploy-api-94KzmTVf.mjs";
 import { encryptSecrets } from "./encryption-BC4MAODn.mjs";
 import { generateReactQueryCommand } from "./openapi-react-query-ZoP9DPbY.mjs";
 import { createRequire } from "node:module";
@@ -23,13 +23,14 @@ import { Cron } from "@geekmidas/constructs/crons";
 import { Function } from "@geekmidas/constructs/functions";
 import { Subscriber } from "@geekmidas/constructs/subscribers";
 import { createHash, randomBytes } from "node:crypto";
+import { Client } from "pg";
 import { lookup } from "node:dns/promises";
-import { pathToFileURL } from "node:url";
+import { fileURLToPath, pathToFileURL } from "node:url";
 import prompts from "prompts";
 
 //#region package.json
 var name = "@geekmidas/cli";
-var version = "0.48.0";
+var version = "0.50.0";
 var description = "CLI tools for building Lambda handlers, server applications, and generating OpenAPI specs";
 var private$1 = false;
 var type = "module";
@@ -85,12 +86,14 @@ var dependencies = {
 	"hono": "~4.8.0",
 	"lodash.kebabcase": "^4.1.1",
 	"openapi-typescript": "^7.4.2",
+	"pg": "~8.17.1",
 	"prompts": "~2.4.2"
 };
 var devDependencies = {
 	"@geekmidas/testkit": "workspace:*",
 	"@types/lodash.kebabcase": "^4.1.9",
 	"@types/node": "~24.9.1",
+	"@types/pg": "~8.16.0",
 	"@types/prompts": "~2.4.9",
 	"typescript": "^5.8.2",
 	"vitest": "^3.2.4",
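Version 0.50.0 promotes `pg` to a runtime dependency (with `@types/pg` in devDependencies) and imports `Client` into the bundle, per the hunks above. The actual call sites fall outside this excerpt; the sketch below only illustrates the kind of Postgres readiness probe such a client enables during deploys — the connection handling and retry policy here are assumptions, not the package's code.

```ts
import { Client } from "pg";

// Hypothetical sketch: how a deploy flow might use pg's Client to wait for a
// freshly provisioned Postgres service. Not taken from @geekmidas/cli itself.
async function waitForPostgres(connectionString: string, attempts = 10): Promise<void> {
  for (let i = 0; i < attempts; i++) {
    const client = new Client({ connectionString });
    try {
      await client.connect();
      await client.query("SELECT 1"); // server accepted a query: it is ready
      return;
    } catch {
      await new Promise((resolve) => setTimeout(resolve, 1000)); // back off, retry
    } finally {
      await client.end().catch(() => {});
    }
  }
  throw new Error(`Postgres not reachable after ${attempts} attempts`);
}
```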
@@ -253,7 +256,7 @@ const logger$11 = console;
 * Validate Dokploy token by making a test API call
 */
 async function validateDokployToken(endpoint, token) {
-	const { DokployApi: DokployApi$1 } = await import("./dokploy-api-BN3V57z1.mjs");
+	const { DokployApi: DokployApi$1 } = await import("./dokploy-api-CItuaWTq.mjs");
 	const api = new DokployApi$1({
 		baseUrl: endpoint,
 		token
@@ -2274,2027 +2277,2143 @@ function getAppOutputPath(workspace, _appName, app) {
|
|
|
2274
2277
|
}
|
|
2275
2278
|
|
|
2276
2279
|
//#endregion
|
|
2277
|
-
//#region src/
|
|
2278
|
-
/**
|
|
2279
|
-
|
|
2280
|
-
|
|
2281
|
-
|
|
2282
|
-
|
|
2283
|
-
};
|
|
2284
|
-
/** Default Docker image versions for services */
|
|
2285
|
-
const DEFAULT_SERVICE_VERSIONS = {
|
|
2286
|
-
postgres: "16-alpine",
|
|
2287
|
-
redis: "7-alpine",
|
|
2288
|
-
rabbitmq: "3-management-alpine"
|
|
2289
|
-
};
|
|
2290
|
-
/** Get the default full image reference for a service */
|
|
2291
|
-
function getDefaultImage(serviceName) {
|
|
2292
|
-
return `${DEFAULT_SERVICE_IMAGES[serviceName]}:${DEFAULT_SERVICE_VERSIONS[serviceName]}`;
|
|
2280
|
+
//#region src/deploy/state.ts
|
|
2281
|
+
/**
|
|
2282
|
+
* Get the state file path for a stage
|
|
2283
|
+
*/
|
|
2284
|
+
function getStateFilePath(workspaceRoot, stage) {
|
|
2285
|
+
return join(workspaceRoot, ".gkm", `deploy-${stage}.json`);
|
|
2293
2286
|
}
|
|
2294
|
-
/**
|
|
2295
|
-
|
|
2296
|
-
|
|
2297
|
-
|
|
2298
|
-
|
|
2299
|
-
|
|
2300
|
-
|
|
2301
|
-
|
|
2302
|
-
|
|
2303
|
-
|
|
2304
|
-
|
|
2305
|
-
|
|
2306
|
-
|
|
2307
|
-
}
|
|
2308
|
-
}
|
|
2287
|
+
/**
|
|
2288
|
+
* Read the deploy state for a stage
|
|
2289
|
+
* Returns null if state file doesn't exist
|
|
2290
|
+
*/
|
|
2291
|
+
async function readStageState(workspaceRoot, stage) {
|
|
2292
|
+
const filePath = getStateFilePath(workspaceRoot, stage);
|
|
2293
|
+
try {
|
|
2294
|
+
const content = await readFile(filePath, "utf-8");
|
|
2295
|
+
return JSON.parse(content);
|
|
2296
|
+
} catch (error) {
|
|
2297
|
+
if (error.code === "ENOENT") return null;
|
|
2298
|
+
console.warn(`Warning: Could not read deploy state: ${error}`);
|
|
2299
|
+
return null;
|
|
2309
2300
|
}
|
|
2310
|
-
return result;
|
|
2311
2301
|
}
|
|
2312
2302
|
/**
|
|
2313
|
-
*
|
|
2303
|
+
* Write the deploy state for a stage
|
|
2314
2304
|
*/
|
|
2315
|
-
function
|
|
2316
|
-
const
|
|
2317
|
-
const
|
|
2318
|
-
|
|
2319
|
-
|
|
2320
|
-
|
|
2321
|
-
services:
|
|
2322
|
-
api:
|
|
2323
|
-
build:
|
|
2324
|
-
context: ../..
|
|
2325
|
-
dockerfile: .gkm/docker/Dockerfile
|
|
2326
|
-
image: ${imageRef}\${IMAGE_NAME:-${imageName}}:\${TAG:-latest}
|
|
2327
|
-
container_name: ${imageName}
|
|
2328
|
-
restart: unless-stopped
|
|
2329
|
-
ports:
|
|
2330
|
-
- "\${PORT:-${port}}:${port}"
|
|
2331
|
-
environment:
|
|
2332
|
-
- NODE_ENV=production
|
|
2333
|
-
`;
|
|
2334
|
-
if (serviceMap.has("postgres")) yaml += ` - DATABASE_URL=\${DATABASE_URL:-postgresql://postgres:postgres@postgres:5432/app}
|
|
2335
|
-
`;
|
|
2336
|
-
if (serviceMap.has("redis")) yaml += ` - REDIS_URL=\${REDIS_URL:-redis://redis:6379}
|
|
2337
|
-
`;
|
|
2338
|
-
if (serviceMap.has("rabbitmq")) yaml += ` - RABBITMQ_URL=\${RABBITMQ_URL:-amqp://rabbitmq:5672}
|
|
2339
|
-
`;
|
|
2340
|
-
yaml += ` healthcheck:
|
|
2341
|
-
test: ["CMD", "wget", "-q", "--spider", "http://localhost:${port}${healthCheckPath}"]
|
|
2342
|
-
interval: 30s
|
|
2343
|
-
timeout: 3s
|
|
2344
|
-
retries: 3
|
|
2345
|
-
`;
|
|
2346
|
-
if (serviceMap.size > 0) {
|
|
2347
|
-
yaml += ` depends_on:
|
|
2348
|
-
`;
|
|
2349
|
-
for (const serviceName of serviceMap.keys()) yaml += ` ${serviceName}:
|
|
2350
|
-
condition: service_healthy
|
|
2351
|
-
`;
|
|
2352
|
-
}
|
|
2353
|
-
yaml += ` networks:
|
|
2354
|
-
- app-network
|
|
2355
|
-
`;
|
|
2356
|
-
const postgresImage = serviceMap.get("postgres");
|
|
2357
|
-
if (postgresImage) yaml += `
|
|
2358
|
-
postgres:
|
|
2359
|
-
image: ${postgresImage}
|
|
2360
|
-
container_name: postgres
|
|
2361
|
-
restart: unless-stopped
|
|
2362
|
-
environment:
|
|
2363
|
-
POSTGRES_USER: \${POSTGRES_USER:-postgres}
|
|
2364
|
-
POSTGRES_PASSWORD: \${POSTGRES_PASSWORD:-postgres}
|
|
2365
|
-
POSTGRES_DB: \${POSTGRES_DB:-app}
|
|
2366
|
-
volumes:
|
|
2367
|
-
- postgres_data:/var/lib/postgresql/data
|
|
2368
|
-
healthcheck:
|
|
2369
|
-
test: ["CMD-SHELL", "pg_isready -U postgres"]
|
|
2370
|
-
interval: 5s
|
|
2371
|
-
timeout: 5s
|
|
2372
|
-
retries: 5
|
|
2373
|
-
networks:
|
|
2374
|
-
- app-network
|
|
2375
|
-
`;
|
|
2376
|
-
const redisImage = serviceMap.get("redis");
|
|
2377
|
-
if (redisImage) yaml += `
|
|
2378
|
-
redis:
|
|
2379
|
-
image: ${redisImage}
|
|
2380
|
-
container_name: redis
|
|
2381
|
-
restart: unless-stopped
|
|
2382
|
-
volumes:
|
|
2383
|
-
- redis_data:/data
|
|
2384
|
-
healthcheck:
|
|
2385
|
-
test: ["CMD", "redis-cli", "ping"]
|
|
2386
|
-
interval: 5s
|
|
2387
|
-
timeout: 5s
|
|
2388
|
-
retries: 5
|
|
2389
|
-
networks:
|
|
2390
|
-
- app-network
|
|
2391
|
-
`;
|
|
2392
|
-
const rabbitmqImage = serviceMap.get("rabbitmq");
|
|
2393
|
-
if (rabbitmqImage) yaml += `
|
|
2394
|
-
rabbitmq:
|
|
2395
|
-
image: ${rabbitmqImage}
|
|
2396
|
-
container_name: rabbitmq
|
|
2397
|
-
restart: unless-stopped
|
|
2398
|
-
environment:
|
|
2399
|
-
RABBITMQ_DEFAULT_USER: \${RABBITMQ_USER:-guest}
|
|
2400
|
-
RABBITMQ_DEFAULT_PASS: \${RABBITMQ_PASSWORD:-guest}
|
|
2401
|
-
ports:
|
|
2402
|
-
- "15672:15672" # Management UI
|
|
2403
|
-
volumes:
|
|
2404
|
-
- rabbitmq_data:/var/lib/rabbitmq
|
|
2405
|
-
healthcheck:
|
|
2406
|
-
test: ["CMD", "rabbitmq-diagnostics", "-q", "ping"]
|
|
2407
|
-
interval: 10s
|
|
2408
|
-
timeout: 5s
|
|
2409
|
-
retries: 5
|
|
2410
|
-
networks:
|
|
2411
|
-
- app-network
|
|
2412
|
-
`;
|
|
2413
|
-
yaml += `
|
|
2414
|
-
volumes:
|
|
2415
|
-
`;
|
|
2416
|
-
if (serviceMap.has("postgres")) yaml += ` postgres_data:
|
|
2417
|
-
`;
|
|
2418
|
-
if (serviceMap.has("redis")) yaml += ` redis_data:
|
|
2419
|
-
`;
|
|
2420
|
-
if (serviceMap.has("rabbitmq")) yaml += ` rabbitmq_data:
|
|
2421
|
-
`;
|
|
2422
|
-
yaml += `
|
|
2423
|
-
networks:
|
|
2424
|
-
app-network:
|
|
2425
|
-
driver: bridge
|
|
2426
|
-
`;
|
|
2427
|
-
return yaml;
|
|
2305
|
+
async function writeStageState(workspaceRoot, stage, state) {
|
|
2306
|
+
const filePath = getStateFilePath(workspaceRoot, stage);
|
|
2307
|
+
const dir = join(workspaceRoot, ".gkm");
|
|
2308
|
+
await mkdir(dir, { recursive: true });
|
|
2309
|
+
state.lastDeployedAt = (/* @__PURE__ */ new Date()).toISOString();
|
|
2310
|
+
await writeFile(filePath, JSON.stringify(state, null, 2));
|
|
2428
2311
|
}
|
|
2429
2312
|
/**
|
|
2430
|
-
*
|
|
2313
|
+
* Create a new empty state for a stage
|
|
2431
2314
|
*/
|
|
2432
|
-
function
|
|
2433
|
-
|
|
2434
|
-
|
|
2435
|
-
|
|
2436
|
-
|
|
2437
|
-
|
|
2438
|
-
|
|
2439
|
-
|
|
2440
|
-
|
|
2441
|
-
dockerfile: .gkm/docker/Dockerfile
|
|
2442
|
-
image: ${imageRef}\${IMAGE_NAME:-${imageName}}:\${TAG:-latest}
|
|
2443
|
-
container_name: ${imageName}
|
|
2444
|
-
restart: unless-stopped
|
|
2445
|
-
ports:
|
|
2446
|
-
- "\${PORT:-${port}}:${port}"
|
|
2447
|
-
environment:
|
|
2448
|
-
- NODE_ENV=production
|
|
2449
|
-
healthcheck:
|
|
2450
|
-
test: ["CMD", "wget", "-q", "--spider", "http://localhost:${port}${healthCheckPath}"]
|
|
2451
|
-
interval: 30s
|
|
2452
|
-
timeout: 3s
|
|
2453
|
-
retries: 3
|
|
2454
|
-
networks:
|
|
2455
|
-
- app-network
|
|
2456
|
-
|
|
2457
|
-
networks:
|
|
2458
|
-
app-network:
|
|
2459
|
-
driver: bridge
|
|
2460
|
-
`;
|
|
2315
|
+
function createEmptyState(stage, environmentId) {
|
|
2316
|
+
return {
|
|
2317
|
+
provider: "dokploy",
|
|
2318
|
+
stage,
|
|
2319
|
+
environmentId,
|
|
2320
|
+
applications: {},
|
|
2321
|
+
services: {},
|
|
2322
|
+
lastDeployedAt: (/* @__PURE__ */ new Date()).toISOString()
|
|
2323
|
+
};
|
|
2461
2324
|
}
|
|
2462
2325
|
/**
|
|
2463
|
-
*
|
|
2464
|
-
* Apps can communicate with each other via service names.
|
|
2465
|
-
* @internal Exported for testing
|
|
2326
|
+
* Get application ID from state
|
|
2466
2327
|
*/
|
|
2467
|
-
function
|
|
2468
|
-
|
|
2469
|
-
const apps = Object.entries(workspace.apps);
|
|
2470
|
-
const services = workspace.services;
|
|
2471
|
-
const hasPostgres = services.db !== void 0 && services.db !== false;
|
|
2472
|
-
const hasRedis = services.cache !== void 0 && services.cache !== false;
|
|
2473
|
-
const hasMail = services.mail !== void 0 && services.mail !== false;
|
|
2474
|
-
const postgresImage = getInfraServiceImage("postgres", services.db);
|
|
2475
|
-
const redisImage = getInfraServiceImage("redis", services.cache);
|
|
2476
|
-
let yaml = `# Docker Compose for ${workspace.name} workspace
|
|
2477
|
-
# Generated by gkm - do not edit manually
|
|
2478
|
-
|
|
2479
|
-
services:
|
|
2480
|
-
`;
|
|
2481
|
-
for (const [appName, app] of apps) yaml += generateAppService(appName, app, apps, {
|
|
2482
|
-
registry,
|
|
2483
|
-
hasPostgres,
|
|
2484
|
-
hasRedis
|
|
2485
|
-
});
|
|
2486
|
-
if (hasPostgres) yaml += `
|
|
2487
|
-
postgres:
|
|
2488
|
-
image: ${postgresImage}
|
|
2489
|
-
container_name: ${workspace.name}-postgres
|
|
2490
|
-
restart: unless-stopped
|
|
2491
|
-
environment:
|
|
2492
|
-
POSTGRES_USER: \${POSTGRES_USER:-postgres}
|
|
2493
|
-
POSTGRES_PASSWORD: \${POSTGRES_PASSWORD:-postgres}
|
|
2494
|
-
POSTGRES_DB: \${POSTGRES_DB:-app}
|
|
2495
|
-
volumes:
|
|
2496
|
-
- postgres_data:/var/lib/postgresql/data
|
|
2497
|
-
healthcheck:
|
|
2498
|
-
test: ["CMD-SHELL", "pg_isready -U postgres"]
|
|
2499
|
-
interval: 5s
|
|
2500
|
-
timeout: 5s
|
|
2501
|
-
retries: 5
|
|
2502
|
-
networks:
|
|
2503
|
-
- workspace-network
|
|
2504
|
-
`;
|
|
2505
|
-
if (hasRedis) yaml += `
|
|
2506
|
-
redis:
|
|
2507
|
-
image: ${redisImage}
|
|
2508
|
-
container_name: ${workspace.name}-redis
|
|
2509
|
-
restart: unless-stopped
|
|
2510
|
-
volumes:
|
|
2511
|
-
- redis_data:/data
|
|
2512
|
-
healthcheck:
|
|
2513
|
-
test: ["CMD", "redis-cli", "ping"]
|
|
2514
|
-
interval: 5s
|
|
2515
|
-
timeout: 5s
|
|
2516
|
-
retries: 5
|
|
2517
|
-
networks:
|
|
2518
|
-
- workspace-network
|
|
2519
|
-
`;
|
|
2520
|
-
if (hasMail) yaml += `
|
|
2521
|
-
mailpit:
|
|
2522
|
-
image: axllent/mailpit:latest
|
|
2523
|
-
container_name: ${workspace.name}-mailpit
|
|
2524
|
-
restart: unless-stopped
|
|
2525
|
-
ports:
|
|
2526
|
-
- "8025:8025" # Web UI
|
|
2527
|
-
- "1025:1025" # SMTP
|
|
2528
|
-
networks:
|
|
2529
|
-
- workspace-network
|
|
2530
|
-
`;
|
|
2531
|
-
yaml += `
|
|
2532
|
-
volumes:
|
|
2533
|
-
`;
|
|
2534
|
-
if (hasPostgres) yaml += ` postgres_data:
|
|
2535
|
-
`;
|
|
2536
|
-
if (hasRedis) yaml += ` redis_data:
|
|
2537
|
-
`;
|
|
2538
|
-
yaml += `
|
|
2539
|
-
networks:
|
|
2540
|
-
workspace-network:
|
|
2541
|
-
driver: bridge
|
|
2542
|
-
`;
|
|
2543
|
-
return yaml;
|
|
2328
|
+
function getApplicationId(state, appName) {
|
|
2329
|
+
return state?.applications[appName];
|
|
2544
2330
|
}
|
|
2545
2331
|
/**
|
|
2546
|
-
*
|
|
2332
|
+
* Set application ID in state (mutates state)
|
|
2547
2333
|
*/
|
|
2548
|
-
function
|
|
2549
|
-
|
|
2550
|
-
postgres: "postgres:16-alpine",
|
|
2551
|
-
redis: "redis:7-alpine"
|
|
2552
|
-
};
|
|
2553
|
-
if (!config$1 || config$1 === true) return defaults[serviceName];
|
|
2554
|
-
if (typeof config$1 === "object") {
|
|
2555
|
-
if (config$1.image) return config$1.image;
|
|
2556
|
-
if (config$1.version) {
|
|
2557
|
-
const baseImage = serviceName === "postgres" ? "postgres" : "redis";
|
|
2558
|
-
return `${baseImage}:${config$1.version}`;
|
|
2559
|
-
}
|
|
2560
|
-
}
|
|
2561
|
-
return defaults[serviceName];
|
|
2334
|
+
function setApplicationId(state, appName, applicationId) {
|
|
2335
|
+
state.applications[appName] = applicationId;
|
|
2562
2336
|
}
|
|
2563
2337
|
/**
|
|
2564
|
-
*
|
|
2338
|
+
* Get postgres ID from state
|
|
2565
2339
|
*/
|
|
2566
|
-
function
|
|
2567
|
-
|
|
2568
|
-
const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
|
|
2569
|
-
const healthCheckPath = app.type === "frontend" ? "/" : "/health";
|
|
2570
|
-
const healthCheckCmd = app.type === "frontend" ? `["CMD", "wget", "-q", "--spider", "http://localhost:${app.port}/"]` : `["CMD", "wget", "-q", "--spider", "http://localhost:${app.port}${healthCheckPath}"]`;
|
|
2571
|
-
let yaml = `
|
|
2572
|
-
${appName}:
|
|
2573
|
-
build:
|
|
2574
|
-
context: .
|
|
2575
|
-
dockerfile: .gkm/docker/Dockerfile.${appName}
|
|
2576
|
-
image: ${imageRef}\${${appName.toUpperCase()}_IMAGE:-${appName}}:\${TAG:-latest}
|
|
2577
|
-
container_name: ${appName}
|
|
2578
|
-
restart: unless-stopped
|
|
2579
|
-
ports:
|
|
2580
|
-
- "\${${appName.toUpperCase()}_PORT:-${app.port}}:${app.port}"
|
|
2581
|
-
environment:
|
|
2582
|
-
- NODE_ENV=production
|
|
2583
|
-
- PORT=${app.port}
|
|
2584
|
-
`;
|
|
2585
|
-
for (const dep of app.dependencies) {
|
|
2586
|
-
const depApp = allApps.find(([name$1]) => name$1 === dep)?.[1];
|
|
2587
|
-
if (depApp) yaml += ` - ${dep.toUpperCase()}_URL=http://${dep}:${depApp.port}
|
|
2588
|
-
`;
|
|
2589
|
-
}
|
|
2590
|
-
if (app.type === "backend") {
|
|
2591
|
-
if (hasPostgres) yaml += ` - DATABASE_URL=\${DATABASE_URL:-postgresql://postgres:postgres@postgres:5432/app}
|
|
2592
|
-
`;
|
|
2593
|
-
if (hasRedis) yaml += ` - REDIS_URL=\${REDIS_URL:-redis://redis:6379}
|
|
2594
|
-
`;
|
|
2595
|
-
}
|
|
2596
|
-
yaml += ` healthcheck:
|
|
2597
|
-
test: ${healthCheckCmd}
|
|
2598
|
-
interval: 30s
|
|
2599
|
-
timeout: 3s
|
|
2600
|
-
retries: 3
|
|
2601
|
-
`;
|
|
2602
|
-
const dependencies$1 = [...app.dependencies];
|
|
2603
|
-
if (app.type === "backend") {
|
|
2604
|
-
if (hasPostgres) dependencies$1.push("postgres");
|
|
2605
|
-
if (hasRedis) dependencies$1.push("redis");
|
|
2606
|
-
}
|
|
2607
|
-
if (dependencies$1.length > 0) {
|
|
2608
|
-
yaml += ` depends_on:
|
|
2609
|
-
`;
|
|
2610
|
-
for (const dep of dependencies$1) yaml += ` ${dep}:
|
|
2611
|
-
condition: service_healthy
|
|
2612
|
-
`;
|
|
2613
|
-
}
|
|
2614
|
-
yaml += ` networks:
|
|
2615
|
-
- workspace-network
|
|
2616
|
-
`;
|
|
2617
|
-
return yaml;
|
|
2340
|
+
function getPostgresId(state) {
|
|
2341
|
+
return state?.services.postgresId;
|
|
2618
2342
|
}
|
|
2619
|
-
|
|
2620
|
-
//#endregion
|
|
2621
|
-
//#region src/docker/templates.ts
|
|
2622
|
-
const LOCKFILES = [
|
|
2623
|
-
["pnpm-lock.yaml", "pnpm"],
|
|
2624
|
-
["bun.lockb", "bun"],
|
|
2625
|
-
["yarn.lock", "yarn"],
|
|
2626
|
-
["package-lock.json", "npm"]
|
|
2627
|
-
];
|
|
2628
2343
|
/**
|
|
2629
|
-
*
|
|
2630
|
-
* Walks up the directory tree to find lockfile (for monorepos)
|
|
2344
|
+
* Set postgres ID in state (mutates state)
|
|
2631
2345
|
*/
|
|
2632
|
-
function
|
|
2633
|
-
|
|
2634
|
-
const root = parse(dir).root;
|
|
2635
|
-
while (dir !== root) {
|
|
2636
|
-
for (const [lockfile, pm] of LOCKFILES) if (existsSync(join(dir, lockfile))) return pm;
|
|
2637
|
-
dir = dirname(dir);
|
|
2638
|
-
}
|
|
2639
|
-
for (const [lockfile, pm] of LOCKFILES) if (existsSync(join(root, lockfile))) return pm;
|
|
2640
|
-
return "pnpm";
|
|
2346
|
+
function setPostgresId(state, postgresId) {
|
|
2347
|
+
state.services.postgresId = postgresId;
|
|
2641
2348
|
}
|
|
2642
2349
|
/**
|
|
2643
|
-
*
|
|
2644
|
-
* Returns the full path to the lockfile, or null if not found
|
|
2350
|
+
* Get redis ID from state
|
|
2645
2351
|
*/
|
|
2646
|
-
function
|
|
2647
|
-
|
|
2648
|
-
const root = parse(dir).root;
|
|
2649
|
-
while (dir !== root) {
|
|
2650
|
-
for (const [lockfile] of LOCKFILES) {
|
|
2651
|
-
const lockfilePath = join(dir, lockfile);
|
|
2652
|
-
if (existsSync(lockfilePath)) return lockfilePath;
|
|
2653
|
-
}
|
|
2654
|
-
dir = dirname(dir);
|
|
2655
|
-
}
|
|
2656
|
-
for (const [lockfile] of LOCKFILES) {
|
|
2657
|
-
const lockfilePath = join(root, lockfile);
|
|
2658
|
-
if (existsSync(lockfilePath)) return lockfilePath;
|
|
2659
|
-
}
|
|
2660
|
-
return null;
|
|
2352
|
+
function getRedisId(state) {
|
|
2353
|
+
return state?.services.redisId;
|
|
2661
2354
|
}
|
|
2662
2355
|
/**
|
|
2663
|
-
*
|
|
2356
|
+
* Set redis ID in state (mutates state)
|
|
2664
2357
|
*/
|
|
2665
|
-
function
|
|
2666
|
-
|
|
2667
|
-
if (!lockfilePath) return false;
|
|
2668
|
-
const lockfileDir = dirname(lockfilePath);
|
|
2669
|
-
return lockfileDir !== cwd;
|
|
2358
|
+
function setRedisId(state, redisId) {
|
|
2359
|
+
state.services.redisId = redisId;
|
|
2670
2360
|
}
|
|
2671
2361
|
/**
|
|
2672
|
-
*
|
|
2362
|
+
* Set app credentials in state (mutates state)
|
|
2673
2363
|
*/
|
|
2674
|
-
function
|
|
2675
|
-
|
|
2676
|
-
|
|
2677
|
-
while (dir !== root) {
|
|
2678
|
-
if (existsSync(join(dir, "turbo.json"))) return true;
|
|
2679
|
-
dir = dirname(dir);
|
|
2680
|
-
}
|
|
2681
|
-
return existsSync(join(root, "turbo.json"));
|
|
2364
|
+
function setAppCredentials(state, appName, credentials) {
|
|
2365
|
+
if (!state.appCredentials) state.appCredentials = {};
|
|
2366
|
+
state.appCredentials[appName] = credentials;
|
|
2682
2367
|
}
|
|
2683
2368
|
/**
|
|
2684
|
-
* Get
|
|
2685
|
-
* Turbo prune creates a subset that may not perfectly match the lockfile
|
|
2369
|
+
* Get all app credentials from state
|
|
2686
2370
|
*/
|
|
2687
|
-
function
|
|
2688
|
-
|
|
2689
|
-
pnpm: "pnpm install",
|
|
2690
|
-
npm: "npm install",
|
|
2691
|
-
yarn: "yarn install",
|
|
2692
|
-
bun: "bun install"
|
|
2693
|
-
};
|
|
2694
|
-
return commands[pm];
|
|
2371
|
+
function getAllAppCredentials(state) {
|
|
2372
|
+
return state?.appCredentials ?? {};
|
|
2695
2373
|
}
|
|
2696
2374
|
/**
|
|
2697
|
-
* Get
|
|
2375
|
+
* Get a generated secret for an app
|
|
2698
2376
|
*/
|
|
2699
|
-
function
|
|
2700
|
-
|
|
2701
|
-
|
|
2702
|
-
|
|
2703
|
-
|
|
2704
|
-
|
|
2705
|
-
|
|
2706
|
-
|
|
2707
|
-
|
|
2708
|
-
|
|
2709
|
-
|
|
2710
|
-
|
|
2711
|
-
|
|
2712
|
-
|
|
2713
|
-
|
|
2714
|
-
|
|
2715
|
-
|
|
2716
|
-
|
|
2717
|
-
|
|
2718
|
-
cacheTarget: "/root/.npm",
|
|
2719
|
-
cacheId: "npm",
|
|
2720
|
-
run: "npm run",
|
|
2721
|
-
exec: "npx",
|
|
2722
|
-
dlx: "npx",
|
|
2723
|
-
addGlobal: "npm install -g"
|
|
2724
|
-
},
|
|
2725
|
-
yarn: {
|
|
2726
|
-
install: "corepack enable && corepack prepare yarn@stable --activate",
|
|
2727
|
-
lockfile: "yarn.lock",
|
|
2728
|
-
fetch: "",
|
|
2729
|
-
installCmd: "yarn install --frozen-lockfile",
|
|
2730
|
-
cacheTarget: "/root/.yarn/cache",
|
|
2731
|
-
cacheId: "yarn",
|
|
2732
|
-
run: "yarn",
|
|
2733
|
-
exec: "yarn exec",
|
|
2734
|
-
dlx: "yarn dlx",
|
|
2735
|
-
addGlobal: "yarn global add"
|
|
2736
|
-
},
|
|
2737
|
-
bun: {
|
|
2738
|
-
install: "npm install -g bun",
|
|
2739
|
-
lockfile: "bun.lockb",
|
|
2740
|
-
fetch: "",
|
|
2741
|
-
installCmd: "bun install --frozen-lockfile",
|
|
2742
|
-
cacheTarget: "/root/.bun/install/cache",
|
|
2743
|
-
cacheId: "bun",
|
|
2744
|
-
run: "bun run",
|
|
2745
|
-
exec: "bunx",
|
|
2746
|
-
dlx: "bunx",
|
|
2747
|
-
addGlobal: "bun add -g"
|
|
2748
|
-
}
|
|
2377
|
+
function getGeneratedSecret(state, appName, secretName) {
|
|
2378
|
+
return state?.generatedSecrets?.[appName]?.[secretName];
|
|
2379
|
+
}
|
|
2380
|
+
/**
|
|
2381
|
+
* Set a generated secret for an app (mutates state)
|
|
2382
|
+
*/
|
|
2383
|
+
function setGeneratedSecret(state, appName, secretName, value) {
|
|
2384
|
+
if (!state.generatedSecrets) state.generatedSecrets = {};
|
|
2385
|
+
if (!state.generatedSecrets[appName]) state.generatedSecrets[appName] = {};
|
|
2386
|
+
state.generatedSecrets[appName][secretName] = value;
|
|
2387
|
+
}
|
|
2388
|
+
/**
|
|
2389
|
+
* Set DNS verification record for a hostname (mutates state)
|
|
2390
|
+
*/
|
|
2391
|
+
function setDnsVerification(state, hostname, serverIp) {
|
|
2392
|
+
if (!state.dnsVerified) state.dnsVerified = {};
|
|
2393
|
+
state.dnsVerified[hostname] = {
|
|
2394
|
+
serverIp,
|
|
2395
|
+
verifiedAt: (/* @__PURE__ */ new Date()).toISOString()
|
|
2749
2396
|
};
|
|
2750
|
-
return configs[pm];
|
|
2751
2397
|
}
|
|
2752
2398
|
/**
|
|
2753
|
-
*
|
|
2754
|
-
* Optimized for build speed with:
|
|
2755
|
-
* - BuildKit cache mounts for package manager store
|
|
2756
|
-
* - pnpm fetch for better layer caching (when using pnpm)
|
|
2757
|
-
* - Optional turbo prune for monorepos
|
|
2399
|
+
* Check if a hostname is already verified with the given IP
|
|
2758
2400
|
*/
|
|
2759
|
-
function
|
|
2760
|
-
const
|
|
2761
|
-
|
|
2762
|
-
...options,
|
|
2763
|
-
turboPackage: turboPackage ?? "api"
|
|
2764
|
-
});
|
|
2765
|
-
const pm = getPmConfig(packageManager);
|
|
2766
|
-
const installPm = pm.install ? `\n# Install ${packageManager}\nRUN ${pm.install}\n` : "";
|
|
2767
|
-
const hasFetch = packageManager === "pnpm";
|
|
2768
|
-
const depsStage = hasFetch ? `# Copy lockfile first for better caching
|
|
2769
|
-
COPY ${pm.lockfile} ./
|
|
2770
|
-
|
|
2771
|
-
# Fetch dependencies (downloads to virtual store, cached separately)
|
|
2772
|
-
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
|
|
2773
|
-
${pm.fetch}
|
|
2774
|
-
|
|
2775
|
-
# Copy package.json after fetch
|
|
2776
|
-
COPY package.json ./
|
|
2777
|
-
|
|
2778
|
-
# Install from cache (fast - no network needed)
|
|
2779
|
-
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
|
|
2780
|
-
${pm.installCmd}` : `# Copy package files
|
|
2781
|
-
COPY package.json ${pm.lockfile} ./
|
|
2782
|
-
|
|
2783
|
-
# Install dependencies with cache
|
|
2784
|
-
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
|
|
2785
|
-
${pm.installCmd}`;
|
|
2786
|
-
return `# syntax=docker/dockerfile:1
|
|
2787
|
-
# Stage 1: Dependencies
|
|
2788
|
-
FROM ${baseImage} AS deps
|
|
2789
|
-
|
|
2790
|
-
WORKDIR /app
|
|
2791
|
-
${installPm}
|
|
2792
|
-
${depsStage}
|
|
2793
|
-
|
|
2794
|
-
# Stage 2: Build
|
|
2795
|
-
FROM deps AS builder
|
|
2796
|
-
|
|
2797
|
-
WORKDIR /app
|
|
2798
|
-
|
|
2799
|
-
# Copy source (deps already installed)
|
|
2800
|
-
COPY . .
|
|
2801
|
-
|
|
2802
|
-
# Debug: Show node_modules/.bin contents and build production server
|
|
2803
|
-
RUN echo "=== node_modules/.bin contents ===" && \
|
|
2804
|
-
ls -la node_modules/.bin/ 2>/dev/null || echo "node_modules/.bin not found" && \
|
|
2805
|
-
echo "=== Checking for gkm ===" && \
|
|
2806
|
-
which gkm 2>/dev/null || echo "gkm not in PATH" && \
|
|
2807
|
-
ls -la node_modules/.bin/gkm 2>/dev/null || echo "gkm binary not found in node_modules/.bin" && \
|
|
2808
|
-
echo "=== Running build ===" && \
|
|
2809
|
-
./node_modules/.bin/gkm build --provider server --production
|
|
2810
|
-
|
|
2811
|
-
# Stage 3: Production
|
|
2812
|
-
FROM ${baseImage} AS runner
|
|
2813
|
-
|
|
2814
|
-
WORKDIR /app
|
|
2815
|
-
|
|
2816
|
-
# Install tini for proper signal handling as PID 1
|
|
2817
|
-
RUN apk add --no-cache tini
|
|
2818
|
-
|
|
2819
|
-
# Create non-root user
|
|
2820
|
-
RUN addgroup --system --gid 1001 nodejs && \\
|
|
2821
|
-
adduser --system --uid 1001 hono
|
|
2822
|
-
|
|
2823
|
-
# Copy bundled server
|
|
2824
|
-
COPY --from=builder --chown=hono:nodejs /app/.gkm/server/dist/server.mjs ./
|
|
2825
|
-
|
|
2826
|
-
# Environment
|
|
2827
|
-
ENV NODE_ENV=production
|
|
2828
|
-
ENV PORT=${port}
|
|
2829
|
-
|
|
2830
|
-
# Health check
|
|
2831
|
-
HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
|
|
2832
|
-
CMD wget -qO- http://localhost:${port}${healthCheckPath} > /dev/null 2>&1 || exit 1
|
|
2833
|
-
|
|
2834
|
-
# Switch to non-root user
|
|
2835
|
-
USER hono
|
|
2836
|
-
|
|
2837
|
-
EXPOSE ${port}
|
|
2838
|
-
|
|
2839
|
-
# Use tini as entrypoint to handle PID 1 responsibilities
|
|
2840
|
-
ENTRYPOINT ["/sbin/tini", "--"]
|
|
2841
|
-
CMD ["node", "server.mjs"]
|
|
2842
|
-
`;
|
|
2401
|
+
function isDnsVerified(state, hostname, serverIp) {
|
|
2402
|
+
const record = state?.dnsVerified?.[hostname];
|
|
2403
|
+
return record?.serverIp === serverIp;
|
|
2843
2404
|
}
|
|
2405
|
+
|
|
2406
|
+
//#endregion
|
|
2407
|
+
//#region src/deploy/dns/hostinger-api.ts
|
|
2844
2408
|
/**
|
|
2845
|
-
*
|
|
2846
|
-
*
|
|
2409
|
+
* Hostinger DNS API client
|
|
2410
|
+
*
|
|
2411
|
+
* API Documentation: https://developers.hostinger.com/
|
|
2412
|
+
* Authentication: Bearer token from hpanel.hostinger.com/profile/api
|
|
2847
2413
|
*/
|
|
2848
|
-
|
|
2849
|
-
const { baseImage, port, healthCheckPath, turboPackage, packageManager } = options;
|
|
2850
|
-
const pm = getPmConfig(packageManager);
|
|
2851
|
-
const installPm = pm.install ? `RUN ${pm.install}` : "";
|
|
2852
|
-
const turboInstallCmd = getTurboInstallCmd(packageManager);
|
|
2853
|
-
const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
|
|
2854
|
-
return `# syntax=docker/dockerfile:1
|
|
2855
|
-
# Stage 1: Prune monorepo
|
|
2856
|
-
FROM ${baseImage} AS pruner
|
|
2857
|
-
|
|
2858
|
-
WORKDIR /app
|
|
2859
|
-
|
|
2860
|
-
${installPm}
|
|
2861
|
-
|
|
2862
|
-
COPY . .
|
|
2863
|
-
|
|
2864
|
-
# Prune to only include necessary packages
|
|
2865
|
-
RUN ${turboCmd} prune ${turboPackage} --docker
|
|
2866
|
-
|
|
2867
|
-
# Stage 2: Install dependencies
|
|
2868
|
-
FROM ${baseImage} AS deps
|
|
2869
|
-
|
|
2870
|
-
WORKDIR /app
|
|
2871
|
-
|
|
2872
|
-
${installPm}
|
|
2873
|
-
|
|
2874
|
-
# Copy pruned lockfile and package.jsons
|
|
2875
|
-
COPY --from=pruner /app/out/${pm.lockfile} ./
|
|
2876
|
-
COPY --from=pruner /app/out/json/ ./
|
|
2877
|
-
|
|
2878
|
-
# Install dependencies (no frozen-lockfile since turbo prune creates a subset)
|
|
2879
|
-
RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
|
|
2880
|
-
${turboInstallCmd}
|
|
2881
|
-
|
|
2882
|
-
# Stage 3: Build
|
|
2883
|
-
FROM deps AS builder
|
|
2884
|
-
|
|
2885
|
-
WORKDIR /app
|
|
2886
|
-
|
|
2887
|
-
# Copy pruned source
|
|
2888
|
-
COPY --from=pruner /app/out/full/ ./
|
|
2889
|
-
|
|
2890
|
-
# Debug: Show node_modules/.bin contents and build production server
|
|
2891
|
-
RUN echo "=== node_modules/.bin contents ===" && \
|
|
2892
|
-
ls -la node_modules/.bin/ 2>/dev/null || echo "node_modules/.bin not found" && \
|
|
2893
|
-
echo "=== Checking for gkm ===" && \
|
|
2894
|
-
which gkm 2>/dev/null || echo "gkm not in PATH" && \
|
|
2895
|
-
ls -la node_modules/.bin/gkm 2>/dev/null || echo "gkm binary not found in node_modules/.bin" && \
|
|
2896
|
-
echo "=== Running build ===" && \
|
|
2897
|
-
./node_modules/.bin/gkm build --provider server --production
|
|
2898
|
-
|
|
2899
|
-
# Stage 4: Production
|
|
2900
|
-
FROM ${baseImage} AS runner
|
|
2901
|
-
|
|
2902
|
-
WORKDIR /app
|
|
2903
|
-
|
|
2904
|
-
RUN apk add --no-cache tini
|
|
2905
|
-
|
|
2906
|
-
RUN addgroup --system --gid 1001 nodejs && \\
|
|
2907
|
-
adduser --system --uid 1001 hono
|
|
2908
|
-
|
|
2909
|
-
COPY --from=builder --chown=hono:nodejs /app/.gkm/server/dist/server.mjs ./
|
|
2910
|
-
|
|
2911
|
-
ENV NODE_ENV=production
|
|
2912
|
-
ENV PORT=${port}
|
|
2913
|
-
|
|
2914
|
-
HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
|
|
2915
|
-
CMD wget -qO- http://localhost:${port}${healthCheckPath} > /dev/null 2>&1 || exit 1
|
|
2916
|
-
|
|
2917
|
-
USER hono
|
|
2918
|
-
|
|
2919
|
-
EXPOSE ${port}
|
|
2920
|
-
|
|
2921
|
-
ENTRYPOINT ["/sbin/tini", "--"]
|
|
2922
|
-
CMD ["node", "server.mjs"]
|
|
2923
|
-
`;
|
|
2924
|
-
}
|
|
2414
|
+
const HOSTINGER_API_BASE = "https://developers.hostinger.com";
|
|
2925
2415
|
/**
|
|
2926
|
-
*
|
|
2416
|
+
* Hostinger API error
|
|
2927
2417
|
*/
|
|
2928
|
-
|
|
2929
|
-
|
|
2930
|
-
|
|
2931
|
-
|
|
2932
|
-
|
|
2933
|
-
|
|
2934
|
-
|
|
2935
|
-
|
|
2936
|
-
|
|
2937
|
-
RUN apk add --no-cache tini
|
|
2938
|
-
|
|
2939
|
-
# Create non-root user
|
|
2940
|
-
RUN addgroup --system --gid 1001 nodejs && \\
|
|
2941
|
-
adduser --system --uid 1001 hono
|
|
2942
|
-
|
|
2943
|
-
# Copy pre-built bundle
|
|
2944
|
-
COPY .gkm/server/dist/server.mjs ./
|
|
2945
|
-
|
|
2946
|
-
# Environment
|
|
2947
|
-
ENV NODE_ENV=production
|
|
2948
|
-
ENV PORT=${port}
|
|
2949
|
-
|
|
2950
|
-
# Health check
|
|
2951
|
-
HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
|
|
2952
|
-
CMD wget -qO- http://localhost:${port}${healthCheckPath} > /dev/null 2>&1 || exit 1
|
|
2953
|
-
|
|
2954
|
-
# Switch to non-root user
|
|
2955
|
-
USER hono
|
|
2956
|
-
|
|
2957
|
-
EXPOSE ${port}
|
|
2958
|
-
|
|
2959
|
-
# Use tini as entrypoint to handle PID 1 responsibilities
|
|
2960
|
-
ENTRYPOINT ["/sbin/tini", "--"]
|
|
2961
|
-
CMD ["node", "server.mjs"]
|
|
2962
|
-
`;
|
|
2963
|
-
}
|
|
2418
|
+
var HostingerApiError = class extends Error {
|
|
2419
|
+
constructor(message, status, statusText, errors) {
|
|
2420
|
+
super(message);
|
|
2421
|
+
this.status = status;
|
|
2422
|
+
this.statusText = statusText;
|
|
2423
|
+
this.errors = errors;
|
|
2424
|
+
this.name = "HostingerApiError";
|
|
2425
|
+
}
|
|
2426
|
+
};
|
|
2964
2427
|
/**
|
|
2965
|
-
*
|
|
2428
|
+
* Hostinger DNS API client
|
|
2429
|
+
*
|
|
2430
|
+
* @example
|
|
2431
|
+
* ```ts
|
|
2432
|
+
* const api = new HostingerApi(token);
|
|
2433
|
+
*
|
|
2434
|
+
* // Get all records for a domain
|
|
2435
|
+
* const records = await api.getRecords('traflabs.io');
|
|
2436
|
+
*
|
|
2437
|
+
* // Create/update records
|
|
2438
|
+
* await api.upsertRecords('traflabs.io', [
|
|
2439
|
+
* { name: 'api.joemoer', type: 'A', ttl: 300, records: ['1.2.3.4'] }
|
|
2440
|
+
* ]);
|
|
2441
|
+
* ```
|
|
2966
2442
|
*/
|
|
2967
|
-
|
|
2968
|
-
|
|
2969
|
-
|
|
2970
|
-
.
|
|
2971
|
-
|
|
2972
|
-
|
|
2973
|
-
|
|
2974
|
-
|
|
2975
|
-
|
|
2976
|
-
|
|
2977
|
-
|
|
2978
|
-
|
|
2979
|
-
|
|
2980
|
-
|
|
2981
|
-
|
|
2982
|
-
|
|
2983
|
-
|
|
2984
|
-
|
|
2985
|
-
.
|
|
2986
|
-
|
|
2987
|
-
|
|
2988
|
-
|
|
2989
|
-
|
|
2990
|
-
|
|
2991
|
-
|
|
2992
|
-
|
|
2993
|
-
|
|
2994
|
-
|
|
2995
|
-
|
|
2996
|
-
|
|
2997
|
-
|
|
2998
|
-
|
|
2999
|
-
|
|
3000
|
-
|
|
3001
|
-
|
|
3002
|
-
|
|
3003
|
-
|
|
3004
|
-
|
|
3005
|
-
.
|
|
3006
|
-
|
|
2443
|
+
var HostingerApi = class {
|
|
2444
|
+
token;
|
|
2445
|
+
constructor(token) {
|
|
2446
|
+
this.token = token;
|
|
2447
|
+
}
|
|
2448
|
+
/**
|
|
2449
|
+
* Make a request to the Hostinger API
|
|
2450
|
+
*/
|
|
2451
|
+
async request(method, endpoint, body) {
|
|
2452
|
+
const url = `${HOSTINGER_API_BASE}${endpoint}`;
|
|
2453
|
+
const response = await fetch(url, {
|
|
2454
|
+
method,
|
|
2455
|
+
headers: {
|
|
2456
|
+
"Content-Type": "application/json",
|
|
2457
|
+
Authorization: `Bearer ${this.token}`
|
|
2458
|
+
},
|
|
2459
|
+
body: body ? JSON.stringify(body) : void 0
|
|
2460
|
+
});
|
|
2461
|
+
if (!response.ok) {
|
|
2462
|
+
let errorMessage = `Hostinger API error: ${response.status} ${response.statusText}`;
|
|
2463
|
+
let errors;
|
|
2464
|
+
try {
|
|
2465
|
+
const errorBody = await response.json();
|
|
2466
|
+
if (errorBody.message) errorMessage = `Hostinger API error: ${errorBody.message}`;
|
|
2467
|
+
errors = errorBody.errors;
|
|
2468
|
+
} catch {}
|
|
2469
|
+
throw new HostingerApiError(errorMessage, response.status, response.statusText, errors);
|
|
2470
|
+
}
|
|
2471
|
+
const text = await response.text();
|
|
2472
|
+
if (!text || text.trim() === "") return void 0;
|
|
2473
|
+
return JSON.parse(text);
|
|
2474
|
+
}
|
|
2475
|
+
/**
|
|
2476
|
+
* Get all DNS records for a domain
|
|
2477
|
+
*
|
|
2478
|
+
* @param domain - Root domain (e.g., 'traflabs.io')
|
|
2479
|
+
*/
|
|
2480
|
+
async getRecords(domain) {
|
|
2481
|
+
const response = await this.request("GET", `/api/dns/v1/zones/${domain}`);
|
|
2482
|
+
return response.data || [];
|
|
2483
|
+
}
|
|
2484
|
+
/**
|
|
2485
|
+
* Create or update DNS records
|
|
2486
|
+
*
|
|
2487
|
+
* @param domain - Root domain (e.g., 'traflabs.io')
|
|
2488
|
+
* @param records - Records to create/update
|
|
2489
|
+
* @param overwrite - If true, replaces all existing records. If false, merges with existing.
|
|
2490
|
+
*/
|
|
2491
|
+
async upsertRecords(domain, records, overwrite = false) {
|
|
2492
|
+
await this.request("PUT", `/api/dns/v1/zones/${domain}`, {
|
|
2493
|
+
overwrite,
|
|
2494
|
+
zone: records
|
|
2495
|
+
});
|
|
2496
|
+
}
|
|
2497
|
+
/**
|
|
2498
|
+
* Validate DNS records before applying
|
|
2499
|
+
*
|
|
2500
|
+
* @param domain - Root domain (e.g., 'traflabs.io')
|
|
2501
|
+
* @param records - Records to validate
|
|
2502
|
+
* @returns true if valid, throws if invalid
|
|
2503
|
+
*/
|
|
2504
|
+
async validateRecords(domain, records) {
|
|
2505
|
+
await this.request("POST", `/api/dns/v1/zones/${domain}/validate`, {
|
|
2506
|
+
overwrite: false,
|
|
2507
|
+
zone: records
|
|
2508
|
+
});
|
|
2509
|
+
return true;
|
|
2510
|
+
}
|
|
2511
|
+
/**
|
|
2512
|
+
* Delete specific DNS records
|
|
2513
|
+
*
|
|
2514
|
+
* @param domain - Root domain (e.g., 'traflabs.io')
|
|
2515
|
+
* @param filters - Filters to match records for deletion
|
|
2516
|
+
*/
|
|
2517
|
+
async deleteRecords(domain, filters) {
|
|
2518
|
+
await this.request("DELETE", `/api/dns/v1/zones/${domain}`, { filters });
|
|
2519
|
+
}
|
|
2520
|
+
/**
|
|
2521
|
+
* Check if a specific record exists
|
|
2522
|
+
*
|
|
2523
|
+
* @param domain - Root domain (e.g., 'traflabs.io')
|
|
2524
|
+
* @param name - Subdomain name (e.g., 'api.joemoer')
|
|
2525
|
+
* @param type - Record type (e.g., 'A')
|
|
2526
|
+
*/
|
|
2527
|
+
async recordExists(domain, name$1, type$1 = "A") {
|
|
2528
|
+
const records = await this.getRecords(domain);
|
|
2529
|
+
return records.some((r) => r.name === name$1 && r.type === type$1);
|
|
2530
|
+
}
|
|
2531
|
+
/**
|
|
2532
|
+
* Create a single A record if it doesn't exist
|
|
2533
|
+
*
|
|
2534
|
+
* @param domain - Root domain (e.g., 'traflabs.io')
|
|
2535
|
+
* @param subdomain - Subdomain name (e.g., 'api.joemoer')
|
|
2536
|
+
* @param ip - IP address to point to
|
|
2537
|
+
* @param ttl - TTL in seconds (default: 300)
|
|
2538
|
+
* @returns true if created, false if already exists
|
|
2539
|
+
*/
|
|
2540
|
+
async createARecordIfNotExists(domain, subdomain, ip, ttl = 300) {
|
|
2541
|
+
const exists = await this.recordExists(domain, subdomain, "A");
|
|
2542
|
+
if (exists) return false;
|
|
2543
|
+
await this.upsertRecords(domain, [{
|
|
2544
|
+
name: subdomain,
|
|
2545
|
+
type: "A",
|
|
2546
|
+
ttl,
|
|
2547
|
+
records: [{ content: ip }]
|
|
2548
|
+
}]);
|
|
2549
|
+
return true;
|
|
2550
|
+
}
|
|
2551
|
+
};
|
|
3007
2552
|
|
|
3008
|
-
|
|
3009
|
-
|
|
3010
|
-
|
|
3011
|
-
|
|
3012
|
-
|
|
2553
|
+
//#endregion
|
|
2554
|
+
//#region src/deploy/dns/index.ts
|
|
2555
|
+
const logger$6 = console;
|
|
2556
|
+
/**
|
|
2557
|
+
* Resolve IP address from a hostname
|
|
2558
|
+
*/
|
|
2559
|
+
async function resolveHostnameToIp(hostname) {
|
|
2560
|
+
try {
|
|
2561
|
+
const addresses = await lookup(hostname, { family: 4 });
|
|
2562
|
+
return addresses.address;
|
|
2563
|
+
} catch (error) {
|
|
2564
|
+
throw new Error(`Failed to resolve IP for ${hostname}: ${error instanceof Error ? error.message : "Unknown error"}`);
|
|
2565
|
+
}
|
|
3013
2566
|
}
|
|
3014
2567
|
/**
|
|
3015
|
-
*
|
|
2568
|
+
* Extract subdomain from full hostname relative to root domain
|
|
2569
|
+
*
|
|
2570
|
+
* @example
|
|
2571
|
+
* extractSubdomain('api.joemoer.traflabs.io', 'traflabs.io') => 'api.joemoer'
|
|
2572
|
+
* extractSubdomain('joemoer.traflabs.io', 'traflabs.io') => 'joemoer'
|
|
3016
2573
|
*/
|
|
3017
|
-
function
|
|
3018
|
-
|
|
3019
|
-
|
|
3020
|
-
|
|
3021
|
-
# Run any custom startup scripts here
|
|
3022
|
-
# Example: wait for database
|
|
3023
|
-
# until nc -z $DB_HOST $DB_PORT; do
|
|
3024
|
-
# echo "Waiting for database..."
|
|
3025
|
-
# sleep 1
|
|
3026
|
-
# done
|
|
3027
|
-
|
|
3028
|
-
# Execute the main command
|
|
3029
|
-
exec "$@"
|
|
3030
|
-
`;
|
|
2574
|
+
function extractSubdomain(hostname, rootDomain) {
|
|
2575
|
+
if (!hostname.endsWith(rootDomain)) throw new Error(`Hostname ${hostname} is not under root domain ${rootDomain}`);
|
|
2576
|
+
const subdomain = hostname.slice(0, -(rootDomain.length + 1));
|
|
2577
|
+
return subdomain || "@";
|
|
3031
2578
|
}
|
|
3032
2579
|
/**
|
|
3033
|
-
*
|
|
2580
|
+
* Generate required DNS records for a deployment
|
|
3034
2581
|
*/
|
|
3035
|
-
function
|
|
3036
|
-
const
|
|
3037
|
-
|
|
3038
|
-
|
|
3039
|
-
|
|
3040
|
-
|
|
3041
|
-
|
|
3042
|
-
|
|
3043
|
-
|
|
3044
|
-
|
|
3045
|
-
|
|
3046
|
-
|
|
3047
|
-
|
|
3048
|
-
};
|
|
2582
|
+
function generateRequiredRecords(appHostnames, rootDomain, serverIp) {
|
|
2583
|
+
const records = [];
|
|
2584
|
+
for (const [appName, hostname] of appHostnames) {
|
|
2585
|
+
const subdomain = extractSubdomain(hostname, rootDomain);
|
|
2586
|
+
records.push({
|
|
2587
|
+
hostname,
|
|
2588
|
+
subdomain,
|
|
2589
|
+
type: "A",
|
|
2590
|
+
value: serverIp,
|
|
2591
|
+
appName
|
|
2592
|
+
});
|
|
2593
|
+
}
|
|
2594
|
+
return records;
|
|
3049
2595
|
}
|
|
3050
2596
|
/**
|
|
3051
|
-
*
|
|
3052
|
-
* Uses turbo prune for monorepo optimization.
|
|
3053
|
-
* @internal Exported for testing
|
|
2597
|
+
* Print DNS records table
|
|
3054
2598
|
*/
|
|
3055
|
-
function
|
|
3056
|
-
|
|
3057
|
-
|
|
3058
|
-
|
|
3059
|
-
|
|
3060
|
-
const
|
|
3061
|
-
|
|
3062
|
-
|
|
3063
|
-
|
|
3064
|
-
|
|
3065
|
-
|
|
3066
|
-
|
|
3067
|
-
|
|
3068
|
-
|
|
3069
|
-
|
|
3070
|
-
|
|
3071
|
-
$
|
|
3072
|
-
|
|
3073
|
-
|
|
3074
|
-
|
|
3075
|
-
|
|
3076
|
-
|
|
3077
|
-
|
|
3078
|
-
|
|
3079
|
-
|
|
3080
|
-
|
|
3081
|
-
|
|
3082
|
-
|
|
3083
|
-
|
|
3084
|
-
|
|
3085
|
-
|
|
3086
|
-
|
|
3087
|
-
|
|
3088
|
-
|
|
3089
|
-
|
|
3090
|
-
|
|
3091
|
-
|
|
3092
|
-
|
|
3093
|
-
|
|
3094
|
-
|
|
3095
|
-
|
|
3096
|
-
|
|
3097
|
-
|
|
3098
|
-
|
|
3099
|
-
|
|
3100
|
-
$
|
|
3101
|
-
|
|
3102
|
-
|
|
3103
|
-
|
|
3104
|
-
|
|
3105
|
-
|
|
3106
|
-
|
|
3107
|
-
|
|
3108
|
-
|
|
3109
|
-
|
|
3110
|
-
|
|
3111
|
-
|
|
3112
|
-
|
|
3113
|
-
|
|
3114
|
-
|
|
3115
|
-
|
|
3116
|
-
|
|
3117
|
-
|
|
3118
|
-
|
|
3119
|
-
|
|
3120
|
-
|
|
3121
|
-
|
|
3122
|
-
|
|
3123
|
-
|
|
3124
|
-
|
|
3125
|
-
|
|
3126
|
-
|
|
3127
|
-
|
|
3128
|
-
|
|
3129
|
-
|
|
3130
|
-
|
|
3131
|
-
|
|
3132
|
-
|
|
3133
|
-
|
|
3134
|
-
|
|
3135
|
-
|
|
3136
|
-
|
|
3137
|
-
|
|
3138
|
-
|
|
3139
|
-
|
|
3140
|
-
|
|
3141
|
-
|
|
3142
|
-
|
|
3143
|
-
|
|
3144
|
-
|
|
2599
|
+
function printDnsRecordsTable(records, rootDomain) {
|
|
2600
|
+
logger$6.log(`\n 📋 DNS Records for ${rootDomain}:`);
|
|
2601
|
+
logger$6.log(" ┌─────────────────────────────────────┬──────┬─────────────────┬────────┐");
|
|
2602
|
+
logger$6.log(" │ Subdomain │ Type │ Value │ Status │");
|
|
2603
|
+
logger$6.log(" ├─────────────────────────────────────┼──────┼─────────────────┼────────┤");
|
|
2604
|
+
for (const record of records) {
|
|
2605
|
+
const subdomain = record.subdomain.padEnd(35);
|
|
2606
|
+
const type$1 = record.type.padEnd(4);
|
|
2607
|
+
const value = record.value.padEnd(15);
|
|
2608
|
+
let status;
|
|
2609
|
+
if (record.error) status = "✗";
|
|
2610
|
+
else if (record.created) status = "✓ new";
|
|
2611
|
+
else if (record.existed) status = "✓";
|
|
2612
|
+
else status = "?";
|
|
2613
|
+
logger$6.log(` │ ${subdomain} │ ${type$1} │ ${value} │ ${status.padEnd(6)} │`);
|
|
2614
|
+
}
|
|
2615
|
+
logger$6.log(" └─────────────────────────────────────┴──────┴─────────────────┴────────┘");
|
|
2616
|
+
}
|
|
2617
|
+
/**
|
|
2618
|
+
* Print DNS records in a simple format for manual setup
|
|
2619
|
+
*/
|
|
2620
|
+
function printDnsRecordsSimple(records, rootDomain) {
|
|
2621
|
+
logger$6.log("\n 📋 Required DNS Records:");
|
|
2622
|
+
logger$6.log(` Add these A records to your DNS provider (${rootDomain}):\n`);
|
|
2623
|
+
for (const record of records) logger$6.log(` ${record.subdomain} → ${record.value} (A record)`);
|
|
2624
|
+
logger$6.log("");
|
|
2625
|
+
}
|
|
2626
|
+
/**
|
|
2627
|
+
* Prompt for input (reuse from deploy/index.ts pattern)
|
|
2628
|
+
*/
|
|
2629
|
+
async function promptForToken(message) {
|
|
2630
|
+
const { stdin: stdin$1, stdout: stdout$1 } = await import("node:process");
|
|
2631
|
+
if (!stdin$1.isTTY) throw new Error("Interactive input required for Hostinger token.");
|
|
2632
|
+
stdout$1.write(message);
|
|
2633
|
+
return new Promise((resolve$1) => {
|
|
2634
|
+
let value = "";
|
|
2635
|
+
const onData = (char) => {
|
|
2636
|
+
const c = char.toString();
|
|
2637
|
+
if (c === "\n" || c === "\r") {
|
|
2638
|
+
stdin$1.setRawMode(false);
|
|
2639
|
+
stdin$1.pause();
|
|
2640
|
+
stdin$1.removeListener("data", onData);
|
|
2641
|
+
stdout$1.write("\n");
|
|
2642
|
+
resolve$1(value);
|
|
2643
|
+
} else if (c === "") {
|
|
2644
|
+
stdin$1.setRawMode(false);
|
|
2645
|
+
stdin$1.pause();
|
|
2646
|
+
stdout$1.write("\n");
|
|
2647
|
+
process.exit(1);
|
|
2648
|
+
} else if (c === "" || c === "\b") {
|
|
2649
|
+
if (value.length > 0) value = value.slice(0, -1);
|
|
2650
|
+
} else value += c;
|
|
2651
|
+
};
|
|
2652
|
+
stdin$1.setRawMode(true);
|
|
2653
|
+
stdin$1.resume();
|
|
2654
|
+
stdin$1.on("data", onData);
|
|
2655
|
+
});
|
|
2656
|
+
}
|
|
2657
|
+
/**
|
|
2658
|
+
* Create DNS records using the configured provider
|
|
2659
|
+
*/
|
|
2660
|
+
async function createDnsRecords(records, dnsConfig) {
|
|
2661
|
+
const { provider, domain: rootDomain, ttl = 300 } = dnsConfig;
|
|
2662
|
+
if (provider === "manual") return records.map((r) => ({
|
|
2663
|
+
...r,
|
|
2664
|
+
created: false,
|
|
2665
|
+
existed: false
|
|
2666
|
+
}));
|
|
2667
|
+
if (provider === "hostinger") return createHostingerRecords(records, rootDomain, ttl);
|
|
2668
|
+
if (provider === "cloudflare") {
|
|
2669
|
+
logger$6.log(" ⚠ Cloudflare DNS integration not yet implemented");
|
|
2670
|
+
return records.map((r) => ({
|
|
2671
|
+
...r,
|
|
2672
|
+
error: "Cloudflare not implemented"
|
|
2673
|
+
}));
|
|
2674
|
+
}
|
|
2675
|
+
return records;
|
|
2676
|
+
}
|
|
2677
|
+
/**
|
|
2678
|
+
* Create DNS records at Hostinger
|
|
2679
|
+
*/
|
|
2680
|
+
async function createHostingerRecords(records, rootDomain, ttl) {
|
|
2681
|
+
let token = await getHostingerToken();
|
|
2682
|
+
if (!token) {
|
|
2683
|
+
logger$6.log("\n 📋 Hostinger API token not found.");
|
|
2684
|
+
logger$6.log(" Get your token from: https://hpanel.hostinger.com/profile/api\n");
|
|
2685
|
+
try {
|
|
2686
|
+
token = await promptForToken(" Hostinger API Token: ");
|
|
2687
|
+
await storeHostingerToken(token);
|
|
2688
|
+
logger$6.log(" ✓ Token saved");
|
|
2689
|
+
} catch {
|
|
2690
|
+
logger$6.log(" ⚠ Could not get token, skipping DNS creation");
|
|
2691
|
+
return records.map((r) => ({
|
|
2692
|
+
...r,
|
|
2693
|
+
error: "No API token"
|
|
2694
|
+
}));
|
|
2695
|
+
}
|
|
2696
|
+
}
|
|
2697
|
+
const api = new HostingerApi(token);
|
|
2698
|
+
const results = [];
|
|
2699
|
+
let existingRecords = [];
|
|
2700
|
+
try {
|
|
2701
|
+
existingRecords = await api.getRecords(rootDomain);
|
|
2702
|
+
} catch (error) {
|
|
2703
|
+
const message = error instanceof Error ? error.message : "Unknown error";
|
|
2704
|
+
logger$6.log(` ⚠ Failed to fetch existing DNS records: ${message}`);
|
|
2705
|
+
return records.map((r) => ({
|
|
2706
|
+
...r,
|
|
2707
|
+
error: message
|
|
2708
|
+
}));
|
|
2709
|
+
}
|
|
2710
|
+
for (const record of records) {
|
|
2711
|
+
const existing = existingRecords.find((r) => r.name === record.subdomain && r.type === "A");
|
|
2712
|
+
if (existing) {
|
|
2713
|
+
results.push({
|
|
2714
|
+
...record,
|
|
2715
|
+
existed: true,
|
|
2716
|
+
created: false
|
|
2717
|
+
});
|
|
2718
|
+
continue;
|
|
2719
|
+
}
|
|
2720
|
+
try {
|
|
2721
|
+
await api.upsertRecords(rootDomain, [{
|
|
2722
|
+
name: record.subdomain,
|
|
2723
|
+
type: "A",
|
|
2724
|
+
ttl,
|
|
2725
|
+
records: [{ content: record.value }]
|
|
2726
|
+
}]);
|
|
2727
|
+
results.push({
|
|
2728
|
+
...record,
|
|
2729
|
+
created: true,
|
|
2730
|
+
existed: false
|
|
2731
|
+
});
|
|
2732
|
+
} catch (error) {
|
|
2733
|
+
const message = error instanceof Error ? error.message : "Unknown error";
|
|
2734
|
+
results.push({
|
|
2735
|
+
...record,
|
|
2736
|
+
error: message
|
|
2737
|
+
});
|
|
2738
|
+
}
|
|
2739
|
+
}
|
|
2740
|
+
return results;
|
|
2741
|
+
}
|
|
2742
|
+
/**
|
|
2743
|
+
* Main DNS orchestration function for deployments
|
|
2744
|
+
*/
|
|
2745
|
+
async function orchestrateDns(appHostnames, dnsConfig, dokployEndpoint) {
|
|
2746
|
+
if (!dnsConfig) return null;
|
|
2747
|
+
const { domain: rootDomain, autoCreate = true } = dnsConfig;
|
|
2748
|
+
logger$6.log("\n🌐 Setting up DNS records...");
|
|
2749
|
+
let serverIp;
|
|
2750
|
+
try {
|
|
2751
|
+
const endpointUrl = new URL(dokployEndpoint);
|
|
2752
|
+
serverIp = await resolveHostnameToIp(endpointUrl.hostname);
|
|
2753
|
+
logger$6.log(` Server IP: ${serverIp} (from ${endpointUrl.hostname})`);
|
|
2754
|
+
} catch (error) {
|
|
2755
|
+
const message = error instanceof Error ? error.message : "Unknown error";
|
|
2756
|
+
logger$6.log(` ⚠ Failed to resolve server IP: ${message}`);
|
|
2757
|
+
return null;
|
|
2758
|
+
}
|
|
2759
|
+
const requiredRecords = generateRequiredRecords(appHostnames, rootDomain, serverIp);
|
|
2760
|
+
if (requiredRecords.length === 0) {
|
|
2761
|
+
logger$6.log(" No DNS records needed");
|
|
2762
|
+
return {
|
|
2763
|
+
records: [],
|
|
2764
|
+
success: true,
|
|
2765
|
+
serverIp
|
|
2766
|
+
};
|
|
2767
|
+
}
|
|
2768
|
+
let finalRecords;
|
|
2769
|
+
if (autoCreate && dnsConfig.provider !== "manual") {
|
|
2770
|
+
logger$6.log(` Creating DNS records at ${dnsConfig.provider}...`);
|
|
2771
|
+
finalRecords = await createDnsRecords(requiredRecords, dnsConfig);
|
|
2772
|
+
const created = finalRecords.filter((r) => r.created).length;
|
|
2773
|
+
const existed = finalRecords.filter((r) => r.existed).length;
|
|
2774
|
+
const failed = finalRecords.filter((r) => r.error).length;
|
|
2775
|
+
if (created > 0) logger$6.log(` ✓ Created ${created} DNS record(s)`);
|
|
2776
|
+
if (existed > 0) logger$6.log(` ✓ ${existed} record(s) already exist`);
|
|
2777
|
+
if (failed > 0) logger$6.log(` ⚠ ${failed} record(s) failed`);
|
|
2778
|
+
} else finalRecords = requiredRecords;
|
|
2779
|
+
printDnsRecordsTable(finalRecords, rootDomain);
|
|
2780
|
+
const hasFailures = finalRecords.some((r) => r.error);
|
|
2781
|
+
if (dnsConfig.provider === "manual" || hasFailures) printDnsRecordsSimple(finalRecords.filter((r) => !r.created && !r.existed), rootDomain);
|
|
2782
|
+
return {
|
|
2783
|
+
records: finalRecords,
|
|
2784
|
+
success: !hasFailures,
|
|
2785
|
+
serverIp
|
|
2786
|
+
};
|
|
2787
|
+
}
|
|
2788
|
+
/**
|
|
2789
|
+
* Verify DNS records resolve correctly after deployment.
|
|
2790
|
+
*
|
|
2791
|
+
* This function:
|
|
2792
|
+
* 1. Checks state for previously verified hostnames (skips if already verified with same IP)
|
|
2793
|
+
* 2. Attempts to resolve each hostname to an IP
|
|
2794
|
+
* 3. Compares resolved IP with expected server IP
|
|
2795
|
+
* 4. Updates state with verification results
|
|
2796
|
+
*
|
|
2797
|
+
* @param appHostnames - Map of app names to hostnames
|
|
2798
|
+
* @param serverIp - Expected IP address the hostnames should resolve to
|
|
2799
|
+
* @param state - Deploy state for caching verification results
|
|
2800
|
+
* @returns Array of verification results
|
|
2801
|
+
*/
|
|
2802
|
+
async function verifyDnsRecords(appHostnames, serverIp, state) {
|
|
2803
|
+
const results = [];
|
|
2804
|
+
logger$6.log("\n🔍 Verifying DNS records...");
|
|
2805
|
+
for (const [appName, hostname] of appHostnames) {
|
|
2806
|
+
if (isDnsVerified(state, hostname, serverIp)) {
|
|
2807
|
+
logger$6.log(` ✓ ${hostname} (previously verified)`);
|
|
2808
|
+
results.push({
|
|
2809
|
+
hostname,
|
|
2810
|
+
appName,
|
|
2811
|
+
verified: true,
|
|
2812
|
+
expectedIp: serverIp,
|
|
2813
|
+
skipped: true
|
|
2814
|
+
});
|
|
2815
|
+
continue;
|
|
2816
|
+
}
|
|
2817
|
+
try {
|
|
2818
|
+
const resolvedIp = await resolveHostnameToIp(hostname);
|
|
2819
|
+
if (resolvedIp === serverIp) {
|
|
2820
|
+
setDnsVerification(state, hostname, serverIp);
|
|
2821
|
+
logger$6.log(` ✓ ${hostname} → ${resolvedIp}`);
|
|
2822
|
+
results.push({
|
|
2823
|
+
hostname,
|
|
2824
|
+
appName,
|
|
2825
|
+
verified: true,
|
|
2826
|
+
resolvedIp,
|
|
2827
|
+
expectedIp: serverIp
|
|
2828
|
+
});
|
|
2829
|
+
} else {
|
|
2830
|
+
logger$6.log(` ⚠ ${hostname} resolves to ${resolvedIp}, expected ${serverIp}`);
|
|
2831
|
+
results.push({
|
|
2832
|
+
hostname,
|
|
2833
|
+
appName,
|
|
2834
|
+
verified: false,
|
|
2835
|
+
resolvedIp,
|
|
2836
|
+
expectedIp: serverIp
|
|
2837
|
+
});
|
|
2838
|
+
}
|
|
2839
|
+
} catch (error) {
|
|
2840
|
+
const message = error instanceof Error ? error.message : "Unknown error";
|
|
2841
|
+
logger$6.log(` ⚠ ${hostname} DNS not propagated (${message})`);
|
|
2842
|
+
results.push({
|
|
2843
|
+
hostname,
|
|
2844
|
+
appName,
|
|
2845
|
+
verified: false,
|
|
2846
|
+
expectedIp: serverIp,
|
|
2847
|
+
error: message
|
|
2848
|
+
});
|
|
2849
|
+
}
|
|
2850
|
+
}
|
|
2851
|
+
const verified = results.filter((r) => r.verified).length;
|
|
2852
|
+
const skipped = results.filter((r) => r.skipped).length;
|
|
2853
|
+
const pending = results.filter((r) => !r.verified).length;
|
|
2854
|
+
if (pending > 0) {
|
|
2855
|
+
logger$6.log(`\n ${verified} verified, ${pending} pending propagation`);
|
|
2856
|
+
logger$6.log(" DNS changes may take 5-30 minutes to propagate");
|
|
2857
|
+
} else if (skipped > 0) logger$6.log(` ${verified} verified (${skipped} from cache)`);
|
|
2858
|
+
return results;
|
|
2859
|
+
}
|
|
3145 2860   
3146      - 
     2861 + //#endregion
     2862 + //#region src/docker/compose.ts
     2863 + /** Default Docker images for services */
     2864 + const DEFAULT_SERVICE_IMAGES = {
     2865 +   postgres: "postgres",
     2866 +   redis: "redis",
     2867 +   rabbitmq: "rabbitmq"
     2868 + };
     2869 + /** Default Docker image versions for services */
     2870 + const DEFAULT_SERVICE_VERSIONS = {
     2871 +   postgres: "16-alpine",
     2872 +   redis: "7-alpine",
     2873 +   rabbitmq: "3-management-alpine"
     2874 + };
     2875 + /** Get the default full image reference for a service */
     2876 + function getDefaultImage(serviceName) {
     2877 +   return `${DEFAULT_SERVICE_IMAGES[serviceName]}:${DEFAULT_SERVICE_VERSIONS[serviceName]}`;
     2878 + }
     2879 + /** Normalize services config to a consistent format - returns Map of service name to full image reference */
     2880 + function normalizeServices(services) {
     2881 +   const result = /* @__PURE__ */ new Map();
     2882 +   if (Array.isArray(services)) for (const name$1 of services) result.set(name$1, getDefaultImage(name$1));
     2883 +   else for (const [name$1, config$1] of Object.entries(services)) {
     2884 +     const serviceName = name$1;
     2885 +     if (config$1 === true) result.set(serviceName, getDefaultImage(serviceName));
     2886 +     else if (config$1 && typeof config$1 === "object") {
     2887 +       const serviceConfig = config$1;
     2888 +       if (serviceConfig.image) result.set(serviceName, serviceConfig.image);
     2889 +       else {
     2890 +         const version$1 = serviceConfig.version ?? DEFAULT_SERVICE_VERSIONS[serviceName];
     2891 +         result.set(serviceName, `${DEFAULT_SERVICE_IMAGES[serviceName]}:${version$1}`);
     2892 +       }
     2893 +     }
     2894 +   }
     2895 +   return result;
     2896 + }
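Read directly off the code above, `normalizeServices` accepts either an array of service names or an object keyed by service name, and always yields a `Map` from name to full image reference. An illustration (inputs invented):

```ts
normalizeServices(["postgres", "redis"]);
// => Map { "postgres" => "postgres:16-alpine", "redis" => "redis:7-alpine" }

normalizeServices({
  postgres: { version: "15-alpine" },    // version overrides only the tag
  redis: true,                           // true selects the default image
  rabbitmq: { image: "rabbitmq:4" }      // an explicit image wins outright
});
// => Map { "postgres" => "postgres:15-alpine",
//          "redis"    => "redis:7-alpine",
//          "rabbitmq" => "rabbitmq:4" }
```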
     2897 + /**
     2898 + * Generate docker-compose.yml for production deployment
     2899 + */
     2900 + function generateDockerCompose(options) {
     2901 +   const { imageName, registry, port, healthCheckPath, services } = options;
     2902 +   const serviceMap = normalizeServices(services);
     2903 +   const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
     2904 +   let yaml = `version: '3.8'
3147 2905   
3148      - 
3149      - 
     2906 + services:
     2907 +   api:
     2908 +     build:
     2909 +       context: ../..
     2910 +       dockerfile: .gkm/docker/Dockerfile
     2911 +     image: ${imageRef}\${IMAGE_NAME:-${imageName}}:\${TAG:-latest}
     2912 +     container_name: ${imageName}
     2913 +     restart: unless-stopped
     2914 +     ports:
     2915 +       - "\${PORT:-${port}}:${port}"
     2916 +     environment:
     2917 +       - NODE_ENV=production
     2918 + `;
     2919 +   if (serviceMap.has("postgres")) yaml += `      - DATABASE_URL=\${DATABASE_URL:-postgresql://postgres:postgres@postgres:5432/app}
     2920 + `;
     2921 +   if (serviceMap.has("redis")) yaml += `      - REDIS_URL=\${REDIS_URL:-redis://redis:6379}
     2922 + `;
     2923 +   if (serviceMap.has("rabbitmq")) yaml += `      - RABBITMQ_URL=\${RABBITMQ_URL:-amqp://rabbitmq:5672}
     2924 + `;
     2925 +   yaml += `    healthcheck:
     2926 +       test: ["CMD", "wget", "-q", "--spider", "http://localhost:${port}${healthCheckPath}"]
     2927 +       interval: 30s
     2928 +       timeout: 3s
     2929 +       retries: 3
     2930 + `;
     2931 +   if (serviceMap.size > 0) {
     2932 +     yaml += `    depends_on:
     2933 + `;
     2934 +     for (const serviceName of serviceMap.keys()) yaml += `      ${serviceName}:
     2935 +         condition: service_healthy
     2936 + `;
     2937 +   }
     2938 +   yaml += `    networks:
     2939 +       - app-network
     2940 + `;
     2941 +   const postgresImage = serviceMap.get("postgres");
     2942 +   if (postgresImage) yaml += `
     2943 +   postgres:
     2944 +     image: ${postgresImage}
     2945 +     container_name: postgres
     2946 +     restart: unless-stopped
     2947 +     environment:
     2948 +       POSTGRES_USER: \${POSTGRES_USER:-postgres}
     2949 +       POSTGRES_PASSWORD: \${POSTGRES_PASSWORD:-postgres}
     2950 +       POSTGRES_DB: \${POSTGRES_DB:-app}
     2951 +     volumes:
     2952 +       - postgres_data:/var/lib/postgresql/data
     2953 +     healthcheck:
     2954 +       test: ["CMD-SHELL", "pg_isready -U postgres"]
     2955 +       interval: 5s
     2956 +       timeout: 5s
     2957 +       retries: 5
     2958 +     networks:
     2959 +       - app-network
     2960 + `;
     2961 +   const redisImage = serviceMap.get("redis");
     2962 +   if (redisImage) yaml += `
     2963 +   redis:
     2964 +     image: ${redisImage}
     2965 +     container_name: redis
     2966 +     restart: unless-stopped
     2967 +     volumes:
     2968 +       - redis_data:/data
     2969 +     healthcheck:
     2970 +       test: ["CMD", "redis-cli", "ping"]
     2971 +       interval: 5s
     2972 +       timeout: 5s
     2973 +       retries: 5
     2974 +     networks:
     2975 +       - app-network
     2976 + `;
     2977 +   const rabbitmqImage = serviceMap.get("rabbitmq");
     2978 +   if (rabbitmqImage) yaml += `
     2979 +   rabbitmq:
     2980 +     image: ${rabbitmqImage}
     2981 +     container_name: rabbitmq
     2982 +     restart: unless-stopped
     2983 +     environment:
     2984 +       RABBITMQ_DEFAULT_USER: \${RABBITMQ_USER:-guest}
     2985 +       RABBITMQ_DEFAULT_PASS: \${RABBITMQ_PASSWORD:-guest}
     2986 +     ports:
     2987 +       - "15672:15672" # Management UI
     2988 +     volumes:
     2989 +       - rabbitmq_data:/var/lib/rabbitmq
     2990 +     healthcheck:
     2991 +       test: ["CMD", "rabbitmq-diagnostics", "-q", "ping"]
     2992 +       interval: 10s
     2993 +       timeout: 5s
     2994 +       retries: 5
     2995 +     networks:
     2996 +       - app-network
     2997 + `;
     2998 +   yaml += `
     2999 + volumes:
     3000 + `;
     3001 +   if (serviceMap.has("postgres")) yaml += `  postgres_data:
     3002 + `;
     3003 +   if (serviceMap.has("redis")) yaml += `  redis_data:
     3004 + `;
     3005 +   if (serviceMap.has("rabbitmq")) yaml += `  rabbitmq_data:
     3006 + `;
     3007 +   yaml += `
     3008 + networks:
     3009 +   app-network:
     3010 +     driver: bridge
3150 3011   `;
     3012 +   return yaml;
3151 3013   }
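A usage sketch, with option names taken from the destructuring above and all values invented:

```ts
const compose = generateDockerCompose({
  imageName: "api",
  registry: "ghcr.io/acme",        // emitted as ${REGISTRY:-ghcr.io/acme}/ prefix
  port: 3000,
  healthCheckPath: "/health",
  services: { postgres: true, redis: true }
});
// The result is a docker-compose.yml string: an `api` service, plus one
// service block, named volume, and depends_on entry per enabled service,
// all attached to a bridge network named app-network.
```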
3152 3014   /**
3153      - * Generate a
3154      - * Uses turbo prune for monorepo optimization.
3155      - * @internal Exported for testing
     3015 + * Generate a minimal docker-compose.yml for API only
3156 3016   */
3157      - function
3158      - const {
3159      - const
3160      - 
3161      - const turboInstallCmd = getTurboInstallCmd(packageManager);
3162      - const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
3163      - return `# syntax=docker/dockerfile:1
3164      - # Backend Dockerfile with turbo prune optimization
3165      - 
3166      - # Stage 1: Prune monorepo
3167      - FROM ${baseImage} AS pruner
3168      - 
3169      - WORKDIR /app
3170      - 
3171      - ${installPm}
3172      - 
3173      - COPY . .
3174      - 
3175      - # Prune to only include necessary packages
3176      - RUN ${turboCmd} prune ${turboPackage} --docker
3177      - 
3178      - # Stage 2: Install dependencies
3179      - FROM ${baseImage} AS deps
3180      - 
3181      - WORKDIR /app
3182      - 
3183      - ${installPm}
3184      - 
3185      - # Copy pruned lockfile and package.jsons
3186      - COPY --from=pruner /app/out/${pm.lockfile} ./
3187      - COPY --from=pruner /app/out/json/ ./
3188      - 
3189      - # Install dependencies
3190      - RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
3191      -     ${turboInstallCmd}
3192      - 
3193      - # Stage 3: Build
3194      - FROM deps AS builder
3195      - 
3196      - WORKDIR /app
3197      - 
3198      - # Build-time args for encrypted secrets
3199      - ARG GKM_ENCRYPTED_CREDENTIALS=""
3200      - ARG GKM_CREDENTIALS_IV=""
3201      - 
3202      - # Copy pruned source
3203      - COPY --from=pruner /app/out/full/ ./
3204      - 
3205      - # Copy workspace root configs for turbo builds (turbo prune doesn't include root configs)
3206      - # Using wildcard to make it optional for single-app projects
3207      - COPY --from=pruner /app/gkm.config.* ./
3208      - COPY --from=pruner /app/tsconfig.* ./
3209      - 
3210      - # Write encrypted credentials for gkm build to embed
3211      - RUN if [ -n "$GKM_ENCRYPTED_CREDENTIALS" ]; then \
3212      -     mkdir -p ${appPath}/.gkm && \
3213      -     echo "$GKM_ENCRYPTED_CREDENTIALS" > ${appPath}/.gkm/credentials.enc && \
3214      -     echo "$GKM_CREDENTIALS_IV" > ${appPath}/.gkm/credentials.iv; \
3215      -   fi
3216      - 
3217      - # Build production server using gkm
3218      - RUN cd ${appPath} && ./node_modules/.bin/gkm build --provider server --production
3219      - 
3220      - # Stage 4: Production
3221      - FROM ${baseImage} AS runner
3222      - 
3223      - WORKDIR /app
3224      - 
3225      - RUN apk add --no-cache tini
3226      - 
3227      - RUN addgroup --system --gid 1001 nodejs && \\
3228      -     adduser --system --uid 1001 hono
3229      - 
3230      - # Copy bundled server
3231      - COPY --from=builder --chown=hono:nodejs /app/${appPath}/.gkm/server/dist/server.mjs ./
3232      - 
3233      - ENV NODE_ENV=production
3234      - ENV PORT=${port}
3235      - 
3236      - HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
3237      -   CMD wget -qO- http://localhost:${port}${healthCheckPath} > /dev/null 2>&1 || exit 1
3238      - 
3239      - USER hono
     3017 + function generateMinimalDockerCompose(options) {
     3018 +   const { imageName, registry, port, healthCheckPath } = options;
     3019 +   const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
     3020 +   return `version: '3.8'
3240 3021   
3241      - 
     3022 + services:
     3023 +   api:
     3024 +     build:
     3025 +       context: ../..
     3026 +       dockerfile: .gkm/docker/Dockerfile
     3027 +     image: ${imageRef}\${IMAGE_NAME:-${imageName}}:\${TAG:-latest}
     3028 +     container_name: ${imageName}
     3029 +     restart: unless-stopped
     3030 +     ports:
     3031 +       - "\${PORT:-${port}}:${port}"
     3032 +     environment:
     3033 +       - NODE_ENV=production
     3034 +     healthcheck:
     3035 +       test: ["CMD", "wget", "-q", "--spider", "http://localhost:${port}${healthCheckPath}"]
     3036 +       interval: 30s
     3037 +       timeout: 3s
     3038 +       retries: 3
     3039 +     networks:
     3040 +       - app-network
3242 3041   
3243      - 
3244      - 
     3042 + networks:
     3043 +   app-network:
     3044 +     driver: bridge
3245 3045   `;
3246 3046   }
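The minimal variant is the fallback when no services are configured: only the `api` service and the shared network, with no databases, volumes, or `depends_on`. A sketch with invented values:

```ts
const compose = generateMinimalDockerCompose({
  imageName: "api",
  registry: "",          // empty registry: the image ref gets no prefix
  port: 3000,
  healthCheckPath: "/health"
});
// => a compose file whose only service is `api` on app-network.
```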
3247 3047   /**
3248      - * Generate
3249      - *
3250      - * This is used for apps that don't use gkm routes (e.g., Better Auth servers).
     3048 + * Generate docker-compose.yml for a workspace with all apps as services.
     3049 + * Apps can communicate with each other via service names.
3251 3050   * @internal Exported for testing
3252 3051   */
3253      - function
3254      - const {
3255      - const
3256      - const
3257      - const
3258      - const
3259-3264  - (6 blank lines)
3265      - WORKDIR /app
3266      - 
3267      - ${installPm}
3268      - 
3269      - COPY . .
3270      - 
3271      - # Prune to only include necessary packages
3272      - RUN ${turboCmd} prune ${turboPackage} --docker
3273      - 
3274      - # Stage 2: Install dependencies
3275      - FROM ${baseImage} AS deps
3276      - 
3277      - WORKDIR /app
3278      - 
3279      - ${installPm}
3280      - 
3281      - # Copy pruned lockfile and package.jsons
3282      - COPY --from=pruner /app/out/${pm.lockfile} ./
3283      - COPY --from=pruner /app/out/json/ ./
3284      - 
3285      - # Install dependencies
3286      - RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
3287      -     ${turboInstallCmd}
3288      - 
3289      - # Stage 3: Build with tsdown
3290      - FROM deps AS builder
3291      - 
3292      - WORKDIR /app
3293      - 
3294      - # Build-time args for encrypted secrets
3295      - ARG GKM_ENCRYPTED_CREDENTIALS=""
3296      - ARG GKM_CREDENTIALS_IV=""
3297      - 
3298      - # Copy pruned source
3299      - COPY --from=pruner /app/out/full/ ./
3300      - 
3301      - # Copy workspace root configs for turbo builds (turbo prune doesn't include root configs)
3302      - # Using wildcard to make it optional for single-app projects
3303      - COPY --from=pruner /app/tsconfig.* ./
3304      - 
3305      - # Write encrypted credentials for tsdown to embed via define
3306      - RUN if [ -n "$GKM_ENCRYPTED_CREDENTIALS" ]; then \
3307      -     mkdir -p ${appPath}/.gkm && \
3308      -     echo "$GKM_ENCRYPTED_CREDENTIALS" > ${appPath}/.gkm/credentials.enc && \
3309      -     echo "$GKM_CREDENTIALS_IV" > ${appPath}/.gkm/credentials.iv; \
3310      -   fi
3311      - 
3312      - # Bundle entry point with esbuild (outputs to dist/index.mjs)
3313      - # Creates a fully standalone bundle with all dependencies included
3314      - # Use define to embed credentials if present
3315      - RUN cd ${appPath} && \
3316      -   if [ -f .gkm/credentials.enc ]; then \
3317      -     CREDS=$(cat .gkm/credentials.enc) && \
3318      -     IV=$(cat .gkm/credentials.iv) && \
3319      -     npx esbuild ${entry} --bundle --platform=node --target=node22 --format=esm \
3320      -       --outfile=dist/index.mjs --packages=bundle \
3321      -       --banner:js='import { createRequire } from "module"; const require = createRequire(import.meta.url);' \
3322      -       --define:__GKM_ENCRYPTED_CREDENTIALS__="'\\"$CREDS\\"'" \
3323      -       --define:__GKM_CREDENTIALS_IV__="'\\"$IV\\"'"; \
3324      -   else \
3325      -     npx esbuild ${entry} --bundle --platform=node --target=node22 --format=esm \
3326      -       --outfile=dist/index.mjs --packages=bundle \
3327      -       --banner:js='import { createRequire } from "module"; const require = createRequire(import.meta.url);'; \
3328      -   fi
3329      - 
3330      - # Stage 4: Production
3331      - FROM ${baseImage} AS runner
3332      - 
3333      - WORKDIR /app
3334      - 
3335      - RUN apk add --no-cache tini
3336      - 
3337      - RUN addgroup --system --gid 1001 nodejs && \\
3338      -     adduser --system --uid 1001 app
3339      - 
3340      - # Copy bundled output only (no node_modules needed - fully bundled)
3341      - COPY --from=builder --chown=app:nodejs /app/${appPath}/dist/index.mjs ./
3342      - 
3343      - ENV NODE_ENV=production
3344      - ENV PORT=${port}
3345      - 
3346      - HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
3347      -   CMD wget -qO- http://localhost:${port}${healthCheckPath} > /dev/null 2>&1 || exit 1
3348      - 
3349      - USER app
3350      - 
3351      - EXPOSE ${port}
     3052 + function generateWorkspaceCompose(workspace, options = {}) {
     3053 +   const { registry } = options;
     3054 +   const apps = Object.entries(workspace.apps);
     3055 +   const services = workspace.services;
     3056 +   const hasPostgres = services.db !== void 0 && services.db !== false;
     3057 +   const hasRedis = services.cache !== void 0 && services.cache !== false;
     3058 +   const hasMail = services.mail !== void 0 && services.mail !== false;
     3059 +   const postgresImage = getInfraServiceImage("postgres", services.db);
     3060 +   const redisImage = getInfraServiceImage("redis", services.cache);
     3061 +   let yaml = `# Docker Compose for ${workspace.name} workspace
     3062 + # Generated by gkm - do not edit manually
3352 3063   
3353      - 
3354      - 
     3064 + services:
     3065 + `;
     3066 +   for (const [appName, app] of apps) yaml += generateAppService(appName, app, apps, {
     3067 +     registry,
     3068 +     hasPostgres,
     3069 +     hasRedis
     3070 +   });
     3071 +   if (hasPostgres) yaml += `
     3072 +   postgres:
     3073 +     image: ${postgresImage}
     3074 +     container_name: ${workspace.name}-postgres
     3075 +     restart: unless-stopped
     3076 +     environment:
     3077 +       POSTGRES_USER: \${POSTGRES_USER:-postgres}
     3078 +       POSTGRES_PASSWORD: \${POSTGRES_PASSWORD:-postgres}
     3079 +       POSTGRES_DB: \${POSTGRES_DB:-app}
     3080 +     volumes:
     3081 +       - postgres_data:/var/lib/postgresql/data
     3082 +     healthcheck:
     3083 +       test: ["CMD-SHELL", "pg_isready -U postgres"]
     3084 +       interval: 5s
     3085 +       timeout: 5s
     3086 +       retries: 5
     3087 +     networks:
     3088 +       - workspace-network
     3089 + `;
     3090 +   if (hasRedis) yaml += `
     3091 +   redis:
     3092 +     image: ${redisImage}
     3093 +     container_name: ${workspace.name}-redis
     3094 +     restart: unless-stopped
     3095 +     volumes:
     3096 +       - redis_data:/data
     3097 +     healthcheck:
     3098 +       test: ["CMD", "redis-cli", "ping"]
     3099 +       interval: 5s
     3100 +       timeout: 5s
     3101 +       retries: 5
     3102 +     networks:
     3103 +       - workspace-network
     3104 + `;
     3105 +   if (hasMail) yaml += `
     3106 +   mailpit:
     3107 +     image: axllent/mailpit:latest
     3108 +     container_name: ${workspace.name}-mailpit
     3109 +     restart: unless-stopped
     3110 +     ports:
     3111 +       - "8025:8025" # Web UI
     3112 +       - "1025:1025" # SMTP
     3113 +     networks:
     3114 +       - workspace-network
     3115 + `;
     3116 +   yaml += `
     3117 + volumes:
3355 3118   `;
     3119 +   if (hasPostgres) yaml += `  postgres_data:
     3120 + `;
     3121 +   if (hasRedis) yaml += `  redis_data:
     3122 + `;
     3123 +   yaml += `
     3124 + networks:
     3125 +   workspace-network:
     3126 +     driver: bridge
     3127 + `;
     3128 +   return yaml;
3356 3129   }
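A sketch of how this might be driven. The workspace shape here is inferred from the fields `generateWorkspaceCompose` and `generateAppService` actually read (`name`, `apps`, `services`, and per-app `type`/`port`/`dependencies`); the real type is defined elsewhere in the package.

```ts
const workspace = {
  name: "acme",
  apps: {
    api: { type: "backend", port: 3000, dependencies: [] },
    web: { type: "frontend", port: 3001, dependencies: ["api"] }
  },
  services: { db: true, cache: true, mail: false }
};
const yaml = generateWorkspaceCompose(workspace, { registry: "ghcr.io/acme" });
// => api + web services, acme-postgres and acme-redis with health checks,
//    everything on workspace-network; mailpit is skipped because mail is false.
```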
3357      - 
3358      - //#endregion
3359      - //#region src/docker/index.ts
3360      - const logger$6 = console;
3361 3130   /**
3362      - *
3363      - * Generates Dockerfile, docker-compose.yml, and related files
3364      - *
3365      - * Default: Multi-stage Dockerfile that builds from source inside Docker
3366      - * --slim: Slim Dockerfile that copies pre-built bundle (requires prior build)
     3131 + * Get infrastructure service image with version.
3367 3132   */
3368      - 
3369      - const
3370      - 
3371      - 
3372      - 
     3133 + function getInfraServiceImage(serviceName, config$1) {
     3134 +   const defaults = {
     3135 +     postgres: "postgres:16-alpine",
     3136 +     redis: "redis:7-alpine"
     3137 +   };
     3138 +   if (!config$1 || config$1 === true) return defaults[serviceName];
     3139 +   if (typeof config$1 === "object") {
     3140 +     if (config$1.image) return config$1.image;
     3141 +     if (config$1.version) {
     3142 +       const baseImage = serviceName === "postgres" ? "postgres" : "redis";
     3143 +       return `${baseImage}:${config$1.version}`;
     3144 +     }
3373 3145     }
3374-3382  - (9 blank lines)
     3146 +   return defaults[serviceName];
     3147 + }
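The resolution order, straight from the branches above, is: explicit `image` > `version` on the default base image > built-in default.

```ts
getInfraServiceImage("postgres", true);                    // "postgres:16-alpine"
getInfraServiceImage("postgres", { version: "17" });       // "postgres:17"
getInfraServiceImage("redis", { image: "valkey/valkey" }); // "valkey/valkey"
getInfraServiceImage("redis", undefined);                  // "redis:7-alpine"
```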
     3148 + /**
     3149 + * Generate a service definition for an app.
     3150 + */
     3151 + function generateAppService(appName, app, allApps, options) {
     3152 +   const { registry, hasPostgres, hasRedis } = options;
     3153 +   const imageRef = registry ? `\${REGISTRY:-${registry}}/` : "";
     3154 +   const healthCheckPath = app.type === "frontend" ? "/" : "/health";
     3155 +   const healthCheckCmd = app.type === "frontend" ? `["CMD", "wget", "-q", "--spider", "http://localhost:${app.port}/"]` : `["CMD", "wget", "-q", "--spider", "http://localhost:${app.port}${healthCheckPath}"]`;
     3156 +   let yaml = `
     3157 +   ${appName}:
     3158 +     build:
     3159 +       context: .
     3160 +       dockerfile: .gkm/docker/Dockerfile.${appName}
     3161 +     image: ${imageRef}\${${appName.toUpperCase()}_IMAGE:-${appName}}:\${TAG:-latest}
     3162 +     container_name: ${appName}
     3163 +     restart: unless-stopped
     3164 +     ports:
     3165 +       - "\${${appName.toUpperCase()}_PORT:-${app.port}}:${app.port}"
     3166 +     environment:
     3167 +       - NODE_ENV=production
     3168 +       - PORT=${app.port}
     3169 + `;
     3170 +   for (const dep of app.dependencies) {
     3171 +     const depApp = allApps.find(([name$1]) => name$1 === dep)?.[1];
     3172 +     if (depApp) yaml += `      - ${dep.toUpperCase()}_URL=http://${dep}:${depApp.port}
     3173 + `;
3383 3174     }
3384-3411  - (28 blank lines)
3412      - const dockerfile = useSlim ? generateSlimDockerfile(templateOptions) : generateMultiStageDockerfile(templateOptions);
3413      - const dockerMode = useSlim ? "slim" : useTurbo ? "turbo" : "multi-stage";
3414      - const dockerfilePath = join(dockerDir, "Dockerfile");
3415      - await writeFile(dockerfilePath, dockerfile);
3416      - logger$6.log(`Generated: .gkm/docker/Dockerfile (${dockerMode}, ${packageManager})`);
3417      - const composeOptions = {
3418      -   imageName: dockerConfig.imageName,
3419      -   registry: options.registry ?? dockerConfig.registry,
3420      -   port: dockerConfig.port,
3421      -   healthCheckPath,
3422      -   services: dockerConfig.compose?.services ?? {}
3423      - };
3424      - const hasServices = Array.isArray(composeOptions.services) ? composeOptions.services.length > 0 : Object.keys(composeOptions.services).length > 0;
3425      - const dockerCompose = hasServices ? generateDockerCompose(composeOptions) : generateMinimalDockerCompose(composeOptions);
3426      - const composePath = join(dockerDir, "docker-compose.yml");
3427      - await writeFile(composePath, dockerCompose);
3428      - logger$6.log("Generated: .gkm/docker/docker-compose.yml");
3429      - const dockerignore = generateDockerignore();
3430      - const dockerignorePath = join(process.cwd(), ".dockerignore");
3431      - await writeFile(dockerignorePath, dockerignore);
3432      - logger$6.log("Generated: .dockerignore (project root)");
3433      - const entrypoint = generateDockerEntrypoint();
3434      - const entrypointPath = join(dockerDir, "docker-entrypoint.sh");
3435      - await writeFile(entrypointPath, entrypoint);
3436      - logger$6.log("Generated: .gkm/docker/docker-entrypoint.sh");
3437      - const result = {
3438      -   dockerfile: dockerfilePath,
3439      -   dockerCompose: composePath,
3440      -   dockerignore: dockerignorePath,
3441      -   entrypoint: entrypointPath
3442      - };
3443      - if (options.build) await buildDockerImage(dockerConfig.imageName, options);
3444      - if (options.push) await pushDockerImage(dockerConfig.imageName, options);
3445      - return result;
     3175 +   if (app.type === "backend") {
     3176 +     if (hasPostgres) yaml += `      - DATABASE_URL=\${DATABASE_URL:-postgresql://postgres:postgres@postgres:5432/app}
     3177 + `;
     3178 +     if (hasRedis) yaml += `      - REDIS_URL=\${REDIS_URL:-redis://redis:6379}
     3179 + `;
     3180 +   }
     3181 +   yaml += `    healthcheck:
     3182 +       test: ${healthCheckCmd}
     3183 +       interval: 30s
     3184 +       timeout: 3s
     3185 +       retries: 3
     3186 + `;
     3187 +   const dependencies$1 = [...app.dependencies];
     3188 +   if (app.type === "backend") {
     3189 +     if (hasPostgres) dependencies$1.push("postgres");
     3190 +     if (hasRedis) dependencies$1.push("redis");
     3191 +   }
     3192 +   if (dependencies$1.length > 0) {
     3193 +     yaml += `    depends_on:
     3194 + `;
     3195 +     for (const dep of dependencies$1) yaml += `      ${dep}:
     3196 +         condition: service_healthy
     3197 + `;
     3198 +   }
     3199 +   yaml += `    networks:
     3200 +       - workspace-network
     3201 + `;
     3202 +   return yaml;
3446 3203   }
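Inter-app wiring happens through compose service names. A sketch (app shapes assumed as in the workspace example earlier):

```ts
const svc = generateAppService(
  "web",
  { type: "frontend", port: 3001, dependencies: ["api"] },
  [
    ["api", { type: "backend", port: 3000, dependencies: [] }],
    ["web", { type: "frontend", port: 3001, dependencies: ["api"] }]
  ],
  { registry: "", hasPostgres: true, hasRedis: false }
);
// svc contains `- API_URL=http://api:3000` and a depends_on entry for `api`
// with condition: service_healthy. DATABASE_URL is NOT injected, because
// only backend apps receive the infrastructure env vars.
```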
     3204 + 
     3205 + //#endregion
     3206 + //#region src/docker/templates.ts
     3207 + const LOCKFILES = [
     3208 +   ["pnpm-lock.yaml", "pnpm"],
     3209 +   ["bun.lockb", "bun"],
     3210 +   ["yarn.lock", "yarn"],
     3211 +   ["package-lock.json", "npm"]
     3212 + ];
3447 3213   /**
3448      - *
3449      - *
3450      - * Returns cleanup function if file was copied
     3214 + * Detect package manager from lockfiles
     3215 + * Walks up the directory tree to find lockfile (for monorepos)
3451 3216   */
3452      - function
3453      - 
3454      - 
3455      - 
3456      - return
     3217 + function detectPackageManager$1(cwd = process.cwd()) {
     3218 +   let dir = cwd;
     3219 +   const root = parse(dir).root;
     3220 +   while (dir !== root) {
     3221 +     for (const [lockfile, pm] of LOCKFILES) if (existsSync(join(dir, lockfile))) return pm;
     3222 +     dir = dirname(dir);
3457 3223     }
3458      - const
3459      - 
3460      - if (lockfilePath === localLockfile) return null;
3461      - logger$6.log(`  Copying ${lockfileName} from monorepo root...`);
3462      - copyFileSync(lockfilePath, localLockfile);
3463      - return () => {
3464      -   try {
3465      -     unlinkSync(localLockfile);
3466      -   } catch {}
3467      - };
     3224 +   for (const [lockfile, pm] of LOCKFILES) if (existsSync(join(root, lockfile))) return pm;
     3225 +   return "pnpm";
3468 3226   }
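The walk is bottom-up, so the nearest lockfile wins. With an invented layout where only `/repo/pnpm-lock.yaml` exists:

```ts
const pm = detectPackageManager$1("/repo/apps/api");
// Visits /repo/apps/api → /repo/apps → /repo, matches pnpm-lock.yaml,
// returns "pnpm". If nothing matches all the way to the filesystem root,
// the function falls back to "pnpm".
```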
3469 3227   /**
3470      - *
3471      - *
     3228 + * Find the lockfile path by walking up the directory tree
     3229 + * Returns the full path to the lockfile, or null if not found
3472 3230   */
3473      - 
3474      - 
3475      - const
3476-3481  - (6 blank lines)
3482      -   cwd,
3483      -   stdio: "inherit",
3484      -   env: {
3485      -     ...process.env,
3486      -     DOCKER_BUILDKIT: "1"
3487      -   }
3488      - });
3489      - logger$6.log(`✅ Docker image built: ${fullImageName}`);
3490      - } catch (error) {
3491      -   throw new Error(`Failed to build Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
3492      - } finally {
3493      -   cleanup?.();
     3231 + function findLockfilePath(cwd = process.cwd()) {
     3232 +   let dir = cwd;
     3233 +   const root = parse(dir).root;
     3234 +   while (dir !== root) {
     3235 +     for (const [lockfile] of LOCKFILES) {
     3236 +       const lockfilePath = join(dir, lockfile);
     3237 +       if (existsSync(lockfilePath)) return lockfilePath;
     3238 +     }
     3239 +     dir = dirname(dir);
3494 3240     }
     3241 +   for (const [lockfile] of LOCKFILES) {
     3242 +     const lockfilePath = join(root, lockfile);
     3243 +     if (existsSync(lockfilePath)) return lockfilePath;
     3244 +   }
     3245 +   return null;
3495 3246   }
3496 3247   /**
3497      - *
     3248 + * Check if we're in a monorepo (lockfile is in a parent directory)
3498 3249   */
3499      - 
3500      - const
3501      - 
3502      - 
3503      - 
3504      - logger$6.log(`\n🚀 Pushing Docker image: ${fullImageName}`);
3505      - try {
3506      -   execSync(`docker push ${fullImageName}`, {
3507      -     cwd: process.cwd(),
3508      -     stdio: "inherit"
3509      -   });
3510      -   logger$6.log(`✅ Docker image pushed: ${fullImageName}`);
3511      - } catch (error) {
3512      -   throw new Error(`Failed to push Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
3513      - }
     3250 + function isMonorepo(cwd = process.cwd()) {
     3251 +   const lockfilePath = findLockfilePath(cwd);
     3252 +   if (!lockfilePath) return false;
     3253 +   const lockfileDir = dirname(lockfilePath);
     3254 +   return lockfileDir !== cwd;
3514 3255   }
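`isMonorepo` reduces to "the nearest lockfile is not in cwd itself". With the same invented layout as above:

```ts
isMonorepo("/repo/apps/api"); // true  – lockfile found at /repo/pnpm-lock.yaml
isMonorepo("/repo");          // false – lockfile sits in cwd itself
isMonorepo("/tmp/empty");     // false – no lockfile anywhere up the tree
```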
3515 3256   /**
3516      - *
     3257 + * Check if turbo.json exists (walks up directory tree)
3517 3258   */
3518      - function
3519-3523  - (5 blank lines)
3524      - return pkg$1.name;
3525      - } catch {
3526      -   return void 0;
     3259 + function hasTurboConfig(cwd = process.cwd()) {
     3260 +   let dir = cwd;
     3261 +   const root = parse(dir).root;
     3262 +   while (dir !== root) {
     3263 +     if (existsSync(join(dir, "turbo.json"))) return true;
     3264 +     dir = dirname(dir);
3527 3265     }
     3266 +   return existsSync(join(root, "turbo.json"));
3528 3267   }
3529 3268   /**
3530      - *
3531      - *
     3269 + * Get install command for turbo builds (without frozen lockfile)
     3270 + * Turbo prune creates a subset that may not perfectly match the lockfile
3532 3271   */
3533      - 
3534      - const
3535-3538  - (4 blank lines)
3539      - const packageManager = detectPackageManager$1(workspace.root);
3540      - logger$6.log(`  Package manager: ${packageManager}`);
3541      - for (const [appName, app] of apps) {
3542      -   const appPath = app.path;
3543      -   const fullAppPath = join(workspace.root, appPath);
3544      -   const turboPackage = getAppPackageName(fullAppPath) ?? appName;
3545      -   const imageName = appName;
3546      -   const hasEntry = !!app.entry;
3547      -   const buildType = hasEntry ? "entry" : app.type;
3548      -   logger$6.log(`\n  📄 Generating Dockerfile for ${appName} (${buildType})`);
3549      -   let dockerfile;
3550      -   if (app.type === "frontend") dockerfile = generateNextjsDockerfile({
3551      -     imageName,
3552      -     baseImage: "node:22-alpine",
3553      -     port: app.port,
3554      -     appPath,
3555      -     turboPackage,
3556      -     packageManager
3557      -   });
3558      -   else if (app.entry) dockerfile = generateEntryDockerfile({
3559      -     imageName,
3560      -     baseImage: "node:22-alpine",
3561      -     port: app.port,
3562      -     appPath,
3563      -     entry: app.entry,
3564      -     turboPackage,
3565      -     packageManager,
3566      -     healthCheckPath: "/health"
3567      -   });
3568      -   else dockerfile = generateBackendDockerfile({
3569      -     imageName,
3570      -     baseImage: "node:22-alpine",
3571      -     port: app.port,
3572      -     appPath,
3573      -     turboPackage,
3574      -     packageManager,
3575      -     healthCheckPath: "/health"
3576      -   });
3577      -   const dockerfilePath = join(dockerDir, `Dockerfile.${appName}`);
3578      -   await writeFile(dockerfilePath, dockerfile);
3579      -   logger$6.log(`  Generated: .gkm/docker/Dockerfile.${appName}`);
3580      -   results.push({
3581      -     appName,
3582      -     type: app.type,
3583      -     dockerfile: dockerfilePath,
3584      -     imageName
3585      -   });
3586      - }
3587      - const dockerignore = generateDockerignore();
3588      - const dockerignorePath = join(workspace.root, ".dockerignore");
3589      - await writeFile(dockerignorePath, dockerignore);
3590      - logger$6.log(`\n  Generated: .dockerignore (workspace root)`);
3591      - const dockerCompose = generateWorkspaceCompose(workspace, { registry: options.registry });
3592      - const composePath = join(dockerDir, "docker-compose.yml");
3593      - await writeFile(composePath, dockerCompose);
3594      - logger$6.log(`  Generated: .gkm/docker/docker-compose.yml`);
3595      - logger$6.log(`\n✅ Generated ${results.length} Dockerfile(s) + docker-compose.yml`);
3596      - logger$6.log("\n📋 Build commands:");
3597      - for (const result of results) {
3598      -   const icon = result.type === "backend" ? "⚙️" : "🌐";
3599      -   logger$6.log(`  ${icon} docker build -f .gkm/docker/Dockerfile.${result.appName} -t ${result.imageName} .`);
3600      - }
3601      - logger$6.log("\n📋 Run all services:");
3602      - logger$6.log("  docker compose -f .gkm/docker/docker-compose.yml up --build");
3603      - return {
3604      -   apps: results,
3605      -   dockerCompose: composePath,
3606      -   dockerignore: dockerignorePath
     3272 + function getTurboInstallCmd(pm) {
     3273 +   const commands = {
     3274 +     pnpm: "pnpm install",
     3275 +     npm: "npm install",
     3276 +     yarn: "yarn install",
     3277 +     bun: "bun install"
3607 3278   };
     3279 +   return commands[pm];
3608 3280   }
3609      - 
3610      - //#endregion
3611      - //#region src/deploy/docker.ts
3612 3281   /**
3613      - * Get
3614      - * Used for Dokploy app/project naming
     3282 + * Get package manager specific commands and paths
3615 3283   */
3616      - function
3617      - const
3618-3623  - (6 blank lines)
     3284 + function getPmConfig(pm) {
     3285 +   const configs = {
     3286 +     pnpm: {
     3287 +       install: "corepack enable && corepack prepare pnpm@latest --activate",
     3288 +       lockfile: "pnpm-lock.yaml",
     3289 +       fetch: "pnpm fetch",
     3290 +       installCmd: "pnpm install --frozen-lockfile --offline",
     3291 +       cacheTarget: "/root/.local/share/pnpm/store",
     3292 +       cacheId: "pnpm",
     3293 +       run: "pnpm",
     3294 +       exec: "pnpm exec",
     3295 +       dlx: "pnpm dlx",
     3296 +       addGlobal: "pnpm add -g"
     3297 +     },
     3298 +     npm: {
     3299 +       install: "",
     3300 +       lockfile: "package-lock.json",
     3301 +       fetch: "",
     3302 +       installCmd: "npm ci",
     3303 +       cacheTarget: "/root/.npm",
     3304 +       cacheId: "npm",
     3305 +       run: "npm run",
     3306 +       exec: "npx",
     3307 +       dlx: "npx",
     3308 +       addGlobal: "npm install -g"
     3309 +     },
     3310 +     yarn: {
     3311 +       install: "corepack enable && corepack prepare yarn@stable --activate",
     3312 +       lockfile: "yarn.lock",
     3313 +       fetch: "",
     3314 +       installCmd: "yarn install --frozen-lockfile",
     3315 +       cacheTarget: "/root/.yarn/cache",
     3316 +       cacheId: "yarn",
     3317 +       run: "yarn",
     3318 +       exec: "yarn exec",
     3319 +       dlx: "yarn dlx",
     3320 +       addGlobal: "yarn global add"
     3321 +     },
     3322 +     bun: {
     3323 +       install: "npm install -g bun",
     3324 +       lockfile: "bun.lockb",
     3325 +       fetch: "",
     3326 +       installCmd: "bun install --frozen-lockfile",
     3327 +       cacheTarget: "/root/.bun/install/cache",
     3328 +       cacheId: "bun",
     3329 +       run: "bun run",
     3330 +       exec: "bunx",
     3331 +       dlx: "bunx",
     3332 +       addGlobal: "bun add -g"
     3333 +     }
     3334 +   };
     3335 +   return configs[pm];
3624 3336   }
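Each package manager gets one config record, and the Dockerfile generators below splice these values into `RUN`/`COPY` instructions. For example:

```ts
const pm = getPmConfig("pnpm");
pm.lockfile;   // "pnpm-lock.yaml"  → COPY --from=pruner /app/out/pnpm-lock.yaml ./
pm.cacheId;    // "pnpm"            → RUN --mount=type=cache,id=pnpm,...
pm.installCmd; // "pnpm install --frozen-lockfile --offline"
```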
3625 3337   /**
3626      - *
3627      - *
     3338 + * Generate a multi-stage Dockerfile for building from source
     3339 + * Optimized for build speed with:
     3340 + * - BuildKit cache mounts for package manager store
     3341 + * - pnpm fetch for better layer caching (when using pnpm)
     3342 + * - Optional turbo prune for monorepos
3628 3343   */
3629      - function
3630      - const
3631-3640  - (10 blank lines)
     3344 + function generateMultiStageDockerfile(options) {
     3345 +   const { baseImage, port, healthCheckPath, turbo, turboPackage, packageManager } = options;
     3346 +   if (turbo) return generateTurboDockerfile({
     3347 +     ...options,
     3348 +     turboPackage: turboPackage ?? "api"
     3349 +   });
     3350 +   const pm = getPmConfig(packageManager);
     3351 +   const installPm = pm.install ? `\n# Install ${packageManager}\nRUN ${pm.install}\n` : "";
     3352 +   const hasFetch = packageManager === "pnpm";
     3353 +   const depsStage = hasFetch ? `# Copy lockfile first for better caching
     3354 + COPY ${pm.lockfile} ./
     3355 + 
     3356 + # Fetch dependencies (downloads to virtual store, cached separately)
     3357 + RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
     3358 +     ${pm.fetch}
     3359 + 
     3360 + # Copy package.json after fetch
     3361 + COPY package.json ./
     3362 + 
     3363 + # Install from cache (fast - no network needed)
     3364 + RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
     3365 +     ${pm.installCmd}` : `# Copy package files
     3366 + COPY package.json ${pm.lockfile} ./
     3367 + 
     3368 + # Install dependencies with cache
     3369 + RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
     3370 +     ${pm.installCmd}`;
     3371 +   return `# syntax=docker/dockerfile:1
     3372 + # Stage 1: Dependencies
     3373 + FROM ${baseImage} AS deps
     3374 + 
     3375 + WORKDIR /app
     3376 + ${installPm}
     3377 + ${depsStage}
     3378 + 
     3379 + # Stage 2: Build
     3380 + FROM deps AS builder
     3381 + 
     3382 + WORKDIR /app
     3383 + 
     3384 + # Copy source (deps already installed)
     3385 + COPY . .
     3386 + 
     3387 + # Debug: Show node_modules/.bin contents and build production server
     3388 + RUN echo "=== node_modules/.bin contents ===" && \
     3389 +   ls -la node_modules/.bin/ 2>/dev/null || echo "node_modules/.bin not found" && \
     3390 +   echo "=== Checking for gkm ===" && \
     3391 +   which gkm 2>/dev/null || echo "gkm not in PATH" && \
     3392 +   ls -la node_modules/.bin/gkm 2>/dev/null || echo "gkm binary not found in node_modules/.bin" && \
     3393 +   echo "=== Running build ===" && \
     3394 +   ./node_modules/.bin/gkm build --provider server --production
     3395 + 
     3396 + # Stage 3: Production
     3397 + FROM ${baseImage} AS runner
     3398 + 
     3399 + WORKDIR /app
     3400 + 
     3401 + # Install tini for proper signal handling as PID 1
     3402 + RUN apk add --no-cache tini
     3403 + 
     3404 + # Create non-root user
     3405 + RUN addgroup --system --gid 1001 nodejs && \\
     3406 +     adduser --system --uid 1001 hono
     3407 + 
     3408 + # Copy bundled server
     3409 + COPY --from=builder --chown=hono:nodejs /app/.gkm/server/dist/server.mjs ./
     3410 + 
     3411 + # Environment
     3412 + ENV NODE_ENV=production
     3413 + ENV PORT=${port}
     3414 + 
     3415 + # Health check
     3416 + HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
     3417 +   CMD wget -qO- http://localhost:${port}${healthCheckPath} > /dev/null 2>&1 || exit 1
     3418 + 
     3419 + # Switch to non-root user
     3420 + USER hono
     3421 + 
     3422 + EXPOSE ${port}
     3423 + 
     3424 + # Use tini as entrypoint to handle PID 1 responsibilities
     3425 + ENTRYPOINT ["/sbin/tini", "--"]
     3426 + CMD ["node", "server.mjs"]
     3427 + `;
3641 3428   }
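For pnpm, the deps stage copies only the lockfile before running `pnpm fetch`, so the downloaded-store layer is invalidated only when the lockfile changes, not on every `package.json` or source edit. A hand-expanded version of that stage (baseImage chosen arbitrarily), written as a template literal to mirror how the package itself stores Dockerfiles:

```ts
// Values interpolated by hand from getPmConfig("pnpm"); illustrative only.
const depsStage = `FROM node:22-alpine AS deps
WORKDIR /app
RUN corepack enable && corepack prepare pnpm@latest --activate
COPY pnpm-lock.yaml ./
RUN --mount=type=cache,id=pnpm,target=/root/.local/share/pnpm/store \\
    pnpm fetch
COPY package.json ./
RUN --mount=type=cache,id=pnpm,target=/root/.local/share/pnpm/store \\
    pnpm install --frozen-lockfile --offline`;
```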
3642      - const logger$5 = console;
3643 3429   /**
3644      - *
     3430 + * Generate a Dockerfile optimized for Turbo monorepos
     3431 + * Uses turbo prune to create minimal Docker context
3645 3432   */
3646      - function
3647      - 
3648      - 
     3433 + function generateTurboDockerfile(options) {
     3434 +   const { baseImage, port, healthCheckPath, turboPackage, packageManager } = options;
     3435 +   const pm = getPmConfig(packageManager);
     3436 +   const installPm = pm.install ? `RUN ${pm.install}` : "";
     3437 +   const turboInstallCmd = getTurboInstallCmd(packageManager);
     3438 +   const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
     3439 +   return `# syntax=docker/dockerfile:1
     3440 + # Stage 1: Prune monorepo
     3441 + FROM ${baseImage} AS pruner
     3442 + 
     3443 + WORKDIR /app
     3444 + 
     3445 + ${installPm}
     3446 + 
     3447 + COPY . .
     3448 + 
     3449 + # Prune to only include necessary packages
     3450 + RUN ${turboCmd} prune ${turboPackage} --docker
     3451 + 
     3452 + # Stage 2: Install dependencies
     3453 + FROM ${baseImage} AS deps
     3454 + 
     3455 + WORKDIR /app
     3456 + 
     3457 + ${installPm}
     3458 + 
     3459 + # Copy pruned lockfile and package.jsons
     3460 + COPY --from=pruner /app/out/${pm.lockfile} ./
     3461 + COPY --from=pruner /app/out/json/ ./
     3462 + 
     3463 + # Install dependencies (no frozen-lockfile since turbo prune creates a subset)
     3464 + RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
     3465 +     ${turboInstallCmd}
     3466 + 
     3467 + # Stage 3: Build
     3468 + FROM deps AS builder
     3469 + 
     3470 + WORKDIR /app
     3471 + 
     3472 + # Copy pruned source
     3473 + COPY --from=pruner /app/out/full/ ./
     3474 + 
     3475 + # Debug: Show node_modules/.bin contents and build production server
     3476 + RUN echo "=== node_modules/.bin contents ===" && \
     3477 +   ls -la node_modules/.bin/ 2>/dev/null || echo "node_modules/.bin not found" && \
     3478 +   echo "=== Checking for gkm ===" && \
     3479 +   which gkm 2>/dev/null || echo "gkm not in PATH" && \
     3480 +   ls -la node_modules/.bin/gkm 2>/dev/null || echo "gkm binary not found in node_modules/.bin" && \
     3481 +   echo "=== Running build ===" && \
     3482 +   ./node_modules/.bin/gkm build --provider server --production
     3483 + 
     3484 + # Stage 4: Production
     3485 + FROM ${baseImage} AS runner
     3486 + 
     3487 + WORKDIR /app
     3488 + 
     3489 + RUN apk add --no-cache tini
     3490 + 
     3491 + RUN addgroup --system --gid 1001 nodejs && \\
     3492 +     adduser --system --uid 1001 hono
     3493 + 
     3494 + COPY --from=builder --chown=hono:nodejs /app/.gkm/server/dist/server.mjs ./
     3495 + 
     3496 + ENV NODE_ENV=production
     3497 + ENV PORT=${port}
     3498 + 
     3499 + HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
     3500 +   CMD wget -qO- http://localhost:${port}${healthCheckPath} > /dev/null 2>&1 || exit 1
     3501 + 
     3502 + USER hono
     3503 + 
     3504 + EXPOSE ${port}
     3505 + 
     3506 + ENTRYPOINT ["/sbin/tini", "--"]
     3507 + CMD ["node", "server.mjs"]
     3508 + `;
3649 3509   }
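Why the stages copy `out/json/` before `out/full/`: per the Turborepo docs, `turbo prune <pkg> --docker` splits its output so Docker can cache the install layer independently of source changes. As grounded in the `COPY` paths above (directory roles per Turborepo's documented behavior):

```ts
// What `turbo prune <pkg> --docker` leaves behind:
//   out/json/          – package.json files only → cache-friendly install layer
//   out/full/          – the pruned source tree  → copied after install
//   out/<lockfile>     – a lockfile subset for the pruned packages
// A source-only edit therefore re-runs the build stage but reuses the
// installed-dependencies layer.
```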
3650 3510   /**
3651      - *
3652      - * @param imageRef - Full image reference (registry/name:tag)
3653      - * @param appName - Name of the app (used for Dockerfile.{appName} in workspaces)
3654      - * @param buildArgs - Build arguments to pass to docker build
     3511 + * Generate a slim Dockerfile for pre-built bundles
3655 3512   */
3656-3690  - (35 blank lines)
     3513 + function generateSlimDockerfile(options) {
     3514 +   const { baseImage, port, healthCheckPath } = options;
     3515 +   return `# Slim Dockerfile for pre-built production bundle
     3516 + FROM ${baseImage}
     3517 + 
     3518 + WORKDIR /app
     3519 + 
     3520 + # Install tini for proper signal handling as PID 1
     3521 + # Handles SIGTERM propagation and zombie process reaping
     3522 + RUN apk add --no-cache tini
     3523 + 
     3524 + # Create non-root user
     3525 + RUN addgroup --system --gid 1001 nodejs && \\
     3526 +     adduser --system --uid 1001 hono
     3527 + 
     3528 + # Copy pre-built bundle
     3529 + COPY .gkm/server/dist/server.mjs ./
     3530 + 
     3531 + # Environment
     3532 + ENV NODE_ENV=production
     3533 + ENV PORT=${port}
     3534 + 
     3535 + # Health check
     3536 + HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
     3537 +   CMD wget -qO- http://localhost:${port}${healthCheckPath} > /dev/null 2>&1 || exit 1
     3538 + 
     3539 + # Switch to non-root user
     3540 + USER hono
     3541 + 
     3542 + EXPOSE ${port}
     3543 + 
     3544 + # Use tini as entrypoint to handle PID 1 responsibilities
     3545 + ENTRYPOINT ["/sbin/tini", "--"]
     3546 + CMD ["node", "server.mjs"]
     3547 + `;
3691 3548   }
3692 3549   /**
3693      - *
     3550 + * Generate .dockerignore file
3694 3551   */
3695-3705  - (11 blank lines)
     3552 + function generateDockerignore() {
     3553 +   return `# Dependencies
     3554 + node_modules
     3555 + .pnpm-store
     3556 + 
     3557 + # Build output (except what we need)
     3558 + .gkm/aws*
     3559 + .gkm/server/*.ts
     3560 + !.gkm/server/dist
     3561 + 
     3562 + # IDE and editor
     3563 + .idea
     3564 + .vscode
     3565 + *.swp
     3566 + *.swo
     3567 + 
     3568 + # Git
     3569 + .git
     3570 + .gitignore
     3571 + 
     3572 + # Logs
     3573 + *.log
     3574 + npm-debug.log*
     3575 + pnpm-debug.log*
     3576 + 
     3577 + # Test files
     3578 + **/*.test.ts
     3579 + **/*.spec.ts
     3580 + **/__tests__
     3581 + coverage
     3582 + 
     3583 + # Documentation
     3584 + docs
     3585 + *.md
     3586 + !README.md
     3587 + 
     3588 + # Environment files (handle secrets separately)
     3589 + .env
     3590 + .env.*
     3591 + !.env.example
     3592 + 
     3593 + # Docker files (don't copy recursively)
     3594 + Dockerfile*
     3595 + docker-compose*
     3596 + .dockerignore
     3597 + `;
3706 3598   }
3707 3599   /**
3708      - *
     3600 + * Generate docker-entrypoint.sh for custom startup logic
3709 3601   */
3710-3723  - (14 blank lines)
3724      - logger$5.log("\n  Example docker run:");
3725      - logger$5.log(`  docker run -e GKM_MASTER_KEY=${masterKey} ${imageRef}`);
3726      - }
3727      - return {
3728      -   imageRef,
3729      -   masterKey
3730      - };
     3602 + function generateDockerEntrypoint() {
     3603 +   return `#!/bin/sh
     3604 + set -e
     3605 + 
     3606 + # Run any custom startup scripts here
     3607 + # Example: wait for database
     3608 + # until nc -z $DB_HOST $DB_PORT; do
     3609 + #   echo "Waiting for database..."
     3610 + #   sleep 1
     3611 + # done
     3612 + 
     3613 + # Execute the main command
     3614 + exec "$@"
     3615 + `;
3731 3616   }
3732 3617   /**
3733      - * Resolve Docker
3734      - * - imageName: from config, or cwd package.json, or 'app' (for Docker image)
3735      - * - projectName: from root package.json, or 'app' (for Dokploy project)
3736      - * - appName: from cwd package.json, or projectName (for Dokploy app within project)
     3618 + * Resolve Docker configuration from GkmConfig with defaults
3737 3619   */
3738      - function resolveDockerConfig(config$1) {
3739      - const
3740      - 
3741      - 
     3620 + function resolveDockerConfig$1(config$1) {
     3621 +   const docker = config$1.docker ?? {};
     3622 +   let defaultImageName = "api";
     3623 +   try {
     3624 +     const pkg$1 = __require(`${process.cwd()}/package.json`);
     3625 +     if (pkg$1.name) defaultImageName = pkg$1.name.replace(/^@[^/]+\//, "");
     3626 +   } catch {}
3742 3627     return {
3743      -   registry:
3744      -   imageName,
3745      - 
3746      - 
     3628 +     registry: docker.registry ?? "",
     3629 +     imageName: docker.imageName ?? defaultImageName,
     3630 +     baseImage: docker.baseImage ?? "node:22-alpine",
     3631 +     port: docker.port ?? 3e3,
     3632 +     compose: docker.compose
3747 3633     };
3748 3634   }
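Name resolution, read off the code: a scoped package name loses its scope, so a cwd `package.json` containing `{ "name": "@acme/api" }` yields `imageName: "api"` (the regex `/^@[^/]+\//` strips `@acme/`). With no readable `package.json` and no `docker` config at all, everything falls back to the defaults:

```ts
resolveDockerConfig$1({});
// => { registry: "", imageName: "api", baseImage: "node:22-alpine",
//      port: 3000, compose: undefined }
```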
3749      - 
3750      - //#endregion
3751      - //#region src/deploy/dokploy.ts
3752      - const logger$4 = console;
3753      - /**
3754      - * Get the Dokploy API token from stored credentials or environment
3755      - */
3756      - async function getApiToken$1() {
3757      -   const token = await getDokployToken();
3758      -   if (!token) throw new Error("Dokploy credentials not found.\nRun \"gkm login --service dokploy\" to authenticate, or set DOKPLOY_API_TOKEN.");
3759      -   return token;
3760      - }
3761 3635   /**
3762      - *
     3636 + * Generate a Dockerfile for Next.js frontend apps using standalone output.
     3637 + * Uses turbo prune for monorepo optimization.
     3638 + * @internal Exported for testing
3763 3639   */
3764      - 
3765      - const
3766-3769  - (4 blank lines)
     3640 + function generateNextjsDockerfile(options) {
     3641 +   const { baseImage, port, appPath, turboPackage, packageManager, publicUrlArgs = ["NEXT_PUBLIC_API_URL", "NEXT_PUBLIC_AUTH_URL"] } = options;
     3642 +   const pm = getPmConfig(packageManager);
     3643 +   const installPm = pm.install ? `RUN ${pm.install}` : "";
     3644 +   const turboInstallCmd = getTurboInstallCmd(packageManager);
     3645 +   const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
     3646 +   const publicUrlArgDeclarations = publicUrlArgs.map((arg) => `ARG ${arg}=""`).join("\n");
     3647 +   const publicUrlEnvDeclarations = publicUrlArgs.map((arg) => `ENV ${arg}=$${arg}`).join("\n");
     3648 +   return `# syntax=docker/dockerfile:1
     3649 + # Next.js standalone Dockerfile with turbo prune optimization
     3650 + 
     3651 + # Stage 1: Prune monorepo
     3652 + FROM ${baseImage} AS pruner
     3653 + 
     3654 + WORKDIR /app
     3655 + 
     3656 + ${installPm}
     3657 + 
     3658 + COPY . .
     3659 + 
     3660 + # Prune to only include necessary packages
     3661 + RUN ${turboCmd} prune ${turboPackage} --docker
     3662 + 
     3663 + # Stage 2: Install dependencies
     3664 + FROM ${baseImage} AS deps
     3665 + 
     3666 + WORKDIR /app
     3667 + 
     3668 + ${installPm}
     3669 + 
     3670 + # Copy pruned lockfile and package.jsons
     3671 + COPY --from=pruner /app/out/${pm.lockfile} ./
     3672 + COPY --from=pruner /app/out/json/ ./
     3673 + 
     3674 + # Install dependencies
     3675 + RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
     3676 +     ${turboInstallCmd}
     3677 + 
     3678 + # Stage 3: Build
     3679 + FROM deps AS builder
     3680 + 
     3681 + WORKDIR /app
     3682 + 
     3683 + # Build-time args for public API URLs (populated by gkm deploy)
     3684 + # These get baked into the Next.js build as public environment variables
     3685 + ${publicUrlArgDeclarations}
     3686 + 
     3687 + # Convert ARGs to ENVs for Next.js build
     3688 + ${publicUrlEnvDeclarations}
     3689 + 
     3690 + # Copy pruned source
     3691 + COPY --from=pruner /app/out/full/ ./
     3692 + 
     3693 + # Copy workspace root configs for turbo builds (turbo prune doesn't include root configs)
     3694 + # Using wildcard to make it optional for single-app projects
     3695 + COPY --from=pruner /app/tsconfig.* ./
     3696 + 
     3697 + # Ensure public directory exists (may be empty for scaffolded projects)
     3698 + RUN mkdir -p ${appPath}/public
     3699 + 
     3700 + # Set Next.js to produce standalone output
     3701 + ENV NEXT_TELEMETRY_DISABLED=1
     3702 + 
     3703 + # Build the application
     3704 + RUN ${turboCmd} run build --filter=${turboPackage}
     3705 + 
     3706 + # Stage 4: Production
     3707 + FROM ${baseImage} AS runner
     3708 + 
     3709 + WORKDIR /app
     3710 + 
     3711 + # Install tini for proper signal handling
     3712 + RUN apk add --no-cache tini
     3713 + 
     3714 + # Create non-root user
     3715 + RUN addgroup --system --gid 1001 nodejs && \\
     3716 +     adduser --system --uid 1001 nextjs
     3717 + 
     3718 + # Set environment
     3719 + ENV NODE_ENV=production
     3720 + ENV NEXT_TELEMETRY_DISABLED=1
     3721 + ENV PORT=${port}
     3722 + ENV HOSTNAME="0.0.0.0"
     3723 + 
     3724 + # Copy static files and standalone output
     3725 + COPY --from=builder --chown=nextjs:nodejs /app/${appPath}/.next/standalone ./
     3726 + COPY --from=builder --chown=nextjs:nodejs /app/${appPath}/.next/static ./${appPath}/.next/static
     3727 + COPY --from=builder --chown=nextjs:nodejs /app/${appPath}/public ./${appPath}/public
     3728 + 
     3729 + USER nextjs
     3730 + 
     3731 + EXPOSE ${port}
     3732 + 
     3733 + ENTRYPOINT ["/sbin/tini", "--"]
     3734 + CMD ["node", "${appPath}/server.js"]
     3735 + `;
3770 3736   }
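`NEXT_PUBLIC_*` values are compiled into the client bundle at build time, which is why the template declares them as `ARG`s (converted to `ENV` before the build) rather than relying on runtime env. Based on those declarations, a build would pass them roughly like this (URLs and tag invented):

```ts
// Sketch of the build invocation the template expects:
//
//   docker build -f .gkm/docker/Dockerfile.web \
//     --build-arg NEXT_PUBLIC_API_URL=https://api.example.com \
//     --build-arg NEXT_PUBLIC_AUTH_URL=https://auth.example.com \
//     -t web:latest .
//
// Extra args can be added via options.publicUrlArgs, which defaults to
// ["NEXT_PUBLIC_API_URL", "NEXT_PUBLIC_AUTH_URL"].
```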
3771
3737
|
/**
|
|
3772
|
-
*
|
|
3738
|
+
* Generate a Dockerfile for backend apps in a workspace.
|
|
3739
|
+
* Uses turbo prune for monorepo optimization.
|
|
3740
|
+
* @internal Exported for testing
|
|
3773
3741
|
*/
|
|
3774
|
-
|
|
3775
|
-
const {
|
|
3776
|
-
|
|
3777
|
-
|
|
3778
|
-
|
|
3779
|
-
const
|
|
3780
|
-
|
|
3781
|
-
|
|
3782
|
-
|
|
3783
|
-
|
|
3784
|
-
|
|
3785
|
-
|
|
3786
|
-
|
|
3787
|
-
|
|
3788
|
-
|
|
3789
|
-
|
|
3790
|
-
|
|
3791
|
-
|
|
3792
|
-
|
|
3793
|
-
|
|
3794
|
-
|
|
3795
|
-
|
|
3796
|
-
|
|
3797
|
-
|
|
3798
|
-
|
|
3799
|
-
|
|
3800
|
-
|
|
3801
|
-
|
|
3802
|
-
|
|
3803
|
-
|
|
3804
|
-
|
|
3805
|
-
|
|
3806
|
-
|
|
3807
|
-
|
|
3808
|
-
|
|
3809
|
-
|
|
3810
|
-
|
|
3811
|
-
|
|
3812
|
-
|
|
3813
|
-
|
|
3814
|
-
|
|
3815
|
-
|
|
3816
|
-
|
|
3817
- … (removed lines whose content the diff view did not capture)
- }
+ function generateBackendDockerfile(options) {
+ const { baseImage, port, appPath, turboPackage, packageManager, healthCheckPath = "/health" } = options;
+ const pm = getPmConfig(packageManager);
+ const installPm = pm.install ? `RUN ${pm.install}` : "";
+ const turboInstallCmd = getTurboInstallCmd(packageManager);
+ const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
+ return `# syntax=docker/dockerfile:1
+ # Backend Dockerfile with turbo prune optimization
+
+ # Stage 1: Prune monorepo
+ FROM ${baseImage} AS pruner
+
+ WORKDIR /app
+
+ ${installPm}
+
+ COPY . .
+
+ # Prune to only include necessary packages
+ RUN ${turboCmd} prune ${turboPackage} --docker
+
+ # Stage 2: Install dependencies
+ FROM ${baseImage} AS deps
+
+ WORKDIR /app
+
+ ${installPm}
+
+ # Copy pruned lockfile and package.jsons
+ COPY --from=pruner /app/out/${pm.lockfile} ./
+ COPY --from=pruner /app/out/json/ ./
+
+ # Install dependencies
+ RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
+ ${turboInstallCmd}
+
+ # Stage 3: Build
+ FROM deps AS builder
+
+ WORKDIR /app
+
+ # Build-time args for encrypted secrets
+ ARG GKM_ENCRYPTED_CREDENTIALS=""
+ ARG GKM_CREDENTIALS_IV=""
+
+ # Copy pruned source
+ COPY --from=pruner /app/out/full/ ./
+
+ # Copy workspace root configs for turbo builds (turbo prune doesn't include root configs)
+ # Using wildcard to make it optional for single-app projects
+ COPY --from=pruner /app/gkm.config.* ./
+ COPY --from=pruner /app/tsconfig.* ./
+
+ # Write encrypted credentials for gkm build to embed
+ RUN if [ -n "$GKM_ENCRYPTED_CREDENTIALS" ]; then \
+ mkdir -p ${appPath}/.gkm && \
+ echo "$GKM_ENCRYPTED_CREDENTIALS" > ${appPath}/.gkm/credentials.enc && \
+ echo "$GKM_CREDENTIALS_IV" > ${appPath}/.gkm/credentials.iv; \
+ fi

- …
+ # Build production server using gkm
+ RUN cd ${appPath} && ./node_modules/.bin/gkm build --provider server --production
+
+ # Stage 4: Production
+ FROM ${baseImage} AS runner
+
+ WORKDIR /app
+
+ RUN apk add --no-cache tini
+
+ RUN addgroup --system --gid 1001 nodejs && \\
+ adduser --system --uid 1001 hono
+
+ # Copy bundled server
+ COPY --from=builder --chown=hono:nodejs /app/${appPath}/.gkm/server/dist/server.mjs ./
+
+ ENV NODE_ENV=production
+ ENV PORT=${port}
+
+ HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
+ CMD wget -qO- http://localhost:${port}${healthCheckPath} > /dev/null 2>&1 || exit 1
+
+ USER hono
+
+ EXPOSE ${port}
+
+ ENTRYPOINT ["/sbin/tini", "--"]
+ CMD ["node", "server.mjs"]
+ `;
  }
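For orientation, a minimal sketch of invoking the generator above — the option names are inferred from the destructuring at the top of `generateBackendDockerfile`, and every value here is hypothetical:

```ts
// Hypothetical invocation; values are examples, not defaults from this package
const dockerfile = generateBackendDockerfile({
  baseImage: "node:22-alpine", // assumed; matches the image used by the workspace path below
  port: 3000,
  appPath: "apps/api",
  turboPackage: "@acme/api",   // hypothetical turbo package name
  packageManager: "pnpm",
  // healthCheckPath is optional and defaults to "/health"
});
```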
  /**
- *
+ * Generate a Dockerfile for apps with a custom entry point.
+ * Uses esbuild to bundle the entry point into dist/index.mjs with all dependencies.
+ * This is used for apps that don't use gkm routes (e.g., Better Auth servers).
+ * @internal Exported for testing
  */
- …
- const …
- const …
- …
+ function generateEntryDockerfile(options) {
+ const { baseImage, port, appPath, entry, turboPackage, packageManager, healthCheckPath = "/health" } = options;
+ const pm = getPmConfig(packageManager);
+ const installPm = pm.install ? `RUN ${pm.install}` : "";
+ const turboInstallCmd = getTurboInstallCmd(packageManager);
+ const turboCmd = packageManager === "pnpm" ? "pnpm dlx turbo" : "npx turbo";
+ return `# syntax=docker/dockerfile:1
+ # Entry-based Dockerfile with turbo prune + tsdown bundling
+
+ # Stage 1: Prune monorepo
+ FROM ${baseImage} AS pruner
+
+ WORKDIR /app
+
+ ${installPm}
+
+ COPY . .
+
+ # Prune to only include necessary packages
+ RUN ${turboCmd} prune ${turboPackage} --docker
+
+ # Stage 2: Install dependencies
+ FROM ${baseImage} AS deps
+
+ WORKDIR /app
+
+ ${installPm}
+
+ # Copy pruned lockfile and package.jsons
+ COPY --from=pruner /app/out/${pm.lockfile} ./
+ COPY --from=pruner /app/out/json/ ./
+
+ # Install dependencies
+ RUN --mount=type=cache,id=${pm.cacheId},target=${pm.cacheTarget} \\
+ ${turboInstallCmd}
+
+ # Stage 3: Build with tsdown
+ FROM deps AS builder
+
+ WORKDIR /app
+
+ # Build-time args for encrypted secrets
+ ARG GKM_ENCRYPTED_CREDENTIALS=""
+ ARG GKM_CREDENTIALS_IV=""
+
+ # Copy pruned source
+ COPY --from=pruner /app/out/full/ ./
+
+ # Copy workspace root configs for turbo builds (turbo prune doesn't include root configs)
+ # Using wildcard to make it optional for single-app projects
+ COPY --from=pruner /app/tsconfig.* ./
+
+ # Write encrypted credentials for tsdown to embed via define
+ RUN if [ -n "$GKM_ENCRYPTED_CREDENTIALS" ]; then \
+ mkdir -p ${appPath}/.gkm && \
+ echo "$GKM_ENCRYPTED_CREDENTIALS" > ${appPath}/.gkm/credentials.enc && \
+ echo "$GKM_CREDENTIALS_IV" > ${appPath}/.gkm/credentials.iv; \
+ fi
+
+ # Bundle entry point with esbuild (outputs to dist/index.mjs)
+ # Creates a fully standalone bundle with all dependencies included
+ # Use define to embed credentials if present
+ RUN cd ${appPath} && \
+ if [ -f .gkm/credentials.enc ]; then \
+ CREDS=$(cat .gkm/credentials.enc) && \
+ IV=$(cat .gkm/credentials.iv) && \
+ npx esbuild ${entry} --bundle --platform=node --target=node22 --format=esm \
+ --outfile=dist/index.mjs --packages=bundle \
+ --banner:js='import { createRequire } from "module"; const require = createRequire(import.meta.url);' \
+ --define:__GKM_ENCRYPTED_CREDENTIALS__="'\\"$CREDS\\"'" \
+ --define:__GKM_CREDENTIALS_IV__="'\\"$IV\\"'"; \
+ else \
+ npx esbuild ${entry} --bundle --platform=node --target=node22 --format=esm \
+ --outfile=dist/index.mjs --packages=bundle \
+ --banner:js='import { createRequire } from "module"; const require = createRequire(import.meta.url);'; \
+ fi
+
+ # Stage 4: Production
+ FROM ${baseImage} AS runner
+
+ WORKDIR /app
+
+ RUN apk add --no-cache tini
+
+ RUN addgroup --system --gid 1001 nodejs && \\
+ adduser --system --uid 1001 app
+
+ # Copy bundled output only (no node_modules needed - fully bundled)
+ COPY --from=builder --chown=app:nodejs /app/${appPath}/dist/index.mjs ./
+
+ ENV NODE_ENV=production
+ ENV PORT=${port}
+
+ HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \\
+ CMD wget -qO- http://localhost:${port}${healthCheckPath} > /dev/null 2>&1 || exit 1
+
+ USER app
+
+ EXPOSE ${port}
+
+ ENTRYPOINT ["/sbin/tini", "--"]
+ CMD ["node", "index.mjs"]
+ `;
  }
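A note on the `--define` flags above: esbuild substitutes those identifiers with string literals at bundle time, so the bundled entry can read them with no filesystem access. A sketch of how a bundle could consume them — the consumption side is not shown in this diff, so this is an assumption:

```ts
// Sketch only: these globals exist solely when the --define flags were passed at build time
declare const __GKM_ENCRYPTED_CREDENTIALS__: string | undefined;
declare const __GKM_CREDENTIALS_IV__: string | undefined;

const embeddedCredentials =
  typeof __GKM_ENCRYPTED_CREDENTIALS__ === "string"
    ? { ciphertext: __GKM_ENCRYPTED_CREDENTIALS__, iv: __GKM_CREDENTIALS_IV__ }
    : undefined; // presumably decrypted against GKM_MASTER_KEY at runtime (not shown here)
```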
+
+ //#endregion
+ //#region src/docker/index.ts
+ const logger$5 = console;
  /**
- *
+ * Docker command implementation
+ * Generates Dockerfile, docker-compose.yml, and related files
+ *
+ * Default: Multi-stage Dockerfile that builds from source inside Docker
+ * --slim: Slim Dockerfile that copies pre-built bundle (requires prior build)
  */
- function …
- …
+ async function dockerCommand(options) {
+ const loadedConfig = await loadWorkspaceConfig();
+ if (loadedConfig.type === "workspace") {
+ logger$5.log("📦 Detected workspace configuration");
+ return workspaceDockerCommand(loadedConfig.workspace, options);
+ }
+ const config$1 = await loadConfig();
+ const dockerConfig = resolveDockerConfig$1(config$1);
+ const serverConfig = typeof config$1.providers?.server === "object" ? config$1.providers.server : void 0;
+ const healthCheckPath = serverConfig?.production?.healthCheck ?? "/health";
+ const useSlim = options.slim === true;
+ if (useSlim) {
+ const distDir = join(process.cwd(), ".gkm", "server", "dist");
+ const hasBuild = existsSync(join(distDir, "server.mjs"));
+ if (!hasBuild) throw new Error("Slim Dockerfile requires a pre-built bundle. Run `gkm build --provider server --production` first, or omit --slim to use multi-stage build.");
+ }
+ const dockerDir = join(process.cwd(), ".gkm", "docker");
+ await mkdir(dockerDir, { recursive: true });
+ const packageManager = detectPackageManager$1();
+ const inMonorepo = isMonorepo();
+ const hasTurbo = hasTurboConfig();
+ let useTurbo = options.turbo ?? false;
+ if (inMonorepo && !useSlim) if (hasTurbo) {
+ useTurbo = true;
+ logger$5.log(" Detected monorepo with turbo.json - using turbo prune");
+ } else throw new Error("Monorepo detected but turbo.json not found.\n\nDocker builds in monorepos require Turborepo for proper dependency isolation.\n\nTo fix this:\n 1. Install turbo: pnpm add -Dw turbo\n 2. Create turbo.json in your monorepo root\n 3. Run this command again\n\nSee: https://turbo.build/repo/docs/guides/tools/docker");
+ let turboPackage = options.turboPackage ?? dockerConfig.imageName;
+ if (useTurbo && !options.turboPackage) try {
+ const pkg$1 = __require(`${process.cwd()}/package.json`);
+ if (pkg$1.name) {
+ turboPackage = pkg$1.name;
+ logger$5.log(` Turbo package: ${turboPackage}`);
+ }
+ } catch {}
+ const templateOptions = {
+ imageName: dockerConfig.imageName,
+ baseImage: dockerConfig.baseImage,
+ port: dockerConfig.port,
+ healthCheckPath,
+ prebuilt: useSlim,
+ turbo: useTurbo,
+ turboPackage,
+ packageManager
  };
+ const dockerfile = useSlim ? generateSlimDockerfile(templateOptions) : generateMultiStageDockerfile(templateOptions);
+ const dockerMode = useSlim ? "slim" : useTurbo ? "turbo" : "multi-stage";
+ const dockerfilePath = join(dockerDir, "Dockerfile");
+ await writeFile(dockerfilePath, dockerfile);
+ logger$5.log(`Generated: .gkm/docker/Dockerfile (${dockerMode}, ${packageManager})`);
+ const composeOptions = {
+ imageName: dockerConfig.imageName,
+ registry: options.registry ?? dockerConfig.registry,
+ port: dockerConfig.port,
+ healthCheckPath,
+ services: dockerConfig.compose?.services ?? {}
+ };
+ const hasServices = Array.isArray(composeOptions.services) ? composeOptions.services.length > 0 : Object.keys(composeOptions.services).length > 0;
+ const dockerCompose = hasServices ? generateDockerCompose(composeOptions) : generateMinimalDockerCompose(composeOptions);
+ const composePath = join(dockerDir, "docker-compose.yml");
+ await writeFile(composePath, dockerCompose);
+ logger$5.log("Generated: .gkm/docker/docker-compose.yml");
+ const dockerignore = generateDockerignore();
+ const dockerignorePath = join(process.cwd(), ".dockerignore");
+ await writeFile(dockerignorePath, dockerignore);
+ logger$5.log("Generated: .dockerignore (project root)");
+ const entrypoint = generateDockerEntrypoint();
+ const entrypointPath = join(dockerDir, "docker-entrypoint.sh");
+ await writeFile(entrypointPath, entrypoint);
+ logger$5.log("Generated: .gkm/docker/docker-entrypoint.sh");
+ const result = {
+ dockerfile: dockerfilePath,
+ dockerCompose: composePath,
+ dockerignore: dockerignorePath,
+ entrypoint: entrypointPath
+ };
+ if (options.build) await buildDockerImage(dockerConfig.imageName, options);
+ if (options.push) await pushDockerImage(dockerConfig.imageName, options);
+ return result;
  }
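A hypothetical programmatic call, with option names inferred from the property accesses in `dockerCommand` above (`slim`, `turbo`, `turboPackage`, `registry`, `build`, `push`); all values are illustrative:

```ts
// Illustrative only — not a documented public API
const generated = await dockerCommand({
  slim: false,                   // multi-stage build from source
  turbo: true,                   // force turbo prune even without auto-detection
  turboPackage: "@acme/api",     // hypothetical turbo package filter
  registry: "registry.example.com",
  build: true,                   // also run docker build
  push: false,                   // skip docker push
});
// generated.dockerfile points at <cwd>/.gkm/docker/Dockerfile
```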
  /**
- *
- …
- function …
- return state?.applications[appName];
- }
- /**
- * Set application ID in state (mutates state)
- */
- function setApplicationId(state, appName, applicationId) {
- state.applications[appName] = applicationId;
- }
- /**
- * Get postgres ID from state
- */
- function getPostgresId(state) {
- return state?.services.postgresId;
- }
- /**
- * Set postgres ID in state (mutates state)
+ * Ensure lockfile exists in the build context
+ * For monorepos, copies from workspace root if needed
+ * Returns cleanup function if file was copied
  */
- function …
- …
+ function ensureLockfile(cwd) {
+ const lockfilePath = findLockfilePath(cwd);
+ if (!lockfilePath) {
+ logger$5.warn("\n⚠️ No lockfile found. Docker build may fail or use stale dependencies.");
+ return null;
+ }
+ const lockfileName = basename(lockfilePath);
+ const localLockfile = join(cwd, lockfileName);
+ if (lockfilePath === localLockfile) return null;
+ logger$5.log(` Copying ${lockfileName} from monorepo root...`);
+ copyFileSync(lockfilePath, localLockfile);
+ return () => {
+ try {
+ unlinkSync(localLockfile);
+ } catch {}
+ };
  }
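The cleanup contract is worth spelling out: a non-null return means the lockfile was copied into `cwd` and should be removed once the build finishes — exactly how `buildDockerImage` below uses it. A minimal sketch:

```ts
// Sketch of the copy-then-cleanup contract used by the build step
const cleanup = ensureLockfile(process.cwd());
try {
  // ... docker build runs here ...
} finally {
  cleanup?.(); // no-op when the lockfile already lived in cwd (cleanup === null)
}
```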
  /**
- *
+ * Build Docker image
+ * Uses BuildKit for cache mount support
  */
- function …
- …
+ async function buildDockerImage(imageName, options) {
+ const tag = options.tag ?? "latest";
+ const registry = options.registry;
+ const fullImageName = registry ? `${registry}/${imageName}:${tag}` : `${imageName}:${tag}`;
+ logger$5.log(`\n🐳 Building Docker image: ${fullImageName}`);
+ const cwd = process.cwd();
+ const cleanup = ensureLockfile(cwd);
+ try {
+ execSync(`DOCKER_BUILDKIT=1 docker build -f .gkm/docker/Dockerfile -t ${fullImageName} .`, {
+ cwd,
+ stdio: "inherit",
+ env: {
+ ...process.env,
+ DOCKER_BUILDKIT: "1"
+ }
+ });
+ logger$5.log(`✅ Docker image built: ${fullImageName}`);
+ } catch (error) {
+ throw new Error(`Failed to build Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
+ } finally {
+ cleanup?.();
+ }
  }
  /**
- *
+ * Push Docker image to registry
  */
- function …
- …
+ async function pushDockerImage(imageName, options) {
+ const tag = options.tag ?? "latest";
+ const registry = options.registry;
+ if (!registry) throw new Error("Registry is required to push Docker image. Use --registry or configure docker.registry in gkm.config.ts");
+ const fullImageName = `${registry}/${imageName}:${tag}`;
+ logger$5.log(`\n🚀 Pushing Docker image: ${fullImageName}`);
+ try {
+ execSync(`docker push ${fullImageName}`, {
+ cwd: process.cwd(),
+ stdio: "inherit"
+ });
+ logger$5.log(`✅ Docker image pushed: ${fullImageName}`);
+ } catch (error) {
+ throw new Error(`Failed to push Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
+ }
  }
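Both functions above resolve the same `registry/name:tag` reference, so they pair naturally; a hypothetical pairing (registry and tag are placeholder values):

```ts
// Builds and then pushes registry.example.com/api:v1.2.3 (illustrative values)
await buildDockerImage("api", { tag: "v1.2.3", registry: "registry.example.com" });
await pushDockerImage("api", { tag: "v1.2.3", registry: "registry.example.com" });
```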
- …
- //#endregion
- //#region src/deploy/dns/hostinger-api.ts
- /**
- * Hostinger DNS API client
- *
- * API Documentation: https://developers.hostinger.com/
- * Authentication: Bearer token from hpanel.hostinger.com/profile/api
- */
- const HOSTINGER_API_BASE = "https://developers.hostinger.com";
  /**
- *
+ * Get the package name from package.json in an app directory.
  */
- …
+ function getAppPackageName(appPath) {
+ try {
+ const pkgPath = join(appPath, "package.json");
+ if (!existsSync(pkgPath)) return void 0;
+ const content = readFileSync(pkgPath, "utf-8");
+ const pkg$1 = JSON.parse(content);
+ return pkg$1.name;
+ } catch {
+ return void 0;
  }
- }
+ }
  /**
- *
- *
- * @example
- * ```ts
- * const api = new HostingerApi(token);
- *
- * // Get all records for a domain
- * const records = await api.getRecords('traflabs.io');
- *
- * // Create/update records
- * await api.upsertRecords('traflabs.io', [
- * { name: 'api.joemoer', type: 'A', ttl: 300, records: ['1.2.3.4'] }
- * ]);
- * ```
+ * Generate Dockerfiles for all apps in a workspace.
+ * @internal Exported for testing
  */
- …
- const …
- const …
- …
+ async function workspaceDockerCommand(workspace, options) {
+ const results = [];
+ const apps = Object.entries(workspace.apps);
+ logger$5.log(`\n🐳 Generating Dockerfiles for workspace: ${workspace.name}`);
+ const dockerDir = join(workspace.root, ".gkm", "docker");
+ await mkdir(dockerDir, { recursive: true });
+ const packageManager = detectPackageManager$1(workspace.root);
+ logger$5.log(` Package manager: ${packageManager}`);
+ for (const [appName, app] of apps) {
+ const appPath = app.path;
+ const fullAppPath = join(workspace.root, appPath);
+ const turboPackage = getAppPackageName(fullAppPath) ?? appName;
+ const imageName = appName;
+ const hasEntry = !!app.entry;
+ const buildType = hasEntry ? "entry" : app.type;
+ logger$5.log(`\n 📄 Generating Dockerfile for ${appName} (${buildType})`);
+ let dockerfile;
+ if (app.type === "frontend") dockerfile = generateNextjsDockerfile({
+ imageName,
+ baseImage: "node:22-alpine",
+ port: app.port,
+ appPath,
+ turboPackage,
+ packageManager
  });
- if ( …
- …
- }
- const text = await response.text();
- if (!text || text.trim() === "") return void 0;
- return JSON.parse(text);
- }
- /**
- * Get all DNS records for a domain
- *
- * @param domain - Root domain (e.g., 'traflabs.io')
- */
- async getRecords(domain) {
- const response = await this.request("GET", `/api/dns/v1/zones/${domain}`);
- return response.data || [];
- }
- /**
- * Create or update DNS records
- *
- * @param domain - Root domain (e.g., 'traflabs.io')
- * @param records - Records to create/update
- * @param overwrite - If true, replaces all existing records. If false, merges with existing.
- */
- async upsertRecords(domain, records, overwrite = false) {
- await this.request("PUT", `/api/dns/v1/zones/${domain}`, {
- overwrite,
- zone: records
+ else if (app.entry) dockerfile = generateEntryDockerfile({
+ imageName,
+ baseImage: "node:22-alpine",
+ port: app.port,
+ appPath,
+ entry: app.entry,
+ turboPackage,
+ packageManager,
+ healthCheckPath: "/health"
  });
- …
+ else dockerfile = generateBackendDockerfile({
+ imageName,
+ baseImage: "node:22-alpine",
+ port: app.port,
+ appPath,
+ turboPackage,
+ packageManager,
+ healthCheckPath: "/health"
+ });
+ const dockerfilePath = join(dockerDir, `Dockerfile.${appName}`);
+ await writeFile(dockerfilePath, dockerfile);
+ logger$5.log(` Generated: .gkm/docker/Dockerfile.${appName}`);
+ results.push({
+ appName,
+ type: app.type,
+ dockerfile: dockerfilePath,
+ imageName
  });
- return true;
- }
- /**
- * Delete specific DNS records
- *
- * @param domain - Root domain (e.g., 'traflabs.io')
- * @param filters - Filters to match records for deletion
- */
- async deleteRecords(domain, filters) {
- await this.request("DELETE", `/api/dns/v1/zones/${domain}`, { filters });
- }
- /**
- * Check if a specific record exists
- *
- * @param domain - Root domain (e.g., 'traflabs.io')
- * @param name - Subdomain name (e.g., 'api.joemoer')
- * @param type - Record type (e.g., 'A')
- */
- async recordExists(domain, name$1, type$1 = "A") {
- const records = await this.getRecords(domain);
- return records.some((r) => r.name === name$1 && r.type === type$1);
  }
- …
- name: subdomain,
- type: "A",
- ttl,
- records: [{ content: ip }]
- }]);
- return true;
+ const dockerignore = generateDockerignore();
+ const dockerignorePath = join(workspace.root, ".dockerignore");
+ await writeFile(dockerignorePath, dockerignore);
+ logger$5.log(`\n Generated: .dockerignore (workspace root)`);
+ const dockerCompose = generateWorkspaceCompose(workspace, { registry: options.registry });
+ const composePath = join(dockerDir, "docker-compose.yml");
+ await writeFile(composePath, dockerCompose);
+ logger$5.log(` Generated: .gkm/docker/docker-compose.yml`);
+ logger$5.log(`\n✅ Generated ${results.length} Dockerfile(s) + docker-compose.yml`);
+ logger$5.log("\n📋 Build commands:");
+ for (const result of results) {
+ const icon = result.type === "backend" ? "⚙️" : "🌐";
+ logger$5.log(` ${icon} docker build -f .gkm/docker/Dockerfile.${result.appName} -t ${result.imageName} .`);
  }
- …
+ logger$5.log("\n📋 Run all services:");
+ logger$5.log(" docker compose -f .gkm/docker/docker-compose.yml up --build");
+ return {
+ apps: results,
+ dockerCompose: composePath,
+ dockerignore: dockerignorePath
+ };
+ }
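The workspace shape consumed above is only partially visible in this bundle; the sketch below infers it from the property accesses (`name`, `root`, and per-app `path`/`type`/`port`/`entry`) and is illustrative rather than the package's published type:

```ts
// Hypothetical workspace object, shaped after the property accesses above
const workspace = {
  name: "acme",
  root: "/srv/acme",
  apps: {
    api: { path: "apps/api", type: "backend", port: 3000 },
    auth: { path: "apps/auth", type: "backend", port: 3001, entry: "./src/index.ts" },
    web: { path: "apps/web", type: "frontend", port: 3002 },
  },
};
// Would emit Dockerfile.api (backend), Dockerfile.auth (entry), Dockerfile.web (frontend)
await workspaceDockerCommand(workspace, { registry: "registry.example.com" });
```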

  //#endregion
- //#region src/deploy/ …
- const logger$3 = console;
+ //#region src/deploy/docker.ts
  /**
- *
+ * Get app name from package.json in the current working directory
+ * Used for Dokploy app/project naming
  */
- …
+ function getAppNameFromCwd$1() {
+ const packageJsonPath = join(process.cwd(), "package.json");
+ if (!existsSync(packageJsonPath)) return void 0;
  try {
- const …
- return …
- } catch …
- …
- }
- }
- /**
- * Extract subdomain from full hostname relative to root domain
- *
- * @example
- * extractSubdomain('api.joemoer.traflabs.io', 'traflabs.io') => 'api.joemoer'
- * extractSubdomain('joemoer.traflabs.io', 'traflabs.io') => 'joemoer'
- */
- function extractSubdomain(hostname, rootDomain) {
- if (!hostname.endsWith(rootDomain)) throw new Error(`Hostname ${hostname} is not under root domain ${rootDomain}`);
- const subdomain = hostname.slice(0, -(rootDomain.length + 1));
- return subdomain || "@";
- }
- /**
- * Generate required DNS records for a deployment
- */
- function generateRequiredRecords(appHostnames, rootDomain, serverIp) {
- const records = [];
- for (const [appName, hostname] of appHostnames) {
- const subdomain = extractSubdomain(hostname, rootDomain);
- records.push({
- hostname,
- subdomain,
- type: "A",
- value: serverIp,
- appName
- });
- }
- return records;
- }
- /**
- * Print DNS records table
- */
- function printDnsRecordsTable(records, rootDomain) {
- logger$3.log("\n 📋 DNS Records for " + rootDomain + ":");
- logger$3.log(" ┌─────────────────────────────────────┬──────┬─────────────────┬────────┐");
- logger$3.log(" │ Subdomain │ Type │ Value │ Status │");
- logger$3.log(" ├─────────────────────────────────────┼──────┼─────────────────┼────────┤");
- for (const record of records) {
- const subdomain = record.subdomain.padEnd(35);
- const type$1 = record.type.padEnd(4);
- const value = record.value.padEnd(15);
- let status;
- if (record.error) status = "✗";
- else if (record.created) status = "✓ new";
- else if (record.existed) status = "✓";
- else status = "?";
- logger$3.log(` │ ${subdomain} │ ${type$1} │ ${value} │ ${status.padEnd(6)} │`);
- }
- logger$3.log(" └─────────────────────────────────────┴──────┴─────────────────┴────────┘");
+ const pkg$1 = JSON.parse(readFileSync(packageJsonPath, "utf-8"));
+ if (pkg$1.name) return pkg$1.name.replace(/^@[^/]+\//, "");
+ } catch {}
+ return void 0;
  }
  /**
- *
+ * Get app name from package.json adjacent to the lockfile (project root)
+ * Used for Docker image naming
  */
- function …
- …
+ function getAppNameFromPackageJson() {
+ const cwd = process.cwd();
+ const lockfilePath = findLockfilePath(cwd);
+ if (!lockfilePath) return void 0;
+ const projectRoot = dirname(lockfilePath);
+ const packageJsonPath = join(projectRoot, "package.json");
+ if (!existsSync(packageJsonPath)) return void 0;
+ try {
+ const pkg$1 = JSON.parse(readFileSync(packageJsonPath, "utf-8"));
+ if (pkg$1.name) return pkg$1.name.replace(/^@[^/]+\//, "");
+ } catch {}
+ return void 0;
  }
|
+
const logger$4 = console;
|
|
4138
4228
|
/**
|
|
4139
|
-
*
|
|
4229
|
+
* Get the full image reference
|
|
4140
4230
|
*/
|
|
4141
|
-
|
|
4142
|
-
|
|
4143
|
-
|
|
4144
|
-
stdout$1.write(message);
|
|
4145
|
-
return new Promise((resolve$1) => {
|
|
4146
|
-
let value = "";
|
|
4147
|
-
const onData = (char) => {
|
|
4148
|
-
const c = char.toString();
|
|
4149
|
-
if (c === "\n" || c === "\r") {
|
|
4150
|
-
stdin$1.setRawMode(false);
|
|
4151
|
-
stdin$1.pause();
|
|
4152
|
-
stdin$1.removeListener("data", onData);
|
|
4153
|
-
stdout$1.write("\n");
|
|
4154
|
-
resolve$1(value);
|
|
4155
|
-
} else if (c === "") {
|
|
4156
|
-
stdin$1.setRawMode(false);
|
|
4157
|
-
stdin$1.pause();
|
|
4158
|
-
stdout$1.write("\n");
|
|
4159
|
-
process.exit(1);
|
|
4160
|
-
} else if (c === "" || c === "\b") {
|
|
4161
|
-
if (value.length > 0) value = value.slice(0, -1);
|
|
4162
|
-
} else value += c;
|
|
4163
|
-
};
|
|
4164
|
-
stdin$1.setRawMode(true);
|
|
4165
|
-
stdin$1.resume();
|
|
4166
|
-
stdin$1.on("data", onData);
|
|
4167
|
-
});
|
|
4231
|
+
function getImageRef(registry, imageName, tag) {
|
|
4232
|
+
if (registry) return `${registry}/${imageName}:${tag}`;
|
|
4233
|
+
return `${imageName}:${tag}`;
|
|
4168
4234
|
}
|
|
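`getImageRef` simply prefixes the registry when one is configured:

```ts
getImageRef("registry.example.com", "api", "latest"); // -> "registry.example.com/api:latest"
getImageRef(undefined, "api", "v2");                  // -> "api:v2"
```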
  /**
- *
+ * Build Docker image
+ * @param imageRef - Full image reference (registry/name:tag)
+ * @param appName - Name of the app (used for Dockerfile.{appName} in workspaces)
+ * @param buildArgs - Build arguments to pass to docker build
  */
- async function …
- …
+ async function buildImage(imageRef, appName, buildArgs) {
+ logger$4.log(`\n🔨 Building Docker image: ${imageRef}`);
+ const cwd = process.cwd();
+ const lockfilePath = findLockfilePath(cwd);
+ const lockfileDir = lockfilePath ? dirname(lockfilePath) : cwd;
+ const inMonorepo = lockfileDir !== cwd;
+ if (appName || inMonorepo) logger$4.log(" Generating Dockerfile for monorepo (turbo prune)...");
+ else logger$4.log(" Generating Dockerfile...");
+ await dockerCommand({});
+ const dockerfileSuffix = appName ? `.${appName}` : "";
+ const dockerfilePath = `.gkm/docker/Dockerfile${dockerfileSuffix}`;
+ const buildCwd = lockfilePath && (inMonorepo || appName) ? lockfileDir : cwd;
+ if (buildCwd !== cwd) logger$4.log(` Building from workspace root: ${buildCwd}`);
+ const buildArgsString = buildArgs && buildArgs.length > 0 ? buildArgs.map((arg) => `--build-arg "${arg}"`).join(" ") : "";
+ try {
+ const cmd = [
+ "DOCKER_BUILDKIT=1 docker build",
+ "--platform linux/amd64",
+ `-f ${dockerfilePath}`,
+ `-t ${imageRef}`,
+ buildArgsString,
+ "."
+ ].filter(Boolean).join(" ");
+ execSync(cmd, {
+ cwd: buildCwd,
+ stdio: "inherit",
+ env: {
+ ...process.env,
+ DOCKER_BUILDKIT: "1"
+ }
+ });
+ logger$4.log(`✅ Image built: ${imageRef}`);
+ } catch (error) {
+ throw new Error(`Failed to build Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
  }
- return records;
  }
  /**
- *
+ * Push Docker image to registry
  */
- async function …
- …
- if (!token) {
- logger$3.log("\n 📋 Hostinger API token not found.");
- logger$3.log(" Get your token from: https://hpanel.hostinger.com/profile/api\n");
- try {
- token = await promptForToken(" Hostinger API Token: ");
- await storeHostingerToken(token);
- logger$3.log(" ✓ Token saved");
- } catch {
- logger$3.log(" ⚠ Could not get token, skipping DNS creation");
- return records.map((r) => ({
- ...r,
- error: "No API token"
- }));
- }
- }
- const api = new HostingerApi(token);
- const results = [];
- let existingRecords = [];
+ async function pushImage(imageRef) {
+ logger$4.log(`\n☁️ Pushing image: ${imageRef}`);
  try {
- …
+ execSync(`docker push ${imageRef}`, {
+ cwd: process.cwd(),
+ stdio: "inherit"
+ });
+ logger$4.log(`✅ Image pushed: ${imageRef}`);
  } catch (error) {
- …
- logger$3.log(` ⚠ Failed to fetch existing DNS records: ${message}`);
- return records.map((r) => ({
- ...r,
- error: message
- }));
- }
- for (const record of records) {
- const existing = existingRecords.find((r) => r.name === record.subdomain && r.type === "A");
- if (existing) {
- results.push({
- ...record,
- existed: true,
- created: false
- });
- continue;
- }
- try {
- await api.upsertRecords(rootDomain, [{
- name: record.subdomain,
- type: "A",
- ttl,
- records: [{ content: record.value }]
- }]);
- results.push({
- ...record,
- created: true,
- existed: false
- });
- } catch (error) {
- const message = error instanceof Error ? error.message : "Unknown error";
- results.push({
- ...record,
- error: message
- });
- }
+ throw new Error(`Failed to push Docker image: ${error instanceof Error ? error.message : "Unknown error"}`);
  }
- return results;
  }
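A hypothetical `buildImage` call that forwards the encrypted-credential build args consumed by the `ARG`s in the generated Dockerfiles earlier in this diff; the values are placeholders, not real credentials:

```ts
// Illustrative: the ARG names match the generated Dockerfiles above
await buildImage("registry.example.com/api:latest", "api", [
  "GKM_ENCRYPTED_CREDENTIALS=<base64-ciphertext>", // placeholder
  "GKM_CREDENTIALS_IV=<hex-iv>",                   // placeholder
]);
```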
  /**
- *
+ * Deploy using Docker (build and optionally push image)
  */
- async function …
- …
- const …
- …
+ async function deployDocker(options) {
+ const { stage, tag, skipPush, masterKey, config: config$1, buildArgs } = options;
+ const imageName = config$1.imageName;
+ const imageRef = getImageRef(config$1.registry, imageName, tag);
+ await buildImage(imageRef, config$1.appName, buildArgs);
+ if (!skipPush) if (!config$1.registry) logger$4.warn("\n⚠️ No registry configured. Use --skip-push or configure docker.registry in gkm.config.ts");
+ else await pushImage(imageRef);
+ logger$4.log("\n✅ Docker deployment ready!");
+ logger$4.log(`\n📋 Deployment details:`);
+ logger$4.log(` Image: ${imageRef}`);
+ logger$4.log(` Stage: ${stage}`);
+ if (masterKey) {
+ logger$4.log(`\n🔐 Deploy with this environment variable:`);
+ logger$4.log(` GKM_MASTER_KEY=${masterKey}`);
+ logger$4.log("\n Example docker run:");
+ logger$4.log(` docker run -e GKM_MASTER_KEY=${masterKey} ${imageRef}`);
  }
- …
+ return {
+ imageRef,
+ masterKey
+ };
+ }
+ /**
+ * Resolve Docker deploy config from gkm config
+ * - imageName: from config, or cwd package.json, or 'app' (for Docker image)
+ * - projectName: from root package.json, or 'app' (for Dokploy project)
+ * - appName: from cwd package.json, or projectName (for Dokploy app within project)
+ */
+ function resolveDockerConfig(config$1) {
+ const projectName = getAppNameFromPackageJson() ?? "app";
+ const appName = getAppNameFromCwd$1() ?? projectName;
+ const imageName = config$1.docker?.imageName ?? appName;
+ return {
+ registry: config$1.docker?.registry,
+ imageName,
+ projectName,
+ appName
+ };
+ }
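Putting this region together, a hypothetical end-to-end invocation — the config contents and environment are assumptions, and `loadConfig` is the loader that appears earlier in this bundle:

```ts
// Sketch: resolve naming, then build and (optionally) push
const dockerCfg = resolveDockerConfig(await loadConfig());
const { imageRef } = await deployDocker({
  stage: "production",
  tag: "latest",
  skipPush: false,
  masterKey: process.env.GKM_MASTER_KEY, // optional
  config: dockerCfg,
  buildArgs: [],
});
```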
+
+ //#endregion
+ //#region src/deploy/dokploy.ts
+ const logger$3 = console;
+ /**
+ * Get the Dokploy API token from stored credentials or environment
+ */
+ async function getApiToken$1() {
+ const token = await getDokployToken();
+ if (!token) throw new Error("Dokploy credentials not found.\nRun \"gkm login --service dokploy\" to authenticate, or set DOKPLOY_API_TOKEN.");
+ return token;
+ }
+ /**
+ * Create a Dokploy API client
+ */
+ async function createApi$1(endpoint) {
+ const token = await getApiToken$1();
+ return new DokployApi({
+ baseUrl: endpoint,
+ token
+ });
+ }
+ /**
+ * Deploy to Dokploy
+ */
+ async function deployDokploy(options) {
+ const { stage, imageRef, masterKey, config: config$1 } = options;
+ logger$3.log(`\n🎯 Deploying to Dokploy...`);
+ logger$3.log(` Endpoint: ${config$1.endpoint}`);
+ logger$3.log(` Application: ${config$1.applicationId}`);
+ const api = await createApi$1(config$1.endpoint);
+ logger$3.log(` Configuring Docker image: ${imageRef}`);
+ const registryOptions = {};
+ if (config$1.registryId) {
+ registryOptions.registryId = config$1.registryId;
+ logger$3.log(` Using Dokploy registry: ${config$1.registryId}`);
+ } else {
+ const storedRegistryId = await getDokployRegistryId();
+ if (storedRegistryId) {
+ registryOptions.registryId = storedRegistryId;
+ logger$3.log(` Using stored Dokploy registry: ${storedRegistryId}`);
+ } else if (config$1.registryCredentials) {
+ registryOptions.username = config$1.registryCredentials.username;
+ registryOptions.password = config$1.registryCredentials.password;
+ registryOptions.registryUrl = config$1.registryCredentials.registryUrl;
+ logger$3.log(` Using registry credentials for: ${config$1.registryCredentials.registryUrl}`);
+ } else {
+ const username = process.env.DOCKER_REGISTRY_USERNAME;
+ const password = process.env.DOCKER_REGISTRY_PASSWORD;
+ const registryUrl = process.env.DOCKER_REGISTRY_URL || config$1.registry;
+ if (username && password && registryUrl) {
+ registryOptions.username = username;
+ registryOptions.password = password;
+ registryOptions.registryUrl = registryUrl;
+ logger$3.log(` Using registry credentials from environment`);
+ }
+ }
  }
- …
- const …
- …
+ await api.saveDockerProvider(config$1.applicationId, imageRef, registryOptions);
+ logger$3.log(" ✓ Docker provider configured");
+ const envVars = {};
+ if (masterKey) envVars.GKM_MASTER_KEY = masterKey;
+ if (Object.keys(envVars).length > 0) {
+ logger$3.log(" Updating environment variables...");
+ const envString = Object.entries(envVars).map(([key, value]) => `${key}=${value}`).join("\n");
+ await api.saveApplicationEnv(config$1.applicationId, envString);
+ logger$3.log(" ✓ Environment variables updated");
+ }
+ logger$3.log(" Triggering deployment...");
+ await api.deployApplication(config$1.applicationId);
+ logger$3.log(" ✓ Deployment triggered");
+ logger$3.log("\n✅ Dokploy deployment initiated!");
+ logger$3.log(`\n📋 Deployment details:`);
+ logger$3.log(` Image: ${imageRef}`);
+ logger$3.log(` Stage: ${stage}`);
+ logger$3.log(` Application ID: ${config$1.applicationId}`);
+ if (masterKey) logger$3.log(`\n🔐 GKM_MASTER_KEY has been set in Dokploy environment`);
+ const deploymentUrl = `${config$1.endpoint}/project/${config$1.projectId}`;
+ logger$3.log(`\n🔗 View deployment: ${deploymentUrl}`);
  return {
- …
+ imageRef,
+ masterKey,
+ url: deploymentUrl
  };
  }

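The registry-credential resolution inside `deployDokploy` follows a fixed precedence; a condensed, self-contained restatement of the same logic (parameter names are stand-ins, not the bundle's API):

```ts
// Precedence: 1. explicit config.registryId  2. stored Dokploy registry id
//             3. config.registryCredentials  4. DOCKER_REGISTRY_* environment variables
function pickRegistryOptions(
  configRegistryId?: string,
  storedRegistryId?: string,
  creds?: { username: string; password: string; registryUrl: string },
  env?: { username?: string; password?: string; registryUrl?: string },
): Record<string, string> {
  if (configRegistryId) return { registryId: configRegistryId };
  if (storedRegistryId) return { registryId: storedRegistryId };
  if (creds) return { ...creds };
  if (env?.username && env.password && env.registryUrl)
    return { username: env.username, password: env.password, registryUrl: env.registryUrl };
  return {}; // Dokploy then pulls without explicit registry auth
}
```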
@@ -4373,6 +4492,107 @@ function getPublicUrlArgNames(app) {
  return app.dependencies.map((dep) => `NEXT_PUBLIC_${dep.toUpperCase()}_URL`);
  }

+ //#endregion
+ //#region src/deploy/env-resolver.ts
+ /**
+ * Generate a secure random secret (64 hex characters = 32 bytes)
+ */
+ function generateSecret() {
+ return randomBytes(32).toString("hex");
+ }
+ /**
+ * Get or generate a secret for an app.
+ * If the secret already exists in state, returns it.
+ * Otherwise generates a new one and stores it.
+ */
+ function getOrGenerateSecret(state, appName, secretName) {
+ const existing = getGeneratedSecret(state, appName, secretName);
+ if (existing) return existing;
+ const generated = generateSecret();
+ setGeneratedSecret(state, appName, secretName, generated);
+ return generated;
+ }
+ /**
+ * Build a DATABASE_URL for an app with per-app credentials
+ */
+ function buildDatabaseUrl(credentials, postgres) {
+ const { dbUser, dbPassword } = credentials;
+ const { host, port, database } = postgres;
+ return `postgresql://${encodeURIComponent(dbUser)}:${encodeURIComponent(dbPassword)}@${host}:${port}/${database}`;
+ }
+ /**
+ * Build a REDIS_URL
+ */
+ function buildRedisUrl(redis) {
+ const { host, port, password } = redis;
+ if (password) return `redis://:${encodeURIComponent(password)}@${host}:${port}`;
+ return `redis://${host}:${port}`;
+ }
+ /**
+ * Resolve a single environment variable
+ */
+ function resolveEnvVar(varName, context) {
+ switch (varName) {
+ case "PORT": return String(context.app.port);
+ case "NODE_ENV": return context.stage === "production" ? "production" : "development";
+ case "DATABASE_URL":
+ if (context.appCredentials && context.postgres) return buildDatabaseUrl(context.appCredentials, context.postgres);
+ break;
+ case "REDIS_URL":
+ if (context.redis) return buildRedisUrl(context.redis);
+ break;
+ case "BETTER_AUTH_URL": return `https://${context.appHostname}`;
+ case "BETTER_AUTH_SECRET": return getOrGenerateSecret(context.state, context.appName, "BETTER_AUTH_SECRET");
+ case "BETTER_AUTH_TRUSTED_ORIGINS":
+ if (context.frontendUrls.length > 0) return context.frontendUrls.join(",");
+ break;
+ case "GKM_MASTER_KEY":
+ if (context.masterKey) return context.masterKey;
+ break;
+ }
+ if (context.userSecrets) {
+ if (context.userSecrets.custom[varName]) return context.userSecrets.custom[varName];
+ if (varName in context.userSecrets.urls) return context.userSecrets.urls[varName];
+ if (varName === "POSTGRES_PASSWORD" && context.userSecrets.services.postgres) return context.userSecrets.services.postgres.password;
+ if (varName === "REDIS_PASSWORD" && context.userSecrets.services.redis) return context.userSecrets.services.redis.password;
+ }
+ return void 0;
+ }
+ /**
+ * Resolve all environment variables for an app
+ */
+ function resolveEnvVars(requiredVars, context) {
+ const resolved = {};
+ const missing = [];
+ for (const varName of requiredVars) {
+ const value = resolveEnvVar(varName, context);
+ if (value !== void 0) resolved[varName] = value;
+ else missing.push(varName);
+ }
+ return {
+ resolved,
+ missing
+ };
+ }
+ /**
+ * Format missing variables error message
+ */
+ function formatMissingVarsError(appName, missing, stage) {
+ const varList = missing.map((v) => ` - ${v}`).join("\n");
+ return `Deployment failed: ${appName} is missing required environment variables:\n${varList}\n\nAdd them with:\n gkm secrets:set <VAR_NAME> <value> --stage ${stage}\n\nOr add them to the app's requiredEnv in gkm.config.ts to have them auto-resolved.`;
+ }
+ /**
+ * Validate that all required environment variables can be resolved
+ */
+ function validateEnvVars(requiredVars, context) {
+ const { resolved, missing } = resolveEnvVars(requiredVars, context);
+ return {
+ valid: missing.length === 0,
+ missing,
+ resolved
+ };
+ }
+
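A hypothetical resolution pass through the resolver above; the context field names are taken from the `switch` in `resolveEnvVar`, and every value is illustrative:

```ts
// deployState stands in for the persisted deploy state consumed by
// getOrGenerateSecret; its real shape is not part of this diff.
const deployState: any = { apps: {} };
const { resolved, missing } = resolveEnvVars(
  ["PORT", "DATABASE_URL", "BETTER_AUTH_URL", "STRIPE_KEY"],
  {
    app: { port: 3001 },
    appName: "auth",
    appHostname: "auth.example.com", // hypothetical hostname
    stage: "production",
    state: deployState,
    appCredentials: { dbUser: "auth", dbPassword: "s3cret" },
    postgres: { host: "db.internal", port: 5432, database: "acme" },
    redis: undefined,
    frontendUrls: ["https://app.example.com"],
    masterKey: undefined,
    userSecrets: undefined,
  },
);
// resolved.DATABASE_URL -> "postgresql://auth:s3cret@db.internal:5432/acme"
// missing -> ["STRIPE_KEY"] (no built-in rule and no user secret supplied)
```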
  //#endregion
  //#region src/deploy/init.ts
  const logger$2 = console;
@@ -4645,14 +4865,35 @@ function generateSecretsReport(encryptedApps, sniffedApps) {

  //#endregion
  //#region src/deploy/sniffer.ts
+ const __filename = fileURLToPath(import.meta.url);
+ const __dirname = dirname(__filename);
+ /**
+ * Resolve the path to a sniffer helper file.
+ * Handles both dev (.ts with tsx) and production (.mjs from dist).
+ *
+ * In production: sniffer.ts is bundled into dist/index.mjs, but sniffer helper
+ * files are output to dist/deploy/ as standalone modules for subprocess loading.
+ *
+ * In development: All files are in src/deploy/ and loaded via tsx.
+ */
+ function resolveSnifferFile(baseName) {
+ const deployMjsPath = resolve(__dirname, "deploy", `${baseName}.mjs`);
+ if (existsSync(deployMjsPath)) return deployMjsPath;
+ const mjsPath = resolve(__dirname, `${baseName}.mjs`);
+ if (existsSync(mjsPath)) return mjsPath;
+ const tsPath = resolve(__dirname, `${baseName}.ts`);
+ if (existsSync(tsPath)) return tsPath;
+ return tsPath;
+ }
  /**
  * Get required environment variables for an app.
  *
- * Detection strategy:
- *
- *
- *
- *
+ * Detection strategy (in order):
+ * 1. Frontend apps: Returns empty (no server secrets)
+ * 2. Apps with `requiredEnv`: Uses explicit list from config
+ * 3. Entry apps: Imports entry file in subprocess to capture config.parse() calls
+ * 4. Apps with `envParser`: Runs SnifferEnvironmentParser to detect usage
+ * 5. Apps with neither: Returns empty
  *
  * This function handles "fire and forget" async operations gracefully,
  * capturing errors and unhandled rejections without failing the build.
@@ -4673,6 +4914,14 @@ async function sniffAppEnvironment(app, appName, workspacePath, options = {}) {
  appName,
  requiredEnvVars: [...app.requiredEnv]
  };
+ if (app.entry) {
+ const result = await sniffEntryFile(app.entry, app.path, workspacePath);
+ if (logWarnings && result.error) console.warn(`[sniffer] ${appName}: Entry file threw error during sniffing (env vars still captured): ${result.error.message}`);
+ return {
+ appName,
+ requiredEnvVars: result.envVars
+ };
+ }
  if (app.envParser) {
  const result = await sniffEnvParser(app.envParser, app.path, workspacePath);
  if (logWarnings) {
@@ -4690,6 +4939,80 @@ async function sniffAppEnvironment(app, appName, workspacePath, options = {}) {
  };
  }
  /**
+ * Sniff an entry file by importing it in a subprocess.
+ *
+ * Entry apps call `config.parse()` at module load time. To capture which
+ * env vars are accessed, we:
+ * 1. Spawn a subprocess with a module loader hook
+ * 2. The loader intercepts `@geekmidas/envkit` and replaces EnvironmentParser
+ * with SnifferEnvironmentParser
+ * 3. Import the entry file (triggers config.parse())
+ * 4. Capture and return the accessed env var names
+ *
+ * This approach provides process isolation - each app is sniffed in its own
+ * subprocess, preventing module cache pollution.
+ *
+ * @param entryPath - Relative path to the entry file (e.g., './src/index.ts')
+ * @param appPath - The app's path relative to workspace (e.g., 'apps/auth')
+ * @param workspacePath - Absolute path to workspace root
+ * @returns EntrySniffResult with env vars and optional error
+ */
+ async function sniffEntryFile(entryPath, appPath, workspacePath) {
+ const fullEntryPath = resolve(workspacePath, appPath, entryPath);
+ const loaderPath = resolveSnifferFile("sniffer-loader");
+ const workerPath = resolveSnifferFile("sniffer-worker");
+ return new Promise((resolvePromise) => {
+ const child = spawn("node", [
+ "--import",
+ loaderPath,
+ workerPath,
+ fullEntryPath
+ ], {
+ cwd: resolve(workspacePath, appPath),
+ stdio: [
+ "ignore",
+ "pipe",
+ "pipe"
+ ],
+ env: {
+ ...process.env,
+ NODE_OPTIONS: "--import tsx"
+ }
+ });
+ let stdout$1 = "";
+ let stderr = "";
+ child.stdout.on("data", (data) => {
+ stdout$1 += data.toString();
+ });
+ child.stderr.on("data", (data) => {
+ stderr += data.toString();
+ });
+ child.on("close", (code) => {
+ try {
+ const jsonMatch = stdout$1.match(/\{[^{}]*"envVars"[^{}]*\}[^{]*$/);
+ if (jsonMatch) {
+ const result = JSON.parse(jsonMatch[0]);
+ resolvePromise({
+ envVars: result.envVars || [],
+ error: result.error ? new Error(result.error) : void 0
+ });
+ return;
+ }
+ } catch {}
+ resolvePromise({
+ envVars: [],
+ error: new Error(`Failed to sniff entry file (exit code ${code}): ${stderr || stdout$1 || "No output"}`)
+ });
+ });
+ child.on("error", (err) => {
+ resolvePromise({
+ envVars: [],
+ error: err
+ });
+ });
+ });
+ }
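The subprocess contract implied above: the worker prints a single JSON object containing `envVars` to stdout, and the parent extracts it with the regex shown even when log noise precedes it. A small standalone check of that parsing step:

```ts
// Mirrors the parsing in sniffEntryFile's "close" handler
const sample = 'app booted\n{"envVars":["DATABASE_URL","PORT"],"error":null}\n';
const match = sample.match(/\{[^{}]*"envVars"[^{}]*\}[^{]*$/);
const parsed = match ? JSON.parse(match[0]) : { envVars: [] };
// parsed.envVars -> ["DATABASE_URL", "PORT"]
```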
+ /**
  * Run the SnifferEnvironmentParser on an envParser module to detect
  * which environment variables it accesses.
  *
@@ -4799,10 +5122,130 @@ async function prompt(message, hidden = false) {
  }
  }
  /**
+ * Wait for Postgres to be ready to accept connections.
+ *
+ * Polls the Postgres server until it accepts a connection or max retries reached.
+ * Used after enabling the external port to ensure the database is accessible
+ * before creating users.
+ *
+ * @param host - The Postgres server hostname
+ * @param port - The external port (typically 5432)
+ * @param user - Master database user (postgres)
+ * @param password - Master database password
+ * @param database - Database name to connect to
+ * @param maxRetries - Maximum number of connection attempts (default: 30)
+ * @param retryIntervalMs - Milliseconds between retries (default: 2000)
+ * @throws Error if Postgres is not ready after maxRetries
+ */
+ async function waitForPostgres(host, port, user, password, database, maxRetries = 30, retryIntervalMs = 2e3) {
+ for (let i = 0; i < maxRetries; i++) try {
+ const client = new Client({
+ host,
+ port,
+ user,
+ password,
+ database
+ });
+ await client.connect();
+ await client.end();
+ return;
+ } catch {
+ if (i < maxRetries - 1) {
+ logger$1.log(` Waiting for Postgres... (${i + 1}/${maxRetries})`);
+ await new Promise((r) => setTimeout(r, retryIntervalMs));
+ }
+ }
+ throw new Error(`Postgres not ready after ${maxRetries} retries`);
+ }
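Illustrative use of the poller above — with the defaults it waits up to about 60 seconds (30 attempts, 2 s apart); the hostname and credentials are placeholders:

```ts
// Throws if the server is still unreachable after 30 attempts
await waitForPostgres("dokploy.example.com", 5432, "postgres", "master-password", "acme_db");
```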
5160
|
+
/**
|
|
5161
|
+
* Initialize Postgres with per-app users and schemas.
|
|
5162
|
+
*
|
|
5163
|
+
* This function implements the same user/schema isolation pattern used in local
|
|
5164
|
+
* dev mode (see docker/postgres/init.sh). It:
|
|
+ *
+ * 1. Temporarily enables the external Postgres port
+ * 2. Connects using master credentials
+ * 3. Creates each user with appropriate schema permissions
+ * 4. Disables the external port for security
+ *
+ * Schema assignment follows this pattern:
+ * - `api` app: Uses `public` schema (shared tables, migrations run here)
+ * - Other apps: Get their own schema with `search_path` configured
+ *
+ * @param api - The Dokploy API client
+ * @param postgres - The provisioned Postgres service details
+ * @param serverHostname - The Dokploy server hostname (for external connection)
+ * @param users - Array of users to create with their schema configuration
+ *
+ * @example
+ * ```ts
+ * await initializePostgresUsers(api, postgres, 'dokploy.example.com', [
+ *   { name: 'api', password: 'xxx', usePublicSchema: true },
+ *   { name: 'auth', password: 'yyy', usePublicSchema: false },
+ * ]);
+ * ```
+ */
+async function initializePostgresUsers(api, postgres, serverHostname, users) {
+  logger$1.log("\n🔧 Initializing database users...");
+  const externalPort = 5432;
+  logger$1.log(` Enabling external port ${externalPort}...`);
+  await api.savePostgresExternalPort(postgres.postgresId, externalPort);
+  await api.deployPostgres(postgres.postgresId);
+  logger$1.log(` Waiting for Postgres to be accessible at ${serverHostname}:${externalPort}...`);
+  await waitForPostgres(serverHostname, externalPort, postgres.databaseUser, postgres.databasePassword, postgres.databaseName);
+  const client = new Client({
+    host: serverHostname,
+    port: externalPort,
+    user: postgres.databaseUser,
+    password: postgres.databasePassword,
+    database: postgres.databaseName
+  });
+  try {
+    await client.connect();
+    for (const user of users) {
+      const schemaName = user.usePublicSchema ? "public" : user.name;
+      logger$1.log(` Creating user "${user.name}" with schema "${schemaName}"...`);
+      await client.query(`
+        DO $$ BEGIN
+          CREATE USER "${user.name}" WITH PASSWORD '${user.password}';
+        EXCEPTION WHEN duplicate_object THEN
+          ALTER USER "${user.name}" WITH PASSWORD '${user.password}';
+        END $$;
+      `);
+      if (user.usePublicSchema) await client.query(`
+        GRANT ALL ON SCHEMA public TO "${user.name}";
+        ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON TABLES TO "${user.name}";
+        ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON SEQUENCES TO "${user.name}";
+      `);
+      else await client.query(`
+        CREATE SCHEMA IF NOT EXISTS "${schemaName}" AUTHORIZATION "${user.name}";
+        ALTER USER "${user.name}" SET search_path TO "${schemaName}";
+        GRANT USAGE ON SCHEMA "${schemaName}" TO "${user.name}";
+        GRANT ALL ON ALL TABLES IN SCHEMA "${schemaName}" TO "${user.name}";
+        ALTER DEFAULT PRIVILEGES IN SCHEMA "${schemaName}" GRANT ALL ON TABLES TO "${user.name}";
+      `);
+      logger$1.log(` ✓ User "${user.name}" configured`);
+    }
+  } finally {
+    await client.end();
+  }
+  logger$1.log(" Disabling external port...");
+  await api.savePostgresExternalPort(postgres.postgresId, null);
+  await api.deployPostgres(postgres.postgresId);
+  logger$1.log(" ✓ Database users initialized");
+}
+/**
+ * Get the server hostname from the Dokploy endpoint URL
+ */
+function getServerHostname(endpoint) {
+  const url = new URL(endpoint);
+  return url.hostname;
+}
 /**
  * Provision docker compose services in Dokploy
  * @internal Exported for testing
  */
-async function provisionServices(api, projectId, environmentId, appName, services, existingServiceIds) {
+async function provisionServices(api, projectId, environmentId, projectName, services, existingServiceIds) {
   logger$1.log(`\n🔍 provisionServices called: services=${JSON.stringify(services)}, envId=${environmentId}`);
   if (!services || !environmentId) {
     logger$1.log(" Skipping: no services or no environmentId");
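The `initializePostgresUsers` flow above leans on a `waitForPostgres` helper that is defined elsewhere in the bundle and not shown in this hunk. A minimal sketch of what such a poll-until-ready helper could look like, assuming the `pg` client and a fixed retry budget (both assumptions, not the package's actual implementation):

```ts
import { Client } from 'pg';

const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms));

// Hypothetical poll-until-ready helper; the real waitForPostgres may use a
// different retry count, delay, or readiness probe.
async function waitForPostgres(
  host: string,
  port: number,
  user: string,
  password: string,
  database: string,
  maxAttempts = 30,
): Promise<void> {
  for (let attempt = 1; attempt <= maxAttempts; attempt++) {
    const client = new Client({ host, port, user, password, database });
    try {
      await client.connect();
      await client.end();
      return; // A connection succeeded, so the external port is live.
    } catch {
      await client.end().catch(() => {}); // Ignore teardown errors while retrying.
      await sleep(2000); // The port was just enabled; the redeploy needs a moment.
    }
  }
  throw new Error(`Postgres at ${host}:${port} did not become reachable`);
}
```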
@@ -4823,9 +5266,12 @@ async function provisionServices(api, projectId, environmentId, appName, service
     else logger$1.log(` ⚠ Cached ID invalid, will create new`);
   }
   if (!postgres) {
-    const
-    const
-    const result = await api.findOrCreatePostgres(postgresName, projectId, environmentId, {
+    const databasePassword = randomBytes(16).toString("hex");
+    const databaseName = projectName.replace(/-/g, "_");
+    const result = await api.findOrCreatePostgres(postgresName, projectId, environmentId, {
+      databaseName,
+      databasePassword
+    });
     postgres = result.postgres;
     created = result.created;
     if (created) {
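The new `databaseName` derivation exists because unquoted Postgres identifiers cannot contain hyphens, so the workspace name is normalized before use:

```ts
// Grounded in the line above: hyphens in the workspace name become underscores.
const databaseName = 'my-workspace'.replace(/-/g, '_'); // => 'my_workspace'
```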
@@ -4893,12 +5339,6 @@ async function provisionServices(api, projectId, environmentId, appName, service
  */
 async function ensureDokploySetup(config$1, dockerConfig, stage, services) {
   logger$1.log("\n🔧 Checking Dokploy setup...");
-  const { readStageSecrets: readStageSecrets$1 } = await import("./storage-DNj_I11J.mjs");
-  const existingSecrets = await readStageSecrets$1(stage);
-  const existingUrls = {
-    DATABASE_URL: existingSecrets?.urls?.DATABASE_URL,
-    REDIS_URL: existingSecrets?.urls?.REDIS_URL
-  };
   let creds = await getDokployCredentials();
   if (!creds) {
     logger$1.log("\n📋 Dokploy credentials not found. Let's set them up.");
@@ -5224,6 +5664,8 @@ async function workspaceDeployCommand(workspace, options) {
     postgres: services.db !== void 0 && services.db !== false,
     redis: services.cache !== void 0 && services.cache !== false
   };
+  let provisionedPostgres = null;
+  let provisionedRedis = null;
   if (dockerServices.postgres || dockerServices.redis) {
     logger$1.log("\n🔧 Provisioning infrastructure services...");
     const existingServiceIds = {
@@ -5232,17 +5674,64 @@ async function workspaceDeployCommand(workspace, options) {
     };
     const provisionResult = await provisionServices(api, project.projectId, environmentId, workspace.name, dockerServices, existingServiceIds);
     if (provisionResult?.serviceIds) {
-      if (provisionResult.serviceIds.postgresId)
-
+      if (provisionResult.serviceIds.postgresId) {
+        setPostgresId(state, provisionResult.serviceIds.postgresId);
+        provisionedPostgres = await api.getPostgres(provisionResult.serviceIds.postgresId);
+      }
+      if (provisionResult.serviceIds.redisId) {
+        setRedisId(state, provisionResult.serviceIds.redisId);
+        provisionedRedis = await api.getRedis(provisionResult.serviceIds.redisId);
+      }
     }
   }
   const backendApps = appsToDeployNames.filter((name$1) => workspace.apps[name$1].type === "backend");
   const frontendApps = appsToDeployNames.filter((name$1) => workspace.apps[name$1].type === "frontend");
+  const perAppDbCredentials = /* @__PURE__ */ new Map();
+  if (provisionedPostgres && backendApps.length > 0) {
+    const appsNeedingDb = backendApps.filter((appName) => {
+      const requirements = sniffedApps.get(appName);
+      return requirements?.requiredEnvVars.includes("DATABASE_URL");
+    });
+    if (appsNeedingDb.length > 0) {
+      logger$1.log(`\n🔐 Setting up per-app database credentials...`);
+      logger$1.log(` Apps needing DATABASE_URL: ${appsNeedingDb.join(", ")}`);
+      const existingCredentials = getAllAppCredentials(state);
+      const usersToCreate = [];
+      for (const appName of appsNeedingDb) {
+        let credentials = existingCredentials[appName];
+        if (credentials) logger$1.log(` ${appName}: Using existing credentials from state`);
+        else {
+          const password = randomBytes(16).toString("hex");
+          credentials = {
+            dbUser: appName,
+            dbPassword: password
+          };
+          setAppCredentials(state, appName, credentials);
+          logger$1.log(` ${appName}: Generated new credentials`);
+        }
+        perAppDbCredentials.set(appName, credentials);
+        usersToCreate.push({
+          name: appName,
+          password: credentials.dbPassword,
+          usePublicSchema: appName === "api"
+        });
+      }
+      const serverHostname = getServerHostname(creds.endpoint);
+      await initializePostgresUsers(api, provisionedPostgres, serverHostname, usersToCreate);
+    }
+  }
   const publicUrls = {};
   const results = [];
   const dokployConfig = workspace.deploy.dokploy;
   const appHostnames = /* @__PURE__ */ new Map();
   const appDomainIds = /* @__PURE__ */ new Map();
+  const frontendUrls = [];
+  for (const appName of frontendApps) {
+    const app = workspace.apps[appName];
+    const isMainFrontend = isMainFrontendApp(appName, app, workspace.apps);
+    const hostname = resolveHost(appName, app, stage, dokployConfig, isMainFrontend);
+    frontendUrls.push(`https://${hostname}`);
+  }
   if (backendApps.length > 0) {
     logger$1.log("\n📦 PHASE 1: Deploying backend applications...");
     for (const appName of backendApps) {
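Each backend app whose sniffed requirements include `DATABASE_URL` now gets its own Postgres user, created via `initializePostgresUsers` with the schema rules described earlier. How the stored credentials become a connection string is the env resolver's job; a hypothetical illustration of that mapping (the helper name and URL shape are assumptions):

```ts
interface AppDbCredentials {
  dbUser: string;
  dbPassword: string;
}

// Hypothetical: compose a per-app connection string from stored credentials
// and the provisioned service's internal host, port, and database.
function buildDatabaseUrl(
  creds: AppDbCredentials,
  host: string,
  port: number,
  database: string,
): string {
  const user = encodeURIComponent(creds.dbUser);
  const password = encodeURIComponent(creds.dbPassword);
  return `postgres://${user}:${password}@${host}:${port}/${database}`;
}

// buildDatabaseUrl({ dbUser: 'auth', dbPassword: 'f3a1' }, 'my-postgres', 5432, 'my_workspace')
//   => "postgres://auth:f3a1@my-postgres:5432/my_workspace"
// The "auth" user then lands in its own schema via the search_path that
// initializePostgresUsers configured.
```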
@@ -5286,14 +5775,46 @@ async function workspaceDeployCommand(workspace, options) {
         },
         buildArgs
       });
-      const
-
+      const backendHost = resolveHost(appName, app, stage, dokployConfig, false);
+      const envContext = {
+        app,
+        appName,
+        stage,
+        state,
+        appCredentials: perAppDbCredentials.get(appName),
+        postgres: provisionedPostgres ? {
+          host: provisionedPostgres.appName,
+          port: 5432,
+          database: provisionedPostgres.databaseName
+        } : void 0,
+        redis: provisionedRedis ? {
+          host: provisionedRedis.appName,
+          port: 6379,
+          password: provisionedRedis.databasePassword
+        } : void 0,
+        appHostname: backendHost,
+        frontendUrls,
+        userSecrets: stageSecrets ?? void 0,
+        masterKey: appSecrets?.masterKey
+      };
+      const appRequirements = sniffedApps.get(appName);
+      const requiredVars = appRequirements?.requiredEnvVars ?? [];
+      const { valid, missing, resolved } = validateEnvVars(requiredVars, envContext);
+      if (!valid) throw new Error(formatMissingVarsError(appName, missing, stage));
+      const envVars = Object.entries(resolved).map(([key, value]) => `${key}=${value}`);
+      if (Object.keys(resolved).length > 0) logger$1.log(` Resolved ${Object.keys(resolved).length} env vars: ${Object.keys(resolved).join(", ")}`);
       await api.saveDockerProvider(application.applicationId, imageRef, { registryId });
       await api.saveApplicationEnv(application.applicationId, envVars.join("\n"));
       logger$1.log(` Deploying to Dokploy...`);
       await api.deployApplication(application.applicationId);
-      const
-
+      const existingDomains = await api.getDomainsByApplicationId(application.applicationId);
+      const existingDomain = existingDomains.find((d) => d.host === backendHost);
+      if (existingDomain) {
+        appHostnames.set(appName, backendHost);
+        appDomainIds.set(appName, existingDomain.domainId);
+        publicUrls[appName] = `https://${backendHost}`;
+        logger$1.log(` ✓ Domain: https://${backendHost} (existing)`);
+      } else try {
         const domain = await api.createDomain({
           host: backendHost,
           port: app.port,
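The validation step above fails the deploy before anything ships if a sniffed requirement cannot be resolved. The result shape is implied by the destructuring (assumed from usage here, not from the module's declared types):

```ts
// Shape implied by `const { valid, missing, resolved } = validateEnvVars(...)`.
interface EnvValidationResult {
  valid: boolean;                    // false when any required var is unresolved
  missing: string[];                 // names reported via formatMissingVarsError
  resolved: Record<string, string>;  // serialized to KEY=value lines for the app env
}
```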
@@ -5303,18 +5824,13 @@ async function workspaceDeployCommand(workspace, options) {
         });
         appHostnames.set(appName, backendHost);
         appDomainIds.set(appName, domain.domainId);
-
-
-        logger$1.log(` ✓ Domain: ${publicUrl}`);
+        publicUrls[appName] = `https://${backendHost}`;
+        logger$1.log(` ✓ Domain: https://${backendHost} (created)`);
       } catch (domainError) {
+        const message = domainError instanceof Error ? domainError.message : "Unknown error";
+        logger$1.log(` ⚠ Domain creation failed: ${message}`);
         appHostnames.set(appName, backendHost);
-        try {
-          const existingDomains = await api.getDomainsByApplicationId(application.applicationId);
-          const matchingDomain = existingDomains.find((d) => d.host === backendHost);
-          if (matchingDomain) appDomainIds.set(appName, matchingDomain.domainId);
-        } catch {}
         publicUrls[appName] = `https://${backendHost}`;
-        logger$1.log(` ℹ Domain already configured: https://${backendHost}`);
       }
       results.push({
         appName,
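The backend branch above, and the frontend branch in the hunks that follow, now look up existing domains before calling `createDomain`, rather than creating first and recovering inside the catch block. Factored out, the pattern is roughly this (the helper and the minimal payload are assumptions; the real `createDomain` call passes more fields than shown):

```ts
// Sketch of the check-then-create pattern, assuming only the two API methods
// visible in the diff; the returned domainId mirrors the appDomainIds usage above.
async function ensureDomain(
  api: {
    getDomainsByApplicationId(id: string): Promise<Array<{ host: string; domainId: string }>>;
    createDomain(input: { host: string; port: number; applicationId: string }): Promise<{ domainId: string }>;
  },
  applicationId: string,
  host: string,
  port: number,
): Promise<string> {
  const existing = (await api.getDomainsByApplicationId(applicationId)).find((d) => d.host === host);
  if (existing) return existing.domainId; // Reuse instead of tripping a duplicate error.
  const created = await api.createDomain({ host, port, applicationId });
  return created.domainId;
}
```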
@@ -5383,7 +5899,14 @@ async function workspaceDeployCommand(workspace, options) {
       await api.deployApplication(application.applicationId);
       const isMainFrontend = isMainFrontendApp(appName, app, workspace.apps);
       const frontendHost = resolveHost(appName, app, stage, dokployConfig, isMainFrontend);
-
+      const existingFrontendDomains = await api.getDomainsByApplicationId(application.applicationId);
+      const existingFrontendDomain = existingFrontendDomains.find((d) => d.host === frontendHost);
+      if (existingFrontendDomain) {
+        appHostnames.set(appName, frontendHost);
+        appDomainIds.set(appName, existingFrontendDomain.domainId);
+        publicUrls[appName] = `https://${frontendHost}`;
+        logger$1.log(` ✓ Domain: https://${frontendHost} (existing)`);
+      } else try {
       const domain = await api.createDomain({
         host: frontendHost,
         port: app.port,
@@ -5393,18 +5916,13 @@ async function workspaceDeployCommand(workspace, options) {
       });
       appHostnames.set(appName, frontendHost);
       appDomainIds.set(appName, domain.domainId);
-
-
-      logger$1.log(` ✓ Domain: ${publicUrl}`);
+      publicUrls[appName] = `https://${frontendHost}`;
+      logger$1.log(` ✓ Domain: https://${frontendHost} (created)`);
     } catch (domainError) {
+      const message = domainError instanceof Error ? domainError.message : "Unknown error";
+      logger$1.log(` ⚠ Domain creation failed: ${message}`);
       appHostnames.set(appName, frontendHost);
-      try {
-        const existingDomains = await api.getDomainsByApplicationId(application.applicationId);
-        const matchingDomain = existingDomains.find((d) => d.host === frontendHost);
-        if (matchingDomain) appDomainIds.set(appName, matchingDomain.domainId);
-      } catch {}
       publicUrls[appName] = `https://${frontendHost}`;
-      logger$1.log(` ℹ Domain already configured: https://${frontendHost}`);
     }
     results.push({
       appName,
@@ -5432,6 +5950,10 @@ async function workspaceDeployCommand(workspace, options) {
   const dnsConfig = workspace.deploy.dns;
   if (dnsConfig && appHostnames.size > 0) {
     const dnsResult = await orchestrateDns(appHostnames, dnsConfig, creds.endpoint);
+    if (dnsResult?.serverIp && appHostnames.size > 0) {
+      await verifyDnsRecords(appHostnames, dnsResult.serverIp, state);
+      await writeStageState(workspace.root, stage, state);
+    }
     if (dnsResult?.success && appHostnames.size > 0) {
       logger$1.log("\n🔒 Validating domains for SSL certificates...");
       for (const [appName, hostname] of appHostnames) try {
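The new step verifies that each hostname actually resolves to the Dokploy server before SSL validation runs, then persists the outcome in stage state. A minimal sketch of one such check using Node's resolver (the real `verifyDnsRecords` may retry, batch, and record more detail in state):

```ts
import { resolve4 } from 'node:dns/promises';

// Hypothetical single-host check: does the A record point at the server IP?
async function hostPointsTo(hostname: string, serverIp: string): Promise<boolean> {
  try {
    const addresses = await resolve4(hostname);
    return addresses.includes(serverIp);
  } catch {
    return false; // NXDOMAIN, SERVFAIL, or record not propagated yet.
  }
}
```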
@@ -5732,10 +6254,10 @@ const GEEKMIDAS_VERSIONS = {
   "@geekmidas/cli": CLI_VERSION,
   "@geekmidas/client": "~0.5.0",
   "@geekmidas/cloud": "~0.2.0",
-  "@geekmidas/constructs": "~0.
+  "@geekmidas/constructs": "~0.8.0",
   "@geekmidas/db": "~0.3.0",
   "@geekmidas/emailkit": "~0.2.0",
-  "@geekmidas/envkit": "~0.
+  "@geekmidas/envkit": "~0.7.0",
   "@geekmidas/errors": "~0.1.0",
   "@geekmidas/events": "~0.2.0",
   "@geekmidas/logger": "~0.4.0",
@@ -5744,7 +6266,7 @@ const GEEKMIDAS_VERSIONS = {
   "@geekmidas/services": "~0.2.0",
   "@geekmidas/storage": "~0.1.0",
   "@geekmidas/studio": "~0.4.0",
-  "@geekmidas/telescope": "~0.
+  "@geekmidas/telescope": "~0.6.0",
   "@geekmidas/testkit": "~0.6.0"
 };
